diff --git a/.github/workflows/backport_branches.yml b/.github/workflows/backport_branches.yml index c52a58eac8a..7d7efc51fa9 100644 --- a/.github/workflows/backport_branches.yml +++ b/.github/workflows/backport_branches.yml @@ -512,6 +512,75 @@ jobs: docker ps --quiet | xargs --no-run-if-empty docker kill ||: docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: sudo rm -fr "$TEMP_PATH" +############################################################################################ +#################################### INSTALL PACKAGES ###################################### +############################################################################################ + InstallPackagesTestRelease: + needs: [BuilderDebRelease] + runs-on: [self-hosted, style-checker] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/test_install + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Install packages (amd64) + REPO_COPY=${{runner.temp}}/test_install/ClickHouse + EOF + - name: Download json reports + uses: actions/download-artifact@v3 + with: + path: ${{ env.REPORTS_PATH }} + - name: Check out repository code + uses: ClickHouse/checkout@v1 + with: + clear-repository: true + - name: Test packages installation + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 install_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker ps --quiet | xargs --no-run-if-empty docker kill ||: + docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: + sudo rm -fr "$TEMP_PATH" + InstallPackagesTestAarch64: + needs: [BuilderDebRelease] + runs-on: [self-hosted, style-checker-aarch64] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/test_install + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Install packages (arm64) + REPO_COPY=${{runner.temp}}/test_install/ClickHouse + EOF + - name: Download json reports + uses: actions/download-artifact@v3 + with: + path: ${{ env.REPORTS_PATH }} + - name: Check out repository code + uses: ClickHouse/checkout@v1 + with: + clear-repository: true + - name: Test packages installation + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 install_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker ps --quiet | xargs --no-run-if-empty docker kill ||: + docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: + sudo rm -fr "$TEMP_PATH" ############################################################################################## ########################### FUNCTIONAl STATELESS TESTS ####################################### ############################################################################################## diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index cd4ee482702..6e728b6bfb0 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -946,6 +946,75 @@ jobs: run: | cd "$GITHUB_WORKSPACE/tests/ci" python3 mark_release_ready.py +############################################################################################ +#################################### INSTALL PACKAGES ###################################### +############################################################################################ + InstallPackagesTestRelease: + needs: [BuilderDebRelease] + runs-on: [self-hosted, 
style-checker] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/test_install + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Install packages (amd64) + REPO_COPY=${{runner.temp}}/test_install/ClickHouse + EOF + - name: Download json reports + uses: actions/download-artifact@v3 + with: + path: ${{ env.REPORTS_PATH }} + - name: Check out repository code + uses: ClickHouse/checkout@v1 + with: + clear-repository: true + - name: Test packages installation + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 install_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker ps --quiet | xargs --no-run-if-empty docker kill ||: + docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: + sudo rm -fr "$TEMP_PATH" + InstallPackagesTestAarch64: + needs: [BuilderDebRelease] + runs-on: [self-hosted, style-checker-aarch64] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/test_install + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Install packages (arm64) + REPO_COPY=${{runner.temp}}/test_install/ClickHouse + EOF + - name: Download json reports + uses: actions/download-artifact@v3 + with: + path: ${{ env.REPORTS_PATH }} + - name: Check out repository code + uses: ClickHouse/checkout@v1 + with: + clear-repository: true + - name: Test packages installation + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 install_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker ps --quiet | xargs --no-run-if-empty docker kill ||: + docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: + sudo rm -fr "$TEMP_PATH" ############################################################################################## ########################### FUNCTIONAl STATELESS TESTS ####################################### ############################################################################################## diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index c677ec4bf5c..1a46cf9a3d4 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -984,6 +984,75 @@ jobs: docker ps --quiet | xargs --no-run-if-empty docker kill ||: docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: sudo rm -fr "$TEMP_PATH" +############################################################################################ +#################################### INSTALL PACKAGES ###################################### +############################################################################################ + InstallPackagesTestRelease: + needs: [BuilderDebRelease] + runs-on: [self-hosted, style-checker] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/test_install + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Install packages (amd64) + REPO_COPY=${{runner.temp}}/test_install/ClickHouse + EOF + - name: Download json reports + uses: actions/download-artifact@v3 + with: + path: ${{ env.REPORTS_PATH }} + - name: Check out repository code + uses: ClickHouse/checkout@v1 + with: + clear-repository: true + - name: Test packages installation + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 
install_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker ps --quiet | xargs --no-run-if-empty docker kill ||: + docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: + sudo rm -fr "$TEMP_PATH" + InstallPackagesTestAarch64: + needs: [BuilderDebRelease] + runs-on: [self-hosted, style-checker-aarch64] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/test_install + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Install packages (arm64) + REPO_COPY=${{runner.temp}}/test_install/ClickHouse + EOF + - name: Download json reports + uses: actions/download-artifact@v3 + with: + path: ${{ env.REPORTS_PATH }} + - name: Check out repository code + uses: ClickHouse/checkout@v1 + with: + clear-repository: true + - name: Test packages installation + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 install_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker ps --quiet | xargs --no-run-if-empty docker kill ||: + docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: + sudo rm -fr "$TEMP_PATH" ############################################################################################## ########################### FUNCTIONAl STATELESS TESTS ####################################### ############################################################################################## @@ -2813,6 +2882,217 @@ jobs: docker ps --quiet | xargs --no-run-if-empty docker kill ||: docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: sudo rm -fr "$TEMP_PATH" + # Parallel replicas + FunctionalStatefulTestDebugParallelReplicas: + needs: [BuilderDebDebug] + runs-on: [self-hosted, func-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stateful_debug + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stateful tests (debug, ParallelReplicas) + REPO_COPY=${{runner.temp}}/stateful_debug/ClickHouse + KILL_TIMEOUT=3600 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + - name: Cleanup + if: always() + run: | + docker ps --quiet | xargs --no-run-if-empty docker kill ||: + docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: + sudo rm -fr "$TEMP_PATH" + FunctionalStatefulTestUBsanParallelReplicas: + needs: [BuilderDebUBsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stateful_ubsan + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stateful tests (ubsan, ParallelReplicas) + REPO_COPY=${{runner.temp}}/stateful_ubsan/ClickHouse + KILL_TIMEOUT=3600 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + 
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + - name: Cleanup + if: always() + run: | + docker ps --quiet | xargs --no-run-if-empty docker kill ||: + docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: + sudo rm -fr "$TEMP_PATH" + FunctionalStatefulTestMsanParallelReplicas: + needs: [BuilderDebMsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stateful_msan + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stateful tests (msan, ParallelReplicas) + REPO_COPY=${{runner.temp}}/stateful_msan/ClickHouse + KILL_TIMEOUT=3600 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + - name: Cleanup + if: always() + run: | + docker ps --quiet | xargs --no-run-if-empty docker kill ||: + docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: + sudo rm -fr "$TEMP_PATH" + FunctionalStatefulTestTsanParallelReplicas: + needs: [BuilderDebTsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stateful_tsan + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stateful tests (tsan, ParallelReplicas) + REPO_COPY=${{runner.temp}}/stateful_tsan/ClickHouse + KILL_TIMEOUT=3600 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + - name: Cleanup + if: always() + run: | + docker ps --quiet | xargs --no-run-if-empty docker kill ||: + docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: + sudo rm -fr "$TEMP_PATH" + FunctionalStatefulTestAsanParallelReplicas: + needs: [BuilderDebAsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stateful_debug + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stateful tests (asan, ParallelReplicas) + REPO_COPY=${{runner.temp}}/stateful_debug/ClickHouse + KILL_TIMEOUT=3600 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + - name: Cleanup + if: always() + run: | + docker ps --quiet | xargs --no-run-if-empty docker kill ||: + docker ps --all --quiet | xargs 
--no-run-if-empty docker rm -f ||: + sudo rm -fr "$TEMP_PATH" + FunctionalStatefulTestReleaseParallelReplicas: + needs: [BuilderDebRelease] + runs-on: [self-hosted, func-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stateful_release + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stateful tests (release, ParallelReplicas) + REPO_COPY=${{runner.temp}}/stateful_release/ClickHouse + KILL_TIMEOUT=3600 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + - name: Cleanup + if: always() + run: | + docker ps --quiet | xargs --no-run-if-empty docker kill ||: + docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: + sudo rm -fr "$TEMP_PATH" ############################################################################################## ######################################### STRESS TESTS ####################################### ############################################################################################## diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index c6555fd9f91..2ef05fe989b 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -15,7 +15,8 @@ jobs: - name: Deploy packages and assets run: | GITHUB_TAG="${GITHUB_REF#refs/tags/}" - curl '${{ secrets.PACKAGES_RELEASE_URL }}/release/'"${GITHUB_TAG}"'?binary=binary_darwin&binary=binary_darwin_aarch64&sync=true' -d '' + curl --silent --data '' \ + '${{ secrets.PACKAGES_RELEASE_URL }}/release/'"${GITHUB_TAG}"'?binary=binary_darwin&binary=binary_darwin_aarch64&sync=true' ############################################################################################ ##################################### Docker images ####################################### ############################################################################################ diff --git a/.github/workflows/release_branches.yml b/.github/workflows/release_branches.yml index 251087f33a5..95ef60686a7 100644 --- a/.github/workflows/release_branches.yml +++ b/.github/workflows/release_branches.yml @@ -604,6 +604,75 @@ jobs: run: | cd "$GITHUB_WORKSPACE/tests/ci" python3 mark_release_ready.py +############################################################################################ +#################################### INSTALL PACKAGES ###################################### +############################################################################################ + InstallPackagesTestRelease: + needs: [BuilderDebRelease] + runs-on: [self-hosted, style-checker] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/test_install + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Install packages (amd64) + REPO_COPY=${{runner.temp}}/test_install/ClickHouse + EOF + - name: Download json reports + uses: actions/download-artifact@v3 + with: + path: ${{ env.REPORTS_PATH }} + - name: Check out repository code + uses: ClickHouse/checkout@v1 + with: + clear-repository: true + - name: Test packages installation + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p 
"$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 install_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker ps --quiet | xargs --no-run-if-empty docker kill ||: + docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: + sudo rm -fr "$TEMP_PATH" + InstallPackagesTestAarch64: + needs: [BuilderDebRelease] + runs-on: [self-hosted, style-checker-aarch64] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/test_install + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Install packages (arm64) + REPO_COPY=${{runner.temp}}/test_install/ClickHouse + EOF + - name: Download json reports + uses: actions/download-artifact@v3 + with: + path: ${{ env.REPORTS_PATH }} + - name: Check out repository code + uses: ClickHouse/checkout@v1 + with: + clear-repository: true + - name: Test packages installation + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 install_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + docker ps --quiet | xargs --no-run-if-empty docker kill ||: + docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: + sudo rm -fr "$TEMP_PATH" ############################################################################################## ########################### FUNCTIONAl STATELESS TESTS ####################################### ############################################################################################## diff --git a/.gitmodules b/.gitmodules index b4673f113b7..13b1b2035be 100644 --- a/.gitmodules +++ b/.gitmodules @@ -257,6 +257,9 @@ [submodule "contrib/qpl"] path = contrib/qpl url = https://github.com/intel/qpl +[submodule "contrib/idxd-config"] + path = contrib/idxd-config + url = https://github.com/intel/idxd-config [submodule "contrib/wyhash"] path = contrib/wyhash url = https://github.com/wangyi-fudan/wyhash @@ -330,3 +333,6 @@ [submodule "contrib/crc32-vpmsum"] path = contrib/crc32-vpmsum url = https://github.com/antonblanchard/crc32-vpmsum.git +[submodule "contrib/liburing"] + path = contrib/liburing + url = https://github.com/axboe/liburing diff --git a/CHANGELOG.md b/CHANGELOG.md index 9cb545f94e7..a89619aa7ca 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,8 @@ * Parallel quorum inserts might work incorrectly with `*MergeTree` tables created with the deprecated syntax. Therefore, parallel quorum inserts support is completely disabled for such tables. It does not affect tables created with a new syntax. [#45430](https://github.com/ClickHouse/ClickHouse/pull/45430) ([Alexander Tokmakov](https://github.com/tavplubix)). * Use the `GetObjectAttributes` request instead of the `HeadObject` request to get the size of an object in AWS S3. This change fixes handling endpoints without explicit regions after updating the AWS SDK, for example. [#45288](https://github.com/ClickHouse/ClickHouse/pull/45288) ([Vitaly Baranov](https://github.com/vitlibar)). AWS S3 and Minio are tested, but keep in mind that various S3-compatible services (GCS, R2, B2) may have subtle incompatibilities. This change also may require you to adjust the ACL to allow the `GetObjectAttributes` request. * Forbid paths in timezone names. For example, a timezone name like `/usr/share/zoneinfo/Asia/Aden` is not allowed; the IANA timezone database name like `Asia/Aden` should be used. 
[#44225](https://github.com/ClickHouse/ClickHouse/pull/44225) ([Kruglov Pavel](https://github.com/Avogar)). +* Queries combining equijoin and constant expressions (e.g., `JOIN ON t1.x = t2.x AND 1 = 1`) are forbidden due to incorrect results. [#44016](https://github.com/ClickHouse/ClickHouse/pull/44016) ([Vladimir C](https://github.com/vdimir)). + #### New Feature * Dictionary source for extracting keys by traversing regular expressions tree. It can be used for User-Agent parsing. [#40878](https://github.com/ClickHouse/ClickHouse/pull/40878) ([Vage Ogannisian](https://github.com/nooblose)). [#43858](https://github.com/ClickHouse/ClickHouse/pull/43858) ([Han Fei](https://github.com/hanfei1991)). @@ -119,7 +121,6 @@ Add settings input_format_tsv/csv/custom_detect_header that enable this behaviou * Fix possible use of an uninitialized value after executing expressions after sorting. Closes [#43386](https://github.com/ClickHouse/ClickHouse/issues/43386) [#43635](https://github.com/ClickHouse/ClickHouse/pull/43635) ([Kruglov Pavel](https://github.com/Avogar)). * Better handling of NULL in aggregate combinators, fix possible segfault/logical error while using an obscure optimization `optimize_rewrite_sum_if_to_count_if`. Closes [#43758](https://github.com/ClickHouse/ClickHouse/issues/43758). [#43813](https://github.com/ClickHouse/ClickHouse/pull/43813) ([Kruglov Pavel](https://github.com/Avogar)). * Fix CREATE USER/ROLE query settings constraints. [#43993](https://github.com/ClickHouse/ClickHouse/pull/43993) ([Nikolay Degterinsky](https://github.com/evillique)). -* Fix incorrect behavior of `JOIN ON t1.x = t2.x AND 1 = 1`, forbid such queries. [#44016](https://github.com/ClickHouse/ClickHouse/pull/44016) ([Vladimir C](https://github.com/vdimir)). * Fixed bug with non-parsable default value for `EPHEMERAL` column in table metadata. [#44026](https://github.com/ClickHouse/ClickHouse/pull/44026) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). * Fix parsing of bad version from compatibility setting. [#44224](https://github.com/ClickHouse/ClickHouse/pull/44224) ([Kruglov Pavel](https://github.com/Avogar)). * Bring interval subtraction from datetime in line with addition. [#44241](https://github.com/ClickHouse/ClickHouse/pull/44241) ([ltrk2](https://github.com/ltrk2)). diff --git a/README.md b/README.md index db1aca87ee8..bcf2643c33d 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ ClickHouse® is an open-source column-oriented database management system that a * [Tutorial](https://clickhouse.com/docs/en/getting_started/tutorial/) shows how to set up and query a small ClickHouse cluster. * [Documentation](https://clickhouse.com/docs/en/) provides more in-depth information. * [YouTube channel](https://www.youtube.com/c/ClickHouseDB) has a lot of content about ClickHouse in video format. -* [Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-rxm3rdrk-lIUmhLC3V8WTaL0TGxsOmg) and [Telegram](https://telegram.me/clickhouse_en) allow chatting with ClickHouse users in real-time. +* [Slack](https://clickhousedb.slack.com/) and [Telegram](https://telegram.me/clickhouse_en) allow chatting with ClickHouse users in real-time. * [Blog](https://clickhouse.com/blog/) contains various ClickHouse-related articles, as well as announcements and reports about events. * [Code Browser (Woboq)](https://clickhouse.com/codebrowser/ClickHouse/index.html) with syntax highlight and navigation. 
* [Code Browser (github.dev)](https://github.dev/ClickHouse/ClickHouse) with syntax highlight, powered by github.dev. diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 025ec39e7f3..21fc19c3103 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -141,6 +141,7 @@ add_contrib (simdjson-cmake simdjson) add_contrib (rapidjson-cmake rapidjson) add_contrib (fastops-cmake fastops) add_contrib (libuv-cmake libuv) +add_contrib (liburing-cmake liburing) add_contrib (amqpcpp-cmake AMQP-CPP) # requires: libuv add_contrib (cassandra-cmake cassandra) # requires: libuv diff --git a/contrib/NuRaft b/contrib/NuRaft index 545b8c810a9..b56784be1ae 160000 --- a/contrib/NuRaft +++ b/contrib/NuRaft @@ -1 +1 @@ -Subproject commit 545b8c810a956b2efdc116e86be219af7e83d68a +Subproject commit b56784be1aec568fb72aff47f281097c017623cb diff --git a/contrib/aws b/contrib/aws index 4a12641211d..06a6610e6fb 160000 --- a/contrib/aws +++ b/contrib/aws @@ -1 +1 @@ -Subproject commit 4a12641211d4dbc8e2fdb2dd0f1eea0927db9252 +Subproject commit 06a6610e6fb3385e22ad85014a67aa307825ffb1 diff --git a/contrib/azure b/contrib/azure index ea8c3044f43..096049bf24f 160000 --- a/contrib/azure +++ b/contrib/azure @@ -1 +1 @@ -Subproject commit ea8c3044f43f5afa7016d2d580ed201f495d7e94 +Subproject commit 096049bf24fffafcaccc132b9367694532716731 diff --git a/contrib/idxd-config b/contrib/idxd-config new file mode 160000 index 00000000000..f6605c41a73 --- /dev/null +++ b/contrib/idxd-config @@ -0,0 +1 @@ +Subproject commit f6605c41a735e3fdfef2d2d18655a33af6490b99 diff --git a/contrib/liburing b/contrib/liburing new file mode 160000 index 00000000000..f5a48392c4e --- /dev/null +++ b/contrib/liburing @@ -0,0 +1 @@ +Subproject commit f5a48392c4ea33f222cbebeb2e2fc31620162949 diff --git a/contrib/liburing-cmake/CMakeLists.txt b/contrib/liburing-cmake/CMakeLists.txt new file mode 100644 index 00000000000..02bc116c660 --- /dev/null +++ b/contrib/liburing-cmake/CMakeLists.txt @@ -0,0 +1,53 @@ +set (ENABLE_LIBURING_DEFAULT ${ENABLE_LIBRARIES}) + +if (NOT OS_LINUX) + set (ENABLE_LIBURING_DEFAULT OFF) +endif () + +option (ENABLE_LIBURING "Enable liburing" ${ENABLE_LIBURING_DEFAULT}) + +if (NOT ENABLE_LIBURING) + message (STATUS "Not using liburing") + return () +endif () + +set (LIBURING_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/liburing/src/include") +set (LIBURING_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/liburing/src") + +set (SRCS + "${LIBURING_SOURCE_DIR}/queue.c" + "${LIBURING_SOURCE_DIR}/register.c" + "${LIBURING_SOURCE_DIR}/setup.c" + "${LIBURING_SOURCE_DIR}/syscall.c" + "${LIBURING_SOURCE_DIR}/version.c" +) + +add_compile_definitions (_GNU_SOURCE) +add_compile_definitions (LIBURING_INTERNAL) + +set (LIBURING_COMPAT_INCLUDE_DIR "${ClickHouse_BINARY_DIR}/contrib/liburing/src/include-compat") +set (LIBURING_COMPAT_HEADER "${LIBURING_COMPAT_INCLUDE_DIR}/liburing/compat.h") + +set (LIBURING_CONFIG_HAS_KERNEL_RWF_T FALSE) +set (LIBURING_CONFIG_HAS_KERNEL_TIMESPEC FALSE) +set (LIBURING_CONFIG_HAS_OPEN_HOW FALSE) +set (LIBURING_CONFIG_HAS_STATX FALSE) +set (LIBURING_CONFIG_HAS_GLIBC_STATX FALSE) + +configure_file (compat.h.in ${LIBURING_COMPAT_HEADER}) + +set (LIBURING_GENERATED_INCLUDE_DIR "${ClickHouse_BINARY_DIR}/contrib/liburing/src/include") +set (LIBURING_VERSION_HEADER "${LIBURING_GENERATED_INCLUDE_DIR}/liburing/io_uring_version.h") + +file (READ "${LIBURING_SOURCE_DIR}/../liburing.spec" LIBURING_SPEC) + +string (REGEX MATCH "Version: ([0-9]+)\.([0-9]+)" _ ${LIBURING_SPEC}) +set (LIBURING_VERSION_MAJOR 
${CMAKE_MATCH_1})
+set (LIBURING_VERSION_MINOR ${CMAKE_MATCH_2})
+
+configure_file (io_uring_version.h.in ${LIBURING_VERSION_HEADER})
+
+add_library (_liburing ${SRCS})
+add_library (ch_contrib::liburing ALIAS _liburing)
+
+target_include_directories (_liburing SYSTEM PUBLIC ${LIBURING_COMPAT_INCLUDE_DIR} ${LIBURING_GENERATED_INCLUDE_DIR} "${LIBURING_SOURCE_DIR}/include")
diff --git a/contrib/liburing-cmake/compat.h.in b/contrib/liburing-cmake/compat.h.in
new file mode 100644
index 00000000000..468e529cd33
--- /dev/null
+++ b/contrib/liburing-cmake/compat.h.in
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef LIBURING_COMPAT_H
+#define LIBURING_COMPAT_H
+
+# cmakedefine LIBURING_CONFIG_HAS_KERNEL_RWF_T
+# cmakedefine LIBURING_CONFIG_HAS_KERNEL_TIMESPEC
+# cmakedefine LIBURING_CONFIG_HAS_OPEN_HOW
+# cmakedefine LIBURING_CONFIG_HAS_GLIBC_STATX
+# cmakedefine LIBURING_CONFIG_HAS_STATX
+
+#if !defined(LIBURING_CONFIG_HAS_KERNEL_RWF_T)
+typedef int __kernel_rwf_t;
+#endif
+
+#if !defined(LIBURING_CONFIG_HAS_KERNEL_TIMESPEC)
+#include <stdint.h>
+
+struct __kernel_timespec {
+    int64_t tv_sec;
+    long long tv_nsec;
+};
+
+/* <linux/time_types.h> is not available, so it can't be included */
+#define UAPI_LINUX_IO_URING_H_SKIP_LINUX_TIME_TYPES_H 1
+
+#else
+#include <linux/time_types.h>
+
+/* <linux/time_types.h> is included above and not needed again */
+#define UAPI_LINUX_IO_URING_H_SKIP_LINUX_TIME_TYPES_H 1
+
+#endif
+
+#if !defined(LIBURING_CONFIG_HAS_OPEN_HOW)
+#include <inttypes.h>
+
+struct open_how {
+    uint64_t flags;
+    uint64_t mode;
+    uint64_t resolve;
+};
+#else
+#include <linux/openat2.h>
+#endif
+
+#if !defined(LIBURING_CONFIG_HAS_GLIBC_STATX) && defined(LIBURING_CONFIG_HAS_STATX)
+#include <sys/stat.h>
+#endif
+
+#endif
diff --git a/contrib/liburing-cmake/io_uring_version.h.in b/contrib/liburing-cmake/io_uring_version.h.in
new file mode 100644
index 00000000000..3fc6132b224
--- /dev/null
+++ b/contrib/liburing-cmake/io_uring_version.h.in
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef LIBURING_VERSION_H
+#define LIBURING_VERSION_H
+
+#define IO_URING_VERSION_MAJOR ${LIBURING_VERSION_MAJOR}
+#define IO_URING_VERSION_MINOR ${LIBURING_VERSION_MINOR}
+
+#endif
diff --git a/contrib/qpl b/contrib/qpl
index becb7a1b15b..d75a29d95d8 160000
--- a/contrib/qpl
+++ b/contrib/qpl
@@ -1 +1 @@
-Subproject commit becb7a1b15bdb4845ec3721a550707ffa51d029d
+Subproject commit d75a29d95d8a548297fce3549d21020005364dc8
diff --git a/contrib/qpl-cmake/CMakeLists.txt b/contrib/qpl-cmake/CMakeLists.txt
index beef8432e7a..fc5548b0652 100644
--- a/contrib/qpl-cmake/CMakeLists.txt
+++ b/contrib/qpl-cmake/CMakeLists.txt
@@ -10,11 +10,30 @@ if (NOT ENABLE_QPL)
     return()
 endif()
+## QPL has a build dependency on libaccel-config, so libaccel-config is built from source here first.
+## libaccel-config is the utility library for controlling and configuring Intel® In-Memory Analytics Accelerator (Intel® IAA).
+set (LIBACCEL_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/idxd-config")
+set (UUID_DIR "${ClickHouse_SOURCE_DIR}/contrib/qpl-cmake")
+set (LIBACCEL_HEADER_DIR "${ClickHouse_SOURCE_DIR}/contrib/qpl-cmake/idxd-header")
+set (SRCS
+    "${LIBACCEL_SOURCE_DIR}/accfg/lib/libaccfg.c"
+    "${LIBACCEL_SOURCE_DIR}/util/log.c"
+    "${LIBACCEL_SOURCE_DIR}/util/sysfs.c"
+)
+
+add_library(accel-config ${SRCS})
+
+target_compile_options(accel-config PRIVATE "-D_GNU_SOURCE")
+
+target_include_directories(accel-config BEFORE
+    PRIVATE ${UUID_DIR}
+    PRIVATE ${LIBACCEL_HEADER_DIR}
+    PRIVATE ${LIBACCEL_SOURCE_DIR})
+
+## The QPL build starts here.
 set (QPL_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/qpl")
 set (QPL_SRC_DIR "${ClickHouse_SOURCE_DIR}/contrib/qpl/sources")
 set (QPL_BINARY_DIR "${ClickHouse_BINARY_DIR}/build/contrib/qpl")
-set (UUID_DIR "${ClickHouse_SOURCE_DIR}/contrib/qpl-cmake")
-
 set (EFFICIENT_WAIT OFF)
 set (BLOCK_ON_FAULT ON)
 set (LOG_HW_INIT OFF)
@@ -315,13 +334,8 @@ target_compile_definitions(_qpl
     PRIVATE -DQPL_BADARG_CHECK
     PUBLIC -DENABLE_QPL_COMPRESSION)
-find_library(LIBACCEL accel-config)
-if(NOT LIBACCEL)
-    message(FATAL_ERROR "Please install QPL dependency library:libaccel-config from https://github.com/intel/idxd-config")
-endif()
-
 target_link_libraries(_qpl
-    PRIVATE ${LIBACCEL}
+    PRIVATE accel-config
     PRIVATE ${CMAKE_DL_LIBS})
 
 add_library (ch_contrib::qpl ALIAS _qpl)
diff --git a/contrib/qpl-cmake/idxd-header/config.h b/contrib/qpl-cmake/idxd-header/config.h
new file mode 100644
index 00000000000..f03b0eac0b0
--- /dev/null
+++ b/contrib/qpl-cmake/idxd-header/config.h
@@ -0,0 +1,159 @@
+/* config.h. Generated from config.h.in by configure. */
+/* config.h.in. Generated from configure.ac by autoheader. */
+
+/* Define if building universal (internal helper macro) */
+/* #undef AC_APPLE_UNIVERSAL_BUILD */
+
+/* Debug messages. */
+/* #undef ENABLE_DEBUG */
+
+/* Documentation / man pages. */
+/* #define ENABLE_DOCS */
+
+/* System logging. */
+#define ENABLE_LOGGING 1
+
+/* accfg test support */
+/* #undef ENABLE_TEST */
+
+/* Define to 1 if big-endian-arch */
+/* #undef HAVE_BIG_ENDIAN */
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#define HAVE_DLFCN_H 1
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#define HAVE_INTTYPES_H 1
+
+/* Define to 1 if you have the <linux/version.h> header file. */
+#define HAVE_LINUX_VERSION_H 1
+
+/* Define to 1 if little-endian-arch */
+#define HAVE_LITTLE_ENDIAN 1
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the `secure_getenv' function. */
+#define HAVE_SECURE_GETENV 1
+
+/* Define to 1 if you have statement expressions. */
+#define HAVE_STATEMENT_EXPR 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#define HAVE_STDINT_H 1
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define HAVE_SYS_TYPES_H 1
+
+/* Define to 1 if typeof works with your compiler. */
+#define HAVE_TYPEOF 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define HAVE_UNISTD_H 1
+
+/* Define to 1 if using libuuid */
+#define HAVE_UUID 1
+
+/* Define to 1 if you have the `__secure_getenv' function. */
+/* #undef HAVE___SECURE_GETENV */
+
+/* Define to the sub-directory where libtool stores uninstalled libraries. */
+#define LT_OBJDIR ".libs/"
+
+/* Name of package */
+#define PACKAGE "accel-config"
+
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT "linux-dsa@lists.01.org"
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME "accel-config"
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING "accel-config 3.5.2.gitf6605c41"
+
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME "accel-config"
+
+/* Define to the home page for this package. 
*/ +#define PACKAGE_URL "https://github.com/xxx/accel-config" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION "3.5.2.gitf6605c41" + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Enable extensions on AIX 3, Interix. */ +#ifndef _ALL_SOURCE +# define _ALL_SOURCE 1 +#endif +/* Enable GNU extensions on systems that have them. */ +#ifndef _GNU_SOURCE +# define _GNU_SOURCE 1 +#endif +/* Enable threading extensions on Solaris. */ +#ifndef _POSIX_PTHREAD_SEMANTICS +# define _POSIX_PTHREAD_SEMANTICS 1 +#endif +/* Enable extensions on HP NonStop. */ +#ifndef _TANDEM_SOURCE +# define _TANDEM_SOURCE 1 +#endif +/* Enable general extensions on Solaris. */ +#ifndef __EXTENSIONS__ +# define __EXTENSIONS__ 1 +#endif + + +/* Version number of package */ +#define VERSION "3.5.2.gitf6605c41" + +/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most + significant byte first (like Motorola and SPARC, unlike Intel). */ +#if defined AC_APPLE_UNIVERSAL_BUILD +# if defined __BIG_ENDIAN__ +# define WORDS_BIGENDIAN 1 +# endif +#else +# ifndef WORDS_BIGENDIAN +/* # undef WORDS_BIGENDIAN */ +# endif +#endif + +/* Enable large inode numbers on Mac OS X 10.5. */ +#ifndef _DARWIN_USE_64_BIT_INODE +# define _DARWIN_USE_64_BIT_INODE 1 +#endif + +/* Number of bits in a file offset, on hosts where this is settable. */ +/* #undef _FILE_OFFSET_BITS */ + +/* Define for large files, on AIX-style hosts. */ +/* #undef _LARGE_FILES */ + +/* Define to 1 if on MINIX. */ +/* #undef _MINIX */ + +/* Define to 2 if the system does not provide POSIX.1 features except with + this defined. */ +/* #undef _POSIX_1_SOURCE */ + +/* Define to 1 if you need to in order for `stat' and other things to work. */ +/* #undef _POSIX_SOURCE */ + +/* Define to __typeof__ if your compiler spells it that way. 
*/ +/* #undef typeof */ diff --git a/contrib/snappy-cmake/CMakeLists.txt b/contrib/snappy-cmake/CMakeLists.txt index 0997ea207e0..50cdc8732a1 100644 --- a/contrib/snappy-cmake/CMakeLists.txt +++ b/contrib/snappy-cmake/CMakeLists.txt @@ -1,6 +1,10 @@ set (SOURCE_DIR "${CMAKE_SOURCE_DIR}/contrib/snappy") -set (SNAPPY_IS_BIG_ENDIAN 0) +if (ARCH_S390X) + set (SNAPPY_IS_BIG_ENDIAN 1) +else () + set (SNAPPY_IS_BIG_ENDIAN 0) +endif() set (HAVE_BYTESWAP_H 1) set (HAVE_SYS_MMAN_H 1) diff --git a/docker/docs/builder/Dockerfile b/docker/docs/builder/Dockerfile index 76bbea59e60..5946687dbef 100644 --- a/docker/docs/builder/Dockerfile +++ b/docker/docs/builder/Dockerfile @@ -21,5 +21,3 @@ RUN yarn config set registry https://registry.npmjs.org \ COPY run.sh /run.sh ENTRYPOINT ["/run.sh"] - -CMD ["yarn", "build"] diff --git a/docker/docs/builder/run.sh b/docker/docs/builder/run.sh index 87e6218547f..01c15cb4b0f 100755 --- a/docker/docs/builder/run.sh +++ b/docker/docs/builder/run.sh @@ -25,7 +25,8 @@ done sed -i '/onBrokenMarkdownLinks:/ s/ignore/error/g' docusaurus.config.js if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then - export CI=true + export CI=true + yarn install exec yarn build "$@" fi diff --git a/docker/images.json b/docker/images.json index 323f53c0ae6..bd63aea24ba 100644 --- a/docker/images.json +++ b/docker/images.json @@ -134,6 +134,14 @@ "name": "clickhouse/keeper-jepsen-test", "dependent": [] }, + "docker/test/install/deb": { + "name": "clickhouse/install-deb-test", + "dependent": [] + }, + "docker/test/install/rpm": { + "name": "clickhouse/install-rpm-test", + "dependent": [] + }, "docker/docs/builder": { "name": "clickhouse/docs-builder", "dependent": [ diff --git a/docker/server/Dockerfile.alpine b/docker/server/Dockerfile.alpine index 522fd354393..8a73d72b3a5 100644 --- a/docker/server/Dockerfile.alpine +++ b/docker/server/Dockerfile.alpine @@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="23.1.2.9" +ARG VERSION="23.1.3.5" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" # user/group precreated explicitly with fixed uid/gid on purpose. 
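Note on the install-test images registered in docker/images.json above: in CI they are driven by tests/ci/install_check.py (the InstallPackagesTest* jobs earlier in this patch), and their Dockerfiles, added below, boot systemd as PID 1 so that the packages' unit files are exercised the way a real installation would be. As a rough illustration, here is a hedged sketch of poking at the deb image by hand; the container flags and the ./packages path are assumptions typical for running systemd inside Docker, not the exact CI invocation:

```bash
# Hedged sketch, not part of this patch: manually exercising clickhouse/install-deb-test.
# Assumptions: the image was built from docker/test/install/deb/Dockerfile, freshly built
# .deb packages sit in ./packages, and the host cgroup setup is one systemd accepts.
docker run --detach --name install-check \
    --cap-add=SYS_ADMIN \
    --volume=/sys/fs/cgroup:/sys/fs/cgroup:rw \
    --volume="$PWD/packages:/packages:ro" \
    clickhouse/install-deb-test

# Install the packages and start the server through systemd, as a user would.
docker exec install-check bash -c 'dpkg -i /packages/*.deb'
docker exec install-check systemctl start clickhouse-server
docker exec install-check clickhouse-client --query 'SELECT version()'

# STOPSIGNAL SIGRTMIN+3 in the Dockerfile asks systemd for a clean shutdown on stop.
docker stop install-check
```
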
diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu index 434b4c3bff0..ba2d7430e06 100644 --- a/docker/server/Dockerfile.ubuntu +++ b/docker/server/Dockerfile.ubuntu @@ -21,7 +21,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list ARG REPO_CHANNEL="stable" ARG REPOSITORY="deb https://packages.clickhouse.com/deb ${REPO_CHANNEL} main" -ARG VERSION="23.1.2.9" +ARG VERSION="23.1.3.5" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" # set non-empty deb_location_url url to create a docker image diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index 3cd50b06d5a..9c8b40df02b 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -139,6 +139,7 @@ function clone_submodules contrib/morton-nd contrib/xxHash contrib/simdjson + contrib/liburing ) git submodule sync @@ -161,6 +162,7 @@ function run_cmake "-DENABLE_NURAFT=1" "-DENABLE_SIMDJSON=1" "-DENABLE_JEMALLOC=1" + "-DENABLE_LIBURING=1" ) export CCACHE_DIR="$FASTTEST_WORKSPACE/ccache" @@ -229,6 +231,7 @@ function run_tests --hung-check --fast-tests-only --no-random-settings + --no-random-merge-tree-settings --no-long --testname --shard diff --git a/docker/test/install/deb/Dockerfile b/docker/test/install/deb/Dockerfile new file mode 100644 index 00000000000..9614473c69b --- /dev/null +++ b/docker/test/install/deb/Dockerfile @@ -0,0 +1,64 @@ +FROM ubuntu:22.04 + +# The Dockerfile is nicely borrowed from +# https://github.com/lionelnicolas/docker-ubuntu-systemd/blob/83aa3249146f5df264fe45353f79fc76eb1e42d7/Dockerfile + +ENV \ + DEBIAN_FRONTEND=noninteractive \ + LANG=C.UTF-8 \ + container=docker \ + init=/lib/systemd/systemd + +# install systemd packages +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + systemd \ + && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists + +# configure systemd +# remove systemd 'wants' triggers +# remove everything except tmpfiles setup in sysinit target +# remove UTMP updater service +# disable /tmp mount +# fix missing BPF firewall support warning +# just for cosmetics, fix "not-found" entries while using "systemctl --all" +RUN \ + find \ + /etc/systemd/system/*.wants/* \ + /lib/systemd/system/multi-user.target.wants/* \ + /lib/systemd/system/sockets.target.wants/*initctl* \ + ! -type d \ + -delete && \ + find \ + /lib/systemd/system/sysinit.target.wants \ + ! -type d \ + ! 
-name '*systemd-tmpfiles-setup*' \ + -delete && \ + find \ + /lib/systemd \ + -name systemd-update-utmp-runlevel.service \ + -delete && \ + rm -vf /usr/share/systemd/tmp.mount && \ + sed -ri '/^IPAddressDeny/d' /lib/systemd/system/systemd-journald.service && \ + for MATCH in \ + plymouth-start.service \ + plymouth-quit-wait.service \ + syslog.socket \ + syslog.service \ + display-manager.service \ + systemd-sysusers.service \ + tmp.mount \ + systemd-udevd.service \ + ; do \ + grep -rn --binary-files=without-match ${MATCH} /lib/systemd/ | cut -d: -f1 | xargs sed -ri 's/(.*=.*)'${MATCH}'(.*)/\1\2/'; \ + done && \ + systemctl disable ondemand.service && \ + systemctl set-default multi-user.target + +VOLUME ["/run", "/run/lock"] + +STOPSIGNAL SIGRTMIN+3 + +ENTRYPOINT ["/lib/systemd/systemd"] diff --git a/docker/test/install/rpm/Dockerfile b/docker/test/install/rpm/Dockerfile new file mode 100644 index 00000000000..c55e0fe2507 --- /dev/null +++ b/docker/test/install/rpm/Dockerfile @@ -0,0 +1,55 @@ +FROM centos:8 + +# The Dockerfile is nicely borrowed from +# https://github.com/lionelnicolas/docker-ubuntu-systemd/blob/83aa3249146f5df264fe45353f79fc76eb1e42d7/Dockerfile + +ENV \ + LANG=C.UTF-8 \ + container=docker \ + init=/lib/systemd/systemd + +# configure systemd +# remove systemd 'wants' triggers +# remove everything except tmpfiles setup in sysinit target +# remove UTMP updater service +# disable /tmp mount +# fix missing BPF firewall support warning +# just for cosmetics, fix "not-found" entries while using "systemctl --all" +RUN \ + find \ + /etc/systemd/system/*.wants/ \ + /lib/systemd/system/multi-user.target.wants/ \ + /lib/systemd/system/local-fs.target.wants/ \ + /lib/systemd/system/sockets.target.wants/*initctl* \ + ! -type d \ + -delete && \ + find \ + /lib/systemd/system/sysinit.target.wants \ + ! -type d \ + ! 
-name '*systemd-tmpfiles-setup*' \ + -delete && \ + find \ + /lib/systemd \ + -name systemd-update-utmp-runlevel.service \ + -delete && \ + rm -vf /usr/share/systemd/tmp.mount && \ + sed -ri '/^IPAddressDeny/d' /lib/systemd/system/systemd-journald.service && \ + for MATCH in \ + plymouth-start.service \ + plymouth-quit-wait.service \ + syslog.socket \ + syslog.service \ + display-manager.service \ + systemd-sysusers.service \ + tmp.mount \ + systemd-udevd.service \ + ; do \ + grep -rn --binary-files=without-match ${MATCH} /lib/systemd/ | cut -d: -f1 | xargs sed -ri 's/(.*=.*)'${MATCH}'(.*)/\1\2/'; \ + done && \ + systemctl set-default multi-user.target + +VOLUME ["/run", "/run/lock"] + +STOPSIGNAL SIGRTMIN+3 + +ENTRYPOINT ["/lib/systemd/systemd"] diff --git a/docker/test/stateful/run.sh b/docker/test/stateful/run.sh index e69a85c0fca..80a43799914 100755 --- a/docker/test/stateful/run.sh +++ b/docker/test/stateful/run.sh @@ -126,13 +126,16 @@ function run_tests() fi set +e - clickhouse-test -j 2 --testname --shard --zookeeper --check-zookeeper-session --no-stateless --hung-check --print-time \ - --skip 00168_parallel_processing_on_replicas "${ADDITIONAL_OPTIONS[@]}" \ + + if [[ -n "$USE_PARALLEL_REPLICAS" ]] && [[ "$USE_PARALLEL_REPLICAS" -eq 1 ]]; then + clickhouse-test --client="clickhouse-client --use_hedged_requests=0 --allow_experimental_parallel_reading_from_replicas=1 \ + --max_parallel_replicas=100 --cluster_for_parallel_replicas='parallel_replicas'" \ + -j 2 --testname --shard --zookeeper --check-zookeeper-session --no-stateless --no-parallel-replicas --hung-check --print-time "${ADDITIONAL_OPTIONS[@]}" \ "$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt - - clickhouse-test --timeout 1200 --testname --shard --zookeeper --check-zookeeper-session --no-stateless --hung-check --print-time \ - 00168_parallel_processing_on_replicas "${ADDITIONAL_OPTIONS[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a test_output/test_result.txt - + else + clickhouse-test -j 2 --testname --shard --zookeeper --check-zookeeper-session --no-stateless --hung-check --print-time "${ADDITIONAL_OPTIONS[@]}" \ + "$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt + fi set -e } diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index fef3fc4d228..8e7d0ef55b9 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -134,9 +134,9 @@ function run_tests() set +e clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \ - --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \ - | ts '%Y-%m-%d %H:%M:%S' \ - | tee -a test_output/test_result.txt + --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \ + | ts '%Y-%m-%d %H:%M:%S' \ + | tee -a test_output/test_result.txt set -e } diff --git a/docker/test/stress/run.sh b/docker/test/stress/run.sh index c606a8049bd..8347f67fed6 100644 --- a/docker/test/stress/run.sh +++ b/docker/test/stress/run.sh @@ -11,6 +11,31 @@ set -x # core.COMM.PID-TID sysctl kernel.core_pattern='core.%e.%p-%P' +OK="\tOK\t\\N\t" +FAIL="\tFAIL\t\\N\t" + +FAILURE_CONTEXT_LINES=50 +FAILURE_CONTEXT_MAX_LINE_WIDTH=400 + +function escaped() +{ + # That's the simplest way I found to escape a string in bash. Yep, bash is the most convenient programming language. 
+ # Also limit lines width just in case (too long lines are not really useful usually) + clickhouse local -S 's String' --input-format=LineAsString -q "select substr(s, 1, $FAILURE_CONTEXT_MAX_LINE_WIDTH) + from table format CustomSeparated settings format_custom_row_after_delimiter='\\\\\\\\n'" +} +function head_escaped() +{ + head -n $FAILURE_CONTEXT_LINES $1 | escaped +} +function unts() +{ + grep -Po "[0-9][0-9]:[0-9][0-9] \K.*" +} +function trim_server_logs() +{ + head -n $FAILURE_CONTEXT_LINES "/test_output/$1" | grep -Eo " \[ [0-9]+ \] \{.*" | escaped +} function install_packages() { @@ -33,7 +58,9 @@ function configure() ln -s /usr/share/clickhouse-test/ci/get_previous_release_tag.py /usr/bin/get_previous_release_tag # avoid too slow startup - sudo cat /etc/clickhouse-server/config.d/keeper_port.xml | sed "s|100000|10000|" > /etc/clickhouse-server/config.d/keeper_port.xml.tmp + sudo cat /etc/clickhouse-server/config.d/keeper_port.xml \ + | sed "s|100000|10000|" \ + > /etc/clickhouse-server/config.d/keeper_port.xml.tmp sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml sudo chown clickhouse /etc/clickhouse-server/config.d/keeper_port.xml sudo chgrp clickhouse /etc/clickhouse-server/config.d/keeper_port.xml @@ -136,6 +163,7 @@ function stop() clickhouse stop --max-tries "$max_tries" --do-not-kill && return # We failed to stop the server with SIGTERM. Maybe it hang, let's collect stacktraces. + echo -e "Possible deadlock on shutdown (see gdb.log)$FAIL" >> /test_output/test_results.tsv kill -TERM "$(pidof gdb)" ||: sleep 5 echo "thread apply all backtrace (on stop)" >> /test_output/gdb.log @@ -151,10 +179,11 @@ function start() if [ "$counter" -gt ${1:-120} ] then echo "Cannot start clickhouse-server" - echo -e "Cannot start clickhouse-server\tFAIL" >> /test_output/test_results.tsv + rg --text ".*Application" /var/log/clickhouse-server/clickhouse-server.log > /test_output/application_errors.txt ||: + echo -e "Cannot start clickhouse-server$FAIL$(trim_server_logs application_errors.txt)" >> /test_output/test_results.tsv cat /var/log/clickhouse-server/stdout.log - tail -n1000 /var/log/clickhouse-server/stderr.log - tail -n100000 /var/log/clickhouse-server/clickhouse-server.log | rg -F -v -e ' RaftInstance:' -e ' RaftInstance' | tail -n1000 + tail -n100 /var/log/clickhouse-server/stderr.log + tail -n100000 /var/log/clickhouse-server/clickhouse-server.log | rg -F -v -e ' RaftInstance:' -e ' RaftInstance' | tail -n100 break fi # use root to match with current uid @@ -252,9 +281,92 @@ start clickhouse-client --query "SHOW TABLES FROM datasets" clickhouse-client --query "SHOW TABLES FROM test" -clickhouse-client --query "CREATE TABLE test.hits_s3 (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID 
UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'" -clickhouse-client --query "CREATE TABLE test.hits (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor 
FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'" -clickhouse-client --query "CREATE TABLE test.visits (CounterID UInt32, StartDate Date, Sign Int8, IsNew UInt8, VisitID UInt64, UserID UInt64, StartTime DateTime, Duration UInt32, UTCStartTime DateTime, PageViews Int32, Hits Int32, IsBounce UInt8, Referer String, StartURL String, RefererDomain String, StartURLDomain String, EndURL String, LinkURL String, IsDownload UInt8, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, PlaceID Int32, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), IsYandex UInt8, GoalReachesDepth Int32, GoalReachesURL Int32, GoalReachesAny Int32, SocialSourceNetworkID UInt8, SocialSourcePage String, MobilePhoneModel String, ClientEventTime DateTime, RegionID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RemoteIP UInt32, RemoteIP6 FixedString(16), IPNetworkID UInt32, SilverlightVersion3 UInt32, CodeVersion UInt32, ResolutionWidth UInt16, ResolutionHeight UInt16, UserAgentMajor UInt16, UserAgentMinor UInt16, WindowClientWidth UInt16, WindowClientHeight UInt16, SilverlightVersion2 UInt8, SilverlightVersion4 UInt16, FlashVersion3 UInt16, FlashVersion4 UInt16, ClientTimeZone Int16, OS UInt8, UserAgent UInt8, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, NetMajor UInt8, NetMinor UInt8, MobilePhone UInt8, SilverlightVersion1 UInt8, Age UInt8, Sex UInt8, Income UInt8, JavaEnable UInt8, CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, BrowserLanguage UInt16, BrowserCountry UInt16, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), Params Array(String), Goals Nested(ID UInt32, Serial UInt32, EventTime DateTime, Price Int64, OrderID String, CurrencyID UInt32), WatchIDs Array(UInt64), ParamSumPrice Int64, ParamCurrency FixedString(3), ParamCurrencyID UInt16, ClickLogID UInt64, ClickEventID Int32, ClickGoodEvent Int32, ClickEventTime DateTime, 
ClickPriorityID Int32, ClickPhraseID Int32, ClickPageID Int32, ClickPlaceID Int32, ClickTypeID Int32, ClickResourceID Int32, ClickCost UInt32, ClickClientIP UInt32, ClickDomainID UInt32, ClickURL String, ClickAttempt UInt8, ClickOrderID UInt32, ClickBannerID UInt32, ClickMarketCategoryID UInt32, ClickMarketPP UInt32, ClickMarketCategoryName String, ClickMarketPPName String, ClickAWAPSCampaignName String, ClickPageName String, ClickTargetType UInt16, ClickTargetPhraseID UInt64, ClickContextType UInt8, ClickSelectType Int8, ClickOptions String, ClickGroupBannerID Int32, OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, FirstVisit DateTime, PredLastVisit Date, LastVisit Date, TotalVisits UInt32, TraficSource Nested(ID Int8, SearchEngineID UInt16, AdvEngineID UInt8, PlaceID UInt16, SocialSourceNetworkID UInt8, Domain String, SearchPhrase String, SocialSourcePage String), Attendance FixedString(16), CLID UInt32, YCLID UInt64, NormalizedRefererHash UInt64, SearchPhraseHash UInt64, RefererDomainHash UInt64, NormalizedStartURLHash UInt64, StartURLDomainHash UInt64, NormalizedEndURLHash UInt64, TopLevelDomain UInt64, URLScheme UInt64, OpenstatServiceNameHash UInt64, OpenstatCampaignIDHash UInt64, OpenstatAdIDHash UInt64, OpenstatSourceIDHash UInt64, UTMSourceHash UInt64, UTMMediumHash UInt64, UTMCampaignHash UInt64, UTMContentHash UInt64, UTMTermHash UInt64, FromHash UInt64, WebVisorEnabled UInt8, WebVisorActivity UInt32, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), Market Nested(Type UInt8, GoalID UInt32, OrderID String, OrderPrice Int64, PP UInt32, DirectPlaceID UInt32, DirectOrderID UInt32, DirectBannerID UInt32, GoodID String, GoodName String, GoodQuantity Int32, GoodPrice Int64), IslandID FixedString(16)) ENGINE = CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'" +clickhouse-client --query "CREATE TABLE test.hits_s3 (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, + EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, + UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, + Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), + RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, + FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), + CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, + IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, + WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, + SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, + IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, + IsParameter 
UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, + Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, + RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), + BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, + DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, + RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, + LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, + RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, + ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, + OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, + UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, + URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, + ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), + IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) + ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'" +clickhouse-client --query "CREATE TABLE test.hits (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, + EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, + UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, + RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), + URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, + FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, + UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, + MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, + SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, + ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, + SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, + FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, + HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, + GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, + HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, + HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, 
ResponseStartTiming Int32, ResponseEndTiming Int32, + FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, + LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, + RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, + ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, + OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, + UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, + URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, + ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), + IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) + ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'" +clickhouse-client --query "CREATE TABLE test.visits (CounterID UInt32, StartDate Date, Sign Int8, IsNew UInt8, + VisitID UInt64, UserID UInt64, StartTime DateTime, Duration UInt32, UTCStartTime DateTime, PageViews Int32, + Hits Int32, IsBounce UInt8, Referer String, StartURL String, RefererDomain String, StartURLDomain String, + EndURL String, LinkURL String, IsDownload UInt8, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, + AdvEngineID UInt8, PlaceID Int32, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), + RefererRegions Array(UInt32), IsYandex UInt8, GoalReachesDepth Int32, GoalReachesURL Int32, GoalReachesAny Int32, + SocialSourceNetworkID UInt8, SocialSourcePage String, MobilePhoneModel String, ClientEventTime DateTime, RegionID UInt32, + ClientIP UInt32, ClientIP6 FixedString(16), RemoteIP UInt32, RemoteIP6 FixedString(16), IPNetworkID UInt32, + SilverlightVersion3 UInt32, CodeVersion UInt32, ResolutionWidth UInt16, ResolutionHeight UInt16, UserAgentMajor UInt16, + UserAgentMinor UInt16, WindowClientWidth UInt16, WindowClientHeight UInt16, SilverlightVersion2 UInt8, SilverlightVersion4 UInt16, + FlashVersion3 UInt16, FlashVersion4 UInt16, ClientTimeZone Int16, OS UInt8, UserAgent UInt8, ResolutionDepth UInt8, + FlashMajor UInt8, FlashMinor UInt8, NetMajor UInt8, NetMinor UInt8, MobilePhone UInt8, SilverlightVersion1 UInt8, + Age UInt8, Sex UInt8, Income UInt8, JavaEnable UInt8, CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, + BrowserLanguage UInt16, BrowserCountry UInt16, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), + Params Array(String), Goals Nested(ID UInt32, Serial UInt32, EventTime DateTime, Price Int64, OrderID String, CurrencyID UInt32), + WatchIDs Array(UInt64), ParamSumPrice Int64, ParamCurrency FixedString(3), ParamCurrencyID UInt16, ClickLogID UInt64, + ClickEventID Int32, ClickGoodEvent Int32, ClickEventTime DateTime, ClickPriorityID Int32, ClickPhraseID Int32, ClickPageID Int32, + ClickPlaceID Int32, ClickTypeID Int32, ClickResourceID Int32, ClickCost UInt32, ClickClientIP UInt32, ClickDomainID UInt32, + ClickURL String, ClickAttempt UInt8, ClickOrderID UInt32, ClickBannerID UInt32, ClickMarketCategoryID UInt32, ClickMarketPP UInt32, + ClickMarketCategoryName String, ClickMarketPPName 
String, ClickAWAPSCampaignName String, ClickPageName String, ClickTargetType UInt16, + ClickTargetPhraseID UInt64, ClickContextType UInt8, ClickSelectType Int8, ClickOptions String, ClickGroupBannerID Int32, + OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, + UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, FirstVisit DateTime, + PredLastVisit Date, LastVisit Date, TotalVisits UInt32, TraficSource Nested(ID Int8, SearchEngineID UInt16, AdvEngineID UInt8, + PlaceID UInt16, SocialSourceNetworkID UInt8, Domain String, SearchPhrase String, SocialSourcePage String), Attendance FixedString(16), + CLID UInt32, YCLID UInt64, NormalizedRefererHash UInt64, SearchPhraseHash UInt64, RefererDomainHash UInt64, NormalizedStartURLHash UInt64, + StartURLDomainHash UInt64, NormalizedEndURLHash UInt64, TopLevelDomain UInt64, URLScheme UInt64, OpenstatServiceNameHash UInt64, + OpenstatCampaignIDHash UInt64, OpenstatAdIDHash UInt64, OpenstatSourceIDHash UInt64, UTMSourceHash UInt64, UTMMediumHash UInt64, + UTMCampaignHash UInt64, UTMContentHash UInt64, UTMTermHash UInt64, FromHash UInt64, WebVisorEnabled UInt8, WebVisorActivity UInt32, + ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), + Market Nested(Type UInt8, GoalID UInt32, OrderID String, OrderPrice Int64, PP UInt32, DirectPlaceID UInt32, DirectOrderID UInt32, + DirectBannerID UInt32, GoodID String, GoodName String, GoodQuantity Int32, GoodPrice Int64), IslandID FixedString(16)) + ENGINE = CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID) + SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'" clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0" clickhouse-client --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0" @@ -275,7 +387,9 @@ export ZOOKEEPER_FAULT_INJECTION=1 configure # But we still need default disk because some tables loaded only into it -sudo cat /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml | sed "s|
<main><disk>s3</disk></main>|<main><disk>s3</disk></main><default><disk>default</disk></default>|" > /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp
+sudo cat /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml \
+    | sed "s|<main><disk>s3</disk></main>|<main><disk>s3</disk></main><default><disk>default</disk></default>|" \
+    > /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp
 mv /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
 sudo chown clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
 sudo chgrp clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
@@ -283,8 +397,12 @@ sudo chgrp clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_defau
 start

 ./stress --hung-check --drop-databases --output-folder test_output --skip-func-tests "$SKIP_TESTS_OPTION" --global-time-limit 1200 \
-    && echo -e 'Test script exit code\tOK' >> /test_output/test_results.tsv \
-    || echo -e 'Test script failed\tFAIL' >> /test_output/test_results.tsv
+    && echo -e "Test script exit code$OK" >> /test_output/test_results.tsv \
+    || echo -e "Test script failed$FAIL script exit code: $?" >> /test_output/test_results.tsv
+
+# NOTE Hung check is implemented in docker/tests/stress/stress
+rg -Fa "No queries hung" /test_output/test_results.tsv | grep -Fa "OK" \
+    || echo -e "Hung check failed, possible deadlock found (see hung_check.log)$FAIL$(head_escaped /test_output/hung_check.log | unts)"

 stop
 mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.stress.log
@@ -295,9 +413,10 @@ unset "${!THREAD_@}"

 start

-clickhouse-client --query "SELECT 'Server successfully started', 'OK'" >> /test_output/test_results.tsv \
-    || (echo -e 'Server failed to start (see application_errors.txt and clickhouse-server.clean.log)\tFAIL' >> /test_output/test_results.tsv \
-    && rg --text "<Error>.*Application" /var/log/clickhouse-server/clickhouse-server.log > /test_output/application_errors.txt)
+clickhouse-client --query "SELECT 'Server successfully started', 'OK', NULL, ''" >> /test_output/test_results.tsv \
+    || (rg --text "<Error>.*Application" /var/log/clickhouse-server/clickhouse-server.log > /test_output/application_errors.txt \
+    && echo -e "Server failed to start (see application_errors.txt and clickhouse-server.clean.log)$FAIL$(trim_server_logs application_errors.txt)" \
+    >> /test_output/test_results.tsv)

 stop

@@ -310,49 +429,54 @@ stop
 rg -Fa "==================" /var/log/clickhouse-server/stderr.log | rg -v "in query:" >> /test_output/tmp
 rg -Fa "WARNING" /var/log/clickhouse-server/stderr.log >> /test_output/tmp
 rg -Fav -e "ASan doesn't fully support makecontext/swapcontext functions" -e "DB::Exception" /test_output/tmp > /dev/null \
-    && echo -e 'Sanitizer assert (in stderr.log)\tFAIL' >> /test_output/test_results.tsv \
-    || echo -e 'No sanitizer asserts\tOK' >> /test_output/test_results.tsv
+    && echo -e "Sanitizer assert (in stderr.log)$FAIL$(head_escaped /test_output/tmp)" >> /test_output/test_results.tsv \
+    || echo -e "No sanitizer asserts$OK" >> /test_output/test_results.tsv
 rm -f /test_output/tmp

 # OOM
 rg -Fa " <Fatal> Application: Child process was terminated by signal 9" /var/log/clickhouse-server/clickhouse-server*.log > /dev/null \
-    && echo -e 'OOM killer (or signal 9) in clickhouse-server.log\tFAIL' >> /test_output/test_results.tsv \
-    || echo -e 'No OOM messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv
+    && echo -e "Signal 9 in clickhouse-server.log$FAIL" >> /test_output/test_results.tsv \
+    || echo -e "No OOM messages in clickhouse-server.log$OK" >> /test_output/test_results.tsv
 # Logical errors
-rg -Fa "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-server/clickhouse-server*.log > /test_output/logical_errors.txt \
-    && echo -e 'Logical error thrown (see clickhouse-server.log or logical_errors.txt)\tFAIL' >> /test_output/test_results.tsv \
-    || echo -e 'No logical errors\tOK' >> /test_output/test_results.tsv
+rg -Fa "Code: 49. DB::Exception: " /var/log/clickhouse-server/clickhouse-server*.log > /test_output/logical_errors.txt \
+    && echo -e "Logical error thrown (see clickhouse-server.log or logical_errors.txt)$FAIL$(head_escaped /test_output/logical_errors.txt)" >> /test_output/test_results.tsv \
+    || echo -e "No logical errors$OK" >> /test_output/test_results.tsv

 # Remove file logical_errors.txt if it's empty
 [ -s /test_output/logical_errors.txt ] || rm /test_output/logical_errors.txt

 # No such key errors
 rg --text "Code: 499.*The specified key does not exist" /var/log/clickhouse-server/clickhouse-server*.log > /test_output/no_such_key_errors.txt \
-    && echo -e 'S3_ERROR No such key thrown (see clickhouse-server.log or no_such_key_errors.txt)\tFAIL' >> /test_output/test_results.tsv \
-    || echo -e 'No lost s3 keys\tOK' >> /test_output/test_results.tsv
+    && echo -e "S3_ERROR No such key thrown (see clickhouse-server.log or no_such_key_errors.txt)$FAIL$(trim_server_logs no_such_key_errors.txt)" >> /test_output/test_results.tsv \
+    || echo -e "No lost s3 keys$OK" >> /test_output/test_results.tsv

 # Remove file no_such_key_errors.txt if it's empty
 [ -s /test_output/no_such_key_errors.txt ] || rm /test_output/no_such_key_errors.txt

 # Crash
 rg -Fa "########################################" /var/log/clickhouse-server/clickhouse-server*.log > /dev/null \
-    && echo -e 'Killed by signal (in clickhouse-server.log)\tFAIL' >> /test_output/test_results.tsv \
-    || echo -e 'Not crashed\tOK' >> /test_output/test_results.tsv
+    && echo -e "Killed by signal (in clickhouse-server.log)$FAIL" >> /test_output/test_results.tsv \
+    || echo -e "Not crashed$OK" >> /test_output/test_results.tsv

 # It also checks for crash without stacktrace (printed by watchdog)
 rg -Fa " <Fatal> " /var/log/clickhouse-server/clickhouse-server*.log > /test_output/fatal_messages.txt \
-    && echo -e 'Fatal message in clickhouse-server.log (see fatal_messages.txt)\tFAIL' >> /test_output/test_results.tsv \
-    || echo -e 'No fatal messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv
+    && echo -e "Fatal message in clickhouse-server.log (see fatal_messages.txt)$FAIL$(trim_server_logs fatal_messages.txt)" >> /test_output/test_results.tsv \
+    || echo -e "No fatal messages in clickhouse-server.log$OK" >> /test_output/test_results.tsv

 # Remove file fatal_messages.txt if it's empty
 [ -s /test_output/fatal_messages.txt ] || rm /test_output/fatal_messages.txt

 rg -Fa "########################################" /test_output/* > /dev/null \
-    && echo -e 'Killed by signal (output files)\tFAIL' >> /test_output/test_results.tsv
+    && echo -e "Killed by signal (output files)$FAIL" >> /test_output/test_results.tsv
+
+function get_gdb_log_context()
+{
+    rg -A50 -Fa " received signal " /test_output/gdb.log | head_escaped
+}

 rg -Fa " received signal " /test_output/gdb.log > /dev/null \
-    && echo -e 'Found signal in gdb.log\tFAIL' >> /test_output/test_results.tsv
+    && echo -e "Found signal in gdb.log$FAIL$(get_gdb_log_context)" >> /test_output/test_results.tsv

 if [ "$DISABLE_BC_CHECK" -ne "1" ]; then
     echo -e "Backward compatibility check\n"
@@ -367,8 +491,8 @@ if [ "$DISABLE_BC_CHECK" -ne "1" ]; then
     echo "Download clickhouse-server from the previous release"
     mkdir previous_release_package_folder

-    echo $previous_release_tag |
download_release_packages && echo -e 'Download script exit code\tOK' >> /test_output/test_results.tsv \ - || echo -e 'Download script failed\tFAIL' >> /test_output/test_results.tsv + echo $previous_release_tag | download_release_packages && echo -e "Download script exit code$OK" >> /test_output/test_results.tsv \ + || echo -e "Download script failed$FAIL" >> /test_output/test_results.tsv mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.clean.log for table in query_log trace_log @@ -381,13 +505,13 @@ if [ "$DISABLE_BC_CHECK" -ne "1" ]; then # Check if we cloned previous release repository successfully if ! [ "$(ls -A previous_release_repository/tests/queries)" ] then - echo -e "Backward compatibility check: Failed to clone previous release tests\tFAIL" >> /test_output/test_results.tsv + echo -e "Backward compatibility check: Failed to clone previous release tests$FAIL" >> /test_output/test_results.tsv elif ! [ "$(ls -A previous_release_package_folder/clickhouse-common-static_*.deb && ls -A previous_release_package_folder/clickhouse-server_*.deb)" ] then - echo -e "Backward compatibility check: Failed to download previous release packages\tFAIL" >> /test_output/test_results.tsv + echo -e "Backward compatibility check: Failed to download previous release packages$FAIL" >> /test_output/test_results.tsv else - echo -e "Successfully cloned previous release tests\tOK" >> /test_output/test_results.tsv - echo -e "Successfully downloaded previous release packages\tOK" >> /test_output/test_results.tsv + echo -e "Successfully cloned previous release tests$OK" >> /test_output/test_results.tsv + echo -e "Successfully downloaded previous release packages$OK" >> /test_output/test_results.tsv # Uninstall current packages dpkg --remove clickhouse-client @@ -446,9 +570,10 @@ if [ "$DISABLE_BC_CHECK" -ne "1" ]; then mkdir tmp_stress_output - ./stress --test-cmd="/usr/bin/clickhouse-test --queries=\"previous_release_repository/tests/queries\"" --backward-compatibility-check --output-folder tmp_stress_output --global-time-limit=1200 \ - && echo -e 'Backward compatibility check: Test script exit code\tOK' >> /test_output/test_results.tsv \ - || echo -e 'Backward compatibility check: Test script failed\tFAIL' >> /test_output/test_results.tsv + ./stress --test-cmd="/usr/bin/clickhouse-test --queries=\"previous_release_repository/tests/queries\"" \ + --backward-compatibility-check --output-folder tmp_stress_output --global-time-limit=1200 \ + && echo -e "Backward compatibility check: Test script exit code$OK" >> /test_output/test_results.tsv \ + || echo -e "Backward compatibility check: Test script failed$FAIL" >> /test_output/test_results.tsv rm -rf tmp_stress_output # We experienced deadlocks in this command in very rare cases. 
Let's debug it:
@@ -470,9 +595,9 @@ if [ "$DISABLE_BC_CHECK" -ne "1" ]; then
     export ZOOKEEPER_FAULT_INJECTION=0
     configure
     start 500
-    clickhouse-client --query "SELECT 'Backward compatibility check: Server successfully started', 'OK'" >> /test_output/test_results.tsv \
-        || (echo -e 'Backward compatibility check: Server failed to start\tFAIL' >> /test_output/test_results.tsv \
-        && rg --text "<Error>.*Application" /var/log/clickhouse-server/clickhouse-server.log >> /test_output/bc_check_application_errors.txt)
+    clickhouse-client --query "SELECT 'Backward compatibility check: Server successfully started', 'OK', NULL, ''" >> /test_output/test_results.tsv \
+        || (rg --text "<Error>.*Application" /var/log/clickhouse-server/clickhouse-server.log >> /test_output/bc_check_application_errors.txt \
+        && echo -e "Backward compatibility check: Server failed to start$FAIL$(trim_server_logs bc_check_application_errors.txt)" >> /test_output/test_results.tsv)

     clickhouse-client --query="SELECT 'Server version: ', version()"

@@ -488,8 +613,6 @@ if [ "$DISABLE_BC_CHECK" -ne "1" ]; then
     # FIXME Not sure if it's expected, but some tests from BC check may not be finished yet when we restarting server.
     # Let's just ignore all errors from queries ("} TCPHandler: Code:", "} executeQuery: Code:")
     # FIXME https://github.com/ClickHouse/ClickHouse/issues/39197 ("Missing columns: 'v3' while processing query: 'v3, k, v1, v2, p'")
-    # NOTE Incompatibility was introduced in https://github.com/ClickHouse/ClickHouse/pull/39263, it's expected
-    # ("This engine is deprecated and is not supported in transactions", "[Queue = DB::MergeMutateRuntimeQueue]: Code: 235. DB::Exception: Part")
     # FIXME https://github.com/ClickHouse/ClickHouse/issues/39174 - bad mutation does not indicate backward incompatibility
     echo "Check for Error messages in server log:"
     rg -Fav -e "Code: 236. DB::Exception: Cancelled merging parts" \
@@ -519,7 +642,6 @@ if [ "$DISABLE_BC_CHECK" -ne "1" ]; then
         -e "} TCPHandler: Code:" \
         -e "} executeQuery: Code:" \
         -e "Missing columns: 'v3' while processing query: 'v3, k, v1, v2, p'" \
-        -e "This engine is deprecated and is not supported in transactions" \
         -e "[Queue = DB::MergeMutateRuntimeQueue]: Code: 235. DB::Exception: Part" \
         -e "The set of parts restored in place of" \
         -e "(ReplicatedMergeTreeAttachThread): Initialization failed. Error" \
@@ -530,8 +652,9 @@ if [ "$DISABLE_BC_CHECK" -ne "1" ]; then
         -e "Session expired" \
         -e "TOO_MANY_PARTS" \
         /var/log/clickhouse-server/clickhouse-server.backward.dirty.log | rg -Fa "<Error>" > /test_output/bc_check_error_messages.txt \
-        && echo -e 'Backward compatibility check: Error message in clickhouse-server.log (see bc_check_error_messages.txt)\tFAIL' >> /test_output/test_results.tsv \
-        || echo -e 'Backward compatibility check: No Error messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv
+        && echo -e "Backward compatibility check: Error message in clickhouse-server.log (see bc_check_error_messages.txt)$FAIL$(trim_server_logs bc_check_error_messages.txt)" \
+            >> /test_output/test_results.tsv \
+        || echo -e "Backward compatibility check: No Error messages in clickhouse-server.log$OK" >> /test_output/test_results.tsv

     # Remove file bc_check_error_messages.txt if it's empty
     [ -s /test_output/bc_check_error_messages.txt ] || rm /test_output/bc_check_error_messages.txt

@@ -540,34 +663,36 @@ if [ "$DISABLE_BC_CHECK" -ne "1" ]; then
     rg -Fa "==================" /var/log/clickhouse-server/stderr.log >> /test_output/tmp
     rg -Fa "WARNING" /var/log/clickhouse-server/stderr.log >> /test_output/tmp
     rg -Fav -e "ASan doesn't fully support makecontext/swapcontext functions" -e "DB::Exception" /test_output/tmp > /dev/null \
-        && echo -e 'Backward compatibility check: Sanitizer assert (in stderr.log)\tFAIL' >> /test_output/test_results.tsv \
-        || echo -e 'Backward compatibility check: No sanitizer asserts\tOK' >> /test_output/test_results.tsv
+        && echo -e "Backward compatibility check: Sanitizer assert (in stderr.log)$FAIL$(head_escaped /test_output/tmp)" >> /test_output/test_results.tsv \
+        || echo -e "Backward compatibility check: No sanitizer asserts$OK" >> /test_output/test_results.tsv
     rm -f /test_output/tmp

     # OOM
     rg -Fa " <Fatal> Application: Child process was terminated by signal 9" /var/log/clickhouse-server/clickhouse-server.backward.*.log > /dev/null \
-        && echo -e 'Backward compatibility check: OOM killer (or signal 9) in clickhouse-server.log\tFAIL' >> /test_output/test_results.tsv \
-        || echo -e 'Backward compatibility check: No OOM messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv
+        && echo -e "Backward compatibility check: Signal 9 in clickhouse-server.log$FAIL" >> /test_output/test_results.tsv \
+        || echo -e "Backward compatibility check: No OOM messages in clickhouse-server.log$OK" >> /test_output/test_results.tsv

     # Logical errors
     echo "Check for Logical errors in server log:"
-    rg -Fa -A20 "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-server/clickhouse-server.backward.*.log > /test_output/bc_check_logical_errors.txt \
-        && echo -e 'Backward compatibility check: Logical error thrown (see clickhouse-server.log or bc_check_logical_errors.txt)\tFAIL' >> /test_output/test_results.tsv \
-        || echo -e 'Backward compatibility check: No logical errors\tOK' >> /test_output/test_results.tsv
+    rg -Fa -A20 "Code: 49. DB::Exception:" /var/log/clickhouse-server/clickhouse-server.backward.*.log > /test_output/bc_check_logical_errors.txt \
+        && echo -e "Backward compatibility check: Logical error thrown (see clickhouse-server.log or bc_check_logical_errors.txt)$FAIL$(trim_server_logs bc_check_logical_errors.txt)" \
+            >> /test_output/test_results.tsv \
+        || echo -e "Backward compatibility check: No logical errors$OK" >> /test_output/test_results.tsv

     # Remove file bc_check_logical_errors.txt if it's empty
     [ -s /test_output/bc_check_logical_errors.txt ] || rm /test_output/bc_check_logical_errors.txt

     # Crash
     rg -Fa "########################################" /var/log/clickhouse-server/clickhouse-server.backward.*.log > /dev/null \
-        && echo -e 'Backward compatibility check: Killed by signal (in clickhouse-server.log)\tFAIL' >> /test_output/test_results.tsv \
-        || echo -e 'Backward compatibility check: Not crashed\tOK' >> /test_output/test_results.tsv
+        && echo -e "Backward compatibility check: Killed by signal (in clickhouse-server.log)$FAIL" >> /test_output/test_results.tsv \
+        || echo -e "Backward compatibility check: Not crashed$OK" >> /test_output/test_results.tsv

     # It also checks for crash without stacktrace (printed by watchdog)
     echo "Check for Fatal message in server log:"
     rg -Fa " <Fatal> " /var/log/clickhouse-server/clickhouse-server.backward.*.log > /test_output/bc_check_fatal_messages.txt \
-        && echo -e 'Backward compatibility check: Fatal message in clickhouse-server.log (see bc_check_fatal_messages.txt)\tFAIL' >> /test_output/test_results.tsv \
-        || echo -e 'Backward compatibility check: No fatal messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv
+        && echo -e "Backward compatibility check: Fatal message in clickhouse-server.log (see bc_check_fatal_messages.txt)$FAIL$(trim_server_logs bc_check_fatal_messages.txt)" \
+            >> /test_output/test_results.tsv \
+        || echo -e "Backward compatibility check: No fatal messages in clickhouse-server.log$OK" >> /test_output/test_results.tsv

     # Remove file bc_check_fatal_messages.txt if it's empty
     [ -s /test_output/bc_check_fatal_messages.txt ] || rm /test_output/bc_check_fatal_messages.txt

@@ -575,7 +700,8 @@ if [ "$DISABLE_BC_CHECK" -ne "1" ]; then
     tar -chf /test_output/coordination.backward.tar /var/lib/clickhouse/coordination ||:
     for table in query_log trace_log
     do
-        clickhouse-local --path /var/lib/clickhouse/ --only-system-tables -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.backward.tsv.zst ||:
+        clickhouse-local --path /var/lib/clickhouse/ --only-system-tables -q "select * from system.$table format TSVWithNamesAndTypes" \
+            | zstd --threads=0 > /test_output/$table.backward.tsv.zst ||:
     done
     fi
 fi

@@ -584,13 +710,28 @@ dmesg -T > /test_output/dmesg.log

 # OOM in dmesg -- those are real
 grep -q -F -e 'Out of memory: Killed process' -e 'oom_reaper: reaped process' -e 'oom-kill:constraint=CONSTRAINT_NONE' /test_output/dmesg.log \
-    && echo -e 'OOM in dmesg\tFAIL' >> /test_output/test_results.tsv \
-    || echo -e 'No OOM in dmesg\tOK' >> /test_output/test_results.tsv
+    && echo -e "OOM in dmesg$FAIL$(head_escaped /test_output/dmesg.log)" >> /test_output/test_results.tsv \
+    || echo -e "No OOM in dmesg$OK" >> /test_output/test_results.tsv

 mv /var/log/clickhouse-server/stderr.log /test_output/

 # Write check result into check_status.tsv
-clickhouse-local --structure "test String, res String" -q "SELECT 'failure', test FROM table WHERE res != 'OK' order by (lower(test) like '%hung%'),
rowNumberInAllBlocks() LIMIT 1" < /test_output/test_results.tsv > /test_output/check_status.tsv +# Try to choose most specific error for the whole check status +clickhouse-local --structure "test String, res String" -q "SELECT 'failure', test FROM table WHERE res != 'OK' order by +(test like 'Backward compatibility check%'), -- BC check goes last +(test like '%Sanitizer%') DESC, +(test like '%Killed by signal%') DESC, +(test like '%gdb.log%') DESC, +(test ilike '%possible deadlock%') DESC, +(test like '%start%') DESC, +(test like '%dmesg%') DESC, +(test like '%OOM%') DESC, +(test like '%Signal 9%') DESC, +(test like '%Fatal message%') DESC, +(test like '%Error message%') DESC, +(test like '%previous release%') DESC, +rowNumberInAllBlocks() +LIMIT 1" < /test_output/test_results.tsv > /test_output/check_status.tsv [ -s /test_output/check_status.tsv ] || echo -e "success\tNo errors found" > /test_output/check_status.tsv # Core dumps diff --git a/docker/test/stress/stress b/docker/test/stress/stress index 3fce357cc19..86605b5ce0c 100755 --- a/docker/test/stress/stress +++ b/docker/test/stress/stress @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- from multiprocessing import cpu_count -from subprocess import Popen, call, check_output, STDOUT +from subprocess import Popen, call, check_output, STDOUT, PIPE import os import argparse import logging @@ -299,14 +299,19 @@ if __name__ == "__main__": "00001_select_1", ] ) - res = call(cmd, shell=True, stderr=STDOUT) - hung_check_status = "No queries hung\tOK\n" + hung_check_log = os.path.join(args.output_folder, "hung_check.log") + tee = Popen(['/usr/bin/tee', hung_check_log], stdin=PIPE) + res = call(cmd, shell=True, stdout=tee.stdin, stderr=STDOUT) + tee.stdin.close() if res != 0 and have_long_running_queries: logging.info("Hung check failed with exit code {}".format(res)) - hung_check_status = "Hung check failed\tFAIL\n" - with open( - os.path.join(args.output_folder, "test_results.tsv"), "w+" - ) as results: - results.write(hung_check_status) + else: + hung_check_status = "No queries hung\tOK\t\\N\t\n" + with open( + os.path.join(args.output_folder, "test_results.tsv"), "w+" + ) as results: + results.write(hung_check_status) + os.remove(hung_check_log) + logging.info("Stress test finished") diff --git a/docker/test/util/Dockerfile b/docker/test/util/Dockerfile index f1cf029e9a2..0ee426f4e4d 100644 --- a/docker/test/util/Dockerfile +++ b/docker/test/util/Dockerfile @@ -48,6 +48,7 @@ RUN apt-get update \ gdb \ git \ gperf \ + libclang-rt-${LLVM_VERSION}-dev \ lld-${LLVM_VERSION} \ llvm-${LLVM_VERSION} \ llvm-${LLVM_VERSION}-dev \ diff --git a/docker/test/util/process_functional_tests_result.py b/docker/test/util/process_functional_tests_result.py index 28f3e211157..da58db8e45d 100755 --- a/docker/test/util/process_functional_tests_result.py +++ b/docker/test/util/process_functional_tests_result.py @@ -85,8 +85,16 @@ def process_test_log(log_path): if DATABASE_SIGN in line: test_end = True + # Python does not support TSV, so we have to escape '\t' and '\n' manually + # and hope that complex escape sequences will not break anything test_results = [ - (test[0], test[1], test[2], "".join(test[3])[:4096]) for test in test_results + ( + test[0], + test[1], + test[2], + "".join(test[3])[:4096].replace("\t", "\\t").replace("\n", "\\n"), + ) + for test in test_results ] return ( diff --git a/docs/changelogs/v23.1.3.5-stable.md b/docs/changelogs/v23.1.3.5-stable.md new file mode 100644 index 00000000000..d4f39894bec --- /dev/null +++ 
b/docs/changelogs/v23.1.3.5-stable.md
@@ -0,0 +1,17 @@
+---
+sidebar_position: 1
+sidebar_label: 2023
+---
+
+# 2023 Changelog
+
+### ClickHouse release v23.1.3.5-stable (548b494bcce) FIXME as compared to v23.1.2.9-stable (8dfb1700858)
+
+#### Bug Fix (user-visible misbehavior in official stable or prestable release)
+
+* Backported in [#45896](https://github.com/ClickHouse/ClickHouse/issues/45896): Bugfix IPv6 parser for mixed ip4 address with missed first octet (like `::.1.2.3`). [#45871](https://github.com/ClickHouse/ClickHouse/pull/45871) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* Get rid of progress timestamps in release publishing [#45818](https://github.com/ClickHouse/ClickHouse/pull/45818) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+
diff --git a/docs/en/development/tests.md b/docs/en/development/tests.md
index 0b26de57326..28aee32c717 100644
--- a/docs/en/development/tests.md
+++ b/docs/en/development/tests.md
@@ -16,6 +16,11 @@ Tests are located in `queries` directory. There are two subdirectories: `statele

 Each test can be one of two types: `.sql` and `.sh`. `.sql` test is the simple SQL script that is piped to `clickhouse-client --multiquery`. `.sh` test is a script that is run by itself. SQL tests are generally preferable to `.sh` tests. You should use `.sh` tests only when you have to test some feature that cannot be exercised from pure SQL, such as piping some input data into `clickhouse-client` or testing `clickhouse-local`.

+:::note
+A common mistake when testing data types `DateTime` and `DateTime64` is assuming that the server uses a specific time zone (e.g. "UTC"). This is not the case: time zones in CI test runs
+are deliberately randomized. The easiest workaround is to specify the time zone for test values explicitly, e.g. `toDateTime64(val, 3, 'Europe/Amsterdam')`.
+:::
+
 ### Running a Test Locally {#functional-test-locally}

 Start the ClickHouse server locally, listening on the default port (9000). To
diff --git a/docs/en/engines/table-engines/integrations/rabbitmq.md b/docs/en/engines/table-engines/integrations/rabbitmq.md
index 74e9e96c260..eec8691a165 100644
--- a/docs/en/engines/table-engines/integrations/rabbitmq.md
+++ b/docs/en/engines/table-engines/integrations/rabbitmq.md
@@ -77,9 +77,12 @@ Optional parameters:
 - `rabbitmq_password` - RabbitMQ password.
 - `rabbitmq_commit_on_select` - Commit messages when select query is made. Default: `false`.
 - `rabbitmq_max_rows_per_message` — The maximum number of rows written in one RabbitMQ message for row-based formats. Default: `1`.
+- `rabbitmq_empty_queue_backoff_start` — The starting backoff point for rescheduling a read if the RabbitMQ queue is empty.
+- `rabbitmq_empty_queue_backoff_end` — The ending backoff point for rescheduling a read if the RabbitMQ queue is empty.

-SSL connection:
+
+SSL connection:

 Use either `rabbitmq_secure = 1` or `amqps` in connection address: `rabbitmq_address = 'amqps://guest:guest@localhost/vhost'`.
 The default behaviour of the used library is not to check if the created TLS connection is sufficiently secure. Whether the certificate is expired, self-signed, missing or invalid: the connection is simply permitted. More strict checking of certificates can possibly be implemented in the future.
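(Editorial aside: the two backoff settings added above are not illustrated in this section. A hedged sketch of how they would be supplied on table creation — the host, exchange, and format values are placeholders, and the numeric values and their units are assumed, not documented in this excerpt:)

```sql
CREATE TABLE rabbitmq_queue (key UInt64, value String)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'localhost:5672',      -- placeholder endpoint
         rabbitmq_exchange_name = 'exchange1',       -- placeholder exchange
         rabbitmq_format = 'JSONEachRow',
         rabbitmq_empty_queue_backoff_start = 100,   -- assumed values; units are not
         rabbitmq_empty_queue_backoff_end = 10000;   -- specified in this excerpt
```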
diff --git a/docs/en/engines/table-engines/mergetree-family/invertedindexes.md b/docs/en/engines/table-engines/mergetree-family/invertedindexes.md index 2899476b847..aa11258dc4a 100644 --- a/docs/en/engines/table-engines/mergetree-family/invertedindexes.md +++ b/docs/en/engines/table-engines/mergetree-family/invertedindexes.md @@ -2,10 +2,10 @@ slug: /en/engines/table-engines/mergetree-family/invertedindexes sidebar_label: Inverted Indexes description: Quickly find search terms in text. -keywords: [full-text search, text search] +keywords: [full-text search, text search, inverted, index, indices] --- -# Inverted indexes [experimental] +# Full-text Search using Inverted Indexes [experimental] Inverted indexes are an experimental type of [secondary indexes](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#available-types-of-indices) which provide fast text search capabilities for [String](/docs/en/sql-reference/data-types/string.md) or [FixedString](/docs/en/sql-reference/data-types/fixedstring.md) @@ -13,7 +13,7 @@ columns. The main idea of an inverted index is to store a mapping from "terms" t tokenized cells of the string column. For example, the string cell "I will be a little late" is by default tokenized into six terms "I", "will", "be", "a", "little" and "late". Another kind of tokenizer is n-grams. For example, the result of 3-gram tokenization will be 21 terms "I w", " wi", "wil", "ill", "ll ", "l b", " be" etc. The more fine-granular the input strings are tokenized, the bigger but also the more -useful the resulting inverted index will be. +useful the resulting inverted index will be. :::warning Inverted indexes are experimental and should not be used in production environments yet. They may change in the future in backward-incompatible @@ -50,7 +50,7 @@ Being a type of skipping index, inverted indexes can be dropped or added to a co ``` sql ALTER TABLE tab DROP INDEX inv_idx; -ALTER TABLE tab ADD INDEX inv_idx(s) TYPE inverted(2) GRANULARITY 1; +ALTER TABLE tab ADD INDEX inv_idx(s) TYPE inverted(2); ``` To use the index, no special functions or syntax are required. Typical string search predicates automatically leverage the index. As @@ -74,7 +74,120 @@ controls the amount of data read consumed from the underlying column before a ne intermediate memory consumption for index construction but also improves lookup performance since fewer segments need to be checked on average to evaluate a query. +## Full-text search of the Hacker News dataset + +Let's look at the performance improvements of inverted indexes on a large dataset with lots of text. We will use 28.7M rows of comments on the popular Hacker News website. 
Here is the table without an inverted index: + +```sql +CREATE TABLE hackernews ( + id UInt64, + deleted UInt8, + type String, + author String, + timestamp DateTime, + comment String, + dead UInt8, + parent UInt64, + poll UInt64, + children Array(UInt32), + url String, + score UInt32, + title String, + parts Array(UInt32), + descendants UInt32 +) +ENGINE = MergeTree +ORDER BY (type, author); +``` + +The 28.7M rows are in a Parquet file in S3 - let's insert them into the `hackernews` table: + +```sql +INSERT INTO hackernews + SELECT * FROM s3Cluster( + 'default', + 'https://datasets-documentation.s3.eu-west-3.amazonaws.com/hackernews/hacknernews.parquet', + 'Parquet', + ' + id UInt64, + deleted UInt8, + type String, + by String, + time DateTime, + text String, + dead UInt8, + parent UInt64, + poll UInt64, + kids Array(UInt32), + url String, + score UInt32, + title String, + parts Array(UInt32), + descendants UInt32'); +``` + +Consider the following simple search for the term `ClickHouse` (and its varied upper and lower cases) in the `comment` column: + +```sql +SELECT count() +FROM hackernews +WHERE hasToken(lower(comment), 'clickhouse'); +``` + +Notice it takes 3 seconds to execute the query: + +```response +┌─count()─┐ +│ 1145 │ +└─────────┘ + +1 row in set. Elapsed: 3.001 sec. Processed 28.74 million rows, 9.75 GB (9.58 million rows/s., 3.25 GB/s.) +``` + +We will use `ALTER TABLE` and add an inverted index on the lowercase of the `comment` column, then materialize it (which can take a while - wait for it to materialize): + +```sql +ALTER TABLE hackernews + ADD INDEX comment_lowercase(lower(comment)) TYPE inverted; + +ALTER TABLE hackernews MATERIALIZE INDEX comment_lowercase; +``` + +We run the same query... + +```sql +SELECT count() +FROM hackernews +WHERE hasToken(lower(comment), 'clickhouse') +``` + +...and notice the query executes 4x faster: + +```response +┌─count()─┐ +│ 1145 │ +└─────────┘ + +1 row in set. Elapsed: 0.747 sec. Processed 4.49 million rows, 1.77 GB (6.01 million rows/s., 2.37 GB/s.) +``` + +We can also search for one or all of multiple terms, i.e., disjunctions or conjunctions: + +```sql +-- multiple OR'ed terms +SELECT count(*) +FROM hackernews +WHERE multiSearchAny(lower(comment), ['oltp', 'olap']); + +-- multiple AND'ed terms +SELECT count(*) +FROM hackernews +WHERE hasToken(lower(comment), 'avx') AND hasToken(lower(comment), 'sve'); +``` + +:::note Unlike other secondary indices, inverted indexes (for now) map to row numbers (row ids) instead of granule ids. The reason for this design is performance. In practice, users often search for multiple terms at once. For example, filter predicate `WHERE s LIKE '%little%' OR s LIKE '%big%'` can be evaluated directly using an inverted index by forming the union of the row id lists for terms "little" and "big". This also means that the parameter `GRANULARITY` supplied to index creation has no meaning (it may be removed from the syntax in the future). 
+:::
diff --git a/docs/en/engines/table-engines/special/executable.md b/docs/en/engines/table-engines/special/executable.md
new file mode 100644
index 00000000000..5d01762f61b
--- /dev/null
+++ b/docs/en/engines/table-engines/special/executable.md
@@ -0,0 +1,226 @@
+---
+slug: /en/engines/table-engines/special/executable
+sidebar_position: 40
+sidebar_label: Executable
+---
+
+# Executable and ExecutablePool Table Engines
+
+The `Executable` and `ExecutablePool` table engines allow you to define a table whose rows are generated from a script that you define (by writing rows to **stdout**). The executable script is stored in the `user_scripts` directory and can read data from any source.
+
+- `Executable` tables: the script is run on every query
+- `ExecutablePool` tables: maintain a pool of persistent processes, and take processes from the pool for reads
+
+You can optionally include one or more input queries that stream their results to **stdin** for the script to read.
+
+## Creating an Executable Table
+
+The `Executable` table engine requires two parameters: the name of the script and the format of the incoming data. You can optionally pass in one or more input queries:
+
+```sql
+Executable(script_name, format, [input_query...])
+```
+
+Here are the relevant settings for an `Executable` table:
+
+- `send_chunk_header`
+    - Description: Send the number of rows in each chunk before sending a chunk to process. This setting can help to write your script in a more efficient way to preallocate some resources (see the sketch after this section)
+    - Default value: false
+- `command_termination_timeout`
+    - Description: Command termination timeout in seconds
+    - Default value: 10
+- `command_read_timeout`
+    - Description: Timeout for reading data from command stdout in milliseconds
+    - Default value: 10000
+- `command_write_timeout`
+    - Description: Timeout for writing data to command stdin in milliseconds
+    - Default value: 10000
+
+
+Let's look at an example. The following Python script is named `my_script.py` and is saved in the `user_scripts` folder. It reads in a number `i` and prints `i` random strings, with each string preceded by a number that is separated by a tab:
+
+```python
+#!/usr/bin/python3
+
+import sys
+import string
+import random
+
+def main():
+
+    # Read input value
+    for number in sys.stdin:
+        i = int(number)
+
+        # Generate some random rows
+        for id in range(0, i):
+            letters = string.ascii_letters
+            random_string = ''.join(random.choices(letters, k=10))
+            print(str(id) + '\t' + random_string + '\n', end='')
+
+        # Flush results to stdout
+        sys.stdout.flush()
+
+if __name__ == "__main__":
+    main()
+```
+
+The following `my_executable_table` is built from the output of `my_script.py`, which will generate 10 random strings every time you run a `SELECT` from `my_executable_table`:
+
+```sql
+CREATE TABLE my_executable_table (
+   x UInt32,
+   y String
+)
+ENGINE = Executable('my_script.py', TabSeparated, (SELECT 10))
+```
+
+Creating the table returns immediately and does not invoke the script. Querying `my_executable_table` causes the script to be invoked:
+
+```sql
+SELECT * FROM my_executable_table
+```
+
+```response
+┌─x─┬─y──────────┐
+│ 0 │ BsnKBsNGNH │
+│ 1 │ mgHfBCUrWM │
+│ 2 │ iDQAVhlygr │
+│ 3 │ uNGwDuXyCk │
+│ 4 │ GcFdQWvoLB │
+│ 5 │ UkciuuOTVO │
+│ 6 │ HoKeCdHkbs │
+│ 7 │ xRvySxqAcR │
+│ 8 │ LKbXPHpyDI │
+│ 9 │ zxogHTzEVV │
+└───┴────────────┘
+```
+
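(Editorial aside on the `send_chunk_header` setting listed above: when it is enabled, the script receives the row count of each chunk on its own line before the rows themselves. A minimal sketch of a script that consumes the header to preallocate — this script is hypothetical and not part of the documentation; it assumes `TabSeparated` input and simply echoes the first column back:)

```python
#!/usr/bin/python3

import sys

def main():
    # With send_chunk_header = true, ClickHouse sends the number of rows in
    # the chunk on its own line, before the rows themselves.
    while True:
        header = sys.stdin.readline()
        if header == '':
            break
        rows_in_chunk = int(header)

        # The announced size lets the script preallocate instead of growing
        # a list row by row.
        rows = [None] * rows_in_chunk
        for n in range(rows_in_chunk):
            rows[n] = sys.stdin.readline().rstrip('\n').split('\t')

        # Echo the first column back as a single TabSeparated column.
        for row in rows:
            print(row[0])
        sys.stdout.flush()

if __name__ == "__main__":
    main()
```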
+## Passing Query Results to a Script
+
+Users of the Hacker News website leave comments. Python contains a natural language processing toolkit (`nltk`) with a `SentimentIntensityAnalyzer` for determining if comments are positive, negative, or neutral - including assigning a value between -1 (a very negative comment) and 1 (a very positive comment). Let's create an `Executable` table that computes the sentiment of Hacker News comments using `nltk`.
+
+This example uses the `hackernews` table described [here](https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/invertedindexes/#full-text-search-of-the-hacker-news-dataset). The `hackernews` table includes an `id` column of type `UInt64` and a `String` column named `comment`. Let's start by defining the `Executable` table:
+
+```sql
+CREATE TABLE sentiment (
+   id UInt64,
+   sentiment Float32
+)
+ENGINE = Executable(
+    'sentiment.py',
+    TabSeparated,
+    (SELECT id, comment FROM hackernews WHERE id > 0 AND comment != '' LIMIT 20)
+);
+```
+
+Some comments about the `sentiment` table:
+
+- The file `sentiment.py` is saved in the `user_scripts` folder (the default folder of the `user_scripts_path` setting)
+- The `TabSeparated` format means our Python script needs to generate rows of raw data that contain tab-separated values
+- The query selects two columns from `hackernews`. The Python script will need to parse out those column values from the incoming rows
+
+Here is the definition of `sentiment.py`:
+
+```python
+#!/usr/local/bin/python3.9
+
+import sys
+import nltk
+from nltk.sentiment import SentimentIntensityAnalyzer
+
+def main():
+    sentiment_analyzer = SentimentIntensityAnalyzer()
+
+    while True:
+        try:
+            row = sys.stdin.readline()
+            if row == '':
+                break
+
+            split_line = row.split("\t")
+
+            id = str(split_line[0])
+            comment = split_line[1]
+
+            score = sentiment_analyzer.polarity_scores(comment)['compound']
+            print(id + '\t' + str(score) + '\n', end='')
+            sys.stdout.flush()
+        except BaseException as x:
+            break
+
+if __name__ == "__main__":
+    main()
+```
+
+Some comments about our Python script:
+
+- For this to work, you will need to run `nltk.downloader.download('vader_lexicon')` (see the snippet after this list). This could have been placed in the script, but then it would have been downloaded every time a query was executed on the `sentiment` table - which is not efficient
+- Each value of `row` is going to be a row in the result set of `SELECT id, comment FROM hackernews WHERE id > 0 AND comment != '' LIMIT 20`
+- The incoming row is tab-separated, so we parse out the `id` and `comment` using the Python `split` function
+- The result of `polarity_scores` is a JSON object with a handful of values. We decided to just grab the `compound` value of this JSON object
+- Recall that the `sentiment` table in ClickHouse uses the `TabSeparated` format and contains two columns, so our `print` function separates those columns with a tab
+
+Every time you write a query that selects rows from the `sentiment` table, the `SELECT id, comment FROM hackernews WHERE id > 0 AND comment != '' LIMIT 20` query is executed and the result is passed to `sentiment.py`.
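(As the first bullet point above notes, the `vader_lexicon` data must be downloaded once ahead of time rather than inside `sentiment.py`; a one-time setup snippet using the call named in the docs:)

```python
# One-time setup: run once outside the query path, so the lexicon is not
# re-downloaded on every query against the sentiment table.
import nltk

nltk.downloader.download('vader_lexicon')
```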
Let's test it out: + +```sql +SELECT * +FROM sentiment +``` + +The response looks like: + +```response +┌───────id─┬─sentiment─┐ +│ 7398199 │ 0.4404 │ +│ 21640317 │ 0.1779 │ +│ 21462000 │ 0 │ +│ 25168863 │ 0 │ +│ 25168978 │ -0.1531 │ +│ 25169359 │ 0 │ +│ 25169394 │ -0.9231 │ +│ 25169766 │ 0.4137 │ +│ 25172570 │ 0.7469 │ +│ 25173687 │ 0.6249 │ +│ 28291534 │ 0 │ +│ 28291669 │ -0.4767 │ +│ 28291731 │ 0 │ +│ 28291949 │ -0.4767 │ +│ 28292004 │ 0.3612 │ +│ 28292050 │ -0.296 │ +│ 28292322 │ 0 │ +│ 28295172 │ 0.7717 │ +│ 28295288 │ 0.4404 │ +│ 21465723 │ -0.6956 │ +└──────────┴───────────┘ +``` + + +## Creating an ExecutablePool Table + +The syntax for `ExecutablePool` is similar to `Executable`, but there are a couple of relevant settings unique to an `ExecutablePool` table: + +- `pool_size` + - Description: Processes pool size. If size is 0, then there are no size restrictions + - Default value: 16 +- `max_command_execution_time` + - Description: Max command execution time in seconds + - Default value: 10 + +We can easily convert the `sentiment` table above to use `ExecutablePool` instead of `Executable`: + +```sql +CREATE TABLE sentiment_pooled ( + id UInt64, + sentiment Float32 +) +ENGINE = ExecutablePool( + 'sentiment.py', + TabSeparated, + (SELECT id, comment FROM hackernews WHERE id > 0 AND comment != '' LIMIT 20000) +) +SETTINGS + pool_size = 4; +``` + +ClickHouse will maintain 4 processes on-demand when your client queries the `sentiment_pooled` table. \ No newline at end of file diff --git a/docs/en/operations/caches.md b/docs/en/operations/caches.md index d912b8a5990..0f9156048c4 100644 --- a/docs/en/operations/caches.md +++ b/docs/en/operations/caches.md @@ -22,6 +22,6 @@ Additional cache types: - [Dictionaries](../sql-reference/dictionaries/index.md) data cache. - Schema inference cache. - [Filesystem cache](storing-data.md) over S3, Azure, Local and other disks. -- [(Experimental) Query result cache](query-result-cache.md). +- [(Experimental) Query cache](query-cache.md). To drop one of the caches, use [SYSTEM DROP ... CACHE](../sql-reference/statements/system.md#drop-mark-cache) statements. diff --git a/docs/en/operations/query-cache.md b/docs/en/operations/query-cache.md new file mode 100644 index 00000000000..1a486de7904 --- /dev/null +++ b/docs/en/operations/query-cache.md @@ -0,0 +1,112 @@ +--- +slug: /en/operations/query-cache +sidebar_position: 65 +sidebar_label: Query Cache [experimental] +--- + +# Query Cache [experimental] + +The query cache allows to compute `SELECT` queries just once and to serve further executions of the same query directly from the cache. +Depending on the type of the queries, this can dramatically reduce latency and resource consumption of the ClickHouse server. + +## Background, Design and Limitations + +Query caches can generally be viewed as transactionally consistent or inconsistent. + +- In transactionally consistent caches, the database invalidates (discards) cached query results if the result of the `SELECT` query changes + or potentially changes. In ClickHouse, operations which change the data include inserts/updates/deletes in/of/from tables or collapsing + merges. Transactionally consistent caching is especially suitable for OLTP databases, for example + [MySQL](https://dev.mysql.com/doc/refman/5.6/en/query-cache.html) (which removed query cache after v8.0) and + [Oracle](https://docs.oracle.com/database/121/TGDBA/tune_result_cache.htm). 
+- In transactionally inconsistent caches, slight inaccuracies in query results are accepted under the assumption that all cache entries are
+  assigned a validity period after which they expire (e.g. 1 minute) and that the underlying data changes only slightly during this period.
+  This approach is overall more suitable for OLAP databases. As an example where transactionally inconsistent caching is sufficient,
+  consider an hourly sales report in a reporting tool which is simultaneously accessed by multiple users. Sales data typically changes
+  slowly enough that the database only needs to compute the report once (represented by the first `SELECT` query). Further queries can be
+  served directly from the query cache. In this example, a reasonable validity period could be 30 min.
+
+Transactionally inconsistent caching is traditionally provided by client tools or proxy packages interacting with the database. As a result,
+the same caching logic and configuration is often duplicated. With ClickHouse's query cache, the caching logic moves to the server side.
+This reduces maintenance effort and avoids redundancy.
+
+:::warning
+The query cache is an experimental feature that should not be used in production. There are known cases (e.g. in distributed query
+processing) where wrong results are returned.
+:::
+
+## Configuration Settings and Usage
+
+As long as the query cache is experimental, it must be activated using the following configuration setting:
+
+```sql
+SET allow_experimental_query_cache = true;
+```
+
+Afterwards, setting [use_query_cache](settings/settings.md#use-query-cache) can be used to control whether a specific query or all queries
+of the current session should utilize the query cache. For example, the first execution of query
+
+```sql
+SELECT some_expensive_calculation(column_1, column_2)
+FROM table
+SETTINGS use_query_cache = true;
+```
+
+will store the query result in the query cache. Subsequent executions of the same query (also with parameter `use_query_cache = true`) will
+read the computed result from the cache and return it immediately.
+
+The way the cache is utilized can be configured in more detail using settings [enable_writes_to_query_cache](settings/settings.md#enable-writes-to-query-cache)
+and [enable_reads_from_query_cache](settings/settings.md#enable-reads-from-query-cache) (both `true` by default). The former setting
+controls whether query results are stored in the cache, whereas the latter setting determines if the database should try to retrieve query
+results from the cache. For example, the following query will use the cache only passively, i.e. attempt to read from it but not store its
+result in it:
+
+```sql
+SELECT some_expensive_calculation(column_1, column_2)
+FROM table
+SETTINGS use_query_cache = true, enable_writes_to_query_cache = false;
+```
+
+For maximum control, it is generally recommended to provide settings "use_query_cache", "enable_writes_to_query_cache" and
+"enable_reads_from_query_cache" only with specific queries. It is also possible to enable caching at user or profile level (e.g. via `SET
+use_query_cache = true`) but one should keep in mind that all `SELECT` queries including monitoring or debugging queries to system tables
+may return cached results then.
+
+The query cache can be cleared using statement `SYSTEM DROP QUERY CACHE`. The content of the query cache is displayed in system table
+`system.query_cache`.
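(A minimal illustration of the two statements just mentioned; `some_expensive_calculation(column_1, column_2)` is the placeholder query from the examples above:)

```sql
-- Run a query with caching enabled, then inspect what was stored.
SELECT some_expensive_calculation(column_1, column_2)
FROM table
SETTINGS use_query_cache = true;

SELECT * FROM system.query_cache;

-- Start from a clean slate again.
SYSTEM DROP QUERY CACHE;
```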
+
+The numbers of query cache hits and misses are shown as events "QueryCacheHits" and "QueryCacheMisses" in system table
+`system.events`. Both counters are only updated for `SELECT` queries which run with setting "use_query_cache = true". Other queries do not
+affect the cache miss counter.
+
+The query cache exists once per ClickHouse server process. However, cache results are by default not shared between users. This can be
+changed (see below) but doing so is not recommended for security reasons.
+
+Query results are referenced in the query cache by the [Abstract Syntax Tree (AST)](https://en.wikipedia.org/wiki/Abstract_syntax_tree) of
+their query. This means that caching is agnostic to upper/lowercase, for example `SELECT 1` and `select 1` are treated as the same query. To
+make the matching more natural, all query-level settings related to the query cache are removed from the AST.
+
+If the query was aborted due to an exception or user cancellation, no entry is written into the query cache.
+
+The size of the query cache, the maximum number of cache entries and the maximum size of cache entries (in bytes and in rows) can
+be configured using different [server configuration options](server-configuration-parameters/settings.md#server_configuration_parameters_query-cache).
+
+To define the minimum duration a query must run for its result to be cached, you can use setting
+[query_cache_min_query_duration](settings/settings.md#query-cache-min-query-duration). For example, the result of query
+
+``` sql
+SELECT some_expensive_calculation(column_1, column_2)
+FROM table
+SETTINGS use_query_cache = true, query_cache_min_query_duration = 5000;
+```
+
+is only cached if the query runs longer than 5 seconds. It is also possible to specify how many times a query needs to run before its
+result is cached; for that, use setting [query_cache_min_query_runs](settings/settings.md#query-cache-min-query-runs).
+
+Entries in the query cache become stale after a certain time period (time-to-live). By default, this period is 60 seconds but a different
+value can be specified at session, profile or query level using setting [query_cache_ttl](settings/settings.md#query-cache-ttl).
+
+Also, results of queries with non-deterministic functions such as `rand()` and `now()` are not cached. This can be overridden using
+setting [query_cache_store_results_of_queries_with_nondeterministic_functions](settings/settings.md#query-cache-store-results-of-queries-with-nondeterministic-functions).
+
+Finally, entries in the query cache are not shared between users for security reasons. For example, user A must not be able to bypass a
+row policy on a table by running the same query as another user B for whom no such policy exists. However, if necessary, cache entries can
+be marked accessible by other users (i.e. shared) by supplying setting
+[query_cache_share_between_users](settings/settings.md#query-cache-share-between-users).
diff --git a/docs/en/operations/query-result-cache.md b/docs/en/operations/query-result-cache.md
deleted file mode 100644
index 496092ab3e4..00000000000
--- a/docs/en/operations/query-result-cache.md
+++ /dev/null
@@ -1,112 +0,0 @@
----
-slug: /en/operations/query-result-cache
-sidebar_position: 65
-sidebar_label: Query Result Cache [experimental]
----
-
-# Query Result Cache [experimental]
-
-The query result cache allows to compute `SELECT` queries just once and to serve further executions of the same query directly from the
-cache.
Depending on the type of the queries, this can dramatically reduce latency and resource consumption of the ClickHouse server. - -## Background, Design and Limitations - -Query result caches can generally be viewed as transactionally consistent or inconsistent. - -- In transactionally consistent caches, the database invalidates (discards) cached query results if the result of the `SELECT` query changes - or potentially changes. In ClickHouse, operations which change the data include inserts/updates/deletes in/of/from tables or collapsing - merges. Transactionally consistent caching is especially suitable for OLTP databases, for example - [MySQL](https://dev.mysql.com/doc/refman/5.6/en/query-cache.html) (which removed query result cache after v8.0) and - [Oracle](https://docs.oracle.com/database/121/TGDBA/tune_result_cache.htm). -- In transactionally inconsistent caches, slight inaccuracies in query results are accepted under the assumption that all cache entries are - assigned a validity period after which they expire (e.g. 1 minute) and that the underlying data changes only little during this period. - This approach is overall more suitable for OLAP databases. As an example where transactionally inconsistent caching is sufficient, - consider an hourly sales report in a reporting tool which is simultaneously accessed by multiple users. Sales data changes typically - slowly enough that the database only needs to compute the report once (represented by the first `SELECT` query). Further queries can be - served directly from the query result cache. In this example, a reasonable validity period could be 30 min. - -Transactionally inconsistent caching is traditionally provided by client tools or proxy packages interacting with the database. As a result, -the same caching logic and configuration is often duplicated. With ClickHouse's query result cache, the caching logic moves to the server -side. This reduces maintenance effort and avoids redundancy. - -:::warning -The query result cache is an experimental feature that should not be used in production. There are known cases (e.g. in distributed query -processing) where wrong results are returned. -::: - -## Configuration Settings and Usage - -As long as the result cache is experimental it must be activated using the following configuration setting: - -```sql -SET allow_experimental_query_result_cache = true; -``` - -Afterwards, setting [use_query_result_cache](settings/settings.md#use-query-result-cache) can be used to control whether a specific query or -all queries of the current session should utilize the query result cache. For example, the first execution of query - -```sql -SELECT some_expensive_calculation(column_1, column_2) -FROM table -SETTINGS use_query_result_cache = true; -``` - -will store the query result in the query result cache. Subsequent executions of the same query (also with parameter `use_query_result_cache -= true`) will read the computed result from the cache and return it immediately. - -The way the cache is utilized can be configured in more detail using settings [enable_writes_to_query_result_cache](settings/settings.md#enable-writes-to-query-result-cache) -and [enable_reads_from_query_result_cache](settings/settings.md#enable-reads-from-query-result-cache) (both `true` by default). The first -settings controls whether query results are stored in the cache, whereas the second parameter determines if the database should try to -retrieve query results from the cache. 
For example, the following query will use the cache only passively, i.e. attempt to read from it but -not store its result in it: - -```sql -SELECT some_expensive_calculation(column_1, column_2) -FROM table -SETTINGS use_query_result_cache = true, enable_writes_to_query_result_cache = false; -``` - -For maximum control, it is generally recommended to provide settings "use_query_result_cache", "enable_writes_to_query_result_cache" and -"enable_reads_from_query_result_cache" only with specific queries. It is also possible to enable caching at user or profile level (e.g. via -`SET use_query_result_cache = true`) but one should keep in mind that all `SELECT` queries including monitoring or debugging queries to -system tables may return cached results then. - -The query result cache can be cleared using statement `SYSTEM DROP QUERY RESULT CACHE`. The content of the query result cache is displayed -in system table `SYSTEM.QUERY_RESULT_CACHE`. The number of query result cache hits and misses are shown as events "QueryResultCacheHits" and -"QueryResultCacheMisses" in system table `SYSTEM.EVENTS`. Both counters are only updated for `SELECT` queries which run with setting -"use_query_result_cache = true". Other queries do not affect the cache miss counter. - -The query result cache exists once per ClickHouse server process. However, cache results are by default not shared between users. This can -be changed (see below) but doing so is not recommended for security reasons. - -Query results are referenced in the query result cache by the [Abstract Syntax Tree (AST)](https://en.wikipedia.org/wiki/Abstract_syntax_tree) -of their query. This means that caching is agnostic to upper/lowercase, for example `SELECT 1` and `select 1` are treated as the same query. -To make the matching more natural, all query-level settings related to the query result cache are removed from the AST. - -If the query was aborted due to an exception or user cancellation, no entry is written into the query result cache. - -The size of the query result cache, the maximum number of cache entries and the maximum size of cache entries (in bytes and in records) can -be configured using different [server configuration options](server-configuration-parameters/settings.md#server_configuration_parameters_query-result-cache). - -To define how long a query must run at least such that its result can be cached, you can use setting -[query_result_cache_min_query_duration](settings/settings.md#query-result-cache-min-query-duration). For example, the result of query - -``` sql -SELECT some_expensive_calculation(column_1, column_2) -FROM table -SETTINGS use_query_result_cache = true, query_result_cache_min_query_duration = 5000; -``` - -is only cached if the query runs longer than 5 seconds. It is also possible to specify how often a query needs to run until its result is -cached - for that use setting [query_result_cache_min_query_runs](settings/settings.md#query-result-cache-min-query-runs). - -Entries in the query result cache become stale after a certain time period (time-to-live). By default, this period is 60 seconds but a -different value can be specified at session, profile or query level using setting [query_result_cache_ttl](settings/settings.md#query-result-cache-ttl). - -Also, results of queries with non-deterministic functions such as `rand()` and `now()` are not cached. 
This can be overruled using
-setting [query_result_cache_store_results_of_queries_with_nondeterministic_functions](settings/settings.md#query-result-cache-store-results-of-queries-with-nondeterministic-functions).
-
-Finally, entries in the query cache are not shared between users due to security reasons. For example, user A must not be able to bypass a
-row policy on a table by running the same query as another user B for whom no such policy exists. However, if necessary, cache entries can
-be marked accessible by other users (i.e. shared) by supplying setting
-[query_result_cache_share_between_users](settings/settings.md#query-result-cache-share-between-users).
diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md
index 9a67edd75ca..7e7422f9045 100644
--- a/docs/en/operations/server-configuration-parameters/settings.md
+++ b/docs/en/operations/server-configuration-parameters/settings.md
@@ -1270,30 +1270,32 @@ If the table does not exist, ClickHouse will create it. If the structure of the
```

-## query_result_cache {#server_configuration_parameters_query-result-cache}
+## query_cache {#server_configuration_parameters_query-cache}

-[Query result cache](../query-result-cache.md) configuration.
+[Query cache](../query-cache.md) configuration.

The following settings are available:

-- `size`: The maximum cache size in bytes. 0 means the query result cache is disabled. Default value: `1073741824` (1 GiB).
-- `max_entries`: The maximum number of SELECT query results stored in the cache. Default value: `1024`.
-- `max_entry_size`: The maximum size in bytes SELECT query results may have to be saved in the cache. Default value: `1048576` (1 MiB).
-- `max_entry_records`: The maximum number of records SELECT query results may have to be saved in the cache. Default value: `30000000` (30 mil).
+- `size`: The maximum cache size in bytes. 0 means the query cache is disabled. Default value: `1073741824` (1 GiB).
+- `max_entries`: The maximum number of `SELECT` query results stored in the cache. Default value: `1024`.
+- `max_entry_size`: The maximum size in bytes a `SELECT` query result may have in order to be saved in the cache. Default value: `1048576` (1 MiB).
+- `max_entry_rows`: The maximum number of rows a `SELECT` query result may have in order to be saved in the cache. Default value: `30000000` (30 million).
+
+Changed settings take effect immediately.

:::warning
-Data for the query result cache is allocated in DRAM. If memory is scarce, make sure to set a small value for `size` or disable the query result cache altogether.
+Data for the query cache is allocated in DRAM. If memory is scarce, make sure to set a small value for `size` or disable the query cache altogether.
:::

**Example**

```xml
-<query_result_cache>
+<query_cache>
     <size>1073741824</size>
     <max_entries>1024</max_entries>
     <max_entry_size>1048576</max_entry_size>
-    <max_entry_records>30000000</max_entry_records>
-</query_result_cache>
+    <max_entry_rows>30000000</max_entry_rows>
+</query_cache>
```

## query_thread_log {#server_configuration_parameters-query_thread_log}
diff --git a/docs/en/operations/settings/merge-tree-settings.md b/docs/en/operations/settings/merge-tree-settings.md
index b383e0598a1..5bc174727ad 100644
--- a/docs/en/operations/settings/merge-tree-settings.md
+++ b/docs/en/operations/settings/merge-tree-settings.md
@@ -233,7 +233,7 @@ Possible values:

Default value: 100.

-Normally, the `use_async_block_ids_cache` updates as soon as there are updates in the watching keeper path. However, the cache updates might be too frequent and become a heavy burden. This minimum interval prevents the cache from updating too fast. Note that if we set this value too long, the block with duplicated inserts will have a longer retry time.
+Normally, the `use_async_block_ids_cache` updates as soon as there are updates in the watching keeper path. However, the cache updates might be too frequent and become a heavy burden. This minimum interval prevents the cache from updating too fast. Note that if we set this value too long, the block with duplicated inserts will have a longer retry time.

## max_replicated_logs_to_keep

diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md
index 1407971c4f2..32224056114 100644
--- a/docs/en/operations/settings/settings.md
+++ b/docs/en/operations/settings/settings.md
@@ -1301,10 +1301,10 @@ Possible values:

Default value: `3`.

-## use_query_result_cache {#use-query-result-cache}
+## use_query_cache {#use-query-cache}

-If turned on, SELECT queries may utilize the [query result cache](../query-result-cache.md). Parameters [enable_reads_from_query_result_cache](#enable-reads-from-query-result-cache)
-and [enable_writes_to_query_result_cache](#enable-writes-to-query-result-cache) control in more detail how the cache is used.
+If turned on, `SELECT` queries may utilize the [query cache](../query-cache.md). Parameters [enable_reads_from_query_cache](#enable-reads-from-query-cache)
+and [enable_writes_to_query_cache](#enable-writes-to-query-cache) control in more detail how the cache is used.

Possible values:

@@ -1313,9 +1313,9 @@ Possible values:

Default value: `0`.

-## enable_reads_from_query_result_cache {#enable-reads-from-query-result-cache}
+## enable_reads_from_query_cache {#enable-reads-from-query-cache}

-If turned on, results of SELECT queries are retrieved from the [query result cache](../query-result-cache.md).
+If turned on, results of `SELECT` queries are retrieved from the [query cache](../query-cache.md).

Possible values:

@@ -1324,9 +1324,9 @@ Possible values:

Default value: `1`.

-## enable_writes_to_query_result_cache {#enable-writes-to-query-result-cache}
+## enable_writes_to_query_cache {#enable-writes-to-query-cache}

-If turned on, results of SELECT queries are stored in the [query result cache](../query-result-cache.md).
+If turned on, results of `SELECT` queries are stored in the [query cache](../query-cache.md).

Possible values:

@@ -1335,9 +1335,9 @@ Possible values:

Default value: `1`.

-## query_result_cache_store_results_of_queries_with_nondeterministic_functions {#query-result-cache-store-results-of-queries-with-nondeterministic-functions}
+## query_cache_store_results_of_queries_with_nondeterministic_functions {#query-cache-store-results-of-queries-with-nondeterministic-functions}

-If turned on, then results of SELECT queries with non-deterministic functions (e.g. `rand()`, `now()`) can be cached in the [query result cache](../query-result-cache.md).
+If turned on, then results of `SELECT` queries with non-deterministic functions (e.g. `rand()`, `now()`) can be cached in the [query cache](../query-cache.md).

Possible values:

@@ -1346,9 +1346,9 @@ Possible values:

Default value: `0`.

-## query_result_cache_min_query_runs {#query-result-cache-min-query-runs}
+## query_cache_min_query_runs {#query-cache-min-query-runs}

-Minimum number of times a SELECT query must run before its result is stored in the [query result cache](../query-result-cache.md).
+Minimum number of times a `SELECT` query must run before its result is stored in the [query cache](../query-cache.md).
Possible values:

@@ -1356,9 +1356,9 @@ Possible values:

Default value: `0`

-## query_result_cache_min_query_duration {#query-result-cache-min-query-duration}
+## query_cache_min_query_duration {#query-cache-min-query-duration}

-Minimum duration in milliseconds a query needs to run for its result to be stored in the [query result cache](../query-result-cache.md).
+Minimum duration in milliseconds a query needs to run for its result to be stored in the [query cache](../query-cache.md).

Possible values:

@@ -1366,9 +1366,9 @@ Possible values:

Default value: `0`

-## query_result_cache_ttl {#query-result-cache-ttl}
+## query_cache_ttl {#query-cache-ttl}

-After this time in seconds entries in the [query result cache](../query-result-cache.md) become stale.
+After this time in seconds entries in the [query cache](../query-cache.md) become stale.

Possible values:

@@ -1376,9 +1376,9 @@ Possible values:

Default value: `60`

-## query_result_cache_share_between_users {#query-result-cache-share-between-users}
+## query_cache_share_between_users {#query-cache-share-between-users}

-If turned on, the result of SELECT queries cached in the [query result cache](../query-result-cache.md) can be read by other users.
+If turned on, the result of `SELECT` queries cached in the [query cache](../query-cache.md) can be read by other users.
It is not recommended to enable this setting due to security reasons.

Possible values:

@@ -3689,6 +3689,30 @@ Default value: `0`.

- [optimize_move_to_prewhere](#optimize_move_to_prewhere) setting

+## optimize_using_constraints
+
+Use [constraints](../../sql-reference/statements/create/table.md#constraints) for query optimization. The default is `false`.
+
+Possible values:
+
+- true, false
+
+## optimize_append_index
+
+Use [constraints](../../sql-reference/statements/create/table.md#constraints) in order to append index condition. The default is `false`.
+
+Possible values:
+
+- true, false
+
+## optimize_substitute_columns
+
+Use [constraints](../../sql-reference/statements/create/table.md#constraints) for column substitution. The default is `false`.
+
+Possible values:
+
+- true, false
+
## describe_include_subcolumns {#describe_include_subcolumns}

Enables describing subcolumns for a [DESCRIBE](../../sql-reference/statements/describe-table.md) query. For example, members of a [Tuple](../../sql-reference/data-types/tuple.md) or subcolumns of a [Map](../../sql-reference/data-types/map.md/#map-subcolumns), [Nullable](../../sql-reference/data-types/nullable.md/#finding-null) or an [Array](../../sql-reference/data-types/array.md/#array-size) data type.
diff --git a/docs/en/sql-reference/aggregate-functions/reference/sparkbar.md b/docs/en/sql-reference/aggregate-functions/reference/sparkbar.md
index 2026d086375..12da9be2847 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/sparkbar.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/sparkbar.md
@@ -1,5 +1,5 @@
---
-slug: /en/sql-reference/aggregate-functions/reference/sparkbar
+slug: /en/sql-reference/aggregate-functions/reference/sparkbar
sidebar_position: 311
sidebar_label: sparkbar
---
@@ -7,9 +7,11 @@ sidebar_label: sparkbar

# sparkbar

The function plots a frequency histogram for values `x` and the repetition rate `y` of these values over the interval `[min_x, max_x]`.
+Repetitions for all `x` falling into the same bucket are averaged, so data should be pre-aggregated.
+Negative repetitions are ignored.
- -If no interval is specified, then the minimum `x` is used as the interval start, and the maximum `x` — as the interval end. +If no interval is specified, then the minimum `x` is used as the interval start, and the maximum `x` — as the interval end. +Otherwise, values outside the interval are ignored. **Syntax** @@ -37,29 +39,24 @@ sparkbar(width[, min_x, max_x])(x, y) Query: ``` sql -CREATE TABLE spark_bar_data (`cnt` UInt64,`event_date` Date) ENGINE = MergeTree ORDER BY event_date SETTINGS index_granularity = 8192; - -INSERT INTO spark_bar_data VALUES(1,'2020-01-01'),(4,'2020-01-02'),(5,'2020-01-03'),(2,'2020-01-04'),(3,'2020-01-05'),(7,'2020-01-06'),(6,'2020-01-07'),(8,'2020-01-08'),(2,'2020-01-11'); +CREATE TABLE spark_bar_data (`value` Int64, `event_date` Date) ENGINE = MergeTree ORDER BY event_date; -SELECT sparkbar(9)(event_date,cnt) FROM spark_bar_data; +INSERT INTO spark_bar_data VALUES (1,'2020-01-01'), (3,'2020-01-02'), (4,'2020-01-02'), (-3,'2020-01-02'), (5,'2020-01-03'), (2,'2020-01-04'), (3,'2020-01-05'), (7,'2020-01-06'), (6,'2020-01-07'), (8,'2020-01-08'), (2,'2020-01-11'); -SELECT sparkbar(9,toDate('2020-01-01'),toDate('2020-01-10'))(event_date,cnt) FROM spark_bar_data; +SELECT sparkbar(9)(event_date,cnt) FROM (SELECT sum(value) as cnt, event_date FROM spark_bar_data GROUP BY event_date); + +SELECT sparkbar(9, toDate('2020-01-01'), toDate('2020-01-10'))(event_date,cnt) FROM (SELECT sum(value) as cnt, event_date FROM spark_bar_data GROUP BY event_date); ``` Result: ``` text - ┌─sparkbar(9)(event_date, cnt)─┐ -│ │ -│ ▁▅▄▃██▅ ▁ │ -│ │ +│ ▂▅▂▃▆█ ▂ │ └──────────────────────────────┘ ┌─sparkbar(9, toDate('2020-01-01'), toDate('2020-01-10'))(event_date, cnt)─┐ -│ │ -│▁▄▄▂▅▇█▁ │ -│ │ +│ ▂▅▂▃▇▆█ │ └──────────────────────────────────────────────────────────────────────────┘ ``` diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md index adea1ac0282..4dc6fd33849 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md @@ -5,7 +5,7 @@ sidebar_label: Storing Dictionaries in Memory --- import CloudDetails from '@site/docs/en/sql-reference/dictionaries/external-dictionaries/_snippet_dictionary_in_cloud.md'; -# Storing Dictionaries in Memory +# Storing Dictionaries in Memory There are a variety of ways to store dictionaries in memory. @@ -25,7 +25,7 @@ ClickHouse generates an exception for errors with dictionaries. Examples of erro You can view the list of dictionaries and their statuses in the [system.dictionaries](../../../operations/system-tables/dictionaries.md) table. - + The configuration looks like this: @@ -299,11 +299,11 @@ Example: The table contains discounts for each advertiser in the format: To use a sample for date ranges, define the `range_min` and `range_max` elements in the [structure](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md). These elements must contain elements `name` and `type` (if `type` is not specified, the default type will be used - Date). `type` can be any numeric type (Date / DateTime / UInt64 / Int32 / others). -:::warning +:::warning Values of `range_min` and `range_max` should fit in `Int64` type. 
::: -Example: +Example: ``` xml @@ -459,7 +459,7 @@ select dictGet('discounts_dict', 'amount', 1, toDate('2015-01-14')) res; │ 0.1 │ -- the only one range is matching: 2015-01-01 - Null └─────┘ -select dictGet('discounts_dict', 'amount', 1, toDate('2015-01-16')) res; +select dictGet('discounts_dict', 'amount', 1, toDate('2015-01-16')) res; ┌─res─┐ │ 0.2 │ -- two ranges are matching, range_min 2015-01-15 (0.2) is bigger than 2015-01-01 (0.1) └─────┘ @@ -496,7 +496,7 @@ select dictGet('discounts_dict', 'amount', 1, toDate('2015-01-14')) res; │ 0.1 │ -- the only one range is matching: 2015-01-01 - Null └─────┘ -select dictGet('discounts_dict', 'amount', 1, toDate('2015-01-16')) res; +select dictGet('discounts_dict', 'amount', 1, toDate('2015-01-16')) res; ┌─res─┐ │ 0.1 │ -- two ranges are matching, range_min 2015-01-01 (0.1) is less than 2015-01-15 (0.2) └─────┘ @@ -588,7 +588,7 @@ Set a large enough cache size. You need to experiment to select the number of ce 3. Assess memory consumption using the `system.dictionaries` table. 4. Increase or decrease the number of cells until the required memory consumption is reached. -:::warning +:::warning Do not use ClickHouse as a source, because it is slow to process queries with random reads. ::: @@ -660,25 +660,30 @@ This type of storage is for use with composite [keys](../../../sql-reference/dic This type of storage is for mapping network prefixes (IP addresses) to metadata such as ASN. -Example: The table contains network prefixes and their corresponding AS number and country code: +**Example** -``` text - +-----------|-----|------+ - | prefix | asn | cca2 | - +=================+=======+========+ - | 202.79.32.0/20 | 17501 | NP | - +-----------|-----|------+ - | 2620:0:870::/48 | 3856 | US | - +-----------|-----|------+ - | 2a02:6b8:1::/48 | 13238 | RU | - +-----------|-----|------+ - | 2001:db8::/32 | 65536 | ZZ | - +-----------|-----|------+ +Suppose we have a table in ClickHouse that contains our IP prefixes and mappings: + +```sql +CREATE TABLE my_ip_addresses ( + prefix String, + asn UInt32, + cca2 String +) +ENGINE = MergeTree +PRIMARY KEY prefix; ``` -When using this type of layout, the structure must have a composite key. +```sql +INSERT INTO my_ip_addresses VALUES + ('202.79.32.0/20', 17501, 'NP'), + ('2620:0:870::/48', 3856, 'US'), + ('2a02:6b8:1::/48', 13238, 'RU'), + ('2001:db8::/32', 65536, 'ZZ') +; +``` -Example: +Let's define an `ip_trie` dictionary for this table. The `ip_trie` layout requires a composite key: ``` xml @@ -712,26 +717,29 @@ Example: or ``` sql -CREATE DICTIONARY somedict ( +CREATE DICTIONARY my_ip_trie_dictionary ( prefix String, asn UInt32, cca2 String DEFAULT '??' ) PRIMARY KEY prefix +SOURCE(CLICKHOUSE(TABLE 'my_ip_addresses')) +LAYOUT(IP_TRIE) +LIFETIME(3600); ``` -The key must have only one String type attribute that contains an allowed IP prefix. Other types are not supported yet. +The key must have only one `String` type attribute that contains an allowed IP prefix. Other types are not supported yet. -For queries, you must use the same functions (`dictGetT` with a tuple) as for dictionaries with composite keys: +For queries, you must use the same functions (`dictGetT` with a tuple) as for dictionaries with composite keys. The syntax is: ``` sql dictGetT('dict_name', 'attr_name', tuple(ip)) ``` -The function takes either `UInt32` for IPv4, or `FixedString(16)` for IPv6: +The function takes either `UInt32` for IPv4, or `FixedString(16)` for IPv6. 
For example: ``` sql -dictGetString('prefix', 'asn', tuple(IPv6StringToNum('2001:db8::1'))) +select dictGet('my_ip_trie_dictionary', 'asn', tuple(IPv6StringToNum('2001:db8::1'))) ``` Other types are not supported yet. The function returns the attribute for the prefix that corresponds to this IP address. If there are overlapping prefixes, the most specific one is returned. diff --git a/docs/en/sql-reference/functions/comparison-functions.md b/docs/en/sql-reference/functions/comparison-functions.md index ab19399bd4e..586c0dc54e6 100644 --- a/docs/en/sql-reference/functions/comparison-functions.md +++ b/docs/en/sql-reference/functions/comparison-functions.md @@ -21,14 +21,14 @@ For example, you can’t compare a date with a string. You have to use a functio Strings are compared by bytes. A shorter string is smaller than all strings that start with it and that contain at least one more character. -## equals, a = b and a == b operator +### equals, a `=` b and a `==` b operator -## notEquals, a != b and a \<\> b operator +### notEquals, a `!=` b and a `<>` b operator -## less, \< operator +### less, `<` operator -## greater, \> operator +### greater, `>` operator -## lessOrEquals, \<= operator +### lessOrEquals, `<=` operator -## greaterOrEquals, \>= operator +### greaterOrEquals, `>=` operator diff --git a/docs/en/sql-reference/functions/geo/s2.md b/docs/en/sql-reference/functions/geo/s2.md index ed3c66a0f6f..3cd66cfaaeb 100644 --- a/docs/en/sql-reference/functions/geo/s2.md +++ b/docs/en/sql-reference/functions/geo/s2.md @@ -304,7 +304,7 @@ Result: └──────────────┘ ``` -## s2RectUinion +## s2RectUnion Returns the smallest rectangle containing the union of this rectangle and the given rectangle. In the S2 system, a rectangle is represented by a type of S2Region called a `S2LatLngRect` that represents a rectangle in latitude-longitude space. diff --git a/docs/en/sql-reference/statements/alter/ttl.md b/docs/en/sql-reference/statements/alter/ttl.md index a312e8cad91..14865e7bce0 100644 --- a/docs/en/sql-reference/statements/alter/ttl.md +++ b/docs/en/sql-reference/statements/alter/ttl.md @@ -6,6 +6,10 @@ sidebar_label: TTL # Manipulations with Table TTL +:::note +If you are looking for details on using TTL for managing old data, check out the [Manage Data with TTL](/docs/en/guides/developer/ttl.md) user guide. The docs below demonstrate how to alter or remove an existing TTL rule. +::: + ## MODIFY TTL You can change [table TTL](../../../engines/table-engines/mergetree-family/mergetree.md#mergetree-table-ttl) with a request of the following form: diff --git a/docs/en/sql-reference/statements/create/table.md b/docs/en/sql-reference/statements/create/table.md index a2d363da042..119f25d6d00 100644 --- a/docs/en/sql-reference/statements/create/table.md +++ b/docs/en/sql-reference/statements/create/table.md @@ -3,6 +3,7 @@ slug: /en/sql-reference/statements/create/table sidebar_position: 36 sidebar_label: TABLE title: "CREATE TABLE" +keywords: [compression, codec, schema, DDL] --- Creates a new table. This query can have various syntax forms depending on a use case. diff --git a/docs/en/sql-reference/statements/delete.md b/docs/en/sql-reference/statements/delete.md index 0dc6cc0d09a..e1987e50af4 100644 --- a/docs/en/sql-reference/statements/delete.md +++ b/docs/en/sql-reference/statements/delete.md @@ -2,15 +2,16 @@ slug: /en/sql-reference/statements/delete sidebar_position: 36 sidebar_label: DELETE +description: Lightweight deletes simplify the process of deleting data from the database. 
+keywords: [delete]
+title: DELETE Statement
---

-# DELETE Statement
-
``` sql
-DELETE FROM [db.]table [WHERE expr]
+DELETE FROM [db.]table [ON CLUSTER cluster] [WHERE expr]
```

-`DELETE FROM` removes rows from table `[db.]table` that match expression `expr`. The deleted rows are marked as deleted immediately and will be automatically filtered out of all subsequent queries. Cleanup of data happens asynchronously in background. This feature is only available for MergeTree table engine family.
+`DELETE FROM` removes rows from the table `[db.]table` that match the expression `expr`. The deleted rows are marked as deleted immediately and will be automatically filtered out of all subsequent queries. Cleanup of data happens asynchronously in the background. This feature is only available for the MergeTree table engine family.

For example, the following query deletes all rows from the `hits` table where the `Title` column contains the text `hello`:

@@ -32,7 +33,7 @@ SET allow_experimental_lightweight_delete = true;

An [alternative way to delete rows](./alter/delete.md) in ClickHouse is `ALTER TABLE ... DELETE`, which might be more efficient if you do bulk deletes only occasionally and don't need the operation to be applied instantly. In most use cases the new lightweight `DELETE FROM` behavior will be considerably faster.

:::warning
-Even though deletes are becoming more lightweight in ClickHouse, they should still not be used as aggressively as on an OLTP system. Ligthweight deletes are currently efficient for wide parts, but for compact parts they can be a heavyweight operation, and it may be better to use `ALTER TABLE` for some scenarios.
+Even though deletes are becoming more lightweight in ClickHouse, they should still not be used as aggressively as on an OLTP system. Lightweight deletes are currently efficient for wide parts, but for compact parts, they can be a heavyweight operation, and it may be better to use `ALTER TABLE` for some scenarios.
:::

:::note
@@ -41,3 +42,34 @@ Even though deletes are becoming more lightweight in ClickHouse, they should sti
grant ALTER DELETE ON db.table to username;
```
:::
+
+## Lightweight Delete Internals
+
+The idea behind Lightweight Delete is that when a `DELETE FROM table ...` query is executed, ClickHouse only saves a mask where each row is marked as either “existing” or “deleted”. Those “deleted” rows become invisible to subsequent queries, but physically the rows are removed only later by subsequent merges. Writing this mask is usually much more lightweight than what is done by an `ALTER TABLE ... DELETE` query.
+
+### How it is implemented
+The mask is implemented as a hidden `_row_exists` system column that stores `True` for all visible rows and `False` for deleted ones. This column is only present in a part if some rows in this part were deleted. In other words, the column is not persisted when it has all values equal to `True`.
+
+### SELECT query
+When the column is present, a `SELECT ... FROM table WHERE condition` query is internally extended by an additional predicate on `_row_exists` and becomes similar to
+```sql
+ SELECT ... FROM table PREWHERE _row_exists WHERE condition
+```
+At execution time, the column `_row_exists` is read to figure out which rows are not visible, and if there are many deleted rows it can also figure out which granules can be fully skipped when reading the rest of the columns.
+
+### DELETE query
+`DELETE FROM table WHERE condition` is translated into an `ALTER table UPDATE _row_exists = 0 WHERE condition` mutation.
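+
+For example (a sketch reusing the `hits` table from the earlier example on this page), the query
+
+```sql
+DELETE FROM hits WHERE Title LIKE '%hello%';
+```
+
+behaves like the mutation
+
+```sql
+ALTER TABLE hits UPDATE _row_exists = 0 WHERE Title LIKE '%hello%';
+```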
+Internally this mutation is executed in two steps:
+1. `SELECT count() FROM table WHERE condition` for each individual part to figure out if the part is affected.
+2. Mutate affected parts, and make hardlinks for unaffected parts. Mutating a part in fact only writes the `_row_exists` column and just hardlinks all other columns' files in the case of Wide parts. But for Compact parts, all columns are rewritten because they are all stored together in one file.
+
+So if we compare Lightweight Delete to `ALTER DELETE`: in the first step they both do the same thing to figure out which parts are affected, but in the second step `ALTER DELETE` does much more work because it reads and rewrites all columns' files for the affected parts.
+
+With the described implementation, we can now see what can negatively affect `DELETE FROM` execution time:
+- A heavy `WHERE` condition in the `DELETE` query
+- A mutations queue filled with other mutations, because all mutations on a table are executed sequentially
+- A table having a very large number of data parts
+- Having a lot of data in Compact parts; in a Compact part, all columns are stored in one file.
+
+:::note
+This implementation might change in the future.
+:::
diff --git a/docs/en/sql-reference/statements/show.md b/docs/en/sql-reference/statements/show.md
index 16fcb7b0c07..18b019dd017 100644
--- a/docs/en/sql-reference/statements/show.md
+++ b/docs/en/sql-reference/statements/show.md
@@ -510,3 +510,15 @@ Result:

**See Also**

- [system.settings](../../operations/system-tables/settings.md) table
+
+## SHOW ENGINES
+
+``` sql
+SHOW ENGINES [INTO OUTFILE filename] [FORMAT format]
+```
+
+Outputs the content of the [system.table_engines](../../operations/system-tables/table_engines.md) table, which contains descriptions of the table engines supported by the server and their feature support information.
+
+**See Also**
+
+- [system.table_engines](../../operations/system-tables/table_engines.md) table
\ No newline at end of file
diff --git a/docs/en/sql-reference/statements/system.md b/docs/en/sql-reference/statements/system.md
index 7ac0ce84e5b..f9f55acfcec 100644
--- a/docs/en/sql-reference/statements/system.md
+++ b/docs/en/sql-reference/statements/system.md
@@ -103,9 +103,9 @@ Its size can be configured using the server-level setting [uncompressed_cache_si

Reset the compiled expression cache. The compiled expression cache is enabled/disabled with the query/user/profile-level setting [compile_expressions](../../operations/settings/settings.md#compile-expressions).

-## DROP QUERY RESULT CACHE
+## DROP QUERY CACHE

-Resets the [query result cache](../../operations/query-result-cache.md).
+Resets the [query cache](../../operations/query-cache.md).

## FLUSH LOGS

@@ -283,7 +283,7 @@ SYSTEM START REPLICATION QUEUES [[db.]replicated_merge_tree_family_table_name]

Wait until a `ReplicatedMergeTree` table is synced with other replicas in a cluster. Will run until `receive_timeout` if fetches are currently disabled for the table.

``` sql
-SYSTEM SYNC REPLICA [db.]replicated_merge_tree_family_table_name
+SYSTEM SYNC REPLICA [ON CLUSTER cluster_name] [db.]replicated_merge_tree_family_table_name
```

After running this statement, the `[db.]replicated_merge_tree_family_table_name` fetches commands from the common replicated log into its own replication queue, and then the query waits until the replica processes all of the fetched commands.
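+
+For example, a minimal sketch, where `my_cluster` and `db.my_table` are placeholder names:
+
+``` sql
+-- on every host of 'my_cluster', wait until the local replica of db.my_table has processed its fetched commands
+SYSTEM SYNC REPLICA ON CLUSTER my_cluster db.my_table
+```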
diff --git a/docs/en/sql-reference/table-functions/executable.md b/docs/en/sql-reference/table-functions/executable.md
new file mode 100644
index 00000000000..635188763cf
--- /dev/null
+++ b/docs/en/sql-reference/table-functions/executable.md
@@ -0,0 +1,97 @@
+---
+slug: /en/engines/table-functions/executable
+sidebar_position: 55
+sidebar_label: executable
+keywords: [udf, user defined function, clickhouse, executable, table, function]
+---
+
+# executable Table Function for UDFs
+
+The `executable` table function creates a table based on the output of a user-defined function (UDF) that you define in a script that outputs rows to **stdout**. The executable script is stored in the `user_scripts` directory and can read data from any source.
+
+You can optionally include one or more input queries that stream their results to **stdin** for the script to read.
+
+:::note
+A key advantage of the `executable` table function and `Executable` table engine over ordinary UDF functions is that ordinary UDF functions cannot change the row count. For example, if the input is 100 rows, then the result must return 100 rows. When using the `executable` table function or `Executable` table engine, your script can make any data transformations you want, including complex aggregations.
+:::
+
+## Syntax
+
+The `executable` table function requires three parameters and accepts an optional list of input queries:
+
+```sql
+executable(script_name, format, structure, [input_query...])
+```
+
+- `script_name`: the file name of the script, saved in the `user_scripts` folder (the default folder of the `user_scripts_path` setting)
+- `format`: the format of the generated table
+- `structure`: the table schema of the generated table
+- `input_query`: an optional query (or collection of queries) whose results are passed to the script via **stdin**
+
+:::note
+If you are going to invoke the same script repeatedly with the same input queries, consider using the [`Executable` table engine](../../engines/table-engines/special/executable.md).
+:::
+
+The following Python script is named `generate_random.py` and is saved in the `user_scripts` folder. It reads in a number `i` and prints `i` random strings, each preceded by a number and separated by a tab:
+
+```python
+#!/usr/local/bin/python3.9
+
+import sys
+import string
+import random
+
+def main():
+
+    # Read input value
+    for number in sys.stdin:
+        i = int(number)
+
+        # Generate some random rows
+        for id in range(0, i):
+            letters = string.ascii_letters
+            random_string = ''.join(random.choices(letters, k=10))
+            print(str(id) + '\t' + random_string + '\n', end='')
+
+        # Flush results to stdout
+        sys.stdout.flush()
+
+if __name__ == "__main__":
+    main()
+```
+
+Let's invoke the script and have it generate 10 random strings:
+
+```sql
+SELECT * FROM executable('generate_random.py', TabSeparated, 'id UInt32, random String', (SELECT 10))
+```
+
+The response looks like:
+
+```response
+┌─id─┬─random─────┐
+│  0 │ xheXXCiSkH │
+│  1 │ AqxvHAoTrl │
+│  2 │ JYvPCEbIkY │
+│  3 │ sWgnqJwGRm │
+│  4 │ fTZGrjcLon │
+│  5 │ ZQINGktPnd │
+│  6 │ YFSvGGoezb │
+│  7 │ QyMJJZOOia │
+│  8 │ NfiyDDhmcI │
+│  9 │ REJRdJpWrg │
+└────┴────────────┘
+```
+
+## Passing Query Results to a Script
+
+Be sure to check out the example in the `Executable` table engine on [how to pass query results to a script](../../engines/table-engines/special/executable.md#passing-query-results-to-a-script).
+Here is how you execute the same script in that example using the `executable` table function:
+
+```sql
+SELECT * FROM executable(
+    'sentiment.py',
+    TabSeparated,
+    'id UInt64, sentiment Float32',
+    (SELECT id, comment FROM hackernews WHERE id > 0 AND comment != '' LIMIT 20)
+);
+```
\ No newline at end of file
diff --git a/docs/en/sql-reference/table-functions/s3.md b/docs/en/sql-reference/table-functions/s3.md
index 32d33f1dca5..d7199717798 100644
--- a/docs/en/sql-reference/table-functions/s3.md
+++ b/docs/en/sql-reference/table-functions/s3.md
@@ -2,11 +2,12 @@
slug: /en/sql-reference/table-functions/s3
sidebar_position: 45
sidebar_label: s3
+keywords: [s3, gcs, bucket]
---

# s3 Table Function

-Provides table-like interface to select/insert files in [Amazon S3](https://aws.amazon.com/s3/). This table function is similar to [hdfs](../../sql-reference/table-functions/hdfs.md), but provides S3-specific features.
+Provides a table-like interface to select/insert files in [Amazon S3](https://aws.amazon.com/s3/) and [Google Cloud Storage](https://cloud.google.com/storage/). This table function is similar to the [hdfs function](../../sql-reference/table-functions/hdfs.md), but provides S3-specific features.

**Syntax**

@@ -14,9 +15,24 @@ Provides table-like interface to select/insert files in [Amazon S3](https://aws.
s3(path [,aws_access_key_id, aws_secret_access_key] [,format] [,structure] [,compression])
```

+:::tip GCS
+The S3 Table Function integrates with Google Cloud Storage by using the GCS XML API and HMAC keys. See the [Google interoperability docs](https://cloud.google.com/storage/docs/interoperability) for more details about the endpoint and HMAC.
+
+For GCS, substitute your HMAC key and HMAC secret where you see `aws_access_key_id` and `aws_secret_access_key`.
+:::
+
**Arguments**

- `path` — Bucket url with path to file. Supports following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [here](../../engines/table-engines/integrations/s3.md#wildcards-in-path).
+
+  :::note GCS
+  The GCS path is in this format, as the endpoint for the Google XML API is different from the JSON API:
+  ```
+  https://storage.googleapis.com/<bucket>/<folder>/<filename>
+  ```
+  and not ~~https://storage.cloud.google.com~~.
+  :::
+
- `format` — The [format](../../interfaces/formats.md#formats) of the file.
- `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`.
- `compression` — Parameter is optional. Supported values: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. By default, it will autodetect compression by file extension.
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/sparkbar.md b/docs/ru/sql-reference/aggregate-functions/reference/sparkbar.md
index 7a9fc033542..958a4bd3504 100644
--- a/docs/ru/sql-reference/aggregate-functions/reference/sparkbar.md
+++ b/docs/ru/sql-reference/aggregate-functions/reference/sparkbar.md
@@ -1,14 +1,15 @@
---
-slug: /ru/sql-reference/aggregate-functions/reference/sparkbar
+slug: /ru/sql-reference/aggregate-functions/reference/sparkbar
sidebar_position: 311
sidebar_label: sparkbar
---

# sparkbar {#sparkbar}

-Функция строит гистограмму частот по заданным значениям `x` и частоте повторения этих значений `y` на интервале `[min_x, max_x]`.
+Функция строит гистограмму частот по заданным значениям `x` и частоте повторения этих значений `y` на интервале `[min_x, max_x]`.
Повторения для всех `x`, попавших в один бакет, усредняются, поэтому данные должны быть предварительно агрегированы. Отрицательные повторения игнорируются. Если интервал для построения не указан, то в качестве нижней границы интервала будет взято минимальное значение `x`, а в качестве верхней границы — максимальное значение `x`. +Значения `x` вне указанного интервала игнорируются. **Синтаксис** @@ -39,29 +40,23 @@ sparkbar(width[, min_x, max_x])(x, y) Запрос: ``` sql -CREATE TABLE spark_bar_data (`cnt` UInt64,`event_date` Date) ENGINE = MergeTree ORDER BY event_date SETTINGS index_granularity = 8192; - -INSERT INTO spark_bar_data VALUES(1,'2020-01-01'),(4,'2020-01-02'),(5,'2020-01-03'),(2,'2020-01-04'),(3,'2020-01-05'),(7,'2020-01-06'),(6,'2020-01-07'),(8,'2020-01-08'),(2,'2020-01-11'); +CREATE TABLE spark_bar_data (`value` Int64, `event_date` Date) ENGINE = MergeTree ORDER BY event_date; -SELECT sparkbar(9)(event_date,cnt) FROM spark_bar_data; +INSERT INTO spark_bar_data VALUES (1,'2020-01-01'), (3,'2020-01-02'), (4,'2020-01-02'), (-3,'2020-01-02'), (5,'2020-01-03'), (2,'2020-01-04'), (3,'2020-01-05'), (7,'2020-01-06'), (6,'2020-01-07'), (8,'2020-01-08'), (2,'2020-01-11'); -SELECT sparkbar(9,toDate('2020-01-01'),toDate('2020-01-10'))(event_date,cnt) FROM spark_bar_data; +SELECT sparkbar(9)(event_date,cnt) FROM (SELECT sum(value) as cnt, event_date FROM spark_bar_data GROUP BY event_date); + +SELECT sparkbar(9, toDate('2020-01-01'), toDate('2020-01-10'))(event_date,cnt) FROM (SELECT sum(value) as cnt, event_date FROM spark_bar_data GROUP BY event_date); ``` Результат: ``` text - ┌─sparkbar(9)(event_date, cnt)─┐ -│ │ -│ ▁▅▄▃██▅ ▁ │ -│ │ +│ ▂▅▂▃▆█ ▂ │ └──────────────────────────────┘ ┌─sparkbar(9, toDate('2020-01-01'), toDate('2020-01-10'))(event_date, cnt)─┐ -│ │ -│▁▄▄▂▅▇█▁ │ -│ │ +│ ▂▅▂▃▇▆█ │ └──────────────────────────────────────────────────────────────────────────┘ ``` - diff --git a/packages/build b/packages/build index c8fb77e9371..6ec991aad07 100755 --- a/packages/build +++ b/packages/build @@ -102,7 +102,8 @@ done EOF chmod +x "$PKG_PATH/install/doinst.sh" if [ -f "$PKG_PATH/DEBIAN/postinst" ]; then - tail +2 "$PKG_PATH/DEBIAN/postinst" >> "$PKG_PATH/install/doinst.sh" + # we don't need debconf source in doinst in any case + tail +2 "$PKG_PATH/DEBIAN/postinst" | grep -v debconf/confmodule >> "$PKG_PATH/install/doinst.sh" fi rm -rf "$PKG_PATH/DEBIAN" if [ -f "/usr/bin/pigz" ]; then diff --git a/packages/clickhouse-keeper.postinstall b/packages/clickhouse-keeper.postinstall new file mode 100644 index 00000000000..3d6cd484146 --- /dev/null +++ b/packages/clickhouse-keeper.postinstall @@ -0,0 +1,46 @@ +#!/bin/sh +set -e +# set -x + +PROGRAM=clickhouse-keeper +KEEPER_USER=${KEEPER_USER:=clickhouse} +KEEPER_GROUP=${KEEPER_GROUP:=clickhouse} +# Please note that we don't support paths with whitespaces. This is rather ignorant. +KEEPER_CONFDIR=${KEEPER_CONFDIR:=/etc/$PROGRAM} +KEEPER_DATADIR=${KEEPER_DATADIR:=/var/lib/clickhouse} +KEEPER_LOGDIR=${KEEPER_LOGDIR:=/var/log/$PROGRAM} + +[ -f /usr/share/debconf/confmodule ] && . /usr/share/debconf/confmodule +[ -f /etc/default/clickhouse-keeper ] && . /etc/default/clickhouse-keeper + +if [ ! -f "/etc/debian_version" ]; then + not_deb_os=1 +fi + +if [ "$1" = configure ] || [ -n "$not_deb_os" ]; then + if ! getent group "${KEEPER_GROUP}" > /dev/null 2>&1 ; then + groupadd --system "${KEEPER_GROUP}" + fi + GID=$(getent group "${KEEPER_GROUP}" | cut -d: -f 3) + if ! 
id "${KEEPER_USER}" > /dev/null 2>&1 ; then + adduser --system --home /dev/null --no-create-home \ + --gid "${GID}" --shell /bin/false \ + "${KEEPER_USER}" + fi + + chown -R "${KEEPER_USER}:${KEEPER_GROUP}" "${KEEPER_CONFDIR}" + chmod 0755 "${KEEPER_CONFDIR}" + + if ! [ -d "${KEEPER_DATADIR}" ]; then + mkdir -p "${KEEPER_DATADIR}" + chown -R "${KEEPER_USER}:${KEEPER_GROUP}" "${KEEPER_DATADIR}" + chmod 0700 "${KEEPER_DATADIR}" + fi + + if ! [ -d "${KEEPER_LOGDIR}" ]; then + mkdir -p "${KEEPER_LOGDIR}" + chown -R "${KEEPER_USER}:${KEEPER_GROUP}" "${KEEPER_LOGDIR}" + chmod 0770 "${KEEPER_LOGDIR}" + fi +fi +# vim: ts=4: sw=4: sts=4: expandtab diff --git a/packages/clickhouse-keeper.service b/packages/clickhouse-keeper.service new file mode 100644 index 00000000000..2809074c93a --- /dev/null +++ b/packages/clickhouse-keeper.service @@ -0,0 +1,27 @@ +[Unit] +Description=ClickHouse Keeper - zookeeper compatible distributed coordination server +Requires=network-online.target +# NOTE: that After/Wants=time-sync.target is not enough, you need to ensure +# that the time was adjusted already, if you use systemd-timesyncd you are +# safe, but if you use ntp or some other daemon, you should configure it +# additionaly. +After=time-sync.target network-online.target +Wants=time-sync.target + +[Service] +Type=simple +User=clickhouse +Group=clickhouse +Restart=always +RestartSec=30 +RuntimeDirectory=%p # %p is resolved to the systemd unit name +ExecStart=/usr/bin/clickhouse-keeper --config=/etc/clickhouse-keeper/keeper_config.xml --pid-file=%t/%p/%p.pid +# Minus means that this file is optional. +EnvironmentFile=-/etc/default/%p +LimitCORE=infinity +LimitNOFILE=500000 +CapabilityBoundingSet=CAP_NET_ADMIN CAP_IPC_LOCK CAP_SYS_NICE CAP_NET_BIND_SERVICE + +[Install] +# ClickHouse should not start from the rescue shell (rescue.target). +WantedBy=multi-user.target diff --git a/packages/clickhouse-keeper.yaml b/packages/clickhouse-keeper.yaml index 7586fa580e6..e9c2e929755 100644 --- a/packages/clickhouse-keeper.yaml +++ b/packages/clickhouse-keeper.yaml @@ -30,6 +30,8 @@ contents: type: config|noreplace - src: root/usr/bin/clickhouse-keeper dst: /usr/bin/clickhouse-keeper +- src: clickhouse-keeper.service + dst: /lib/systemd/system/clickhouse-keeper.service # docs - src: ../AUTHORS dst: /usr/share/doc/clickhouse-keeper/AUTHORS @@ -39,3 +41,6 @@ contents: dst: /usr/share/doc/clickhouse-keeper/LICENSE - src: ../README.md dst: /usr/share/doc/clickhouse-keeper/README.md + +scripts: + postinstall: ./clickhouse-keeper.postinstall diff --git a/packages/clickhouse-server.postinstall b/packages/clickhouse-server.postinstall index ff376b89bd4..2b9830faf3b 100644 --- a/packages/clickhouse-server.postinstall +++ b/packages/clickhouse-server.postinstall @@ -11,8 +11,6 @@ CLICKHOUSE_DATADIR=${CLICKHOUSE_DATADIR:=/var/lib/clickhouse} CLICKHOUSE_LOGDIR=${CLICKHOUSE_LOGDIR:=/var/log/clickhouse-server} CLICKHOUSE_BINDIR=${CLICKHOUSE_BINDIR:=/usr/bin} CLICKHOUSE_GENERIC_PROGRAM=${CLICKHOUSE_GENERIC_PROGRAM:=clickhouse} -EXTRACT_FROM_CONFIG=${CLICKHOUSE_GENERIC_PROGRAM}-extract-from-config -CLICKHOUSE_CONFIG=$CLICKHOUSE_CONFDIR/config.xml CLICKHOUSE_PIDDIR=/var/run/$PROGRAM [ -f /usr/share/debconf/confmodule ] && . 
diff --git a/packages/clickhouse-server.service b/packages/clickhouse-server.service
index a1602482073..5ea30c062b0 100644
--- a/packages/clickhouse-server.service
+++ b/packages/clickhouse-server.service
@@ -17,10 +17,11 @@ User=clickhouse
Group=clickhouse
Restart=always
RestartSec=30
-RuntimeDirectory=clickhouse-server
-ExecStart=/usr/bin/clickhouse-server --config=/etc/clickhouse-server/config.xml --pid-file=/run/clickhouse-server/clickhouse-server.pid
+# %p is resolved to the systemd unit name
+RuntimeDirectory=%p
+ExecStart=/usr/bin/clickhouse-server --config=/etc/clickhouse-server/config.xml --pid-file=%t/%p/%p.pid
# Minus means that this file is optional.
-EnvironmentFile=-/etc/default/clickhouse
+EnvironmentFile=-/etc/default/%p
LimitCORE=infinity
LimitNOFILE=500000
CapabilityBoundingSet=CAP_NET_ADMIN CAP_IPC_LOCK CAP_SYS_NICE CAP_NET_BIND_SERVICE
diff --git a/programs/benchmark/Benchmark.cpp b/programs/benchmark/Benchmark.cpp
index dae3aea2d2e..7fb0b1f154f 100644
--- a/programs/benchmark/Benchmark.cpp
+++ b/programs/benchmark/Benchmark.cpp
@@ -474,7 +474,7 @@ private:
        executor.sendQuery(ClientInfo::QueryKind::INITIAL_QUERY);

        ProfileInfo info;
-        while (Block block = executor.read())
+        while (Block block = executor.readBlock())
            info.update(block);

        executor.finish();
diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp
index d741eb30d4a..4d460c7ac48 100644
--- a/programs/client/Client.cpp
+++ b/programs/client/Client.cpp
@@ -1,3 +1,4 @@
+#include
#include
#include
#include
@@ -538,24 +539,28 @@ void Client::connect()
// Prints changed settings to stderr. Useful for debugging fuzzing failures.
void Client::printChangedSettings() const
{
-    const auto & changes = global_context->getSettingsRef().changes();
-    if (!changes.empty())
+    auto print_changes = [](const auto & changes, std::string_view settings_name)
    {
-        fmt::print(stderr, "Changed settings: ");
-        for (size_t i = 0; i < changes.size(); ++i)
+        if (!changes.empty())
        {
-            if (i)
+            fmt::print(stderr, "Changed {}: ", settings_name);
+            for (size_t i = 0; i < changes.size(); ++i)
            {
-                fmt::print(stderr, ", ");
+                if (i)
+                    fmt::print(stderr, ", ");
+                fmt::print(stderr, "{} = '{}'", changes[i].name, toString(changes[i].value));
            }
-            fmt::print(stderr, "{} = '{}'", changes[i].name, toString(changes[i].value));
+
+            fmt::print(stderr, "\n");
        }
-        fmt::print(stderr, "\n");
-    }
-    else
-    {
-        fmt::print(stderr, "No changed settings.\n");
-    }
+        else
+        {
+            fmt::print(stderr, "No changed {}.\n", settings_name);
+        }
+    };
+
+    print_changes(global_context->getSettingsRef().changes(), "settings");
+    print_changes(cmd_merge_tree_settings.changes(), "MergeTree settings");
}

@@ -1352,6 +1357,8 @@ void Client::readArguments(
        }
        else if (arg == "--allow_repeated_settings")
            allow_repeated_settings = true;
+        else if (arg == "--allow_merge_tree_settings")
+            allow_merge_tree_settings = true;
        else
            common_arguments.emplace_back(arg);
    }
diff --git a/programs/copier/ClusterCopier.cpp b/programs/copier/ClusterCopier.cpp
index 816fa561a6a..2fc0eb27213 100644
--- a/programs/copier/ClusterCopier.cpp
+++ b/programs/copier/ClusterCopier.cpp
@@ -6,7 +6,6 @@
#include
#include
#include
-#include
#include
#include
#include
@@ -2041,7 +2040,7 @@ UInt64 ClusterCopier::executeQueryOnCluster(

    while (true)
    {
-        auto block = remote_query_executor->read();
+        auto block = remote_query_executor->readBlock();
        if (!block)
            break;
    }
diff --git a/programs/keeper/Keeper.cpp b/programs/keeper/Keeper.cpp
index 3a0d3d3a6ca..ed3297ed7cb 100644
---
a/programs/keeper/Keeper.cpp +++ b/programs/keeper/Keeper.cpp @@ -362,6 +362,7 @@ try else path = std::filesystem::path{KEEPER_DEFAULT_PATH}; + std::filesystem::create_directories(path); /// Check that the process user id matches the owner of the data. const auto effective_user_id = geteuid(); diff --git a/programs/local/CMakeLists.txt b/programs/local/CMakeLists.txt index 6943af48ab9..565b67d0020 100644 --- a/programs/local/CMakeLists.txt +++ b/programs/local/CMakeLists.txt @@ -19,6 +19,9 @@ target_link_libraries(clickhouse-local-lib PRIVATE clickhouse-server-lib) if (TARGET ch_rust::skim) target_link_libraries(clickhouse-local-lib PRIVATE ch_rust::skim) endif() +if (TARGET ch_contrib::azure_sdk) + target_link_libraries(clickhouse-local-lib PRIVATE ch_contrib::azure_sdk) +endif() # Always use internal readpassphrase target_link_libraries(clickhouse-local-lib PRIVATE readpassphrase) diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 2f0f98ae857..133d629bbb1 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -51,6 +51,10 @@ #include #endif +#if USE_AZURE_BLOB_STORAGE +# include +#endif + namespace fs = std::filesystem; @@ -115,6 +119,14 @@ void LocalServer::initialize(Poco::Util::Application & self) config().getUInt("thread_pool_queue_size", 10000) ); +#if USE_AZURE_BLOB_STORAGE + /// See the explanation near the same line in Server.cpp + GlobalThreadPool::instance().addOnDestroyCallback([] + { + Azure::Storage::_internal::XmlGlobalDeinitialize(); + }); +#endif + IOThreadPool::initialize( config().getUInt("max_io_thread_pool_size", 100), config().getUInt("max_io_thread_pool_free_size", 0), diff --git a/programs/server/CMakeLists.txt b/programs/server/CMakeLists.txt index 2cfa748d585..855973d10e1 100644 --- a/programs/server/CMakeLists.txt +++ b/programs/server/CMakeLists.txt @@ -27,6 +27,9 @@ set (CLICKHOUSE_SERVER_LINK if (TARGET ch_contrib::jemalloc) list(APPEND CLICKHOUSE_SERVER_LINK PRIVATE ch_contrib::jemalloc) endif() +if (TARGET ch_contrib::azure_sdk) + list(APPEND CLICKHOUSE_SERVER_LINK PRIVATE ch_contrib::azure_sdk) +endif() clickhouse_program_add(server) diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 8b6c43b450e..e0288415a2d 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -82,9 +82,7 @@ #include #include #include -#if USE_BORINGSSL #include -#endif #include #include #include @@ -128,6 +126,10 @@ # include #endif +#if USE_AZURE_BLOB_STORAGE +# include +#endif + namespace CurrentMetrics { extern const Metric Revision; @@ -750,6 +752,19 @@ try config().getUInt("max_thread_pool_free_size", 1000), config().getUInt("thread_pool_queue_size", 10000)); +#if USE_AZURE_BLOB_STORAGE + /// It makes sense to deinitialize libxml after joining of all threads + /// in global pool because libxml uses thread-local memory allocations via + /// 'pthread_key_create' and 'pthread_setspecific' which should be deallocated + /// at 'pthread_exit'. Deinitialization of libxml leads to call of 'pthread_key_delete' + /// and if it is done before joining of threads, allocated memory will not be freed + /// and there may be memory leaks in threads that used libxml. 
+ GlobalThreadPool::instance().addOnDestroyCallback([] + { + Azure::Storage::_internal::XmlGlobalDeinitialize(); + }); +#endif + IOThreadPool::initialize( config().getUInt("max_io_thread_pool_size", 100), config().getUInt("max_io_thread_pool_free_size", 0), @@ -1331,9 +1346,8 @@ try global_context->updateStorageConfiguration(*config); global_context->updateInterserverCredentials(*config); -#if USE_BORINGSSL + global_context->updateQueryCacheConfiguration(*config); CompressionCodecEncrypted::Configuration::instance().tryLoad(*config, "encryption_codecs"); -#endif #if USE_SSL CertificateReloader::instance().tryLoad(*config); #endif @@ -1517,13 +1531,7 @@ try global_context->setMMappedFileCache(mmap_cache_size); /// A cache for query results. - size_t query_result_cache_size = config().getUInt64("query_result_cache.size", 1_GiB); - if (query_result_cache_size) - global_context->setQueryResultCache( - query_result_cache_size, - config().getUInt64("query_result_cache.max_entries", 1024), - config().getUInt64("query_result_cache.max_entry_size", 1_MiB), - config().getUInt64("query_result_cache.max_entry_records", 30'000'000)); + global_context->setQueryCache(config()); #if USE_EMBEDDED_COMPILER /// 128 MB @@ -1547,10 +1555,8 @@ try global_context->getMergeTreeSettings().sanityCheck(background_pool_tasks); global_context->getReplicatedMergeTreeSettings().sanityCheck(background_pool_tasks); } -#if USE_BORINGSSL /// try set up encryption. There are some errors in config, error will be printed and server wouldn't start. CompressionCodecEncrypted::Configuration::instance().load(config(), "encryption_codecs"); -#endif SCOPE_EXIT({ async_metrics.stop(); diff --git a/programs/server/config.xml b/programs/server/config.xml index 0ed8ec48e83..b1a1514edc8 100644 --- a/programs/server/config.xml +++ b/programs/server/config.xml @@ -854,6 +854,51 @@ + + + false + + 127.0.0.1 + 9000 + + + 127.0.0.2 + 9000 + + + 127.0.0.3 + 9000 + + + 127.0.0.4 + 9000 + + + 127.0.0.5 + 9000 + + + 127.0.0.6 + 9000 + + + 127.0.0.7 + 9000 + + + 127.0.0.8 + 9000 + + + 127.0.0.9 + 9000 + + + 127.0.0.10 + 9000 + + + @@ -1466,13 +1511,13 @@ --> - - + + - - + + don't replace it + LOG_TRACE(&Poco::Logger::get("QueryResultCache"), "Skipped insert (non-stale entry found), query: {}", key.queryStringFromAst()); + } } -void QueryResultCache::Writer::buffer(Chunk && partial_query_result) +void QueryCache::Writer::buffer(Chunk && partial_query_result) { if (skip_insert) return; @@ -205,21 +208,29 @@ void QueryResultCache::Writer::buffer(Chunk && partial_query_result) { chunks->clear(); /// eagerly free some space skip_insert = true; + LOG_TRACE(&Poco::Logger::get("QueryResultCache"), "Skipped insert (query result too big), new_entry_size_in_bytes: {} ({}), new_entry_size_in_rows: {} ({}), query: {}", new_entry_size_in_bytes, max_entry_size_in_bytes, new_entry_size_in_rows, max_entry_size_in_rows, key.queryStringFromAst()); } } -void QueryResultCache::Writer::finalizeWrite() +void QueryCache::Writer::finalizeWrite() { if (skip_insert) return; if (std::chrono::duration_cast(std::chrono::system_clock::now() - query_start_time) < min_query_runtime) + { + LOG_TRACE(&Poco::Logger::get("QueryResultCache"), "Skipped insert (query not expensive enough), query: {}", key.queryStringFromAst()); return; + } std::lock_guard lock(mutex); if (auto it = cache.find(key); it != cache.end() && !is_stale(it->first)) - return; /// same check as in ctor because a parallel Writer could have inserted the current key in the meantime + { + /// same check as in ctor 
because a parallel Writer could have inserted the current key in the meantime + LOG_TRACE(&Poco::Logger::get("QueryResultCache"), "Skipped insert (non-stale entry found), query: {}", key.queryStringFromAst()); + return; + } auto sufficient_space_in_cache = [this]() TSA_REQUIRES(mutex) { @@ -239,34 +250,36 @@ void QueryResultCache::Writer::finalizeWrite() } else ++it; - LOG_TRACE(&Poco::Logger::get("QueryResultCache"), "Removed {} stale entries", removed_items); + LOG_TRACE(&Poco::Logger::get("QueryCache"), "Removed {} stale entries", removed_items); } - /// Insert or replace if enough space - if (sufficient_space_in_cache()) + if (!sufficient_space_in_cache()) + LOG_TRACE(&Poco::Logger::get("QueryResultCache"), "Skipped insert (cache has insufficient space), query: {}", key.queryStringFromAst()); + else { + /// Insert or replace key cache_size_in_bytes += query_result.sizeInBytes(); if (auto it = cache.find(key); it != cache.end()) cache_size_in_bytes -= it->second.sizeInBytes(); // key replacement cache[key] = std::move(query_result); - LOG_TRACE(&Poco::Logger::get("QueryResultCache"), "Stored result of query {}", key.queryStringFromAst()); + LOG_TRACE(&Poco::Logger::get("QueryCache"), "Stored result of query {}", key.queryStringFromAst()); } } -QueryResultCache::Reader::Reader(const Cache & cache_, const Key & key, size_t & cache_size_in_bytes_, const std::lock_guard &) +QueryCache::Reader::Reader(const Cache & cache_, const Key & key, size_t & cache_size_in_bytes_, const std::lock_guard &) { auto it = cache_.find(key); if (it == cache_.end()) { - LOG_TRACE(&Poco::Logger::get("QueryResultCache"), "No entry found for query {}", key.queryStringFromAst()); + LOG_TRACE(&Poco::Logger::get("QueryCache"), "No entry found for query {}", key.queryStringFromAst()); return; } if (it->first.username.has_value() && it->first.username != key.username) { - LOG_TRACE(&Poco::Logger::get("QueryResultCache"), "Inaccessible entry found for query {}", key.queryStringFromAst()); + LOG_TRACE(&Poco::Logger::get("QueryCache"), "Inaccessible entry found for query {}", key.queryStringFromAst()); return; } @@ -274,53 +287,45 @@ QueryResultCache::Reader::Reader(const Cache & cache_, const Key & key, size_t & { cache_size_in_bytes_ -= it->second.sizeInBytes(); const_cast(cache_).erase(it); - LOG_TRACE(&Poco::Logger::get("QueryResultCache"), "Stale entry found and removed for query {}", key.queryStringFromAst()); + LOG_TRACE(&Poco::Logger::get("QueryCache"), "Stale entry found and removed for query {}", key.queryStringFromAst()); return; } pipe = Pipe(std::make_shared(it->first.header, it->second.chunks)); - LOG_TRACE(&Poco::Logger::get("QueryResultCache"), "Entry found for query {}", key.queryStringFromAst()); + LOG_TRACE(&Poco::Logger::get("QueryCache"), "Entry found for query {}", key.queryStringFromAst()); } -bool QueryResultCache::Reader::hasCacheEntryForKey() const +bool QueryCache::Reader::hasCacheEntryForKey() const { bool res = !pipe.empty(); if (res) - ProfileEvents::increment(ProfileEvents::QueryResultCacheHits); + ProfileEvents::increment(ProfileEvents::QueryCacheHits); else - ProfileEvents::increment(ProfileEvents::QueryResultCacheMisses); + ProfileEvents::increment(ProfileEvents::QueryCacheMisses); return res; } -Pipe && QueryResultCache::Reader::getPipe() +Pipe && QueryCache::Reader::getPipe() { chassert(!pipe.empty()); // cf. hasCacheEntryForKey() return std::move(pipe); } -QueryResultCache::QueryResultCache(size_t max_cache_size_in_bytes_, size_t max_cache_entries_, size_t max_cache_entry_size_in_bytes_, size_t max_cache_entry_size_in_rows_) - : max_cache_size_in_bytes(max_cache_size_in_bytes_) - , max_cache_entries(max_cache_entries_) - , max_cache_entry_size_in_bytes(max_cache_entry_size_in_bytes_) - , max_cache_entry_size_in_rows(max_cache_entry_size_in_rows_) -{ -} - -QueryResultCache::Reader QueryResultCache::createReader(const Key & key) +QueryCache::Reader QueryCache::createReader(const Key & key) { std::lock_guard lock(mutex); return Reader(cache, key, cache_size_in_bytes, lock); } -QueryResultCache::Writer QueryResultCache::createWriter(const Key & key, std::chrono::milliseconds min_query_runtime) +QueryCache::Writer QueryCache::createWriter(const Key & key, std::chrono::milliseconds min_query_runtime) { std::lock_guard lock(mutex); return Writer(mutex, cache, key, cache_size_in_bytes, max_cache_size_in_bytes, max_cache_entries, max_cache_entry_size_in_bytes, max_cache_entry_size_in_rows, min_query_runtime); } -void QueryResultCache::reset() +void QueryCache::reset() { std::lock_guard lock(mutex); cache.clear(); @@ -328,16 +333,24 @@ void QueryResultCache::reset() cache_size_in_bytes = 0; } -size_t QueryResultCache::recordQueryRun(const Key & key) +size_t QueryCache::recordQueryRun(const Key & key) { - static constexpr size_t TIMES_EXECUTED_MAX_SIZE = 10'000; - - std::lock_guard times_executed_lock(mutex); + std::lock_guard lock(mutex); size_t times = ++times_executed[key]; // Regularly drop times_executed to avoid DOS-by-unlimited-growth. + static constexpr size_t TIMES_EXECUTED_MAX_SIZE = 10'000; if (times_executed.size() > TIMES_EXECUTED_MAX_SIZE) times_executed.clear(); return times; } +void QueryCache::updateConfiguration(const Poco::Util::AbstractConfiguration & config) +{ + std::lock_guard lock(mutex); + max_cache_size_in_bytes = config.getUInt64("query_cache.size", 1_GiB); + max_cache_entries = config.getUInt64("query_cache.max_entries", 1024); + max_cache_entry_size_in_bytes = config.getUInt64("query_cache.max_entry_size", 1_MiB); + max_cache_entry_size_in_rows = config.getUInt64("query_cache.max_entry_rows", 30'000'000); +} + }
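With updateConfiguration() replacing the constructor-time limits, the four limit fields become runtime-mutable, so every read of them must happen under the same mutex a concurrent reconfiguration takes; the TSA_GUARDED_BY annotations in the header below enforce that at compile time. A minimal sketch of this pattern, using hypothetical names rather than the real ClickHouse classes:

#include <mutex>

// Hypothetical, reduced model of a cache whose limits can change at runtime.
class CacheLimits
{
    std::mutex mutex;
    size_t max_size_in_bytes = 0; // must only be accessed while holding `mutex`

public:
    void updateConfiguration(size_t new_max) // called on config reload
    {
        std::lock_guard lock(mutex);
        max_size_in_bytes = new_max;
    }

    bool entryFits(size_t entry_size) // called on every insert
    {
        std::lock_guard lock(mutex);
        return entry_size <= max_size_in_bytes;
    }
};

int main()
{
    CacheLimits limits;
    limits.updateConfiguration(1 << 20); // 1 MiB
    return limits.entryFits(512) ? 0 : 1;
}

The cost is a lock acquisition on the read path, which is acceptable here because inserts and lookups already serialize on the same mutex in createReader()/createWriter().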
diff --git a/src/Interpreters/Cache/QueryResultCache.h b/src/Interpreters/Cache/QueryCache.h similarity index 86% rename from src/Interpreters/Cache/QueryResultCache.h rename to src/Interpreters/Cache/QueryCache.h index 65cab854a45..66477d77dcb 100644 --- a/src/Interpreters/Cache/QueryResultCache.h +++ b/src/Interpreters/Cache/QueryCache.h @@ -2,6 +2,7 @@ #include #include +#include #include #include @@ -18,7 +19,7 @@ bool astContainsNonDeterministicFunctions(ASTPtr ast, ContextPtr context); /// returned. In order to still obtain sufficiently up-to-date query results, an expiry time (TTL) must be specified for each cache entry /// after which it becomes stale and is ignored. Stale entries are removed opportunistically from the cache; they are only evicted when a /// new entry is inserted and the cache has insufficient capacity. -class QueryResultCache +class QueryCache { public: /// Represents a query result in the cache. @@ -82,9 +83,9 @@ public: /// Buffers multiple partial query result chunks (buffer()) and eventually stores them as a cache entry (finalizeWrite()). /// /// Implementation note: Queries may throw exceptions during runtime, e.g. out-of-memory errors. In this case, no query result must be - /// written into the query result cache.
Unfortunately, neither the Writer nor the special transform added on top of the query pipeline - /// which holds the Writer know whether they are destroyed because the query ended successfully or because of an exception (otherwise, - /// we could simply implement a check in their destructors). To handle exceptions correctly nevertheless, we do the actual insert in + /// written into the query cache. Unfortunately, neither the Writer nor the special transform added on top of the query pipeline which + /// holds the Writer know whether they are destroyed because the query ended successfully or because of an exception (otherwise, we + /// could simply implement a check in their destructors). To handle exceptions correctly nevertheless, we do the actual insert in /// finalizeWrite() as opposed to the Writer destructor. This function is then called only for successful queries in finish_callback() /// which runs before the transform and the Writer are destroyed, whereas for unsuccessful queries we do nothing (the Writer is /// destroyed w/o inserting anything). @@ -117,7 +118,7 @@ public: size_t max_entry_size_in_bytes_, size_t max_entry_size_in_rows_, std::chrono::milliseconds min_query_runtime_); - friend class QueryResultCache; /// for createWriter() + friend class QueryCache; /// for createWriter() }; /// Looks up a query result for a key in the cache and (if found) constructs a pipe with the query result chunks as source. @@ -129,10 +130,10 @@ public: private: Reader(const Cache & cache_, const Key & key, size_t & cache_size_in_bytes_, const std::lock_guard &); Pipe pipe; - friend class QueryResultCache; /// for createReader() + friend class QueryCache; /// for createReader() }; - QueryResultCache(size_t max_cache_size_in_bytes_, size_t max_cache_entries_, size_t max_cache_entry_size_in_bytes_, size_t max_cache_entry_size_in_rows_); + void updateConfiguration(const Poco::Util::AbstractConfiguration & config); Reader createReader(const Key & key); Writer createWriter(const Key & key, std::chrono::milliseconds min_query_runtime); @@ -154,15 +155,17 @@ private: Cache cache TSA_GUARDED_BY(mutex); TimesExecuted times_executed TSA_GUARDED_BY(mutex); - size_t cache_size_in_bytes TSA_GUARDED_BY(mutex) = 0; /// updated in each cache insert/delete - const size_t max_cache_size_in_bytes; - const size_t max_cache_entries; - const size_t max_cache_entry_size_in_bytes; - const size_t max_cache_entry_size_in_rows; + /// Cache configuration + size_t max_cache_size_in_bytes TSA_GUARDED_BY(mutex) = 0; + size_t max_cache_entries TSA_GUARDED_BY(mutex) = 0; + size_t max_cache_entry_size_in_bytes TSA_GUARDED_BY(mutex) = 0; + size_t max_cache_entry_size_in_rows TSA_GUARDED_BY(mutex) = 0; - friend class StorageSystemQueryResultCache; + size_t cache_size_in_bytes TSA_GUARDED_BY(mutex) = 0; /// Updated in each cache insert/delete + + friend class StorageSystemQueryCache; }; -using QueryResultCachePtr = std::shared_ptr; +using QueryCachePtr = std::shared_ptr; } diff --git a/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp b/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp index e228dcc1f4a..b08ec7e5ab5 100644 --- a/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp +++ b/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp @@ -8,7 +8,9 @@ #include #include #include +#include #include +#include #include #include @@ -37,6 +39,53 @@ namespace ErrorCodes namespace ClusterProxy { +/// select query has database, table and table function names as AST pointers +/// Creates a copy of query, changes database, 
table and table function names. +ASTPtr rewriteSelectQuery( + ContextPtr context, + const ASTPtr & query, + const std::string & remote_database, + const std::string & remote_table, + ASTPtr table_function_ptr) +{ + auto modified_query_ast = query->clone(); + + ASTSelectQuery & select_query = modified_query_ast->as(); + + // Get rid of the settings clause so we don't send it to the remote server. This way, newly added + // non-important settings won't break any remote parser. It's also more reasonable since the query settings + // are written into the query context and will be sent by the query pipeline. + select_query.setExpression(ASTSelectQuery::Expression::SETTINGS, {}); + + if (table_function_ptr) + select_query.addTableFunction(table_function_ptr); + else + select_query.replaceDatabaseAndTable(remote_database, remote_table); + + /// Restore long column names (because our short names are ambiguous). + /// TODO: aliased table functions & CREATE TABLE AS table function cases + if (!table_function_ptr) + { + RestoreQualifiedNamesVisitor::Data data; + data.distributed_table = DatabaseAndTableWithAlias(*getTableExpression(query->as(), 0)); + data.remote_table.database = remote_database; + data.remote_table.table = remote_table; + RestoreQualifiedNamesVisitor(data).visit(modified_query_ast); + } + + /// To make local JOINs work, the default database should be added to table names. + /// But only for the JOIN section, since the following should work using default_database: + /// - SELECT * FROM d WHERE value IN (SELECT l.value FROM l) ORDER BY value + /// (see 01487_distributed_in_not_default_db) + AddDefaultDatabaseVisitor visitor(context, context->getCurrentDatabase(), + /* only_replace_current_database_function_= */false, + /* only_replace_in_join_= */true); + visitor.visit(modified_query_ast); + + return modified_query_ast; +} + + SelectStreamFactory::SelectStreamFactory( const Block & header_, const ColumnsDescriptionByShardNum & objects_by_shard_, @@ -172,67 +221,5 @@ void SelectStreamFactory::createForShard( } -void SelectStreamFactory::createForShardWithParallelReplicas( - const Cluster::ShardInfo & shard_info, - const ASTPtr & query_ast, - const StorageID & main_table, - ContextPtr context, - UInt32 shard_count, - std::vector & local_plans, - Shards & remote_shards) -{ - if (auto it = objects_by_shard.find(shard_info.shard_num); it != objects_by_shard.end()) - replaceMissedSubcolumnsByConstants(storage_snapshot->object_columns, it->second, query_ast); - - const auto & settings = context->getSettingsRef(); - - auto is_local_replica_obsolete = [&]() - { - auto resolved_id = context->resolveStorageID(main_table); - auto main_table_storage = DatabaseCatalog::instance().tryGetTable(resolved_id, context); - const auto * replicated_storage = dynamic_cast(main_table_storage.get()); - - if (!replicated_storage) - return false; - - UInt64 max_allowed_delay = settings.max_replica_delay_for_distributed_queries; - - if (!max_allowed_delay) - return false; - - UInt64 local_delay = replicated_storage->getAbsoluteDelay(); - return local_delay >= max_allowed_delay; - }; - - size_t next_replica_number = 0; - size_t all_replicas_count = shard_info.getRemoteNodeCount(); - - auto coordinator = std::make_shared(); - - if (settings.prefer_localhost_replica && shard_info.isLocal()) - { - /// We don't need more than one local replica in parallel reading - if (!is_local_replica_obsolete()) - { - ++all_replicas_count; - - local_plans.emplace_back(createLocalPlan( - query_ast, header, context, processed_stage, shard_info.shard_num,
shard_count, next_replica_number, all_replicas_count, coordinator)); - - ++next_replica_number; - } - } - - if (shard_info.hasRemoteConnections()) - remote_shards.emplace_back(Shard{ - .query = query_ast, - .header = header, - .shard_info = shard_info, - .lazy = false, - .local_delay = 0, - .coordinator = coordinator, - }); -} - } } diff --git a/src/Interpreters/ClusterProxy/SelectStreamFactory.h b/src/Interpreters/ClusterProxy/SelectStreamFactory.h index a8f7d131b15..f1a8b3e0984 100644 --- a/src/Interpreters/ClusterProxy/SelectStreamFactory.h +++ b/src/Interpreters/ClusterProxy/SelectStreamFactory.h @@ -29,6 +29,14 @@ struct StorageID; namespace ClusterProxy { +/// select query has database, table and table function names as AST pointers +/// Creates a copy of query, changes database, table and table function names. +ASTPtr rewriteSelectQuery( + ContextPtr context, + const ASTPtr & query, + const std::string & remote_database, + const std::string & remote_table, + ASTPtr table_function_ptr = nullptr); using ColumnsDescriptionByShardNum = std::unordered_map; @@ -80,16 +88,6 @@ public: std::unique_ptr remote_plan; }; - void createForShardWithParallelReplicas( - const Cluster::ShardInfo & shard_info, - const ASTPtr & query_ast, - const StorageID & main_table, - ContextPtr context, - UInt32 shard_count, - std::vector & local_plans, - Shards & remote_shards); - -private: const Block header; const ColumnsDescriptionByShardNum objects_by_shard; const StorageSnapshotPtr storage_snapshot; diff --git a/src/Interpreters/ClusterProxy/executeQuery.cpp b/src/Interpreters/ClusterProxy/executeQuery.cpp index fe31b4d8302..2e035ef883f 100644 --- a/src/Interpreters/ClusterProxy/executeQuery.cpp +++ b/src/Interpreters/ClusterProxy/executeQuery.cpp @@ -1,6 +1,8 @@ #include #include +#include #include +#include #include #include #include @@ -13,8 +15,11 @@ #include #include #include +#include +#include #include #include +#include namespace DB { @@ -23,6 +28,7 @@ namespace ErrorCodes { extern const int TOO_LARGE_DISTRIBUTED_DEPTH; extern const int LOGICAL_ERROR; + extern const int SUPPORT_IS_DISABLED; } namespace ClusterProxy @@ -117,6 +123,31 @@ ContextMutablePtr updateSettingsForCluster(const Cluster & cluster, ContextPtr c return new_context; } +static ThrottlerPtr getThrottler(const ContextPtr & context) +{ + const Settings & settings = context->getSettingsRef(); + + ThrottlerPtr user_level_throttler; + if (auto process_list_element = context->getProcessListElement()) + user_level_throttler = process_list_element->getUserNetworkThrottler(); + + /// Network bandwidth limit, if needed. + ThrottlerPtr throttler; + if (settings.max_network_bandwidth || settings.max_network_bytes) + { + throttler = std::make_shared( + settings.max_network_bandwidth, + settings.max_network_bytes, + "Limit for bytes to send or receive over network exceeded.", + user_level_throttler); + } + else + throttler = user_level_throttler; + + return throttler; +} + + void executeQuery( QueryPlan & query_plan, const Block & header, @@ -138,26 +169,8 @@ void executeQuery( SelectStreamFactory::Shards remote_shards; auto new_context = updateSettingsForCluster(*query_info.getCluster(), context, settings, main_table, &query_info, log); - new_context->getClientInfo().distributed_depth += 1; - ThrottlerPtr user_level_throttler; - if (auto process_list_element = context->getProcessListElement()) - user_level_throttler = process_list_element->getUserNetworkThrottler(); - - /// Network bandwidth limit, if needed. 
- ThrottlerPtr throttler; - if (settings.max_network_bandwidth || settings.max_network_bytes) - { - throttler = std::make_shared( - settings.max_network_bandwidth, - settings.max_network_bytes, - "Limit for bytes to send or receive over network exceeded.", - user_level_throttler); - } - else - throttler = user_level_throttler; - size_t shards = query_info.getCluster()->getShardCount(); for (const auto & shard_info : query_info.getCluster()->getShardsInfo()) { @@ -199,7 +212,7 @@ void executeQuery( main_table, table_func_ptr, new_context, - throttler, + getThrottler(context), std::move(scalars), std::move(external_tables), log, @@ -236,103 +249,76 @@ void executeQueryWithParallelReplicas( const StorageID & main_table, const ASTPtr & table_func_ptr, SelectStreamFactory & stream_factory, - const ASTPtr & query_ast, - ContextPtr context, - const SelectQueryInfo & query_info, - const ExpressionActionsPtr & sharding_key_expr, - const std::string & sharding_key_column_name, - const ClusterPtr & not_optimized_cluster, - QueryProcessingStage::Enum processed_stage) + const ASTPtr & query_ast, ContextPtr context, const SelectQueryInfo & query_info, + const ClusterPtr & not_optimized_cluster) { - const Settings & settings = context->getSettingsRef(); + if (not_optimized_cluster->getShardsInfo().size() != 1) + throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Cluster for parallel replicas should consist of only one shard"); - ThrottlerPtr user_level_throttler; - if (auto process_list_element = context->getProcessListElement()) - user_level_throttler = process_list_element->getUserNetworkThrottler(); + auto shard_info = not_optimized_cluster->getShardsInfo().front(); - /// Network bandwidth limit, if needed. - ThrottlerPtr throttler; - if (settings.max_network_bandwidth || settings.max_network_bytes) - { - throttler = std::make_shared( - settings.max_network_bandwidth, - settings.max_network_bytes, - "Limit for bytes to send or receive over network exceeded.", - user_level_throttler); - } - else - throttler = user_level_throttler; - - - std::vector plans; - SelectStreamFactory::Shards remote_shards; - size_t shards = query_info.getCluster()->getShardCount(); - - for (const auto & shard_info : query_info.getCluster()->getShardsInfo()) - { - ASTPtr query_ast_for_shard; - if (query_info.optimized_cluster && settings.optimize_skip_unused_shards_rewrite_in && shards > 1) - { - query_ast_for_shard = query_ast->clone(); - - OptimizeShardingKeyRewriteInVisitor::Data visitor_data{ - sharding_key_expr, - sharding_key_expr->getSampleBlock().getByPosition(0).type, - sharding_key_column_name, - shard_info, - not_optimized_cluster->getSlotToShard(), - }; - OptimizeShardingKeyRewriteInVisitor visitor(visitor_data); - visitor.visit(query_ast_for_shard); - } - else - query_ast_for_shard = query_ast; - - stream_factory.createForShardWithParallelReplicas( - shard_info, query_ast_for_shard, main_table, context, static_cast(shards), plans, remote_shards); - } - - Scalars scalars = context->hasQueryContext() ? context->getQueryContext()->getScalars() : Scalars{}; - scalars.emplace( - "_shard_count", Block{{DataTypeUInt32().createColumnConst(1, shards), std::make_shared(), "_shard_count"}}); - auto external_tables = context->getExternalTables(); - - if (!remote_shards.empty()) - { - auto new_context = Context::createCopy(context); - - for (const auto & shard : remote_shards) - { - auto read_from_remote = std::make_unique( - shard.coordinator, - shard, - shard.header, - processed_stage, - main_table, - table_func_ptr, - new_context, - throttler, - scalars, - external_tables, - &Poco::Logger::get("ReadFromParallelRemoteReplicasStep"), - query_info.storage_limits); - - auto remote_plan = std::make_unique(); - remote_plan->addStep(std::move(read_from_remote)); - remote_plan->addInterpreterContext(new_context); - plans.emplace_back(std::move(remote_plan)); - } - } - - if (plans.empty()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "No plans were generated for reading from Distributed. This is a bug"); - - if (plans.size() == 1) + const auto & settings = context->getSettingsRef(); + auto all_replicas_count = std::min(static_cast(settings.max_parallel_replicas), shard_info.all_addresses.size()); + auto coordinator = std::make_shared(all_replicas_count); + auto remote_plan = std::make_unique(); + auto plans = std::vector(); + + /// This is a little bit weird, but we construct an "empty" coordinator without + /// any specified reading/coordination method (like Default, InOrder, InReverseOrder), + /// because we will only understand it later, during QueryPlan optimization. + /// So we place a reference to the coordinator in some common place like QueryInfo + /// and later tell it about the reading method we chose. + query_info.coordinator = coordinator;
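The comment above describes a deliberately deferred decision: the coordinator is constructed before anyone knows which reading method it will coordinate, and the choice is announced later through the reference stashed in the query info. A reduced sketch of that shape (hypothetical types, not the actual ParallelReplicasReadingCoordinator API):

#include <cassert>
#include <memory>
#include <optional>

enum class ReadingMode { Default, InOrder, InReverseOrder };

struct Coordinator
{
    std::optional<ReadingMode> mode; // empty until plan optimization decides
    void setMode(ReadingMode m)
    {
        if (!mode)
            mode = m; // first announcement wins; later calls are no-ops
    }
};

struct QueryInfoLike
{
    std::shared_ptr<Coordinator> coordinator; // shared so planner and readers see one object
};

int main()
{
    QueryInfoLike info;
    info.coordinator = std::make_shared<Coordinator>(); // the "empty" coordinator
    // ... later, during QueryPlan optimization, the chosen method is announced:
    info.coordinator->setMode(ReadingMode::InOrder);
    assert(info.coordinator->mode == ReadingMode::InOrder);
}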
+ + UUID parallel_group_id = UUIDHelpers::generateV4(); + + plans.emplace_back(createLocalPlan( + query_ast, + stream_factory.header, + context, + stream_factory.processed_stage, + shard_info.shard_num, + /*shard_count*/1, + 0, + all_replicas_count, + coordinator, + parallel_group_id)); + + if (!shard_info.hasRemoteConnections()) { + if (!plans.front()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "An empty plan was generated to read from local shard and there are no remote connections. This is a bug"); query_plan = std::move(*plans.front()); return; } + auto new_context = Context::createCopy(context); + auto scalars = new_context->hasQueryContext() ? new_context->getQueryContext()->getScalars() : Scalars{}; + auto external_tables = new_context->getExternalTables(); + + auto read_from_remote = std::make_unique( + query_ast, + std::move(shard_info), + coordinator, + stream_factory.header, + stream_factory.processed_stage, + main_table, + table_func_ptr, + new_context, + getThrottler(new_context), + std::move(scalars), + std::move(external_tables), + &Poco::Logger::get("ReadFromParallelRemoteReplicasStep"), + query_info.storage_limits, + parallel_group_id); + + remote_plan->addStep(std::move(read_from_remote)); + remote_plan->addInterpreterContext(context); + plans.emplace_back(std::move(remote_plan)); + + if (std::all_of(plans.begin(), plans.end(), [](const QueryPlanPtr & plan) { return !plan; })) + throw Exception(ErrorCodes::LOGICAL_ERROR, "No plans were generated for reading from shard.
This is a bug"); + DataStreams input_streams; input_streams.reserve(plans.size()); for (const auto & plan : plans) diff --git a/src/Interpreters/ClusterProxy/executeQuery.h b/src/Interpreters/ClusterProxy/executeQuery.h index 662fe47ca65..787e79313cc 100644 --- a/src/Interpreters/ClusterProxy/executeQuery.h +++ b/src/Interpreters/ClusterProxy/executeQuery.h @@ -61,10 +61,7 @@ void executeQueryWithParallelReplicas( const ASTPtr & query_ast, ContextPtr context, const SelectQueryInfo & query_info, - const ExpressionActionsPtr & sharding_key_expr, - const std::string & sharding_key_column_name, - const ClusterPtr & not_optimized_cluster, - QueryProcessingStage::Enum processed_stage); + const ClusterPtr & not_optimized_cluster); } } diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 2a8734596c5..d91661072bc 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -40,7 +40,7 @@ #include #include #include -#include +#include #include #include #include @@ -236,7 +236,7 @@ struct ContextSharedPart : boost::noncopyable mutable std::unique_ptr load_marks_threadpool; /// Threadpool for loading marks cache. mutable UncompressedCachePtr index_uncompressed_cache; /// The cache of decompressed blocks for MergeTree indices. mutable MarkCachePtr index_mark_cache; /// Cache of marks in compressed files of MergeTree indices. - mutable QueryResultCachePtr query_result_cache; /// Cache of query results. + mutable QueryCachePtr query_cache; /// Cache of query results. mutable MMappedFileCachePtr mmap_cache; /// Cache of mmapped files to avoid frequent open/map/unmap/close and to reuse from several threads. ProcessList process_list; /// Executing queries at the moment. GlobalOvercommitTracker global_overcommit_tracker; @@ -2041,27 +2041,35 @@ void Context::dropIndexMarkCache() const shared->index_mark_cache->reset(); } -void Context::setQueryResultCache(size_t max_size_in_bytes, size_t max_entries, size_t max_entry_size_in_bytes, size_t max_entry_size_in_records) +void Context::setQueryCache(const Poco::Util::AbstractConfiguration & config) { auto lock = getLock(); - if (shared->query_result_cache) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Query result cache has been already created."); + if (shared->query_cache) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Query cache has been already created."); - shared->query_result_cache = std::make_shared(max_size_in_bytes, max_entries, max_entry_size_in_bytes, max_entry_size_in_records); + shared->query_cache = std::make_shared(); + shared->query_cache->updateConfiguration(config); } -QueryResultCachePtr Context::getQueryResultCache() const +void Context::updateQueryCacheConfiguration(const Poco::Util::AbstractConfiguration & config) { auto lock = getLock(); - return shared->query_result_cache; + if (shared->query_cache) + shared->query_cache->updateConfiguration(config); } -void Context::dropQueryResultCache() const +QueryCachePtr Context::getQueryCache() const { auto lock = getLock(); - if (shared->query_result_cache) - shared->query_result_cache->reset(); + return shared->query_cache; +} + +void Context::dropQueryCache() const +{ + auto lock = getLock(); + if (shared->query_cache) + shared->query_cache->reset(); } void Context::setMMappedFileCache(size_t cache_size_in_num_entries) @@ -2104,8 +2112,8 @@ void Context::dropCaches() const if (shared->index_mark_cache) shared->index_mark_cache->reset(); - if (shared->query_result_cache) - shared->query_result_cache->reset(); + if (shared->query_cache) + 
shared->query_cache->reset(); if (shared->mmap_cache) shared->mmap_cache->reset(); @@ -3621,6 +3629,32 @@ void Context::setMergeTreeReadTaskCallback(MergeTreeReadTaskCallback && callback merge_tree_read_task_callback = callback; } + +MergeTreeAllRangesCallback Context::getMergeTreeAllRangesCallback() const +{ + if (!merge_tree_all_ranges_callback.has_value()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Next task callback is not set for query with id: {}", getInitialQueryId()); + + return merge_tree_all_ranges_callback.value(); +} + + +void Context::setMergeTreeAllRangesCallback(MergeTreeAllRangesCallback && callback) +{ + merge_tree_all_ranges_callback = callback; +} + + +void Context::setParallelReplicasGroupUUID(UUID uuid) +{ + parallel_replicas_group_uuid = uuid; +} + +UUID Context::getParallelReplicasGroupUUID() const +{ + return parallel_replicas_group_uuid; +} + PartUUIDsPtr Context::getIgnoredPartUUIDs() const { auto lock = getLock(); @@ -3886,4 +3920,22 @@ WriteSettings Context::getWriteSettings() const return res; } +bool Context::canUseParallelReplicasOnInitiator() const +{ + const auto & settings = getSettingsRef(); + return settings.allow_experimental_parallel_reading_from_replicas + && settings.max_parallel_replicas > 1 + && !settings.use_hedged_requests + && !getClientInfo().collaborate_with_initiator; +} + +bool Context::canUseParallelReplicasOnFollower() const +{ + const auto & settings = getSettingsRef(); + return settings.allow_experimental_parallel_reading_from_replicas + && settings.max_parallel_replicas > 1 + && !settings.use_hedged_requests + && getClientInfo().collaborate_with_initiator; +} + } diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h index 10983b15d7f..f40f8608092 100644 --- a/src/Interpreters/Context.h +++ b/src/Interpreters/Context.h @@ -1,5 +1,11 @@ #pragma once +#include +#include +#include +#include +#include +#include #include #include #include @@ -8,32 +14,24 @@ #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include - +#include +#include +#include +#include #include - +#include +#include #include "config.h" #include +#include #include #include #include #include - #include -#include namespace Poco::Net { class IPAddress; } @@ -81,8 +79,8 @@ class Macros; struct Progress; struct FileProgress; class Clusters; +class QueryCache; class QueryLog; -class QueryResultCache; class QueryThreadLog; class QueryViewsLog; class PartLog; @@ -98,7 +96,11 @@ class TransactionsInfoLog; class ProcessorsProfileLog; class FilesystemCacheLog; class AsynchronousInsertLog; +class IAsynchronousReader; struct MergeTreeSettings; +struct InitialAllRangesAnnouncement; +struct ParallelReadRequest; +struct ParallelReadResponse; class StorageS3Settings; class IDatabase; class DDLWorker; @@ -172,11 +174,15 @@ using InputBlocksReader = std::function; /// Used in distributed task processing using ReadTaskCallback = std::function; -using MergeTreeReadTaskCallback = std::function(PartitionReadRequest)>; +using MergeTreeAllRangesCallback = std::function; +using MergeTreeReadTaskCallback = std::function(ParallelReadRequest)>; class TemporaryDataOnDiskScope; using TemporaryDataOnDiskScopePtr = std::shared_ptr; +class ParallelReplicasReadingCoordinator; +using ParallelReplicasReadingCoordinatorPtr = std::shared_ptr; + #if USE_ROCKSDB class MergeTreeMetadataCache; using MergeTreeMetadataCachePtr = std::shared_ptr; @@ -262,6 +268,8 @@ private: /// Used in parallel reading from 
replicas. A replica tells about its intentions to read /// some ranges from some part and initiator will tell the replica about whether it is accepted or denied. std::optional merge_tree_read_task_callback; + std::optional merge_tree_all_ranges_callback; + UUID parallel_replicas_group_uuid{UUIDHelpers::Nil}; /// Record entities accessed by current query, and store this information in system.query_log. struct QueryAccessInfo @@ -380,6 +388,7 @@ private: /// Temporary data for query execution accounting. TemporaryDataOnDiskScopePtr temp_data_on_disk; + public: /// Some counters for current query execution. /// Most of them are workarounds and should be removed in the future. @@ -402,6 +411,8 @@ public: KitchenSink kitchen_sink; + ParallelReplicasReadingCoordinatorPtr parallel_reading_coordinator; + private: using SampleBlockCache = std::unordered_map; mutable SampleBlockCache sample_block_cache; @@ -861,9 +872,10 @@ public: void dropMMappedFileCache() const; /// Create a cache of query results for statements which run repeatedly. - void setQueryResultCache(size_t max_size_in_bytes, size_t max_entries, size_t max_entry_size_in_bytes, size_t max_entry_size_in_records); - std::shared_ptr getQueryResultCache() const; - void dropQueryResultCache() const; + void setQueryCache(const Poco::Util::AbstractConfiguration & config); + void updateQueryCacheConfiguration(const Poco::Util::AbstractConfiguration & config); + std::shared_ptr getQueryCache() const; + void dropQueryCache() const; /** Clear the caches of the uncompressed blocks and marks. * This is usually done when renaming tables, changing the type of columns, deleting a table. @@ -1045,6 +1057,12 @@ public: MergeTreeReadTaskCallback getMergeTreeReadTaskCallback() const; void setMergeTreeReadTaskCallback(MergeTreeReadTaskCallback && callback); + MergeTreeAllRangesCallback getMergeTreeAllRangesCallback() const; + void setMergeTreeAllRangesCallback(MergeTreeAllRangesCallback && callback); + + UUID getParallelReplicasGroupUUID() const; + void setParallelReplicasGroupUUID(UUID uuid); + /// Background executors related methods void initializeBackgroundExecutorsIfNeeded(); bool areBackgroundExecutorsInitialized(); @@ -1071,6 +1089,10 @@ public: /** Get settings for writing to filesystem. 
*/ WriteSettings getWriteSettings() const; + /** There are multiple conditions that have to be met to be able to use parallel replicas */ + bool canUseParallelReplicasOnInitiator() const; + bool canUseParallelReplicasOnFollower() const; + private: std::unique_lock getLock() const; diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index 80cc0414643..2b88ff6a353 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -1801,11 +1801,16 @@ ExpressionActionsPtr ExpressionAnalyzer::getActions(bool add_aliases, bool proje getActionsDAG(add_aliases, project_result), ExpressionActionsSettings::fromContext(getContext(), compile_expressions)); } -ExpressionActionsPtr ExpressionAnalyzer::getConstActions(const ColumnsWithTypeAndName & constant_inputs) +ActionsDAGPtr ExpressionAnalyzer::getConstActionsDAG(const ColumnsWithTypeAndName & constant_inputs) { auto actions = std::make_shared(constant_inputs); getRootActions(query, true /* no_makeset_for_subqueries */, actions, true /* only_consts */); + return actions; +} + +ExpressionActionsPtr ExpressionAnalyzer::getConstActions(const ColumnsWithTypeAndName & constant_inputs) +{ + auto actions = getConstActionsDAG(constant_inputs); return std::make_shared(actions, ExpressionActionsSettings::fromContext(getContext())); } diff --git a/src/Interpreters/ExpressionAnalyzer.h b/src/Interpreters/ExpressionAnalyzer.h index af4ebdcafc1..1676cb506c0 100644 --- a/src/Interpreters/ExpressionAnalyzer.h +++ b/src/Interpreters/ExpressionAnalyzer.h @@ -119,8 +119,9 @@ public: ActionsDAGPtr getActionsDAG(bool add_aliases, bool project_result = true); ExpressionActionsPtr getActions(bool add_aliases, bool project_result = true, CompileExpressions compile_expressions = CompileExpressions::no); - /// Actions that can be performed on an empty block: adding constants and applying functions that depend only on constants. + /// Get actions to evaluate a constant expression. The function adds constants and applies functions that depend only on constants. /// Does not execute subqueries. + ActionsDAGPtr getConstActionsDAG(const ColumnsWithTypeAndName & constant_inputs = {}); ExpressionActionsPtr getConstActions(const ColumnsWithTypeAndName & constant_inputs = {}); /** Sets that require a subquery to be created.
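The canUseParallelReplicasOnInitiator()/canUseParallelReplicasOnFollower() pair declared above (and defined in Context.cpp earlier in this diff) differ only in the polarity of collaborate_with_initiator: shared preconditions decide whether parallel replicas are possible at all, and the flag decides which role this server is playing. A condensed sketch of that split, with stand-in structs instead of the real Settings and ClientInfo:

#include <cassert>

struct SettingsLike
{
    bool allow_experimental_parallel_reading_from_replicas = true;
    unsigned max_parallel_replicas = 3;
    bool use_hedged_requests = false;
};

struct ClientInfoLike
{
    bool collaborate_with_initiator = false; // set on secondary (follower) queries
};

static bool canUseParallelReplicas(bool as_initiator, const SettingsLike & s, const ClientInfoLike & ci)
{
    const bool common = s.allow_experimental_parallel_reading_from_replicas
        && s.max_parallel_replicas > 1
        && !s.use_hedged_requests;
    return common && (as_initiator ? !ci.collaborate_with_initiator : ci.collaborate_with_initiator);
}

int main()
{
    SettingsLike s;
    ClientInfoLike ci;
    assert(canUseParallelReplicas(/*as_initiator=*/true, s, ci));  // fresh query: may initiate
    ci.collaborate_with_initiator = true;
    assert(canUseParallelReplicas(/*as_initiator=*/false, s, ci)); // secondary query: follower
}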
diff --git a/src/Interpreters/IInterpreter.cpp b/src/Interpreters/IInterpreter.cpp index 84fbfee7905..aff703f79af 100644 --- a/src/Interpreters/IInterpreter.cpp +++ b/src/Interpreters/IInterpreter.cpp @@ -11,6 +11,7 @@ namespace ErrorCodes extern const int NOT_IMPLEMENTED; } + void IInterpreter::extendQueryLogElem( QueryLogElement & elem, const ASTPtr & ast, ContextPtr context, const String & query_database, const String & query_table) const { diff --git a/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp b/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp index 5df3abbb32e..b6e910eac94 100644 --- a/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp +++ b/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp @@ -9,18 +9,12 @@ #include #include #include -#include #include namespace DB { -void IInterpreterUnionOrSelectQuery::extendQueryLogElemImpl(QueryLogElement & elem, const ASTPtr & /*ast*/, ContextPtr /*context_*/) const -{ - elem.query_kind = "Select"; -} - QueryPipelineBuilder IInterpreterUnionOrSelectQuery::buildQueryPipeline() { QueryPlan query_plan; diff --git a/src/Interpreters/IInterpreterUnionOrSelectQuery.h b/src/Interpreters/IInterpreterUnionOrSelectQuery.h index 0addaa7e024..1147070f48a 100644 --- a/src/Interpreters/IInterpreterUnionOrSelectQuery.h +++ b/src/Interpreters/IInterpreterUnionOrSelectQuery.h @@ -44,8 +44,6 @@ public: size_t getMaxStreams() const { return max_streams; } - void extendQueryLogElemImpl(QueryLogElement & elem, const ASTPtr & ast, ContextPtr context) const override; - /// Returns whether the query uses the view source from the Context /// The view source is a virtual storage that currently only materialized views use to replace the source table /// with the incoming block only diff --git a/src/Interpreters/InterpreterAlterQuery.cpp b/src/Interpreters/InterpreterAlterQuery.cpp index ee5aad3d18e..fabcc6844e5 100644 --- a/src/Interpreters/InterpreterAlterQuery.cpp +++ b/src/Interpreters/InterpreterAlterQuery.cpp @@ -147,7 +147,7 @@ BlockIO InterpreterAlterQuery::executeToTable(const ASTAlterQuery & alter) { table->checkMutationIsPossible(mutation_commands, getContext()->getSettingsRef()); MutationsInterpreter(table, metadata_snapshot, mutation_commands, getContext(), false).validate(); - table->mutate(mutation_commands, getContext(), false); + table->mutate(mutation_commands, getContext()); } if (!partition_commands.empty()) @@ -446,7 +446,6 @@ void InterpreterAlterQuery::extendQueryLogElemImpl(QueryLogElement & elem, const { const auto & alter = ast->as(); - elem.query_kind = "Alter"; if (alter.command_list != nullptr && alter.alter_object != ASTAlterQuery::AlterObjectType::DATABASE) { // Alter queries already have their target table inserted into `elem`. diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index 611f533d559..e1a2c5775b7 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -1703,7 +1703,6 @@ AccessRightsElements InterpreterCreateQuery::getRequiredAccess() const void InterpreterCreateQuery::extendQueryLogElemImpl(QueryLogElement & elem, const ASTPtr &, ContextPtr) const { - elem.query_kind = "Create"; if (!as_table_saved.empty()) { String database = backQuoteIfNeed(as_database_saved.empty() ? 
getContext()->getCurrentDatabase() : as_database_saved); diff --git a/src/Interpreters/InterpreterDeleteQuery.cpp b/src/Interpreters/InterpreterDeleteQuery.cpp index abccc313e14..f8974a19f45 100644 --- a/src/Interpreters/InterpreterDeleteQuery.cpp +++ b/src/Interpreters/InterpreterDeleteQuery.cpp @@ -5,15 +5,16 @@ #include #include #include +#include #include +#include +#include +#include #include -#include -#include #include #include #include #include -#include namespace DB @@ -72,7 +73,7 @@ BlockIO InterpreterDeleteQuery::execute() table->checkMutationIsPossible(mutation_commands, getContext()->getSettingsRef()); MutationsInterpreter(table, metadata_snapshot, mutation_commands, getContext(), false).validate(); - table->mutate(mutation_commands, getContext(), false); + table->mutate(mutation_commands, getContext()); return {}; } else if (table->supportsLightweightDelete()) @@ -82,35 +83,25 @@ BlockIO InterpreterDeleteQuery::execute() "Lightweight delete mutate is experimental. " "Set `allow_experimental_lightweight_delete` setting to enable it"); - /// Convert to MutationCommand - MutationCommands mutation_commands; - MutationCommand mut_command; + /// Build "ALTER ... UPDATE _row_exists = 0 WHERE predicate" query + String alter_query = + "ALTER TABLE " + table->getStorageID().getFullTableName() + + (delete_query.cluster.empty() ? "" : " ON CLUSTER " + backQuoteIfNeed(delete_query.cluster)) + + " UPDATE `_row_exists` = 0 WHERE " + serializeAST(*delete_query.predicate); - /// Build "UPDATE _row_exists = 0 WHERE predicate" query - mut_command.type = MutationCommand::Type::UPDATE; - mut_command.predicate = delete_query.predicate; + ParserAlterQuery parser; + ASTPtr alter_ast = parseQuery( + parser, + alter_query.data(), + alter_query.data() + alter_query.size(), + "ALTER query", + 0, + DBMS_DEFAULT_MAX_PARSER_DEPTH); - auto command = std::make_shared(); - command->type = ASTAlterCommand::UPDATE; - command->predicate = delete_query.predicate; - command->update_assignments = std::make_shared(); - auto set_row_does_not_exist = std::make_shared(); - set_row_does_not_exist->column_name = LightweightDeleteDescription::FILTER_COLUMN.name; - auto zero_value = std::make_shared(DB::Field(UInt8(0))); - set_row_does_not_exist->children.push_back(zero_value); - command->update_assignments->children.push_back(set_row_does_not_exist); - command->children.push_back(command->predicate); - command->children.push_back(command->update_assignments); - mut_command.column_to_update_expression[set_row_does_not_exist->column_name] = zero_value; - mut_command.ast = command->ptr(); - - mutation_commands.emplace_back(mut_command); - - table->checkMutationIsPossible(mutation_commands, getContext()->getSettingsRef()); - MutationsInterpreter(table, metadata_snapshot, mutation_commands, getContext(), false).validate(); - table->mutate(mutation_commands, getContext(), true); - - return {}; + auto context = Context::createCopy(getContext()); + context->setSetting("mutations_sync", 2); /// Lightweight delete is always synchronous + InterpreterAlterQuery alter_interpreter(alter_ast, context); + return alter_interpreter.execute(); } else { diff --git a/src/Interpreters/InterpreterDropQuery.cpp b/src/Interpreters/InterpreterDropQuery.cpp index f2f937f6ec0..e4bf22730b3 100644 --- a/src/Interpreters/InterpreterDropQuery.cpp +++ b/src/Interpreters/InterpreterDropQuery.cpp @@ -432,11 +432,6 @@ AccessRightsElements InterpreterDropQuery::getRequiredAccessForDDLOnCluster() co return required_access; } -void 
InterpreterDropQuery::extendQueryLogElemImpl(QueryLogElement & elem, const ASTPtr &, ContextPtr) const -{ - elem.query_kind = "Drop"; -} - void InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind kind, ContextPtr global_context, ContextPtr current_context, const StorageID & target_table_id, bool sync) { if (DatabaseCatalog::instance().tryGetTable(target_table_id, current_context)) diff --git a/src/Interpreters/InterpreterDropQuery.h b/src/Interpreters/InterpreterDropQuery.h index edd84471c22..afec26424ba 100644 --- a/src/Interpreters/InterpreterDropQuery.h +++ b/src/Interpreters/InterpreterDropQuery.h @@ -24,8 +24,6 @@ public: /// Drop table or database. BlockIO execute() override; - void extendQueryLogElemImpl(QueryLogElement & elem, const ASTPtr &, ContextPtr) const override; - static void executeDropQuery(ASTDropQuery::Kind kind, ContextPtr global_context, ContextPtr current_context, const StorageID & target_table_id, bool sync); bool supportsTransactions() const override; diff --git a/src/Interpreters/InterpreterFactory.cpp b/src/Interpreters/InterpreterFactory.cpp index efcdde46e49..502de459156 100644 --- a/src/Interpreters/InterpreterFactory.cpp +++ b/src/Interpreters/InterpreterFactory.cpp @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -73,6 +74,7 @@ #include #include #include +#include #include #include #include @@ -167,6 +169,10 @@ std::unique_ptr InterpreterFactory::get(ASTPtr & query, ContextMut { return std::make_unique(query, context); } + else if (query->as()) + { + return std::make_unique(query, context); + } else if (query->as()) { return std::make_unique(query, context); diff --git a/src/Interpreters/InterpreterInsertQuery.cpp b/src/Interpreters/InterpreterInsertQuery.cpp index 3b90ac8d284..35ce28ec0b6 100644 --- a/src/Interpreters/InterpreterInsertQuery.cpp +++ b/src/Interpreters/InterpreterInsertQuery.cpp @@ -6,7 +6,6 @@ #include #include #include -#include #include #include #include @@ -561,10 +560,8 @@ StorageID InterpreterInsertQuery::getDatabaseTable() const return query_ptr->as().table_id; } - void InterpreterInsertQuery::extendQueryLogElemImpl(QueryLogElement & elem, ContextPtr context_) { - elem.query_kind = "Insert"; const auto & insert_table = context_->getInsertionTable(); if (!insert_table.empty()) { diff --git a/src/Interpreters/InterpreterInsertQuery.h b/src/Interpreters/InterpreterInsertQuery.h index 842460c8c25..9b3f617e4b3 100644 --- a/src/Interpreters/InterpreterInsertQuery.h +++ b/src/Interpreters/InterpreterInsertQuery.h @@ -44,6 +44,7 @@ public: std::atomic_uint64_t * elapsed_counter_ms = nullptr); static void extendQueryLogElemImpl(QueryLogElement & elem, ContextPtr context_); + void extendQueryLogElemImpl(QueryLogElement & elem, const ASTPtr & ast, ContextPtr context_) const override; StoragePtr getTable(ASTInsertQuery & query); diff --git a/src/Interpreters/InterpreterRenameQuery.cpp b/src/Interpreters/InterpreterRenameQuery.cpp index 82c230ef8e2..8cf39d2c850 100644 --- a/src/Interpreters/InterpreterRenameQuery.cpp +++ b/src/Interpreters/InterpreterRenameQuery.cpp @@ -197,7 +197,6 @@ AccessRightsElements InterpreterRenameQuery::getRequiredAccess(InterpreterRename void InterpreterRenameQuery::extendQueryLogElemImpl(QueryLogElement & elem, const ASTPtr & ast, ContextPtr) const { - elem.query_kind = "Rename"; const auto & rename = ast->as(); for (const auto & element : rename.elements) { diff --git a/src/Interpreters/InterpreterRenameQuery.h b/src/Interpreters/InterpreterRenameQuery.h index 
6141e8c1585..31d3d3d6ad9 100644 --- a/src/Interpreters/InterpreterRenameQuery.h +++ b/src/Interpreters/InterpreterRenameQuery.h @@ -55,6 +55,7 @@ class InterpreterRenameQuery : public IInterpreter, WithContext public: InterpreterRenameQuery(const ASTPtr & query_ptr_, ContextPtr context_); BlockIO execute() override; + void extendQueryLogElemImpl(QueryLogElement & elem, const ASTPtr & ast, ContextPtr) const override; bool renamedInsteadOfExchange() const { return renamed_instead_of_exchange; } diff --git a/src/Interpreters/InterpreterSelectIntersectExceptQuery.cpp b/src/Interpreters/InterpreterSelectIntersectExceptQuery.cpp index 15945cbbeef..2218ed4417b 100644 --- a/src/Interpreters/InterpreterSelectIntersectExceptQuery.cpp +++ b/src/Interpreters/InterpreterSelectIntersectExceptQuery.cpp @@ -193,8 +193,6 @@ void InterpreterSelectIntersectExceptQuery::ignoreWithTotals() void InterpreterSelectIntersectExceptQuery::extendQueryLogElemImpl(QueryLogElement & elem, const ASTPtr & /*ast*/, ContextPtr /*context_*/) const { - elem.query_kind = "Select"; - for (const auto & interpreter : nested_interpreters) { if (const auto * select_interpreter = dynamic_cast(interpreter.get())) diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index 624859300b9..2ce116f7796 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -448,6 +448,16 @@ InterpreterSelectQuery::InterpreterSelectQuery( } } + /// FIXME: Memory bound aggregation may cause another reading algorithm to be used on remote replicas + if (settings.allow_experimental_parallel_reading_from_replicas && settings.enable_memory_bound_merging_of_aggregation_results) + context->setSetting("enable_memory_bound_merging_of_aggregation_results", false); + + if (joined_tables.tablesCount() > 1 && settings.allow_experimental_parallel_reading_from_replicas) + { + LOG_WARNING(log, "Joins are not supported with parallel replicas. 
Query will be executed without using them."); + context->setSetting("allow_experimental_parallel_reading_from_replicas", false); + } + /// Rewrite JOINs if (!has_input && joined_tables.tablesCount() > 1) { @@ -543,6 +553,7 @@ InterpreterSelectQuery::InterpreterSelectQuery( parameter_values, parameter_types); + query_info.syntax_analyzer_result = syntax_analyzer_result; context->setDistributed(syntax_analyzer_result->is_remote_storage); @@ -1902,22 +1913,6 @@ void InterpreterSelectQuery::addEmptySourceToQueryPlan( } } -void InterpreterSelectQuery::setMergeTreeReadTaskCallbackAndClientInfo(MergeTreeReadTaskCallback && callback) -{ - context->getClientInfo().collaborate_with_initiator = true; - context->setMergeTreeReadTaskCallback(std::move(callback)); -} - -void InterpreterSelectQuery::setProperClientInfo(size_t replica_num, size_t replica_count) -{ - context->getClientInfo().query_kind = ClientInfo::QueryKind::SECONDARY_QUERY; - context->getClientInfo().count_participating_replicas = replica_count; - context->getClientInfo().number_of_current_replica = replica_num; - context->getClientInfo().connection_client_version_major = DBMS_VERSION_MAJOR; - context->getClientInfo().connection_client_version_minor = DBMS_VERSION_MINOR; - context->getClientInfo().connection_tcp_protocol_version = DBMS_TCP_PROTOCOL_VERSION; -} - RowPolicyFilterPtr InterpreterSelectQuery::getRowPolicyFilter() const { return row_policy_filter; @@ -1925,8 +1920,6 @@ RowPolicyFilterPtr InterpreterSelectQuery::getRowPolicyFilter() const void InterpreterSelectQuery::extendQueryLogElemImpl(QueryLogElement & elem, const ASTPtr & /*ast*/, ContextPtr /*context_*/) const { - elem.query_kind = "Select"; - for (const auto & row_policy : row_policy_filter->policies) { auto name = row_policy->getFullName().toString(); @@ -2574,12 +2567,13 @@ void InterpreterSelectQuery::executeMergeAggregated(QueryPlan & query_plan, bool const bool should_produce_results_in_order_of_bucket_number = options.to_stage == QueryProcessingStage::WithMergeableState && (settings.distributed_aggregation_memory_efficient || settings.enable_memory_bound_merging_of_aggregation_results); + const bool parallel_replicas_from_merge_tree = storage->isMergeTree() && context->canUseParallelReplicasOnInitiator(); executeMergeAggregatedImpl( query_plan, overflow_row, final, - storage && storage->isRemote(), + storage && (storage->isRemote() || parallel_replicas_from_merge_tree), has_grouping_sets, context->getSettingsRef(), query_analyzer->aggregationKeys(), diff --git a/src/Interpreters/InterpreterSelectQuery.h b/src/Interpreters/InterpreterSelectQuery.h index 761eea8e1b8..0ab1ba58e0f 100644 --- a/src/Interpreters/InterpreterSelectQuery.h +++ b/src/Interpreters/InterpreterSelectQuery.h @@ -122,16 +122,6 @@ public: bool supportsTransactions() const override { return true; } - /// This is tiny crutch to support reading from localhost replica during distributed query - /// Replica need to talk to the initiator through a connection to ask for a next task - /// but there will be no connection if we create Interpreter explicitly. 
- /// The other problem is that context is copied inside Interpreter's constructor - /// And with this method we can change the internals of cloned one - void setMergeTreeReadTaskCallbackAndClientInfo(MergeTreeReadTaskCallback && callback); - - /// It will set shard_num and shard_count to the client_info - void setProperClientInfo(size_t replica_num, size_t replica_count); - FilterDAGInfoPtr getAdditionalQueryInfo() const { return additional_filter_info; } RowPolicyFilterPtr getRowPolicyFilter() const; diff --git a/src/Interpreters/InterpreterSelectQueryAnalyzer.cpp b/src/Interpreters/InterpreterSelectQueryAnalyzer.cpp index 86417701f6d..6b6f3560c5a 100644 --- a/src/Interpreters/InterpreterSelectQueryAnalyzer.cpp +++ b/src/Interpreters/InterpreterSelectQueryAnalyzer.cpp @@ -135,25 +135,4 @@ void InterpreterSelectQueryAnalyzer::addStorageLimits(const StorageLimitsList & planner.addStorageLimits(storage_limits); } -void InterpreterSelectQueryAnalyzer::extendQueryLogElemImpl(QueryLogElement & elem, const ASTPtr &, ContextPtr) const -{ - elem.query_kind = "Select"; -} - -void InterpreterSelectQueryAnalyzer::setMergeTreeReadTaskCallbackAndClientInfo(MergeTreeReadTaskCallback && callback) -{ - context->getClientInfo().collaborate_with_initiator = true; - context->setMergeTreeReadTaskCallback(std::move(callback)); -} - -void InterpreterSelectQueryAnalyzer::setProperClientInfo(size_t replica_number, size_t count_participating_replicas) -{ - context->getClientInfo().query_kind = ClientInfo::QueryKind::SECONDARY_QUERY; - context->getClientInfo().number_of_current_replica = replica_number; - context->getClientInfo().count_participating_replicas = count_participating_replicas; - context->getClientInfo().connection_client_version_major = DBMS_VERSION_MAJOR; - context->getClientInfo().connection_client_version_minor = DBMS_VERSION_MINOR; - context->getClientInfo().connection_tcp_protocol_version = DBMS_TCP_PROTOCOL_VERSION; -} - } diff --git a/src/Interpreters/InterpreterSelectQueryAnalyzer.h b/src/Interpreters/InterpreterSelectQueryAnalyzer.h index 0c2465224e7..7fe64a7fb99 100644 --- a/src/Interpreters/InterpreterSelectQueryAnalyzer.h +++ b/src/Interpreters/InterpreterSelectQueryAnalyzer.h @@ -46,8 +46,6 @@ public: bool ignoreQuota() const override { return select_query_options.ignore_quota; } - void extendQueryLogElemImpl(QueryLogElement & elem, const ASTPtr &, ContextPtr) const override; - /// Set merge tree read task callback in context and set collaborate_with_initiator in client info void setMergeTreeReadTaskCallbackAndClientInfo(MergeTreeReadTaskCallback && callback); diff --git a/src/Interpreters/InterpreterSelectWithUnionQuery.cpp b/src/Interpreters/InterpreterSelectWithUnionQuery.cpp index e3954f2a197..bfa3d16bf29 100644 --- a/src/Interpreters/InterpreterSelectWithUnionQuery.cpp +++ b/src/Interpreters/InterpreterSelectWithUnionQuery.cpp @@ -398,8 +398,6 @@ void InterpreterSelectWithUnionQuery::ignoreWithTotals() void InterpreterSelectWithUnionQuery::extendQueryLogElemImpl(QueryLogElement & elem, const ASTPtr & /*ast*/, ContextPtr /*context_*/) const { - elem.query_kind = "Select"; - for (const auto & interpreter : nested_interpreters) { if (const auto * select_interpreter = dynamic_cast(interpreter.get())) diff --git a/src/Interpreters/InterpreterShowEngineQuery.cpp b/src/Interpreters/InterpreterShowEngineQuery.cpp new file mode 100644 index 00000000000..5aae6ad5d28 --- /dev/null +++ b/src/Interpreters/InterpreterShowEngineQuery.cpp @@ -0,0 +1,18 @@ +#include + +#include +#include +#include + 
+#include + + +namespace DB +{ + +BlockIO InterpreterShowEnginesQuery::execute() +{ + return executeQuery("SELECT * FROM system.table_engines", getContext(), true); +} + +} diff --git a/src/Interpreters/InterpreterShowEngineQuery.h b/src/Interpreters/InterpreterShowEngineQuery.h new file mode 100644 index 00000000000..3c451e9b071 --- /dev/null +++ b/src/Interpreters/InterpreterShowEngineQuery.h @@ -0,0 +1,29 @@ +#pragma once + +#include +#include + + +namespace DB +{ + +/** Return list of all engines + */ +class InterpreterShowEnginesQuery : public IInterpreter, WithMutableContext +{ +public: + InterpreterShowEnginesQuery(const ASTPtr & query_ptr_, ContextMutablePtr context_) + : WithMutableContext(context_), query_ptr(query_ptr_) {} + + BlockIO execute() override; + + /// We ignore the quota and limits here because execute() will rewrite a show query as a SELECT query and then + /// the SELECT query will check the quota and limits. + bool ignoreQuota() const override { return true; } + bool ignoreLimits() const override { return true; } + +private: + ASTPtr query_ptr; +}; + +} diff --git a/src/Interpreters/InterpreterSystemQuery.cpp b/src/Interpreters/InterpreterSystemQuery.cpp index 19b31a858f4..9a2252844d1 100644 --- a/src/Interpreters/InterpreterSystemQuery.cpp +++ b/src/Interpreters/InterpreterSystemQuery.cpp @@ -58,6 +58,10 @@ #include #include +#if USE_AWS_S3 +#include +#endif + #include "config.h" namespace DB @@ -327,9 +331,9 @@ BlockIO InterpreterSystemQuery::execute() getContext()->checkAccess(AccessType::SYSTEM_DROP_MMAP_CACHE); system_context->dropMMappedFileCache(); break; - case Type::DROP_QUERY_RESULT_CACHE: - getContext()->checkAccess(AccessType::SYSTEM_DROP_QUERY_RESULT_CACHE); - getContext()->dropQueryResultCache(); + case Type::DROP_QUERY_CACHE: + getContext()->checkAccess(AccessType::SYSTEM_DROP_QUERY_CACHE); + getContext()->dropQueryCache(); break; #if USE_EMBEDDED_COMPILER case Type::DROP_COMPILED_EXPRESSION_CACHE: @@ -338,6 +342,13 @@ BlockIO InterpreterSystemQuery::execute() cache->reset(); break; #endif +#if USE_AWS_S3 + case Type::DROP_S3_CLIENT_CACHE: + getContext()->checkAccess(AccessType::SYSTEM_DROP_S3_CLIENT_CACHE); + S3::ClientCacheRegistry::instance().clearCacheForAll(); + break; +#endif + case Type::DROP_FILESYSTEM_CACHE: { getContext()->checkAccess(AccessType::SYSTEM_DROP_FILESYSTEM_CACHE); @@ -969,7 +980,7 @@ AccessRightsElements InterpreterSystemQuery::getRequiredAccessForDDLOnCluster() case Type::DROP_DNS_CACHE: case Type::DROP_MARK_CACHE: case Type::DROP_MMAP_CACHE: - case Type::DROP_QUERY_RESULT_CACHE: + case Type::DROP_QUERY_CACHE: #if USE_EMBEDDED_COMPILER case Type::DROP_COMPILED_EXPRESSION_CACHE: #endif @@ -978,6 +989,9 @@ AccessRightsElements InterpreterSystemQuery::getRequiredAccessForDDLOnCluster() case Type::DROP_INDEX_UNCOMPRESSED_CACHE: case Type::DROP_FILESYSTEM_CACHE: case Type::DROP_SCHEMA_CACHE: +#if USE_AWS_S3 + case Type::DROP_S3_CLIENT_CACHE: +#endif { required_access.emplace_back(AccessType::SYSTEM_DROP_CACHE); break; @@ -1155,9 +1169,4 @@ AccessRightsElements InterpreterSystemQuery::getRequiredAccessForDDLOnCluster() return required_access; } -void InterpreterSystemQuery::extendQueryLogElemImpl(QueryLogElement & elem, const ASTPtr & /*ast*/, ContextPtr) const -{ - elem.query_kind = "System"; -} - } diff --git a/src/Interpreters/InterpreterSystemQuery.h b/src/Interpreters/InterpreterSystemQuery.h index 5673890daf3..df06a2fa4ef 100644 --- a/src/Interpreters/InterpreterSystemQuery.h +++ b/src/Interpreters/InterpreterSystemQuery.h @@ 
-73,8 +73,6 @@ private: AccessRightsElements getRequiredAccessForDDLOnCluster() const; void startStopAction(StorageActionBlockType action_type, bool start); - - void extendQueryLogElemImpl(QueryLogElement &, const ASTPtr &, ContextPtr) const override; }; diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index c207309a274..c11ee14478c 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include @@ -197,7 +197,7 @@ bool isStorageTouchedByMutations( MergeTreeData::DataPartPtr source_part, const StorageMetadataPtr & metadata_snapshot, const std::vector & commands, - ContextMutablePtr context_copy) + ContextPtr context) { if (commands.empty()) return false; @@ -210,7 +210,7 @@ bool isStorageTouchedByMutations( if (command.partition) { - const String partition_id = storage.getPartitionIDFromQuery(command.partition, context_copy); + const String partition_id = storage.getPartitionIDFromQuery(command.partition, context); if (partition_id == source_part->info.partition_id) all_commands_can_be_skipped = false; } @@ -221,15 +221,7 @@ bool isStorageTouchedByMutations( if (all_commands_can_be_skipped) return false; - /// We must read with one thread because it guarantees that - /// output stream will be sorted after reading from MergeTree parts. - /// Disable all settings that can enable reading with several streams. - context_copy->setSetting("max_streams_to_max_threads_ratio", 1); - context_copy->setSetting("max_threads", 1); - context_copy->setSetting("allow_asynchronous_read_from_io_pool_for_merge_tree", false); - context_copy->setSetting("max_streams_for_merge_tree_reading", Field(0)); - - ASTPtr select_query = prepareQueryAffectedAST(commands, storage.shared_from_this(), context_copy); + ASTPtr select_query = prepareQueryAffectedAST(commands, storage.shared_from_this(), context); auto storage_from_part = std::make_shared(source_part); @@ -237,12 +229,12 @@ bool isStorageTouchedByMutations( /// For some reason it may copy context and give it into ExpressionTransform /// after that we will use context from destroyed stack frame in our stream. 
InterpreterSelectQuery interpreter( - select_query, context_copy, storage_from_part, metadata_snapshot, SelectQueryOptions().ignoreLimits().ignoreProjections()); + select_query, context, storage_from_part, metadata_snapshot, SelectQueryOptions().ignoreLimits().ignoreProjections()); auto io = interpreter.execute(); - PullingPipelineExecutor executor(io.pipeline); + PullingAsyncPipelineExecutor executor(io.pipeline); Block block; - while (executor.pull(block)) {} + while (block.rows() == 0 && executor.pull(block)); if (!block.rows()) return false; diff --git a/src/Interpreters/MutationsInterpreter.h b/src/Interpreters/MutationsInterpreter.h index fbcb56fac6f..e2d9c5938d6 100644 --- a/src/Interpreters/MutationsInterpreter.h +++ b/src/Interpreters/MutationsInterpreter.h @@ -23,7 +23,7 @@ bool isStorageTouchedByMutations( MergeTreeData::DataPartPtr source_part, const StorageMetadataPtr & metadata_snapshot, const std::vector & commands, - ContextMutablePtr context_copy + ContextPtr context ); ASTPtr getPartitionAndPredicateExpressionForMutationCommand( diff --git a/src/Interpreters/ProcessList.cpp b/src/Interpreters/ProcessList.cpp index 9ee6cf1afe8..49d7989ac5e 100644 --- a/src/Interpreters/ProcessList.cpp +++ b/src/Interpreters/ProcessList.cpp @@ -3,17 +3,13 @@ #include #include #include -#include #include #include #include #include -#include #include -#include #include #include -#include #include #include @@ -526,6 +522,7 @@ QueryStatusInfo QueryStatus::getInfo(bool get_thread_list, bool get_profile_even QueryStatusInfo res{}; res.query = query; + res.query_kind = query_kind; res.client_info = client_info; res.elapsed_microseconds = watch.elapsedMicroseconds(); res.is_cancelled = is_killed.load(std::memory_order_relaxed); diff --git a/src/Interpreters/ProcessList.h b/src/Interpreters/ProcessList.h index eae8b15c695..d5c136ab62a 100644 --- a/src/Interpreters/ProcessList.h +++ b/src/Interpreters/ProcessList.h @@ -52,6 +52,7 @@ class ProcessListEntry; struct QueryStatusInfo { String query; + IAST::QueryKind query_kind{}; UInt64 elapsed_microseconds; size_t read_rows; size_t read_bytes; @@ -134,7 +135,8 @@ protected: OvercommitTracker * global_overcommit_tracker = nullptr; - IAST::QueryKind query_kind; + /// This is used to control the maximum number of SELECT or INSERT queries. + IAST::QueryKind query_kind{}; /// This field is unused in this class, but it /// increments/decrements metric in constructor/destructor. 
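The MutationsInterpreter hunk above changes more than the executor type: the old loop `while (executor.pull(block)) {}` drained the entire pipeline before checking `block.rows()`, whereas the new `while (block.rows() == 0 && executor.pull(block));` returns as soon as the first non-empty block arrives, which is all `isStorageTouchedByMutations` needs in order to decide whether any row is affected. Below is a minimal sketch of the same short-circuit pattern; `Chunk`, `PullingSource`, and `anyRows` are hypothetical stand-ins for the real pipeline and `PullingAsyncPipelineExecutor`, not ClickHouse code:

```cpp
#include <iostream>
#include <vector>

// A stand-in for a pulling executor: pull() yields chunks until the source
// is exhausted. Chunks may legitimately be empty (e.g. fully filtered out).
struct Chunk { size_t rows = 0; };

struct PullingSource
{
    std::vector<Chunk> chunks;
    size_t pos = 0;

    bool pull(Chunk & out)
    {
        if (pos >= chunks.size())
            return false;
        out = chunks[pos++];
        return true;
    }
};

// Returns true as soon as any chunk contains rows; does not drain the source.
bool anyRows(PullingSource & source)
{
    Chunk chunk;
    while (chunk.rows == 0 && source.pull(chunk))
        ;
    return chunk.rows != 0;
}

int main()
{
    PullingSource source{{Chunk{0}, Chunk{0}, Chunk{42}, Chunk{7}}};
    std::cout << std::boolalpha << anyRows(source) << '\n'; // true, after three pulls
}
```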
@@ -176,11 +178,6 @@ public: return &thread_group->memory_tracker; } - IAST::QueryKind getQueryKind() const - { - return query_kind; - } - bool updateProgressIn(const Progress & value) { CurrentThread::updateProgressIn(value); diff --git a/src/Interpreters/QueryLog.cpp b/src/Interpreters/QueryLog.cpp index 224ca9f11a8..2a1c53e746b 100644 --- a/src/Interpreters/QueryLog.cpp +++ b/src/Interpreters/QueryLog.cpp @@ -166,7 +166,9 @@ void QueryLogElement::appendToBlock(MutableColumns & columns) const columns[i++]->insertData(query.data(), query.size()); columns[i++]->insertData(formatted_query.data(), formatted_query.size()); columns[i++]->insert(normalized_query_hash); - columns[i++]->insertData(query_kind.data(), query_kind.size()); + + const std::string_view query_kind_str = magic_enum::enum_name(query_kind); + columns[i++]->insertData(query_kind_str.data(), query_kind_str.size()); { auto & column_databases = typeid_cast(*columns[i++]); diff --git a/src/Interpreters/QueryLog.h b/src/Interpreters/QueryLog.h index e8dbbf685ea..cb9cf289e7a 100644 --- a/src/Interpreters/QueryLog.h +++ b/src/Interpreters/QueryLog.h @@ -6,6 +6,7 @@ #include #include #include +#include namespace ProfileEvents @@ -58,7 +59,7 @@ struct QueryLogElement String formatted_query; UInt64 normalized_query_hash{}; - String query_kind; + IAST::QueryKind query_kind{}; std::set query_databases; std::set query_tables; std::set query_columns; diff --git a/src/Interpreters/evaluateConstantExpression.cpp b/src/Interpreters/evaluateConstantExpression.cpp index 6aa89426916..ebefa0d9ce7 100644 --- a/src/Interpreters/evaluateConstantExpression.cpp +++ b/src/Interpreters/evaluateConstantExpression.cpp @@ -70,7 +70,7 @@ std::pair> evaluateConstantExpression(co if (context->getClientInfo().query_kind != ClientInfo::QueryKind::SECONDARY_QUERY && context->getSettingsRef().normalize_function_names) FunctionNameNormalizer().visit(ast.get()); - String name = ast->getColumnName(); + String result_name = ast->getColumnName(); auto syntax_result = TreeRewriter(context).analyze(ast, source_columns); /// AST potentially could be transformed to literal during TreeRewriter analyze. @@ -78,33 +78,37 @@ std::pair> evaluateConstantExpression(co if (ASTLiteral * literal = ast->as()) return getFieldAndDataTypeFromLiteral(literal); - ExpressionActionsPtr expr_for_constant_folding = ExpressionAnalyzer(ast, syntax_result, context).getConstActions(); + auto actions = ExpressionAnalyzer(ast, syntax_result, context).getConstActionsDAG(); - /// There must be at least one column in the block so that it knows the number of rows. 
- Block block_with_constants{{ ColumnConst::create(ColumnUInt8::create(1, 0), 1), std::make_shared(), "_dummy" }}; + ColumnPtr result_column; + DataTypePtr result_type; + for (const auto & action_node : actions->getOutputs()) + { + if ((action_node->result_name == result_name) && action_node->column) + { + result_column = action_node->column; + result_type = action_node->result_type; + break; + } + } - expr_for_constant_folding->execute(block_with_constants); + if (!result_column) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Element of set in IN, VALUES or LIMIT or aggregate function parameter " + "is not a constant expression (result column not found): {}", result_name); - if (!block_with_constants || block_with_constants.rows() == 0) + if (result_column->empty()) throw Exception(ErrorCodes::LOGICAL_ERROR, - "Logical error: empty block after evaluation " + "Logical error: empty result column after evaluation " "of constant expression for IN, VALUES or LIMIT or aggregate function parameter"); - if (!block_with_constants.has(name)) - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Element of set in IN, VALUES or LIMIT or aggregate function parameter " - "is not a constant expression (result column not found): {}", name); - - const ColumnWithTypeAndName & result = block_with_constants.getByName(name); - const IColumn & result_column = *result.column; - /// Expressions like rand() or now() are not constant - if (!isColumnConst(result_column)) + if (!isColumnConst(*result_column)) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Element of set in IN, VALUES or LIMIT or aggregate function parameter " - "is not a constant expression (result column is not const): {}", name); + "is not a constant expression (result column is not const): {}", result_name); - return std::make_pair(result_column[0], result.type); + return std::make_pair((*result_column)[0], result_type); } diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index 646f1e89fc3..06d92116adc 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #include @@ -235,10 +235,10 @@ static void onExceptionBeforeStart( elem.query = query_for_logging; elem.normalized_query_hash = normalizedQueryHash(query_for_logging); - // Try log query_kind if ast is valid + // Log query_kind if ast is valid if (ast) { - elem.query_kind = magic_enum::enum_name(ast->getQueryKind()); + elem.query_kind = ast->getQueryKind(); if (settings.log_formatted_queries) elem.formatted_query = queryToString(ast); } @@ -716,48 +716,48 @@ static std::tuple executeQueryImpl( /// If /// - it is a SELECT query, - /// - passive (read) use of the query result cache is enabled, and - /// - the query result cache knows the query result - /// then replace the pipeline by a new pipeline with a single source that is populated from the query result cache - auto query_result_cache = context->getQueryResultCache(); - bool read_result_from_query_result_cache = false; /// a query must not read from *and* write to the query result cache at the same time - if (query_result_cache != nullptr - && (settings.allow_experimental_query_result_cache && settings.use_query_result_cache && settings.enable_reads_from_query_result_cache) + /// - passive (read) use of the query cache is enabled, and + /// - the query cache knows the query result + /// then replace the pipeline by a new pipeline with a single source that is populated from the query cache + auto query_cache = 
context->getQueryCache(); + bool read_result_from_query_cache = false; /// a query must not read from *and* write to the query cache at the same time + if (query_cache != nullptr + && (settings.allow_experimental_query_cache && settings.use_query_cache && settings.enable_reads_from_query_cache) && res.pipeline.pulling()) { - QueryResultCache::Key key( + QueryCache::Key key( ast, res.pipeline.getHeader(), std::make_optional(context->getUserName()), - std::chrono::system_clock::now() + std::chrono::seconds(settings.query_result_cache_ttl)); - QueryResultCache::Reader reader = query_result_cache->createReader(key); + std::chrono::system_clock::now() + std::chrono::seconds(settings.query_cache_ttl)); + QueryCache::Reader reader = query_cache->createReader(key); if (reader.hasCacheEntryForKey()) { res.pipeline = QueryPipeline(reader.getPipe()); - read_result_from_query_result_cache = true; + read_result_from_query_cache = true; } } /// If /// - it is a SELECT query, and - /// - active (write) use of the query result cache is enabled - /// then add a processor on top of the pipeline which stores the result in the query result cache. - if (!read_result_from_query_result_cache - && query_result_cache != nullptr - && settings.allow_experimental_query_result_cache && settings.use_query_result_cache && settings.enable_writes_to_query_result_cache + /// - active (write) use of the query cache is enabled + /// then add a processor on top of the pipeline which stores the result in the query cache. + if (!read_result_from_query_cache + && query_cache != nullptr + && settings.allow_experimental_query_cache && settings.use_query_cache && settings.enable_writes_to_query_cache && res.pipeline.pulling() - && (!astContainsNonDeterministicFunctions(ast, context) || settings.query_result_cache_store_results_of_queries_with_nondeterministic_functions)) + && (!astContainsNonDeterministicFunctions(ast, context) || settings.query_cache_store_results_of_queries_with_nondeterministic_functions)) { - QueryResultCache::Key key( + QueryCache::Key key( ast, res.pipeline.getHeader(), - settings.query_result_cache_share_between_users ? std::nullopt : std::make_optional(context->getUserName()), - std::chrono::system_clock::now() + std::chrono::seconds(settings.query_result_cache_ttl)); + settings.query_cache_share_between_users ? 
std::nullopt : std::make_optional(context->getUserName()), + std::chrono::system_clock::now() + std::chrono::seconds(settings.query_cache_ttl)); - const size_t num_query_runs = query_result_cache->recordQueryRun(key); - if (num_query_runs > settings.query_result_cache_min_query_runs) + const size_t num_query_runs = query_cache->recordQueryRun(key); + if (num_query_runs > settings.query_cache_min_query_runs) { - auto stream_in_query_result_cache_transform = std::make_shared(res.pipeline.getHeader(), query_result_cache, key, - std::chrono::milliseconds(context->getSettings().query_result_cache_min_query_duration.totalMilliseconds())); - res.pipeline.streamIntoQueryResultCache(stream_in_query_result_cache_transform); + auto stream_in_query_cache_transform = std::make_shared(res.pipeline.getHeader(), query_cache, key, + std::chrono::milliseconds(context->getSettings().query_cache_min_query_duration.totalMilliseconds())); + res.pipeline.streamIntoQueryCache(stream_in_query_cache_transform); } } @@ -807,6 +807,7 @@ static std::tuple executeQueryImpl( if (settings.log_formatted_queries) elem.formatted_query = queryToString(ast); elem.normalized_query_hash = normalizedQueryHash(query_for_logging); + elem.query_kind = ast->getQueryKind(); elem.client_info = client_info; @@ -908,10 +909,10 @@ static std::tuple executeQueryImpl( auto finish_callback = [elem, context, ast, - allow_experimental_query_result_cache = settings.allow_experimental_query_result_cache, - use_query_result_cache = settings.use_query_result_cache, - enable_writes_to_query_result_cache = settings.enable_writes_to_query_result_cache, - query_result_cache_store_results_of_queries_with_nondeterministic_functions = settings.query_result_cache_store_results_of_queries_with_nondeterministic_functions, + allow_experimental_query_cache = settings.allow_experimental_query_cache, + use_query_cache = settings.use_query_cache, + enable_writes_to_query_cache = settings.enable_writes_to_query_cache, + query_cache_store_results_of_queries_with_nondeterministic_functions = settings.query_cache_store_results_of_queries_with_nondeterministic_functions, log_queries, log_queries_min_type = settings.log_queries_min_type, log_queries_min_query_duration_ms = settings.log_queries_min_query_duration_ms.totalMilliseconds(), @@ -921,15 +922,15 @@ static std::tuple executeQueryImpl( pulling_pipeline = pipeline.pulling(), query_span](QueryPipeline & query_pipeline) mutable { - /// If active (write) use of the query result cache is enabled and the query is eligible for result caching, then store the - /// query result buffered in the special-purpose cache processor (added on top of the pipeline) into the cache. - auto query_result_cache = context->getQueryResultCache(); - if (query_result_cache != nullptr + /// If active (write) use of the query cache is enabled and the query is eligible for result caching, then store the query + /// result buffered in the special-purpose cache processor (added on top of the pipeline) into the cache. 
+ auto query_cache = context->getQueryCache(); + if (query_cache != nullptr && pulling_pipeline - && allow_experimental_query_result_cache && use_query_result_cache && enable_writes_to_query_result_cache - && (!astContainsNonDeterministicFunctions(ast, context) || query_result_cache_store_results_of_queries_with_nondeterministic_functions)) + && allow_experimental_query_cache && use_query_cache && enable_writes_to_query_cache + && (!astContainsNonDeterministicFunctions(ast, context) || query_cache_store_results_of_queries_with_nondeterministic_functions)) { - query_pipeline.finalizeWriteInQueryResultCache(); + query_pipeline.finalizeWriteInQueryCache(); } QueryStatusPtr process_list_elem = context->getProcessListElement(); diff --git a/src/Interpreters/interpretSubquery.cpp b/src/Interpreters/interpretSubquery.cpp index 5f00be07fa5..2358b0ab42a 100644 --- a/src/Interpreters/interpretSubquery.cpp +++ b/src/Interpreters/interpretSubquery.cpp @@ -112,6 +112,8 @@ std::shared_ptr interpretSubquery( subquery_options.removeDuplicates(); } + /// We don't want to execute reading for subqueries in parallel + subquery_context->setSetting("allow_experimental_parallel_reading_from_replicas", false); return std::make_shared(query, subquery_context, subquery_options, required_source_columns); } diff --git a/src/Parsers/ASTAlterNamedCollectionQuery.h b/src/Parsers/ASTAlterNamedCollectionQuery.h index a8aa06200fd..36cf9c65b81 100644 --- a/src/Parsers/ASTAlterNamedCollectionQuery.h +++ b/src/Parsers/ASTAlterNamedCollectionQuery.h @@ -23,6 +23,8 @@ public: void formatImpl(const FormatSettings & s, FormatState & state, FormatStateStacked frame) const override; ASTPtr getRewrittenASTWithoutOnCluster(const WithoutOnClusterASTRewriteParams &) const override { return removeOnCluster(clone()); } + + QueryKind getQueryKind() const override { return QueryKind::Alter; } }; } diff --git a/src/Parsers/ASTBackupQuery.cpp b/src/Parsers/ASTBackupQuery.cpp index 85a0a0c7799..2c26e723687 100644 --- a/src/Parsers/ASTBackupQuery.cpp +++ b/src/Parsers/ASTBackupQuery.cpp @@ -289,4 +289,9 @@ ASTPtr ASTBackupQuery::getRewrittenASTWithoutOnCluster(const WithoutOnClusterAST return new_query; } +IAST::QueryKind ASTBackupQuery::getQueryKind() const +{ + return kind == Kind::BACKUP ? 
QueryKind::Backup : QueryKind::Restore; +} + } diff --git a/src/Parsers/ASTBackupQuery.h b/src/Parsers/ASTBackupQuery.h index 708f5bf468f..a3e3a144c72 100644 --- a/src/Parsers/ASTBackupQuery.h +++ b/src/Parsers/ASTBackupQuery.h @@ -93,5 +93,6 @@ public: ASTPtr clone() const override; void formatImpl(const FormatSettings & format, FormatState &, FormatStateStacked) const override; ASTPtr getRewrittenASTWithoutOnCluster(const WithoutOnClusterASTRewriteParams &) const override; + QueryKind getQueryKind() const override; }; } diff --git a/src/Parsers/ASTCheckQuery.h b/src/Parsers/ASTCheckQuery.h index 8b376ef8d60..f29a0bd5406 100644 --- a/src/Parsers/ASTCheckQuery.h +++ b/src/Parsers/ASTCheckQuery.h @@ -23,6 +23,8 @@ struct ASTCheckQuery : public ASTQueryWithTableAndOutput return res; } + QueryKind getQueryKind() const override { return QueryKind::Check; } + protected: void formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override { diff --git a/src/Parsers/ASTCreateFunctionQuery.h b/src/Parsers/ASTCreateFunctionQuery.h index ce0d874b15d..8ce167ba7db 100644 --- a/src/Parsers/ASTCreateFunctionQuery.h +++ b/src/Parsers/ASTCreateFunctionQuery.h @@ -25,6 +25,8 @@ public: ASTPtr getRewrittenASTWithoutOnCluster(const WithoutOnClusterASTRewriteParams &) const override { return removeOnCluster(clone()); } String getFunctionName() const; + + QueryKind getQueryKind() const override { return QueryKind::Create; } }; } diff --git a/src/Parsers/ASTCreateNamedCollectionQuery.h b/src/Parsers/ASTCreateNamedCollectionQuery.h index 901e6b50a4c..303d1901931 100644 --- a/src/Parsers/ASTCreateNamedCollectionQuery.h +++ b/src/Parsers/ASTCreateNamedCollectionQuery.h @@ -22,6 +22,8 @@ public: ASTPtr getRewrittenASTWithoutOnCluster(const WithoutOnClusterASTRewriteParams &) const override { return removeOnCluster(clone()); } + QueryKind getQueryKind() const override { return QueryKind::Create; } + std::string getCollectionName() const; }; diff --git a/src/Parsers/ASTCreateQuery.cpp b/src/Parsers/ASTCreateQuery.cpp index baf626f87d8..e74be6c66c5 100644 --- a/src/Parsers/ASTCreateQuery.cpp +++ b/src/Parsers/ASTCreateQuery.cpp @@ -74,6 +74,11 @@ void ASTStorage::formatImpl(const FormatSettings & s, FormatState & state, Forma } } +bool ASTStorage::isExtendedStorageDefinition() const +{ + return partition_by || primary_key || order_by || sample_by || settings; +} + class ASTColumnsElement : public IAST { diff --git a/src/Parsers/ASTCreateQuery.h b/src/Parsers/ASTCreateQuery.h index f4e29f67bc2..90a15e09369 100644 --- a/src/Parsers/ASTCreateQuery.h +++ b/src/Parsers/ASTCreateQuery.h @@ -30,6 +30,8 @@ public: ASTPtr clone() const override; void formatImpl(const FormatSettings & s, FormatState & state, FormatStateStacked frame) const override; + + bool isExtendedStorageDefinition() const; }; diff --git a/src/Parsers/ASTDeleteQuery.cpp b/src/Parsers/ASTDeleteQuery.cpp index 08b40f65121..09dc4b936ae 100644 --- a/src/Parsers/ASTDeleteQuery.cpp +++ b/src/Parsers/ASTDeleteQuery.cpp @@ -41,6 +41,8 @@ void ASTDeleteQuery::formatQueryImpl(const FormatSettings & settings, FormatStat } settings.ostr << backQuoteIfNeed(getTable()); + formatOnCluster(settings); + settings.ostr << (settings.hilite ? hilite_keyword : "") << " WHERE " << (settings.hilite ? 
hilite_none : ""); predicate->formatImpl(settings, state, frame); } diff --git a/src/Parsers/ASTDeleteQuery.h b/src/Parsers/ASTDeleteQuery.h index bcb97639b64..cc115a366db 100644 --- a/src/Parsers/ASTDeleteQuery.h +++ b/src/Parsers/ASTDeleteQuery.h @@ -2,15 +2,22 @@ #include #include +#include namespace DB { /// DELETE FROM [db.]name WHERE ... -class ASTDeleteQuery : public ASTQueryWithTableAndOutput +class ASTDeleteQuery : public ASTQueryWithTableAndOutput, public ASTQueryWithOnCluster { public: String getID(char delim) const final; ASTPtr clone() const final; + QueryKind getQueryKind() const override { return QueryKind::Delete; } + + ASTPtr getRewrittenASTWithoutOnCluster(const WithoutOnClusterASTRewriteParams & params) const override + { + return removeOnCluster(clone(), params.default_database); + } ASTPtr predicate; diff --git a/src/Parsers/ASTDropFunctionQuery.h b/src/Parsers/ASTDropFunctionQuery.h index edfa6a23994..da8fb1ba1c1 100644 --- a/src/Parsers/ASTDropFunctionQuery.h +++ b/src/Parsers/ASTDropFunctionQuery.h @@ -21,6 +21,8 @@ public: void formatImpl(const FormatSettings & s, FormatState & state, FormatStateStacked frame) const override; ASTPtr getRewrittenASTWithoutOnCluster(const WithoutOnClusterASTRewriteParams &) const override { return removeOnCluster(clone()); } + + QueryKind getQueryKind() const override { return QueryKind::Drop; } }; } diff --git a/src/Parsers/ASTDropIndexQuery.cpp b/src/Parsers/ASTDropIndexQuery.cpp index a07336a2d26..43de582ba8a 100644 --- a/src/Parsers/ASTDropIndexQuery.cpp +++ b/src/Parsers/ASTDropIndexQuery.cpp @@ -9,7 +9,7 @@ namespace DB /** Get the text that identifies this element. */ String ASTDropIndexQuery::getID(char delim) const { - return "CreateIndexQuery" + (delim + getDatabase()) + delim + getTable(); + return "DropIndexQuery" + (delim + getDatabase()) + delim + getTable(); } ASTPtr ASTDropIndexQuery::clone() const diff --git a/src/Parsers/ASTDropNamedCollectionQuery.h b/src/Parsers/ASTDropNamedCollectionQuery.h index 0b71bdaf213..2ead6c72532 100644 --- a/src/Parsers/ASTDropNamedCollectionQuery.h +++ b/src/Parsers/ASTDropNamedCollectionQuery.h @@ -20,6 +20,8 @@ public: void formatImpl(const FormatSettings & s, FormatState & state, FormatStateStacked frame) const override; ASTPtr getRewrittenASTWithoutOnCluster(const WithoutOnClusterASTRewriteParams &) const override { return removeOnCluster(clone()); } + + QueryKind getQueryKind() const override { return QueryKind::Drop; } }; } diff --git a/src/Parsers/ASTExplainQuery.h b/src/Parsers/ASTExplainQuery.h index cb8b3199c81..3903cf42269 100644 --- a/src/Parsers/ASTExplainQuery.h +++ b/src/Parsers/ASTExplainQuery.h @@ -109,6 +109,8 @@ public: const ASTPtr & getTableFunction() const { return table_function; } const ASTPtr & getTableOverride() const { return table_override; } + QueryKind getQueryKind() const override { return QueryKind::Explain; } + protected: void formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override { diff --git a/src/Parsers/ASTExternalDDLQuery.h b/src/Parsers/ASTExternalDDLQuery.h index 4ec68aa8b9f..7913d44b970 100644 --- a/src/Parsers/ASTExternalDDLQuery.h +++ b/src/Parsers/ASTExternalDDLQuery.h @@ -4,6 +4,7 @@ #include #include + namespace DB { @@ -38,6 +39,8 @@ public: from->formatImpl(settings, state, stacked); external_ddl->formatImpl(settings, state, stacked); } + + QueryKind getQueryKind() const override { return QueryKind::ExternalDDL; } }; } diff --git a/src/Parsers/ASTFunction.cpp 
b/src/Parsers/ASTFunction.cpp index fccef01a2bc..7a19cba0f75 100644 --- a/src/Parsers/ASTFunction.cpp +++ b/src/Parsers/ASTFunction.cpp @@ -37,81 +37,118 @@ namespace { /// Finds arguments of a specified function which should not be displayed for most users for security reasons. /// That involves passwords and secret keys. - /// The member function getRange() returns a pair of numbers [first, last) specifying arguments - /// which must be hidden. If the function returns {-1, -1} that means no arguments must be hidden. class FunctionSecretArgumentsFinder { public: explicit FunctionSecretArgumentsFinder(const ASTFunction & function_) : function(function_) { - if (function.arguments) - { - if (const auto * expr_list = function.arguments->as()) - arguments = &expr_list->children; - } - } + if (!function.arguments) + return; - std::pair getRange() const - { - if (!arguments) - return npos; + const auto * expr_list = function.arguments->as(); + if (!expr_list) + return; + arguments = &expr_list->children; switch (function.kind) { - case ASTFunction::Kind::ORDINARY_FUNCTION: return findOrdinaryFunctionSecretArguments(); - case ASTFunction::Kind::WINDOW_FUNCTION: return npos; - case ASTFunction::Kind::LAMBDA_FUNCTION: return npos; - case ASTFunction::Kind::TABLE_ENGINE: return findTableEngineSecretArguments(); - case ASTFunction::Kind::DATABASE_ENGINE: return findDatabaseEngineSecretArguments(); - case ASTFunction::Kind::BACKUP_NAME: return findBackupNameSecretArguments(); + case ASTFunction::Kind::ORDINARY_FUNCTION: findOrdinaryFunctionSecretArguments(); break; + case ASTFunction::Kind::WINDOW_FUNCTION: break; + case ASTFunction::Kind::LAMBDA_FUNCTION: break; + case ASTFunction::Kind::TABLE_ENGINE: findTableEngineSecretArguments(); break; + case ASTFunction::Kind::DATABASE_ENGINE: findDatabaseEngineSecretArguments(); break; + case ASTFunction::Kind::BACKUP_NAME: findBackupNameSecretArguments(); break; } } - static const constexpr std::pair npos{static_cast(-1), static_cast(-1)}; + struct Result + { + /// Result constructed by default means no arguments will be hidden. + size_t start = static_cast(-1); + size_t count = 0; /// Mostly it's either 0 or 1. There are only a few cases where `count` can be greater than 1 (e.g. see `encrypt`). + /// In all known cases secret arguments are consecutive + bool are_named = false; /// Arguments like `password = 'password'` are considered as named arguments. + }; + + Result getResult() const { return result; } private: - std::pair findOrdinaryFunctionSecretArguments() const + const ASTFunction & function; + const ASTs * arguments = nullptr; + Result result; + + void markSecretArgument(size_t index, bool argument_is_named = false) + { + if (!result.count) + { + result.start = index; + result.are_named = argument_is_named; + } + chassert(index >= result.start); /// We always check arguments consecutively + result.count = index + 1 - result.start; + if (!argument_is_named) + result.are_named = false; + } + + void findOrdinaryFunctionSecretArguments() { if ((function.name == "mysql") || (function.name == "postgresql") || (function.name == "mongodb")) { /// mysql('host:port', 'database', 'table', 'user', 'password', ...) /// postgresql('host:port', 'database', 'table', 'user', 'password', ...) /// mongodb('host:port', 'database', 'collection', 'user', 'password', ...) 
- return {4, 5}; + findMySQLFunctionSecretArguments(); } else if ((function.name == "s3") || (function.name == "cosn") || (function.name == "oss")) { /// s3('url', 'aws_access_key_id', 'aws_secret_access_key', ...) - return findS3FunctionSecretArguments(/* is_cluster_function= */ false); + findS3FunctionSecretArguments(/* is_cluster_function= */ false); } else if (function.name == "s3Cluster") { /// s3Cluster('cluster_name', 'url', 'aws_access_key_id', 'aws_secret_access_key', ...) - return findS3FunctionSecretArguments(/* is_cluster_function= */ true); + findS3FunctionSecretArguments(/* is_cluster_function= */ true); } else if ((function.name == "remote") || (function.name == "remoteSecure")) { /// remote('addresses_expr', 'db', 'table', 'user', 'password', ...) - return findRemoteFunctionSecretArguments(); + findRemoteFunctionSecretArguments(); } else if ((function.name == "encrypt") || (function.name == "decrypt") || (function.name == "aes_encrypt_mysql") || (function.name == "aes_decrypt_mysql") || (function.name == "tryDecrypt")) { /// encrypt('mode', 'plaintext', 'key' [, iv, aad]) - return findEncryptionFunctionSecretArguments(); - } - else - { - return npos; + findEncryptionFunctionSecretArguments(); } } - std::pair findS3FunctionSecretArguments(bool is_cluster_function) const + void findMySQLFunctionSecretArguments() + { + if (isNamedCollectionName(0)) + { + /// mysql(named_collection, ..., password = 'password', ...) + findSecretNamedArgument("password", 1); + } + else + { + /// mysql('host:port', 'database', 'table', 'user', 'password', ...) + markSecretArgument(4); + } + } + + void findS3FunctionSecretArguments(bool is_cluster_function) { /// s3Cluster('cluster_name', 'url', ...) has 'url' as its second argument. size_t url_arg_idx = is_cluster_function ? 1 : 0; + if (!is_cluster_function && isNamedCollectionName(0)) + { + /// s3(named_collection, ..., secret_access_key = 'secret_access_key', ...) + findSecretNamedArgument("secret_access_key", 1); + return; + } + /// We're going to replace 'aws_secret_access_key' with '[HIDDEN'] for the following signatures: /// s3('url', 'aws_access_key_id', 'aws_secret_access_key', ...) /// s3Cluster('cluster_name', 'url', 'aws_access_key_id', 'aws_secret_access_key', 'format', 'compression') @@ -119,12 +156,12 @@ namespace /// But we should check the number of arguments first because we don't need to do any replacements in case of /// s3('url' [, 'format']) or s3Cluster('cluster_name', 'url' [, 'format']) if (arguments->size() < url_arg_idx + 3) - return npos; + return; if (arguments->size() >= url_arg_idx + 5) { /// s3('url', 'aws_access_key_id', 'aws_secret_access_key', 'format', 'structure', ...) - return {url_arg_idx + 2, url_arg_idx + 3}; + markSecretArgument(url_arg_idx + 2); } else { @@ -136,15 +173,16 @@ namespace { /// We couldn't evaluate the argument after 'url' so we don't know whether it is a format or `aws_access_key_id`. /// So it's safer to wipe the next argument just in case. - return {url_arg_idx + 2, url_arg_idx + 3}; /// Wipe either `aws_secret_access_key` or `structure`. + markSecretArgument(url_arg_idx + 2); /// Wipe either `aws_secret_access_key` or `structure`. + return; } if (KnownFormatNames::instance().exists(format)) - return npos; /// The argument after 'url' is a format: s3('url', 'format', ...) + return; /// The argument after 'url' is a format: s3('url', 'format', ...) /// The argument after 'url' is not a format so we do our replacement: /// s3('url', 'aws_access_key_id', 'aws_secret_access_key', ...) 
-> s3('url', 'aws_access_key_id', '[HIDDEN]', ...) - return {url_arg_idx + 2, url_arg_idx + 3}; + markSecretArgument(url_arg_idx + 2); } } @@ -153,8 +191,12 @@ namespace if (arg_idx >= arguments->size()) return false; - ASTPtr argument = (*arguments)[arg_idx]; - if (const auto * literal = argument->as()) + return tryGetStringFromArgument(*(*arguments)[arg_idx], res, allow_identifier); + } + + static bool tryGetStringFromArgument(const IAST & argument, String * res, bool allow_identifier = true) + { + if (const auto * literal = argument.as()) { if (literal->value.getType() != Field::Types::String) return false; @@ -165,7 +207,7 @@ namespace if (allow_identifier) { - if (const auto * id = argument->as()) + if (const auto * id = argument.as()) { if (res) *res = id->name(); @@ -176,8 +218,15 @@ namespace return false; } - std::pair findRemoteFunctionSecretArguments() const + void findRemoteFunctionSecretArguments() { + if (isNamedCollectionName(0)) + { + /// remote(named_collection, ..., password = 'password', ...) + findSecretNamedArgument("password", 1); + return; + } + /// We're going to replace 'password' with '[HIDDEN'] for the following signatures: /// remote('addresses_expr', db.table, 'user' [, 'password'] [, sharding_key]) /// remote('addresses_expr', 'db', 'table', 'user' [, 'password'] [, sharding_key]) @@ -186,7 +235,7 @@ namespace /// But we should check the number of arguments first because we don't need to do any replacements in case of /// remote('addresses_expr', db.table) if (arguments->size() < 3) - return npos; + return; size_t arg_num = 1; @@ -207,20 +256,17 @@ namespace /// before the argument 'password'. So it's safer to wipe two arguments just in case. /// The last argument can be also a `sharding_key`, so we need to check that argument is a literal string /// before wiping it (because the `password` argument is always a literal string). - auto res = npos; if (tryGetStringFromArgument(arg_num + 2, nullptr, /* allow_identifier= */ false)) { /// Wipe either `password` or `user`. - res = {arg_num + 2, arg_num + 3}; + markSecretArgument(arg_num + 2); } if (tryGetStringFromArgument(arg_num + 3, nullptr, /* allow_identifier= */ false)) { /// Wipe either `password` or `sharding_key`. - if (res == npos) - res.first = arg_num + 3; - res.second = arg_num + 4; + markSecretArgument(arg_num + 3); } - return res; + return; } /// Skip the current argument (which is either a database name or a qualified table name). @@ -241,9 +287,7 @@ namespace /// before wiping it (because the `password` argument is always a literal string). bool can_be_password = tryGetStringFromArgument(arg_num, nullptr, /* allow_identifier= */ false); if (can_be_password) - return {arg_num, arg_num + 1}; - - return npos; + markSecretArgument(arg_num); } /// Tries to get either a database name or a qualified table name from an argument. 
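The refactoring above replaces `getRange()`, which returned a single `[first, last)` pair, with a `Result{start, count, are_named}` accumulated via `markSecretArgument()`; this lets the finder mask both positional secrets (`mysql('host:port', ..., 'password')`) and the named form (`mysql(named_collection, password = '...')`) introduced for named collections. The toy below mimics only the masking contract consumed later in `ASTFunction::formatImplWithoutAlias`; `formatArguments`, the hard-coded `password` key, and the inputs are invented for illustration:

```cpp
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

// Mirrors the shape of FunctionSecretArgumentsFinder::Result in the diff:
// a consecutive run of secret arguments, possibly written as `key = value`.
struct Result
{
    size_t start = static_cast<size_t>(-1);
    size_t count = 0;
    bool are_named = false;
};

// Hypothetical formatter: prints the argument list, hiding the secret run.
void formatArguments(const std::vector<std::string> & args, const Result & secrets)
{
    for (size_t i = 0; i < args.size(); ++i)
    {
        if (i != 0)
            std::cout << ", ";
        if (secrets.start <= i && i < secrets.start + secrets.count)
        {
            if (secrets.are_named)
                std::cout << "password = "; // the key stays visible, only the value is hidden
            std::cout << "'[HIDDEN]'";
            continue;
        }
        std::cout << args[i];
    }
    std::cout << '\n';
}

int main()
{
    formatArguments({"'host:port'", "'db'", "'table'", "'user'", "'secret'"}, Result{4, 1, false});
    // -> 'host:port', 'db', 'table', 'user', '[HIDDEN]'
}
```

In the real formatter the key of a named secret is printed from the left-hand side of the `equals` function, and iteration breaks early when the hidden run extends to the last argument; the sketch skips both details.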
@@ -278,20 +322,24 @@ namespace return true; } - std::pair findEncryptionFunctionSecretArguments() const + void findEncryptionFunctionSecretArguments() { + if (arguments->empty()) + return; + /// We replace all arguments after 'mode' with '[HIDDEN]': /// encrypt('mode', 'plaintext', 'key' [, iv, aad]) -> encrypt('mode', '[HIDDEN]') - return {1, arguments->size()}; + result.start = 1; + result.count = arguments->size() - 1; } - std::pair findTableEngineSecretArguments() const + void findTableEngineSecretArguments() { const String & engine_name = function.name; if (engine_name == "ExternalDistributed") { /// ExternalDistributed('engine', 'host:port', 'database', 'table', 'user', 'password') - return {5, 6}; + findExternalDistributedTableEngineSecretArguments(); } else if ((engine_name == "MySQL") || (engine_name == "PostgreSQL") || (engine_name == "MaterializedPostgreSQL") || (engine_name == "MongoDB")) @@ -300,21 +348,38 @@ namespace /// PostgreSQL('host:port', 'database', 'table', 'user', 'password', ...) /// MaterializedPostgreSQL('host:port', 'database', 'table', 'user', 'password', ...) /// MongoDB('host:port', 'database', 'collection', 'user', 'password', ...) - return {4, 5}; + findMySQLFunctionSecretArguments(); } else if ((engine_name == "S3") || (engine_name == "COSN") || (engine_name == "OSS")) { /// S3('url', ['aws_access_key_id', 'aws_secret_access_key',] ...) - return findS3TableEngineSecretArguments(); - } - else - { - return npos; + findS3TableEngineSecretArguments(); } } - std::pair findS3TableEngineSecretArguments() const + void findExternalDistributedTableEngineSecretArguments() { + if (isNamedCollectionName(1)) + { + /// ExternalDistributed('engine', named_collection, ..., password = 'password', ...) + findSecretNamedArgument("password", 2); + } + else + { + /// ExternalDistributed('engine', 'host:port', 'database', 'table', 'user', 'password') + markSecretArgument(5); + } + } + + void findS3TableEngineSecretArguments() + { + if (isNamedCollectionName(0)) + { + /// S3(named_collection, ..., secret_access_key = 'secret_access_key') + findSecretNamedArgument("secret_access_key", 1); + return; + } + /// We replace 'aws_secret_access_key' with '[HIDDEN'] for the following signatures: /// S3('url', 'aws_access_key_id', 'aws_secret_access_key', 'format') /// S3('url', 'aws_access_key_id', 'aws_secret_access_key', 'format', 'compression') @@ -322,12 +387,12 @@ namespace /// But we should check the number of arguments first because we don't need to do that replacements in case of /// S3('url' [, 'format' [, 'compression']]) if (arguments->size() < 4) - return npos; + return; - return {2, 3}; + markSecretArgument(2); } - std::pair findDatabaseEngineSecretArguments() const + void findDatabaseEngineSecretArguments() { const String & engine_name = function.name; if ((engine_name == "MySQL") || (engine_name == "MaterializeMySQL") || @@ -335,31 +400,71 @@ namespace (engine_name == "MaterializedPostgreSQL")) { /// MySQL('host:port', 'database', 'user', 'password') - /// PostgreSQL('host:port', 'database', 'user', 'password', ...) - return {3, 4}; - } - else - { - return npos; + /// PostgreSQL('host:port', 'database', 'user', 'password') + findMySQLDatabaseSecretArguments(); } } - std::pair findBackupNameSecretArguments() const + void findMySQLDatabaseSecretArguments() + { + if (isNamedCollectionName(0)) + { + /// MySQL(named_collection, ..., password = 'password', ...) 
+ findSecretNamedArgument("password", 1); + } + else + { + /// MySQL('host:port', 'database', 'user', 'password') + markSecretArgument(3); + } + } + + void findBackupNameSecretArguments() { const String & engine_name = function.name; if (engine_name == "S3") { /// BACKUP ... TO S3(url, [aws_access_key_id, aws_secret_access_key]) - return {2, 3}; - } - else - { - return npos; + markSecretArgument(2); } } - const ASTFunction & function; - const ASTs * arguments = nullptr; + /// Whether a specified argument can be the name of a named collection? + bool isNamedCollectionName(size_t arg_idx) const + { + if (arguments->size() <= arg_idx) + return false; + + const auto * identifier = (*arguments)[arg_idx]->as(); + return identifier != nullptr; + } + + /// Looks for a secret argument with a specified name. This function looks for arguments in format `key=value` where the key is specified. + void findSecretNamedArgument(const std::string_view & key, size_t start = 0) + { + for (size_t i = start; i < arguments->size(); ++i) + { + const auto & argument = (*arguments)[i]; + const auto * equals_func = argument->as(); + if (!equals_func || (equals_func->name != "equals")) + continue; + + const auto * expr_list = equals_func->arguments->as(); + if (!expr_list) + continue; + + const auto & equal_args = expr_list->children; + if (equal_args.size() != 2) + continue; + + String found_key; + if (!tryGetStringFromArgument(*equal_args[0], &found_key)) + continue; + + if (found_key == key) + markSecretArgument(i, /* argument_is_named= */ true); + } + } }; } @@ -966,32 +1071,39 @@ void ASTFunction::formatImplWithoutAlias(const FormatSettings & settings, Format && (name == "match" || name == "extract" || name == "extractAll" || name == "replaceRegexpOne" || name == "replaceRegexpAll"); - auto secret_arguments = std::make_pair(static_cast(-1), static_cast(-1)); + FunctionSecretArgumentsFinder::Result secret_arguments; if (!settings.show_secrets) - secret_arguments = FunctionSecretArgumentsFinder(*this).getRange(); + secret_arguments = FunctionSecretArgumentsFinder{*this}.getResult(); for (size_t i = 0, size = arguments->children.size(); i < size; ++i) { if (i != 0) settings.ostr << ", "; - if (arguments->children[i]->as()) + + const auto & argument = arguments->children[i]; + if (argument->as()) settings.ostr << "SETTINGS "; - if (!settings.show_secrets && (secret_arguments.first <= i) && (i < secret_arguments.second)) + if (!settings.show_secrets && (secret_arguments.start <= i) && (i < secret_arguments.start + secret_arguments.count)) { + if (secret_arguments.are_named) + { + assert_cast(argument.get())->arguments->children[0]->formatImpl(settings, state, nested_dont_need_parens); + settings.ostr << (settings.hilite ? hilite_operator : "") << " = " << (settings.hilite ? hilite_none : ""); + } settings.ostr << "'[HIDDEN]'"; - if (size - 1 < secret_arguments.second) + if (size <= secret_arguments.start + secret_arguments.count && !secret_arguments.are_named) break; /// All other arguments should also be hidden. 
continue; } if ((i == 1) && special_hilite_regexp - && highlightStringLiteralWithMetacharacters(arguments->children[i], settings, "|()^$.[]?*+{:-")) + && highlightStringLiteralWithMetacharacters(argument, settings, "|()^$.[]?*+{:-")) { continue; } - arguments->children[i]->formatImpl(settings, state, nested_dont_need_parens); + argument->formatImpl(settings, state, nested_dont_need_parens); } } @@ -1005,14 +1117,7 @@ void ASTFunction::formatImplWithoutAlias(const FormatSettings & settings, Format bool ASTFunction::hasSecretParts() const { - if (arguments) - { - size_t num_arguments = arguments->children.size(); - auto secret_arguments = FunctionSecretArgumentsFinder(*this).getRange(); - if ((secret_arguments.first < num_arguments) && (secret_arguments.first < secret_arguments.second)) - return true; - } - return childrenHaveSecretParts(); + return (FunctionSecretArgumentsFinder{*this}.getResult().count > 0) || childrenHaveSecretParts(); } String getFunctionName(const IAST * ast) diff --git a/src/Parsers/ASTKillQueryQuery.h b/src/Parsers/ASTKillQueryQuery.h index 88a1be38766..20db5576fa4 100644 --- a/src/Parsers/ASTKillQueryQuery.h +++ b/src/Parsers/ASTKillQueryQuery.h @@ -42,6 +42,8 @@ public: { return removeOnCluster(clone()); } + + QueryKind getQueryKind() const override { return QueryKind::KillQuery; } }; } diff --git a/src/Parsers/ASTOptimizeQuery.h b/src/Parsers/ASTOptimizeQuery.h index c53a61a66dc..67721da4176 100644 --- a/src/Parsers/ASTOptimizeQuery.h +++ b/src/Parsers/ASTOptimizeQuery.h @@ -54,6 +54,8 @@ public: { return removeOnCluster(clone(), params.default_database); } + + QueryKind getQueryKind() const override { return QueryKind::Optimize; } }; } diff --git a/src/Parsers/ASTQualifiedAsterisk.h b/src/Parsers/ASTQualifiedAsterisk.h index e67b4cd82dd..079b83ae171 100644 --- a/src/Parsers/ASTQualifiedAsterisk.h +++ b/src/Parsers/ASTQualifiedAsterisk.h @@ -17,8 +17,13 @@ public: ASTPtr clone() const override { auto clone = std::make_shared(*this); + clone->children.clear(); - if (transformers) { clone->transformers = transformers->clone(); clone->children.push_back(clone->transformers); } + if (transformers) + { + clone->transformers = transformers->clone(); + clone->children.push_back(clone->transformers); + } clone->qualifier = qualifier->clone(); clone->children.push_back(clone->qualifier); diff --git a/src/Parsers/ASTQueryWithTableAndOutput.h b/src/Parsers/ASTQueryWithTableAndOutput.h index 233028c5023..10f0e76f29c 100644 --- a/src/Parsers/ASTQueryWithTableAndOutput.h +++ b/src/Parsers/ASTQueryWithTableAndOutput.h @@ -49,6 +49,8 @@ public: return res; } + QueryKind getQueryKind() const override { return QueryKind::Show; } + protected: void formatQueryImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override { diff --git a/src/Parsers/ASTSelectIntersectExceptQuery.h b/src/Parsers/ASTSelectIntersectExceptQuery.h index ad962fe25e2..db00fb3df87 100644 --- a/src/Parsers/ASTSelectIntersectExceptQuery.h +++ b/src/Parsers/ASTSelectIntersectExceptQuery.h @@ -25,7 +25,7 @@ public: void formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override; - QueryKind getQueryKind() const override { return QueryKind::SelectIntersectExcept; } + QueryKind getQueryKind() const override { return QueryKind::Select; } ASTs getListOfSelects() const; diff --git a/src/Parsers/ASTSetQuery.h b/src/Parsers/ASTSetQuery.h index 0e1933cbf1a..40abe2de31d 100644 --- a/src/Parsers/ASTSetQuery.h +++ b/src/Parsers/ASTSetQuery.h @@ -35,6 
+35,8 @@ public: void formatImpl(const FormatSettings & format, FormatState &, FormatStateStacked) const override; void updateTreeHashImpl(SipHash & hash_state) const override; + + QueryKind getQueryKind() const override { return QueryKind::Set; } }; } diff --git a/src/Parsers/ASTShowEngineQuery.h b/src/Parsers/ASTShowEngineQuery.h new file mode 100644 index 00000000000..7a447a4f24b --- /dev/null +++ b/src/Parsers/ASTShowEngineQuery.h @@ -0,0 +1,17 @@ +#pragma once + +#include <Parsers/ASTQueryWithOutput.h> + + +namespace DB +{ + +struct ASTShowEngineAndQueryNames +{ + static constexpr auto ID = "ShowEngineQuery"; + static constexpr auto Query = "SHOW ENGINES"; +}; + +using ASTShowEnginesQuery = ASTQueryWithOutputImpl<ASTShowEngineAndQueryNames>; + +} diff --git a/src/Parsers/ASTShowTablesQuery.h b/src/Parsers/ASTShowTablesQuery.h index c3e7f0799d4..b58d65e37ab 100644 --- a/src/Parsers/ASTShowTablesQuery.h +++ b/src/Parsers/ASTShowTablesQuery.h @@ -39,6 +39,8 @@ public: ASTPtr clone() const override; + QueryKind getQueryKind() const override { return QueryKind::Show; } + protected: void formatLike(const FormatSettings & settings) const; void formatLimit(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const; diff --git a/src/Parsers/ASTSystemQuery.h b/src/Parsers/ASTSystemQuery.h index 4290f4e2a94..80a891712a6 100644 --- a/src/Parsers/ASTSystemQuery.h +++ b/src/Parsers/ASTSystemQuery.h @@ -25,12 +25,15 @@ public: DROP_INDEX_MARK_CACHE, DROP_INDEX_UNCOMPRESSED_CACHE, DROP_MMAP_CACHE, - DROP_QUERY_RESULT_CACHE, + DROP_QUERY_CACHE, #if USE_EMBEDDED_COMPILER DROP_COMPILED_EXPRESSION_CACHE, #endif DROP_FILESYSTEM_CACHE, DROP_SCHEMA_CACHE, +#if USE_AWS_S3 + DROP_S3_CLIENT_CACHE, +#endif STOP_LISTEN_QUERIES, START_LISTEN_QUERIES, RESTART_REPLICAS, diff --git a/src/Parsers/ASTTransactionControl.cpp b/src/Parsers/ASTTransactionControl.cpp index 3ff29d9e43e..3106d432c90 100644 --- a/src/Parsers/ASTTransactionControl.cpp +++ b/src/Parsers/ASTTransactionControl.cpp @@ -24,6 +24,21 @@ void ASTTransactionControl::formatImpl(const FormatSettings & format /*state*/, } } +IAST::QueryKind ASTTransactionControl::getQueryKind() const +{ + switch (action) + { + case BEGIN: + return QueryKind::Begin; + case COMMIT: + return QueryKind::Commit; + case ROLLBACK: + return QueryKind::Rollback; + case SET_SNAPSHOT: + return QueryKind::SetTransactionSnapshot; + } +} + void ASTTransactionControl::updateTreeHashImpl(SipHash & hash_state) const { hash_state.update(action); diff --git a/src/Parsers/ASTTransactionControl.h b/src/Parsers/ASTTransactionControl.h index 06f578ff138..fb0058144dd 100644 --- a/src/Parsers/ASTTransactionControl.h +++ b/src/Parsers/ASTTransactionControl.h @@ -27,6 +27,8 @@ public: void formatImpl(const FormatSettings & format, FormatState & /*state*/, FormatStateStacked /*frame*/) const override; void updateTreeHashImpl(SipHash & hash_state) const override; + + QueryKind getQueryKind() const override; }; } diff --git a/src/Parsers/ASTUseQuery.h b/src/Parsers/ASTUseQuery.h index 16d449f905f..f767a6bbdb7 100644 --- a/src/Parsers/ASTUseQuery.h +++ b/src/Parsers/ASTUseQuery.h @@ -21,6 +21,8 @@ public: ASTPtr clone() const override { return std::make_shared<ASTUseQuery>(*this); } + QueryKind getQueryKind() const override { return QueryKind::Use; } + protected: void formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override { diff --git a/src/Parsers/ASTWatchQuery.h b/src/Parsers/ASTWatchQuery.h index ea02c18a51d..29dadd71675 100644 --- a/src/Parsers/ASTWatchQuery.h +++ b/src/Parsers/ASTWatchQuery.h @@ -37,6 
+37,8 @@ public: return res; } + QueryKind getQueryKind() const override { return QueryKind::Create; } + protected: void formatQueryImpl(const FormatSettings & s, FormatState & state, FormatStateStacked frame) const override { diff --git a/src/Parsers/Access/ASTCreateQuotaQuery.h b/src/Parsers/Access/ASTCreateQuotaQuery.h index b6add6d8321..f5eb59800ec 100644 --- a/src/Parsers/Access/ASTCreateQuotaQuery.h +++ b/src/Parsers/Access/ASTCreateQuotaQuery.h @@ -55,5 +55,7 @@ public: void formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override; void replaceCurrentUserTag(const String & current_user_name) const; ASTPtr getRewrittenASTWithoutOnCluster(const WithoutOnClusterASTRewriteParams &) const override { return removeOnCluster(clone()); } + + QueryKind getQueryKind() const override { return QueryKind::Create; } }; } diff --git a/src/Parsers/Access/ASTCreateRoleQuery.h b/src/Parsers/Access/ASTCreateRoleQuery.h index 906ea683e1a..42d1a4031b6 100644 --- a/src/Parsers/Access/ASTCreateRoleQuery.h +++ b/src/Parsers/Access/ASTCreateRoleQuery.h @@ -35,5 +35,7 @@ public: ASTPtr clone() const override; void formatImpl(const FormatSettings & format, FormatState &, FormatStateStacked) const override; ASTPtr getRewrittenASTWithoutOnCluster(const WithoutOnClusterASTRewriteParams &) const override { return removeOnCluster(clone()); } + + QueryKind getQueryKind() const override { return QueryKind::Create; } }; } diff --git a/src/Parsers/Access/ASTCreateRowPolicyQuery.h b/src/Parsers/Access/ASTCreateRowPolicyQuery.h index b96cc325524..32d98fab822 100644 --- a/src/Parsers/Access/ASTCreateRowPolicyQuery.h +++ b/src/Parsers/Access/ASTCreateRowPolicyQuery.h @@ -51,5 +51,7 @@ public: void replaceCurrentUserTag(const String & current_user_name) const; void replaceEmptyDatabase(const String & current_database) const; + + QueryKind getQueryKind() const override { return QueryKind::Create; } }; } diff --git a/src/Parsers/Access/ASTCreateSettingsProfileQuery.h b/src/Parsers/Access/ASTCreateSettingsProfileQuery.h index 441ec0f5233..c1a64998f29 100644 --- a/src/Parsers/Access/ASTCreateSettingsProfileQuery.h +++ b/src/Parsers/Access/ASTCreateSettingsProfileQuery.h @@ -41,5 +41,6 @@ public: void formatImpl(const FormatSettings & format, FormatState &, FormatStateStacked) const override; void replaceCurrentUserTag(const String & current_user_name) const; ASTPtr getRewrittenASTWithoutOnCluster(const WithoutOnClusterASTRewriteParams &) const override { return removeOnCluster(clone()); } + QueryKind getQueryKind() const override { return QueryKind::Create; } }; } diff --git a/src/Parsers/Access/ASTCreateUserQuery.h b/src/Parsers/Access/ASTCreateUserQuery.h index 412775d3bf3..a3571dd6c61 100644 --- a/src/Parsers/Access/ASTCreateUserQuery.h +++ b/src/Parsers/Access/ASTCreateUserQuery.h @@ -63,5 +63,7 @@ public: void formatImpl(const FormatSettings & format, FormatState &, FormatStateStacked) const override; bool hasSecretParts() const override; ASTPtr getRewrittenASTWithoutOnCluster(const WithoutOnClusterASTRewriteParams &) const override { return removeOnCluster(clone()); } + + QueryKind getQueryKind() const override { return QueryKind::Create; } }; } diff --git a/src/Parsers/Access/ASTDropAccessEntityQuery.h b/src/Parsers/Access/ASTDropAccessEntityQuery.h index 7ca672ad989..f3a065c50df 100644 --- a/src/Parsers/Access/ASTDropAccessEntityQuery.h +++ b/src/Parsers/Access/ASTDropAccessEntityQuery.h @@ -29,5 +29,7 @@ public: ASTPtr getRewrittenASTWithoutOnCluster(const 
WithoutOnClusterASTRewriteParams &) const override { return removeOnCluster(clone()); } void replaceEmptyDatabase(const String & current_database) const; + + QueryKind getQueryKind() const override { return QueryKind::Drop; } }; } diff --git a/src/Parsers/Access/ASTSetRoleQuery.h b/src/Parsers/Access/ASTSetRoleQuery.h index f0170ae6af2..1146205af2b 100644 --- a/src/Parsers/Access/ASTSetRoleQuery.h +++ b/src/Parsers/Access/ASTSetRoleQuery.h @@ -27,5 +27,7 @@ public: String getID(char) const override; ASTPtr clone() const override; void formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override; + + QueryKind getQueryKind() const override { return QueryKind::Set; } }; } diff --git a/src/Parsers/Access/ASTShowAccessEntitiesQuery.h b/src/Parsers/Access/ASTShowAccessEntitiesQuery.h index e633a4b506a..9957f8d5705 100644 --- a/src/Parsers/Access/ASTShowAccessEntitiesQuery.h +++ b/src/Parsers/Access/ASTShowAccessEntitiesQuery.h @@ -31,6 +31,8 @@ public: void replaceEmptyDatabase(const String & current_database); + QueryKind getQueryKind() const override { return QueryKind::Show; } + protected: void formatQueryImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override; diff --git a/src/Parsers/Access/ASTShowCreateAccessEntityQuery.h b/src/Parsers/Access/ASTShowCreateAccessEntityQuery.h index 27f13587033..657160a96dd 100644 --- a/src/Parsers/Access/ASTShowCreateAccessEntityQuery.h +++ b/src/Parsers/Access/ASTShowCreateAccessEntityQuery.h @@ -40,6 +40,8 @@ public: void replaceEmptyDatabase(const String & current_database); + QueryKind getQueryKind() const override { return QueryKind::Show; } + protected: String getKeyword() const; void formatQueryImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override; diff --git a/src/Parsers/Access/ASTShowGrantsQuery.h b/src/Parsers/Access/ASTShowGrantsQuery.h index 04764fe3502..ceaf2f586cb 100644 --- a/src/Parsers/Access/ASTShowGrantsQuery.h +++ b/src/Parsers/Access/ASTShowGrantsQuery.h @@ -17,5 +17,7 @@ public: String getID(char) const override; ASTPtr clone() const override; void formatQueryImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override; + + QueryKind getQueryKind() const override { return QueryKind::Show; } }; } diff --git a/src/Parsers/IAST.h b/src/Parsers/IAST.h index c1520a6fca7..627b1174b33 100644 --- a/src/Parsers/IAST.h +++ b/src/Parsers/IAST.h @@ -253,16 +253,32 @@ public: enum class QueryKind : uint8_t { None = 0, - Alter, + Select, + Insert, + Delete, Create, Drop, - Grant, - Insert, Rename, + Optimize, + Check, + Alter, + Grant, Revoke, - SelectIntersectExcept, - Select, System, + Set, + Use, + Show, + Exists, + Describe, + Explain, + Backup, + Restore, + KillQuery, + ExternalDDL, + Begin, + Commit, + Rollback, + SetTransactionSnapshot, }; /// Return QueryKind of this AST query. 
virtual QueryKind getQueryKind() const { return QueryKind::None; } diff --git a/src/Parsers/ParserDeleteQuery.cpp b/src/Parsers/ParserDeleteQuery.cpp index 7b8057d227e..7b27651d82d 100644 --- a/src/Parsers/ParserDeleteQuery.cpp +++ b/src/Parsers/ParserDeleteQuery.cpp @@ -18,6 +18,7 @@ bool ParserDeleteQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) ParserKeyword s_where("WHERE"); ParserExpression parser_exp_elem; ParserKeyword s_settings("SETTINGS"); + ParserKeyword s_on{"ON"}; if (s_delete.ignore(pos, expected)) { @@ -27,6 +28,14 @@ bool ParserDeleteQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) if (!parseDatabaseAndTableAsAST(pos, expected, query->database, query->table)) return false; + if (s_on.ignore(pos, expected)) + { + String cluster_str; + if (!ASTQueryWithOnCluster::parse(pos, cluster_str, expected)) + return false; + query->cluster = cluster_str; + } + if (!s_where.ignore(pos, expected)) return false; diff --git a/src/Parsers/ParserQueryWithOutput.cpp b/src/Parsers/ParserQueryWithOutput.cpp index 163e71e3201..7024d8cbe11 100644 --- a/src/Parsers/ParserQueryWithOutput.cpp +++ b/src/Parsers/ParserQueryWithOutput.cpp @@ -15,6 +15,7 @@ #include #include #include +#include <Parsers/ParserShowEngineQuery.h> #include #include #include @@ -33,6 +34,7 @@ namespace DB bool ParserQueryWithOutput::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { ParserShowTablesQuery show_tables_p; + ParserShowEnginesQuery show_engine_p; ParserSelectWithUnionQuery select_p; ParserTablePropertiesQuery table_p; ParserDescribeTableQuery describe_table_p; @@ -60,6 +62,7 @@ bool ParserQueryWithOutput::parseImpl(Pos & pos, ASTPtr & node, Expected & expec || select_p.parse(pos, query, expected) || show_create_access_entity_p.parse(pos, query, expected) /// should be before `show_tables_p` || show_tables_p.parse(pos, query, expected) + || show_engine_p.parse(pos, query, expected) || table_p.parse(pos, query, expected) || describe_cache_p.parse(pos, query, expected) || describe_table_p.parse(pos, query, expected) diff --git a/src/Parsers/ParserShowEngineQuery.h b/src/Parsers/ParserShowEngineQuery.h new file mode 100644 index 00000000000..e06326436f1 --- /dev/null +++ b/src/Parsers/ParserShowEngineQuery.h @@ -0,0 +1,32 @@ +#pragma once + +#include <Parsers/IParserBase.h> +#include <Parsers/CommonParsers.h> +#include <Parsers/ExpressionElementParsers.h> +#include <Parsers/ASTShowEnginesQuery.h> + + +namespace DB +{ + +/** Query SHOW ENGINES + */ +class ParserShowEnginesQuery : public IParserBase +{ +protected: + const char * getName() const override { return "SHOW ENGINES query"; } + + bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override + { + auto query = std::make_shared<ASTShowEnginesQuery>(); + + if (!ParserKeyword("SHOW ENGINES").ignore(pos, expected)) + return false; + + node = query; + + return true; + } +}; + +} diff --git a/src/Parsers/TablePropertiesQueriesASTs.h b/src/Parsers/TablePropertiesQueriesASTs.h index 3de6feb2409..038936a0297 100644 --- a/src/Parsers/TablePropertiesQueriesASTs.h +++ b/src/Parsers/TablePropertiesQueriesASTs.h @@ -91,6 +91,8 @@ protected: settings.ostr << (settings.hilite ? hilite_keyword : "") << ASTExistsDatabaseQueryIDAndQueryNames::Query << " " << (settings.hilite ?
hilite_none : "") << backQuoteIfNeed(getDatabase()); } + + QueryKind getQueryKind() const override { return QueryKind::Exists; } }; class ASTShowCreateDatabaseQuery : public ASTQueryWithTableAndOutputImpl @@ -123,6 +125,8 @@ public: return res; } + QueryKind getQueryKind() const override { return QueryKind::Describe; } + protected: void formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override { diff --git a/src/Parsers/obfuscateQueries.cpp b/src/Parsers/obfuscateQueries.cpp index 09267148c79..909c86b0bf0 100644 --- a/src/Parsers/obfuscateQueries.cpp +++ b/src/Parsers/obfuscateQueries.cpp @@ -26,44 +26,448 @@ namespace const std::unordered_set keywords { - "CREATE", "DATABASE", "IF", "NOT", "EXISTS", "TEMPORARY", "TABLE", "ON", "CLUSTER", "DEFAULT", - "MATERIALIZED", "EPHEMERAL", "ALIAS", "ENGINE", "AS", "VIEW", "POPULATE", "SETTINGS", "ATTACH", "DETACH", - "DROP", "RENAME", "TO", "ALTER", "ADD", "MODIFY", "CLEAR", "COLUMN", "AFTER", "COPY", - "PROJECT", "PRIMARY", "KEY", "CHECK", "PARTITION", "PART", "FREEZE", "FETCH", "FROM", "SHOW", - "INTO", "OUTFILE", "FORMAT", "TABLES", "DATABASES", "LIKE", "PROCESSLIST", "CASE", "WHEN", "THEN", - "ELSE", "END", "DESCRIBE", "DESC", "USE", "SET", "OPTIMIZE", "FINAL", "DEDUPLICATE", "INSERT", - "VALUES", "SELECT", "DISTINCT", "SAMPLE", "ARRAY", "JOIN", "GLOBAL", "LOCAL", "ANY", "ALL", - "INNER", "LEFT", "RIGHT", "FULL", "OUTER", "CROSS", "USING", "PREWHERE", "WHERE", "GROUP", - "BY", "WITH", "TOTALS", "HAVING", "ORDER", "COLLATE", "LIMIT", "UNION", "AND", "OR", - "ASC", "IN", "KILL", "QUERY", "SYNC", "ASYNC", "TEST", "BETWEEN", "TRUNCATE", "USER", - "ROLE", "PROFILE", "QUOTA", "POLICY", "ROW", "GRANT", "REVOKE", "OPTION", "ADMIN", "EXCEPT", - "REPLACE", "IDENTIFIED", "HOST", "NAME", "READONLY", "WRITABLE", "PERMISSIVE", "FOR", "RESTRICTIVE", "RANDOMIZED", - "INTERVAL", "LIMITS", "ONLY", "TRACKING", "IP", "REGEXP", "ILIKE", "DICTIONARY", "OFFSET", "TRIM", - "LTRIM", "RTRIM", "BOTH", "LEADING", "TRAILING" + "!=", + "", + "%", + "*", + "+", + "-", + "->", + ".", + "/", + ":", + "::", + "<", + "<=", + "<>", + "=", + "==", + ">", + ">=", + "?", + "[", + "]+", + "]+|[", + "^[", + "||", + "]+$", + "ACCESS", + "ACTION", + "ADD", + "ADMIN", + "AFTER", + "ALGORITHM", + "ALIAS", + "ALL", + "ALLOWED_LATENESS", + "ALTER", + "AND", + "ANTI", + "ANY", + "APPLY", + "ARRAY", + "AS", + "ASC", + "ASCENDING", + "ASOF", + "ASSUME", + "AST", + "ASYNC", + "ATTACH", + "AUTO_INCREMENT", + "BACKUP", + "BASE_BACKUP", + "BEGIN", + "BETWEEN", + "BIDIRECTIONAL", + "BOTH", + "BY", + "CACHE", + "CACHES", + "CASCADE", + "CASE", + "CASEWITHEXPRESSION", + "CAST", + "CHANGE", + "CHANGEABLE_IN_READONLY", + "CHANGED", + "CHAR", + "CHARACTER", + "CHECK", + "CLEAR", + "CLUSTER", + "CLUSTER_HOST_IDS", + "CLUSTERS", + "CN", + "CODEC", + "COLLATE", + "COLLECTION", + "COLUMN", + "COLUMNS", + "COMMENT", + "COMMIT", + "COMPRESSION", + "CONCAT", + "CONSTRAINT", + "CREATE", + "CROSS", + "CUBE", + "CURRENT", + "CURRENT_USER", + "DATABASE", + "DATABASES", + "DATE", + "DATE_ADD", + "DATEADD", + "DATE_DIFF", + "DATEDIFF", + "DATE_SUB", + "DATESUB", + "DAY", + "DD", + "DDL", + "DEDUPLICATE", + "DEFAULT", + "DELAY", + "DELETE", + "DESC", + "DESCENDING", + "DESCRIBE", + "DETACH", + "DETACHED", + "DICTIONARIES", + "DICTIONARY", + "DISK", + "DISTINCT", + "DIV", + "DOUBLE_SHA1_HASH", + "DROP", + "ELSE", + "EMPTY", + "ENABLED", + "END", + "ENFORCED", + "ENGINE", + "EPHEMERAL", + "EQUALS", + "ESTIMATE", + "EVENT", + "EVENTS", + "EXCEPT", + "EXCHANGE", + 
"EXISTS", + "EXPLAIN", + "EXPRESSION", + "EXTERNAL", + "EXTRACT", + "FALSE", + "FETCH", + "FILE", + "FILESYSTEM", + "FILL", + "FILTER", + "FINAL", + "FIRST", + "FOLLOWING", + "FOR", + "FOREIGN", + "FORMAT", + "FREEZE", + "FROM", + "FULL", + "FULLTEXT", + "FUNCTION", + "GLOBAL", + "GRANT", + "GRANTEES", + "GRANTS", + "GRANULARITY", + "GREATER", + "GREATEROREQUALS", + "GROUP", + "GROUPING", + "GROUPS", + "HASH", + "HAVING", + "HDFS", + "HH", + "HIERARCHICAL", + "HOST", + "HOUR", + "ID", + "IDENTIFIED", + "IF", + "ILIKE", + "IN", + "INDEX", + "INFILE", + "INHERIT", + "INJECTIVE", + "INNER", + "INSERT", + "INTERPOLATE", + "INTERSECT", + "INTERVAL", + "INTO", + "INVISIBLE", + "IP", + "IS", + "IS_OBJECT_ID", + "JOIN", + "KEY", + "KEYED", + "KILL", + "LAMBDA", + "LARGE", + "LAST", + "LAYOUT", + "LEADING", + "LEFT", + "LESS", + "LESSOREQUALS", + "LEVEL", + "LIFETIME", + "LIKE", + "LIMIT", + "LIMITS", + "LINEAR", + "LIST", + "LITERAL", + "LIVE", + "LOCAL", + "LTRIM", + "MATCH", + "MATERIALIZE", + "MATERIALIZED", + "MAX", + "MCS", + "MEMORY", + "MI", + "MICROSECOND", + "MILLISECOND", + "MIN", + "MINUS", + "MINUTE", + "MM", + "MOD", + "MODIFY", + "MONTH", + "MOVE", + "MS", + "MULTIIF", + "MUTATION", + "NAME", + "NAMED", + "NANOSECOND", + "NEXT", + "NO", + "NONE", + "NOT", + "NOTEQUALS", + "NOTIN", + "NS", + "NULL", + "NULLS", + "OBJECT", + "OFFSET", + "ON", + "ONLY", + "OPTIMIZE", + "OPTION", + "OR", + "ORDER", + "OUTER", + "OUTFILE", + "OVER", + "OVERRIDE", + "PART", + "PARTIAL", + "PARTITION", + "PARTITIONS", + "PART_MOVE_TO_SHARD", + "PERMANENTLY", + "PERMISSIVE", + "PIPELINE", + "PLAN", + "PLUS", + "POLICY", + "POPULATE", + "POSITION", + "PRECEDING", + "PRECISION", + "PREWHERE", + "PRIMARY", + "PRIVILEGES", + "PROCESSLIST", + "PROFILE", + "PROJECTION", + "QQ", + "QUARTER", + "QUERY", + "QUOTA", + "RANDOMIZED", + "RANGE", + "READONLY", + "REALM", + "RECOMPRESS", + "REFERENCES", + "REFRESH", + "REGEXP", + "REGEXPQUOTEMETA", + "REMOVE", + "RENAME", + "REPLACE", + "REPLACEREGEXPALL", + "REPLACEREGEXPONE", + "RESET", + "RESTORE", + "RESTRICT", + "RESTRICTIVE", + "RESUME", + "REVOKE", + "RIGHT", + "ROLE", + "ROLES", + "ROLLBACK", + "ROLLUP", + "ROW", + "ROWS", + "RTRIM", + "S3", + "SALT", + "SAMPLE", + "SECOND", + "SELECT", + "SEMI", + "SERVER", + "SET", + "SETS", + "SETTING", + "SETTINGS", + "SHA256_HASH", + "SHARD", + "SHOW", + "SIGNED", + "SIMPLE", + "SINGLEVALUEORNULL", + "SNAPSHOT", + "SOURCE", + "SPATIAL", + "SS", + "STDOUT", + "STEP", + "STORAGE", + "STRICT", + "STRICTLY_ASCENDING", + "SUBPARTITION", + "SUBPARTITIONS", + "SUBSTRING", + "SUSPEND", + "SYNC", + "SYNTAX", + "SYSTEM", + "TABLE", + "TABLES", + "TEMPORARY", + "TEST", + "THAN", + "THEN", + "TIES", + "TIMESTAMP", + "TIMESTAMP_ADD", + "TIMESTAMPADD", + "TIMESTAMP_DIFF", + "TIMESTAMPDIFF", + "TIMESTAMP_SUB", + "TIMESTAMPSUB", + "TO", + "TODATE", + "TODATETIME", + "TOP", + "TOTALS", + "TRACKING", + "TRAILING", + "TRANSACTION", + "TREE", + "TRIGGER", + "TRIM", + "TRIMBOTH", + "TRIMLEFT", + "TRIMRIGHT", + "TRUE", + "TRUNCATE", + "TTL", + "TUPLE", + "TYPE", + "UNBOUNDED", + "UNFREEZE", + "UNION", + "UNIQUE", + "UNSIGNED", + "UNTUPLE", + "UPDATE", + "URL", + "USE", + "USER", + "USING", + "UUID", + "VALUES", + "VARYING", + "VIEW", + "VIEWIFPERMITTED", + "VISIBLE", + "VOLUME", + "WATCH", + "WATERMARK", + "WEEK", + "WHEN", + "WHERE", + "WINDOW", + "WITH", + "WK", + "WRITABLE", + "YEAR", + "YYYY", + "ZKPATH" }; +/// We want to keep some words inside quotes. 
For example we want to keep HOUR inside: +/// Select now() + INTERVAL '1 HOUR' const std::unordered_set<std::string_view> keep_words { - "id", "name", "value", "num", - "Id", "Name", "Value", "Num", - "ID", "NAME", "VALUE", "NUM", + "DAY", + "HOUR", + "ID", + "NAME", + "NANOSECOND", + "MICROSECOND", + "MILLISECOND", + "SECOND", + "MINUTE", + "NUM", + "VALUE", + "WEEK", + "MONTH", + "QUARTER", + "YEAR" }; /// The list of nouns collected from here: http://www.desiquintans.com/nounlist, Public domain. +/// Removed nouns with spaces, words with non-ascii chars and keywords std::initializer_list<std::string_view> nouns { "aardvark", "abacus", "abbey", "abbreviation", "abdomen", "ability", "abnormality", "abolishment", "abortion", -"abrogation", "absence", "abundance", "abuse", "academics", "academy", "accelerant", "accelerator", "accent", "acceptance", "access", +"abrogation", "absence", "abundance", "abuse", "academics", "academy", "accelerant", "accelerator", "accent", "acceptance", "accessory", "accident", "accommodation", "accompanist", "accomplishment", "accord", "accordance", "accordion", "account", "accountability", "accountant", "accounting", "accuracy", "accusation", "acetate", "achievement", "achiever", "acid", "acknowledgment", "acorn", "acoustics",
"ark", "arm", "armadillo", "armament", -"armchair", "armoire", "armor", "armour", "armpit", "armrest", "army", "arrangement", "array", "arrest", "arrival", "arrogance", "arrow", +"armchair", "armoire", "armor", "armour", "armpit", "armrest", "army", "arrangement", "arrest", "arrival", "arrogance", "arrow", "art", "artery", "arthur", "artichoke", "article", "artifact", "artificer", "artist", "ascend", "ascent", "ascot", "ash", "ashram", "ashtray", "aside", "asparagus", "aspect", "asphalt", "aspic", "assassination", "assault", "assembly", "assertion", "assessment", "asset", "assignment", "assist", "assistance", "assistant", "associate", "association", "assumption", "assurance", "asterisk", "astrakhan", "astrolabe", @@ -85,7 +489,7 @@ std::initializer_list nouns "attraction", "attribute", "auction", "audience", "audit", "auditorium", "aunt", "authentication", "authenticity", "author", "authorisation", "authority", "authorization", "auto", "autoimmunity", "automation", "automaton", "autumn", "availability", "avalanche", "avenue", "average", "avocado", "award", "awareness", "awe", "axis", "azimuth", "babe", "baboon", "babushka", "baby", "bachelor", "back", "backbone", -"backburn", "backdrop", "background", "backpack", "backup", "backyard", "bacon", "bacterium", "badge", "badger", "bafflement", "bag", +"backburn", "backdrop", "background", "backpack", "backyard", "bacon", "bacterium", "badge", "badger", "bafflement", "bag", "bagel", "baggage", "baggie", "baggy", "bagpipe", "bail", "bait", "bake", "baker", "bakery", "bakeware", "balaclava", "balalaika", "balance", "balcony", "ball", "ballet", "balloon", "balloonist", "ballot", "ballpark", "bamboo", "ban", "banana", "band", "bandana", "bandanna", "bandolier", "bandwidth", "bangle", "banjo", "bank", "bankbook", "banker", "banking", "bankruptcy", "banner", "banquette", "banyan", @@ -125,16 +529,16 @@ std::initializer_list nouns "captain", "caption", "captor", "car", "carabao", "caramel", "caravan", "carbohydrate", "carbon", "carboxyl", "card", "cardboard", "cardigan", "care", "career", "cargo", "caribou", "carload", "carnation", "carnival", "carol", "carotene", "carp", "carpenter", "carpet", "carpeting", "carport", "carriage", "carrier", "carrot", "carry", "cart", "cartel", "carter", "cartilage", "cartload", "cartoon", "cartridge", "carving", -"cascade", "casement", "cash", "cashew", "cashier", "casino", "casket", "cassava", "casserole", "cassock", "cast", "castanet", +"casement", "cash", "cashew", "cashier", "casino", "casket", "cassava", "casserole", "cassock", "castanet", "castle", "casualty", "cat", "catacomb", "catalogue", "catalysis", "catalyst", "catamaran", "catastrophe", "catch", "catcher", "category", "caterpillar", "cathedral", "cation", "catsup", "cattle", "cauliflower", "causal", "cause", "causeway", "caution", "cave", "caviar", "cayenne", "ceiling", "celebration", "celebrity", "celeriac", "celery", "cell", "cellar", "cello", "celsius", "cement", "cemetery", "cenotaph", "census", "cent", "center", "centimeter", "centre", "centurion", "century", "cephalopod", "ceramic", "ceramics", "cereal", "ceremony", "certainty", "certificate", "certification", "cesspool", "chafe", "chain", "chainstay", "chair", "chairlift", "chairman", "chairperson", -"chaise", "chalet", "chalice", "chalk", "challenge", "chamber", "champagne", "champion", "championship", "chance", "chandelier", "change", -"channel", "chaos", "chap", "chapel", "chaplain", "chapter", "character", "characteristic", "characterization", "chard", "charge", "charger", +"chaise", "chalet", 
"chalice", "chalk", "challenge", "chamber", "champagne", "champion", "championship", "chance", "chandelier", +"channel", "chaos", "chap", "chapel", "chaplain", "chapter", "characteristic", "characterization", "chard", "charge", "charger", "charity", "charlatan", "charm", "charset", "chart", "charter", "chasm", "chassis", "chastity", "chasuble", "chateau", "chatter", "chauffeur", -"chauvinist", "check", "checkbook", "checking", "checkout", "checkroom", "cheddar", "cheek", "cheer", "cheese", "cheesecake", "cheetah", +"chauvinist", "checkbook", "checking", "checkout", "checkroom", "cheddar", "cheek", "cheer", "cheese", "cheesecake", "cheetah", "chef", "chem", "chemical", "chemistry", "chemotaxis", "cheque", "cherry", "chess", "chest", "chestnut", "chick", "chicken", "chicory", "chief", "chiffonier", "child", "childbirth", "childhood", "chili", "chill", "chime", "chimpanzee", "chin", "chinchilla", "chino", "chip", "chipmunk", "chivalry", "chive", "chives", "chocolate", "choice", "choir", "choker", "cholesterol", "choosing", "chop", @@ -146,13 +550,13 @@ std::initializer_list nouns "claw", "clay", "cleaner", "clearance", "clearing", "cleat", "cleavage", "clef", "cleft", "clergyman", "cleric", "clerk", "click", "client", "cliff", "climate", "climb", "clinic", "clip", "clipboard", "clipper", "cloak", "cloakroom", "clock", "clockwork", "clogs", "cloister", "clone", "close", "closet", "closing", "closure", "cloth", "clothes", "clothing", "cloud", "cloudburst", "clove", "clover", "cloves", -"club", "clue", "cluster", "clutch", "coach", "coal", "coalition", "coast", "coaster", "coat", "cob", "cobbler", "cobweb", +"club", "clue", "clutch", "coach", "coal", "coalition", "coast", "coaster", "coat", "cob", "cobbler", "cobweb", "cock", "cockpit", "cockroach", "cocktail", "cocoa", "coconut", "cod", "code", "codepage", "codling", "codon", "codpiece", "coevolution", "cofactor", "coffee", "coffin", "cohesion", "cohort", "coil", "coin", "coincidence", "coinsurance", "coke", "cold", "coleslaw", "coliseum", -"collaboration", "collagen", "collapse", "collar", "collard", "collateral", "colleague", "collection", "collectivisation", "collectivization", +"collaboration", "collagen", "collapse", "collar", "collard", "collateral", "colleague", "collectivisation", "collectivization", "collector", "college", "collision", "colloquy", "colon", "colonial", "colonialism", "colonisation", "colonization", "colony", "color", -"colorlessness", "colt", "column", "columnist", "comb", "combat", "combination", "combine", "comeback", "comedy", "comestible", "comfort", -"comfortable", "comic", "comics", "comma", "command", "commander", "commandment", "comment", "commerce", "commercial", "commission", +"colorlessness", "colt", "columnist", "comb", "combat", "combination", "combine", "comeback", "comedy", "comestible", "comfort", +"comfortable", "comic", "comics", "comma", "command", "commander", "commandment", "commerce", "commercial", "commission", "commitment", "committee", "commodity", "common", "commonsense", "commotion", "communicant", "communication", "communion", "communist", "community", "commuter", "company", "comparison", "compass", "compassion", "compassionate", "compensation", "competence", "competition", "competitor", "complaint", "complement", "completion", "complex", "complexity", "compliance", "complication", "complicity", "compliment", @@ -162,8 +566,8 @@ std::initializer_list nouns "confidentiality", "configuration", "confirmation", "conflict", "conformation", "confusion", "conga", "congo", "congregation", "congress", 
"congressman", "congressperson", "conifer", "connection", "connotation", "conscience", "consciousness", "consensus", "consent", "consequence", "conservation", "conservative", "consideration", "consignment", "consist", "consistency", "console", "consonant", "conspiracy", "conspirator", -"constant", "constellation", "constitution", "constraint", "construction", "consul", "consulate", "consulting", "consumer", "consumption", -"contact", "contact lens", "contagion", "container", "content", "contention", "contest", "context", "continent", "contingency", "continuity", +"constant", "constellation", "constitution", "construction", "consul", "consulate", "consulting", "consumer", "consumption", +"contact", "contagion", "container", "content", "contention", "contest", "context", "continent", "contingency", "continuity", "contour", "contract", "contractor", "contrail", "contrary", "contrast", "contribution", "contributor", "control", "controller", "controversy", "convection", "convenience", "convention", "conversation", "conversion", "convert", "convertible", "conviction", "cook", "cookbook", "cookie", "cooking", "coonskin", "cooperation", "coordination", "coordinator", "cop", "cope", "copper", "copy", "copying", @@ -175,33 +579,33 @@ std::initializer_list nouns "cousin", "covariate", "cover", "coverage", "coverall", "cow", "cowbell", "cowboy", "coyote", "crab", "crack", "cracker", "crackers", "cradle", "craft", "craftsman", "cranberry", "crane", "cranky", "crash", "crate", "cravat", "craw", "crawdad", "crayfish", "crayon", "crazy", "cream", "creation", "creationism", "creationist", "creative", "creativity", "creator", "creature", "creche", "credential", -"credenza", "credibility", "credit", "creditor", "creek", "creme brulee", "crepe", "crest", "crew", "crewman", "crewmate", "crewmember", +"credenza", "credibility", "credit", "creditor", "creek", "crepe", "crest", "crew", "crewman", "crewmate", "crewmember", "crewmen", "cria", "crib", "cribbage", "cricket", "cricketer", "crime", "criminal", "crinoline", "crisis", "crisp", "criteria", "criterion", -"critic", "criticism", "crocodile", "crocus", "croissant", "crook", "crop", "cross", "crotch", +"critic", "criticism", "crocodile", "crocus", "croissant", "crook", "crop", "crotch", "croup", "crow", "crowd", "crown", "crucifixion", "crude", "cruelty", "cruise", "crumb", "crunch", "crusader", "crush", "crust", "cry", -"crystal", "crystallography", "cub", "cube", "cuckoo", "cucumber", "cue", "cuisine", "cultivar", "cultivator", "culture", +"crystal", "crystallography", "cub", "cuckoo", "cucumber", "cue", "cuisine", "cultivar", "cultivator", "culture", "culvert", "cummerbund", "cup", "cupboard", "cupcake", "cupola", "curd", "cure", "curio", "curiosity", "curl", "curler", "currant", "currency", -"current", "curriculum", "curry", "curse", "cursor", "curtailment", "curtain", "curve", "cushion", "custard", "custody", "custom", "customer", +"curriculum", "curry", "curse", "cursor", "curtailment", "curtain", "curve", "cushion", "custard", "custody", "custom", "customer", "cut", "cuticle", "cutlet", "cutover", "cutting", "cyclamen", "cycle", "cyclone", "cyclooxygenase", "cygnet", "cylinder", "cymbal", "cynic", "cyst", "cytokine", "cytoplasm", "dad", "daddy", "daffodil", "dagger", "dahlia", "daikon", "daily", "dairy", "daisy", "dam", "damage", "dame", "dance", "dancer", "dancing", "dandelion", "danger", "dare", "dark", "darkness", "darn", "dart", "dash", "dashboard", -"data", "date", "daughter", "dawn", "day", "daybed", "daylight", "dead", "deadline", "deal", 
"dealer", "dealing", "dearest", +"data", "daughter", "dawn", "daybed", "daylight", "dead", "deadline", "deal", "dealer", "dealing", "dearest", "death", "deathwatch", "debate", "debris", "debt", "debtor", "decade", "decadence", "decency", "decimal", "decision", "deck", "declaration", "declination", "decline", "decoder", "decongestant", "decoration", "decrease", "decryption", "dedication", "deduce", "deduction", "deed", "deep", "deer", "defeat", "defendant", "defender", "defense", "deficit", "definition", "deformation", -"degradation", "degree", "delay", "deliberation", "delight", "delivery", "demand", "democracy", "democrat", "demon", "demur", "den", +"degradation", "degree", "deliberation", "delight", "delivery", "demand", "democracy", "democrat", "demon", "demur", "den", "denim", "denominator", "density", "dentist", "deodorant", "department", "departure", "dependency", "dependent", "deployment", "deposit", "deposition", "depot", "depression", "depressive", "depth", "deputy", "derby", "derivation", "derivative", "derrick", "descendant", "descent", "description", "desert", "design", "designation", "designer", "desire", "desk", "desktop", "dessert", "destination", "destiny", "destroyer", "destruction", "detail", "detainee", "detainment", "detection", "detective", "detector", "detention", "determination", "detour", "devastation", "developer", "developing", "development", "developmental", "deviance", "deviation", "device", "devil", "dew", "dhow", "diabetes", "diadem", -"diagnosis", "diagram", "dial", "dialect", "dialogue", "diam", "diamond", "diaper", "diaphragm", "diarist", "diary", "dibble", "dickey", "dictaphone", "dictator", "diction", "dictionary", "die", "diesel", "diet", "difference", "differential", "difficulty", "diffuse", +"diagnosis", "diagram", "dial", "dialect", "dialogue", "diam", "diamond", "diaper", "diaphragm", "diarist", "diary", "dibble", "dickey", "dictaphone", "dictator", "diction", "die", "diesel", "diet", "difference", "differential", "difficulty", "diffuse", "dig", "digestion", "digestive", "digger", "digging", "digit", "dignity", "dilapidation", "dill", "dilution", "dime", "dimension", "dimple", "diner", "dinghy", "dining", "dinner", "dinosaur", "dioxide", "dip", "diploma", "diplomacy", "dipstick", "direction", "directive", "director", "directory", "dirndl", "dirt", "disability", "disadvantage", "disagreement", "disappointment", "disarmament", "disaster", "discharge", "discipline", "disclaimer", "disclosure", "disco", "disconnection", "discount", "discourse", "discovery", "discrepancy", "discretion", "discrimination", "discussion", "disdain", "disease", "disembodiment", "disengagement", "disguise", "disgust", "dish", "dishwasher", -"disk", "disparity", "dispatch", "displacement", "display", "disposal", "disposer", "disposition", "dispute", "disregard", "disruption", +"disparity", "dispatch", "displacement", "display", "disposal", "disposer", "disposition", "dispute", "disregard", "disruption", "dissemination", "dissonance", "distance", "distinction", "distortion", "distribution", "distributor", "district", "divalent", "divan", "diver", "diversity", "divide", "dividend", "divider", "divine", "diving", "division", "divorce", "doc", "dock", "doctor", "doctorate", "doctrine", "document", "documentary", "documentation", "doe", "dog", "doggie", "dogsled", "dogwood", "doing", "doll", "dollar", "dollop", @@ -209,10 +613,10 @@ std::initializer_list nouns "doorpost", "doorway", "dory", "dose", "dot", "double", "doubling", "doubt", "doubter", "dough", "doughnut", "down", 
"downfall", "downforce", "downgrade", "download", "downstairs", "downtown", "downturn", "dozen", "draft", "drag", "dragon", "dragonfly", "dragonfruit", "dragster", "drain", "drainage", "drake", "drama", "dramaturge", "drapes", "draw", "drawbridge", "drawer", "drawing", "dream", "dreamer", "dredger", -"dress", "dresser", "dressing", "drill", "drink", "drinking", "drive", "driver", "driveway", "driving", "drizzle", "dromedary", "drop", +"dress", "dresser", "dressing", "drill", "drink", "drinking", "drive", "driver", "driveway", "driving", "drizzle", "dromedary", "drudgery", "drug", "drum", "drummer", "drunk", "dryer", "duck", "duckling", "dud", "dude", "due", "duel", "dueling", "duffel", "dugout", -"dulcimer", "dumbwaiter", "dump", "dump truck", "dune", "dune buggy", "dungarees", "dungeon", "duplexer", "duration", "durian", "dusk", -"dust", "dust storm", "duster", "duty", "dwarf", "dwell", "dwelling", "dynamics", "dynamite", "dynamo", "dynasty", "dysfunction", +"dulcimer", "dumbwaiter", "dump", "dune", "dungarees", "dungeon", "duplexer", "duration", "durian", "dusk", +"dust", "duster", "duty", "dwarf", "dwell", "dwelling", "dynamics", "dynamite", "dynamo", "dynasty", "dysfunction", "eagle", "eaglet", "ear", "eardrum", "earmuffs", "earnings", "earplug", "earring", "earrings", "earth", "earthquake", "earthworm", "ease", "easel", "east", "eating", "eaves", "eavesdropper", "ecclesia", "echidna", "eclipse", "ecliptic", "ecology", "economics", "economy", "ecosystem", "ectoderm", "ectodermal", "ecumenist", "eddy", "edge", "edger", "edible", "editing", "edition", "editor", "editorial", @@ -222,19 +626,19 @@ std::initializer_list nouns "ellipse", "elm", "elongation", "elver", "email", "emanate", "embarrassment", "embassy", "embellishment", "embossing", "embryo", "emerald", "emergence", "emergency", "emergent", "emery", "emission", "emitter", "emotion", "emphasis", "empire", "employ", "employee", "employer", "employment", "empowerment", "emu", "enactment", "encirclement", "enclave", "enclosure", "encounter", "encouragement", "encyclopedia", -"end", "endive", "endoderm", "endorsement", "endothelium", "endpoint", "enemy", "energy", "enforcement", "engagement", "engine", "engineer", +"endive", "endoderm", "endorsement", "endothelium", "endpoint", "enemy", "energy", "enforcement", "engagement", "engineer", "engineering", "enigma", "enjoyment", "enquiry", "enrollment", "enterprise", "entertainment", "enthusiasm", "entirety", "entity", "entrance", "entree", "entrepreneur", "entry", "envelope", "environment", "envy", "enzyme", "epauliere", "epee", "ephemera", "ephemeris", "ephyra", "epic", "episode", "epithelium", "epoch", "eponym", "epoxy", "equal", "equality", "equation", "equinox", "equipment", "equity", "equivalent", "era", "eraser", "erection", "erosion", "error", "escalator", "escape", "escort", "espadrille", "espalier", "essay", "essence", "essential", -"establishment", "estate", "estimate", "estrogen", "estuary", "eternity", "ethernet", "ethics", "ethnicity", "ethyl", "euphonium", "eurocentrism", -"evaluation", "evaluator", "evaporation", "eve", "evening", "event", "everybody", "everyone", "everything", "eviction", +"establishment", "estate", "estrogen", "estuary", "eternity", "ethernet", "ethics", "ethnicity", "ethyl", "euphonium", "eurocentrism", +"evaluation", "evaluator", "evaporation", "eve", "evening", "everybody", "everyone", "everything", "eviction", "evidence", "evil", "evocation", "evolution", "exaggeration", "exam", "examination", "examiner", "example", -"exasperation", "excellence", 
"exception", "excerpt", "excess", "exchange", "excitement", "exclamation", "excursion", "excuse", "execution", +"exasperation", "excellence", "exception", "excerpt", "excess", "excitement", "exclamation", "excursion", "excuse", "execution", "executive", "executor", "exercise", "exhaust", "exhaustion", "exhibit", "exhibition", "exile", "existence", "exit", "exocrine", "expansion", "expansionism", "expectancy", "expectation", "expedition", "expense", "experience", "experiment", "experimentation", "expert", "expertise", -"explanation", "exploration", "explorer", "explosion", "export", "expose", "exposition", "exposure", "expression", "extension", "extent", -"exterior", "external", "extinction", "extreme", "extremist", "eye", "eyeball", "eyebrow", "eyebrows", "eyeglasses", "eyelash", "eyelashes", +"explanation", "exploration", "explorer", "explosion", "export", "expose", "exposition", "exposure", "extension", "extent", +"exterior", "extinction", "extreme", "extremist", "eye", "eyeball", "eyebrow", "eyebrows", "eyeglasses", "eyelash", "eyelashes", "eyelid", "eyelids", "eyeliner", "eyestrain", "eyrie", "fabric", "face", "facelift", "facet", "facility", "facsimile", "fact", "factor", "factory", "faculty", "fahrenheit", "fail", "failure", "fairness", "fairy", "faith", "faithful", "fall", "fallacy", "fame", "familiar", "familiarity", "family", "fan", "fang", "fanlight", "fanny", "fantasy", "farm", "farmer", "farming", "farmland", @@ -242,13 +646,13 @@ std::initializer_list nouns "favorite", "fawn", "fax", "fear", "feast", "feather", "feature", "fedelini", "federation", "fedora", "fee", "feed", "feedback", "feeding", "feel", "feeling", "fellow", "felony", "female", "fen", "fence", "fencing", "fender", "feng", "fennel", "ferret", "ferry", "ferryboat", "fertilizer", "festival", "fetus", "few", "fiber", "fiberglass", "fibre", "fibroblast", "fibrosis", "ficlet", "fiction", "fiddle", "field", -"fiery", "fiesta", "fifth", "fig", "fight", "fighter", "figure", "figurine", "file", "filing", "fill", "fillet", "filly", "film", "filter", -"filth", "final", "finance", "financing", "finding", "fine", "finer", "finger", "fingerling", "fingernail", "finish", "finisher", "fir", -"fire", "fireman", "fireplace", "firewall", "firm", "first", "fish", "fishbone", "fisherman", "fishery", "fishing", "fishmonger", "fishnet", +"fiery", "fiesta", "fifth", "fig", "fight", "fighter", "figure", "figurine", "filing", "fillet", "filly", "film", +"filth", "finance", "financing", "finding", "fine", "finer", "finger", "fingerling", "fingernail", "finish", "finisher", "fir", +"fire", "fireman", "fireplace", "firewall", "firm", "fish", "fishbone", "fisherman", "fishery", "fishing", "fishmonger", "fishnet", "fisting", "fit", "fitness", "fix", "fixture", "flag", "flair", "flame", "flan", "flanker", "flare", "flash", "flat", "flatboat", "flavor", "flax", "fleck", "fledgling", "fleece", "flesh", "flexibility", "flick", "flicker", "flight", "flint", "flintlock", "flock", "flood", "floodplain", "floor", "floozie", "flour", "flow", "flower", "flu", "flugelhorn", "fluke", "flume", "flung", "flute", "fly", -"flytrap", "foal", "foam", "fob", "focus", "fog", "fold", "folder", "folk", "folklore", "follower", "following", "fondue", "font", "food", +"flytrap", "foal", "foam", "fob", "focus", "fog", "fold", "folder", "folk", "folklore", "follower", "fondue", "font", "food", "foodstuffs", "fool", "foot", "footage", "football", "footnote", "footprint", "footrest", "footstep", "footstool", "footwear", "forage", "forager", "foray", "force", "ford", 
"forearm", "forebear", "forecast", "forehead", "foreigner", "forelimb", "forest", "forestry", "forever", "forgery", "fork", "form", "formal", "formamide", "formation", "former", "formicarium", "formula", "fort", "forte", "fortnight", @@ -256,7 +660,7 @@ std::initializer_list nouns "frame", "framework", "fratricide", "fraud", "fraudster", "freak", "freckle", "freedom", "freelance", "freezer", "freezing", "freight", "freighter", "frenzy", "freon", "frequency", "fresco", "friction", "fridge", "friend", "friendship", "fries", "frigate", "fright", "fringe", "fritter", "frock", "frog", "front", "frontier", "frost", "frosting", "frown", "fruit", "frustration", "fry", "fuel", "fugato", -"fulfillment", "full", "fun", "function", "functionality", "fund", "funding", "fundraising", "funeral", "fur", "furnace", "furniture", +"fulfillment", "fun", "functionality", "fund", "funding", "fundraising", "funeral", "fur", "furnace", "furniture", "furry", "fusarium", "futon", "future", "gadget", "gaffe", "gaffer", "gain", "gaiters", "gale", "gallery", "galley", "gallon", "galoshes", "gambling", "game", "gamebird", "gaming", "gander", "gang", "gap", "garage", "garb", "garbage", "garden", "garlic", "garment", "garter", "gas", "gasket", "gasoline", "gasp", "gastronomy", "gastropod", "gate", "gateway", "gather", "gathering", @@ -269,7 +673,7 @@ std::initializer_list nouns "goggles", "going", "gold", "goldfish", "golf", "gondola", "gong", "good", "goodbye", "goodie", "goodness", "goodnight", "goodwill", "goose", "gopher", "gorilla", "gosling", "gossip", "governance", "government", "governor", "gown", "grace", "grade", "gradient", "graduate", "graduation", "graffiti", "graft", "grain", "gram", "grammar", "gran", "grand", "grandchild", "granddaughter", -"grandfather", "grandma", "grandmom", "grandmother", "grandpa", "grandparent", "grandson", "granny", "granola", "grant", "grape", "grapefruit", +"grandfather", "grandma", "grandmom", "grandmother", "grandpa", "grandparent", "grandson", "granny", "granola", "grape", "grapefruit", "graph", "graphic", "grasp", "grass", "grasshopper", "grassland", "gratitude", "gravel", "gravitas", "gravity", "gravy", "gray", "grease", "greatness", "greed", "green", "greenhouse", "greens", "grenade", "grey", "grid", "grief", "grill", "grin", "grip", "gripper", "grit", "grocery", "ground", "grouper", "grouse", "grove", "growth", "grub", "guacamole", @@ -279,7 +683,7 @@ std::initializer_list nouns "halibut", "hall", "halloween", "hallway", "halt", "ham", "hamburger", "hammer", "hammock", "hamster", "hand", "handball", "handful", "handgun", "handicap", "handle", "handlebar", "handmaiden", "handover", "handrail", "handsaw", "hanger", "happening", "happiness", "harald", "harbor", "harbour", "hardboard", "hardcover", "hardening", "hardhat", "hardship", "hardware", "hare", "harm", -"harmonica", "harmonise", "harmonize", "harmony", "harp", "harpooner", "harpsichord", "harvest", "harvester", "hash", "hashtag", "hassock", +"harmonica", "harmonise", "harmonize", "harmony", "harp", "harpooner", "harpsichord", "harvest", "harvester", "hashtag", "hassock", "haste", "hat", "hatbox", "hatchet", "hatchling", "hate", "hatred", "haunt", "haven", "haversack", "havoc", "hawk", "hay", "haze", "hazel", "hazelnut", "head", "headache", "headlight", "headline", "headphones", "headquarters", "headrest", "health", "hearing", "hearsay", "heart", "heartache", "heartbeat", "hearth", "hearthside", "heartwood", "heat", "heater", "heating", "heaven", @@ -290,53 +694,53 @@ std::initializer_list nouns "hobbit", "hobby", 
"hockey", "hoe", "hog", "hold", "holder", "hole", "holiday", "home", "homeland", "homeownership", "hometown", "homework", "homicide", "homogenate", "homonym", "honesty", "honey", "honeybee", "honeydew", "honor", "honoree", "hood", "hoof", "hook", "hop", "hope", "hops", "horde", "horizon", "hormone", "horn", "hornet", "horror", "horse", "horseradish", "horst", "hose", -"hosiery", "hospice", "hospital", "hospitalisation", "hospitality", "hospitalization", "host", "hostel", "hostess", "hotdog", "hotel", -"hound", "hour", "hourglass", "house", "houseboat", "household", "housewife", "housework", "housing", "hovel", "hovercraft", "howard", +"hosiery", "hospice", "hospital", "hospitalisation", "hospitality", "hospitalization", "hostel", "hostess", "hotdog", "hotel", +"hound", "hourglass", "house", "houseboat", "household", "housewife", "housework", "housing", "hovel", "hovercraft", "howard", "howitzer", "hub", "hubcap", "hubris", "hug", "hugger", "hull", "human", "humanity", "humidity", "hummus", "humor", "humour", "hunchback", "hundred", "hunger", "hunt", "hunter", "hunting", "hurdle", "hurdler", "hurricane", "hurry", "hurt", "husband", "hut", "hutch", "hyacinth", "hybridisation", "hybridization", "hydrant", "hydraulics", "hydrocarb", "hydrocarbon", "hydrofoil", "hydrogen", "hydrolyse", "hydrolysis", "hydrolyze", "hydroxyl", "hyena", "hygienic", "hype", "hyphenation", "hypochondria", "hypothermia", "hypothesis", "ice", -"iceberg", "icebreaker", "icecream", "icicle", "icing", "icon", "icy", "id", "idea", "ideal", "identification", "identity", "ideology", +"iceberg", "icebreaker", "icecream", "icicle", "icing", "icon", "icy", "idea", "ideal", "identification", "identity", "ideology", "idiom", "idiot", "igloo", "ignorance", "ignorant", "ikebana", "illegal", "illiteracy", "illness", "illusion", "illustration", "image", "imagination", "imbalance", "imitation", "immigrant", "immigration", "immortal", "impact", "impairment", "impala", "impediment", "implement", "implementation", "implication", "import", "importance", "impostor", "impress", "impression", "imprisonment", "impropriety", "improvement", "impudence", "impulse", "inability", "inauguration", "inbox", "incandescence", "incarnation", "incense", "incentive", "inch", "incidence", "incident", "incision", "inclusion", "income", "incompetence", "inconvenience", "increase", "incubation", "independence", -"independent", "index", "indication", "indicator", "indigence", "individual", "industrialisation", "industrialization", "industry", "inequality", +"independent", "indication", "indicator", "indigence", "individual", "industrialisation", "industrialization", "industry", "inequality", "inevitable", "infancy", "infant", "infarction", "infection", "infiltration", "infinite", "infix", "inflammation", "inflation", "influence", "influx", "info", "information", "infrastructure", "infusion", "inglenook", "ingrate", "ingredient", "inhabitant", "inheritance", "inhibition", "inhibitor", "initial", "initialise", "initialize", "initiative", "injunction", "injury", "injustice", "ink", "inlay", "inn", "innervation", -"innocence", "innocent", "innovation", "input", "inquiry", "inscription", "insect", "insectarium", "insert", "inside", "insight", "insolence", +"innocence", "innocent", "innovation", "input", "inquiry", "inscription", "insect", "insectarium", "inside", "insight", "insolence", "insomnia", "inspection", "inspector", "inspiration", "installation", "instance", "instant", "instinct", "institute", "institution", "instruction", "instructor", "instrument", 
"instrumentalist", "instrumentation", "insulation", "insurance", "insurgence", "insurrection", "integer", "integral", "integration", "integrity", "intellect", "intelligence", "intensity", "intent", "intention", "intentionality", "interaction", "interchange", "interconnection", "intercourse", "interest", "interface", "interferometer", "interior", "interject", "interloper", -"internet", "interpretation", "interpreter", "interval", "intervenor", "intervention", "interview", "interviewer", "intestine", "introduction", +"internet", "interpretation", "interpreter", "intervenor", "intervention", "interview", "interviewer", "intestine", "introduction", "intuition", "invader", "invasion", "invention", "inventor", "inventory", "inverse", "inversion", "investigation", "investigator", "investment", "investor", "invitation", "invite", "invoice", "involvement", "iridescence", "iris", "iron", "ironclad", "irony", "irrigation", "ischemia", "island", "isogloss", "isolation", "issue", "item", "itinerary", "ivory", "jack", "jackal", "jacket", "jackfruit", "jade", "jaguar", -"jail", "jailhouse", "jalapeño", "jam", "jar", "jasmine", "jaw", "jazz", "jealousy", "jeans", "jeep", "jelly", "jellybeans", "jellyfish", +"jail", "jailhouse", "jam", "jar", "jasmine", "jaw", "jazz", "jealousy", "jeans", "jeep", "jelly", "jellybeans", "jellyfish", "jerk", "jet", "jewel", "jeweller", "jewellery", "jewelry", "jicama", "jiffy", "job", "jockey", "jodhpurs", "joey", "jogging", "joint", "joke", "jot", "journal", "journalism", "journalist", "journey", "joy", "judge", "judgment", "judo", "jug", "juggernaut", "juice", "julienne", "jumbo", "jump", "jumper", "jumpsuit", "jungle", "junior", "junk", "junker", "junket", "jury", "justice", "justification", "jute", "kale", "kamikaze", "kangaroo", "karate", "kayak", "kazoo", "kebab", "keep", "keeper", "kendo", "kennel", "ketch", "ketchup", "kettle", "kettledrum", -"key", "keyboard", "keyboarding", "keystone", "kick", "kid", "kidney", "kielbasa", "kill", "killer", "killing", "kilogram", +"keyboard", "keyboarding", "keystone", "kick", "kid", "kidney", "kielbasa", "killer", "killing", "kilogram", "kilometer", "kilt", "kimono", "kinase", "kind", "kindness", "king", "kingdom", "kingfish", "kiosk", "kiss", "kit", "kitchen", "kite", "kitsch", "kitten", "kitty", "kiwi", "knee", "kneejerk", "knickers", "knife", "knight", "knitting", "knock", "knot", "knowledge", "knuckle", "koala", "kohlrabi", "kumquat", "lab", "label", "labor", "laboratory", "laborer", "labour", "labourer", "lace", "lack", "lacquerware", "lad", "ladder", "ladle", "lady", "ladybug", "lag", "lake", "lamb", "lambkin", "lament", "lamp", "lanai", "land", "landform", "landing", "landmine", "landscape", "lane", "language", "lantern", "lap", "laparoscope", "lapdog", "laptop", "larch", "lard", -"larder", "lark", "larva", "laryngitis", "lasagna", "lashes", "last", "latency", "latex", "lathe", "latitude", "latte", "latter", "laugh", -"laughter", "laundry", "lava", "law", "lawmaker", "lawn", "lawsuit", "lawyer", "lay", "layer", "layout", "lead", "leader", "leadership", -"leading", "leaf", "league", "leaker", "leap", "learning", "leash", "leather", "leave", "leaver", "lecture", "leek", "leeway", "left", +"larder", "lark", "larva", "laryngitis", "lasagna", "lashes", "latency", "latex", "lathe", "latitude", "latte", "latter", "laugh", +"laughter", "laundry", "lava", "law", "lawmaker", "lawn", "lawsuit", "lawyer", "lay", "layer", "lead", "leader", "leadership", +"leaf", "league", "leaker", "leap", "learning", "leash", "leather", "leave", 
"leaver", "lecture", "leek", "leeway", "leg", "legacy", "legal", "legend", "legging", "legislation", "legislator", "legislature", "legitimacy", "legume", "leisure", "lemon", "lemonade", "lemur", "lender", "lending", "length", "lens", "lentil", "leopard", "leprosy", "leptocephalus", "lesson", "letter", -"lettuce", "level", "lever", "leverage", "leveret", "liability", "liar", "liberty", "libido", "library", "licence", "license", "licensing", -"licorice", "lid", "lie", "lieu", "lieutenant", "life", "lifestyle", "lifetime", "lift", "ligand", "light", "lighting", "lightning", +"lettuce", "lever", "leverage", "leveret", "liability", "liar", "liberty", "libido", "library", "licence", "license", "licensing", +"licorice", "lid", "lie", "lieu", "lieutenant", "life", "lifestyle", "lift", "ligand", "light", "lighting", "lightning", "lightscreen", "ligula", "likelihood", "likeness", "lilac", "lily", "limb", "lime", "limestone", "limitation", "limo", "line", "linen", "liner", "linguist", "linguistics", "lining", "link", "linkage", "linseed", "lion", "lip", "lipid", "lipoprotein", "lipstick", -"liquid", "liquidity", "liquor", "list", "listening", "listing", "literate", "literature", "litigation", "litmus", "litter", "littleneck", -"liver", "livestock", "living", "lizard", "llama", "load", "loading", "loaf", "loafer", "loan", "lobby", "lobotomy", "lobster", "local", +"liquid", "liquidity", "liquor", "listening", "listing", "literate", "literature", "litigation", "litmus", "litter", "littleneck", +"liver", "livestock", "living", "lizard", "llama", "load", "loading", "loaf", "loafer", "loan", "lobby", "lobotomy", "lobster", "locality", "location", "lock", "locker", "locket", "locomotive", "locust", "lode", "loft", "log", "loggia", "logic", "login", "logistics", "logo", "loincloth", "lollipop", "loneliness", "longboat", "longitude", "look", "lookout", "loop", "loophole", "loquat", "lord", "loss", "lot", "lotion", "lottery", "lounge", "louse", "lout", "love", "lover", "lox", "loyalty", "luck", "luggage", "lumber", "lumberman", "lunch", @@ -350,28 +754,28 @@ std::initializer_list nouns "manufacturer", "manufacturing", "many", "map", "maple", "mapping", "maracas", "marathon", "marble", "march", "mare", "margarine", "margin", "mariachi", "marimba", "marines", "marionberry", "mark", "marker", "market", "marketer", "marketing", "marketplace", "marksman", "markup", "marmalade", "marriage", "marsh", "marshland", "marshmallow", "marten", "marxism", "mascara", "mask", "masonry", "mass", "massage", "mast", -"master", "masterpiece", "mastication", "mastoid", "mat", "match", "matchmaker", "mate", "material", "maternity", "math", "mathematics", -"matrix", "matter", "mattock", "mattress", "max", "maximum", "maybe", "mayonnaise", "mayor", "meadow", "meal", "mean", "meander", "meaning", +"master", "masterpiece", "mastication", "mastoid", "mat", "matchmaker", "mate", "material", "maternity", "math", "mathematics", +"matrix", "matter", "mattock", "mattress", "maximum", "maybe", "mayonnaise", "mayor", "meadow", "meal", "mean", "meander", "meaning", "means", "meantime", "measles", "measure", "measurement", "meat", "meatball", "meatloaf", "mecca", "mechanic", "mechanism", "med", "medal", "media", "median", "medication", "medicine", "medium", "meet", "meeting", "melatonin", "melody", "melon", "member", "membership", "membrane", -"meme", "memo", "memorial", "memory", "men", "menopause", "menorah", "mention", "mentor", "menu", "merchandise", "merchant", "mercury", +"meme", "memo", "memorial", "men", "menopause", "menorah", 
"mention", "mentor", "menu", "merchandise", "merchant", "mercury", "meridian", "meringue", "merit", "mesenchyme", "mess", "message", "messenger", "messy", "metabolite", "metal", "metallurgist", "metaphor", "meteor", "meteorology", "meter", "methane", "method", "methodology", "metric", "metro", "metronome", "mezzanine", "microlending", "micronutrient", "microphone", "microwave", "midden", "middle", "middleman", "midline", "midnight", "midwife", "might", "migrant", "migration", "mile", "mileage", "milepost", "milestone", "military", "milk", "milkshake", "mill", "millennium", "millet", "millimeter", "million", -"millisecond", "millstone", "mime", "mimosa", "min", "mincemeat", "mind", "mine", "mineral", "mineshaft", "mini", "minibus", -"minimalism", "minimum", "mining", "minion", "minister", "mink", "minnow", "minor", "minority", "mint", "minute", "miracle", +"millstone", "mime", "mimosa", "mincemeat", "mind", "mine", "mineral", "mineshaft", "mini", "minibus", +"minimalism", "minimum", "mining", "minion", "minister", "mink", "minnow", "minor", "minority", "mint", "miracle", "mirror", "miscarriage", "miscommunication", "misfit", "misnomer", "misogyny", "misplacement", "misreading", "misrepresentation", "miss", "missile", "mission", "missionary", "mist", "mistake", "mister", "misunderstand", "miter", "mitten", "mix", "mixer", "mixture", "moai", "moat", "mob", "mobile", "mobility", "mobster", "moccasins", "mocha", "mochi", "mode", "model", "modeling", "modem", "modernist", "modernity", "modification", "molar", "molasses", "molding", "mole", "molecule", "mom", "moment", "monastery", "monasticism", "money", "monger", "monitor", -"monitoring", "monk", "monkey", "monocle", "monopoly", "monotheism", "monsoon", "monster", "month", "monument", "mood", "moody", "moon", +"monitoring", "monk", "monkey", "monocle", "monopoly", "monotheism", "monsoon", "monster", "monument", "mood", "moody", "moon", "moonlight", "moonscape", "moonshine", "moose", "mop", "morale", "morbid", "morbidity", "morning", "moron", "morphology", "morsel", "mortal", "mortality", "mortgage", "mortise", "mosque", "mosquito", "most", "motel", "moth", "mother", "motion", "motivation", "motive", "motor", "motorboat", "motorcar", "motorcycle", "mound", "mountain", "mouse", "mouser", "mousse", "moustache", "mouth", "mouton", "movement", "mover", "movie", "mower", "mozzarella", "mud", "muffin", "mug", "mukluk", "mule", "multimedia", "murder", "muscat", "muscatel", "muscle", "musculature", "museum", "mushroom", "music", "musician", "muskrat", "mussel", "mustache", "mustard", -"mutation", "mutt", "mutton", "mycoplasma", "mystery", "myth", "mythology", "nail", "name", "naming", "nanoparticle", "napkin", "narrative", +"mutt", "mutton", "mycoplasma", "mystery", "myth", "mythology", "nail", "naming", "nanoparticle", "napkin", "narrative", "nasal", "nation", "nationality", "native", "naturalisation", "nature", "navigation", "necessity", "neck", "necklace", "necktie", "nectar", "nectarine", "need", "needle", "neglect", "negligee", "negotiation", "neighbor", "neighborhood", "neighbour", "neighbourhood", "neologism", "neon", "neonate", "nephew", "nerve", "nest", "nestling", "nestmate", "net", "netball", "netbook", "netsuke", "network", "networking", @@ -381,13 +785,13 @@ std::initializer_list nouns "noodle", "noodles", "noon", "norm", "normal", "normalisation", "normalization", "north", "nose", "notation", "note", "notebook", "notepad", "nothing", "notice", "notion", "notoriety", "nougat", "noun", "nourishment", "novel", "nucleotidase", "nucleotide", 
"nudge", "nuke", "number", "numeracy", "numeric", "numismatist", "nun", "nurse", "nursery", "nursing", "nurture", "nut", "nutmeg", "nutrient", "nutrition", -"nylon", "nymph", "oak", "oar", "oasis", "oat", "oatmeal", "oats", "obedience", "obesity", "obi", "object", "objection", "objective", +"nylon", "nymph", "oak", "oar", "oasis", "oat", "oatmeal", "oats", "obedience", "obesity", "obi", "objection", "objective", "obligation", "oboe", "observation", "observatory", "obsession", "obsidian", "obstacle", "occasion", "occupation", "occurrence", "ocean", "ocelot", "octagon", "octave", "octavo", "octet", "octopus", "odometer", "odyssey", "oeuvre", "offence", "offense", "offer", -"offering", "office", "officer", "official", "offset", "oil", "okra", "oldie", "oleo", "olive", "omega", "omelet", "omission", "omnivore", +"offering", "office", "officer", "official", "oil", "okra", "oldie", "oleo", "olive", "omega", "omelet", "omission", "omnivore", "oncology", "onion", "online", "onset", "opening", "opera", "operating", "operation", "operator", "ophthalmologist", "opinion", "opium", "opossum", "opponent", "opportunist", "opportunity", "opposite", "opposition", "optimal", "optimisation", "optimist", "optimization", -"option", "orange", "orangutan", "orator", "orchard", "orchestra", "orchid", "ordinary", "ordination", "ore", "oregano", "organ", +"orange", "orangutan", "orator", "orchard", "orchestra", "orchid", "ordinary", "ordination", "ore", "oregano", "organ", "organisation", "organising", "organization", "organizing", "orient", "orientation", "origin", "original", "originality", "ornament", "osmosis", "osprey", "ostrich", "other", "otter", "ottoman", "ounce", "outback", "outcome", "outfielder", "outfit", "outhouse", "outlaw", "outlay", "outlet", "outline", "outlook", "output", "outrage", "outrigger", "outrun", "outset", "outside", "oval", "ovary", "oven", "overcharge", @@ -398,7 +802,7 @@ std::initializer_list nouns "pansy", "panther", "panties", "pantologist", "pantology", "pantry", "pants", "pantsuit", "panty", "pantyhose", "papa", "papaya", "paper", "paperback", "paperwork", "parable", "parachute", "parade", "paradise", "paragraph", "parallelogram", "paramecium", "paramedic", "parameter", "paranoia", "parcel", "parchment", "pard", "pardon", "parent", "parenthesis", "parenting", "park", "parka", "parking", "parliament", -"parole", "parrot", "parser", "parsley", "parsnip", "part", "participant", "participation", "particle", "particular", "partner", "partnership", +"parole", "parrot", "parser", "parsley", "parsnip", "participant", "participation", "particle", "particular", "partner", "partnership", "partridge", "party", "pass", "passage", "passbook", "passenger", "passing", "passion", "passive", "passport", "password", "past", "pasta", "paste", "pastor", "pastoralist", "pastry", "pasture", "pat", "patch", "pate", "patent", "patentee", "path", "pathogenesis", "pathology", "pathway", "patience", "patient", "patina", "patio", "patriarch", "patrimony", "patriot", "patrol", "patroller", "patrolling", "patron", @@ -413,26 +817,26 @@ std::initializer_list nouns "physical", "physics", "physiology", "pianist", "piano", "piccolo", "pick", "pickax", "pickaxe", "picket", "pickle", "pickup", "picnic", "picture", "picturesque", "pie", "piece", "pier", "piety", "pig", "pigeon", "piglet", "pigpen", "pigsty", "pike", "pilaf", "pile", "pilgrim", "pilgrimage", "pill", "pillar", "pillbox", "pillow", "pilot", "pimp", "pimple", "pin", "pinafore", "pine", "pineapple", -"pinecone", "ping", "pink", "pinkie", "pinot", 
"pinstripe", "pint", "pinto", "pinworm", "pioneer", "pipe", "pipeline", "piracy", "pirate", +"pinecone", "ping", "pink", "pinkie", "pinot", "pinstripe", "pint", "pinto", "pinworm", "pioneer", "pipe", "piracy", "pirate", "pistol", "pit", "pita", "pitch", "pitcher", "pitching", "pith", "pizza", "place", "placebo", "placement", "placode", "plagiarism", -"plain", "plaintiff", "plan", "plane", "planet", "planning", "plant", "plantation", "planter", "planula", "plaster", "plasterboard", +"plain", "plaintiff", "plane", "planet", "planning", "plant", "plantation", "planter", "planula", "plaster", "plasterboard", "plastic", "plate", "platelet", "platform", "platinum", "platter", "platypus", "play", "player", "playground", "playroom", "playwright", "plea", "pleasure", "pleat", "pledge", "plenty", "plier", "pliers", "plight", "plot", "plough", "plover", "plow", "plowman", "plug", "plugin", "plum", "plumber", "plume", "plunger", "plywood", "pneumonia", "pocket", "pocketbook", "pod", "podcast", "poem", "poet", "poetry", "poignance", "point", "poison", "poisoning", "poker", "polarisation", "polarization", "pole", "polenta", "police", -"policeman", "policy", "polish", "politician", "politics", "poll", "polliwog", "pollutant", "pollution", "polo", "polyester", "polyp", +"policeman", "polish", "politician", "politics", "poll", "polliwog", "pollutant", "pollution", "polo", "polyester", "polyp", "pomegranate", "pomelo", "pompom", "poncho", "pond", "pony", "pool", "poor", "pop", "popcorn", "poppy", "popsicle", "popularity", "population", "populist", "porcelain", "porch", "porcupine", "pork", "porpoise", "port", "porter", "portfolio", "porthole", "portion", "portrait", -"position", "possession", "possibility", "possible", "post", "postage", "postbox", "poster", "posterior", "postfix", "pot", "potato", +"possession", "possibility", "possible", "post", "postage", "postbox", "poster", "posterior", "postfix", "pot", "potato", "potential", "pottery", "potty", "pouch", "poultry", "pound", "pounding", "poverty", "powder", "power", "practice", "practitioner", "prairie", -"praise", "pray", "prayer", "precedence", "precedent", "precipitation", "precision", "predecessor", "preface", "preference", "prefix", +"praise", "pray", "prayer", "precedence", "precedent", "precipitation", "predecessor", "preface", "preference", "prefix", "pregnancy", "prejudice", "prelude", "premeditation", "premier", "premise", "premium", "preoccupation", "preparation", "prescription", "presence", "present", "presentation", "preservation", "preserves", "presidency", "president", "press", "pressroom", "pressure", "pressurisation", "pressurization", "prestige", "presume", "pretzel", "prevalence", "prevention", "prey", "price", "pricing", "pride", "priest", "priesthood", -"primary", "primate", "prince", "princess", "principal", "principle", "print", "printer", "printing", "prior", "priority", "prison", +"primate", "prince", "princess", "principal", "principle", "print", "printer", "printing", "prior", "priority", "prison", "prisoner", "privacy", "private", "privilege", "prize", "prizefight", "probability", "probation", "probe", "problem", "procedure", "proceedings", "process", "processing", "processor", "proctor", "procurement", "produce", "producer", "product", "production", "productivity", "profession", -"professional", "professor", "profile", "profit", "progenitor", "program", "programme", "programming", "progress", "progression", "prohibition", +"professional", "professor", "profit", "progenitor", "program", "programme", "programming", 
"progress", "progression", "prohibition", "project", "proliferation", "promenade", "promise", "promotion", "prompt", "pronoun", "pronunciation", "proof", "propaganda", "propane", "property", "prophet", "proponent", "proportion", "proposal", "proposition", "proprietor", "prose", "prosecution", "prosecutor", "prospect", "prosperity", "prostacyclin", "prostanoid", "prostrate", "protection", "protein", "protest", "protocol", "providence", "provider", @@ -440,14 +844,14 @@ std::initializer_list nouns "psychologist", "psychology", "ptarmigan", "pub", "public", "publication", "publicity", "publisher", "publishing", "pudding", "puddle", "puffin", "pug", "puggle", "pulley", "pulse", "puma", "pump", "pumpernickel", "pumpkin", "pumpkinseed", "pun", "punch", "punctuation", "punishment", "pup", "pupa", "pupil", "puppet", "puppy", "purchase", "puritan", "purity", "purple", "purpose", "purr", "purse", "pursuit", -"push", "pusher", "put", "puzzle", "pyramid", "pyridine", "quadrant", "quail", "qualification", "quality", "quantity", "quart", "quarter", -"quartet", "quartz", "queen", "query", "quest", "question", "questioner", "questionnaire", "quiche", "quicksand", "quiet", "quill", "quilt", -"quince", "quinoa", "quit", "quiver", "quota", "quotation", "quote", "rabbi", "rabbit", "raccoon", "race", "racer", "racing", "racism", +"push", "pusher", "put", "puzzle", "pyramid", "pyridine", "quadrant", "quail", "qualification", "quality", "quantity", "quart", +"quartet", "quartz", "queen", "quest", "question", "questioner", "questionnaire", "quiche", "quicksand", "quiet", "quill", "quilt", +"quince", "quinoa", "quit", "quiver", "quotation", "quote", "rabbi", "rabbit", "raccoon", "race", "racer", "racing", "racism", "racist", "rack", "radar", "radiator", "radio", "radiosonde", "radish", "raffle", "raft", "rag", "rage", "raid", "rail", "railing", "railroad", "railway", "raiment", "rain", "rainbow", "raincoat", "rainmaker", "rainstorm", "rainy", "raise", "raisin", "rake", "rally", "ram", "rambler", -"ramen", "ramie", "ranch", "rancher", "randomisation", "randomization", "range", "ranger", "rank", "rap", "rape", "raspberry", "rat", +"ramen", "ramie", "ranch", "rancher", "randomisation", "randomization", "ranger", "rank", "rap", "rape", "raspberry", "rat", "rate", "ratepayer", "rating", "ratio", "rationale", "rations", "raven", "ravioli", "rawhide", "ray", "rayon", "razor", "reach", "reactant", -"reaction", "read", "reader", "readiness", "reading", "real", "reality", "realization", "realm", "reamer", "rear", "reason", "reasoning", +"reaction", "read", "reader", "readiness", "reading", "real", "reality", "realization", "reamer", "rear", "reason", "reasoning", "rebel", "rebellion", "reboot", "recall", "recapitulation", "receipt", "receiver", "reception", "receptor", "recess", "recession", "recipe", "recipient", "reciprocity", "reclamation", "recliner", "recognition", "recollection", "recommendation", "reconsideration", "record", "recorder", "recording", "recovery", "recreation", "recruit", "rectangle", "red", "redesign", "redhead", "redirect", "rediscovery", "reduction", @@ -457,21 +861,21 @@ std::initializer_list nouns "reliability", "relief", "religion", "relish", "reluctance", "remains", "remark", "reminder", "remnant", "remote", "removal", "renaissance", "rent", "reorganisation", "reorganization", "repair", "reparation", "repayment", "repeat", "replacement", "replica", "replication", "reply", "report", "reporter", "reporting", "repository", "representation", "representative", "reprocessing", "republic", "republican", 
"reputation", -"request", "requirement", "resale", "rescue", "research", "researcher", "resemblance", "reservation", "reserve", "reservoir", "reset", +"request", "requirement", "resale", "rescue", "research", "researcher", "resemblance", "reservation", "reserve", "reservoir", "residence", "resident", "residue", "resist", "resistance", "resolution", "resolve", "resort", "resource", "respect", "respite", "response", -"responsibility", "rest", "restaurant", "restoration", "restriction", "restroom", "restructuring", "result", "resume", "retailer", "retention", +"responsibility", "rest", "restaurant", "restoration", "restriction", "restroom", "restructuring", "result", "retailer", "retention", "rethinking", "retina", "retirement", "retouching", "retreat", "retrospect", "retrospective", "retrospectivity", "return", "reunion", "revascularisation", "revascularization", "reveal", "revelation", "revenant", "revenge", "revenue", "reversal", "reverse", "review", "revitalisation", "revitalization", "revival", "revolution", "revolver", "reward", "rhetoric", "rheumatism", "rhinoceros", "rhubarb", -"rhyme", "rhythm", "rib", "ribbon", "rice", "riddle", "ride", "rider", "ridge", "riding", "rifle", "right", "rim", "ring", "ringworm", +"rhyme", "rhythm", "rib", "ribbon", "rice", "riddle", "ride", "rider", "ridge", "riding", "rifle", "rim", "ring", "ringworm", "riot", "rip", "ripple", "rise", "riser", "risk", "rite", "ritual", "river", "riverbed", "rivulet", "road", "roadway", "roar", "roast", -"robe", "robin", "robot", "robotics", "rock", "rocker", "rocket", "rod", "role", "roll", "roller", "romaine", "romance", +"robe", "robin", "robot", "robotics", "rock", "rocker", "rocket", "rod", "roll", "roller", "romaine", "romance", "roof", "room", "roommate", "rooster", "root", "rope", "rose", "rosemary", "roster", "rostrum", "rotation", "round", "roundabout", "route", -"router", "routine", "row", "rowboat", "rowing", "rubber", "rubric", "ruby", "ruckus", "rudiment", "ruffle", "rug", "rugby", +"router", "routine", "rowboat", "rowing", "rubber", "rubric", "ruby", "ruckus", "rudiment", "ruffle", "rug", "rugby", "ruin", "rule", "ruler", "ruling", "rum", "rumor", "run", "runaway", "runner", "running", "runway", "rush", "rust", "rutabaga", "rye", "sabre", "sac", "sack", "saddle", "sadness", "safari", "safe", "safeguard", "safety", "saffron", "sage", "sail", "sailboat", "sailing", -"sailor", "saint", "sake", "salad", "salami", "salary", "sale", "salesman", "salmon", "salon", "saloon", "salsa", "salt", "salute", "samovar", -"sampan", "sample", "samurai", "sanction", "sanctity", "sanctuary", "sand", "sandal", "sandbar", "sandpaper", "sandwich", "sanity", "sardine", +"sailor", "saint", "sake", "salad", "salami", "salary", "sale", "salesman", "salmon", "salon", "saloon", "salsa", "salute", "samovar", +"sampan", "samurai", "sanction", "sanctity", "sanctuary", "sand", "sandal", "sandbar", "sandpaper", "sandwich", "sanity", "sardine", "sari", "sarong", "sash", "satellite", "satin", "satire", "satisfaction", "sauce", "saucer", "sauerkraut", "sausage", "savage", "savannah", "saving", "savings", "savior", "saviour", "savory", "saw", "saxophone", "scaffold", "scale", "scallion", "scallops", "scalp", "scam", "scanner", "scarecrow", "scarf", "scarification", "scenario", "scene", "scenery", "scent", "schedule", "scheduling", "schema", "scheme", @@ -479,20 +883,20 @@ std::initializer_list nouns "scooter", "scope", "score", "scorn", "scorpion", "scotch", "scout", "scow", "scrambled", "scrap", "scraper", "scratch", "screamer", "screen", 
"screening", "screenwriting", "screw", "screwdriver", "scrim", "scrip", "script", "scripture", "scrutiny", "sculpting", "sculptural", "sculpture", "sea", "seabass", "seafood", "seagull", "seal", "seaplane", "search", "seashore", "seaside", "season", "seat", -"seaweed", "second", "secrecy", "secret", "secretariat", "secretary", "secretion", "section", "sectional", "sector", "security", "sediment", +"seaweed", "secrecy", "secret", "secretariat", "secretary", "secretion", "section", "sectional", "sector", "security", "sediment", "seed", "seeder", "seeker", "seep", "segment", "seizure", "selection", "self", "seller", "selling", "semantics", "semester", "semicircle", "semicolon", "semiconductor", "seminar", "senate", "senator", "sender", "senior", "sense", "sensibility", "sensitive", "sensitivity", "sensor", "sentence", "sentencing", "sentiment", "sepal", "separation", "septicaemia", "sequel", -"sequence", "serial", "series", "sermon", "serum", "serval", "servant", "server", "service", "servitude", "sesame", "session", "set", -"setback", "setting", "settlement", "settler", "severity", "sewer", "sex", "sexuality", "shack", "shackle", "shade", "shadow", "shadowbox", +"sequence", "serial", "series", "sermon", "serum", "serval", "servant", "service", "servitude", "sesame", "session", +"setback", "settlement", "settler", "severity", "sewer", "sex", "sexuality", "shack", "shackle", "shade", "shadow", "shadowbox", "shakedown", "shaker", "shallot", "shallows", "shame", "shampoo", "shanty", "shape", "share", "shareholder", "shark", "shaw", "shawl", "shear", "shearling", "sheath", "shed", "sheep", "sheet", "shelf", "shell", "shelter", "sherbet", "sherry", "shield", "shift", "shin", "shine", "shingle", "ship", "shipper", "shipping", "shipyard", "shirt", "shirtdress", "shoat", "shock", "shoe", "shoehorn", "shoelace", "shoemaker", "shoes", "shoestring", "shofar", "shoot", "shootdown", "shop", "shopper", "shopping", "shore", "shoreline", -"short", "shortage", "shorts", "shortwave", "shot", "shoulder", "shout", "shovel", "show", "shower", "shred", "shrimp", +"short", "shortage", "shorts", "shortwave", "shot", "shoulder", "shout", "shovel", "shower", "shred", "shrimp", "shrine", "shutdown", "sibling", "sick", "sickness", "side", "sideboard", "sideburns", "sidecar", "sidestream", "sidewalk", "siding", "siege", "sigh", "sight", "sightseeing", "sign", "signal", "signature", "signet", "significance", "signify", "signup", "silence", "silica", -"silicon", "silk", "silkworm", "sill", "silly", "silo", "silver", "similarity", "simple", "simplicity", "simplification", "simvastatin", +"silicon", "silk", "silkworm", "sill", "silly", "silo", "silver", "similarity", "simplicity", "simplification", "simvastatin", "sin", "singer", "singing", "singular", "sink", "sinuosity", "sip", "sir", "sister", "sitar", "site", "situation", "size", "skate", "skating", "skean", "skeleton", "ski", "skiing", "skill", "skin", "skirt", "skull", "skullcap", "skullduggery", "skunk", "sky", "skylight", "skyline", "skyscraper", "skywalk", "slang", "slapstick", "slash", "slate", "slavery", "slaw", "sled", "sledge", @@ -503,7 +907,7 @@ std::initializer_list nouns "society", "sociology", "sock", "socks", "soda", "sofa", "softball", "softdrink", "softening", "software", "soil", "soldier", "sole", "solicitation", "solicitor", "solidarity", "solidity", "soliloquy", "solitaire", "solution", "solvency", "sombrero", "somebody", "someone", "someplace", "somersault", "something", "somewhere", "son", "sonar", "sonata", "song", "songbird", "sonnet", "soot", 
"sophomore", "soprano", -"sorbet", "sorghum", "sorrel", "sorrow", "sort", "soul", "soulmate", "sound", "soundness", "soup", "source", "sourwood", "sousaphone", +"sorbet", "sorghum", "sorrel", "sorrow", "sort", "soul", "soulmate", "sound", "soundness", "soup", "sourwood", "sousaphone", "south", "southeast", "souvenir", "sovereignty", "sow", "soy", "soybean", "space", "spacing", "spade", "spaghetti", "span", "spandex", "spank", "sparerib", "spark", "sparrow", "spasm", "spat", "spatula", "spawn", "speaker", "speakerphone", "speaking", "spear", "spec", "special", "specialist", "specialty", "species", "specification", "spectacle", "spectacles", "spectrograph", "spectrum", "speculation", @@ -515,11 +919,11 @@ std::initializer_list nouns "staff", "stag", "stage", "stain", "stair", "staircase", "stake", "stalk", "stall", "stallion", "stamen", "stamina", "stamp", "stance", "stand", "standard", "standardisation", "standardization", "standing", "standoff", "standpoint", "star", "starboard", "start", "starter", "state", "statement", "statin", "station", "statistic", "statistics", "statue", "status", "statute", "stay", "steak", -"stealth", "steam", "steamroller", "steel", "steeple", "stem", "stench", "stencil", "step", +"stealth", "steam", "steamroller", "steel", "steeple", "stem", "stench", "stencil", "stepdaughter", "stepmother", "stepson", "stereo", "stew", "steward", "stick", "sticker", "stiletto", "still", "stimulation", "stimulus", "sting", "stinger", "stitch", "stitcher", "stock", "stockings", "stole", "stomach", "stone", "stonework", "stool", -"stop", "stopsign", "stopwatch", "storage", "store", "storey", "storm", "story", "storyboard", "stot", "stove", "strait", +"stop", "stopsign", "stopwatch", "store", "storey", "storm", "story", "storyboard", "stot", "stove", "strait", "strand", "stranger", "strap", "strategy", "straw", "strawberry", "strawman", "stream", "street", "streetcar", "strength", "stress", "stretch", "strife", "strike", "string", "strip", "stripe", "strobe", "stroke", "structure", "strudel", "struggle", "stucco", "stud", "student", "studio", "study", "stuff", "stumbling", "stump", "stupidity", "sturgeon", "sty", "style", "styling", "stylus", "sub", "subcomponent", @@ -533,16 +937,16 @@ std::initializer_list nouns "suspenders", "suspension", "sustainment", "sustenance", "swallow", "swamp", "swan", "swanling", "swath", "sweat", "sweater", "sweatshirt", "sweatshop", "sweatsuit", "sweets", "swell", "swim", "swimming", "swimsuit", "swine", "swing", "switch", "switchboard", "switching", "swivel", "sword", "swordfight", "swordfish", "sycamore", "symbol", "symmetry", "sympathy", "symptom", "syndicate", "syndrome", "synergy", -"synod", "synonym", "synthesis", "syrup", "system", "tab", "tabby", "tabernacle", "tablecloth", "tablet", "tabletop", +"synod", "synonym", "synthesis", "syrup", "tab", "tabby", "tabernacle", "tablecloth", "tablet", "tabletop", "tachometer", "tackle", "taco", "tactics", "tactile", "tadpole", "tag", "tail", "tailbud", "tailor", "tailspin", "takeover", "tale", "talent", "talk", "talking", "tamale", "tambour", "tambourine", "tan", "tandem", "tangerine", "tank", "tanker", "tankful", "tap", "tape", "tapioca", "target", "taro", "tarragon", "tart", "task", "tassel", "taste", "tatami", "tattler", "tattoo", "tavern", "tax", "taxi", "taxicab", "taxpayer", "tea", "teacher", "teaching", "team", "teammate", "teapot", "tear", "tech", "technician", "technique", "technologist", "technology", "tectonics", "teen", "teenager", "teepee", "telephone", "telescreen", "teletype", 
-"television", "tell", "teller", "temp", "temper", "temperature", "temple", "tempo", "temporariness", "temporary", "temptation", "temptress", +"television", "tell", "teller", "temp", "temper", "temperature", "temple", "tempo", "temporariness", "temptation", "temptress", "tenant", "tendency", "tender", "tenement", "tenet", "tennis", "tenor", "tension", "tensor", "tent", "tentacle", "tenth", "tepee", "teriyaki", "term", "terminal", "termination", "terminology", "termite", "terrace", "terracotta", "terrapin", "terrarium", "territory", "terror", -"terrorism", "terrorist", "test", "testament", "testimonial", "testimony", "testing", "text", "textbook", "textual", "texture", "thanks", +"terrorism", "terrorist", "testament", "testimonial", "testimony", "testing", "text", "textbook", "textual", "texture", "thanks", "thaw", "theater", "theft", "theism", "theme", "theology", "theory", "therapist", "therapy", "thermals", "thermometer", "thermostat", "thesis", "thickness", "thief", "thigh", "thing", "thinking", "thirst", "thistle", "thong", "thongs", "thorn", "thought", "thousand", "thread", "threat", "threshold", "thrift", "thrill", "throat", "throne", "thrush", "thrust", "thug", "thumb", "thump", "thunder", "thunderbolt", @@ -550,49 +954,49 @@ std::initializer_list nouns "timber", "time", "timeline", "timeout", "timer", "timetable", "timing", "timpani", "tin", "tinderbox", "tinkle", "tintype", "tip", "tire", "tissue", "titanium", "title", "toad", "toast", "toaster", "tobacco", "today", "toe", "toenail", "toffee", "tofu", "tog", "toga", "toilet", "tolerance", "tolerant", "toll", "tomatillo", "tomato", "tomb", "tomography", "tomorrow", "ton", "tonality", "tone", "tongue", -"tonic", "tonight", "tool", "toot", "tooth", "toothbrush", "toothpaste", "toothpick", "top", "topic", "topsail", "toque", +"tonic", "tonight", "tool", "toot", "tooth", "toothbrush", "toothpaste", "toothpick", "topic", "topsail", "toque", "toreador", "tornado", "torso", "torte", "tortellini", "tortilla", "tortoise", "tosser", "total", "tote", "touch", "tour", "tourism", "tourist", "tournament", "towel", "tower", "town", "townhouse", "township", "toy", "trace", "trachoma", "track", -"tracking", "tracksuit", "tract", "tractor", "trade", "trader", "trading", "tradition", "traditionalism", "traffic", "trafficker", "tragedy", -"trail", "trailer", "trailpatrol", "train", "trainer", "training", "trait", "tram", "tramp", "trance", "transaction", "transcript", "transfer", +"tracksuit", "tract", "tractor", "trade", "trader", "trading", "tradition", "traditionalism", "traffic", "trafficker", "tragedy", +"trail", "trailer", "trailpatrol", "train", "trainer", "training", "trait", "tram", "tramp", "trance", "transcript", "transfer", "transformation", "transit", "transition", "translation", "transmission", "transom", "transparency", "transplantation", "transport", "transportation", "trap", "trapdoor", "trapezium", "trapezoid", "trash", "travel", "traveler", "tray", "treasure", "treasury", "treat", -"treatment", "treaty", "tree", "trek", "trellis", "tremor", "trench", "trend", "triad", "trial", "triangle", "tribe", "tributary", "trick", -"trigger", "trigonometry", "trillion", "trim", "trinket", "trip", "tripod", "tritone", "triumph", "trolley", "trombone", "troop", "trooper", +"treatment", "treaty", "trek", "trellis", "tremor", "trench", "trend", "triad", "trial", "triangle", "tribe", "tributary", "trick", +"trigonometry", "trillion", "trinket", "trip", "tripod", "tritone", "triumph", "trolley", "trombone", "troop", "trooper", "trophy", "trouble", 
"trousers", "trout", "trove", "trowel", "truck", "trumpet", "trunk", "trust", "trustee", "truth", "try", "tsunami", "tub", "tuba", "tube", "tuber", "tug", "tugboat", "tuition", "tulip", "tumbler", "tummy", "tuna", "tune", "tunic", "tunnel", "turban", "turf", "turkey", "turmeric", "turn", "turning", "turnip", "turnover", "turnstile", "turret", "turtle", "tusk", "tussle", "tutu", -"tuxedo", "tweet", "tweezers", "twig", "twilight", "twine", "twins", "twist", "twister", "twitter", "type", "typeface", "typewriter", +"tuxedo", "tweet", "tweezers", "twig", "twilight", "twine", "twins", "twist", "twister", "twitter", "typeface", "typewriter", "typhoon", "ukulele", "ultimatum", "umbrella", "unblinking", "uncertainty", "uncle", "underclothes", "underestimate", "underground", "underneath", "underpants", "underpass", "undershirt", "understanding", "understatement", "undertaker", "underwear", "underweight", "underwire", -"underwriting", "unemployment", "unibody", "uniform", "uniformity", "unique", "unit", "unity", "universe", "university", "update", -"upgrade", "uplift", "upper", "upstairs", "upward", "urge", "urgency", "urn", "usage", "use", "user", "usher", "usual", "utensil", "utilisation", +"underwriting", "unemployment", "unibody", "uniform", "uniformity", "unit", "unity", "universe", "university", +"upgrade", "uplift", "upper", "upstairs", "upward", "urge", "urgency", "urn", "usage", "usher", "usual", "utensil", "utilisation", "utility", "utilization", "vacation", "vaccine", "vacuum", "vagrant", "valance", "valentine", "validate", "validity", "valley", "valuable", -"value", "vampire", "van", "vanadyl", "vane", "vanilla", "vanity", "variability", "variable", "variant", "variation", "variety", "vascular", +"vampire", "van", "vanadyl", "vane", "vanilla", "vanity", "variability", "variable", "variant", "variation", "variety", "vascular", "vase", "vault", "vaulting", "veal", "vector", "vegetable", "vegetarian", "vegetarianism", "vegetation", "vehicle", "veil", "vein", "veldt", "vellum", "velocity", "velodrome", "velvet", "vendor", "veneer", "vengeance", "venison", "venom", "venti", "venture", "venue", "veranda", "verb", "verdict", "verification", "vermicelli", "vernacular", "verse", "version", "vertigo", "verve", "vessel", "vest", "vestment", "vet", "veteran", "veterinarian", "veto", "viability", "vibe", "vibraphone", "vibration", "vibrissae", "vice", "vicinity", "victim", -"victory", "video", "view", "viewer", "vignette", "villa", "village", "vine", "vinegar", "vineyard", "vintage", "vintner", "vinyl", "viola", +"victory", "video", "viewer", "vignette", "villa", "village", "vine", "vinegar", "vineyard", "vintage", "vintner", "vinyl", "viola", "violation", "violence", "violet", "violin", "virginal", "virtue", "virus", "visa", "viscose", "vise", "vision", "visit", "visitor", "visor", "vista", "visual", "vitality", "vitamin", "vitro", "vivo", "vixen", "vodka", "vogue", "voice", "void", "vol", "volatility", -"volcano", "volleyball", "volume", "volunteer", "volunteering", "vomit", "vote", "voter", "voting", "voyage", "vulture", "wad", "wafer", +"volcano", "volleyball", "volunteer", "volunteering", "vomit", "vote", "voter", "voting", "voyage", "vulture", "wad", "wafer", "waffle", "wage", "wagon", "waist", "waistband", "wait", "waiter", "waiting", "waitress", "waiver", "wake", "walk", "walker", "walking", "walkway", "wall", "wallaby", "wallet", "walnut", "walrus", "wampum", "wannabe", "want", "war", "warden", "wardrobe", "warfare", "warlock", "warlord", "warming", "warmth", "warning", "warrant", "warren", 
"warrior", "wasabi", "wash", "washbasin", "washcloth", "washer", -"washtub", "wasp", "waste", "wastebasket", "wasting", "watch", "watcher", "watchmaker", "water", "waterbed", "watercress", "waterfall", +"washtub", "wasp", "waste", "wastebasket", "wasting", "watcher", "watchmaker", "water", "waterbed", "watercress", "waterfall", "waterfront", "watermelon", "waterskiing", "waterspout", "waterwheel", "wave", "waveform", "wax", "way", "weakness", "wealth", "weapon", -"wear", "weasel", "weather", "web", "webinar", "webmail", "webpage", "website", "wedding", "wedge", "weed", "weeder", "weedkiller", "week", +"wear", "weasel", "weather", "web", "webinar", "webmail", "webpage", "website", "wedding", "wedge", "weed", "weeder", "weedkiller", "weekend", "weekender", "weight", "weird", "welcome", "welfare", "well", "west", "western", "wetland", "wetsuit", "whack", "whale", "wharf", "wheat", "wheel", "whelp", "whey", "whip", "whirlpool", "whirlwind", "whisker", "whiskey", "whisper", "whistle", "white", "whole", "wholesale", "wholesaler", "whorl", "wick", "widget", "widow", "width", "wife", "wifi", "wild", "wildebeest", "wilderness", -"wildlife", "will", "willingness", "willow", "win", "wind", "windage", "window", "windscreen", "windshield", "wine", "winery", +"wildlife", "will", "willingness", "willow", "win", "wind", "windage", "windscreen", "windshield", "wine", "winery", "wing", "wingman", "wingtip", "wink", "winner", "winter", "wire", "wiretap", "wiring", "wisdom", "wiseguy", "wish", "wisteria", "wit", "witch", "withdrawal", "witness", "wok", "wolf", "woman", "wombat", "wonder", "wont", "wood", "woodchuck", "woodland", "woodshed", "woodwind", "wool", "woolens", "word", "wording", "work", "workbench", "worker", "workforce", "workhorse", "working", "workout", "workplace", "workshop", "world", "worm", "worry", "worship", "worshiper", "worth", "wound", "wrap", "wraparound", "wrapper", "wrapping", "wreck", "wrecker", "wren", "wrench", "wrestler", "wriggler", "wrinkle", "wrist", "writer", "writing", "wrong", "xylophone", "yacht", -"yahoo", "yak", "yam", "yang", "yard", "yarmulke", "yarn", "yawl", "year", "yeast", "yellow", "yellowjacket", "yesterday", "yew", "yin", +"yahoo", "yak", "yam", "yang", "yard", "yarmulke", "yarn", "yawl", "yeast", "yellow", "yellowjacket", "yesterday", "yew", "yin", "yoga", "yogurt", "yoke", "yolk", "young", "youngster", "yourself", "youth", "yoyo", "yurt", "zampone", "zebra", "zebrafish", "zen", "zephyr", "zero", "ziggurat", "zinc", "zipper", "zither", "zombie", "zone", "zoo", "zoologist", "zoology", "zucchini" }; @@ -637,7 +1041,10 @@ void obfuscateIdentifier(std::string_view src, WriteBuffer & result, WordMap & o { std::string_view word(word_begin, src_pos - word_begin); - if (keep_words.contains(word)) + String wordcopy(word_begin, src_pos - word_begin); + Poco::toUpperInPlace(wordcopy); + + if (keep_words.contains(wordcopy)) { result.write(word.data(), word.size()); } @@ -805,18 +1212,28 @@ void obfuscateLiteral(std::string_view src, WriteBuffer & result, SipHash hash_f while (alpha_end < src_end && isAlphaASCII(*alpha_end)) ++alpha_end; - hash_func.update(src_pos, alpha_end - src_pos); - pcg64 rng(hash_func.get64()); - - while (src_pos < alpha_end) + String wordcopy(src_pos, alpha_end); + Poco::toUpperInPlace(wordcopy); + if (keep_words.contains(wordcopy)) { - auto random = rng(); - if (isLowerAlphaASCII(*src_pos)) - result.write('a' + random % 26); - else - result.write('A' + random % 26); + result.write(src_pos, alpha_end - src_pos); + src_pos = alpha_end; + } + else + { 
+ hash_func.update(src_pos, alpha_end - src_pos); + pcg64 rng(hash_func.get64()); - ++src_pos; + while (src_pos < alpha_end) + { + auto random = rng(); + if (isLowerAlphaASCII(*src_pos)) + result.write('a' + random % 26); + else + result.write('A' + random % 26); + + ++src_pos; + } } } else if (isASCII(src_pos[0])) diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index 2a9d06bc17b..0d302fda904 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -1377,8 +1377,7 @@ void Planner::buildPlanForQueryNode() */ if (query_node.hasLimit() && apply_limit && !limit_applied && apply_offset) addLimitStep(query_plan, query_analysis_result, planner_context, query_node); - - if (apply_offset && query_node.hasOffset()) + else if (!limit_applied && apply_offset && query_node.hasOffset()) addOffsetStep(query_plan, query_analysis_result); const auto & projection_analysis_result = expression_analysis_result.getProjection(); diff --git a/src/Processors/ConcatProcessor.h b/src/Processors/ConcatProcessor.h index 4a1fc580411..506317eba5d 100644 --- a/src/Processors/ConcatProcessor.h +++ b/src/Processors/ConcatProcessor.h @@ -13,7 +13,7 @@ namespace DB * Doesn't do any heavy calculations. * Preserves an order of data. */ -class ConcatProcessor : public IProcessor +class ConcatProcessor final : public IProcessor { public: ConcatProcessor(const Block & header, size_t num_inputs); diff --git a/src/Processors/DelayedPortsProcessor.h b/src/Processors/DelayedPortsProcessor.h index a6a9590e0c8..3909d533914 100644 --- a/src/Processors/DelayedPortsProcessor.h +++ b/src/Processors/DelayedPortsProcessor.h @@ -8,7 +8,7 @@ namespace DB /// Some ports are delayed. Delayed ports are processed after other outputs are all finished. /// Data between ports is not mixed. It is important because this processor can be used before MergingSortedTransform. /// Delayed ports are appeared after joins, when some non-matched data need to be processed at the end. -class DelayedPortsProcessor : public IProcessor +class DelayedPortsProcessor final : public IProcessor { public: DelayedPortsProcessor(const Block & header, size_t num_ports, const PortNumbers & delayed_ports, bool assert_main_ports_empty = false); diff --git a/src/Processors/ForkProcessor.h b/src/Processors/ForkProcessor.h index 8839f73584f..c6a2a57c657 100644 --- a/src/Processors/ForkProcessor.h +++ b/src/Processors/ForkProcessor.h @@ -15,7 +15,7 @@ namespace DB * Doesn't do any heavy calculations. * Preserves an order of data. 
*/ -class ForkProcessor : public IProcessor +class ForkProcessor final : public IProcessor { public: ForkProcessor(const Block & header, size_t num_outputs) diff --git a/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp b/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp index 8b9e309aa1f..68c40527097 100644 --- a/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp +++ b/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp @@ -44,6 +44,7 @@ M(arrow::Type::INT32, DB::Int32) \ M(arrow::Type::UINT64, DB::UInt64) \ M(arrow::Type::INT64, DB::Int64) \ + M(arrow::Type::DURATION, DB::Int64) \ M(arrow::Type::HALF_FLOAT, DB::Float32) \ M(arrow::Type::FLOAT, DB::Float32) \ M(arrow::Type::DOUBLE, DB::Float64) diff --git a/src/Processors/Formats/Impl/JSONColumnsBlockOutputFormat.cpp b/src/Processors/Formats/Impl/JSONColumnsBlockOutputFormat.cpp index 4a618d3a164..113b537170a 100644 --- a/src/Processors/Formats/Impl/JSONColumnsBlockOutputFormat.cpp +++ b/src/Processors/Formats/Impl/JSONColumnsBlockOutputFormat.cpp @@ -8,7 +8,7 @@ namespace DB { JSONColumnsBlockOutputFormat::JSONColumnsBlockOutputFormat(WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings_, bool validate_utf8, size_t indent_) - : JSONColumnsBlockOutputFormatBase(out_, header_, format_settings_, validate_utf8), indent(indent_) + : JSONColumnsBlockOutputFormatBase(out_, header_, format_settings_, validate_utf8), indent(indent_), header(header_) { names = JSONUtils::makeNamesValidJSONStrings(header_.getNames(), format_settings, validate_utf8); } @@ -25,6 +25,18 @@ void JSONColumnsBlockOutputFormat::writeColumnStart(size_t column_index) void JSONColumnsBlockOutputFormat::writeChunkEnd() { + /// Write empty chunk + if (!written_rows) + { + const auto & columns = header.getColumns(); + for (size_t i = 0; i != columns.size(); ++i) + { + writeColumnStart(i); + writeColumn(*columns[i], *serializations[i]); + writeColumnEnd(i == columns.size() - 1); + } + } + JSONUtils::writeObjectEnd(*ostr, indent); writeChar('\n', *ostr); } diff --git a/src/Processors/Formats/Impl/JSONColumnsBlockOutputFormat.h b/src/Processors/Formats/Impl/JSONColumnsBlockOutputFormat.h index 7140c2ebe90..fa5784d6e15 100644 --- a/src/Processors/Formats/Impl/JSONColumnsBlockOutputFormat.h +++ b/src/Processors/Formats/Impl/JSONColumnsBlockOutputFormat.h @@ -27,6 +27,8 @@ protected: Names names; size_t indent; + + Block header; }; } diff --git a/src/Processors/Formats/Impl/JSONColumnsBlockOutputFormatBase.cpp b/src/Processors/Formats/Impl/JSONColumnsBlockOutputFormatBase.cpp index 87a87548a91..490516b7eb4 100644 --- a/src/Processors/Formats/Impl/JSONColumnsBlockOutputFormatBase.cpp +++ b/src/Processors/Formats/Impl/JSONColumnsBlockOutputFormatBase.cpp @@ -42,6 +42,7 @@ void JSONColumnsBlockOutputFormatBase::writeChunk(Chunk & chunk) writeColumn(*columns[i], *serializations[i]); writeColumnEnd(i == columns.size() - 1); } + written_rows += chunk.getNumRows(); writeChunkEnd(); } diff --git a/src/Processors/Formats/Impl/JSONColumnsBlockOutputFormatBase.h b/src/Processors/Formats/Impl/JSONColumnsBlockOutputFormatBase.h index 541abc0ebce..235a6d4da96 100644 --- a/src/Processors/Formats/Impl/JSONColumnsBlockOutputFormatBase.h +++ b/src/Processors/Formats/Impl/JSONColumnsBlockOutputFormatBase.h @@ -36,6 +36,8 @@ protected: const Serializations serializations; Chunk mono_chunk; + + size_t written_rows = 0; }; } diff --git a/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp b/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp index 
a3b34d30ed6..d2ec3c02eed 100644 --- a/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp @@ -17,6 +17,7 @@ #include "ArrowFieldIndexUtil.h" #include + namespace DB { @@ -44,35 +45,40 @@ Chunk ParquetBlockInputFormat::generate() block_missing_values.clear(); if (!file_reader) + { prepareReader(); + file_reader->set_batch_size(format_settings.parquet.max_block_size); + std::vector row_group_indices; + for (int i = 0; i < row_group_total; ++i) + { + if (!skip_row_groups.contains(i)) + row_group_indices.emplace_back(i); + } + auto read_status = file_reader->GetRecordBatchReader(row_group_indices, column_indices, ¤t_record_batch_reader); + if (!read_status.ok()) + throw DB::ParsingException(ErrorCodes::CANNOT_READ_ALL_DATA, "Error while reading Parquet data: {}", read_status.ToString()); + } if (is_stopped) return {}; - while (row_group_current < row_group_total && skip_row_groups.contains(row_group_current)) - ++row_group_current; - - if (row_group_current >= row_group_total) - return res; - - std::shared_ptr table; - - std::unique_ptr<::arrow::RecordBatchReader> rbr; - std::vector row_group_indices { row_group_current }; - arrow::Status get_batch_reader_status = file_reader->GetRecordBatchReader(row_group_indices, column_indices, &rbr); - - if (!get_batch_reader_status.ok()) + auto batch = current_record_batch_reader->Next(); + if (!batch.ok()) + { throw ParsingException(ErrorCodes::CANNOT_READ_ALL_DATA, "Error while reading Parquet data: {}", - get_batch_reader_status.ToString()); - - arrow::Status read_status = rbr->ReadAll(&table); - - if (!read_status.ok()) - throw ParsingException(ErrorCodes::CANNOT_READ_ALL_DATA, "Error while reading Parquet data: {}", read_status.ToString()); - - ++row_group_current; - - arrow_column_to_ch_column->arrowTableToCHChunk(res, table, table->num_rows()); + batch.status().ToString()); + } + if (*batch) + { + auto tmp_table = arrow::Table::FromRecordBatches({*batch}); + arrow_column_to_ch_column->arrowTableToCHChunk(res, *tmp_table, (*tmp_table)->num_rows()); + } + else + { + current_record_batch_reader.reset(); + file_reader.reset(); + return {}; + } /// If defaults_for_omitted_fields is true, calculate the default values from default expression for omitted fields. /// Otherwise fill the missing columns with zero values of its type. diff --git a/src/Processors/Formats/Impl/ParquetBlockInputFormat.h b/src/Processors/Formats/Impl/ParquetBlockInputFormat.h index 25814090587..37878a94dd9 100644 --- a/src/Processors/Formats/Impl/ParquetBlockInputFormat.h +++ b/src/Processors/Formats/Impl/ParquetBlockInputFormat.h @@ -8,7 +8,7 @@ namespace parquet::arrow { class FileReader; } -namespace arrow { class Buffer; } +namespace arrow { class Buffer; class RecordBatchReader;} namespace DB { @@ -46,6 +46,7 @@ private: BlockMissingValues block_missing_values; const FormatSettings format_settings; const std::unordered_set & skip_row_groups; + std::shared_ptr current_record_batch_reader; std::atomic is_stopped{0}; }; diff --git a/src/Processors/LimitTransform.h b/src/Processors/LimitTransform.h index 8865eab732a..0d8c5f4ea47 100644 --- a/src/Processors/LimitTransform.h +++ b/src/Processors/LimitTransform.h @@ -15,7 +15,7 @@ namespace DB /// /// always_read_till_end - read all data from input ports even if limit was reached. /// with_ties, description - implementation of LIMIT WITH TIES. It works only for single port. 
-class LimitTransform : public IProcessor +class LimitTransform final : public IProcessor { private: UInt64 limit; diff --git a/src/Processors/OffsetTransform.h b/src/Processors/OffsetTransform.h index e67685362aa..d24440d68ea 100644 --- a/src/Processors/OffsetTransform.h +++ b/src/Processors/OffsetTransform.h @@ -10,7 +10,7 @@ namespace DB /// Implementation for OFFSET N (without limit) /// This processor support multiple inputs and outputs (the same number). /// Each pair of input and output port works independently. -class OffsetTransform : public IProcessor +class OffsetTransform final : public IProcessor { private: UInt64 offset; diff --git a/src/Processors/QueryPlan/DistributedCreateLocalPlan.cpp b/src/Processors/QueryPlan/DistributedCreateLocalPlan.cpp index 166b021b5ce..2bb29a0b6fe 100644 --- a/src/Processors/QueryPlan/DistributedCreateLocalPlan.cpp +++ b/src/Processors/QueryPlan/DistributedCreateLocalPlan.cpp @@ -1,9 +1,12 @@ #include + +#include "config_version.h" #include -#include +#include #include #include #include +#include namespace DB { @@ -40,48 +43,58 @@ std::unique_ptr createLocalPlan( const Block & header, ContextPtr context, QueryProcessingStage::Enum processed_stage, - UInt32 shard_num, - UInt32 shard_count, + size_t shard_num, + size_t shard_count, size_t replica_num, size_t replica_count, - std::shared_ptr coordinator) + std::shared_ptr coordinator, + UUID group_uuid) { checkStackSize(); auto query_plan = std::make_unique(); + auto new_context = Context::createCopy(context); /// Do not apply AST optimizations, because query /// is already optimized and some optimizations /// can be applied only for non-distributed tables /// and we can produce query, inconsistent with remote plans. auto select_query_options = SelectQueryOptions(processed_stage) - .setShardInfo(static_cast(shard_num), static_cast(shard_count)) + .setShardInfo(static_cast(shard_num), static_cast(shard_count)) .ignoreASTOptimizations(); - auto update_interpreter = [&](auto & interpreter) + /// Many things are needed for coordination + /// during reading with parallel replicas + if (coordinator) { - interpreter.setProperClientInfo(replica_num, replica_count); - if (coordinator) + new_context->parallel_reading_coordinator = coordinator; + new_context->getClientInfo().interface = ClientInfo::Interface::LOCAL; + new_context->getClientInfo().collaborate_with_initiator = true; + new_context->getClientInfo().query_kind = ClientInfo::QueryKind::SECONDARY_QUERY; + new_context->getClientInfo().count_participating_replicas = replica_count; + new_context->getClientInfo().number_of_current_replica = replica_num; + new_context->getClientInfo().connection_client_version_major = DBMS_VERSION_MAJOR; + new_context->getClientInfo().connection_client_version_minor = DBMS_VERSION_MINOR; + new_context->getClientInfo().connection_tcp_protocol_version = DBMS_TCP_PROTOCOL_VERSION; + new_context->setParallelReplicasGroupUUID(group_uuid); + new_context->setMergeTreeAllRangesCallback([coordinator](InitialAllRangesAnnouncement announcement) { - interpreter.setMergeTreeReadTaskCallbackAndClientInfo([coordinator](PartitionReadRequest request) -> std::optional - { - return coordinator->handleRequest(request); - }); - } - }; + coordinator->handleInitialAllRangesAnnouncement(announcement); + }); + new_context->setMergeTreeReadTaskCallback([coordinator](ParallelReadRequest request) -> std::optional + { + return coordinator->handleRequest(request); + }); + } if (context->getSettingsRef().allow_experimental_analyzer) { - auto interpreter =
InterpreterSelectQueryAnalyzer(query_ast, context, select_query_options); - update_interpreter(interpreter); + auto interpreter = InterpreterSelectQueryAnalyzer(query_ast, new_context, select_query_options); query_plan = std::make_unique(std::move(interpreter).extractQueryPlan()); } else { - auto interpreter = InterpreterSelectQuery( - query_ast, context, - select_query_options); - update_interpreter(interpreter); + auto interpreter = InterpreterSelectQuery(query_ast, new_context, select_query_options); interpreter.buildQueryPlan(*query_plan); } diff --git a/src/Processors/QueryPlan/DistributedCreateLocalPlan.h b/src/Processors/QueryPlan/DistributedCreateLocalPlan.h index b55cedf9871..16bf1c565ff 100644 --- a/src/Processors/QueryPlan/DistributedCreateLocalPlan.h +++ b/src/Processors/QueryPlan/DistributedCreateLocalPlan.h @@ -3,6 +3,7 @@ #include #include #include +#include #include namespace DB @@ -13,10 +14,11 @@ std::unique_ptr createLocalPlan( const Block & header, ContextPtr context, QueryProcessingStage::Enum processed_stage, - UInt32 shard_num, - UInt32 shard_count, + size_t shard_num, + size_t shard_count, size_t replica_num, size_t replica_count, - std::shared_ptr coordinator); + std::shared_ptr coordinator, + UUID group_uuid = UUIDHelpers::Nil); } diff --git a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp index bdf8f24f9d6..301c3bca571 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp @@ -1005,8 +1005,6 @@ InputOrderInfoPtr buildInputOrderInfo(SortingStep & sorting, QueryPlan::Node & n if (auto * reading = typeid_cast(reading_node->step.get())) { - - //std::cerr << "---- optimizeReadInOrder found mt" << std::endl; auto order_info = buildInputOrderInfo( reading, fixed_columns, diff --git a/src/Processors/QueryPlan/Optimizations/optimizeTree.cpp b/src/Processors/QueryPlan/Optimizations/optimizeTree.cpp index 13095dfad47..0378c5ef416 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeTree.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeTree.cpp @@ -1,8 +1,9 @@ -#include -#include #include #include +#include +#include #include + #include namespace DB diff --git a/src/Processors/QueryPlan/PartsSplitter.cpp b/src/Processors/QueryPlan/PartsSplitter.cpp index e7ea7a4e34b..917bea4c884 100644 --- a/src/Processors/QueryPlan/PartsSplitter.cpp +++ b/src/Processors/QueryPlan/PartsSplitter.cpp @@ -13,6 +13,7 @@ #include #include #include +#include using namespace DB; @@ -77,7 +78,7 @@ std::pair, std::vector> split(RangesInDat RangeEnd, }; - bool operator<(const PartsRangesIterator & other) const { return std::tie(value, event) > std::tie(other.value, other.event); } + [[ maybe_unused ]] bool operator<(const PartsRangesIterator & other) const { return std::tie(value, event) > std::tie(other.value, other.event); } Values value; MarkRangeWithPartIdx range; diff --git a/src/Processors/QueryPlan/QueryPlan.cpp b/src/Processors/QueryPlan/QueryPlan.cpp index e817a9ef8a9..48a9fbd7a34 100644 --- a/src/Processors/QueryPlan/QueryPlan.cpp +++ b/src/Processors/QueryPlan/QueryPlan.cpp @@ -166,6 +166,7 @@ QueryPipelineBuilderPtr QueryPlan::buildQueryPipeline( QueryPipelineBuilderPtr last_pipeline; + std::stack stack; stack.push(Frame{.node = root}); @@ -198,6 +199,13 @@ QueryPipelineBuilderPtr QueryPlan::buildQueryPipeline( last_pipeline->setProcessListElement(build_pipeline_settings.process_list_element); 
last_pipeline->addResources(std::move(resources)); + /// This is related to parallel replicas. + /// Not to let the remote sources starve for CPU we create an + /// explicit dependency between processors which read from local replica + /// and ones that receive data from remote replicas and constantly answer + /// to coordination packets. + last_pipeline->connectDependencies(); + return last_pipeline; } diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 22245b82966..cca8e5297ee 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -1,16 +1,18 @@ -#include -#include -#include -#include -#include -#include -#include +#include + +#include +#include +#include +#include +#include "Storages/MergeTree/RequestResponse.h" +#include #include #include -#include +#include #include #include #include +#include #include #include #include @@ -25,18 +27,22 @@ #include #include #include +#include #include #include #include -#include #include #include #include +#include #include -#include -#include -#include -#include + +#include +#include +#include +#include +#include +#include namespace ProfileEvents { @@ -114,6 +120,7 @@ ReadFromMergeTree::ReadFromMergeTree( , max_block_numbers_to_read(std::move(max_block_numbers_to_read_)) , log(log_) , analyzed_result_ptr(analyzed_result_ptr_) + , is_parallel_reading_from_replicas(enable_parallel_reading) { if (sample_factor_column_queried) { @@ -123,8 +130,11 @@ ReadFromMergeTree::ReadFromMergeTree( output_stream->header.insert({type->createColumn(), type, "_sample_factor"}); } - if (enable_parallel_reading) + if (is_parallel_reading_from_replicas) + { + all_ranges_callback = context->getMergeTreeAllRangesCallback(); read_task_callback = context->getMergeTreeReadTaskCallback(); + } const auto & settings = context->getSettingsRef(); if (settings.max_streams_for_merge_tree_reading) @@ -173,6 +183,80 @@ ReadFromMergeTree::ReadFromMergeTree( } } + +Pipe ReadFromMergeTree::readFromPoolParallelReplicas( + RangesInDataParts parts_with_range, + Names required_columns, + size_t max_streams, + size_t min_marks_for_concurrent_read, + bool use_uncompressed_cache +) +{ + const auto & client_info = context->getClientInfo(); + auto extension = ParallelReadingExtension + { + .all_callback = all_ranges_callback.value(), + .callback = read_task_callback.value(), + .count_participating_replicas = client_info.count_participating_replicas, + .number_of_current_replica = client_info.number_of_current_replica, + .colums_to_read = required_columns + }; + + /// We have a special logic for local replica. It has to read less data, because in some cases it should + /// merge states of aggregate functions or do some other important stuff other than reading from Disk. 
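+ /// For every non-local replica the task size in marks is therefore scaled up below + /// by the parallel_replicas_single_task_marks_count_multiplier setting.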
+ auto is_local_replica = context->getClientInfo().interface == ClientInfo::Interface::LOCAL; + if (!is_local_replica) + min_marks_for_concurrent_read = static_cast(min_marks_for_concurrent_read * context->getSettingsRef().parallel_replicas_single_task_marks_count_multiplier); + + auto pool = std::make_shared( + storage_snapshot, + max_streams, + extension, + parts_with_range, + prewhere_info, + required_columns, + virt_column_names, + min_marks_for_concurrent_read + ); + + Pipes pipes; + const auto & settings = context->getSettingsRef(); + size_t total_rows = parts_with_range.getRowsCountAllParts(); + + for (size_t i = 0; i < max_streams; ++i) + { + auto algorithm = std::make_unique( + i, pool, min_marks_for_concurrent_read, max_block_size, + settings.preferred_block_size_bytes, settings.preferred_max_column_in_block_size_bytes, + data, storage_snapshot, use_uncompressed_cache, + prewhere_info, actions_settings, reader_settings, virt_column_names); + + auto source = std::make_shared(std::move(algorithm)); + + /// Set the approximate number of rows for the first source only. + /// In case of parallel processing on replicas do not set approximate rows at all, + /// because the value will be identical on every replica and will be accounted + /// multiple times (settings.max_parallel_replicas times more) + if (i == 0 && !client_info.collaborate_with_initiator) + source->addTotalRowsApprox(total_rows); + + pipes.emplace_back(std::move(source)); + + /// Add a special dependency transform which will be connected later with + /// all RemoteSources through a simple scheduler (ResizeProcessor) + if (context->getClientInfo().interface == ClientInfo::Interface::LOCAL) + { + pipes.back().addSimpleTransform([&](const Block & header) -> ProcessorPtr + { + return std::make_shared(header, context->getParallelReplicasGroupUUID()); + }); + } + } + + return Pipe::unitePipes(std::move(pipes)); +} + + Pipe ReadFromMergeTree::readFromPool( RangesInDataParts parts_with_range, Names required_columns, @@ -181,22 +265,25 @@ Pipe ReadFromMergeTree::readFromPool( bool use_uncompressed_cache) { Pipes pipes; - size_t sum_marks = 0; - size_t total_rows = 0; - - for (const auto & part : parts_with_range) - { - sum_marks += part.getMarksCount(); - total_rows += part.getRowsCount(); - } + size_t sum_marks = parts_with_range.getMarksCountAllParts(); + size_t total_rows = parts_with_range.getRowsCountAllParts(); if (query_info.limit > 0 && query_info.limit < total_rows) total_rows = query_info.limit; const auto & settings = context->getSettingsRef(); - const auto & client_info = context->getClientInfo(); MergeTreeReadPool::BackoffSettings backoff_settings(settings); + /// Round min_marks_for_concurrent_read up to the nearest multiple of max_block_size expressed in marks. + /// If granularity is adaptive, this rounding doesn't make sense. + /// Maybe it will make sense to add a setting `max_block_size_bytes` + if (max_block_size && !data.canUseAdaptiveGranularity()) + { + size_t fixed_index_granularity = data.getSettings()->index_granularity; + min_marks_for_concurrent_read = (min_marks_for_concurrent_read * fixed_index_granularity + max_block_size - 1) + / max_block_size * max_block_size / fixed_index_granularity; + } + auto pool = std::make_shared( max_streams, sum_marks, @@ -215,34 +302,17 @@ Pipe ReadFromMergeTree::readFromPool( for (size_t i = 0; i < max_streams; ++i) { - std::optional extension; - if (read_task_callback) - { - extension = ParallelReadingExtension - { - .callback = read_task_callback.value(), - .count_participating_replicas =
client_info.count_participating_replicas, - .number_of_current_replica = client_info.number_of_current_replica, - .colums_to_read = required_columns - }; - } - auto algorithm = std::make_unique( i, pool, min_marks_for_concurrent_read, max_block_size, settings.preferred_block_size_bytes, settings.preferred_max_column_in_block_size_bytes, data, storage_snapshot, use_uncompressed_cache, - prewhere_info, actions_settings, reader_settings, virt_column_names, std::move(extension)); + prewhere_info, actions_settings, reader_settings, virt_column_names); auto source = std::make_shared(std::move(algorithm)); - /// Set the approximate number of rows for the first source only - /// In case of parallel processing on replicas do not set approximate rows at all. - /// Because the value will be identical on every replicas and will be accounted - /// multiple times (settings.max_parallel_replicas times more) - if (i == 0 && !client_info.collaborate_with_initiator) + if (i == 0) source->addTotalRowsApprox(total_rows); - pipes.emplace_back(std::move(source)); } @@ -257,21 +327,9 @@ ProcessorPtr ReadFromMergeTree::createSource( const RangesInDataPart & part, const Names & required_columns, bool use_uncompressed_cache, - bool has_limit_below_one_block) + bool has_limit_below_one_block, + MergeTreeInOrderReadPoolParallelReplicasPtr pool) { - const auto & client_info = context->getClientInfo(); - std::optional extension; - if (read_task_callback) - { - extension = ParallelReadingExtension - { - .callback = read_task_callback.value(), - .count_participating_replicas = client_info.count_participating_replicas, - .number_of_current_replica = client_info.number_of_current_replica, - .colums_to_read = required_columns - }; - } - auto total_rows = part.getRowsCount(); if (query_info.limit > 0 && query_info.limit < total_rows) total_rows = query_info.limit; @@ -281,12 +339,12 @@ ProcessorPtr ReadFromMergeTree::createSource( /// In this case we won't set approximate rows, because it will be accounted multiple times. /// Also do not count amount of read rows if we read in order of sorting key, /// because we don't know actual amount of read rows in case when limit is set. - bool set_rows_approx = !extension.has_value() && !reader_settings.read_in_order; + bool set_rows_approx = !is_parallel_reading_from_replicas && !reader_settings.read_in_order; auto algorithm = std::make_unique( data, storage_snapshot, part.data_part, max_block_size, preferred_block_size_bytes, preferred_max_column_in_block_size_bytes, required_columns, part.ranges, use_uncompressed_cache, prewhere_info, - actions_settings, reader_settings, virt_column_names, part.part_index_in_query, has_limit_below_one_block, std::move(extension)); + actions_settings, reader_settings, pool, virt_column_names, part.part_index_in_query, has_limit_below_one_block); auto source = std::make_shared(std::move(algorithm)); @@ -301,7 +359,8 @@ Pipe ReadFromMergeTree::readInOrder( Names required_columns, ReadType read_type, bool use_uncompressed_cache, - UInt64 limit) + UInt64 limit, + MergeTreeInOrderReadPoolParallelReplicasPtr pool) { Pipes pipes; /// For reading in order it makes sense to read only @@ -311,8 +370,8 @@ Pipe ReadFromMergeTree::readInOrder( for (const auto & part : parts_with_range) { auto source = read_type == ReadType::InReverseOrder - ? createSource(part, required_columns, use_uncompressed_cache, has_limit_below_one_block) - : createSource(part, required_columns, use_uncompressed_cache, has_limit_below_one_block); + ? 
createSource(part, required_columns, use_uncompressed_cache, has_limit_below_one_block, pool) + : createSource(part, required_columns, use_uncompressed_cache, has_limit_below_one_block, pool); pipes.emplace_back(std::move(source)); } @@ -334,11 +393,14 @@ Pipe ReadFromMergeTree::read( RangesInDataParts parts_with_range, Names required_columns, ReadType read_type, size_t max_streams, size_t min_marks_for_concurrent_read, bool use_uncompressed_cache) { + if (read_type == ReadType::ParallelReplicas) + return readFromPoolParallelReplicas(parts_with_range, required_columns, max_streams, min_marks_for_concurrent_read, use_uncompressed_cache); + if (read_type == ReadType::Default && max_streams > 1) return readFromPool(parts_with_range, required_columns, max_streams, min_marks_for_concurrent_read, use_uncompressed_cache); - auto pipe = readInOrder(parts_with_range, required_columns, read_type, use_uncompressed_cache, 0); + auto pipe = readInOrder(parts_with_range, required_columns, read_type, use_uncompressed_cache, /*limit */0, /*pool*/nullptr); /// Use ConcatProcessor to concat sources together. /// It is needed to read in parts order (and so in PK order) if single thread is used. @@ -425,6 +487,8 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreams( const auto & settings = context->getSettingsRef(); const auto data_settings = data.getSettings(); + LOG_TRACE(log, "Spreading mark ranges among streams (default reading)"); + PartRangesReadInfo info(parts_with_ranges, settings, *data_settings); if (0 == info.sum_marks) @@ -438,7 +502,9 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreams( num_streams = std::max((info.sum_marks + info.min_marks_for_concurrent_read - 1) / info.min_marks_for_concurrent_read, parts_with_ranges.size()); } - return read(std::move(parts_with_ranges), column_names, ReadType::Default, + auto read_type = is_parallel_reading_from_replicas ? ReadType::ParallelReplicas : ReadType::Default; + + return read(std::move(parts_with_ranges), column_names, read_type, num_streams, info.min_marks_for_concurrent_read, info.use_uncompressed_cache); } @@ -459,6 +525,8 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( const auto & settings = context->getSettingsRef(); const auto data_settings = data.getSettings(); + LOG_TRACE(log, "Spreading ranges among streams with order"); + PartRangesReadInfo info(parts_with_ranges, settings, *data_settings); Pipes res; @@ -534,7 +602,41 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( const size_t min_marks_per_stream = (info.sum_marks - 1) / requested_num_streams + 1; bool need_preliminary_merge = (parts_with_ranges.size() > settings.read_in_order_two_level_merge_threshold); - Pipes pipes; + std::vector splitted_parts_and_ranges; + splitted_parts_and_ranges.reserve(requested_num_streams); + + const auto read_type = input_order_info->direction == 1 + ? ReadFromMergeTree::ReadType::InOrder + : ReadFromMergeTree::ReadType::InReverseOrder; + + MergeTreeInOrderReadPoolParallelReplicasPtr pool; + + if (is_parallel_reading_from_replicas) + { + const auto & client_info = context->getClientInfo(); + auto extension = ParallelReadingExtension + { + .all_callback = all_ranges_callback.value(), + .callback = read_task_callback.value(), + .count_participating_replicas = client_info.count_participating_replicas, + .number_of_current_replica = client_info.number_of_current_replica, + .colums_to_read = column_names + }; + + /// We have a special logic for local replica. 
It has to read less data, because in some cases it should + /// merge states of aggregate functions or do some other important stuff other than reading from Disk. + auto is_local_replica = context->getClientInfo().interface == ClientInfo::Interface::LOCAL; + auto min_marks_for_concurrent_read = info.min_marks_for_concurrent_read; + if (!is_local_replica) + min_marks_for_concurrent_read = static_cast(min_marks_for_concurrent_read * settings.parallel_replicas_single_task_marks_count_multiplier); + + pool = std::make_shared( + parts_with_ranges, + extension, + read_type == ReadFromMergeTree::ReadType::InOrder ? CoordinationMode::WithOrder : CoordinationMode::ReverseOrder, + min_marks_for_concurrent_read); + } + for (size_t i = 0; i < requested_num_streams && !parts_with_ranges.empty(); ++i) { @@ -602,12 +704,14 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( new_parts.emplace_back(part.data_part, part.part_index_in_query, std::move(ranges_to_get_from_part)); } - auto read_type = input_order_info->direction == 1 - ? ReadFromMergeTree::ReadType::InOrder - : ReadFromMergeTree::ReadType::InReverseOrder; + splitted_parts_and_ranges.emplace_back(std::move(new_parts)); + } - pipes.emplace_back(readInOrder(std::move(new_parts), column_names, read_type, - info.use_uncompressed_cache, input_order_info->limit)); + Pipes pipes; + for (auto & item : splitted_parts_and_ranges) + { + pipes.emplace_back(readInOrder(std::move(item), column_names, read_type, + info.use_uncompressed_cache, input_order_info->limit, pool)); } Block pipe_header; @@ -758,7 +862,7 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsFinal( /// If do_not_merge_across_partitions_select_final is true and num_streams > 1 /// we will store lonely parts with level > 0 to use parallel select on them. - std::vector lonely_parts; + RangesInDataParts lonely_parts; size_t sum_marks_in_lonely_parts = 0; for (size_t range_index = 0; range_index < parts_to_merge_ranges.size() - 1; ++range_index) @@ -1265,6 +1369,17 @@ void ReadFromMergeTree::initializePipeline(QueryPipelineBuilder & pipeline, cons const auto & input_order_info = query_info.getInputOrderInfo(); + /// Construct a proper coordinator + if (input_order_info && is_parallel_reading_from_replicas && context->getClientInfo().interface == ClientInfo::Interface::LOCAL) + { + assert(context->parallel_reading_coordinator); + auto mode = input_order_info->direction == 1 ? CoordinationMode::WithOrder : CoordinationMode::ReverseOrder; + context->parallel_reading_coordinator->setMode(mode); + } + + if (final && is_parallel_reading_from_replicas) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Final modifier is not supported with parallel replicas"); + if (final) { /// Add columns needed to calculate the sorting expression and the sign. 
@@ -1406,6 +1521,8 @@ static const char * readTypeToString(ReadFromMergeTree::ReadType type) return "InOrder"; case ReadFromMergeTree::ReadType::InReverseOrder: return "InReverseOrder"; + case ReadFromMergeTree::ReadType::ParallelReplicas: + return "Parallel"; } UNREACHABLE(); diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.h b/src/Processors/QueryPlan/ReadFromMergeTree.h index a3cea2a8afe..8b2eca5e08e 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.h +++ b/src/Processors/QueryPlan/ReadFromMergeTree.h @@ -3,6 +3,8 @@ #include #include #include +#include +#include namespace DB { @@ -11,7 +13,7 @@ using PartitionIdToMaxBlock = std::unordered_map<String, Int64>; class Pipe; -using MergeTreeReadTaskCallback = std::function<std::optional<PartitionReadResponse>(PartitionReadRequest)>; +using MergeTreeReadTaskCallback = std::function<std::optional<ParallelReadResponse>(ParallelReadRequest)>; struct MergeTreeDataSelectSamplingData { @@ -68,6 +70,10 @@ public: /// The same as InOrder, but in reverse order. /// For every part, read ranges and granules from end to begin. Also add ReverseTransform. InReverseOrder, + /// A special type of reading where every replica + /// talks to a remote coordinator (located on the initiator node), + /// which spreads marks and parts across the replicas. + ParallelReplicas, }; struct AnalysisResult @@ -212,10 +218,11 @@ private: Pipe read(RangesInDataParts parts_with_range, Names required_columns, ReadType read_type, size_t max_streams, size_t min_marks_for_concurrent_read, bool use_uncompressed_cache); Pipe readFromPool(RangesInDataParts parts_with_ranges, Names required_columns, size_t max_streams, size_t min_marks_for_concurrent_read, bool use_uncompressed_cache); - Pipe readInOrder(RangesInDataParts parts_with_range, Names required_columns, ReadType read_type, bool use_uncompressed_cache, UInt64 limit); + Pipe readFromPoolParallelReplicas(RangesInDataParts parts_with_ranges, Names required_columns, size_t max_streams, size_t min_marks_for_concurrent_read, bool use_uncompressed_cache); + Pipe readInOrder(RangesInDataParts parts_with_range, Names required_columns, ReadType read_type, bool use_uncompressed_cache, UInt64 limit, MergeTreeInOrderReadPoolParallelReplicasPtr pool); template <typename TSource> - ProcessorPtr createSource(const RangesInDataPart & part, const Names & required_columns, bool use_uncompressed_cache, bool has_limit_below_one_block); + ProcessorPtr createSource(const RangesInDataPart & part, const Names & required_columns, bool use_uncompressed_cache, bool has_limit_below_one_block, MergeTreeInOrderReadPoolParallelReplicasPtr pool); Pipe spreadMarkRangesAmongStreams( RangesInDataParts && parts_with_ranges, @@ -236,6 +243,8 @@ private: ReadFromMergeTree::AnalysisResult getAnalysisResult() const; MergeTreeDataSelectAnalysisResultPtr analyzed_result_ptr; + bool is_parallel_reading_from_replicas; + std::optional<MergeTreeAllRangesCallback> all_ranges_callback; std::optional<MergeTreeReadTaskCallback> read_task_callback; }; diff --git a/src/Processors/QueryPlan/ReadFromRemote.cpp b/src/Processors/QueryPlan/ReadFromRemote.cpp index 103f2734b06..9e5ecc791dc 100644 --- a/src/Processors/QueryPlan/ReadFromRemote.cpp +++ b/src/Processors/QueryPlan/ReadFromRemote.cpp @@ -9,10 +9,13 @@ #include #include #include +#include #include #include #include +#include "Common/logger_useful.h" #include +#include #include #include #include @@ -180,7 +183,8 @@ void ReadFromRemote::addLazyPipe(Pipes & pipes, const ClusterProxy::SelectStream if (try_results.empty() || local_delay < max_remote_delay) { - auto plan = createLocalPlan(query, header, context, stage, shard.shard_info.shard_num, shard_count, 0, 0, 
/*coordinator=*/nullptr); + auto plan = createLocalPlan( + query, header, context, stage, shard.shard_info.shard_num, shard_count, 0, 0, /*coordinator=*/nullptr); return std::move(*plan->buildQueryPipeline( QueryPlanOptimizationSettings::fromContext(context), @@ -231,7 +235,7 @@ void ReadFromRemote::addPipe(Pipes & pipes, const ClusterProxy::SelectStreamFact std::shared_ptr<RemoteQueryExecutor> remote_query_executor; remote_query_executor = std::make_shared<RemoteQueryExecutor>( - shard.shard_info.pool, query_string, shard.header, context, throttler, scalars, external_tables, stage); + shard.shard_info.pool, query_string, output_stream->header, context, throttler, scalars, external_tables, stage); remote_query_executor->setLogger(log); remote_query_executor->setPoolMode(PoolMode::GET_MANY); @@ -265,8 +269,9 @@ void ReadFromRemote::initializePipeline(QueryPipelineBuilder & pipeline, const B ReadFromParallelRemoteReplicasStep::ReadFromParallelRemoteReplicasStep( + ASTPtr query_ast_, + Cluster::ShardInfo shard_info_, ParallelReplicasReadingCoordinatorPtr coordinator_, - ClusterProxy::SelectStreamFactory::Shard shard_, Block header_, QueryProcessingStage::Enum stage_, StorageID main_table_, @@ -276,10 +281,12 @@ ReadFromParallelRemoteReplicasStep::ReadFromParallelRemoteReplicasStep( Scalars scalars_, Tables external_tables_, Poco::Logger * log_, - std::shared_ptr<const StorageLimitsList> storage_limits_) + std::shared_ptr<const StorageLimitsList> storage_limits_, + UUID uuid_) : ISourceStep(DataStream{.header = std::move(header_)}) + , shard_info(shard_info_) + , query_ast(query_ast_) , coordinator(std::move(coordinator_)) - , shard(std::move(shard_)) , stage(std::move(stage_)) , main_table(std::move(main_table_)) , table_func_ptr(table_func_ptr_) @@ -289,10 +296,11 @@ ReadFromParallelRemoteReplicasStep::ReadFromParallelRemoteReplicasStep( , external_tables{external_tables_} , storage_limits(std::move(storage_limits_)) , log(log_) + , uuid(uuid_) { std::vector<String> description; - for (const auto & address : shard.shard_info.all_addresses) + for (const auto & address : shard_info.all_addresses) if (!address.is_local) description.push_back(fmt::format("Replica: {}", address.host_name)); @@ -312,28 +320,46 @@ void ReadFromParallelRemoteReplicasStep::enforceAggregationInOrder() void ReadFromParallelRemoteReplicasStep::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) { Pipes pipes; - const Settings & current_settings = context->getSettingsRef(); auto timeouts = ConnectionTimeouts::getTCPTimeoutsWithFailover(current_settings); - for (size_t replica_num = 0; replica_num < shard.shard_info.getAllNodeCount(); ++replica_num) + size_t all_replicas_count = current_settings.max_parallel_replicas; + if (all_replicas_count > shard_info.all_addresses.size()) { - if (shard.shard_info.all_addresses[replica_num].is_local) + LOG_INFO(&Poco::Logger::get("ReadFromParallelRemoteReplicasStep"), + "The number of replicas requested ({}) is bigger than the real number available in the cluster ({}). " + "Will use the latter number to execute the query.", current_settings.max_parallel_replicas, shard_info.all_addresses.size()); + all_replicas_count = shard_info.all_addresses.size(); + } + + /// The requested number of replicas to read from could be less + /// than the total number of replicas in the shard, + /// and we have to pick only "remote" ones. + /// That's why the loop below looks the way it does.
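Taken together, the cap above and the loop shown next implement a simple numbering scheme: slot 0 is reserved for the local replica, and each remote pipe takes the next slot. A hedged, self-contained sketch of that scheme (the type and function are invented for illustration):

    #include <cstddef>
    #include <vector>

    struct Address { bool is_local = false; };

    /// Pick remote replicas until we have all_replicas_count - 1 of them;
    /// the pipe created for the i-th pick becomes replica number i + 1,
    /// because number 0 belongs to the local replica.
    static std::vector<size_t> pickRemoteReplicas(const std::vector<Address> & addresses, size_t all_replicas_count)
    {
        std::vector<size_t> picked;
        for (size_t i = 0; i < addresses.size() && picked.size() + 1 < all_replicas_count; ++i)
            if (!addresses[i].is_local)
                picked.push_back(i);
        return picked;
    }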
+ size_t replica_num = 0; + while (pipes.size() != all_replicas_count - 1) + { + if (shard_info.all_addresses[replica_num].is_local) + { + ++replica_num; continue; + } IConnections::ReplicaInfo replica_info { - .all_replicas_count = shard.shard_info.getAllNodeCount(), - .number_of_current_replica = replica_num + .all_replicas_count = all_replicas_count, + /// Replica 0 is always treated as the local one + .number_of_current_replica = pipes.size() + 1 }; - auto pool = shard.shard_info.per_replica_pools[replica_num]; + auto pool = shard_info.per_replica_pools[replica_num]; assert(pool); auto pool_with_failover = std::make_shared<ConnectionPoolWithFailover>( ConnectionPoolPtrs{pool}, current_settings.load_balancing); addPipeForSingeReplica(pipes, std::move(pool_with_failover), replica_info); + ++replica_num; } auto pipe = Pipe::unitePipes(std::move(pipes)); @@ -355,25 +381,22 @@ void ReadFromParallelRemoteReplicasStep::addPipeForSingeReplica(Pipes & pipes, s if (stage == QueryProcessingStage::Complete) { - add_totals = shard.query->as<ASTSelectQuery &>().group_by_with_totals; + add_totals = query_ast->as<ASTSelectQuery &>().group_by_with_totals; add_extremes = context->getSettingsRef().extremes; } - String query_string = formattedAST(shard.query); + String query_string = formattedAST(query_ast); - scalars["_shard_num"] - = Block{{DataTypeUInt32().createColumnConst(1, shard.shard_info.shard_num), std::make_shared<DataTypeUInt32>(), "_shard_num"}}; + assert(stage != QueryProcessingStage::Complete); + assert(output_stream); auto remote_query_executor = std::make_shared<RemoteQueryExecutor>( - pool, query_string, shard.header, context, throttler, scalars, external_tables, stage, + pool, query_string, output_stream->header, context, throttler, scalars, external_tables, stage, RemoteQueryExecutor::Extension{.parallel_reading_coordinator = coordinator, .replica_info = std::move(replica_info)}); remote_query_executor->setLogger(log); - if (!table_func_ptr) - remote_query_executor->setMainTable(main_table); - - pipes.emplace_back(createRemoteSourcePipe(std::move(remote_query_executor), add_agg_info, add_totals, add_extremes, async_read)); + pipes.emplace_back(createRemoteSourcePipe(std::move(remote_query_executor), add_agg_info, add_totals, add_extremes, async_read, uuid)); addConvertingActions(pipes.back(), output_stream->header); } diff --git a/src/Processors/QueryPlan/ReadFromRemote.h b/src/Processors/QueryPlan/ReadFromRemote.h index 60a7cd90f3f..e1979ee1aaa 100644 --- a/src/Processors/QueryPlan/ReadFromRemote.h +++ b/src/Processors/QueryPlan/ReadFromRemote.h @@ -6,6 +6,7 @@ #include #include #include +#include "Core/UUID.h" namespace DB { @@ -45,18 +46,13 @@ public: private: ClusterProxy::SelectStreamFactory::Shards shards; QueryProcessingStage::Enum stage; - StorageID main_table; ASTPtr table_func_ptr; - ContextMutablePtr context; - ThrottlerPtr throttler; Scalars scalars; Tables external_tables; - std::shared_ptr<const StorageLimitsList> storage_limits; - Poco::Logger * log; UInt32 shard_count; @@ -69,8 +65,9 @@ class ReadFromParallelRemoteReplicasStep : public ISourceStep { public: ReadFromParallelRemoteReplicasStep( + ASTPtr query_ast_, + Cluster::ShardInfo shard_info, ParallelReplicasReadingCoordinatorPtr coordinator_, - ClusterProxy::SelectStreamFactory::Shard shard, Block header_, QueryProcessingStage::Enum stage_, StorageID main_table_, @@ -80,7 +77,8 @@ public: Scalars scalars_, Tables external_tables_, Poco::Logger * log_, - std::shared_ptr<const StorageLimitsList> storage_limits_); + std::shared_ptr<const StorageLimitsList> storage_limits_, + UUID uuid); String getName() const override { return "ReadFromRemoteParallelReplicas"; } @@ -93,22 +91,20 @@ private: void 
addPipeForSingeReplica(Pipes & pipes, std::shared_ptr<ConnectionPoolWithFailover> pool, IConnections::ReplicaInfo replica_info); + Cluster::ShardInfo shard_info; + ASTPtr query_ast; ParallelReplicasReadingCoordinatorPtr coordinator; - ClusterProxy::SelectStreamFactory::Shard shard; QueryProcessingStage::Enum stage; - StorageID main_table; ASTPtr table_func_ptr; - ContextMutablePtr context; - ThrottlerPtr throttler; Scalars scalars; Tables external_tables; std::shared_ptr<const StorageLimitsList> storage_limits; - Poco::Logger * log; + UUID uuid; }; } diff --git a/src/Processors/QueueBuffer.h b/src/Processors/QueueBuffer.h index 6856e214823..0736d6fbf43 100644 --- a/src/Processors/QueueBuffer.h +++ b/src/Processors/QueueBuffer.h @@ -10,7 +10,7 @@ namespace DB /** Reads all data into queue. * After all data has been read - output it in the same order. */ -class QueueBuffer : public IAccumulatingTransform +class QueueBuffer final : public IAccumulatingTransform { private: std::queue<Chunk> chunks; diff --git a/src/Processors/ResizeProcessor.h b/src/Processors/ResizeProcessor.h index 364d1b4c883..07d7149ebb4 100644 --- a/src/Processors/ResizeProcessor.h +++ b/src/Processors/ResizeProcessor.h @@ -18,7 +18,7 @@ namespace DB * - union data from multiple inputs to single output - to serialize data that was processed in parallel. * - split data from single input to multiple outputs - to allow further parallel processing. */ -class ResizeProcessor : public IProcessor +class ResizeProcessor final : public IProcessor { public: /// TODO Check that there is non zero number of inputs and outputs. diff --git a/src/Processors/Sources/RemoteSource.cpp b/src/Processors/Sources/RemoteSource.cpp index 9f29ad9ad07..69964d569fa 100644 --- a/src/Processors/Sources/RemoteSource.cpp +++ b/src/Processors/Sources/RemoteSource.cpp @@ -1,3 +1,4 @@ +#include #include #include #include @@ -8,10 +9,16 @@ namespace DB { -RemoteSource::RemoteSource(RemoteQueryExecutorPtr executor, bool add_aggregation_info_, bool async_read_) +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +RemoteSource::RemoteSource(RemoteQueryExecutorPtr executor, bool add_aggregation_info_, bool async_read_, UUID uuid_) : ISource(executor->getHeader(), false) , add_aggregation_info(add_aggregation_info_), query_executor(std::move(executor)) , async_read(async_read_) + , uuid(uuid_) { /// Add AggregatedChunkInfo if we expect DataTypeAggregateFunction as a result. const auto & sample = getPort().getHeader(); @@ -22,6 +29,18 @@ RemoteSource::RemoteSource(RemoteQueryExecutorPtr executor, bool add_aggregation RemoteSource::~RemoteSource() = default; +void RemoteSource::connectToScheduler(InputPort & input_port) +{ + outputs.emplace_back(Block{}, this); + dependency_port = &outputs.back(); + connect(*dependency_port, input_port); +} + +UUID RemoteSource::getParallelReplicasGroupUUID() +{ + return uuid; +} + void RemoteSource::setStorageLimits(const std::shared_ptr<const StorageLimitsList> & storage_limits_) { /// Remove leaf limits for remote source.
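The connectToScheduler method added above is one half of a data-free signalling circuit; the other half lives in ReadFromMergeTreeDependencyTransform, and the two are tied together by QueryPipelineBuilder::connectDependencies further down in this patch. A rough sketch of the one-group case, assuming the ClickHouse types introduced here (it mirrors connectDependencies, reduced to a single source and a single dependency):

    /// The scheduler is a ResizeProcessor with an empty header, so only empty
    /// "progress token" chunks flow through this circuit, never real data.
    static void connectOneGroup(
        RemoteSource & remote_source,
        ReadFromMergeTreeDependencyTransform & merge_tree_dependency,
        Processors & processors)
    {
        auto scheduler = std::make_shared<ResizeProcessor>(Block{}, /*num_inputs*/ 1, /*num_outputs*/ 1);
        remote_source.connectToScheduler(scheduler->getInputs().front());
        merge_tree_dependency.connectToScheduler(scheduler->getOutputs().front());
        processors.emplace_back(std::move(scheduler));
    }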
@@ -50,8 +69,21 @@ ISource::Status RemoteSource::prepare() if (status == Status::Finished) { query_executor->finish(&read_context); + if (dependency_port) + dependency_port->finish(); is_async_state = false; + + return status; } + + if (status == Status::PortFull) + { + /// Also push an empty chunk to the dependency to signal that we have read data from the remote source + /// or answered an incoming request from a parallel replica + if (dependency_port && !dependency_port->isFinished() && dependency_port->canPush()) + dependency_port->push(Chunk()); + } + return status; } @@ -88,19 +120,29 @@ std::optional<Chunk> RemoteSource::tryGenerate() if (async_read) { auto res = query_executor->read(read_context); - if (std::holds_alternative<int>(res)) + + if (res.getType() == RemoteQueryExecutor::ReadResult::Type::Nothing) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Got an empty packet from the RemoteQueryExecutor. This is a bug"); + + if (res.getType() == RemoteQueryExecutor::ReadResult::Type::FileDescriptor) { - fd = std::get<int>(res); + fd = res.getFileDescriptor(); is_async_state = true; return Chunk(); } + if (res.getType() == RemoteQueryExecutor::ReadResult::Type::ParallelReplicasToken) + { + is_async_state = false; + return Chunk(); + } + is_async_state = false; - block = std::get<Block>(std::move(res)); + block = res.getBlock(); } else - block = query_executor->read(); + block = query_executor->readBlock(); if (!block) { @@ -180,9 +222,9 @@ Chunk RemoteExtremesSource::generate() Pipe createRemoteSourcePipe( RemoteQueryExecutorPtr query_executor, - bool add_aggregation_info, bool add_totals, bool add_extremes, bool async_read) + bool add_aggregation_info, bool add_totals, bool add_extremes, bool async_read, UUID uuid) { - Pipe pipe(std::make_shared<RemoteSource>(query_executor, add_aggregation_info, async_read)); + Pipe pipe(std::make_shared<RemoteSource>(query_executor, add_aggregation_info, async_read, uuid)); if (add_totals) pipe.addTotalsSource(std::make_shared<RemoteTotalsSource>(query_executor)); diff --git a/src/Processors/Sources/RemoteSource.h b/src/Processors/Sources/RemoteSource.h index f415b91aae0..8fe0114ab6f 100644 --- a/src/Processors/Sources/RemoteSource.h +++ b/src/Processors/Sources/RemoteSource.h @@ -3,6 +3,7 @@ #include #include #include +#include "Core/UUID.h" #include namespace DB @@ -14,20 +15,24 @@ using RemoteQueryExecutorPtr = std::shared_ptr<RemoteQueryExecutor>; class RemoteQueryExecutorReadContext; /// Source from RemoteQueryExecutor. Executes remote query and returns query result chunks. -class RemoteSource : public ISource +class RemoteSource final : public ISource { public: /// Flag add_aggregation_info tells if AggregatedChunkInfo should be added to result chunk. /// AggregatedChunkInfo stores the bucket number used for two-level aggregation. /// This flag should be typically enabled for queries with GROUP BY which are executed till WithMergeableState. - RemoteSource(RemoteQueryExecutorPtr executor, bool add_aggregation_info_, bool async_read_); + RemoteSource(RemoteQueryExecutorPtr executor, bool add_aggregation_info_, bool async_read_, UUID uuid = UUIDHelpers::Nil); ~RemoteSource() override; Status prepare() override; String getName() const override { return "Remote"; } + void connectToScheduler(InputPort & input_port); + void setRowsBeforeLimitCounter(RowsBeforeLimitCounterPtr counter) { rows_before_limit.swap(counter); } + UUID getParallelReplicasGroupUUID(); + /// Stop reading from stream if output port is finished. 
void onUpdatePorts() override; @@ -46,9 +51,12 @@ private: RemoteQueryExecutorPtr query_executor; RowsBeforeLimitCounterPtr rows_before_limit; + OutputPort * dependency_port{nullptr}; + const bool async_read; bool is_async_state = false; std::unique_ptr read_context; + UUID uuid; int fd = -1; }; @@ -87,6 +95,6 @@ private: /// Create pipe with remote sources. Pipe createRemoteSourcePipe( RemoteQueryExecutorPtr query_executor, - bool add_aggregation_info, bool add_totals, bool add_extremes, bool async_read); + bool add_aggregation_info, bool add_totals, bool add_extremes, bool async_read, UUID uuid = UUIDHelpers::Nil); } diff --git a/src/Processors/Transforms/IntersectOrExceptTransform.cpp b/src/Processors/Transforms/IntersectOrExceptTransform.cpp index 31a3e304505..d4d7ca9f193 100644 --- a/src/Processors/Transforms/IntersectOrExceptTransform.cpp +++ b/src/Processors/Transforms/IntersectOrExceptTransform.cpp @@ -124,6 +124,8 @@ size_t IntersectOrExceptTransform::buildFilter( void IntersectOrExceptTransform::accumulate(Chunk chunk) { + convertToFullIfSparse(chunk); + auto num_rows = chunk.getNumRows(); auto columns = chunk.detachColumns(); @@ -160,6 +162,8 @@ void IntersectOrExceptTransform::accumulate(Chunk chunk) void IntersectOrExceptTransform::filter(Chunk & chunk) { + convertToFullIfSparse(chunk); + auto num_rows = chunk.getNumRows(); auto columns = chunk.detachColumns(); diff --git a/src/Processors/Transforms/ReadFromMergeTreeDependencyTransform.cpp b/src/Processors/Transforms/ReadFromMergeTreeDependencyTransform.cpp new file mode 100644 index 00000000000..295eddb206d --- /dev/null +++ b/src/Processors/Transforms/ReadFromMergeTreeDependencyTransform.cpp @@ -0,0 +1,103 @@ +#include + +#include +#include "Processors/Port.h" + +namespace DB +{ + +ReadFromMergeTreeDependencyTransform::ReadFromMergeTreeDependencyTransform(const Block & header, UUID uuid_) + : IProcessor(InputPorts(1, header), OutputPorts(1, header)) + , uuid(uuid_) + , data_port(&inputs.front()) +{ +} + +void ReadFromMergeTreeDependencyTransform::connectToScheduler(OutputPort & output_port) +{ + inputs.emplace_back(Block{}, this); + dependency_port = &inputs.back(); + connect(output_port, *dependency_port); +} + +UUID ReadFromMergeTreeDependencyTransform::getParallelReplicasGroupUUID() +{ + return uuid; +} + +IProcessor::Status ReadFromMergeTreeDependencyTransform::prepare() +{ + Status status = Status::Ready; + + while (status == Status::Ready) + { + status = !has_data ? prepareConsume() + : prepareGenerate(); + } + + return status; +} + +IProcessor::Status ReadFromMergeTreeDependencyTransform::prepareConsume() +{ + auto & output_port = getOutputPort(); + + /// Check all outputs are finished or ready to get data. + if (output_port.isFinished()) + { + data_port->close(); + dependency_port->close(); + return Status::Finished; + } + + /// Try get chunk from input. 
+ if (data_port->isFinished()) + { + if (dependency_port->hasData()) + dependency_port->pull(true); + dependency_port->close(); + output_port.finish(); + return Status::Finished; + } + + if (!dependency_port->isFinished()) + { + dependency_port->setNeeded(); + if (!dependency_port->hasData()) + return Status::NeedData; + } + + data_port->setNeeded(); + if (!data_port->hasData()) + return Status::NeedData; + + if (!dependency_port->isFinished()) + dependency_port->pull(); + + chunk = data_port->pull(); + has_data = true; + + return Status::Ready; +} + +IProcessor::Status ReadFromMergeTreeDependencyTransform::prepareGenerate() +{ + auto & output_port = getOutputPort(); + if (!output_port.isFinished() && output_port.canPush()) + { + output_port.push(std::move(chunk)); + has_data = false; + return Status::Ready; + } + + if (output_port.isFinished()) + { + data_port->close(); + dependency_port->close(); + return Status::Finished; + } + + return Status::PortFull; +} + +} diff --git a/src/Processors/Transforms/ReadFromMergeTreeDependencyTransform.h b/src/Processors/Transforms/ReadFromMergeTreeDependencyTransform.h new file mode 100644 index 00000000000..929841e7ce0 --- /dev/null +++ b/src/Processors/Transforms/ReadFromMergeTreeDependencyTransform.h @@ -0,0 +1,48 @@ +#pragma once +#include + +namespace DB +{ + +class RemoteQueryExecutor; +using RemoteQueryExecutorPtr = std::shared_ptr<RemoteQueryExecutor>; + +/// A tiny class which is used for reading with multiple replicas in parallel. +/// The motivation is that we don't have full control over how +/// processors are scheduled across threads, so there could be a situation +/// where all available threads read from the local replica and simply +/// forget that remote replicas exist. That is not what we want. +/// For parallel replicas we have to constantly answer incoming requests +/// with a set of marks to read. +/// With the help of this class, we explicitly connect a "local" source with +/// all the remote ones and thus achieve a degree of fairness. 
+class ReadFromMergeTreeDependencyTransform : public IProcessor +{ +public: + ReadFromMergeTreeDependencyTransform(const Block & header, UUID uuid_); + + String getName() const override { return "ReadFromMergeTreeDependency"; } + Status prepare() override; + + InputPort & getInputPort() { assert(data_port); return *data_port; } + InputPort & getDependencyPort() { assert(dependency_port); return *dependency_port; } + OutputPort & getOutputPort() { return outputs.front(); } + + UUID getParallelReplicasGroupUUID(); + + void connectToScheduler(OutputPort & output_port); +private: + bool has_data{false}; + Chunk chunk; + + UUID uuid; + + InputPort * data_port{nullptr}; + InputPort * dependency_port{nullptr}; + + Status prepareGenerate(); + Status prepareConsume(); +}; + + +} diff --git a/src/Processors/Transforms/StreamInQueryCacheTransform.cpp b/src/Processors/Transforms/StreamInQueryCacheTransform.cpp new file mode 100644 index 00000000000..1ba57ea8ed2 --- /dev/null +++ b/src/Processors/Transforms/StreamInQueryCacheTransform.cpp @@ -0,0 +1,24 @@ +#include + +namespace DB +{ + +StreamInQueryCacheTransform::StreamInQueryCacheTransform( + const Block & header_, QueryCachePtr cache, const QueryCache::Key & cache_key, std::chrono::milliseconds min_query_duration) + : ISimpleTransform(header_, header_, false) + , cache_writer(cache->createWriter(cache_key, min_query_duration)) +{ +} + +void StreamInQueryCacheTransform::transform(Chunk & chunk) +{ + cache_writer.buffer(chunk.clone()); +} + +void StreamInQueryCacheTransform::finalizeWriteInQueryCache() +{ + if (!isCancelled()) + cache_writer.finalizeWrite(); +} + +}; diff --git a/src/Processors/Transforms/StreamInQueryCacheTransform.h b/src/Processors/Transforms/StreamInQueryCacheTransform.h new file mode 100644 index 00000000000..15d977cd445 --- /dev/null +++ b/src/Processors/Transforms/StreamInQueryCacheTransform.h @@ -0,0 +1,26 @@ +#pragma once + +#include +#include + +namespace DB +{ + +class StreamInQueryCacheTransform : public ISimpleTransform +{ +public: + StreamInQueryCacheTransform( + const Block & header_, QueryCachePtr cache, const QueryCache::Key & cache_key, std::chrono::milliseconds min_query_duration); + +protected: + void transform(Chunk & chunk) override; + +public: + void finalizeWriteInQueryCache(); + String getName() const override { return "StreamInQueryCacheTransform"; } + +private: + QueryCache::Writer cache_writer; +}; + +} diff --git a/src/Processors/Transforms/StreamInQueryResultCacheTransform.cpp b/src/Processors/Transforms/StreamInQueryResultCacheTransform.cpp deleted file mode 100644 index 841fcfdf8b5..00000000000 --- a/src/Processors/Transforms/StreamInQueryResultCacheTransform.cpp +++ /dev/null @@ -1,24 +0,0 @@ -#include - -namespace DB -{ - -StreamInQueryResultCacheTransform::StreamInQueryResultCacheTransform( - const Block & header_, QueryResultCachePtr cache, const QueryResultCache::Key & cache_key, std::chrono::milliseconds min_query_duration) - : ISimpleTransform(header_, header_, false) - , cache_writer(cache->createWriter(cache_key, min_query_duration)) -{ -} - -void StreamInQueryResultCacheTransform::transform(Chunk & chunk) -{ - cache_writer.buffer(chunk.clone()); -} - -void StreamInQueryResultCacheTransform::finalizeWriteInQueryResultCache() -{ - if (!isCancelled()) - cache_writer.finalizeWrite(); -} - -}; diff --git a/src/Processors/Transforms/StreamInQueryResultCacheTransform.h b/src/Processors/Transforms/StreamInQueryResultCacheTransform.h deleted file mode 100644 index a90d33a0681..00000000000 --- 
a/src/Processors/Transforms/StreamInQueryResultCacheTransform.h +++ /dev/null @@ -1,26 +0,0 @@ -#pragma once - -#include -#include - -namespace DB -{ - -class StreamInQueryResultCacheTransform : public ISimpleTransform -{ -public: - StreamInQueryResultCacheTransform( - const Block & header_, QueryResultCachePtr cache, const QueryResultCache::Key & cache_key, std::chrono::milliseconds min_query_duration); - -protected: - void transform(Chunk & chunk) override; - -public: - void finalizeWriteInQueryResultCache(); - String getName() const override { return "StreamInQueryResultCacheTransform"; } - -private: - QueryResultCache::Writer cache_writer; -}; - -} diff --git a/src/QueryPipeline/Pipe.h b/src/QueryPipeline/Pipe.h index 2b61bfe7573..09931e38578 100644 --- a/src/QueryPipeline/Pipe.h +++ b/src/QueryPipeline/Pipe.h @@ -102,6 +102,8 @@ public: /// Get processors from Pipe without destroying pipe (used for EXPLAIN to keep QueryPlan). const Processors & getProcessors() const { return *processors; } + std::shared_ptr getProcessorsPtr() { return processors; } + private: /// Header is common for all output below. Block header; diff --git a/src/QueryPipeline/QueryPipeline.cpp b/src/QueryPipeline/QueryPipeline.cpp index aa01801b1ec..b7b18014f1f 100644 --- a/src/QueryPipeline/QueryPipeline.cpp +++ b/src/QueryPipeline/QueryPipeline.cpp @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include @@ -525,7 +525,7 @@ bool QueryPipeline::tryGetResultRowsAndBytes(UInt64 & result_rows, UInt64 & resu return true; } -void QueryPipeline::streamIntoQueryResultCache(std::shared_ptr transform) +void QueryPipeline::streamIntoQueryCache(std::shared_ptr transform) { assert(pulling()); @@ -534,16 +534,16 @@ void QueryPipeline::streamIntoQueryResultCache(std::shared_ptremplace_back(transform); } -void QueryPipeline::finalizeWriteInQueryResultCache() +void QueryPipeline::finalizeWriteInQueryCache() { auto it = std::find_if( processors->begin(), processors->end(), - [](ProcessorPtr processor){ return dynamic_cast(&*processor); }); + [](ProcessorPtr processor){ return dynamic_cast(&*processor); }); - /// the pipeline should theoretically contain just one StreamInQueryResultCacheTransform + /// the pipeline should theoretically contain just one StreamInQueryCacheTransform if (it != processors->end()) - dynamic_cast(**it).finalizeWriteInQueryResultCache(); + dynamic_cast(**it).finalizeWriteInQueryCache(); } void QueryPipeline::addStorageHolder(StoragePtr storage) diff --git a/src/QueryPipeline/QueryPipeline.h b/src/QueryPipeline/QueryPipeline.h index da43aa035f3..55c78ca78ed 100644 --- a/src/QueryPipeline/QueryPipeline.h +++ b/src/QueryPipeline/QueryPipeline.h @@ -31,7 +31,7 @@ class SinkToStorage; class ISource; class ISink; class ReadProgressCallback; -class StreamInQueryResultCacheTransform; +class StreamInQueryCacheTransform; struct ColumnWithTypeAndName; using ColumnsWithTypeAndName = std::vector; @@ -105,8 +105,8 @@ public: void setLimitsAndQuota(const StreamLocalLimits & limits, std::shared_ptr quota_); bool tryGetResultRowsAndBytes(UInt64 & result_rows, UInt64 & result_bytes) const; - void streamIntoQueryResultCache(std::shared_ptr transform); - void finalizeWriteInQueryResultCache(); + void streamIntoQueryCache(std::shared_ptr transform); + void finalizeWriteInQueryCache(); void setQuota(std::shared_ptr quota_); diff --git a/src/QueryPipeline/QueryPipelineBuilder.cpp b/src/QueryPipeline/QueryPipelineBuilder.cpp index 483447d1e4d..07adc6b0b3a 100644 --- 
a/src/QueryPipeline/QueryPipelineBuilder.cpp +++ b/src/QueryPipeline/QueryPipelineBuilder.cpp @@ -1,34 +1,35 @@ -#include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include +#include +#include "Core/UUID.h" +#include #include #include #include #include -#include -#include +#include #include -#include -#include #include +#include +#include +#include +#include +#include +#include #include #include -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include namespace DB { @@ -620,6 +621,65 @@ void QueryPipelineBuilder::setProgressCallback(ProgressCallback callback) progress_callback = callback; } +void QueryPipelineBuilder::connectDependencies() +{ + /** + * This is needed because among all RemoteSources there could be + * one or several that don't belong to the parallel replicas reading process. + * It could happen for example if we read through distributed table + prefer_localhost_replica=1 + parallel replicas + * SELECT * FROM remote('127.0.0.{1,2}', table.merge_tree) + * Will generate a local pipeline and a remote source. For local pipeline because of parallel replicas we will create + * several processors to read and several remote sources. + */ + std::set all_parallel_replicas_groups; + for (auto & processor : *pipe.getProcessorsPtr()) + { + if (auto * remote_dependency = typeid_cast(processor.get()); remote_dependency) + if (auto uuid = remote_dependency->getParallelReplicasGroupUUID(); uuid != UUIDHelpers::Nil) + all_parallel_replicas_groups.insert(uuid); + if (auto * merge_tree_dependency = typeid_cast(processor.get()); merge_tree_dependency) + if (auto uuid = merge_tree_dependency->getParallelReplicasGroupUUID(); uuid != UUIDHelpers::Nil) + all_parallel_replicas_groups.insert(uuid); + } + + for (const auto & group_id : all_parallel_replicas_groups) + { + std::vector input_dependencies; + std::vector output_dependencies; + + for (auto & processor : *pipe.getProcessorsPtr()) + { + if (auto * remote_dependency = typeid_cast(processor.get()); remote_dependency) + if (auto uuid = remote_dependency->getParallelReplicasGroupUUID(); uuid == group_id) + input_dependencies.emplace_back(remote_dependency); + if (auto * merge_tree_dependency = typeid_cast(processor.get()); merge_tree_dependency) + if (auto uuid = merge_tree_dependency->getParallelReplicasGroupUUID(); uuid == group_id) + output_dependencies.emplace_back(merge_tree_dependency); + } + + if (input_dependencies.empty() || output_dependencies.empty()) + continue; + + auto input_dependency_iter = input_dependencies.begin(); + auto output_dependency_iter = output_dependencies.begin(); + auto scheduler = std::make_shared(Block{}, input_dependencies.size(), output_dependencies.size()); + + for (auto & scheduler_input : scheduler->getInputs()) + { + (*input_dependency_iter)->connectToScheduler(scheduler_input); + ++input_dependency_iter; + } + + for (auto & scheduler_output : scheduler->getOutputs()) + { + (*output_dependency_iter)->connectToScheduler(scheduler_output); + ++output_dependency_iter; + } + + pipe.getProcessorsPtr()->emplace_back(std::move(scheduler)); + } +} + PipelineExecutorPtr QueryPipelineBuilder::execute() { if (!isCompleted()) diff --git a/src/QueryPipeline/QueryPipelineBuilder.h b/src/QueryPipeline/QueryPipelineBuilder.h index 5a0694100eb..0a102d186ca 100644 --- a/src/QueryPipeline/QueryPipelineBuilder.h +++ 
b/src/QueryPipeline/QueryPipelineBuilder.h @@ -140,6 +146,12 @@ public: void addCreatingSetsTransform(const Block & res_header, SubqueryForSet subquery_for_set, const SizeLimits & limits, ContextPtr context); + /// Finds all processors that read from MergeTree + /// and explicitly connects them with all RemoteSources + /// using a ResizeProcessor. This is needed so that + /// the RemoteSources do not starve for CPU time. + void connectDependencies(); + PipelineExecutorPtr execute(); size_t getNumStreams() const { return pipe.numOutputPorts(); } diff --git a/src/QueryPipeline/RemoteQueryExecutor.cpp b/src/QueryPipeline/RemoteQueryExecutor.cpp index 961d8129d29..033907e9e2b 100644 --- a/src/QueryPipeline/RemoteQueryExecutor.cpp +++ b/src/QueryPipeline/RemoteQueryExecutor.cpp @@ -259,48 +259,62 @@ void RemoteQueryExecutor::sendQuery(ClientInfo::QueryKind query_kind) sendExternalTables(); } -Block RemoteQueryExecutor::read() + +Block RemoteQueryExecutor::readBlock() +{ + while (true) + { + auto res = read(); + + if (res.getType() == ReadResult::Type::Data) + return res.getBlock(); + } +} + + +RemoteQueryExecutor::ReadResult RemoteQueryExecutor::read() { if (!sent_query) { sendQuery(); if (context->getSettingsRef().skip_unavailable_shards && (0 == connections->size())) - return {}; + return ReadResult(Block()); } while (true) { std::lock_guard lock(was_cancelled_mutex); if (was_cancelled) - return Block(); + return ReadResult(Block()); - Packet packet = connections->receivePacket(); + auto packet = connections->receivePacket(); + auto anything = processPacket(std::move(packet)); - if (auto block = processPacket(std::move(packet))) - return *block; - else if (got_duplicated_part_uuids) - return std::get<Block>(restartQueryWithoutDuplicatedUUIDs()); + if (anything.getType() == ReadResult::Type::Data || anything.getType() == ReadResult::Type::ParallelReplicasToken) + return anything; + + if (got_duplicated_part_uuids) + return restartQueryWithoutDuplicatedUUIDs(); } } -std::variant<Block, int> RemoteQueryExecutor::read(std::unique_ptr<ReadContext> & read_context [[maybe_unused]]) +RemoteQueryExecutor::ReadResult RemoteQueryExecutor::read(std::unique_ptr<ReadContext> & read_context [[maybe_unused]]) { - #if defined(OS_LINUX) if (!sent_query) { sendQuery(); if (context->getSettingsRef().skip_unavailable_shards && (0 == connections->size())) - return Block(); + return ReadResult(Block()); } if (!read_context || resent_query) { std::lock_guard lock(was_cancelled_mutex); if (was_cancelled) - return Block(); + return ReadResult(Block()); read_context = std::make_unique<ReadContext>(*connections); } @@ -308,12 +322,12 @@ std::variant<Block, int> RemoteQueryExecutor::read(std::unique_ptr do { if (!read_context->resumeRoutine()) - return Block(); + return ReadResult(Block()); if (read_context->is_read_in_progress.load(std::memory_order_relaxed)) { read_context->setTimer(); - return read_context->epoll.getFileDescriptor(); + return ReadResult(read_context->epoll.getFileDescriptor()); } else { @@ -321,11 +335,14 @@ std::variant<Block, int> RemoteQueryExecutor::read(std::unique_ptr /// to avoid the race between cancel() thread and read() thread. /// (since cancel() thread will steal the fiber and may update the packet). 
if (was_cancelled) - return Block(); + return ReadResult(Block()); - if (auto data = processPacket(std::move(read_context->packet))) - return std::move(*data); - else if (got_duplicated_part_uuids) + auto anything = processPacket(std::move(read_context->packet)); + + if (anything.getType() == ReadResult::Type::Data || anything.getType() == ReadResult::Type::ParallelReplicasToken) + return anything; + + if (got_duplicated_part_uuids) return restartQueryWithoutDuplicatedUUIDs(&read_context); } } @@ -336,7 +353,7 @@ std::variant RemoteQueryExecutor::read(std::unique_ptr } -std::variant RemoteQueryExecutor::restartQueryWithoutDuplicatedUUIDs(std::unique_ptr * read_context) +RemoteQueryExecutor::ReadResult RemoteQueryExecutor::restartQueryWithoutDuplicatedUUIDs(std::unique_ptr * read_context) { /// Cancel previous query and disconnect before retry. cancel(read_context); @@ -360,13 +377,18 @@ std::variant RemoteQueryExecutor::restartQueryWithoutDuplicatedUUIDs throw Exception(ErrorCodes::DUPLICATED_PART_UUIDS, "Found duplicate uuids while processing query"); } -std::optional RemoteQueryExecutor::processPacket(Packet packet) +RemoteQueryExecutor::ReadResult RemoteQueryExecutor::processPacket(Packet packet) { switch (packet.type) { case Protocol::Server::MergeTreeReadTaskRequest: processMergeTreeReadTaskRequest(packet.request); - break; + return ReadResult(ReadResult::Type::ParallelReplicasToken); + + case Protocol::Server::MergeTreeAllRangesAnnounecement: + processMergeTreeInitialReadAnnounecement(packet.announcement); + return ReadResult(ReadResult::Type::ParallelReplicasToken); + case Protocol::Server::ReadTaskRequest: processReadTaskRequest(); break; @@ -379,7 +401,7 @@ std::optional RemoteQueryExecutor::processPacket(Packet packet) /// We can actually return it, and the first call to RemoteQueryExecutor::read /// will return earlier. We should consider doing it. if (packet.block && (packet.block.rows() > 0)) - return adaptBlockStructure(packet.block, header); + return ReadResult(adaptBlockStructure(packet.block, header)); break; /// If the block is empty - we will receive other packets before EndOfStream. 
case Protocol::Server::Exception: @@ -391,7 +413,8 @@ std::optional RemoteQueryExecutor::processPacket(Packet packet) if (!connections->hasActiveConnections()) { finished = true; - return Block(); + /// TODO: Replace with Type::Finished + return ReadResult(Block{}); } break; @@ -446,7 +469,7 @@ std::optional RemoteQueryExecutor::processPacket(Packet packet) connections->dumpAddresses()); } - return {}; + return ReadResult(ReadResult::Type::Nothing); } bool RemoteQueryExecutor::setPartUUIDs(const std::vector & uuids) @@ -471,7 +494,7 @@ void RemoteQueryExecutor::processReadTaskRequest() connections->sendReadTaskResponse(response); } -void RemoteQueryExecutor::processMergeTreeReadTaskRequest(PartitionReadRequest request) +void RemoteQueryExecutor::processMergeTreeReadTaskRequest(ParallelReadRequest request) { if (!parallel_reading_coordinator) throw Exception(ErrorCodes::LOGICAL_ERROR, "Coordinator for parallel reading from replicas is not initialized"); @@ -480,6 +503,14 @@ void RemoteQueryExecutor::processMergeTreeReadTaskRequest(PartitionReadRequest r connections->sendMergeTreeReadTaskResponse(response); } +void RemoteQueryExecutor::processMergeTreeInitialReadAnnounecement(InitialAllRangesAnnouncement announcement) +{ + if (!parallel_reading_coordinator) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Coordinator for parallel reading from replicas is not initialized"); + + parallel_reading_coordinator->handleInitialAllRangesAnnouncement(announcement); +} + void RemoteQueryExecutor::finish(std::unique_ptr * read_context) { /** If one of: diff --git a/src/QueryPipeline/RemoteQueryExecutor.h b/src/QueryPipeline/RemoteQueryExecutor.h index 8b8f21a3ae4..c67a45c7275 100644 --- a/src/QueryPipeline/RemoteQueryExecutor.h +++ b/src/QueryPipeline/RemoteQueryExecutor.h @@ -10,6 +10,7 @@ #include #include #include +#include namespace DB @@ -94,12 +95,60 @@ public: /// Query is resent to a replica, the query itself can be modified. std::atomic resent_query { false }; + struct ReadResult + { + enum class Type : uint8_t + { + Data, + ParallelReplicasToken, + FileDescriptor, + Finished, + Nothing + }; + + explicit ReadResult(Block block_) + : type(Type::Data) + , block(std::move(block_)) + {} + + explicit ReadResult(int fd_) + : type(Type::FileDescriptor) + , fd(fd_) + {} + + explicit ReadResult(Type type_) + : type(type_) + { + assert(type != Type::Data && type != Type::FileDescriptor); + } + + Type getType() const { return type; } + + Block getBlock() + { + chassert(type == Type::Data); + return std::move(block); + } + + int getFileDescriptor() const + { + chassert(type == Type::FileDescriptor); + return fd; + } + + Type type; + Block block; + int fd{-1}; + }; + /// Read next block of data. Returns empty block if query is finished. - Block read(); + Block readBlock(); + + ReadResult read(); /// Async variant of read. Returns ready block or file descriptor which may be used for polling. /// ReadContext is an internal read state. Pass empty ptr first time, reuse created one for every call. - std::variant read(std::unique_ptr & read_context); + ReadResult read(std::unique_ptr & read_context); /// Receive all remain packets and finish query. /// It should be cancelled after read returned empty block. 
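With ReadResult in place, a caller dispatches on one tagged result type instead of juggling std::optional<Block> and std::variant<Block, int>. A hedged sketch of a consumer of the new API (onBlock and onFd are hypothetical handlers, error handling omitted):

    /// Hypothetical handlers, assumed to exist for this sketch.
    void onBlock(Block block);
    void onFd(int fd);

    /// One iteration of a read loop over the tagged result type.
    static void pumpOnce(RemoteQueryExecutor & executor)
    {
        auto res = executor.read();
        switch (res.getType())
        {
            case RemoteQueryExecutor::ReadResult::Type::Data:
                onBlock(res.getBlock()); /// a real data block arrived
                break;
            case RemoteQueryExecutor::ReadResult::Type::ParallelReplicasToken:
                break; /// a coordination packet was handled; just poll again
            case RemoteQueryExecutor::ReadResult::Type::FileDescriptor:
                onFd(res.getFileDescriptor()); /// async path: poll this fd for readiness
                break;
            case RemoteQueryExecutor::ReadResult::Type::Finished:
            case RemoteQueryExecutor::ReadResult::Type::Nothing:
                break; /// stream ended or the packet carried no data
        }
    }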
@@ -231,11 +280,12 @@ private: void processReadTaskRequest(); - void processMergeTreeReadTaskRequest(PartitionReadRequest request); + void processMergeTreeReadTaskRequest(ParallelReadRequest request); + void processMergeTreeInitialReadAnnounecement(InitialAllRangesAnnouncement announcement); /// Cancel query and restart it with info about duplicate UUIDs /// only for `allow_experimental_query_deduplication`. - std::variant restartQueryWithoutDuplicatedUUIDs(std::unique_ptr * read_context = nullptr); + ReadResult restartQueryWithoutDuplicatedUUIDs(std::unique_ptr * read_context = nullptr); /// If wasn't sent yet, send request to cancel all connections to replicas void tryCancel(const char * reason, std::unique_ptr * read_context); @@ -247,11 +297,10 @@ private: bool hasThrownException() const; /// Process packet for read and return data block if possible. - std::optional processPacket(Packet packet); + ReadResult processPacket(Packet packet); /// Reads packet by packet Block readPackets(); - }; } diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index a48a3bb1ed6..28377edf8ca 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -54,6 +54,7 @@ #include #include "Core/Protocol.h" +#include "Storages/MergeTree/RequestResponse.h" #include "TCPHandler.h" #include "config_version.h" @@ -363,7 +364,17 @@ void TCPHandler::runImpl() return receiveReadTaskResponseAssumeLocked(); }); - query_context->setMergeTreeReadTaskCallback([this](PartitionReadRequest request) -> std::optional + query_context->setMergeTreeAllRangesCallback([this](InitialAllRangesAnnouncement announcement) + { + std::lock_guard lock(task_callback_mutex); + + if (state.is_cancelled) + return; + + sendMergeTreeAllRangesAnnounecementAssumeLocked(announcement); + }); + + query_context->setMergeTreeReadTaskCallback([this](ParallelReadRequest request) -> std::optional { std::lock_guard lock(task_callback_mutex); @@ -920,7 +931,15 @@ void TCPHandler::sendReadTaskRequestAssumeLocked() } -void TCPHandler::sendMergeTreeReadTaskRequestAssumeLocked(PartitionReadRequest request) +void TCPHandler::sendMergeTreeAllRangesAnnounecementAssumeLocked(InitialAllRangesAnnouncement announcement) +{ + writeVarUInt(Protocol::Server::MergeTreeAllRangesAnnounecement, *out); + announcement.serialize(*out); + out->next(); +} + + +void TCPHandler::sendMergeTreeReadTaskRequestAssumeLocked(ParallelReadRequest request) { writeVarUInt(Protocol::Server::MergeTreeReadTaskRequest, *out); request.serialize(*out); @@ -1348,7 +1367,7 @@ String TCPHandler::receiveReadTaskResponseAssumeLocked() } -std::optional TCPHandler::receivePartitionMergeTreeReadTaskResponseAssumeLocked() +std::optional TCPHandler::receivePartitionMergeTreeReadTaskResponseAssumeLocked() { UInt64 packet_type = 0; readVarUInt(packet_type, *in); @@ -1371,7 +1390,7 @@ std::optional TCPHandler::receivePartitionMergeTreeReadTa Protocol::Client::toString(packet_type)); } } - PartitionReadResponse response; + ParallelReadResponse response; response.deserialize(*in); return response; } diff --git a/src/Server/TCPHandler.h b/src/Server/TCPHandler.h index 0b296aaef4e..f06b0b060b3 100644 --- a/src/Server/TCPHandler.h +++ b/src/Server/TCPHandler.h @@ -21,6 +21,7 @@ #include "IServer.h" #include "Server/TCPProtocolStackData.h" +#include "Storages/MergeTree/RequestResponse.h" #include "base/types.h" @@ -220,7 +221,7 @@ private: void receiveQuery(); void receiveIgnoredPartUUIDs(); String receiveReadTaskResponseAssumeLocked(); - std::optional 
receivePartitionMergeTreeReadTaskResponseAssumeLocked(); + std::optional receivePartitionMergeTreeReadTaskResponseAssumeLocked(); bool receiveData(bool scalar); bool readDataNext(); void readData(); @@ -253,7 +254,8 @@ private: void sendEndOfStream(); void sendPartUUIDs(); void sendReadTaskRequestAssumeLocked(); - void sendMergeTreeReadTaskRequestAssumeLocked(PartitionReadRequest request); + void sendMergeTreeAllRangesAnnounecementAssumeLocked(InitialAllRangesAnnouncement announcement); + void sendMergeTreeReadTaskRequestAssumeLocked(ParallelReadRequest request); void sendProfileInfo(const ProfileInfo & info); void sendTotals(const Block & totals); void sendExtremes(const Block & extremes); diff --git a/src/Storages/Distributed/DirectoryMonitor.cpp b/src/Storages/Distributed/DirectoryMonitor.cpp index eb1d83af851..7ffd2b98ba5 100644 --- a/src/Storages/Distributed/DirectoryMonitor.cpp +++ b/src/Storages/Distributed/DirectoryMonitor.cpp @@ -700,12 +700,14 @@ struct StorageDistributedDirectoryMonitor::BatchHeader struct StorageDistributedDirectoryMonitor::Batch { + /// File indexes for this batch. std::vector file_indices; size_t total_rows = 0; size_t total_bytes = 0; bool recovered = false; StorageDistributedDirectoryMonitor & parent; + /// Information about all available indexes (not only for the current batch). const std::map & file_index_to_path; bool split_batch_on_failure = true; @@ -795,17 +797,22 @@ struct StorageDistributedDirectoryMonitor::Batch else { std::vector files; - for (const auto && file_info : file_index_to_path | boost::adaptors::indexed()) + for (auto file_index_info : file_indices | boost::adaptors::indexed()) { - if (file_info.index() > 8) + if (file_index_info.index() > 8) { files.push_back("..."); break; } - files.push_back(file_info.value().second); + auto file_index = file_index_info.value(); + auto file_path = file_index_to_path.find(file_index); + if (file_path != file_index_to_path.end()) + files.push_back(file_path->second); + else + files.push_back(fmt::format("#{}.bin (deleted)", file_index)); } - e.addMessage(fmt::format("While sending batch, nums: {}, files: {}", file_index_to_path.size(), fmt::join(files, "\n"))); + e.addMessage(fmt::format("While sending batch, size: {}, files: {}", file_indices.size(), fmt::join(files, "\n"))); throw; } diff --git a/src/Storages/HDFS/StorageHDFSCluster.cpp b/src/Storages/HDFS/StorageHDFSCluster.cpp index 91204d852ae..f6e6f773d6c 100644 --- a/src/Storages/HDFS/StorageHDFSCluster.cpp +++ b/src/Storages/HDFS/StorageHDFSCluster.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Storages/IStorage.h b/src/Storages/IStorage.h index a4825358d6e..699780db0b9 100644 --- a/src/Storages/IStorage.h +++ b/src/Storages/IStorage.h @@ -487,7 +487,7 @@ public: } /// Mutate the table contents - virtual void mutate(const MutationCommands &, ContextPtr, bool /*force_wait*/) + virtual void mutate(const MutationCommands &, ContextPtr) { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Mutations are not supported by storage {}", getName()); } diff --git a/src/Storages/MergeTree/IntersectionsIndexes.h b/src/Storages/MergeTree/IntersectionsIndexes.h index 68ccbc4a0b1..d9445f446ce 100644 --- a/src/Storages/MergeTree/IntersectionsIndexes.h +++ b/src/Storages/MergeTree/IntersectionsIndexes.h @@ -136,7 +136,7 @@ struct HalfIntervals MarkRanges convertToMarkRangesFinal() { MarkRanges result; - std::move(intervals.begin(), intervals.end(), std::back_inserter(result)); + std::copy(intervals.begin(), 
intervals.end(), std::back_inserter(result)); return result; } diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index fda1daec3a3..5c71804ef27 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -706,8 +706,12 @@ Block KeyCondition::getBlockWithConstants( if (syntax_analyzer_result) { - const auto expr_for_constant_folding = ExpressionAnalyzer(query, syntax_analyzer_result, context).getConstActions(); - expr_for_constant_folding->execute(result); + auto actions = ExpressionAnalyzer(query, syntax_analyzer_result, context).getConstActionsDAG(); + for (const auto & action_node : actions->getOutputs()) + { + if (action_node->column) + result.insert(ColumnWithTypeAndName{action_node->column, action_node->result_type, action_node->result_name}); + } } return result; diff --git a/src/Storages/MergeTree/MarkRange.cpp b/src/Storages/MergeTree/MarkRange.cpp index 0eea0e5afd1..c6c197919f4 100644 --- a/src/Storages/MergeTree/MarkRange.cpp +++ b/src/Storages/MergeTree/MarkRange.cpp @@ -1,5 +1,8 @@ #include "MarkRange.h" +#include +#include + namespace DB { @@ -8,6 +11,11 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } +size_t MarkRange::getNumberOfMarks() const +{ + return end - begin; +} + bool MarkRange::operator==(const MarkRange & rhs) const { return begin == rhs.begin && end == rhs.end; @@ -65,4 +73,41 @@ void assertSortedAndNonIntersecting(const MarkRanges & ranges) toString(ranges)); } +size_t MarkRanges::getNumberOfMarks() const +{ + size_t result = 0; + for (const auto & mark : *this) + result += mark.getNumberOfMarks(); + return result; +} + +void MarkRanges::serialize(WriteBuffer & out) const +{ + writeIntBinary(this->size(), out); + + for (const auto & [begin, end] : *this) + { + writeIntBinary(begin, out); + writeIntBinary(end, out); + } +} + +String MarkRanges::describe() const +{ + return fmt::format("Size: {}, Data: {}", this->size(), fmt::join(*this, ",")); +} + +void MarkRanges::deserialize(ReadBuffer & in) +{ + size_t size = 0; + readIntBinary(size, in); + + this->resize(size); + for (size_t i = 0; i < size; ++i) + { + readIntBinary((*this)[i].begin, in); + readIntBinary((*this)[i].end, in); + } +} + } diff --git a/src/Storages/MergeTree/MarkRange.h b/src/Storages/MergeTree/MarkRange.h index 076fc7dfea2..d1f4e1a4b45 100644 --- a/src/Storages/MergeTree/MarkRange.h +++ b/src/Storages/MergeTree/MarkRange.h @@ -4,7 +4,11 @@ #include #include +#include +#include + #include +#include namespace DB { @@ -21,12 +25,22 @@ struct MarkRange MarkRange() = default; MarkRange(const size_t begin_, const size_t end_) : begin{begin_}, end{end_} {} - bool operator==(const MarkRange & rhs) const; + size_t getNumberOfMarks() const; + bool operator==(const MarkRange & rhs) const; bool operator<(const MarkRange & rhs) const; }; -using MarkRanges = std::deque<MarkRange>; +struct MarkRanges : public std::deque<MarkRange> +{ + using std::deque<MarkRange>::deque; + + size_t getNumberOfMarks() const; + + void serialize(WriteBuffer & out) const; + String describe() const; + void deserialize(ReadBuffer & in); +}; /** Get max range.end from ranges. */ @@ -37,3 +51,26 @@ std::string toString(const MarkRanges & ranges); void assertSortedAndNonIntersecting(const MarkRanges & ranges); } + + +template <> +struct fmt::formatter<DB::MarkRange> +{ + constexpr static auto parse(format_parse_context & ctx) + { + const auto * it = ctx.begin(); + const auto * end = ctx.end(); + + /// Only support {}. 
+ if (it != end && *it != '}') + throw format_error("invalid format"); + + return it; + } + + template + auto format(const DB::MarkRange & range, FormatContext & ctx) + { + return format_to(ctx.out(), "{}", fmt::format("({}, {})", range.begin, range.end)); + } +}; diff --git a/src/Storages/MergeTree/MergeFromLogEntryTask.cpp b/src/Storages/MergeTree/MergeFromLogEntryTask.cpp index dd8781691b8..2d2013bd648 100644 --- a/src/Storages/MergeTree/MergeFromLogEntryTask.cpp +++ b/src/Storages/MergeTree/MergeFromLogEntryTask.cpp @@ -335,7 +335,7 @@ bool MergeFromLogEntryTask::finalize(ReplicatedMergeMutateTaskBase::PartLogWrite " We will download merged part from replica to force byte-identical result.", getCurrentExceptionMessage(false)); - write_part_log(ExecutionStatus::fromCurrentException()); + write_part_log(ExecutionStatus::fromCurrentException("", true)); if (storage.getSettings()->detach_not_byte_identical_parts) storage.forcefullyMovePartToDetachedAndRemoveFromMemory(std::move(part), "merge-not-byte-identical"); diff --git a/src/Storages/MergeTree/MergePlainMergeTreeTask.cpp b/src/Storages/MergeTree/MergePlainMergeTreeTask.cpp index cc5e87956a1..9f24839f1e1 100644 --- a/src/Storages/MergeTree/MergePlainMergeTreeTask.cpp +++ b/src/Storages/MergeTree/MergePlainMergeTreeTask.cpp @@ -52,7 +52,7 @@ bool MergePlainMergeTreeTask::executeStep() } catch (...) { - write_part_log(ExecutionStatus::fromCurrentException()); + write_part_log(ExecutionStatus::fromCurrentException("", true)); throw; } } diff --git a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp index e2997df3bb0..9cb6db51c98 100644 --- a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp @@ -49,8 +49,7 @@ IMergeTreeSelectAlgorithm::IMergeTreeSelectAlgorithm( UInt64 preferred_max_column_in_block_size_bytes_, const MergeTreeReaderSettings & reader_settings_, bool use_uncompressed_cache_, - const Names & virt_column_names_, - std::optional extension_) + const Names & virt_column_names_) : storage(storage_) , storage_snapshot(storage_snapshot_) , prewhere_info(prewhere_info_) @@ -62,7 +61,6 @@ IMergeTreeSelectAlgorithm::IMergeTreeSelectAlgorithm( , use_uncompressed_cache(use_uncompressed_cache_) , virt_column_names(virt_column_names_) , partition_value_type(storage.getPartitionValueType()) - , extension(extension_) { header_without_const_virtual_columns = applyPrewhereActions(std::move(header), prewhere_info); size_t non_const_columns_offset = header_without_const_virtual_columns.columns(); @@ -115,86 +113,11 @@ std::unique_ptr IMergeTreeSelectAlgorithm::getPrewhereActions( bool IMergeTreeSelectAlgorithm::getNewTask() { - /// No parallel reading feature - if (!extension.has_value()) + if (getNewTaskImpl()) { - if (getNewTaskImpl()) - { - finalizeNewTask(); - return true; - } - return false; - } - return getNewTaskParallelReading(); -} - - -bool IMergeTreeSelectAlgorithm::getNewTaskParallelReading() -{ - if (getTaskFromBuffer()) + finalizeNewTask(); return true; - - if (no_more_tasks) - return getDelayedTasks(); - - while (true) - { - /// The end of execution. No task. 
- if (!getNewTaskImpl()) - { - no_more_tasks = true; - return getDelayedTasks(); - } - - splitCurrentTaskRangesAndFillBuffer(); - - if (getTaskFromBuffer()) - return true; } -} - - -bool IMergeTreeSelectAlgorithm::getTaskFromBuffer() -{ - while (!buffered_ranges.empty()) - { - auto ranges = std::move(buffered_ranges.front()); - buffered_ranges.pop_front(); - - assert(!ranges.empty()); - - auto res = performRequestToCoordinator(ranges, /*delayed=*/false); - - if (Status::Accepted == res) - return true; - - /// To avoid any possibility of ignoring cancellation, exception will be thrown. - if (Status::Cancelled == res) - throw Exception(ErrorCodes::QUERY_WAS_CANCELLED, "Query had been cancelled"); - } - return false; -} - - -bool IMergeTreeSelectAlgorithm::getDelayedTasks() -{ - while (!delayed_tasks.empty()) - { - task = std::move(delayed_tasks.front()); - delayed_tasks.pop_front(); - - assert(!task->mark_ranges.empty()); - - auto res = performRequestToCoordinator(task->mark_ranges, /*delayed=*/true); - - if (Status::Accepted == res) - return true; - - if (Status::Cancelled == res) - break; - } - - finish(); return false; } @@ -456,11 +379,6 @@ namespace bool columnExists(const String & name) const { return block.has(name); } - void insertStringColumn(const ColumnPtr & column, const String & name) - { - block.insert({column, std::make_shared(), name}); - } - void insertUInt8Column(const ColumnPtr & column, const String & name) { block.insert({column, std::make_shared(), name}); @@ -476,6 +394,11 @@ namespace block.insert({column, std::make_shared(), name}); } + void insertLowCardinalityColumn(const ColumnPtr & column, const String & name) + { + block.insert({column, std::make_shared(std::make_shared()), name}); + } + void insertPartitionValueColumn( size_t rows, const Row & partition_value, const DataTypePtr & partition_value_type, const String & name) { @@ -560,11 +483,13 @@ static void injectPartConstVirtualColumns( { ColumnPtr column; if (rows) - column = DataTypeString().createColumnConst(rows, part->name)->convertToFullColumnIfConst(); + column = DataTypeLowCardinality{std::make_shared()} + .createColumnConst(rows, part->name) + ->convertToFullColumnIfConst(); else - column = DataTypeString().createColumn(); + column = DataTypeLowCardinality{std::make_shared()}.createColumn(); - inserter.insertStringColumn(column, virtual_column_name); + inserter.insertLowCardinalityColumn(column, virtual_column_name); } else if (virtual_column_name == "_part_index") { @@ -590,11 +515,13 @@ static void injectPartConstVirtualColumns( { ColumnPtr column; if (rows) - column = DataTypeString().createColumnConst(rows, part->info.partition_id)->convertToFullColumnIfConst(); + column = DataTypeLowCardinality{std::make_shared()} + .createColumnConst(rows, part->info.partition_id) + ->convertToFullColumnIfConst(); else - column = DataTypeString().createColumn(); + column = DataTypeLowCardinality{std::make_shared()}.createColumn(); - inserter.insertStringColumn(column, virtual_column_name); + inserter.insertLowCardinalityColumn(column, virtual_column_name); } else if (virtual_column_name == "_partition_value") { @@ -688,170 +615,6 @@ std::unique_ptr IMergeTreeSelectAlgorithm::getSizeP } -IMergeTreeSelectAlgorithm::Status IMergeTreeSelectAlgorithm::performRequestToCoordinator(MarkRanges requested_ranges, bool delayed) -{ - String partition_id = task->data_part->info.partition_id; - String part_name; - String projection_name; - - if (task->data_part->isProjectionPart()) - { - part_name = 
task->data_part->getParentPart()->name; - projection_name = task->data_part->name; - } - else - { - part_name = task->data_part->name; - } - - PartBlockRange block_range - { - .begin = task->data_part->info.min_block, - .end = task->data_part->info.max_block - }; - - PartitionReadRequest request - { - .partition_id = std::move(partition_id), - .part_name = std::move(part_name), - .projection_name = std::move(projection_name), - .block_range = std::move(block_range), - .mark_ranges = std::move(requested_ranges) - }; - String request_description = request.toString(); - - /// Consistent hashing won't work with reading in order, because at the end of the execution - /// we could possibly seek back - if (!delayed && canUseConsistentHashingForParallelReading()) - { - const auto hash = request.getConsistentHash(extension->count_participating_replicas); - if (hash != extension->number_of_current_replica) - { - auto delayed_task = std::make_unique(*task); // Create a copy - delayed_task->mark_ranges = std::move(request.mark_ranges); - delayed_tasks.emplace_back(std::move(delayed_task)); - LOG_TRACE(log, "Request delayed by hash: {}", request_description); - return Status::Denied; - } - } - - auto optional_response = extension.value().callback(std::move(request)); - - if (!optional_response.has_value()) - { - LOG_TRACE(log, "Request cancelled: {}", request_description); - return Status::Cancelled; - } - - auto response = optional_response.value(); - - task->mark_ranges = std::move(response.mark_ranges); - - if (response.denied || task->mark_ranges.empty()) - { - LOG_TRACE(log, "Request rejected: {}", request_description); - return Status::Denied; - } - - finalizeNewTask(); - - LOG_TRACE(log, "Request accepted: {}", request_description); - return Status::Accepted; -} - - -size_t IMergeTreeSelectAlgorithm::estimateMaxBatchSizeForHugeRanges() -{ - /// This is an empirical number and it is so, - /// because we have an adaptive granularity by default. - const size_t average_granule_size_bytes = 1024 * 1024 * 10; // 10 MiB - - /// We want to have one RTT per one gigabyte of data read from disk - /// this could be configurable. 
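// The arithmetic behind the constants above and below, as a tiny self-contained
// sketch: aim for roughly one coordinator round trip per gigabyte read, deriving
// the batch size in marks from the average uncompressed bytes per mark, with the
// 10 MiB empirical fallback used for compact parts. The helper name is ours, not
// ClickHouse's.
#include <cstddef>

inline size_t maxBatchSizeInMarks(size_t avg_mark_size_bytes)
{
    constexpr size_t bytes_per_request = 1024UL * 1024 * 1024; // 1 GiB per round trip
    constexpr size_t fallback_mark_size = 10UL * 1024 * 1024;  // 10 MiB, e.g. compact parts
    if (avg_mark_size_bytes == 0)
        avg_mark_size_bytes = fallback_mark_size;
    return bytes_per_request / avg_mark_size_bytes;
}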
- const size_t max_size_for_one_request = 1024 * 1024 * 1024; // 1 GiB - - size_t sum_average_marks_size = 0; - /// getColumnSize is not fully implemented for compact parts - if (task->data_part->getType() == IMergeTreeDataPart::Type::Compact) - { - sum_average_marks_size = average_granule_size_bytes; - } - else - { - for (const auto & name : extension->colums_to_read) - { - auto size = task->data_part->getColumnSize(name); - - assert(size.marks != 0); - sum_average_marks_size += size.data_uncompressed / size.marks; - } - } - - if (sum_average_marks_size == 0) - sum_average_marks_size = average_granule_size_bytes; // 10 MiB - - LOG_TEST(log, "Reading from {} part, average mark size is {}", - task->data_part->getTypeName(), sum_average_marks_size); - - return max_size_for_one_request / sum_average_marks_size; -} - -void IMergeTreeSelectAlgorithm::splitCurrentTaskRangesAndFillBuffer() -{ - const size_t max_batch_size = estimateMaxBatchSizeForHugeRanges(); - - size_t current_batch_size = 0; - buffered_ranges.emplace_back(); - - for (const auto & range : task->mark_ranges) - { - auto expand_if_needed = [&] - { - if (current_batch_size > max_batch_size) - { - buffered_ranges.emplace_back(); - current_batch_size = 0; - } - }; - - expand_if_needed(); - - if (range.end - range.begin < max_batch_size) - { - buffered_ranges.back().push_back(range); - current_batch_size += range.end - range.begin; - continue; - } - - auto current_begin = range.begin; - auto current_end = range.begin + max_batch_size; - - while (current_end < range.end) - { - auto current_range = MarkRange{current_begin, current_end}; - buffered_ranges.back().push_back(current_range); - current_batch_size += current_end - current_begin; - - current_begin = current_end; - current_end = current_end + max_batch_size; - - expand_if_needed(); - } - - if (range.end - current_begin > 0) - { - auto current_range = MarkRange{current_begin, range.end}; - buffered_ranges.back().push_back(current_range); - current_batch_size += range.end - current_begin; - - expand_if_needed(); - } - } - - if (buffered_ranges.back().empty()) - buffered_ranges.pop_back(); -} - IMergeTreeSelectAlgorithm::~IMergeTreeSelectAlgorithm() = default; } diff --git a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.h b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.h index 3615127ea34..77d2a383e28 100644 --- a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.h +++ b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.h @@ -24,6 +24,7 @@ struct ChunkAndProgress struct ParallelReadingExtension { + MergeTreeAllRangesCallback all_callback; MergeTreeReadTaskCallback callback; size_t count_participating_replicas{0}; size_t number_of_current_replica{0}; @@ -48,8 +49,7 @@ public: UInt64 preferred_max_column_in_block_size_bytes_, const MergeTreeReaderSettings & reader_settings_, bool use_uncompressed_cache_, - const Names & virt_column_names_ = {}, - std::optional extension_ = {}); + const Names & virt_column_names_ = {}); virtual ~IMergeTreeSelectAlgorithm(); @@ -90,8 +90,6 @@ protected: size_t estimateMaxBatchSizeForHugeRanges(); - virtual bool canUseConsistentHashingForParallelReading() { return false; } - /// Closes readers and unlock part locks virtual void finish() = 0; @@ -164,11 +162,6 @@ protected: MergeTreeReadTaskPtr task; - std::optional extension; - bool no_more_tasks{false}; - std::deque delayed_tasks; - std::deque buffered_ranges; - /// This setting is used in base algorithm only to additionally limit the number of granules to read. 
/// It is changed in ctor of MergeTreeThreadSelectAlgorithm. /// @@ -186,44 +179,8 @@ private: std::atomic is_cancelled{false}; - enum class Status - { - Accepted, - Cancelled, - Denied - }; - - /// Calls getNewTaskImpl() to get new task, then performs a request to a coordinator - /// The coordinator may modify the set of ranges to read from a part or could - /// deny the whole request. In the latter case it creates new task and retries. - /// Then it calls finalizeNewTask() to create readers for a task if it is needed. bool getNewTask(); - bool getNewTaskParallelReading(); - /// After PK analysis the range of marks could be extremely big - /// We divide this range to a set smaller consecutive ranges - /// Then, depending on the type of reading (concurrent, in order or in reverse order) - /// we can calculate a consistent hash function with the number of buckets equal to - /// the number of replicas involved. And after that we can throw away some ranges with - /// hash not equals to the number of the current replica. - bool getTaskFromBuffer(); - - /// But we can't throw that ranges completely, because if we have different sets of parts - /// on replicas (have merged part on one, but not on another), then such a situation is possible - /// - Coordinator allows to read from a big merged part, but this part is present only on one replica. - /// And that replica calculates consistent hash and throws away some ranges - /// - Coordinator denies other replicas to read from another parts (source parts for that big one) - /// At the end, the result of the query is wrong, because we didn't read all the data. - /// So, we have to remember parts and mark ranges with hash different then current replica number. - /// An we have to ask the coordinator about its permission to read from that "delayed" parts. - /// It won't work with reading in order or reading in reverse order, because we can possibly seek back. 
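// A simplified stand-in for the replica-assignment rule the removed comment above
// describes. The real (removed) code computes a SipHash-based consistent hash of
// the read request over count_participating_replicas buckets; std::hash is used
// here purely for illustration.
#include <cstddef>
#include <functional>
#include <string>

inline bool rangeBelongsToThisReplica(const std::string & part_name, size_t replicas_count, size_t current_replica)
{
    // Ranges that hash to another replica are not dropped outright: they are
    // remembered as "delayed" and re-confirmed with the coordinator later.
    return std::hash<std::string>{}(part_name) % replicas_count == current_replica;
}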
- bool getDelayedTasks(); - - /// It will form a request to coordinator and - /// then reinitialize the mark ranges of this->task object - Status performRequestToCoordinator(MarkRanges requested_ranges, bool delayed); - - void splitCurrentTaskRangesAndFillBuffer(); static Block applyPrewhereActions(Block block, const PrewhereInfoPtr & prewhere_info); }; diff --git a/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp b/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp index 6bd8cc60979..e300723b37b 100644 --- a/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp +++ b/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp @@ -135,10 +135,9 @@ NameSet injectRequiredColumns( MergeTreeReadTask::MergeTreeReadTask( - const MergeTreeData::DataPartPtr & data_part_, + const DataPartPtr & data_part_, const MarkRanges & mark_ranges_, size_t part_index_in_query_, - const Names & ordered_names_, const NameSet & column_name_set_, const MergeTreeReadTaskColumns & task_columns_, bool remove_prewhere_column_, @@ -146,7 +145,6 @@ MergeTreeReadTask::MergeTreeReadTask( : data_part{data_part_} , mark_ranges{mark_ranges_} , part_index_in_query{part_index_in_query_} - , ordered_names{ordered_names_} , column_name_set{column_name_set_} , task_columns{task_columns_} , remove_prewhere_column{remove_prewhere_column_} @@ -156,7 +154,7 @@ MergeTreeReadTask::MergeTreeReadTask( MergeTreeBlockSizePredictor::MergeTreeBlockSizePredictor( - const MergeTreeData::DataPartPtr & data_part_, const Names & columns, const Block & sample_block) + const DataPartPtr & data_part_, const Names & columns, const Block & sample_block) : data_part(data_part_) { number_of_rows_in_part = data_part->rows_count; diff --git a/src/Storages/MergeTree/MergeTreeBlockReadUtils.h b/src/Storages/MergeTree/MergeTreeBlockReadUtils.h index 72cdbc562ee..162b15b6388 100644 --- a/src/Storages/MergeTree/MergeTreeBlockReadUtils.h +++ b/src/Storages/MergeTree/MergeTreeBlockReadUtils.h @@ -2,6 +2,7 @@ #include #include +#include #include #include @@ -17,6 +18,9 @@ class IMergeTreeDataPartInfoForReader; using MergeTreeReadTaskPtr = std::unique_ptr; using MergeTreeBlockSizePredictorPtr = std::shared_ptr; +class IMergeTreeDataPart; +using DataPartPtr = std::shared_ptr; + /** If some of the requested columns are not in the part, * then find out which columns may need to be read further, @@ -44,13 +48,11 @@ struct MergeTreeReadTaskColumns struct MergeTreeReadTask { /// data part which should be read while performing this task - MergeTreeData::DataPartPtr data_part; + DataPartPtr data_part; /// Ranges to read from `data_part`. 
MarkRanges mark_ranges; /// for virtual `part_index` virtual column size_t part_index_in_query; - /// ordered list of column names used in this query, allows returning blocks with consistent ordering - const Names & ordered_names; /// used to determine whether column should be filtered during PREWHERE or WHERE const NameSet & column_name_set; /// column names to read during PREWHERE and WHERE @@ -68,10 +70,9 @@ struct MergeTreeReadTask bool isFinished() const { return mark_ranges.empty() && range_reader.isCurrentRangeFinished(); } MergeTreeReadTask( - const MergeTreeData::DataPartPtr & data_part_, + const DataPartPtr & data_part_, const MarkRanges & mark_ranges_, size_t part_index_in_query_, - const Names & ordered_names_, const NameSet & column_name_set_, const MergeTreeReadTaskColumns & task_columns_, bool remove_prewhere_column_, @@ -88,7 +89,7 @@ MergeTreeReadTaskColumns getReadTaskColumns( struct MergeTreeBlockSizePredictor { - MergeTreeBlockSizePredictor(const MergeTreeData::DataPartPtr & data_part_, const Names & columns, const Block & sample_block); + MergeTreeBlockSizePredictor(const DataPartPtr & data_part_, const Names & columns, const Block & sample_block); /// Reset some values for correct statistics calculating void startBlock(); @@ -137,7 +138,7 @@ struct MergeTreeBlockSizePredictor protected: - MergeTreeData::DataPartPtr data_part; + DataPartPtr data_part; struct ColumnInfo { diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index b2e0c14489a..b8755e4ff69 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -1,36 +1,48 @@ #include "Storages/MergeTree/MergeTreeDataPartBuilder.h" #include +#include #include #include #include #include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include #include #include -#include #include +#include +#include #include #include -#include #include #include +#include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include #include #include @@ -40,29 +52,19 @@ #include #include #include +#include +#include #include +#include +#include #include #include #include #include #include -#include #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include #include @@ -82,6 +84,7 @@ #include #include +#include template <> struct fmt::formatter : fmt::formatter @@ -837,8 +840,14 @@ Block MergeTreeData::getSampleBlockWithVirtualColumns() const { DataTypePtr partition_value_type = getPartitionValueType(); return { - ColumnWithTypeAndName(ColumnString::create(), std::make_shared(), "_part"), - ColumnWithTypeAndName(ColumnString::create(), std::make_shared(), "_partition_id"), + ColumnWithTypeAndName( + DataTypeLowCardinality{std::make_shared()}.createColumn(), + std::make_shared(std::make_shared()), + "_part"), + ColumnWithTypeAndName( + DataTypeLowCardinality{std::make_shared()}.createColumn(), + std::make_shared(std::make_shared()), + "_partition_id"), ColumnWithTypeAndName(ColumnUUID::create(), std::make_shared(), "_part_uuid"), ColumnWithTypeAndName(partition_value_type->createColumn(), partition_value_type, "_partition_value")}; } @@ -1886,7 +1895,9 @@ size_t 
MergeTreeData::clearOldTemporaryDirectories(size_t custom_directories_lif { if (temporary_parts.contains(basename)) { - LOG_WARNING(log, "{} is in use (by merge/mutation/INSERT) (consider increasing temporary_directories_lifetime setting)", full_path); + /// Actually we don't rely on temporary_directories_lifetime when removing old temporary directories, + /// it's just an extra level of protection in case we have a bug. + LOG_INFO(log, "{} is in use (by merge/mutation/INSERT) (consider increasing temporary_directories_lifetime setting)", full_path); continue; } else @@ -3393,6 +3404,28 @@ void MergeTreeData::checkPartDuplicate(MutableDataPartPtr & part, Transaction & } } +void MergeTreeData::checkPartDynamicColumns(MutableDataPartPtr & part, DataPartsLock & /*lock*/) const +{ + auto metadata_snapshot = getInMemoryMetadataPtr(); + const auto & columns = metadata_snapshot->getColumns(); + + if (!hasDynamicSubcolumns(columns)) + return; + + const auto & part_columns = part->getColumns(); + for (const auto & part_column : part_columns) + { + auto storage_column = columns.getPhysical(part_column.name); + if (!storage_column.type->hasDynamicSubcolumns()) + continue; + + auto concrete_storage_column = object_columns.getPhysical(part_column.name); + + /// It will throw if types are incompatible. + getLeastCommonTypeForDynamicColumns(storage_column.type, {concrete_storage_column.type, part_column.type}, true); + } +} + void MergeTreeData::preparePartForCommit(MutableDataPartPtr & part, Transaction & out_transaction, bool need_rename) { part->is_temp = false; @@ -3427,6 +3460,7 @@ bool MergeTreeData::addTempPart( checkPartPartition(part, lock); checkPartDuplicate(part, out_transaction, lock); + checkPartDynamicColumns(part, lock); DataPartPtr covering_part; DataPartsVector covered_parts = getActivePartsToReplace(part->info, part->name, covering_part, lock); @@ -3467,6 +3501,7 @@ bool MergeTreeData::renameTempPartAndReplaceImpl( part->assertState({DataPartState::Temporary}); checkPartPartition(part, lock); checkPartDuplicate(part, out_transaction, lock); + checkPartDynamicColumns(part, lock); PartHierarchy hierarchy = getPartHierarchy(part->info, DataPartState::Active, lock); @@ -6783,6 +6818,14 @@ QueryProcessingStage::Enum MergeTreeData::getQueryProcessingStage( const StorageSnapshotPtr & storage_snapshot, SelectQueryInfo & query_info) const { + if (query_context->getClientInfo().collaborate_with_initiator) + return QueryProcessingStage::Enum::FetchColumns; + + if (query_context->getSettingsRef().allow_experimental_parallel_reading_from_replicas + && !query_context->getClientInfo().collaborate_with_initiator + && to_stage >= QueryProcessingStage::WithMergeableState) + return QueryProcessingStage::Enum::WithMergeableState; + if (to_stage >= QueryProcessingStage::Enum::WithMergeableState) { if (auto projection = getQueryProcessingStageWithAggregateProjection(query_context, storage_snapshot, query_info)) @@ -7469,7 +7512,7 @@ bool MergeTreeData::moveParts(const CurrentlyMovingPartsTaggerPtr & moving_tagge } catch (...)
{ - write_part_log(ExecutionStatus::fromCurrentException()); + write_part_log(ExecutionStatus::fromCurrentException("", true)); if (cloned_part) cloned_part->remove(); @@ -7541,7 +7584,19 @@ MergeTreeData::WriteAheadLogPtr MergeTreeData::getWriteAheadLog() if (!write_ahead_log) { auto reservation = reserveSpace(getSettings()->write_ahead_log_max_bytes); - write_ahead_log = std::make_shared(*this, reservation->getDisk()); + for (const auto & disk: reservation->getDisks()) + { + if (!disk->isRemote()) + { + write_ahead_log = std::make_shared(*this, disk); + break; + } + } + + if (!write_ahead_log) + throw Exception( + ErrorCodes::NOT_IMPLEMENTED, + "Can't store write ahead log in remote disk. It makes no sense."); } return write_ahead_log; @@ -7550,10 +7605,10 @@ MergeTreeData::WriteAheadLogPtr MergeTreeData::getWriteAheadLog() NamesAndTypesList MergeTreeData::getVirtuals() const { return NamesAndTypesList{ - NameAndTypePair("_part", std::make_shared()), + NameAndTypePair("_part", std::make_shared(std::make_shared())), NameAndTypePair("_part_index", std::make_shared()), NameAndTypePair("_part_uuid", std::make_shared()), - NameAndTypePair("_partition_id", std::make_shared()), + NameAndTypePair("_partition_id", std::make_shared(std::make_shared())), NameAndTypePair("_partition_value", getPartitionValueType()), NameAndTypePair("_sample_factor", std::make_shared()), NameAndTypePair("_part_offset", std::make_shared()), diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index 7dcd0c40553..b9531e4020b 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -1417,6 +1417,7 @@ private: /// Checking that candidate part doesn't break invariants: correct partition void checkPartPartition(MutableDataPartPtr & part, DataPartsLock & lock) const; void checkPartDuplicate(MutableDataPartPtr & part, Transaction & transaction, DataPartsLock & lock) const; + void checkPartDynamicColumns(MutableDataPartPtr & part, DataPartsLock & lock) const; /// Preparing itself to be committed in memory: fill some fields inside part, add it to data_parts_indexes /// in precommitted state and to transaction diff --git a/src/Storages/MergeTree/MergeTreeDataPartBuilder.cpp b/src/Storages/MergeTree/MergeTreeDataPartBuilder.cpp index 3aa68266a3e..d55248df0af 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartBuilder.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartBuilder.cpp @@ -165,7 +165,7 @@ MergeTreeDataPartBuilder & MergeTreeDataPartBuilder::withPartFormatFromVolume() if (!storage || !mark_type) { /// Didn't find any data or mark file, suppose that part is empty. - return withBytesAndRows(0, 0); + return withBytesAndRowsOnDisk(0, 0); } part_storage = std::move(storage); @@ -181,7 +181,7 @@ MergeTreeDataPartBuilder & MergeTreeDataPartBuilder::withPartFormatFromStorage() if (!mark_type) { /// Didn't find any mark file, suppose that part is empty. 
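// A minimal sketch of the virtual-column type change made in MergeTreeData.cpp
// above: _part and _partition_id are now LowCardinality(String) instead of plain
// String. The headers and types are ClickHouse's; the helper itself is ours and
// only illustrates the two affected columns, not the full getVirtuals() list.
#include <memory>
#include <Core/NamesAndTypes.h>
#include <DataTypes/DataTypeLowCardinality.h>
#include <DataTypes/DataTypeString.h>

DB::NamesAndTypesList lowCardinalityPartVirtuals()
{
    auto lc_string = std::make_shared<DB::DataTypeLowCardinality>(std::make_shared<DB::DataTypeString>());
    return DB::NamesAndTypesList{
        DB::NameAndTypePair("_part", lc_string),
        DB::NameAndTypePair("_partition_id", lc_string)};
}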
- return withBytesAndRows(0, 0); + return withBytesAndRowsOnDisk(0, 0); } part_type = mark_type->part_type; diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index 512f194ea53..597241c1753 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -876,7 +876,8 @@ RangesInDataParts MergeTreeDataSelectExecutor::filterPartsByPrimaryKeyAndSkipInd ReadFromMergeTree::IndexStats & index_stats, bool use_skip_indexes) { - RangesInDataParts parts_with_ranges(parts.size()); + RangesInDataParts parts_with_ranges; + parts_with_ranges.resize(parts.size()); const Settings & settings = context->getSettingsRef(); /// Let's start analyzing all useful indices @@ -1010,7 +1011,7 @@ RangesInDataParts MergeTreeDataSelectExecutor::filterPartsByPrimaryKeyAndSkipInd if (metadata_snapshot->hasPrimaryKey()) ranges.ranges = markRangesFromPKRange(part, metadata_snapshot, key_condition, settings, log); else if (total_marks_count) - ranges.ranges = MarkRanges{MarkRange{0, total_marks_count}}; + ranges.ranges = MarkRanges{{MarkRange{0, total_marks_count}}}; sum_marks_pk.fetch_add(ranges.getMarksCount(), std::memory_order_relaxed); diff --git a/src/Storages/MergeTree/MergeTreeInOrderSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeInOrderSelectProcessor.cpp index 0882b7fa129..bd7aa34ec0e 100644 --- a/src/Storages/MergeTree/MergeTreeInOrderSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeInOrderSelectProcessor.cpp @@ -1,4 +1,6 @@ #include +#include "Storages/MergeTree/RangesInDataPart.h" +#include namespace DB { @@ -18,23 +20,42 @@ try initializeReaders(); MarkRanges mark_ranges_for_task; - /// If we need to read few rows, set one range per task to reduce number of read data. - if (has_limit_below_one_block) + + if (!pool) { - mark_ranges_for_task = { std::move(all_mark_ranges.front()) }; - all_mark_ranges.pop_front(); + /// If we need to read few rows, set one range per task to reduce number of read data. + if (has_limit_below_one_block) + { + mark_ranges_for_task = MarkRanges{}; + mark_ranges_for_task.emplace_front(std::move(all_mark_ranges.front())); + all_mark_ranges.pop_front(); + } + else + { + mark_ranges_for_task = std::move(all_mark_ranges); + all_mark_ranges.clear(); + } } else { - mark_ranges_for_task = std::move(all_mark_ranges); - all_mark_ranges.clear(); + auto description = RangesInDataPartDescription{ + .info = data_part->info, + /// We just ignore all the distribution done before + /// Everything will be done on coordinator side + .ranges = {}, + }; + + mark_ranges_for_task = pool->getNewTask(description); + + if (mark_ranges_for_task.empty()) + return false; } auto size_predictor = (preferred_block_size_bytes == 0) ? 
nullptr : getSizePredictor(data_part, task_columns, sample_block); task = std::make_unique( - data_part, mark_ranges_for_task, part_index_in_query, ordered_names, column_name_set, task_columns, + data_part, mark_ranges_for_task, part_index_in_query, column_name_set, task_columns, prewhere_info && prewhere_info->remove_prewhere_column, std::move(size_predictor)); diff --git a/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp b/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp index 7a8a28b24aa..b3e6ec6ec10 100644 --- a/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp @@ -88,7 +88,8 @@ static void assertIndexColumnsType(const Block & header) WhichDataType which(actual_type); if (!which.isUInt() && !which.isInt() && !which.isString() && !which.isFixedString() && !which.isFloat() && - !which.isDate() && !which.isDateTime() && !which.isDateTime64() && !which.isEnum() && !which.isUUID()) + !which.isDate() && !which.isDateTime() && !which.isDateTime64() && !which.isEnum() && !which.isUUID() && + !which.isIPv4() && !which.isIPv6()) throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Unexpected type {} of bloom filter index.", type->getName()); } } diff --git a/src/Storages/MergeTree/MergeTreePartInfo.cpp b/src/Storages/MergeTree/MergeTreePartInfo.cpp index a6baecee125..84432a293d7 100644 --- a/src/Storages/MergeTree/MergeTreePartInfo.cpp +++ b/src/Storages/MergeTree/MergeTreePartInfo.cpp @@ -2,6 +2,7 @@ #include #include #include +#include "Core/ProtocolDefines.h" namespace DB { @@ -10,6 +11,7 @@ namespace ErrorCodes { extern const int BAD_DATA_PART_NAME; extern const int INVALID_PARTITION_VALUE; + extern const int UNKNOWN_FORMAT_VERSION; } @@ -253,6 +255,43 @@ String MergeTreePartInfo::getPartNameV0(DayNum left_date, DayNum right_date) con return wb.str(); } +void MergeTreePartInfo::serialize(WriteBuffer & out) const +{ + UInt64 version = DBMS_MERGE_TREE_PART_INFO_VERSION; + /// Must be the first + writeIntBinary(version, out); + + writeStringBinary(partition_id, out); + writeIntBinary(min_block, out); + writeIntBinary(max_block, out); + writeIntBinary(level, out); + writeIntBinary(mutation, out); + writeBoolText(use_leagcy_max_level, out); +} + + +String MergeTreePartInfo::describe() const +{ + return getPartNameV1(); +} + + +void MergeTreePartInfo::deserialize(ReadBuffer & in) +{ + UInt64 version; + readIntBinary(version, in); + if (version != DBMS_MERGE_TREE_PART_INFO_VERSION) + throw Exception(ErrorCodes::UNKNOWN_FORMAT_VERSION, "Version for MergeTreePart info mismatched. 
Got: {}, supported version: {}", + version, DBMS_MERGE_TREE_PART_INFO_VERSION); + + readStringBinary(partition_id, in); + readIntBinary(min_block, in); + readIntBinary(max_block, in); + readIntBinary(level, in); + readIntBinary(mutation, in); + readBoolText(use_leagcy_max_level, in); +} + DetachedPartInfo DetachedPartInfo::parseDetachedPartName( const DiskPtr & disk, std::string_view dir_name, MergeTreeDataFormatVersion format_version) { diff --git a/src/Storages/MergeTree/MergeTreePartInfo.h b/src/Storages/MergeTree/MergeTreePartInfo.h index cad851fb882..b91bc1e595b 100644 --- a/src/Storages/MergeTree/MergeTreePartInfo.h +++ b/src/Storages/MergeTree/MergeTreePartInfo.h @@ -7,6 +7,8 @@ #include #include #include +#include +#include #include @@ -112,6 +114,10 @@ struct MergeTreePartInfo return static_cast(max_block - min_block + 1); } + void serialize(WriteBuffer & out) const; + String describe() const; + void deserialize(ReadBuffer & in); + /// Simple sanity check for partition ID. Checking that it's not too long or too short, doesn't contain a lot of '_'. static void validatePartitionID(const String & partition_id, MergeTreeDataFormatVersion format_version); diff --git a/src/Storages/MergeTree/MergeTreeReadPool.cpp b/src/Storages/MergeTree/MergeTreeReadPool.cpp index 37b24422af0..7444e099150 100644 --- a/src/Storages/MergeTree/MergeTreeReadPool.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPool.cpp @@ -1,6 +1,7 @@ #include #include #include +#include "Common/Stopwatch.h" #include #include @@ -18,6 +19,52 @@ namespace ErrorCodes namespace DB { + +std::vector IMergeTreeReadPool::fillPerPartInfo(const RangesInDataParts & parts) +{ + std::vector per_part_sum_marks; + Block sample_block = storage_snapshot->metadata->getSampleBlock(); + is_part_on_remote_disk.resize(parts.size()); + + for (const auto i : collections::range(0, parts.size())) + { + const auto & part = parts[i]; +#ifndef NDEBUG + assertSortedAndNonIntersecting(part.ranges); +#endif + + bool part_on_remote_disk = part.data_part->isStoredOnRemoteDisk(); + is_part_on_remote_disk[i] = part_on_remote_disk; + do_not_steal_tasks |= part_on_remote_disk; + + /// Read marks for every data part. + size_t sum_marks = 0; + for (const auto & range : part.ranges) + sum_marks += range.end - range.begin; + + per_part_sum_marks.push_back(sum_marks); + + auto task_columns = getReadTaskColumns( + LoadedMergeTreeDataPartInfoForReader(part.data_part), storage_snapshot, + column_names, virtual_column_names, prewhere_info, /*with_subcolumns=*/ true); + + auto size_predictor = !predict_block_size_bytes ? 
nullptr + : IMergeTreeSelectAlgorithm::getSizePredictor(part.data_part, task_columns, sample_block); + + auto & per_part = per_part_params.emplace_back(); + + per_part.data_part = part; + per_part.size_predictor = std::move(size_predictor); + + /// will be used to distinguish between PREWHERE and WHERE columns when applying filter + const auto & required_column_names = task_columns.columns.getNames(); + per_part.column_name_set = {required_column_names.begin(), required_column_names.end()}; + per_part.task_columns = std::move(task_columns); + } + + return per_part_sum_marks; +} + MergeTreeReadPool::MergeTreeReadPool( size_t threads_, size_t sum_marks_, @@ -30,23 +77,25 @@ MergeTreeReadPool::MergeTreeReadPool( const BackoffSettings & backoff_settings_, size_t preferred_block_size_bytes_, bool do_not_steal_tasks_) - : backoff_settings{backoff_settings_} + : IMergeTreeReadPool( + storage_snapshot_, + column_names_, + virtual_column_names_, + min_marks_for_concurrent_read_, + prewhere_info_, + std::move(parts_), + (preferred_block_size_bytes_ > 0), + do_not_steal_tasks_) + , backoff_settings{backoff_settings_} , backoff_state{threads_} - , storage_snapshot{storage_snapshot_} - , column_names{column_names_} - , virtual_column_names{virtual_column_names_} - , do_not_steal_tasks{do_not_steal_tasks_} - , predict_block_size_bytes{preferred_block_size_bytes_ > 0} - , prewhere_info{prewhere_info_} - , parts_ranges{std::move(parts_)} { /// parts don't contain duplicate MergeTreeDataPart's. const auto per_part_sum_marks = fillPerPartInfo(parts_ranges); - fillPerThreadInfo(threads_, sum_marks_, per_part_sum_marks, parts_ranges, min_marks_for_concurrent_read_); + fillPerThreadInfo(threads_, sum_marks_, per_part_sum_marks, parts_ranges); } -MergeTreeReadTaskPtr MergeTreeReadPool::getTask(size_t min_marks_to_read, size_t thread, const Names & ordered_names) +MergeTreeReadTaskPtr MergeTreeReadPool::getTask(size_t thread) { const std::lock_guard lock{mutex}; @@ -86,18 +135,18 @@ MergeTreeReadTaskPtr MergeTreeReadPool::getTask(size_t min_marks_to_read, size_t auto & thread_task = thread_tasks.parts_and_ranges.back(); const auto part_idx = thread_task.part_idx; - auto & part = parts_with_idx[part_idx]; + auto & part = per_part_params[part_idx].data_part; auto & marks_in_part = thread_tasks.sum_marks_in_parts.back(); size_t need_marks; if (is_part_on_remote_disk[part_idx]) /// For better performance with remote disks need_marks = marks_in_part; else /// Get whole part to read if it is small enough. - need_marks = std::min(marks_in_part, min_marks_to_read); + need_marks = std::min(marks_in_part, min_marks_for_concurrent_read); /// Do not leave too little rows in part for next time. 
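// The rule stated in the comment above, factored out as a tiny sketch so the
// intent is explicit: if honoring `need` would strand a remainder smaller than
// the per-task minimum, take the whole part instead. The helper name is ours.
#include <cstddef>

inline size_t marksToTake(size_t marks_in_part, size_t need, size_t min_marks_per_task)
{
    if (marks_in_part > need && marks_in_part - need < min_marks_per_task)
        return marks_in_part; // avoid leaving a tiny, inefficient tail
    return need;
}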
if (marks_in_part > need_marks && - marks_in_part - need_marks < min_marks_to_read) + marks_in_part - need_marks < min_marks_for_concurrent_read) need_marks = marks_in_part; MarkRanges ranges_to_get_from_part; @@ -142,7 +191,7 @@ MergeTreeReadTaskPtr MergeTreeReadPool::getTask(size_t min_marks_to_read, size_t : std::make_unique(*per_part.size_predictor); /// make a copy return std::make_unique( - part.data_part, ranges_to_get_from_part, part.part_index_in_query, ordered_names, + part.data_part, ranges_to_get_from_part, part.part_index_in_query, per_part.column_name_set, per_part.task_columns, prewhere_info && prewhere_info->remove_prewhere_column, std::move(curr_task_size_predictor)); } @@ -192,56 +241,9 @@ void MergeTreeReadPool::profileFeedback(ReadBufferFromFileBase::ProfileInfo info } -std::vector MergeTreeReadPool::fillPerPartInfo(const RangesInDataParts & parts) -{ - std::vector per_part_sum_marks; - Block sample_block = storage_snapshot->metadata->getSampleBlock(); - is_part_on_remote_disk.resize(parts.size()); - - for (const auto i : collections::range(0, parts.size())) - { - const auto & part = parts[i]; -#ifndef NDEBUG - assertSortedAndNonIntersecting(part.ranges); -#endif - - bool part_on_remote_disk = part.data_part->isStoredOnRemoteDisk(); - is_part_on_remote_disk[i] = part_on_remote_disk; - do_not_steal_tasks |= part_on_remote_disk; - - /// Read marks for every data part. - size_t sum_marks = 0; - for (const auto & range : part.ranges) - sum_marks += range.end - range.begin; - - per_part_sum_marks.push_back(sum_marks); - - auto task_columns = getReadTaskColumns( - LoadedMergeTreeDataPartInfoForReader(part.data_part), storage_snapshot, - column_names, virtual_column_names, prewhere_info, /*with_subcolumns=*/ true); - - auto size_predictor = !predict_block_size_bytes ? 
nullptr - : IMergeTreeSelectAlgorithm::getSizePredictor(part.data_part, task_columns, sample_block); - - auto & per_part = per_part_params.emplace_back(); - - per_part.size_predictor = std::move(size_predictor); - - /// will be used to distinguish between PREWHERE and WHERE columns when applying filter - const auto & required_column_names = task_columns.columns.getNames(); - per_part.column_name_set = {required_column_names.begin(), required_column_names.end()}; - per_part.task_columns = std::move(task_columns); - - parts_with_idx.push_back({ part.data_part, part.part_index_in_query }); - } - - return per_part_sum_marks; -} - - void MergeTreeReadPool::fillPerThreadInfo( size_t threads, size_t sum_marks, std::vector per_part_sum_marks, - const RangesInDataParts & parts, size_t min_marks_for_concurrent_read) + const RangesInDataParts & parts) { threads_tasks.resize(threads); if (parts.empty()) @@ -355,4 +357,148 @@ void MergeTreeReadPool::fillPerThreadInfo( } +MergeTreeReadPoolParallelReplicas::~MergeTreeReadPoolParallelReplicas() = default; + + +Block MergeTreeReadPoolParallelReplicas::getHeader() const +{ + return storage_snapshot->getSampleBlockForColumns(extension.colums_to_read); +} + +MergeTreeReadTaskPtr MergeTreeReadPoolParallelReplicas::getTask(size_t thread) +{ + /// This parameter is needed only to satisfy the interface + UNUSED(thread); + + std::lock_guard lock(mutex); + + if (no_more_tasks_available) + return nullptr; + + if (buffered_ranges.empty()) + { + auto result = extension.callback(ParallelReadRequest{ + .replica_num = extension.number_of_current_replica, .min_number_of_marks = min_marks_for_concurrent_read * threads}); + + if (!result || result->finish) + { + no_more_tasks_available = true; + return nullptr; + } + + buffered_ranges = std::move(result->description); + } + + if (buffered_ranges.empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "No tasks to read. This is a bug"); + + auto & current_task = buffered_ranges.front(); + + RangesInDataPart part; + size_t part_idx = 0; + for (size_t index = 0; index < per_part_params.size(); ++index) + { + auto & other_part = per_part_params[index]; + if (other_part.data_part.data_part->info == current_task.info) + { + part = other_part.data_part; + part_idx = index; + break; + } + } + + MarkRanges ranges_to_read; + size_t current_sum_marks = 0; + while (current_sum_marks < min_marks_for_concurrent_read && !current_task.ranges.empty()) + { + auto diff = min_marks_for_concurrent_read - current_sum_marks; + auto range = current_task.ranges.front(); + if (range.getNumberOfMarks() > diff) + { + auto new_range = range; + new_range.end = range.begin + diff; + range.begin += diff; + + current_task.ranges.front() = range; + ranges_to_read.push_back(new_range); + current_sum_marks += new_range.getNumberOfMarks(); + continue; + } + + ranges_to_read.push_back(range); + current_sum_marks += range.getNumberOfMarks(); + current_task.ranges.pop_front(); + } + + if (current_task.ranges.empty()) + buffered_ranges.pop_front(); + + const auto & per_part = per_part_params[part_idx]; + + auto curr_task_size_predictor + = !per_part.size_predictor ? 
nullptr : std::make_unique(*per_part.size_predictor); /// make a copy + + return std::make_unique( + part.data_part, + ranges_to_read, + part.part_index_in_query, + per_part.column_name_set, + per_part.task_columns, + prewhere_info && prewhere_info->remove_prewhere_column, + std::move(curr_task_size_predictor)); +} + + +MarkRanges MergeTreeInOrderReadPoolParallelReplicas::getNewTask(RangesInDataPartDescription description) +{ + std::lock_guard lock(mutex); + + auto get_from_buffer = [&]() -> std::optional + { + for (auto & desc : buffered_tasks) + { + if (desc.info == description.info && !desc.ranges.empty()) + { + auto result = std::move(desc.ranges); + desc.ranges = MarkRanges{}; + return result; + } + } + return std::nullopt; + }; + + if (auto result = get_from_buffer(); result) + return result.value(); + + if (no_more_tasks) + return {}; + + auto response = extension.callback(ParallelReadRequest{ + .mode = mode, + .replica_num = extension.number_of_current_replica, + .min_number_of_marks = min_marks_for_concurrent_read * request.size(), + .description = request, + }); + + if (!response || response->description.empty() || response->finish) + { + no_more_tasks = true; + return {}; + } + + /// Fill the buffer + for (size_t i = 0; i < request.size(); ++i) + { + auto & new_ranges = response->description[i].ranges; + auto & old_ranges = buffered_tasks[i].ranges; + std::move(new_ranges.begin(), new_ranges.end(), std::back_inserter(old_ranges)); + } + + if (auto result = get_from_buffer(); result) + return result.value(); + + return {}; +} + + } diff --git a/src/Storages/MergeTree/MergeTreeReadPool.h b/src/Storages/MergeTree/MergeTreeReadPool.h index c9fe70d9a78..46d2e8bae3b 100644 --- a/src/Storages/MergeTree/MergeTreeReadPool.h +++ b/src/Storages/MergeTree/MergeTreeReadPool.h @@ -1,10 +1,13 @@ #pragma once #include -#include +#include #include #include +#include +#include #include + #include @@ -13,6 +16,64 @@ namespace DB using MergeTreeReadTaskPtr = std::unique_ptr; + +class IMergeTreeReadPool +{ +public: + IMergeTreeReadPool( + StorageSnapshotPtr storage_snapshot_, + Names column_names_, + Names virtual_column_names_, + size_t min_marks_for_concurrent_read_, + PrewhereInfoPtr prewhere_info_, + RangesInDataParts parts_ranges_, + bool predict_block_size_bytes_, + bool do_not_steal_tasks_) + : storage_snapshot(storage_snapshot_) + , column_names(column_names_) + , virtual_column_names(virtual_column_names_) + , min_marks_for_concurrent_read(min_marks_for_concurrent_read_) + , prewhere_info(prewhere_info_) + , parts_ranges(parts_ranges_) + , predict_block_size_bytes(predict_block_size_bytes_) + , do_not_steal_tasks(do_not_steal_tasks_) + {} + + virtual MergeTreeReadTaskPtr getTask(size_t thread) = 0; + virtual Block getHeader() const = 0; + virtual void profileFeedback(ReadBufferFromFileBase::ProfileInfo info) = 0; + virtual ~IMergeTreeReadPool() = default; + +protected: + + std::vector fillPerPartInfo(const RangesInDataParts & parts); + + /// Initialized in constructor + StorageSnapshotPtr storage_snapshot; + const Names column_names; + const Names virtual_column_names; + size_t min_marks_for_concurrent_read{0}; + PrewhereInfoPtr prewhere_info; + RangesInDataParts parts_ranges; + bool predict_block_size_bytes; + bool do_not_steal_tasks; + + struct PerPartParams + { + MergeTreeReadTaskColumns task_columns; + NameSet column_name_set; + MergeTreeBlockSizePredictorPtr size_predictor; + RangesInDataPart data_part; + }; + + std::vector per_part_params; + std::vector is_part_on_remote_disk; + + 
mutable std::mutex mutex; +}; + +using IMergeTreeReadPoolPtr = std::shared_ptr; + /** Provides read tasks for MergeTreeThreadSelectProcessor`s in fine-grained batches, allowing for more * uniform distribution of work amongst multiple threads. All parts and their ranges are divided into `threads` * workloads with at most `sum_marks / threads` marks. Then, threads are performing reads from these workloads @@ -20,7 +81,7 @@ using MergeTreeReadTaskPtr = std::unique_ptr; * it's workload, it either is signaled that no more work is available (`do_not_steal_tasks == false`) or * continues taking small batches from other threads' workloads (`do_not_steal_tasks == true`). */ -class MergeTreeReadPool : private boost::noncopyable +class MergeTreeReadPool final: public IMergeTreeReadPool, private boost::noncopyable { public: /** Pull could dynamically lower (backoff) number of threads, if read operation are too slow. @@ -82,47 +143,22 @@ public: size_t preferred_block_size_bytes_, bool do_not_steal_tasks_ = false); - MergeTreeReadTaskPtr getTask(size_t min_marks_to_read, size_t thread, const Names & ordered_names); + ~MergeTreeReadPool() override = default; + MergeTreeReadTaskPtr getTask(size_t thread) override; /** Each worker could call this method and pass information about read performance. * If read performance is too low, pool could decide to lower number of threads: do not assign more tasks to several threads. * This allows to overcome excessive load to disk subsystem, when reads are not from page cache. */ - void profileFeedback(ReadBufferFromFileBase::ProfileInfo info); + void profileFeedback(ReadBufferFromFileBase::ProfileInfo info) override; - Block getHeader() const; + Block getHeader() const override; private: - std::vector fillPerPartInfo(const RangesInDataParts & parts); void fillPerThreadInfo( size_t threads, size_t sum_marks, std::vector per_part_sum_marks, - const RangesInDataParts & parts, size_t min_marks_for_concurrent_read); - - StorageSnapshotPtr storage_snapshot; - const Names column_names; - const Names virtual_column_names; - bool do_not_steal_tasks; - bool predict_block_size_bytes; - - struct PerPartParams - { - MergeTreeReadTaskColumns task_columns; - NameSet column_name_set; - MergeTreeBlockSizePredictorPtr size_predictor; - }; - - std::vector per_part_params; - - PrewhereInfoPtr prewhere_info; - - struct Part - { - MergeTreeData::DataPartPtr data_part; - size_t part_index_in_query; - }; - - std::vector parts_with_idx; + const RangesInDataParts & parts); struct ThreadTask { @@ -137,18 +173,104 @@ private: }; std::vector threads_tasks; - std::set remaining_thread_tasks; - - RangesInDataParts parts_ranges; - - mutable std::mutex mutex; - Poco::Logger * log = &Poco::Logger::get("MergeTreeReadPool"); - std::vector is_part_on_remote_disk; }; using MergeTreeReadPoolPtr = std::shared_ptr; +class MergeTreeReadPoolParallelReplicas : public IMergeTreeReadPool, private boost::noncopyable +{ +public: + + MergeTreeReadPoolParallelReplicas( + StorageSnapshotPtr storage_snapshot_, + size_t threads_, + ParallelReadingExtension extension_, + const RangesInDataParts & parts_, + const PrewhereInfoPtr & prewhere_info_, + const Names & column_names_, + const Names & virtual_column_names_, + size_t min_marks_for_concurrent_read_ + ) + : IMergeTreeReadPool( + storage_snapshot_, + column_names_, + virtual_column_names_, + min_marks_for_concurrent_read_, + prewhere_info_, + parts_, + /*predict_block_size*/false, + /*do_not_steal_tasks*/false) + , extension(extension_) + , threads(threads_) + { + 
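// On construction this pool announces its full set of parts and ranges to the
// coordinator (all_callback below); afterwards getTask() repeatedly requests
// roughly min_marks_for_concurrent_read * threads marks and carves tasks from
// the returned descriptions. A self-contained sketch of that carving loop,
// where Range is a simplified stand-in for MarkRange:
#include <cstddef>
#include <deque>
#include <vector>

struct Range
{
    size_t begin;
    size_t end;
    size_t marks() const { return end - begin; }
};

inline std::vector<Range> carveMarks(std::deque<Range> & ranges, size_t quota)
{
    std::vector<Range> out;
    size_t got = 0;
    while (got < quota && !ranges.empty())
    {
        Range & front = ranges.front();
        size_t want = quota - got;
        if (front.marks() > want)
        {
            out.push_back({front.begin, front.begin + want}); // split the range
            front.begin += want;
            return out;
        }
        out.push_back(front);
        got += front.marks();
        ranges.pop_front();
    }
    return out;
}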
fillPerPartInfo(parts_ranges); + + extension.all_callback({ + .description = parts_ranges.getDescriptions(), + .replica_num = extension.number_of_current_replica + }); + } + + ~MergeTreeReadPoolParallelReplicas() override; + + MergeTreeReadTaskPtr getTask(size_t thread) override; + Block getHeader() const override; + void profileFeedback(ReadBufferFromFileBase::ProfileInfo) override {} + +private: + ParallelReadingExtension extension; + + RangesInDataPartsDescription buffered_ranges; + size_t threads; + bool no_more_tasks_available{false}; + Poco::Logger * log = &Poco::Logger::get("MergeTreeReadPoolParallelReplicas"); +}; + +using MergeTreeReadPoolParallelReplicasPtr = std::shared_ptr; + + +class MergeTreeInOrderReadPoolParallelReplicas : private boost::noncopyable +{ +public: + MergeTreeInOrderReadPoolParallelReplicas( + RangesInDataParts parts_, + ParallelReadingExtension extension_, + CoordinationMode mode_, + size_t min_marks_for_concurrent_read_) + : parts_ranges(parts_) + , extension(extension_) + , mode(mode_) + , min_marks_for_concurrent_read(min_marks_for_concurrent_read_) + { + for (const auto & part : parts_ranges) + request.push_back({part.data_part->info, MarkRanges{}}); + + for (const auto & part : parts_ranges) + buffered_tasks.push_back({part.data_part->info, MarkRanges{}}); + + extension.all_callback({ + .description = parts_ranges.getDescriptions(), + .replica_num = extension.number_of_current_replica + }); + } + + MarkRanges getNewTask(RangesInDataPartDescription description); + + RangesInDataParts parts_ranges; + ParallelReadingExtension extension; + CoordinationMode mode; + size_t min_marks_for_concurrent_read{0}; + + bool no_more_tasks{false}; + RangesInDataPartsDescription request; + RangesInDataPartsDescription buffered_tasks; + + std::mutex mutex; +}; + +using MergeTreeInOrderReadPoolParallelReplicasPtr = std::shared_ptr; + } diff --git a/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.cpp index d0d464b3c29..367818c7af1 100644 --- a/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.cpp @@ -1,4 +1,6 @@ #include +#include +#include "Storages/MergeTree/MergeTreeBaseSelectProcessor.h" namespace DB { @@ -10,6 +12,22 @@ namespace ErrorCodes bool MergeTreeReverseSelectAlgorithm::getNewTaskImpl() try +{ + if (pool) + return getNewTaskParallelReplicas(); + else + return getNewTaskOrdinaryReading(); +} +catch (...) +{ + /// Suspicion of the broken part. A part is added to the queue for verification. + if (getCurrentExceptionCode() != ErrorCodes::MEMORY_LIMIT_EXCEEDED) + storage.reportBrokenPart(data_part); + throw; +} + + +bool MergeTreeReverseSelectAlgorithm::getNewTaskOrdinaryReading() { if (chunks.empty() && all_mark_ranges.empty()) return false; @@ -23,25 +41,57 @@ try initializeReaders(); /// Read ranges from right to left. - MarkRanges mark_ranges_for_task = { all_mark_ranges.back() }; + MarkRanges mark_ranges_for_task{std::move(all_mark_ranges.back())}; all_mark_ranges.pop_back(); auto size_predictor = (preferred_block_size_bytes == 0) ? nullptr : getSizePredictor(data_part, task_columns, sample_block); task = std::make_unique( - data_part, mark_ranges_for_task, part_index_in_query, ordered_names, column_name_set, + data_part, mark_ranges_for_task, part_index_in_query, column_name_set, task_columns, prewhere_info && prewhere_info->remove_prewhere_column, std::move(size_predictor)); return true; + } -catch (...) 
+ +bool MergeTreeReverseSelectAlgorithm::getNewTaskParallelReplicas() { - /// Suspicion of the broken part. A part is added to the queue for verification. - if (getCurrentExceptionCode() != ErrorCodes::MEMORY_LIMIT_EXCEEDED) - storage.reportBrokenPart(data_part); - throw; + if (chunks.empty() && no_more_tasks) + return false; + + /// We have some blocks to return in buffer. + /// Return true to continue reading, but actually don't create a task. + if (no_more_tasks) + return true; + + if (!reader) + initializeReaders(); + + auto description = RangesInDataPartDescription{ + .info = data_part->info, + /// We just ignore all the distribution done before + /// Everything will be done on coordinator side + .ranges = {}, + }; + + auto mark_ranges_for_task = pool->getNewTask(description); + if (mark_ranges_for_task.empty()) + { + /// If we have chunks in buffer - return true to continue reading from them + return !chunks.empty(); + } + + auto size_predictor = (preferred_block_size_bytes == 0) ? nullptr + : getSizePredictor(data_part, task_columns, sample_block); + + task = std::make_unique( + data_part, mark_ranges_for_task, part_index_in_query, column_name_set, + task_columns, prewhere_info && prewhere_info->remove_prewhere_column, + std::move(size_predictor)); + + return true; } MergeTreeReverseSelectAlgorithm::BlockAndProgress MergeTreeReverseSelectAlgorithm::readFromPart() diff --git a/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.h b/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.h index ccadb1f1c61..fd25748050a 100644 --- a/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.h +++ b/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.h @@ -27,9 +27,16 @@ private: bool getNewTaskImpl() override; void finalizeNewTask() override {} + bool getNewTaskParallelReplicas(); + bool getNewTaskOrdinaryReading(); + BlockAndProgress readFromPart() override; std::vector chunks; + + /// Used for parallel replicas + bool no_more_tasks{false}; + Poco::Logger * log = &Poco::Logger::get("MergeTreeReverseSelectProcessor"); }; diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index 3f9da9c130a..e5ca851c76b 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -21,21 +21,22 @@ MergeTreeSelectAlgorithm::MergeTreeSelectAlgorithm( const PrewhereInfoPtr & prewhere_info_, ExpressionActionsSettings actions_settings, const MergeTreeReaderSettings & reader_settings_, + MergeTreeInOrderReadPoolParallelReplicasPtr pool_, const Names & virt_column_names_, size_t part_index_in_query_, - bool has_limit_below_one_block_, - std::optional extension_) + bool has_limit_below_one_block_) : IMergeTreeSelectAlgorithm{ storage_snapshot_->getSampleBlockForColumns(required_columns_), storage_, storage_snapshot_, prewhere_info_, std::move(actions_settings), max_block_size_rows_, preferred_block_size_bytes_, preferred_max_column_in_block_size_bytes_, - reader_settings_, use_uncompressed_cache_, virt_column_names_, extension_}, + reader_settings_, use_uncompressed_cache_, virt_column_names_}, required_columns{std::move(required_columns_)}, data_part{owned_data_part_}, sample_block(storage_snapshot_->metadata->getSampleBlock()), all_mark_ranges(std::move(mark_ranges_)), part_index_in_query(part_index_in_query_), has_limit_below_one_block(has_limit_below_one_block_), + pool(pool_), total_rows(data_part->index_granularity.getRowsCountInRanges(all_mark_ranges)) { ordered_names = 
header_without_const_virtual_columns.getNames(); diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.h b/src/Storages/MergeTree/MergeTreeSelectProcessor.h index 12f4804835c..76c8d81dd0b 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.h +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.h @@ -3,6 +3,7 @@ #include #include #include +#include #include @@ -29,10 +30,10 @@ public: const PrewhereInfoPtr & prewhere_info, ExpressionActionsSettings actions_settings, const MergeTreeReaderSettings & reader_settings, + MergeTreeInOrderReadPoolParallelReplicasPtr pool_, const Names & virt_column_names = {}, size_t part_index_in_query_ = 0, - bool has_limit_below_one_block_ = false, - std::optional extension_ = {}); + bool has_limit_below_one_block_ = false); ~MergeTreeSelectAlgorithm() override; @@ -64,6 +65,9 @@ protected: /// It reduces amount of read data for queries with small LIMIT. bool has_limit_below_one_block = false; + /// Pool for reading in order + MergeTreeInOrderReadPoolParallelReplicasPtr pool; + size_t total_rows = 0; }; diff --git a/src/Storages/MergeTree/MergeTreeThreadSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeThreadSelectProcessor.cpp index 60586024359..97c283b8c01 100644 --- a/src/Storages/MergeTree/MergeTreeThreadSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeThreadSelectProcessor.cpp @@ -7,16 +7,11 @@ namespace DB { -namespace ErrorCodes -{ - extern const int LOGICAL_ERROR; -} - MergeTreeThreadSelectAlgorithm::MergeTreeThreadSelectAlgorithm( size_t thread_, - const MergeTreeReadPoolPtr & pool_, - size_t min_marks_to_read_, - UInt64 max_block_size_rows_, + IMergeTreeReadPoolPtr pool_, + size_t min_marks_for_concurrent_read_, + size_t max_block_size_rows_, size_t preferred_block_size_bytes_, size_t preferred_max_column_in_block_size_bytes_, const MergeTreeData & storage_, @@ -25,74 +20,22 @@ MergeTreeThreadSelectAlgorithm::MergeTreeThreadSelectAlgorithm( const PrewhereInfoPtr & prewhere_info_, ExpressionActionsSettings actions_settings, const MergeTreeReaderSettings & reader_settings_, - const Names & virt_column_names_, - std::optional extension_) + const Names & virt_column_names_) : IMergeTreeSelectAlgorithm{ pool_->getHeader(), storage_, storage_snapshot_, prewhere_info_, std::move(actions_settings), max_block_size_rows_, preferred_block_size_bytes_, preferred_max_column_in_block_size_bytes_, - reader_settings_, use_uncompressed_cache_, virt_column_names_, extension_}, + reader_settings_, use_uncompressed_cache_, virt_column_names_}, thread{thread_}, - pool{pool_} + pool{std::move(pool_)} { - /// round min_marks_to_read up to nearest multiple of block_size expressed in marks - /// If granularity is adaptive it doesn't make sense - /// Maybe it will make sense to add settings `max_block_size_bytes` - if (max_block_size_rows && !storage.canUseAdaptiveGranularity()) - { - size_t fixed_index_granularity = storage.getSettings()->index_granularity; - min_marks_to_read = (min_marks_to_read_ * fixed_index_granularity + max_block_size_rows - 1) - / max_block_size_rows * max_block_size_rows / fixed_index_granularity; - } - else if (extension.has_value()) - { - /// Parallel reading from replicas is enabled. 
- /// We try to estimate the average number of bytes in a granule - /// to make one request over the network per one gigabyte of data - /// Actually we will ask MergeTreeReadPool to provide us heavier tasks to read - /// because the most part of each task will be postponed - /// (due to using consistent hash for better cache affinity) - const size_t amount_of_read_bytes_per_one_request = 1024 * 1024 * 1024; // 1GiB - /// In case of reading from compact parts (for which we can't estimate the average size of marks) - /// we will use this value - const size_t empirical_size_of_mark = 1024 * 1024 * 10; // 10 MiB - - if (extension->colums_to_read.empty()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "A set of column to read is empty. It is a bug"); - - size_t sum_average_marks_size = 0; - auto column_sizes = storage.getColumnSizes(); - for (const auto & name : extension->colums_to_read) - { - auto it = column_sizes.find(name); - if (it == column_sizes.end()) - continue; - auto size = it->second; - - if (size.data_compressed == 0 || size.data_uncompressed == 0 || size.marks == 0) - continue; - - sum_average_marks_size += size.data_uncompressed / size.marks; - } - - if (sum_average_marks_size == 0) - sum_average_marks_size = empirical_size_of_mark * extension->colums_to_read.size(); - - min_marks_to_read = extension->count_participating_replicas * amount_of_read_bytes_per_one_request / sum_average_marks_size; - } - else - { - min_marks_to_read = min_marks_to_read_; - } - - - ordered_names = getHeader().getNames(); + min_marks_to_read = min_marks_for_concurrent_read_; } /// Requests read task from MergeTreeReadPool and signals whether it got one bool MergeTreeThreadSelectAlgorithm::getNewTaskImpl() { - task = pool->getTask(min_marks_to_read, thread, ordered_names); + task = pool->getTask(thread); return static_cast(task); } @@ -113,19 +56,19 @@ void MergeTreeThreadSelectAlgorithm::finalizeNewTask() owned_uncompressed_cache = storage.getContext()->getUncompressedCache(); owned_mark_cache = storage.getContext()->getMarkCache(); } - else if (part_name != last_readed_part_name) + else if (part_name != last_read_part_name) { value_size_map = reader->getAvgValueSizeHints(); } - const bool init_new_readers = !reader || part_name != last_readed_part_name; + const bool init_new_readers = !reader || part_name != last_read_part_name; if (init_new_readers) { initializeMergeTreeReadersForPart(task->data_part, task->task_columns, metadata_snapshot, task->mark_ranges, value_size_map, profile_callback); } - last_readed_part_name = part_name; + last_read_part_name = part_name; } diff --git a/src/Storages/MergeTree/MergeTreeThreadSelectProcessor.h b/src/Storages/MergeTree/MergeTreeThreadSelectProcessor.h index ac3dcf0cc41..37c9375a581 100644 --- a/src/Storages/MergeTree/MergeTreeThreadSelectProcessor.h +++ b/src/Storages/MergeTree/MergeTreeThreadSelectProcessor.h @@ -5,8 +5,8 @@ namespace DB { -class MergeTreeReadPool; - +class IMergeTreeReadPool; +using IMergeTreeReadPoolPtr = std::shared_ptr; /** Used in conjunction with MergeTreeReadPool, asking it for more work to do and performing whatever reads it is asked * to perform. 
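The constructor logic removed above derived min_marks_to_read from an estimated average mark size (per-column uncompressed bytes divided by mark count, with an empirical 10 MiB-per-mark fallback when column sizes are unavailable), applying the same one-round-trip-per-gigabyte idea sketched earlier. A compact sketch of that estimate, using simplified stand-in types rather than the ClickHouse API:

#include <cstddef>
#include <map>
#include <string>
#include <vector>

struct ColumnSize { size_t data_uncompressed = 0; size_t marks = 0; };

inline size_t averageMarkSizeBytes(const std::map<std::string, ColumnSize> & column_sizes,
                                   const std::vector<std::string> & columns_to_read)
{
    size_t sum = 0;
    for (const auto & name : columns_to_read)
    {
        auto it = column_sizes.find(name);
        if (it == column_sizes.end() || it->second.marks == 0)
            continue;
        sum += it->second.data_uncompressed / it->second.marks;
    }
    constexpr size_t empirical_mark_size = 10UL * 1024 * 1024; // 10 MiB fallback
    return sum ? sum : empirical_mark_size * columns_to_read.size();
}

The new constructor below drops all of this and simply uses the min_marks_for_concurrent_read_ argument supplied by the read pool.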
@@ -16,9 +16,9 @@ class MergeTreeThreadSelectAlgorithm final : public IMergeTreeSelectAlgorithm public: MergeTreeThreadSelectAlgorithm( size_t thread_, - const std::shared_ptr & pool_, - size_t min_marks_to_read_, - UInt64 max_block_size_, + IMergeTreeReadPoolPtr pool_, + size_t min_marks_for_concurrent_read, + size_t max_block_size_, size_t preferred_block_size_bytes_, size_t preferred_max_column_in_block_size_bytes_, const MergeTreeData & storage_, @@ -27,8 +27,7 @@ public: const PrewhereInfoPtr & prewhere_info_, ExpressionActionsSettings actions_settings, const MergeTreeReaderSettings & reader_settings_, - const Names & virt_column_names_, - std::optional extension_); + const Names & virt_column_names_); String getName() const override { return "MergeTreeThread"; } @@ -42,18 +41,14 @@ protected: void finish() override; - bool canUseConsistentHashingForParallelReading() override { return true; } - private: /// "thread" index (there are N threads and each thread is assigned index in interval [0..N-1]) size_t thread; - std::shared_ptr pool; + IMergeTreeReadPoolPtr pool; /// Last part read in this thread - std::string last_readed_part_name; - /// Names from header. Used in order to order columns in read blocks. - Names ordered_names; + std::string last_read_part_name; }; } diff --git a/src/Storages/MergeTree/MutateFromLogEntryTask.cpp b/src/Storages/MergeTree/MutateFromLogEntryTask.cpp index d9d7c496d9f..b83c058f7fd 100644 --- a/src/Storages/MergeTree/MutateFromLogEntryTask.cpp +++ b/src/Storages/MergeTree/MutateFromLogEntryTask.cpp @@ -215,7 +215,7 @@ bool MutateFromLogEntryTask::finalize(ReplicatedMergeMutateTaskBase::PartLogWrit LOG_ERROR(log, "{}. Data after mutation is not byte-identical to data on another replicas. " "We will download merged part from replica to force byte-identical result.", getCurrentExceptionMessage(false)); - write_part_log(ExecutionStatus::fromCurrentException()); + write_part_log(ExecutionStatus::fromCurrentException("", true)); if (storage.getSettings()->detach_not_byte_identical_parts) storage.forcefullyMovePartToDetachedAndRemoveFromMemory(std::move(new_part), "mutate-not-byte-identical"); diff --git a/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp b/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp index 5211e0f9c33..76ba921b705 100644 --- a/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp +++ b/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp @@ -103,7 +103,7 @@ bool MutatePlainMergeTreeTask::executeStep() PreformattedMessage exception_message = getCurrentExceptionMessageAndPattern(/* with_stacktrace */ false); LOG_ERROR(&Poco::Logger::get("MutatePlainMergeTreeTask"), exception_message); storage.updateMutationEntriesErrors(future_part, false, exception_message.text); - write_part_log(ExecutionStatus::fromCurrentException()); + write_part_log(ExecutionStatus::fromCurrentException("", true)); tryLogCurrentException(__PRETTY_FUNCTION__); return false; } diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index 47df0cfe42e..4a7224b0722 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -1543,13 +1543,6 @@ bool MutateTask::prepare() auto context_for_reading = Context::createCopy(ctx->context); - /// We must read with one thread because it guarantees that output stream will be sorted. - /// Disable all settings that can enable reading with several streams. 
- context_for_reading->setSetting("max_streams_to_max_threads_ratio", 1); - context_for_reading->setSetting("max_threads", 1); - context_for_reading->setSetting("allow_asynchronous_read_from_io_pool_for_merge_tree", false); - context_for_reading->setSetting("max_streams_for_merge_tree_reading", Field(0)); - /// Allow mutations to work when force_index_by_date or force_primary_key is on. context_for_reading->setSetting("force_index_by_date", false); context_for_reading->setSetting("force_primary_key", false); @@ -1562,7 +1555,7 @@ bool MutateTask::prepare() } if (ctx->source_part->isStoredOnDisk() && !isStorageTouchedByMutations( - *ctx->data, ctx->source_part, ctx->metadata_snapshot, ctx->commands_for_part, Context::createCopy(context_for_reading))) + *ctx->data, ctx->source_part, ctx->metadata_snapshot, ctx->commands_for_part, context_for_reading)) { NameSet files_to_copy_instead_of_hardlinks; auto settings_ptr = ctx->data->getSettings(); @@ -1597,6 +1590,15 @@ bool MutateTask::prepare() LOG_TRACE(ctx->log, "Mutating part {} to mutation version {}", ctx->source_part->name, ctx->future_part->part_info.mutation); } + /// We must read with one thread because it guarantees that output stream will be sorted. + /// Disable all settings that can enable reading with several streams. + /// NOTE: isStorageTouchedByMutations() above is done without these settings because it + /// should be ok to calculate count() with multiple streams. + context_for_reading->setSetting("max_streams_to_max_threads_ratio", 1); + context_for_reading->setSetting("max_threads", 1); + context_for_reading->setSetting("allow_asynchronous_read_from_io_pool_for_merge_tree", false); + context_for_reading->setSetting("max_streams_for_merge_tree_reading", Field(0)); + MutationHelpers::splitMutationCommands(ctx->source_part, ctx->commands_for_part, ctx->for_interpreter, ctx->for_file_renames); ctx->stage_progress = std::make_unique<MergeStageProgress>(1.0); diff --git a/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.cpp b/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.cpp index e07f19fb64c..3ef064ff743 100644 --- a/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.cpp +++ b/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.cpp @@ -1,23 +1,95 @@ #include +#include +#include +#include +#include #include +#include + +#include #include -#include -#include -#include +#include +#include +#include +#include "IO/WriteBufferFromString.h" +#include "Storages/MergeTree/RangesInDataPart.h" +#include "Storages/MergeTree/RequestResponse.h" +#include #include +#include namespace DB { -class ParallelReplicasReadingCoordinator::Impl +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +class ParallelReplicasReadingCoordinator::ImplInterface { public: - using PartitionReadRequestPtr = std::unique_ptr<PartitionReadRequest>; + struct Stat + { + size_t number_of_requests{0}; + size_t sum_marks{0}; + }; + using Stats = std::vector<Stat>; + static String toString(Stats stats) + { + String result = "Statistics: "; + for (size_t i = 0; i < stats.size(); ++i) + result += fmt::format("-- replica {}, requests: {} marks: {} ", i, stats[i].number_of_requests, stats[i].sum_marks); + return result; + } + + Stats stats; + std::mutex mutex; + size_t replicas_count; + + explicit ImplInterface(size_t replicas_count_) + : stats{replicas_count_} + , replicas_count(replicas_count_) + {} + + virtual ~ImplInterface() = default; + virtual ParallelReadResponse handleRequest(ParallelReadRequest request) = 0; + virtual void
handleInitialAllRangesAnnouncement(InitialAllRangesAnnouncement announcement) = 0; +}; + + +struct Part +{ + mutable RangesInDataPartDescription description; + // FIXME: This is needed to put this struct in set + // and modify through iterator + mutable std::set<size_t> replicas; + + bool operator<(const Part & rhs) const { return description.info < rhs.description.info; } +}; + +using Parts = std::set<Part>; +using PartRefs = std::deque<Parts::iterator>; + + +class DefaultCoordinator : public ParallelReplicasReadingCoordinator::ImplInterface +{ +public: + using ParallelReadRequestPtr = std::unique_ptr<ParallelReadRequest>; + using PartToMarkRanges = std::map; + explicit DefaultCoordinator(size_t replicas_count_) + : ParallelReplicasReadingCoordinator::ImplInterface(replicas_count_) + , announcements(replicas_count_) + , reading_state(replicas_count_) + { + } + + ~DefaultCoordinator() override; + struct PartitionReading { PartSegments part_ranges; @@ -27,115 +99,423 @@ public: using PartitionToBlockRanges = std::map<String, PartitionReading>; PartitionToBlockRanges partitions; - std::mutex mutex; + size_t sent_initial_requests{0}; + std::vector announcements; - PartitionReadResponse handleRequest(PartitionReadRequest request); + Parts all_parts_to_read; + /// Contains only parts which we haven't started to read from + PartRefs delayed_parts; + /// Per-replica preferred parts split by consistent hash + /// Once a replica has finished all its own tasks, it can steal tasks from others + std::vector<PartRefs> reading_state; + + Poco::Logger * log = &Poco::Logger::get("DefaultCoordinator"); + + std::atomic<bool> state_initialized{false}; + + ParallelReadResponse handleRequest(ParallelReadRequest request) override; + void handleInitialAllRangesAnnouncement(InitialAllRangesAnnouncement announcement) override; + + void updateReadingState(const InitialAllRangesAnnouncement & announcement); + void finalizeReadingState(); + + size_t computeConsistentHash(const MergeTreePartInfo & info) const + { + auto hash = SipHash(); + hash.update(info.getPartNameV1()); + return ConsistentHashing(hash.get64(), replicas_count); + } + + void selectPartsAndRanges(const PartRefs & container, size_t replica_num, size_t min_number_of_marks, size_t & current_mark_size, ParallelReadResponse & response) const; +}; + +DefaultCoordinator::~DefaultCoordinator() +{ + LOG_INFO(log, "Coordination done: {}", toString(stats)); +} + +void DefaultCoordinator::updateReadingState(const InitialAllRangesAnnouncement & announcement) +{ + PartRefs parts_diff; + + /// To get rid of duplicates + for (const auto & part: announcement.description) + { + auto the_same_it = std::find_if(all_parts_to_read.begin(), all_parts_to_read.end(), + [&part] (const Part & other) { return other.description.info.getPartNameV1() == part.info.getPartNameV1(); }); + + /// We have the same part - add the info about presence on current replica to it + if (the_same_it != all_parts_to_read.end()) + { + the_same_it->replicas.insert(announcement.replica_num); + continue; + } + + auto covering_or_the_same_it = std::find_if(all_parts_to_read.begin(), all_parts_to_read.end(), + [&part] (const Part & other) { return !other.description.info.isDisjoint(part.info); }); + + /// It is covering part or we have covering - skip it + if (covering_or_the_same_it != all_parts_to_read.end()) + continue; + + auto new_part = Part{ + .description = part, + .replicas = {announcement.replica_num} + }; + + auto [insert_it, _] = all_parts_to_read.insert(new_part); + parts_diff.push_back(insert_it); + } + + /// Split all parts by consistent hash + while (!parts_diff.empty()) + { + auto
current_part_it = parts_diff.front(); + parts_diff.pop_front(); + auto consistent_hash = computeConsistentHash(current_part_it->description.info); + + /// Check whether the new part can easily go to replica queue + if (current_part_it->replicas.contains(consistent_hash)) + { + reading_state[consistent_hash].emplace_back(current_part_it); + continue; + } + + /// Add to delayed parts + delayed_parts.emplace_back(current_part_it); + } +} + +void DefaultCoordinator::finalizeReadingState() +{ + /// Clear the whole delayed queue + while (!delayed_parts.empty()) + { + auto current_part_it = delayed_parts.front(); + auto consistent_hash = computeConsistentHash(current_part_it->description.info); + + if (current_part_it->replicas.contains(consistent_hash)) + { + reading_state[consistent_hash].emplace_back(current_part_it); + delayed_parts.pop_front(); + continue; + } + + /// In this situation just assign to a random replica which has this part + auto replica = *(std::next(current_part_it->replicas.begin(), thread_local_rng() % current_part_it->replicas.size())); + reading_state[replica].emplace_back(current_part_it); + delayed_parts.pop_front(); + } + + String description; + for (const auto & part : all_parts_to_read) + { + description += part.description.describe(); + description += fmt::format("Replicas: ({}) --- ", fmt::join(part.replicas, ",")); + } + + LOG_INFO(log, "Reading state is fully initialized: {}", description); +} + + +void DefaultCoordinator::handleInitialAllRangesAnnouncement(InitialAllRangesAnnouncement announcement) +{ + std::lock_guard lock(mutex); + + updateReadingState(announcement); + stats[announcement.replica_num].number_of_requests += 1; + + ++sent_initial_requests; + LOG_INFO(log, "Sent initial requests: {} Replicas count: {}", sent_initial_requests, replicas_count); + if (sent_initial_requests == replicas_count) + finalizeReadingState(); +} + +void DefaultCoordinator::selectPartsAndRanges(const PartRefs & container, size_t replica_num, size_t min_number_of_marks, size_t & current_mark_size, ParallelReadResponse & response) const +{ + for (const auto & part : container) + { + if (current_mark_size >= min_number_of_marks) + { + LOG_TEST(log, "Current mark size {} is bigger than min_number_marks {}", current_mark_size, min_number_of_marks); + break; + } + + if (part->description.ranges.empty()) + { + LOG_TEST(log, "Part {} is already empty in reading state", part->description.info.getPartNameV1()); + continue; + } + + if (std::find(part->replicas.begin(), part->replicas.end(), replica_num) == part->replicas.end()) + { + LOG_TEST(log, "Not found part {} on replica {}", part->description.info.getPartNameV1(), replica_num); + continue; + } + + response.description.push_back({ + .info = part->description.info, + .ranges = {}, + }); + + while (!part->description.ranges.empty() && current_mark_size < min_number_of_marks) + { + auto & range = part->description.ranges.front(); + + if (range.getNumberOfMarks() > min_number_of_marks) + { + auto new_range = range; + range.begin += min_number_of_marks; + new_range.end = new_range.begin + min_number_of_marks; + + response.description.back().ranges.emplace_back(new_range); + current_mark_size += new_range.getNumberOfMarks(); + continue; + } + + current_mark_size += part->description.ranges.front().getNumberOfMarks(); + response.description.back().ranges.emplace_back(part->description.ranges.front()); + part->description.ranges.pop_front(); + } + } +} + +ParallelReadResponse DefaultCoordinator::handleRequest(ParallelReadRequest request) +{ + std::lock_guard lock(mutex); + 
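    /// [Editor's illustration, not part of the patch] How selectPartsAndRanges()
    /// above carves work: suppose a part's reading state holds ranges = {[0, 100)}
    /// and a replica asks for min_number_of_marks = 10. The loop copies the front
    /// range, advances its begin from 0 to 10 and answers with [0, 10); the
    /// remaining [10, 100) stays queued, so one large range is served in
    /// 10-mark slices over successive requests.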
LOG_TRACE(log, "Handling request from replica {}, minimal marks size is {}", request.replica_num, request.min_number_of_marks); + + size_t current_mark_size = 0; + ParallelReadResponse response; + + /// 1. Try to select from preferred set of parts for current replica + selectPartsAndRanges(reading_state[request.replica_num], request.replica_num, request.min_number_of_marks, current_mark_size, response); + + /// 2. Try to use parts from delayed queue + while (!delayed_parts.empty() && current_mark_size < request.min_number_of_marks) + { + auto part = delayed_parts.front(); + delayed_parts.pop_front(); + reading_state[request.replica_num].emplace_back(part); + selectPartsAndRanges(reading_state[request.replica_num], request.replica_num, request.min_number_of_marks, current_mark_size, response); + } + + /// 3. Try to steal tasks; + if (current_mark_size < request.min_number_of_marks) + { + for (size_t i = 0; i < replicas_count; ++i) + { + if (i != request.replica_num) + selectPartsAndRanges(reading_state[i], request.replica_num, request.min_number_of_marks, current_mark_size, response); + + if (current_mark_size >= request.min_number_of_marks) + break; + } + } + + stats[request.replica_num].number_of_requests += 1; + stats[request.replica_num].sum_marks += current_mark_size; + + if (response.description.empty()) + response.finish = true; + + LOG_TRACE(log, "Going to respond to replica {} with {}", request.replica_num, response.describe()); + return response; +} + + +template +class InOrderCoordinator : public ParallelReplicasReadingCoordinator::ImplInterface +{ +public: + explicit InOrderCoordinator([[ maybe_unused ]] size_t replicas_count_) + : ParallelReplicasReadingCoordinator::ImplInterface(replicas_count_) + {} + ~InOrderCoordinator() override + { + LOG_INFO(log, "Coordination done: {}", toString(stats)); + } + + ParallelReadResponse handleRequest([[ maybe_unused ]] ParallelReadRequest request) override; + void handleInitialAllRangesAnnouncement([[ maybe_unused ]] InitialAllRangesAnnouncement announcement) override; + + Parts all_parts_to_read; + + Poco::Logger * log = &Poco::Logger::get(fmt::format("{}{}", magic_enum::enum_name(mode), "Coordinator")); }; -PartitionReadResponse ParallelReplicasReadingCoordinator::Impl::handleRequest(PartitionReadRequest request) +template +void InOrderCoordinator::handleInitialAllRangesAnnouncement(InitialAllRangesAnnouncement announcement) { - auto * log = &Poco::Logger::get("ParallelReplicasReadingCoordinator"); - Stopwatch watch; - - String request_description = request.toString(); std::lock_guard lock(mutex); + LOG_TRACE(log, "Received an announecement {}", announcement.describe()); - auto partition_it = partitions.find(request.partition_id); - - PartToRead::PartAndProjectionNames part_and_projection + /// To get rid of duplicates + for (const auto & part: announcement.description) { - .part = request.part_name, - .projection = request.projection_name - }; + auto the_same_it = std::find_if(all_parts_to_read.begin(), all_parts_to_read.end(), + [&part] (const Part & other) { return other.description.info == part.info; }); - /// We are the first who wants to process parts in partition - if (partition_it == partitions.end()) - { - PartitionReading partition_reading; + /// We have the same part - add the info about presence on current replica to it + if (the_same_it != all_parts_to_read.end()) + { + the_same_it->replicas.insert(announcement.replica_num); + continue; + } - PartToRead part_to_read; - part_to_read.range = request.block_range; - 
part_to_read.name = part_and_projection; + auto covering_or_the_same_it = std::find_if(all_parts_to_read.begin(), all_parts_to_read.end(), + [&part] (const Part & other) { return other.description.info.contains(part.info) || part.info.contains(other.description.info); }); - partition_reading.part_ranges.addPart(std::move(part_to_read)); + /// It is covering part or we have covering - skip it + if (covering_or_the_same_it != all_parts_to_read.end()) + continue; - /// As this query is first in partition, we will accept all ranges from it. - /// We need just to update our state. - auto request_ranges = HalfIntervals::initializeFromMarkRanges(request.mark_ranges); - auto mark_ranges_index = HalfIntervals::initializeWithEntireSpace(); - mark_ranges_index.intersect(request_ranges.negate()); + auto new_part = Part{ + .description = part, + .replicas = {announcement.replica_num} + }; - partition_reading.mark_ranges_in_part.insert({part_and_projection, std::move(mark_ranges_index)}); - partitions.insert({request.partition_id, std::move(partition_reading)}); - - LOG_TRACE(log, "Request is first in partition, accepted in {} ns: {}", watch.elapsed(), request_description); - return {.denied = false, .mark_ranges = std::move(request.mark_ranges)}; + auto insert_it = all_parts_to_read.insert(new_part); + auto & ranges = insert_it.first->description.ranges; + std::sort(ranges.begin(), ranges.end()); } - - auto & partition_reading = partition_it->second; - - PartToRead part_to_read; - part_to_read.range = request.block_range; - part_to_read.name = part_and_projection; - - auto part_intersection_res = partition_reading.part_ranges.getIntersectionResult(part_to_read); - - switch (part_intersection_res) - { - case PartSegments::IntersectionResult::REJECT: - { - LOG_TRACE(log, "Request rejected in {} ns: {}", watch.elapsed(), request_description); - return {.denied = true, .mark_ranges = {}}; - } - case PartSegments::IntersectionResult::EXACTLY_ONE_INTERSECTION: - { - auto marks_it = partition_reading.mark_ranges_in_part.find(part_and_projection); - - auto & intervals_to_do = marks_it->second; - auto result = HalfIntervals::initializeFromMarkRanges(request.mark_ranges); - result.intersect(intervals_to_do); - - /// Update intervals_to_do - intervals_to_do.intersect(HalfIntervals::initializeFromMarkRanges(std::move(request.mark_ranges)).negate()); - - auto result_ranges = result.convertToMarkRangesFinal(); - const bool denied = result_ranges.empty(); - - if (denied) - LOG_TRACE(log, "Request rejected due to intersection in {} ns: {}", watch.elapsed(), request_description); - else - LOG_TRACE(log, "Request accepted partially in {} ns: {}", watch.elapsed(), request_description); - - return {.denied = denied, .mark_ranges = std::move(result_ranges)}; - } - case PartSegments::IntersectionResult::NO_INTERSECTION: - { - partition_reading.part_ranges.addPart(std::move(part_to_read)); - - auto mark_ranges_index = HalfIntervals::initializeWithEntireSpace().intersect( - HalfIntervals::initializeFromMarkRanges(request.mark_ranges).negate() - ); - partition_reading.mark_ranges_in_part.insert({part_and_projection, std::move(mark_ranges_index)}); - - LOG_TRACE(log, "Request accepted in {} ns: {}", watch.elapsed(), request_description); - return {.denied = false, .mark_ranges = std::move(request.mark_ranges)}; - } - } - - UNREACHABLE(); } -PartitionReadResponse ParallelReplicasReadingCoordinator::handleRequest(PartitionReadRequest request) + +template +ParallelReadResponse InOrderCoordinator::handleRequest(ParallelReadRequest 
request) { + std::lock_guard lock(mutex); + + if (request.mode != mode) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Replica {} decided to read in {} mode, not in {}. This is a bug", + request.replica_num, magic_enum::enum_name(request.mode), magic_enum::enum_name(mode)); + + LOG_TRACE(log, "Got request from replica {}, data {}", request.replica_num, request.describe()); + + ParallelReadResponse response; + response.description = request.description; + size_t overall_number_of_marks = 0; + + for (auto & part : response.description) + { + auto global_part_it = std::find_if(all_parts_to_read.begin(), all_parts_to_read.end(), + [&part] (const Part & other) { return other.description.info == part.info; }); + + if (global_part_it == all_parts_to_read.end()) + continue; + + if (!global_part_it->replicas.contains(request.replica_num)) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Part {} doesn't exist on replica {} according to the global state", part.info.getPartNameV1(), request.replica_num); + + size_t current_mark_size = 0; + + /// Now we can recommend to read more intervals + if constexpr (mode == CoordinationMode::ReverseOrder) + { + while (!global_part_it->description.ranges.empty() && current_mark_size < request.min_number_of_marks) + { + auto range = global_part_it->description.ranges.back(); + + if (range.getNumberOfMarks() > request.min_number_of_marks) + { + auto new_range = range; + range.end -= request.min_number_of_marks; + new_range.begin = new_range.end - request.min_number_of_marks; + + global_part_it->description.ranges.back() = range; + + part.ranges.emplace_front(new_range); + current_mark_size += new_range.getNumberOfMarks(); + continue; + } + + current_mark_size += global_part_it->description.ranges.back().getNumberOfMarks(); + part.ranges.emplace_front(global_part_it->description.ranges.back()); + global_part_it->description.ranges.pop_back(); + } + } + else if constexpr (mode == CoordinationMode::WithOrder) + { + while (!global_part_it->description.ranges.empty() && current_mark_size < request.min_number_of_marks) + { + auto range = global_part_it->description.ranges.front(); + + if (range.getNumberOfMarks() > request.min_number_of_marks) + { + auto new_range = range; + range.begin += request.min_number_of_marks; + new_range.end = new_range.begin + request.min_number_of_marks; + + global_part_it->description.ranges.front() = range; + + part.ranges.emplace_back(new_range); + current_mark_size += new_range.getNumberOfMarks(); + continue; + } + + current_mark_size += global_part_it->description.ranges.front().getNumberOfMarks(); + part.ranges.emplace_back(global_part_it->description.ranges.front()); + global_part_it->description.ranges.pop_front(); + } + } + + overall_number_of_marks += current_mark_size; + } + + if (!overall_number_of_marks) + response.finish = true; + + stats[request.replica_num].number_of_requests += 1; + stats[request.replica_num].sum_marks += overall_number_of_marks; + + LOG_TRACE(log, "Going to respond to replica {} with {}", request.replica_num, response.describe()); + return response; +} + + +void ParallelReplicasReadingCoordinator::handleInitialAllRangesAnnouncement(InitialAllRangesAnnouncement announcement) +{ + if (!pimpl) + initialize(); + + return pimpl->handleInitialAllRangesAnnouncement(announcement); +} + +ParallelReadResponse ParallelReplicasReadingCoordinator::handleRequest(ParallelReadRequest request) +{ + if (!pimpl) + initialize(); + return pimpl->handleRequest(std::move(request)); } 
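[Editor's note] Taken together, the wrappers above define the coordinator's public lifecycle. A minimal usage sketch under assumed conditions (two replicas, default mode; announcement_from() and request_from() are hypothetical helpers standing in for the messages a RemoteQueryExecutor would deliver):

    ParallelReplicasReadingCoordinator coordinator(/* replicas_count_ = */ 2);
    coordinator.setMode(CoordinationMode::Default);  // must precede the first message

    // Every replica first announces all parts and mark ranges it can see;
    // the very first message lazily constructs the mode-specific pimpl.
    coordinator.handleInitialAllRangesAnnouncement(announcement_from(/* replica */ 0));
    coordinator.handleInitialAllRangesAnnouncement(announcement_from(/* replica */ 1));

    // Replicas then pull work until the coordinator responds with finish = true.
    while (true)
    {
        auto response = coordinator.handleRequest(request_from(/* replica */ 0, /* min marks */ 8));
        if (response.finish)
            break;
        // ... read the (part, mark ranges) pairs in response.description ...
    }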
-ParallelReplicasReadingCoordinator::ParallelReplicasReadingCoordinator() +void ParallelReplicasReadingCoordinator::setMode(CoordinationMode mode_) { - pimpl = std::make_unique<Impl>(); + mode = mode_; } +void ParallelReplicasReadingCoordinator::initialize() +{ + switch (mode) + { + case CoordinationMode::Default: + pimpl = std::make_unique<DefaultCoordinator>(replicas_count); + return; + case CoordinationMode::WithOrder: + pimpl = std::make_unique<InOrderCoordinator<CoordinationMode::WithOrder>>(replicas_count); + return; + case CoordinationMode::ReverseOrder: + pimpl = std::make_unique<InOrderCoordinator<CoordinationMode::ReverseOrder>>(replicas_count); + return; + } +} + +ParallelReplicasReadingCoordinator::ParallelReplicasReadingCoordinator(size_t replicas_count_) : replicas_count(replicas_count_) {} + ParallelReplicasReadingCoordinator::~ParallelReplicasReadingCoordinator() = default; } diff --git a/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.h b/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.h index 4800533e919..0656a128884 100644 --- a/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.h +++ b/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.h @@ -7,15 +7,28 @@ namespace DB { +/// The main class to spread mark ranges across replicas dynamically +/// The reason why it uses pimpl - this header file is included in +/// multiple other files like Context or RemoteQueryExecutor class ParallelReplicasReadingCoordinator { public: - ParallelReplicasReadingCoordinator(); + class ImplInterface; + + explicit ParallelReplicasReadingCoordinator(size_t replicas_count_); ~ParallelReplicasReadingCoordinator(); - PartitionReadResponse handleRequest(PartitionReadRequest request); + + void setMode(CoordinationMode mode); + void handleInitialAllRangesAnnouncement(InitialAllRangesAnnouncement); + ParallelReadResponse handleRequest(ParallelReadRequest request); + private: - class Impl; - std::unique_ptr<Impl> pimpl; + void initialize(); + + CoordinationMode mode{CoordinationMode::Default}; + size_t replicas_count{0}; + std::atomic<bool> initialized{false}; + std::unique_ptr<ImplInterface> pimpl; }; using ParallelReplicasReadingCoordinatorPtr = std::shared_ptr<ParallelReplicasReadingCoordinator>; diff --git a/src/Storages/MergeTree/RangesInDataPart.cpp b/src/Storages/MergeTree/RangesInDataPart.cpp new file mode 100644 index 00000000000..29a236c9865 --- /dev/null +++ b/src/Storages/MergeTree/RangesInDataPart.cpp @@ -0,0 +1,113 @@ +#include + +#include + +#include "IO/VarInt.h" + +#include +#include + + +namespace DB +{ + +void RangesInDataPartDescription::serialize(WriteBuffer & out) const +{ + info.serialize(out); + ranges.serialize(out); +} + +String RangesInDataPartDescription::describe() const +{ + String result; + result += fmt::format("Part: {}, ", info.getPartNameV1()); + result += fmt::format("Ranges: [{}], ", fmt::join(ranges, ",")); + return result; +} + +void RangesInDataPartDescription::deserialize(ReadBuffer & in) +{ + info.deserialize(in); + ranges.deserialize(in); +} + +void RangesInDataPartsDescription::serialize(WriteBuffer & out) const +{ + writeVarUInt(this->size(), out); + for (const auto & desc : *this) + desc.serialize(out); +} + +String RangesInDataPartsDescription::describe() const +{ + String result; + for (const auto & desc : *this) + result += desc.describe() + ","; + return result; +} + +void RangesInDataPartsDescription::deserialize(ReadBuffer & in) +{ + size_t new_size = 0; + readVarUInt(new_size, in); + + this->resize(new_size); + for (auto & desc : *this) + desc.deserialize(in); +} + +void RangesInDataPartsDescription::merge(RangesInDataPartsDescription & other) +{ + for (const auto & desc : other) +
this->emplace_back(desc); +} + +RangesInDataPartDescription RangesInDataPart::getDescription() const +{ + return RangesInDataPartDescription{ + .info = data_part->info, + .ranges = ranges, + }; +} + +size_t RangesInDataPart::getMarksCount() const +{ + size_t total = 0; + for (const auto & range : ranges) + total += range.end - range.begin; + + return total; +} + +size_t RangesInDataPart::getRowsCount() const +{ + return data_part->index_granularity.getRowsCountInRanges(ranges); +} + + +RangesInDataPartsDescription RangesInDataParts::getDescriptions() const +{ + RangesInDataPartsDescription result; + for (const auto & part : *this) + result.emplace_back(part.getDescription()); + return result; +} + + +size_t RangesInDataParts::getMarksCountAllParts() const +{ + size_t result = 0; + for (const auto & part : *this) + result += part.getMarksCount(); + return result; +} + +size_t RangesInDataParts::getRowsCountAllParts() const +{ + size_t result = 0; + for (const auto & part: *this) + result += part.getRowsCount(); + return result; +} + +} diff --git a/src/Storages/MergeTree/RangesInDataPart.h b/src/Storages/MergeTree/RangesInDataPart.h index 4f5d34e118d..9c8ab4859a0 100644 --- a/src/Storages/MergeTree/RangesInDataPart.h +++ b/src/Storages/MergeTree/RangesInDataPart.h @@ -1,42 +1,73 @@ #pragma once -#include +#include + +#include +#include #include +#include "Storages/MergeTree/MergeTreePartInfo.h" namespace DB { +class IMergeTreeDataPart; +using DataPartPtr = std::shared_ptr<const IMergeTreeDataPart>; + +/// The only purpose of this struct is to carry the serialize and deserialize methods; +/// they live here because the original DataPart class cannot be fully serialized and then deserialized. +struct RangesInDataPartDescription +{ + MergeTreePartInfo info; + MarkRanges ranges; + + void serialize(WriteBuffer & out) const; + String describe() const; + void deserialize(ReadBuffer & in); +}; + +struct RangesInDataPartsDescription: public std::deque<RangesInDataPartDescription> +{ + using std::deque<RangesInDataPartDescription>::deque; + + void serialize(WriteBuffer & out) const; + String describe() const; + void deserialize(ReadBuffer & in); + + void merge(RangesInDataPartsDescription & other); +}; struct RangesInDataPart { - MergeTreeData::DataPartPtr data_part; + DataPartPtr data_part; size_t part_index_in_query; MarkRanges ranges; RangesInDataPart() = default; - RangesInDataPart(const MergeTreeData::DataPartPtr & data_part_, const size_t part_index_in_query_, - const MarkRanges & ranges_ = MarkRanges{}) - : data_part{data_part_}, part_index_in_query{part_index_in_query_}, ranges{ranges_} - { - } + RangesInDataPart( + const DataPartPtr & data_part_, + const size_t part_index_in_query_, + const MarkRanges & ranges_ = MarkRanges{}) + : data_part{data_part_} + , part_index_in_query{part_index_in_query_} + , ranges{ranges_} + {} - size_t getMarksCount() const - { - size_t total = 0; - for (const auto & range : ranges) - total += range.end - range.begin; + RangesInDataPartDescription getDescription() const; - return total; - } - - size_t getRowsCount() const - { - return data_part->index_granularity.getRowsCountInRanges(ranges); - } + size_t getMarksCount() const; + size_t getRowsCount() const; }; -using RangesInDataParts = std::vector<RangesInDataPart>; +struct RangesInDataParts: public std::vector<RangesInDataPart> +{ + using std::vector<RangesInDataPart>::vector; + + RangesInDataPartsDescription getDescriptions() const; + + size_t getMarksCountAllParts() const; + size_t getRowsCountAllParts() const; +}; } diff --git a/src/Storages/MergeTree/ReplicatedMergeMutateTaskBase.cpp b/src/Storages/MergeTree/ReplicatedMergeMutateTaskBase.cpp index
1a443bc0105..a22aab8d6ce 100644 --- a/src/Storages/MergeTree/ReplicatedMergeMutateTaskBase.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeMutateTaskBase.cpp @@ -91,6 +91,7 @@ bool ReplicatedMergeMutateTaskBase::executeStep() auto & log_entry = selected_entry->log_entry; log_entry->exception = saved_exception; + log_entry->last_exception_time = time(nullptr); if (log_entry->type == ReplicatedMergeTreeLogEntryData::MUTATE_PART) { @@ -188,7 +189,7 @@ bool ReplicatedMergeMutateTaskBase::executeImpl() catch (...) { if (part_log_writer) - part_log_writer(ExecutionStatus::fromCurrentException()); + part_log_writer(ExecutionStatus::fromCurrentException("", true)); throw; } @@ -204,7 +205,7 @@ bool ReplicatedMergeMutateTaskBase::executeImpl() catch (...) { if (part_log_writer) - part_log_writer(ExecutionStatus::fromCurrentException()); + part_log_writer(ExecutionStatus::fromCurrentException("", true)); throw; } diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.h b/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.h index 7f504baaab3..17b79d09437 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.h @@ -157,6 +157,7 @@ struct ReplicatedMergeTreeLogEntryData /// Access under queue_mutex, see ReplicatedMergeTreeQueue. size_t num_tries = 0; /// The number of attempts to perform the action (since the server started, including the running one). std::exception_ptr exception; /// The last exception, in the case of an unsuccessful attempt to perform the action. + time_t last_exception_time = 0; /// The time at which the last exception occurred. time_t last_attempt_time = 0; /// The time at which the last attempt was attempted to complete the action. size_t num_postponed = 0; /// The number of times the action was postponed. String postpone_reason; /// The reason why the action was postponed, if it was postponed. 
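[Editor's note] The ReplicatedMergeTreeQueue change just below threads a replica name out of checkZeroCopyLockExists() so the postpone reason can say which replica holds the zero-copy lock (the lock_message added to ZeroCopyLock further down). A self-contained sketch of the idea using only standard types (all names here are hypothetical, not ClickHouse API):

    #include <iostream>
    #include <map>
    #include <optional>
    #include <string>

    // Hypothetical registry standing in for the ZooKeeper-backed zero-copy locks:
    // part name -> name of the replica that wrote the lock message.
    std::map<std::string, std::string> zero_copy_locks = {{"all_0_1_2", "replica_2"}};

    std::optional<std::string> lockHolder(const std::string & part_name)
    {
        auto it = zero_copy_locks.find(part_name);
        if (it == zero_copy_locks.end())
            return std::nullopt;
        return it->second;  // the lock_message written by ZeroCopyLock
    }

    int main()
    {
        if (auto holder = lockHolder("all_0_1_2"))
            std::cout << "Not executing merge/mutation for the part all_0_1_2, waiting for "
                      << *holder << " to execute it and will fetch after.\n";
    }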
diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp index e47dddb9795..aa5ab8a5e85 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp @@ -1363,10 +1363,11 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry( if (!disk->supportZeroCopyReplication()) only_s3_storage = false; - if (!disks.empty() && only_s3_storage && storage.checkZeroCopyLockExists(entry.new_part_name, disks[0])) + String replica_to_execute_merge; + if (!disks.empty() && only_s3_storage && storage.checkZeroCopyLockExists(entry.new_part_name, disks[0], replica_to_execute_merge)) { - constexpr auto fmt_string = "Not executing merge/mutation for the part {}, waiting other replica to execute it and will fetch after."; - out_postpone_reason = fmt::format(fmt_string, entry.new_part_name); + constexpr auto fmt_string = "Not executing merge/mutation for the part {}, waiting for {} to execute it and will fetch after."; + out_postpone_reason = fmt::format(fmt_string, entry.new_part_name, replica_to_execute_merge); return false; } } @@ -1691,6 +1692,7 @@ bool ReplicatedMergeTreeQueue::processEntry( { std::lock_guard lock(state_mutex); entry->exception = saved_exception; + entry->last_exception_time = time(nullptr); return false; } diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp index 76b96899dac..ee192966758 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp @@ -520,7 +520,7 @@ void ReplicatedMergeTreeSinkImpl::finishDelayedChunk(const ZooKeeperWithF } catch (...) { - PartLog::addNewPart(storage.getContext(), part, partition.elapsed_ns, ExecutionStatus::fromCurrentException(__PRETTY_FUNCTION__)); + PartLog::addNewPart(storage.getContext(), part, partition.elapsed_ns, ExecutionStatus::fromCurrentException("", true)); throw; } } @@ -588,7 +588,7 @@ void ReplicatedMergeTreeSinkImpl::writeExistingPart(MergeTreeData: } catch (...) 
{ - PartLog::addNewPart(storage.getContext(), part, watch.elapsed(), ExecutionStatus::fromCurrentException(__PRETTY_FUNCTION__)); + PartLog::addNewPart(storage.getContext(), part, watch.elapsed(), ExecutionStatus::fromCurrentException("", true)); throw; } } diff --git a/src/Storages/MergeTree/RequestResponse.cpp b/src/Storages/MergeTree/RequestResponse.cpp index 2ea6b0c9f9f..5249128590f 100644 --- a/src/Storages/MergeTree/RequestResponse.cpp +++ b/src/Storages/MergeTree/RequestResponse.cpp @@ -1,159 +1,129 @@ +#include #include #include #include +#include "IO/VarInt.h" #include #include -#include #include - namespace DB { namespace ErrorCodes { extern const int UNKNOWN_PROTOCOL; - extern const int BAD_ARGUMENTS; } -static void readMarkRangesBinary(MarkRanges & ranges, ReadBuffer & buf) -{ - size_t size = 0; - readVarUInt(size, buf); - - if (size > DEFAULT_MAX_STRING_SIZE) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Too large ranges size: {}.", size); - - ranges.resize(size); - for (size_t i = 0; i < size; ++i) - { - readBinary(ranges[i].begin, buf); - readBinary(ranges[i].end, buf); - } -} - - -static void writeMarkRangesBinary(const MarkRanges & ranges, WriteBuffer & buf) -{ - writeVarUInt(ranges.size(), buf); - - for (const auto & [begin, end] : ranges) - { - writeBinary(begin, buf); - writeBinary(end, buf); - } -} - - -void PartitionReadRequest::serialize(WriteBuffer & out) const +void ParallelReadRequest::serialize(WriteBuffer & out) const { + UInt64 version = DBMS_PARALLEL_REPLICAS_PROTOCOL_VERSION; /// Must be the first - writeVarUInt(DBMS_PARALLEL_REPLICAS_PROTOCOL_VERSION, out); + writeIntBinary(version, out); - writeStringBinary(partition_id, out); - writeStringBinary(part_name, out); - writeStringBinary(projection_name, out); - - writeVarInt(block_range.begin, out); - writeVarInt(block_range.end, out); - - writeMarkRangesBinary(mark_ranges, out); + writeIntBinary(mode, out); + writeIntBinary(replica_num, out); + writeIntBinary(min_number_of_marks, out); + description.serialize(out); } -String PartitionReadRequest::toString() const +String ParallelReadRequest::describe() const { - WriteBufferFromOwnString out; - out << "partition: " << partition_id << ", part: " << part_name; - if (!projection_name.empty()) - out << ", projection: " << projection_name; - out << ", block range: [" << block_range.begin << ", " << block_range.end << "]"; - out << ", mark ranges: "; - - bool is_first = true; - for (const auto & [begin, end] : mark_ranges) - { - if (!is_first) - out << ", "; - out << "[" << begin << ", " << end << ")"; - is_first = false; - } - - return out.str(); + String result; + result += fmt::format("replica_num: {} \n", replica_num); + result += fmt::format("min_num_of_marks: {} \n", min_number_of_marks); + result += description.describe(); + return result; } - -void PartitionReadRequest::deserialize(ReadBuffer & in) +void ParallelReadRequest::deserialize(ReadBuffer & in) { UInt64 version; - readVarUInt(version, in); + readIntBinary(version, in); if (version != DBMS_PARALLEL_REPLICAS_PROTOCOL_VERSION) - throw Exception(ErrorCodes::UNKNOWN_PROTOCOL, "Protocol versions for parallel reading \ - from replicas differ. Got: {}, supported version: {}", + throw Exception(ErrorCodes::UNKNOWN_PROTOCOL, "Protocol versions for parallel reading "\ + "from replicas differ. 
Got: {}, supported version: {}", version, DBMS_PARALLEL_REPLICAS_PROTOCOL_VERSION); - readStringBinary(partition_id, in); - readStringBinary(part_name, in); - readStringBinary(projection_name, in); - - readVarInt(block_range.begin, in); - readVarInt(block_range.end, in); - - readMarkRangesBinary(mark_ranges, in); + readIntBinary(mode, in); + readIntBinary(replica_num, in); + readIntBinary(min_number_of_marks, in); + description.deserialize(in); } -UInt64 PartitionReadRequest::getConsistentHash(size_t buckets) const +void ParallelReadRequest::merge(ParallelReadRequest & other) { - SipHash hash; - - hash.update(partition_id.size()); - hash.update(partition_id); - - hash.update(part_name.size()); - hash.update(part_name); - - hash.update(projection_name.size()); - hash.update(projection_name); - - hash.update(block_range.begin); - hash.update(block_range.end); - - hash.update(mark_ranges.size()); - for (const auto & range : mark_ranges) - { - hash.update(range.begin); - hash.update(range.end); - } - - return ConsistentHashing(hash.get64(), buckets); + assert(mode == other.mode); + assert(replica_num == other.replica_num); + assert(min_number_of_marks == other.min_number_of_marks); + description.merge(other.description); } - -void PartitionReadResponse::serialize(WriteBuffer & out) const +void ParallelReadResponse::serialize(WriteBuffer & out) const { + UInt64 version = DBMS_PARALLEL_REPLICAS_PROTOCOL_VERSION; /// Must be the first - writeVarUInt(DBMS_PARALLEL_REPLICAS_PROTOCOL_VERSION, out); + writeIntBinary(version, out); - writeBinary(denied, out); - writeMarkRangesBinary(mark_ranges, out); + writeBoolText(finish, out); + description.serialize(out); } +String ParallelReadResponse::describe() const +{ + String result; + result += fmt::format("finish: {} \n", finish); + result += description.describe(); + return result; +} -void PartitionReadResponse::deserialize(ReadBuffer & in) +void ParallelReadResponse::deserialize(ReadBuffer & in) { UInt64 version; - readVarUInt(version, in); + readIntBinary(version, in); if (version != DBMS_PARALLEL_REPLICAS_PROTOCOL_VERSION) - throw Exception(ErrorCodes::UNKNOWN_PROTOCOL, "Protocol versions for parallel reading \ - from replicas differ. Got: {}, supported version: {}", + throw Exception(ErrorCodes::UNKNOWN_PROTOCOL, "Protocol versions for parallel reading " \ + "from replicas differ. Got: {}, supported version: {}", version, DBMS_PARALLEL_REPLICAS_PROTOCOL_VERSION); - UInt64 value; - readVarUInt(value, in); - denied = static_cast(value); - readMarkRangesBinary(mark_ranges, in); + readBoolText(finish, in); + description.deserialize(in); +} + + +void InitialAllRangesAnnouncement::serialize(WriteBuffer & out) const +{ + UInt64 version = DBMS_PARALLEL_REPLICAS_PROTOCOL_VERSION; + /// Must be the first + writeIntBinary(version, out); + + description.serialize(out); + writeIntBinary(replica_num, out); +} + + +String InitialAllRangesAnnouncement::describe() +{ + String result; + result += description.describe(); + result += fmt::format("----------\nReceived from {} replica\n", replica_num); + return result; +} + +void InitialAllRangesAnnouncement::deserialize(ReadBuffer & in) +{ + UInt64 version; + readIntBinary(version, in); + if (version != DBMS_PARALLEL_REPLICAS_PROTOCOL_VERSION) + throw Exception(ErrorCodes::UNKNOWN_PROTOCOL, "Protocol versions for parallel reading " \ + "from replicas differ. 
Got: {}, supported version: {}", + version, DBMS_PARALLEL_REPLICAS_PROTOCOL_VERSION); + + description.deserialize(in); + readIntBinary(replica_num, in); } } diff --git a/src/Storages/MergeTree/RequestResponse.h b/src/Storages/MergeTree/RequestResponse.h index ce9dc55f479..8cdb9e49be5 100644 --- a/src/Storages/MergeTree/RequestResponse.h +++ b/src/Storages/MergeTree/RequestResponse.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -9,12 +10,21 @@ #include #include +#include namespace DB { -/// Represents a segment [left; right] of part's block numbers. +enum class CoordinationMode +{ + Default, + /// For reading in order + WithOrder, + ReverseOrder +}; + +/// Represents a segment [left; right] struct PartBlockRange { Int64 begin; @@ -26,34 +36,44 @@ struct PartBlockRange } }; -struct PartitionReadRequest +struct ParallelReadRequest { - String partition_id; - String part_name; - String projection_name; - PartBlockRange block_range; - MarkRanges mark_ranges; + CoordinationMode mode; + size_t replica_num; + size_t min_number_of_marks; + + /// Extension for ordered mode + RangesInDataPartsDescription description; void serialize(WriteBuffer & out) const; + String describe() const; void deserialize(ReadBuffer & in); - - UInt64 getConsistentHash(size_t buckets) const; - - /// Describe it for debugging purposes. - String toString() const; + void merge(ParallelReadRequest & other); }; -struct PartitionReadResponse +struct ParallelReadResponse { - bool denied{false}; - MarkRanges mark_ranges{}; + bool finish{false}; + RangesInDataPartsDescription description; void serialize(WriteBuffer & out) const; + String describe() const; void deserialize(ReadBuffer & in); }; -using MergeTreeReadTaskCallback = std::function(PartitionReadRequest)>; +struct InitialAllRangesAnnouncement +{ + RangesInDataPartsDescription description; + size_t replica_num; + void serialize(WriteBuffer & out) const; + String describe(); + void deserialize(ReadBuffer & in); +}; + + +using MergeTreeAllRangesCallback = std::function; +using MergeTreeReadTaskCallback = std::function(ParallelReadRequest)>; } diff --git a/src/Storages/MergeTree/ZeroCopyLock.cpp b/src/Storages/MergeTree/ZeroCopyLock.cpp index dbb12d0d610..53dfe0c769f 100644 --- a/src/Storages/MergeTree/ZeroCopyLock.cpp +++ b/src/Storages/MergeTree/ZeroCopyLock.cpp @@ -2,8 +2,8 @@ namespace DB { - ZeroCopyLock::ZeroCopyLock(const zkutil::ZooKeeperPtr & zookeeper, const std::string & lock_path) - : lock(zkutil::createSimpleZooKeeperLock(zookeeper, lock_path, "part_exclusive_lock", "")) + ZeroCopyLock::ZeroCopyLock(const zkutil::ZooKeeperPtr & zookeeper, const std::string & lock_path, const std::string & lock_message) + : lock(zkutil::createSimpleZooKeeperLock(zookeeper, lock_path, "part_exclusive_lock", lock_message)) { } } diff --git a/src/Storages/MergeTree/ZeroCopyLock.h b/src/Storages/MergeTree/ZeroCopyLock.h index 96709fb01c9..4e73b27804c 100644 --- a/src/Storages/MergeTree/ZeroCopyLock.h +++ b/src/Storages/MergeTree/ZeroCopyLock.h @@ -12,7 +12,7 @@ namespace DB /// because due to bad abstraction we use it in MergeTreeData. 
struct ZeroCopyLock { - ZeroCopyLock(const zkutil::ZooKeeperPtr & zookeeper, const std::string & lock_path); + ZeroCopyLock(const zkutil::ZooKeeperPtr & zookeeper, const std::string & lock_path, const std::string & lock_message); /// Actual lock std::unique_ptr lock; diff --git a/src/Storages/MergeTree/registerStorageMergeTree.cpp b/src/Storages/MergeTree/registerStorageMergeTree.cpp index 2a94991ab8b..7458f3bad18 100644 --- a/src/Storages/MergeTree/registerStorageMergeTree.cpp +++ b/src/Storages/MergeTree/registerStorageMergeTree.cpp @@ -147,9 +147,9 @@ static StoragePtr create(const StorageFactory::Arguments & args) * - Additional MergeTreeSettings in the SETTINGS clause; */ - bool is_extended_storage_def = args.storage_def->partition_by || args.storage_def->primary_key || args.storage_def->order_by - || args.storage_def->sample_by || (args.query.columns_list->indices && !args.query.columns_list->indices->children.empty()) - || (args.query.columns_list->projections && !args.query.columns_list->projections->children.empty()) || args.storage_def->settings; + bool is_extended_storage_def = args.storage_def->isExtendedStorageDefinition() + || (args.query.columns_list->indices && !args.query.columns_list->indices->children.empty()) + || (args.query.columns_list->projections && !args.query.columns_list->projections->children.empty()); String name_part = args.engine_name.substr(0, args.engine_name.size() - strlen("MergeTree")); diff --git a/src/Storages/MergeTree/tests/gtest_coordinator.cpp b/src/Storages/MergeTree/tests/gtest_coordinator.cpp deleted file mode 100644 index 7bcf3304c2b..00000000000 --- a/src/Storages/MergeTree/tests/gtest_coordinator.cpp +++ /dev/null @@ -1,240 +0,0 @@ -#include - -#include -#include -#include - -#include - -#include - -using namespace DB; - - -TEST(HalfIntervals, Simple) -{ - ASSERT_TRUE(( - HalfIntervals{{{1, 2}, {3, 4}}}.negate() == - HalfIntervals{{{0, 1}, {2, 3}, {4, 18446744073709551615UL}}} - )); - - { - auto left = HalfIntervals{{{0, 2}, {4, 6}}}.negate(); - ASSERT_TRUE(( - left == - HalfIntervals{{{2, 4}, {6, 18446744073709551615UL}}} - )); - } - - { - auto left = HalfIntervals{{{0, 2}, {4, 6}}}; - auto right = HalfIntervals{{{1, 5}}}.negate(); - auto intersection = left.intersect(right); - - ASSERT_TRUE(( - intersection == - HalfIntervals{{{0, 1}, {5, 6}}} - )); - } - - { - auto left = HalfIntervals{{{1, 2}, {2, 3}}}; - auto right = HalfIntervals::initializeWithEntireSpace(); - auto intersection = right.intersect(left.negate()); - - ASSERT_TRUE(( - intersection == - HalfIntervals{{{0, 1}, {3, 18446744073709551615UL}}} - )); - } - - { - auto left = HalfIntervals{{{1, 2}, {2, 3}, {3, 4}, {4, 5}}}; - - ASSERT_EQ(getIntersection(left, HalfIntervals{{{1, 4}}}).convertToMarkRangesFinal().size(), 3); - ASSERT_EQ(getIntersection(left, HalfIntervals{{{1, 5}}}).convertToMarkRangesFinal().size(), 4); - } - - { - auto left = HalfIntervals{{{1, 3}, {3, 5}, {5, 7}}}; - - ASSERT_EQ(getIntersection(left, HalfIntervals{{{3, 5}}}).convertToMarkRangesFinal().size(), 1); - ASSERT_EQ(getIntersection(left, HalfIntervals{{{3, 7}}}).convertToMarkRangesFinal().size(), 2); - ASSERT_EQ(getIntersection(left, HalfIntervals{{{4, 6}}}).convertToMarkRangesFinal().size(), 2); - ASSERT_EQ(getIntersection(left, HalfIntervals{{{1, 7}}}).convertToMarkRangesFinal().size(), 3); - } - - { - auto left = HalfIntervals{{{1, 3}}}; - - ASSERT_EQ(getIntersection(left, HalfIntervals{{{3, 4}}}).convertToMarkRangesFinal().size(), 0); - } - - { - auto left = HalfIntervals{{{1, 2}, {3, 4}, {5, 6}}}; - 
- ASSERT_EQ(getIntersection(left, HalfIntervals{{{2, 3}}}).convertToMarkRangesFinal().size(), 0); - ASSERT_EQ(getIntersection(left, HalfIntervals{{{4, 5}}}).convertToMarkRangesFinal().size(), 0); - ASSERT_EQ(getIntersection(left, HalfIntervals{{{1, 6}}}).convertToMarkRangesFinal().size(), 3); - } -} - -TEST(HalfIntervals, TwoRequests) -{ - auto left = HalfIntervals{{{1, 2}, {2, 3}}}; - auto right = HalfIntervals{{{2, 3}, {3, 4}}}; - auto intersection = left.intersect(right); - - ASSERT_TRUE(( - intersection == - HalfIntervals{{{2, 3}}} - )); - - /// With negation - left = HalfIntervals{{{1, 2}, {2, 3}}}.negate(); - right = HalfIntervals{{{2, 3}, {3, 4}}}; - intersection = left.intersect(right); - - - ASSERT_TRUE(( - intersection == - HalfIntervals{{{3, 4}}} - )); -} - -TEST(HalfIntervals, SelfIntersection) -{ - auto left = HalfIntervals{{{1, 2}, {2, 3}, {4, 5}}}; - auto right = left; - auto intersection = left.intersect(right); - - ASSERT_TRUE(( - intersection == right - )); - - left = HalfIntervals{{{1, 2}, {2, 3}, {4, 5}}}; - right = left; - right.negate(); - intersection = left.intersect(right); - - ASSERT_TRUE(( - intersection == HalfIntervals{} - )); -} - - -TEST(Coordinator, Simple) -{ - PartitionReadRequest request; - request.partition_id = "a"; - request.part_name = "b"; - request.projection_name = "c"; - request.block_range = PartBlockRange{1, 2}; - request.mark_ranges = MarkRanges{{1, 2}, {3, 4}}; - - ParallelReplicasReadingCoordinator coordinator; - auto response = coordinator.handleRequest(request); - - ASSERT_FALSE(response.denied) << "Process request at first has to be accepted"; - - ASSERT_EQ(response.mark_ranges.size(), request.mark_ranges.size()); - - for (int i = 0; i < response.mark_ranges.size(); ++i) - EXPECT_EQ(response.mark_ranges[i], request.mark_ranges[i]); - - response = coordinator.handleRequest(request); - ASSERT_TRUE(response.denied) << "Process the same request second time"; -} - - -TEST(Coordinator, TwoRequests) -{ - PartitionReadRequest first; - first.partition_id = "a"; - first.part_name = "b"; - first.projection_name = "c"; - first.block_range = PartBlockRange{0, 0}; - first.mark_ranges = MarkRanges{{1, 2}, {2, 3}}; - - auto second = first; - second.mark_ranges = MarkRanges{{2, 3}, {3, 4}}; - - ParallelReplicasReadingCoordinator coordinator; - auto response = coordinator.handleRequest(first); - - ASSERT_FALSE(response.denied) << "First request must me accepted"; - - ASSERT_EQ(response.mark_ranges.size(), first.mark_ranges.size()); - for (int i = 0; i < response.mark_ranges.size(); ++i) - EXPECT_EQ(response.mark_ranges[i], first.mark_ranges[i]); - - response = coordinator.handleRequest(second); - ASSERT_FALSE(response.denied); - ASSERT_EQ(response.mark_ranges.size(), 1); - ASSERT_EQ(response.mark_ranges.front(), (MarkRange{3, 4})); -} - - -TEST(Coordinator, PartIntersections) -{ - { - PartSegments boundaries; - - boundaries.addPart(PartToRead{{1, 1}, {"TestPart", "TestProjection"}}); - boundaries.addPart(PartToRead{{2, 2}, {"TestPart", "TestProjection"}}); - boundaries.addPart(PartToRead{{3, 3}, {"TestPart", "TestProjection"}}); - boundaries.addPart(PartToRead{{4, 4}, {"TestPart", "TestProjection"}}); - - ASSERT_EQ(boundaries.getIntersectionResult({{1, 4}, {"TestPart", "TestProjection"}}), PartSegments::IntersectionResult::REJECT); - ASSERT_EQ(boundaries.getIntersectionResult({{0, 5}, {"TestPart", "TestProjection"}}), PartSegments::IntersectionResult::REJECT); - ASSERT_EQ(boundaries.getIntersectionResult({{1, 1}, {"TestPart", "TestProjection"}}), 
PartSegments::IntersectionResult::EXACTLY_ONE_INTERSECTION); - ASSERT_EQ(boundaries.getIntersectionResult({{1, 1}, {"ClickHouse", "AnotherProjection"}}), PartSegments::IntersectionResult::REJECT); - ASSERT_EQ(boundaries.getIntersectionResult({{1, 2}, {"TestPart", "TestProjection"}}), PartSegments::IntersectionResult::REJECT); - - boundaries.addPart(PartToRead{{5, 5}, {"TestPart", "TestProjection"}}); - boundaries.addPart(PartToRead{{0, 0}, {"TestPart", "TestProjection"}}); - - ASSERT_EQ(boundaries.getIntersectionResult({{0, 5}, {"TestPart", "TestProjection"}}), PartSegments::IntersectionResult::REJECT); - ASSERT_EQ(boundaries.getIntersectionResult({{1, 1}, {"TestPart", "TestProjection"}}), PartSegments::IntersectionResult::EXACTLY_ONE_INTERSECTION); - ASSERT_EQ(boundaries.getIntersectionResult({{1, 1}, {"ClickHouse", "AnotherProjection"}}), PartSegments::IntersectionResult::REJECT); - ASSERT_EQ(boundaries.getIntersectionResult({{1, 2}, {"TestPart", "TestProjection"}}), PartSegments::IntersectionResult::REJECT); - ASSERT_EQ(boundaries.getIntersectionResult({{0, 3}, {"TestPart", "TestProjection"}}), PartSegments::IntersectionResult::REJECT); - } - - { - PartSegments boundaries; - boundaries.addPart(PartToRead{{1, 3}, {"TestPart", "TestProjection"}}); - boundaries.addPart(PartToRead{{4, 5}, {"TestPart", "TestProjection"}}); - - ASSERT_EQ(boundaries.getIntersectionResult({{2, 4}, {"TestPart", "TestProjection"}}), PartSegments::IntersectionResult::REJECT); - ASSERT_EQ(boundaries.getIntersectionResult({{0, 6}, {"TestPart", "TestProjection"}}), PartSegments::IntersectionResult::REJECT); - } - - { - PartSegments boundaries; - boundaries.addPart(PartToRead{{1, 3}, {"TestPart", "TestProjection"}}); - boundaries.addPart(PartToRead{{4, 6}, {"TestPart", "TestProjection"}}); - boundaries.addPart(PartToRead{{7, 9}, {"TestPart", "TestProjection"}}); - - ASSERT_EQ(boundaries.getIntersectionResult({{2, 8}, {"TestPart", "TestProjection"}}), PartSegments::IntersectionResult::REJECT); - ASSERT_EQ(boundaries.getIntersectionResult({{4, 6}, {"TestPart", "TestProjection"}}), PartSegments::IntersectionResult::EXACTLY_ONE_INTERSECTION); - ASSERT_EQ(boundaries.getIntersectionResult({{3, 7}, {"TestPart", "TestProjection"}}), PartSegments::IntersectionResult::REJECT); - ASSERT_EQ(boundaries.getIntersectionResult({{5, 7}, {"TestPart", "TestProjection"}}), PartSegments::IntersectionResult::REJECT); - } - - { - PartSegments boundaries; - - ASSERT_EQ(boundaries.getIntersectionResult({{1, 1}, {"TestPart", "TestProjection"}}), PartSegments::IntersectionResult::NO_INTERSECTION); - ASSERT_EQ(boundaries.getIntersectionResult({{1, 3}, {"TestPart", "TestProjection"}}), PartSegments::IntersectionResult::NO_INTERSECTION); - ASSERT_EQ(boundaries.getIntersectionResult({{0, 100500}, {"TestPart", "TestProjection"}}), PartSegments::IntersectionResult::NO_INTERSECTION); - - boundaries.addPart(PartToRead{{1, 1}, {"TestPart", "TestProjection"}}); - boundaries.addPart(PartToRead{{2, 2}, {"TestPart", "TestProjection"}}); - boundaries.addPart(PartToRead{{3, 3}, {"TestPart", "TestProjection"}}); - - ASSERT_EQ(boundaries.getIntersectionResult({{1, 1}, {"TestPart", "TestProjection"}}), PartSegments::IntersectionResult::EXACTLY_ONE_INTERSECTION); - ASSERT_EQ(boundaries.getIntersectionResult({{1, 3}, {"TestPart", "TestProjection"}}), PartSegments::IntersectionResult::REJECT); - ASSERT_EQ(boundaries.getIntersectionResult({{100, 100500}, {"TestPart", "TestProjection"}}), PartSegments::IntersectionResult::NO_INTERSECTION); - } -} diff --git 
a/src/Storages/RabbitMQ/RabbitMQConsumer.h b/src/Storages/RabbitMQ/RabbitMQConsumer.h index ca3daa5e090..b5f51aba294 100644 --- a/src/Storages/RabbitMQ/RabbitMQConsumer.h +++ b/src/Storages/RabbitMQ/RabbitMQConsumer.h @@ -68,7 +68,7 @@ public: bool ackMessages(); void updateAckTracker(AckTracker record = AckTracker()); - bool queueEmpty() { return received.empty(); } + bool hasPendingMessages() { return !received.empty(); } auto getChannelID() const { return current.track.channel_id; } auto getDeliveryTag() const { return current.track.delivery_tag; } diff --git a/src/Storages/RabbitMQ/RabbitMQSettings.h b/src/Storages/RabbitMQ/RabbitMQSettings.h index 739230d25f5..09766b054a1 100644 --- a/src/Storages/RabbitMQ/RabbitMQSettings.h +++ b/src/Storages/RabbitMQ/RabbitMQSettings.h @@ -24,9 +24,12 @@ namespace DB M(String, rabbitmq_address, "", "Address for connection", 0) \ M(UInt64, rabbitmq_skip_broken_messages, 0, "Skip at least this number of broken messages from RabbitMQ per block", 0) \ M(UInt64, rabbitmq_max_block_size, 0, "Number of rows collected before flushing data from RabbitMQ.", 0) \ - M(Milliseconds, rabbitmq_flush_interval_ms, 0, "Timeout for flushing data from RabbitMQ.", 0) \ + M(UInt64, rabbitmq_flush_interval_ms, 0, "Timeout for flushing data from RabbitMQ.", 0) \ M(String, rabbitmq_vhost, "/", "RabbitMQ vhost.", 0) \ M(String, rabbitmq_queue_settings_list, "", "A list of rabbitmq queue settings", 0) \ + M(UInt64, rabbitmq_empty_queue_backoff_start_ms, 10, "A minimum backoff point to reschedule read if the rabbitmq queue is empty", 0) \ + M(UInt64, rabbitmq_empty_queue_backoff_end_ms, 10000, "A maximum backoff point to reschedule read if the rabbitmq queue is empty", 0) \ + M(UInt64, rabbitmq_empty_queue_backoff_step_ms, 100, "A backoff step to increase the reschedule interval if the rabbitmq queue is empty", 0) \ M(Bool, rabbitmq_queue_consume, false, "Use user-defined queues and do not make any RabbitMQ setup: declaring exchanges, queues, bindings", 0) \ M(String, rabbitmq_username, "", "RabbitMQ username", 0) \ M(String, rabbitmq_password, "", "RabbitMQ password", 0) \ diff --git a/src/Storages/RabbitMQ/RabbitMQSource.cpp b/src/Storages/RabbitMQ/RabbitMQSource.cpp index 98c8bfa9189..c11a518b338 100644 --- a/src/Storages/RabbitMQ/RabbitMQSource.cpp +++ b/src/Storages/RabbitMQ/RabbitMQSource.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include namespace DB @@ -62,6 +63,7 @@ RabbitMQSource::RabbitMQSource( , ack_in_suffix(ack_in_suffix_) , non_virtual_header(std::move(headers.first)) , virtual_header(std::move(headers.second)) + , log(&Poco::Logger::get("RabbitMQSource")) { storage.incrementReader(); } @@ -107,17 +109,15 @@ Chunk RabbitMQSource::generate() return chunk; } -bool RabbitMQSource::checkTimeLimit() const +bool RabbitMQSource::isTimeLimitExceeded() const { - if (max_execution_time != 0) + if (max_execution_time_ms != 0) { - auto elapsed_ns = total_stopwatch.elapsed(); - - if (elapsed_ns > static_cast<UInt64>(max_execution_time.totalMicroseconds()) * 1000) - return false; + uint64_t elapsed_time_ms = total_stopwatch.elapsedMilliseconds(); + return max_execution_time_ms <= elapsed_time_ms; } - return true; + return false; } Chunk RabbitMQSource::generateImpl() @@ -128,9 +128,11 @@ Chunk RabbitMQSource::generateImpl() consumer = storage.popConsumer(timeout); } - if (!consumer || is_finished) + if (is_finished || !consumer || consumer->isConsumerStopped()) return {}; + /// Currently it is one time usage source: to make sure data is flushed + /// strictly by timeout or by
block size. is_finished = true; MutableColumns virtual_columns = virtual_header.cloneEmptyColumns(); @@ -139,17 +141,17 @@ Chunk RabbitMQSource::generateImpl() storage.getFormatName(), empty_buf, non_virtual_header, context, max_block_size); StreamingFormatExecutor executor(non_virtual_header, input_format); - size_t total_rows = 0; while (true) { - if (consumer->queueEmpty()) - break; - size_t new_rows = 0; - if (auto buf = consumer->consume()) - new_rows = executor.execute(*buf); + + if (consumer->hasPendingMessages()) + { + if (auto buf = consumer->consume()) + new_rows = executor.execute(*buf); + } if (new_rows) { @@ -172,13 +174,18 @@ Chunk RabbitMQSource::generateImpl() virtual_columns[5]->insert(timestamp); } - total_rows = total_rows + new_rows; + total_rows += new_rows; } - if (total_rows >= max_block_size || consumer->queueEmpty() || consumer->isConsumerStopped() || !checkTimeLimit()) + if (total_rows >= max_block_size || consumer->isConsumerStopped() || isTimeLimitExceeded()) break; } + LOG_TEST( + log, + "Flushing {} rows (max block size: {}, time: {} / {} ms)", + total_rows, max_block_size, total_stopwatch.elapsedMilliseconds(), max_execution_time_ms); + if (total_rows == 0) return {}; diff --git a/src/Storages/RabbitMQ/RabbitMQSource.h b/src/Storages/RabbitMQ/RabbitMQSource.h index 6d948edfb59..6d06927fc79 100644 --- a/src/Storages/RabbitMQ/RabbitMQSource.h +++ b/src/Storages/RabbitMQ/RabbitMQSource.h @@ -27,13 +27,12 @@ public: Chunk generate() override; - bool queueEmpty() const { return !consumer || consumer->queueEmpty(); } + bool queueEmpty() const { return !consumer || !consumer->hasPendingMessages(); } bool needChannelUpdate(); void updateChannel(); bool sendAck(); - - void setTimeLimit(Poco::Timespan max_execution_time_) { max_execution_time = max_execution_time_; } + void setTimeLimit(uint64_t max_execution_time_ms_) { max_execution_time_ms = max_execution_time_ms_; } private: StorageRabbitMQ & storage; @@ -47,12 +46,13 @@ private: const Block non_virtual_header; const Block virtual_header; + Poco::Logger * log; RabbitMQConsumerPtr consumer; - Poco::Timespan max_execution_time = 0; + uint64_t max_execution_time_ms = 0; Stopwatch total_stopwatch {CLOCK_MONOTONIC_COARSE}; - bool checkTimeLimit() const; + bool isTimeLimitExceeded() const; RabbitMQSource( StorageRabbitMQ & storage_, diff --git a/src/Storages/RabbitMQ/StorageRabbitMQ.cpp b/src/Storages/RabbitMQ/StorageRabbitMQ.cpp index c23ef063145..f10a60419d1 100644 --- a/src/Storages/RabbitMQ/StorageRabbitMQ.cpp +++ b/src/Storages/RabbitMQ/StorageRabbitMQ.cpp @@ -36,7 +36,6 @@ namespace DB static const uint32_t QUEUE_SIZE = 100000; static const auto MAX_FAILED_READ_ATTEMPTS = 10; static const auto RESCHEDULE_MS = 500; -static const auto BACKOFF_TRESHOLD = 32000; static const auto MAX_THREAD_WORK_DURATION_MS = 60000; namespace ErrorCodes @@ -90,7 +89,7 @@ StorageRabbitMQ::StorageRabbitMQ( , semaphore(0, static_cast<int>(num_consumers)) , unique_strbase(getRandomName()) , queue_size(std::max(QUEUE_SIZE, static_cast<uint32_t>(getMaxBlockSize()))) - , milliseconds_to_wait(RESCHEDULE_MS) + , milliseconds_to_wait(rabbitmq_settings->rabbitmq_empty_queue_backoff_start_ms) , is_attach(is_attach_) { const auto & config = getContext()->getConfigRef(); @@ -717,6 +716,12 @@ void StorageRabbitMQ::read( auto rabbit_source = std::make_shared<RabbitMQSource>( *this, storage_snapshot, modified_context, column_names, 1, rabbitmq_settings->rabbitmq_commit_on_select); + uint64_t max_execution_time_ms = rabbitmq_settings->rabbitmq_flush_interval_ms.changed + ?
rabbitmq_settings->rabbitmq_flush_interval_ms + : static_cast<uint64_t>(getContext()->getSettingsRef().stream_flush_interval_ms); + + rabbit_source->setTimeLimit(max_execution_time_ms); + auto converting_dag = ActionsDAG::makeConvertingActions( rabbit_source->getPort().getHeader().getColumnsWithTypeAndName(), sample_block.getColumnsWithTypeAndName(), @@ -1015,14 +1020,14 @@ void StorageRabbitMQ::streamingToViewsFunc() if (streamToViews()) { /// Reschedule with backoff. - if (milliseconds_to_wait < BACKOFF_TRESHOLD) - milliseconds_to_wait *= 2; + if (milliseconds_to_wait < rabbitmq_settings->rabbitmq_empty_queue_backoff_end_ms) + milliseconds_to_wait += rabbitmq_settings->rabbitmq_empty_queue_backoff_step_ms; stopLoopIfNoReaders(); break; } else { - milliseconds_to_wait = RESCHEDULE_MS; + milliseconds_to_wait = rabbitmq_settings->rabbitmq_empty_queue_backoff_start_ms; } auto end_time = std::chrono::steady_clock::now(); @@ -1085,14 +1090,15 @@ bool StorageRabbitMQ::streamToViews() { auto source = std::make_shared( *this, storage_snapshot, rabbitmq_context, column_names, block_size, false); + + uint64_t max_execution_time_ms = rabbitmq_settings->rabbitmq_flush_interval_ms.changed + ? rabbitmq_settings->rabbitmq_flush_interval_ms + : static_cast<uint64_t>(getContext()->getSettingsRef().stream_flush_interval_ms); + + source->setTimeLimit(max_execution_time_ms); + sources.emplace_back(source); pipes.emplace_back(source); - - Poco::Timespan max_execution_time = rabbitmq_settings->rabbitmq_flush_interval_ms.changed - ? rabbitmq_settings->rabbitmq_flush_interval_ms - : getContext()->getSettingsRef().stream_flush_interval_ms; - - source->setTimeLimit(max_execution_time); } block_io.pipeline.complete(Pipe::unitePipes(std::move(pipes))); diff --git a/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp b/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp index 86c0dffa60d..90034c81f10 100644 --- a/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp +++ b/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp @@ -217,7 +217,7 @@ void StorageEmbeddedRocksDB::checkMutationIsPossible(const MutationCommands & co throw Exception(ErrorCodes::BAD_ARGUMENTS, "Only DELETE and UPDATE mutation supported for EmbeddedRocksDB"); } -void StorageEmbeddedRocksDB::mutate(const MutationCommands & commands, ContextPtr context_, bool /*force_wait*/) +void StorageEmbeddedRocksDB::mutate(const MutationCommands & commands, ContextPtr context_) { if (commands.empty()) return; diff --git a/src/Storages/RocksDB/StorageEmbeddedRocksDB.h b/src/Storages/RocksDB/StorageEmbeddedRocksDB.h index 7f6fc49fb18..32d7740009e 100644 --- a/src/Storages/RocksDB/StorageEmbeddedRocksDB.h +++ b/src/Storages/RocksDB/StorageEmbeddedRocksDB.h @@ -52,7 +52,7 @@ public: void truncate(const ASTPtr &, const StorageMetadataPtr & metadata_snapshot, ContextPtr, TableExclusiveLockHolder &) override; void checkMutationIsPossible(const MutationCommands & commands, const Settings & settings) const override; - void mutate(const MutationCommands &, ContextPtr, bool) override; + void mutate(const MutationCommands &, ContextPtr) override; bool supportsParallelInsert() const override { return true; } bool supportsIndexForIn() const override { return true; } diff --git a/src/Storages/SelectQueryInfo.h b/src/Storages/SelectQueryInfo.h index 9e29d438a4b..40ea84ec68b 100644 --- a/src/Storages/SelectQueryInfo.h +++ b/src/Storages/SelectQueryInfo.h @@ -1,15 +1,16 @@ #pragma once -#include -#include -#include -#include -#include -#include -#include #include #include +#include +#include 
+#include +#include +#include #include +#include +#include +#include #include @@ -207,6 +208,8 @@ struct SelectQueryInfo /// Configured in StorageDistributed::getQueryProcessingStage() ClusterPtr optimized_cluster; + mutable ParallelReplicasReadingCoordinatorPtr coordinator; + TreeRewriterResultPtr syntax_analyzer_result; /// This is an additional filer applied to current table. diff --git a/src/Storages/StorageDeltaLake.cpp b/src/Storages/StorageDeltaLake.cpp index 479a11b5eb4..c74e37a207c 100644 --- a/src/Storages/StorageDeltaLake.cpp +++ b/src/Storages/StorageDeltaLake.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include @@ -16,8 +17,6 @@ #include #include -#include -#include #include @@ -101,7 +100,7 @@ std::vector JsonMetadataGetter::getJsonLogFiles() const auto & client = base_configuration.client; - Aws::S3::Model::ListObjectsV2Request request; + S3::ListObjectsV2Request request; Aws::S3::Model::ListObjectsV2Outcome outcome; bool is_finished{false}; diff --git a/src/Storages/StorageDeltaLake.h b/src/Storages/StorageDeltaLake.h index af6485b9a40..5e3fdd8ad71 100644 --- a/src/Storages/StorageDeltaLake.h +++ b/src/Storages/StorageDeltaLake.h @@ -15,11 +15,6 @@ namespace Poco class Logger; } -namespace Aws::S3 -{ -class S3Client; -} - namespace DB { diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index 740ad67cc95..a2c3a10d836 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -140,52 +140,6 @@ namespace ActionLocks namespace { -/// select query has database, table and table function names as AST pointers -/// Creates a copy of query, changes database, table and table function names. -ASTPtr rewriteSelectQuery( - ContextPtr context, - const ASTPtr & query, - const std::string & remote_database, - const std::string & remote_table, - ASTPtr table_function_ptr = nullptr) -{ - auto modified_query_ast = query->clone(); - - ASTSelectQuery & select_query = modified_query_ast->as(); - - // Get rid of the settings clause so we don't send them to remote. Thus newly non-important - // settings won't break any remote parser. It's also more reasonable since the query settings - // are written into the query context and will be sent by the query pipeline. - select_query.setExpression(ASTSelectQuery::Expression::SETTINGS, {}); - - if (table_function_ptr) - select_query.addTableFunction(table_function_ptr); - else - select_query.replaceDatabaseAndTable(remote_database, remote_table); - - /// Restore long column names (cause our short names are ambiguous). - /// TODO: aliased table functions & CREATE TABLE AS table function cases - if (!table_function_ptr) - { - RestoreQualifiedNamesVisitor::Data data; - data.distributed_table = DatabaseAndTableWithAlias(*getTableExpression(query->as(), 0)); - data.remote_table.database = remote_database; - data.remote_table.table = remote_table; - RestoreQualifiedNamesVisitor(data).visit(modified_query_ast); - } - - /// To make local JOIN works, default database should be added to table names. 
- /// But only for JOIN section, since the following should work using default_database: - /// - SELECT * FROM d WHERE value IN (SELECT l.value FROM l) ORDER BY value - /// (see 01487_distributed_in_not_default_db) - AddDefaultDatabaseVisitor visitor(context, context->getCurrentDatabase(), - /* only_replace_current_database_function_= */false, - /* only_replace_in_join_= */true); - visitor.visit(modified_query_ast); - - return modified_query_ast; -} - /// Calculate maximum number in file names in directory and all subdirectories. /// To ensure global order of data blocks yet to be sent across server restarts. UInt64 getMaximumFileNumber(const std::string & dir_path) @@ -314,11 +268,11 @@ NamesAndTypesList StorageDistributed::getVirtuals() const /// NOTE This is weird. Most of these virtual columns are part of MergeTree /// tables info. But Distributed is general-purpose engine. return NamesAndTypesList{ - NameAndTypePair("_table", std::make_shared()), - NameAndTypePair("_part", std::make_shared()), + NameAndTypePair("_table", std::make_shared(std::make_shared())), + NameAndTypePair("_part", std::make_shared(std::make_shared())), NameAndTypePair("_part_index", std::make_shared()), NameAndTypePair("_part_uuid", std::make_shared()), - NameAndTypePair("_partition_id", std::make_shared()), + NameAndTypePair("_partition_id", std::make_shared(std::make_shared())), NameAndTypePair("_sample_factor", std::make_shared()), NameAndTypePair("_part_offset", std::make_shared()), NameAndTypePair("_row_exists", std::make_shared()), @@ -696,6 +650,7 @@ void StorageDistributed::read( const size_t /*max_block_size*/, const size_t /*num_streams*/) { + const auto * select_query = query_info.query->as(); if (select_query->final() && local_context->getSettingsRef().allow_experimental_parallel_reading_from_replicas) throw Exception(ErrorCodes::ILLEGAL_FINAL, "Final modifier is not allowed together with parallel reading from replicas feature"); @@ -719,10 +674,11 @@ void StorageDistributed::read( query_ast = query_info.query; } - auto modified_query_ast = rewriteSelectQuery( - local_context, query_ast, + const auto & modified_query_ast = ClusterProxy::rewriteSelectQuery( + local_context, query_info.query, remote_database, remote_table, remote_table_function_ptr); + /// Return directly (with correct header) if no shard to query. 
if (query_info.getCluster()->getShardsInfo().empty()) { @@ -746,25 +702,13 @@ void StorageDistributed::read( storage_snapshot, processed_stage); - - auto settings = local_context->getSettingsRef(); - bool parallel_replicas = settings.max_parallel_replicas > 1 && settings.allow_experimental_parallel_reading_from_replicas && !settings.use_hedged_requests; - - if (parallel_replicas) - ClusterProxy::executeQueryWithParallelReplicas( - query_plan, main_table, remote_table_function_ptr, - select_stream_factory, modified_query_ast, - local_context, query_info, - sharding_key_expr, sharding_key_column_name, - query_info.cluster, processed_stage); - else - ClusterProxy::executeQuery( - query_plan, header, processed_stage, - main_table, remote_table_function_ptr, - select_stream_factory, log, modified_query_ast, - local_context, query_info, - sharding_key_expr, sharding_key_column_name, - query_info.cluster); + ClusterProxy::executeQuery( + query_plan, header, processed_stage, + main_table, remote_table_function_ptr, + select_stream_factory, log, modified_query_ast, + local_context, query_info, + sharding_key_expr, sharding_key_column_name, + query_info.cluster); /// This is a bug, it is possible only when there is no shards to query, and this is handled earlier. if (!query_plan.isInitialized()) diff --git a/src/Storages/StorageHudi.cpp b/src/Storages/StorageHudi.cpp index d5675ceb17c..1b0de3c3ed2 100644 --- a/src/Storages/StorageHudi.cpp +++ b/src/Storages/StorageHudi.cpp @@ -7,12 +7,11 @@ #include #include +#include #include #include #include #include -#include -#include #include @@ -98,7 +97,7 @@ std::vector getKeysFromS3(const StorageS3::S3Configuration & base_c const auto & client = base_configuration.client; - Aws::S3::Model::ListObjectsV2Request request; + S3::ListObjectsV2Request request; Aws::S3::Model::ListObjectsV2Outcome outcome; bool is_finished{false}; diff --git a/src/Storages/StorageHudi.h b/src/Storages/StorageHudi.h index 00b8c01a46d..d4a9aa7ff09 100644 --- a/src/Storages/StorageHudi.h +++ b/src/Storages/StorageHudi.h @@ -12,11 +12,6 @@ namespace Poco class Logger; } -namespace Aws::S3 -{ -class S3Client; -} - namespace DB { diff --git a/src/Storages/StorageJoin.cpp b/src/Storages/StorageJoin.cpp index f4cf0875059..b57e717c272 100644 --- a/src/Storages/StorageJoin.cpp +++ b/src/Storages/StorageJoin.cpp @@ -108,7 +108,7 @@ void StorageJoin::checkMutationIsPossible(const MutationCommands & commands, con throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Table engine Join supports only DELETE mutations"); } -void StorageJoin::mutate(const MutationCommands & commands, ContextPtr context, bool /*force_wait*/) +void StorageJoin::mutate(const MutationCommands & commands, ContextPtr context) { /// Firstly acquire lock for mutation, that locks changes of data. /// We cannot acquire rwlock here, because read lock is needed diff --git a/src/Storages/StorageJoin.h b/src/Storages/StorageJoin.h index 96afd442c72..61ea743c841 100644 --- a/src/Storages/StorageJoin.h +++ b/src/Storages/StorageJoin.h @@ -45,7 +45,7 @@ public: /// Only delete is supported. void checkMutationIsPossible(const MutationCommands & commands, const Settings & settings) const override; - void mutate(const MutationCommands & commands, ContextPtr context, bool force_wait) override; + void mutate(const MutationCommands & commands, ContextPtr context) override; /// Return instance of HashJoin holding lock that protects from insertions to StorageJoin. 
/// HashJoin relies on structure of hash table that's why we need to return it with locked mutex. diff --git a/src/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp index d424344e7bf..971ecf8dbf2 100644 --- a/src/Storages/StorageMaterializedView.cpp +++ b/src/Storages/StorageMaterializedView.cpp @@ -319,10 +319,10 @@ void StorageMaterializedView::checkAlterPartitionIsPossible( getTargetTable()->checkAlterPartitionIsPossible(commands, metadata_snapshot, settings); } -void StorageMaterializedView::mutate(const MutationCommands & commands, ContextPtr local_context, bool force_wait) +void StorageMaterializedView::mutate(const MutationCommands & commands, ContextPtr local_context) { checkStatementCanBeForwarded(); - getTargetTable()->mutate(commands, local_context, force_wait); + getTargetTable()->mutate(commands, local_context); } void StorageMaterializedView::renameInMemory(const StorageID & new_table_id) diff --git a/src/Storages/StorageMaterializedView.h b/src/Storages/StorageMaterializedView.h index c0fee7e870b..af2dedf8164 100644 --- a/src/Storages/StorageMaterializedView.h +++ b/src/Storages/StorageMaterializedView.h @@ -65,7 +65,7 @@ public: void checkAlterPartitionIsPossible(const PartitionCommands & commands, const StorageMetadataPtr & metadata_snapshot, const Settings & settings) const override; - void mutate(const MutationCommands & commands, ContextPtr context, bool force_wait) override; + void mutate(const MutationCommands & commands, ContextPtr context) override; void renameInMemory(const StorageID & new_table_id) override; diff --git a/src/Storages/StorageMemory.cpp b/src/Storages/StorageMemory.cpp index f1b33977e27..881cbc18b10 100644 --- a/src/Storages/StorageMemory.cpp +++ b/src/Storages/StorageMemory.cpp @@ -305,7 +305,7 @@ void StorageMemory::checkMutationIsPossible(const MutationCommands & /*commands* /// Some validation will be added } -void StorageMemory::mutate(const MutationCommands & commands, ContextPtr context, bool /*force_wait*/) +void StorageMemory::mutate(const MutationCommands & commands, ContextPtr context) { std::lock_guard lock(mutex); auto metadata_snapshot = getInMemoryMetadataPtr(); diff --git a/src/Storages/StorageMemory.h b/src/Storages/StorageMemory.h index 2274a27a267..c739088dbe4 100644 --- a/src/Storages/StorageMemory.h +++ b/src/Storages/StorageMemory.h @@ -67,7 +67,7 @@ public: void drop() override; void checkMutationIsPossible(const MutationCommands & commands, const Settings & settings) const override; - void mutate(const MutationCommands & commands, ContextPtr context, bool force_wait) override; + void mutate(const MutationCommands & commands, ContextPtr context) override; void truncate(const ASTPtr &, const StorageMetadataPtr &, ContextPtr, TableExclusiveLockHolder &) override; diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index f4013b6d2c2..86dd1773496 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -664,7 +664,7 @@ QueryPipelineBuilderPtr ReadFromMerge::createSources( { ColumnWithTypeAndName column; column.name = "_database"; - column.type = std::make_shared<DataTypeString>(); + column.type = std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>()); column.column = column.type->createColumnConst(0, Field(database_name)); auto adding_column_dag = ActionsDAG::makeAddingColumnActions(std::move(column)); @@ -682,7 +682,7 @@ QueryPipelineBuilderPtr ReadFromMerge::createSources( { ColumnWithTypeAndName column; column.name = "_table"; - column.type = std::make_shared<DataTypeString>(); + column.type = std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>()); column.column = column.type->createColumnConst(0, Field(table_name)); auto adding_column_dag = ActionsDAG::makeAddingColumnActions(std::move(column)); @@ -980,7 +980,9 @@ void registerStorageMerge(StorageFactory & factory) NamesAndTypesList StorageMerge::getVirtuals() const { - NamesAndTypesList virtuals{{"_database", std::make_shared<DataTypeString>()}, {"_table", std::make_shared<DataTypeString>()}}; + NamesAndTypesList virtuals{ + {"_database", std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>())}, + {"_table", std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>())}}; auto first_table = getFirstTable([](auto && table) { return table; }); if (first_table) diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index 60c5157d463..125322281d0 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -1,4 +1,5 @@ #include "StorageMergeTree.h" +#include "Core/QueryProcessingStage.h" #include "Storages/MergeTree/IMergeTreeDataPart.h" #include @@ -14,6 +15,8 @@ #include #include #include +#include +#include #include #include #include @@ -209,15 +212,39 @@ void StorageMergeTree::read( size_t max_block_size, size_t num_streams) { - /// If true, then we will ask initiator if we can read chosen ranges - bool enable_parallel_reading = local_context->getClientInfo().collaborate_with_initiator; + if (local_context->canUseParallelReplicasOnInitiator()) + { + auto table_id = getStorageID(); - if (enable_parallel_reading) - LOG_TRACE(log, "Parallel reading from replicas enabled: {}", enable_parallel_reading); + const auto & modified_query_ast = ClusterProxy::rewriteSelectQuery( + local_context, query_info.query, + table_id.database_name, table_id.table_name, /*remote_table_function_ptr*/nullptr); - if (auto plan = reader.read( - column_names, storage_snapshot, query_info, local_context, max_block_size, num_streams, processed_stage, nullptr, enable_parallel_reading)) - query_plan = std::move(*plan); + auto cluster = local_context->getCluster(local_context->getSettingsRef().cluster_for_parallel_replicas); + + Block header = + InterpreterSelectQuery(modified_query_ast, local_context, SelectQueryOptions(processed_stage).analyze()).getSampleBlock(); + + ClusterProxy::SelectStreamFactory select_stream_factory = + ClusterProxy::SelectStreamFactory( + header, + {}, + storage_snapshot, + processed_stage); + + ClusterProxy::executeQueryWithParallelReplicas( + query_plan, getStorageID(), /*remote_table_function_ptr*/ nullptr, + select_stream_factory, modified_query_ast, + local_context, query_info, cluster); + } + else + { + if (auto plan = reader.read( + column_names, storage_snapshot, query_info, + local_context, max_block_size, num_streams, + processed_stage, nullptr, /*enable_parallel_reading*/local_context->canUseParallelReplicasOnFollower())) + query_plan = std::move(*plan); + } /// Now, copy of parts that is required for the query, stored in the processors, /// while snapshot_data.parts includes all parts, even one that had been filtered out with partition pruning, @@ -532,14 +559,14 @@ void StorageMergeTree::setMutationCSN(const String & mutation_id, CSN csn) it->second.writeCSN(csn); } -void StorageMergeTree::mutate(const MutationCommands & commands, ContextPtr query_context, bool force_wait) +void StorageMergeTree::mutate(const MutationCommands & commands, ContextPtr query_context) { /// Validate partition IDs (if any) before starting mutation getPartitionIdsAffectedByCommands(commands, query_context); Int64 version = startMutation(commands, query_context); - if (force_wait 
|| query_context->getSettingsRef().mutations_sync > 0 || query_context->getCurrentTransaction()) + if (query_context->getSettingsRef().mutations_sync > 0 || query_context->getCurrentTransaction()) waitForMutation(version); } @@ -1574,37 +1601,39 @@ void StorageMergeTree::renameAndCommitEmptyParts(MutableDataPartsVector & new_pa void StorageMergeTree::truncate(const ASTPtr &, const StorageMetadataPtr &, ContextPtr query_context, TableExclusiveLockHolder &) { - /// Asks to complete merges and does not allow them to start. - /// This protects against "revival" of data for a removed partition after completion of merge. - auto merge_blocker = stopMergesAndWait(); - waitForOutdatedPartsToBeLoaded(); - - Stopwatch watch; - - auto txn = query_context->getCurrentTransaction(); - MergeTreeData::Transaction transaction(*this, txn.get()); { - auto operation_data_parts_lock = lockOperationsWithParts(); + /// Asks to complete merges and does not allow them to start. + /// This protects against "revival" of data for a removed partition after completion of merge. + auto merge_blocker = stopMergesAndWait(); + waitForOutdatedPartsToBeLoaded(); - auto parts = getVisibleDataPartsVector(query_context); + Stopwatch watch; - auto future_parts = initCoverageWithNewEmptyParts(parts); + auto txn = query_context->getCurrentTransaction(); + MergeTreeData::Transaction transaction(*this, txn.get()); + { + auto operation_data_parts_lock = lockOperationsWithParts(); - LOG_TEST(log, "Made {} empty parts in order to cover {} parts. Empty parts: {}, covered parts: {}. With txn {}", - future_parts.size(), parts.size(), - fmt::join(getPartsNames(future_parts), ", "), fmt::join(getPartsNames(parts), ", "), - transaction.getTID()); + auto parts = getVisibleDataPartsVector(query_context); - captureTmpDirectoryHolders(*this, future_parts); + auto future_parts = initCoverageWithNewEmptyParts(parts); - auto new_data_parts = createEmptyDataParts(*this, future_parts, txn); - renameAndCommitEmptyParts(new_data_parts, transaction); + LOG_TEST(log, "Made {} empty parts in order to cover {} parts. Empty parts: {}, covered parts: {}. With txn {}", + future_parts.size(), parts.size(), + fmt::join(getPartsNames(future_parts), ", "), fmt::join(getPartsNames(parts), ", "), + transaction.getTID()); - PartLog::addNewParts(query_context, new_data_parts, watch.elapsed()); + captureTmpDirectoryHolders(*this, future_parts); - LOG_INFO(log, "Truncated table with {} parts by replacing them with new empty {} parts. With txn {}", - parts.size(), future_parts.size(), - transaction.getTID()); + auto new_data_parts = createEmptyDataParts(*this, future_parts, txn); + renameAndCommitEmptyParts(new_data_parts, transaction); + + PartLog::addNewParts(query_context, new_data_parts, watch.elapsed()); + + LOG_INFO(log, "Truncated table with {} parts by replacing them with new empty {} parts. With txn {}", + parts.size(), future_parts.size(), + transaction.getTID()); + } } /// Old parts are needed to be destroyed before clearing them from filesystem. @@ -1615,48 +1644,50 @@ void StorageMergeTree::truncate(const ASTPtr &, const StorageMetadataPtr &, Cont void StorageMergeTree::dropPart(const String & part_name, bool detach, ContextPtr query_context) { - /// Asks to complete merges and does not allow them to start. - /// This protects against "revival" of data for a removed partition after completion of merge. 
- auto merge_blocker = stopMergesAndWait(); - - Stopwatch watch; - - /// It's important to create it outside of lock scope because - /// otherwise it can lock parts in destructor and deadlock is possible. - auto txn = query_context->getCurrentTransaction(); - MergeTreeData::Transaction transaction(*this, txn.get()); { - auto operation_data_parts_lock = lockOperationsWithParts(); + /// Asks to complete merges and does not allow them to start. + /// This protects against "revival" of data for a removed partition after completion of merge. + auto merge_blocker = stopMergesAndWait(); - auto part = getPartIfExists(part_name, {MergeTreeDataPartState::Active}); - if (!part) - throw Exception(ErrorCodes::NO_SUCH_DATA_PART, "Part {} not found, won't try to drop it.", part_name); + Stopwatch watch; - if (detach) + /// It's important to create it outside of lock scope because + /// otherwise it can lock parts in destructor and deadlock is possible. + auto txn = query_context->getCurrentTransaction(); + MergeTreeData::Transaction transaction(*this, txn.get()); { - auto metadata_snapshot = getInMemoryMetadataPtr(); - LOG_INFO(log, "Detaching {}", part->getDataPartStorage().getPartDirectory()); - part->makeCloneInDetached("", metadata_snapshot); - } + auto operation_data_parts_lock = lockOperationsWithParts(); - { - auto future_parts = initCoverageWithNewEmptyParts({part}); + auto part = getPartIfExists(part_name, {MergeTreeDataPartState::Active}); + if (!part) + throw Exception(ErrorCodes::NO_SUCH_DATA_PART, "Part {} not found, won't try to drop it.", part_name); - LOG_TEST(log, "Made {} empty parts in order to cover {} part. With txn {}", - fmt::join(getPartsNames(future_parts), ", "), fmt::join(getPartsNames({part}), ", "), - transaction.getTID()); + if (detach) + { + auto metadata_snapshot = getInMemoryMetadataPtr(); + LOG_INFO(log, "Detaching {}", part->getDataPartStorage().getPartDirectory()); + part->makeCloneInDetached("", metadata_snapshot); + } - captureTmpDirectoryHolders(*this, future_parts); + { + auto future_parts = initCoverageWithNewEmptyParts({part}); - auto new_data_parts = createEmptyDataParts(*this, future_parts, txn); - renameAndCommitEmptyParts(new_data_parts, transaction); + LOG_TEST(log, "Made {} empty parts in order to cover {} part. With txn {}", + fmt::join(getPartsNames(future_parts), ", "), fmt::join(getPartsNames({part}), ", "), + transaction.getTID()); - PartLog::addNewParts(query_context, new_data_parts, watch.elapsed()); + captureTmpDirectoryHolders(*this, future_parts); - const auto * op = detach ? "Detached" : "Dropped"; - LOG_INFO(log, "{} {} part by replacing it with new empty {} part. With txn {}", - op, part->name, future_parts[0].part_name, - transaction.getTID()); + auto new_data_parts = createEmptyDataParts(*this, future_parts, txn); + renameAndCommitEmptyParts(new_data_parts, transaction); + + PartLog::addNewParts(query_context, new_data_parts, watch.elapsed()); + + const auto * op = detach ? "Detached" : "Dropped"; + LOG_INFO(log, "{} {} part by replacing it with new empty {} part. With txn {}", + op, part->name, future_parts[0].part_name, + transaction.getTID()); + } } } @@ -1668,58 +1699,60 @@ void StorageMergeTree::dropPart(const String & part_name, bool detach, ContextPt void StorageMergeTree::dropPartition(const ASTPtr & partition, bool detach, ContextPtr query_context) { - const auto * partition_ast = partition->as(); - - /// Asks to complete merges and does not allow them to start. 
- /// This protects against "revival" of data for a removed partition after completion of merge. - auto merge_blocker = stopMergesAndWait(); - - Stopwatch watch; - - /// It's important to create it outside of lock scope because - /// otherwise it can lock parts in destructor and deadlock is possible. - auto txn = query_context->getCurrentTransaction(); - MergeTreeData::Transaction transaction(*this, txn.get()); { - auto operation_data_parts_lock = lockOperationsWithParts(); + const auto * partition_ast = partition->as(); - DataPartsVector parts; + /// Asks to complete merges and does not allow them to start. + /// This protects against "revival" of data for a removed partition after completion of merge. + auto merge_blocker = stopMergesAndWait(); + + Stopwatch watch; + + /// It's important to create it outside of lock scope because + /// otherwise it can lock parts in destructor and deadlock is possible. + auto txn = query_context->getCurrentTransaction(); + MergeTreeData::Transaction transaction(*this, txn.get()); { - if (partition_ast && partition_ast->all) - parts = getVisibleDataPartsVector(query_context); - else + auto operation_data_parts_lock = lockOperationsWithParts(); + + DataPartsVector parts; { - String partition_id = getPartitionIDFromQuery(partition, query_context); - parts = getVisibleDataPartsVectorInPartition(query_context, partition_id); + if (partition_ast && partition_ast->all) + parts = getVisibleDataPartsVector(query_context); + else + { + String partition_id = getPartitionIDFromQuery(partition, query_context); + parts = getVisibleDataPartsVectorInPartition(query_context, partition_id); + } } + + if (detach) + for (const auto & part : parts) + { + auto metadata_snapshot = getInMemoryMetadataPtr(); + LOG_INFO(log, "Detaching {}", part->getDataPartStorage().getPartDirectory()); + part->makeCloneInDetached("", metadata_snapshot); + } + + auto future_parts = initCoverageWithNewEmptyParts(parts); + + LOG_TEST(log, "Made {} empty parts in order to cover {} parts. Empty parts: {}, covered parts: {}. With txn {}", + future_parts.size(), parts.size(), + fmt::join(getPartsNames(future_parts), ", "), fmt::join(getPartsNames(parts), ", "), + transaction.getTID()); + + captureTmpDirectoryHolders(*this, future_parts); + + auto new_data_parts = createEmptyDataParts(*this, future_parts, txn); + renameAndCommitEmptyParts(new_data_parts, transaction); + + PartLog::addNewParts(query_context, new_data_parts, watch.elapsed()); + + const auto * op = detach ? "Detached" : "Dropped"; + LOG_INFO(log, "{} partition with {} parts by replacing them with new empty {} parts. With txn {}", + op, parts.size(), future_parts.size(), + transaction.getTID()); } - - if (detach) - for (const auto & part : parts) - { - auto metadata_snapshot = getInMemoryMetadataPtr(); - LOG_INFO(log, "Detaching {}", part->getDataPartStorage().getPartDirectory()); - part->makeCloneInDetached("", metadata_snapshot); - } - - auto future_parts = initCoverageWithNewEmptyParts(parts); - - LOG_TEST(log, "Made {} empty parts in order to cover {} parts. Empty parts: {}, covered parts: {}. 
With txn {}", - future_parts.size(), parts.size(), - fmt::join(getPartsNames(future_parts), ", "), fmt::join(getPartsNames(parts), ", "), - transaction.getTID()); - - captureTmpDirectoryHolders(*this, future_parts); - - auto new_data_parts = createEmptyDataParts(*this, future_parts, txn); - renameAndCommitEmptyParts(new_data_parts, transaction); - - PartLog::addNewParts(query_context, new_data_parts, watch.elapsed()); - - const auto * op = detach ? "Detached" : "Dropped"; - LOG_INFO(log, "{} partition with {} parts by replacing them with new empty {} parts. With txn {}", - op, parts.size(), future_parts.size(), - transaction.getTID()); } /// Old parts are needed to be destroyed before clearing them from filesystem. @@ -1849,7 +1882,7 @@ void StorageMergeTree::replacePartitionFrom(const StoragePtr & source_table, con } catch (...) { - PartLog::addNewParts(getContext(), dst_parts, watch.elapsed(), ExecutionStatus::fromCurrentException()); + PartLog::addNewParts(getContext(), dst_parts, watch.elapsed(), ExecutionStatus::fromCurrentException("", true)); throw; } } @@ -1932,7 +1965,7 @@ void StorageMergeTree::movePartitionToTable(const StoragePtr & dest_table, const } catch (...) { - PartLog::addNewParts(getContext(), dst_parts, watch.elapsed(), ExecutionStatus::fromCurrentException()); + PartLog::addNewParts(getContext(), dst_parts, watch.elapsed(), ExecutionStatus::fromCurrentException("", true)); throw; } } diff --git a/src/Storages/StorageMergeTree.h b/src/Storages/StorageMergeTree.h index 706ceda17b3..1dff6323e4c 100644 --- a/src/Storages/StorageMergeTree.h +++ b/src/Storages/StorageMergeTree.h @@ -85,7 +85,7 @@ public: const Names & deduplicate_by_columns, ContextPtr context) override; - void mutate(const MutationCommands & commands, ContextPtr context, bool force_wait) override; + void mutate(const MutationCommands & commands, ContextPtr context) override; bool hasLightweightDeletedMask() const override; diff --git a/src/Storages/StorageProxy.h b/src/Storages/StorageProxy.h index 2ce5f85e11f..b31707eeb62 100644 --- a/src/Storages/StorageProxy.h +++ b/src/Storages/StorageProxy.h @@ -132,7 +132,7 @@ public: return getNested()->optimize(query, metadata_snapshot, partition, final, deduplicate, deduplicate_by_columns, context); } - void mutate(const MutationCommands & commands, ContextPtr context, bool force_wait) override { getNested()->mutate(commands, context, force_wait); } + void mutate(const MutationCommands & commands, ContextPtr context) override { getNested()->mutate(commands, context); } CancellationCode killMutation(const String & mutation_id) override { return getNested()->killMutation(mutation_id); } @@ -162,4 +162,3 @@ public: } - diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index e0504f823dd..16a2f49b8df 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -71,15 +71,18 @@ #include #include -#include -#include +#include +#include #include #include -#include -#include +#include #include +#include +#include +#include #include + #include #include #include @@ -2337,7 +2340,7 @@ bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry) } catch (...) 
{ - PartLog::addNewParts(getContext(), res_parts, watch.elapsed(), ExecutionStatus::fromCurrentException()); + PartLog::addNewParts(getContext(), res_parts, watch.elapsed(), ExecutionStatus::fromCurrentException("", true)); for (const auto & res_part : res_parts) unlockSharedData(*res_part); @@ -4190,7 +4193,7 @@ bool StorageReplicatedMergeTree::fetchPart( catch (...) { if (!to_detached) - write_part_log(ExecutionStatus::fromCurrentException()); + write_part_log(ExecutionStatus::fromCurrentException("", true)); throw; } @@ -4300,7 +4303,7 @@ MutableDataPartStoragePtr StorageReplicatedMergeTree::fetchExistsPart( } catch (...) { - write_part_log(ExecutionStatus::fromCurrentException()); + write_part_log(ExecutionStatus::fromCurrentException("", true)); throw; } @@ -4543,9 +4546,6 @@ void StorageReplicatedMergeTree::read( const size_t max_block_size, const size_t num_streams) { - /// If true, then we will ask initiator if we can read chosen ranges - const bool enable_parallel_reading = local_context->getClientInfo().collaborate_with_initiator; - SCOPE_EXIT({ /// Now, copy of parts that is required for the query, stored in the processors, /// while snapshot_data.parts includes all parts, even one that had been filtered out with partition pruning, @@ -4564,16 +4564,43 @@ void StorageReplicatedMergeTree::read( auto max_added_blocks = std::make_shared(getMaxAddedBlocks()); if (auto plan = reader.read( column_names, storage_snapshot, query_info, local_context, - max_block_size, num_streams, processed_stage, std::move(max_added_blocks), enable_parallel_reading)) + max_block_size, num_streams, processed_stage, std::move(max_added_blocks), /*enable_parallel_reading*/false)) query_plan = std::move(*plan); return; } - if (auto plan = reader.read( - column_names, storage_snapshot, query_info, local_context, - max_block_size, num_streams, processed_stage, nullptr, enable_parallel_reading)) + if (local_context->canUseParallelReplicasOnInitiator()) { - query_plan = std::move(*plan); + auto table_id = getStorageID(); + + const auto & modified_query_ast = ClusterProxy::rewriteSelectQuery( + local_context, query_info.query, + table_id.database_name, table_id.table_name, /*remote_table_function_ptr*/nullptr); + + auto cluster = local_context->getCluster(local_context->getSettingsRef().cluster_for_parallel_replicas); + + Block header = + InterpreterSelectQuery(modified_query_ast, local_context, SelectQueryOptions(processed_stage).analyze()).getSampleBlock(); + + ClusterProxy::SelectStreamFactory select_stream_factory = + ClusterProxy::SelectStreamFactory( + header, + {}, + storage_snapshot, + processed_stage); + + ClusterProxy::executeQueryWithParallelReplicas( + query_plan, getStorageID(), /*remote_table_function_ptr*/ nullptr, + select_stream_factory, modified_query_ast, + local_context, query_info, cluster); + } + else + { + if (auto plan = reader.read( + column_names, storage_snapshot, query_info, + local_context, max_block_size, num_streams, + processed_stage, nullptr, /*enable_parallel_reading*/local_context->canUseParallelReplicasOnFollower())) + query_plan = std::move(*plan); } } @@ -6276,7 +6303,7 @@ void StorageReplicatedMergeTree::fetchPartition( } -void StorageReplicatedMergeTree::mutate(const MutationCommands & commands, ContextPtr query_context, bool force_wait) +void StorageReplicatedMergeTree::mutate(const MutationCommands & commands, ContextPtr query_context) { /// Overview of the mutation algorithm. 
/// @@ -6390,8 +6417,7 @@ void StorageReplicatedMergeTree::mutate(const MutationCommands & commands, Conte throw Coordination::Exception("Unable to create a mutation znode", rc); } - const size_t mutations_sync = force_wait ? 2 : query_context->getSettingsRef().mutations_sync; - waitMutation(mutation_entry.znode_name, mutations_sync); + waitMutation(mutation_entry.znode_name, query_context->getSettingsRef().mutations_sync); } void StorageReplicatedMergeTree::waitMutation(const String & znode_name, size_t mutations_sync) const @@ -7042,7 +7068,7 @@ void StorageReplicatedMergeTree::replacePartitionFrom( } catch (...) { - PartLog::addNewParts(getContext(), dst_parts, watch.elapsed(), ExecutionStatus::fromCurrentException()); + PartLog::addNewParts(getContext(), dst_parts, watch.elapsed(), ExecutionStatus::fromCurrentException("", true)); for (const auto & dst_part : dst_parts) unlockSharedData(*dst_part); @@ -7271,7 +7297,7 @@ void StorageReplicatedMergeTree::movePartitionToTable(const StoragePtr & dest_ta } catch (...) { - PartLog::addNewParts(getContext(), dst_parts, watch.elapsed(), ExecutionStatus::fromCurrentException()); + PartLog::addNewParts(getContext(), dst_parts, watch.elapsed(), ExecutionStatus::fromCurrentException("", true)); for (const auto & dst_part : dst_parts) dest_table_storage->unlockSharedData(*dst_part); @@ -8484,14 +8510,14 @@ Strings StorageReplicatedMergeTree::getZeroCopyPartPath( return res; } -bool StorageReplicatedMergeTree::checkZeroCopyLockExists(const String & part_name, const DiskPtr & disk) +bool StorageReplicatedMergeTree::checkZeroCopyLockExists(const String & part_name, const DiskPtr & disk, String & lock_replica) { auto path = getZeroCopyPartPath(part_name, disk); if (path) { /// FIXME auto lock_path = fs::path(*path) / "part_exclusive_lock"; - if (getZooKeeper()->exists(lock_path)) + if (getZooKeeper()->tryGet(lock_path, lock_replica)) { return true; } @@ -8524,7 +8550,7 @@ std::optional StorageReplicatedMergeTree::tryCreateZeroCopyExclusi zookeeper->createIfNotExists(zc_zookeeper_path, ""); /// Create actual lock - ZeroCopyLock lock(zookeeper, zc_zookeeper_path); + ZeroCopyLock lock(zookeeper, zc_zookeeper_path, replica_name); if (lock.lock->tryLock()) return lock; else diff --git a/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h index 568f9709aaa..3db54430b34 100644 --- a/src/Storages/StorageReplicatedMergeTree.h +++ b/src/Storages/StorageReplicatedMergeTree.h @@ -153,7 +153,7 @@ public: void alter(const AlterCommands & commands, ContextPtr query_context, AlterLockHolder & table_lock_holder) override; - void mutate(const MutationCommands & commands, ContextPtr context, bool force_wait) override; + void mutate(const MutationCommands & commands, ContextPtr context) override; void waitMutation(const String & znode_name, size_t mutations_sync) const; std::vector getMutationsStatus() const override; CancellationCode killMutation(const String & mutation_id) override; @@ -858,7 +858,7 @@ private: // Create table id if needed void createTableSharedID() const; - bool checkZeroCopyLockExists(const String & part_name, const DiskPtr & disk); + bool checkZeroCopyLockExists(const String & part_name, const DiskPtr & disk, String & lock_replica); std::optional getZeroCopyPartPath(const String & part_name, const DiskPtr & disk); diff --git a/src/Storages/StorageS3.cpp b/src/Storages/StorageS3.cpp index 205b0c7d067..f6419430746 100644 --- a/src/Storages/StorageS3.cpp +++ b/src/Storages/StorageS3.cpp @@ -11,6 +11,7 @@ #include 
#include +#include #include #include @@ -51,10 +52,6 @@ #include #include -#include -#include -#include -#include #include #include @@ -110,6 +107,7 @@ namespace ErrorCodes extern const int CANNOT_EXTRACT_TABLE_STRUCTURE; extern const int NOT_IMPLEMENTED; extern const int CANNOT_COMPILE_REGEXP; + extern const int FILE_DOESNT_EXIST; } class IOutputFormat; @@ -136,7 +134,7 @@ class StorageS3Source::DisclosedGlobIterator::Impl : WithContext { public: Impl( - const Aws::S3::S3Client & client_, + const S3::Client & client_, const S3::URI & globbed_uri_, ASTPtr & query_, const Block & virtual_header_, @@ -145,7 +143,7 @@ public: Strings * read_keys_, const S3Settings::RequestSettings & request_settings_) : WithContext(context_) - , client(client_) + , client(S3::Client::create(client_)) , globbed_uri(globbed_uri_) , query(query_) , virtual_header(virtual_header_) @@ -263,6 +261,9 @@ private: outcome_future = listObjectsAsync(); } + if (request_settings.throw_on_zero_files_match && result_batch.empty()) + throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "Can not match any files using prefix {}", request.GetPrefix()); + KeysWithInfo temp_buffer; temp_buffer.reserve(result_batch.size()); @@ -349,7 +350,7 @@ private: return list_objects_scheduler([this] { ProfileEvents::increment(ProfileEvents::S3ListObjects); - auto outcome = client.ListObjectsV2(request); + auto outcome = client->ListObjectsV2(request); /// Outcome failure will be handled on the caller side. if (outcome.IsSuccess()) @@ -364,7 +365,7 @@ private: KeysWithInfo buffer; KeysWithInfo::iterator buffer_iter; - Aws::S3::S3Client client; + std::unique_ptr client; S3::URI globbed_uri; ASTPtr query; Block virtual_header; @@ -376,7 +377,7 @@ private: ObjectInfos * object_infos; Strings * read_keys; - Aws::S3::Model::ListObjectsV2Request request; + S3::ListObjectsV2Request request; S3Settings::RequestSettings request_settings; ThreadPool list_objects_pool; @@ -386,7 +387,7 @@ private: }; StorageS3Source::DisclosedGlobIterator::DisclosedGlobIterator( - const Aws::S3::S3Client & client_, + const S3::Client & client_, const S3::URI & globbed_uri_, ASTPtr query, const Block & virtual_header, @@ -412,7 +413,7 @@ class StorageS3Source::KeysIterator::Impl : WithContext { public: explicit Impl( - const Aws::S3::S3Client & client_, + const S3::Client & client_, const std::string & version_id_, const std::vector & keys_, const String & bucket_, @@ -507,7 +508,7 @@ private: }; StorageS3Source::KeysIterator::KeysIterator( - const Aws::S3::S3Client & client_, + const S3::Client & client_, const std::string & version_id_, const std::vector & keys_, const String & bucket_, @@ -552,7 +553,7 @@ StorageS3Source::StorageS3Source( UInt64 max_block_size_, const S3Settings::RequestSettings & request_settings_, String compression_hint_, - const std::shared_ptr & client_, + const std::shared_ptr & client_, const String & bucket_, const String & version_id_, std::shared_ptr file_iterator_, @@ -1201,7 +1202,7 @@ void StorageS3::truncate(const ASTPtr & /* query */, const StorageMetadataPtr &, } ProfileEvents::increment(ProfileEvents::S3DeleteObjects); - Aws::S3::Model::DeleteObjectsRequest request; + S3::DeleteObjectsRequest request; request.SetBucket(s3_configuration.uri.bucket); request.SetDelete(delkeys); @@ -1211,6 +1212,9 @@ void StorageS3::truncate(const ASTPtr & /* query */, const StorageMetadataPtr &, const auto & err = response.GetError(); throw Exception(ErrorCodes::S3_ERROR, "{}: {}", std::to_string(static_cast(err.GetErrorType())), err.GetMessage()); } + + for 
(const auto & error : response.GetResult().GetErrors()) + LOG_WARNING(&Poco::Logger::get("StorageS3"), "Failed to delete {}, error: {}", error.GetKey(), error.GetMessage()); } diff --git a/src/Storages/StorageS3.h b/src/Storages/StorageS3.h index 16e38249595..65c6928906d 100644 --- a/src/Storages/StorageS3.h +++ b/src/Storages/StorageS3.h @@ -24,7 +24,7 @@ namespace Aws::S3 { - class S3Client; + class Client; } namespace DB @@ -66,7 +66,7 @@ public: { public: DisclosedGlobIterator( - const Aws::S3::S3Client & client_, + const S3::Client & client_, const S3::URI & globbed_uri_, ASTPtr query, const Block & virtual_header, @@ -88,7 +88,7 @@ public: { public: explicit KeysIterator( - const Aws::S3::S3Client & client_, + const S3::Client & client_, const std::string & version_id_, const std::vector & keys_, const String & bucket_, @@ -134,7 +134,7 @@ public: UInt64 max_block_size_, const S3Settings::RequestSettings & request_settings_, String compression_hint_, - const std::shared_ptr & client_, + const std::shared_ptr & client_, const String & bucket, const String & version_id, std::shared_ptr file_iterator_, @@ -155,7 +155,7 @@ private: UInt64 max_block_size; S3Settings::RequestSettings request_settings; String compression_hint; - std::shared_ptr client; + std::shared_ptr client; Block sample_block; std::optional format_settings; @@ -287,7 +287,7 @@ public: struct S3Configuration { const S3::URI uri; - std::shared_ptr client; + std::shared_ptr client; S3::AuthSettings auth_settings; S3Settings::RequestSettings request_settings; diff --git a/src/Storages/StorageS3Cluster.cpp b/src/Storages/StorageS3Cluster.cpp index 3ee10113b32..e158aff60a0 100644 --- a/src/Storages/StorageS3Cluster.cpp +++ b/src/Storages/StorageS3Cluster.cpp @@ -8,6 +8,7 @@ #include "Client/Connection.h" #include "Core/QueryProcessingStage.h" #include +#include #include #include #include @@ -32,8 +33,6 @@ #include #include -#include -#include #include #include diff --git a/src/Storages/StorageS3Settings.cpp b/src/Storages/StorageS3Settings.cpp index ee0b1fd88bf..6030b1f81f6 100644 --- a/src/Storages/StorageS3Settings.cpp +++ b/src/Storages/StorageS3Settings.cpp @@ -167,6 +167,7 @@ S3Settings::RequestSettings::RequestSettings(const NamedCollection & collection) max_connections = collection.getOrDefault("max_connections", max_connections); list_object_keys_size = collection.getOrDefault("list_object_keys_size", list_object_keys_size); allow_head_object_request = collection.getOrDefault("allow_head_object_request", allow_head_object_request); + throw_on_zero_files_match = collection.getOrDefault("throw_on_zero_files_match", throw_on_zero_files_match); } S3Settings::RequestSettings::RequestSettings( @@ -182,6 +183,7 @@ S3Settings::RequestSettings::RequestSettings( check_objects_after_upload = config.getBool(key + "check_objects_after_upload", settings.s3_check_objects_after_upload); list_object_keys_size = config.getUInt64(key + "list_object_keys_size", settings.s3_list_object_keys_size); allow_head_object_request = config.getBool(key + "allow_head_object_request", allow_head_object_request); + throw_on_zero_files_match = config.getBool(key + "throw_on_zero_files_match", settings.s3_throw_on_zero_files_match); /// NOTE: it would be better to reuse old throttlers to avoid losing token bucket state on every config reload, /// which could lead to exceeding limit for short time. But it is good enough unless very high `burst` values are used. 
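For reference, the hunk below follows the usual "update only when changed" convention for propagating query-level settings into per-request settings. A minimal sketch of that convention, with simplified stand-in types chosen for illustration rather than the actual ClickHouse setting classes:

#include <iostream>

/// Simplified stand-in for a ClickHouse setting field: a value plus a
/// "changed" flag recording whether the user overrode the default.
struct BoolSetting
{
    bool value = false;
    bool changed = false;
};

struct RequestSettingsSketch
{
    bool throw_on_zero_files_match = false;

    /// When if_changed is set (an incremental refresh), copy only values the
    /// user explicitly changed, so a refresh cannot silently reset overrides.
    void updateFrom(const BoolSetting & setting, bool if_changed)
    {
        if (!if_changed || setting.changed)
            throw_on_zero_files_match = setting.value;
    }
};

int main()
{
    RequestSettingsSketch request_settings;
    BoolSetting user_override{/*value=*/true, /*changed=*/true};
    request_settings.updateFrom(user_override, /*if_changed=*/true);
    std::cout << request_settings.throw_on_zero_files_match << '\n'; /// prints 1
    return 0;
}
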
@@ -231,6 +233,9 @@ void S3Settings::RequestSettings::updateFromSettingsImpl(const Settings & settin if ((!if_changed || settings.s3_max_put_rps.changed || settings.s3_max_put_burst.changed) && settings.s3_max_put_rps) put_request_throttler = std::make_shared( settings.s3_max_put_rps, settings.s3_max_put_burst ? settings.s3_max_put_burst : Throttler::default_burst_seconds * settings.s3_max_put_rps); + + if (!if_changed || settings.s3_throw_on_zero_files_match.changed) + throw_on_zero_files_match = settings.s3_throw_on_zero_files_match; } void S3Settings::RequestSettings::updateFromSettings(const Settings & settings) diff --git a/src/Storages/StorageS3Settings.h b/src/Storages/StorageS3Settings.h index 61da0a37f62..76dbeee21d2 100644 --- a/src/Storages/StorageS3Settings.h +++ b/src/Storages/StorageS3Settings.h @@ -77,6 +77,8 @@ struct S3Settings /// See https://github.com/aws/aws-sdk-cpp/issues/1558 and also the function S3ErrorMarshaller::ExtractRegion() for more information. bool allow_head_object_request = true; + bool throw_on_zero_files_match = false; + const PartUploadSettings & getUploadSettings() const { return upload_settings; } RequestSettings() = default; diff --git a/src/Storages/System/StorageSystemBackups.cpp b/src/Storages/System/StorageSystemBackups.cpp index 268cc9d0963..1f448bcbdbc 100644 --- a/src/Storages/System/StorageSystemBackups.cpp +++ b/src/Storages/System/StorageSystemBackups.cpp @@ -18,14 +18,16 @@ NamesAndTypesList StorageSystemBackups::getNamesAndTypes() {"id", std::make_shared()}, {"name", std::make_shared()}, {"status", std::make_shared(getBackupStatusEnumValues())}, - {"num_files", std::make_shared()}, - {"num_processed_files", std::make_shared()}, - {"processed_files_size", std::make_shared()}, - {"uncompressed_size", std::make_shared()}, - {"compressed_size", std::make_shared()}, {"error", std::make_shared()}, {"start_time", std::make_shared()}, {"end_time", std::make_shared()}, + {"num_files", std::make_shared()}, + {"total_size", std::make_shared()}, + {"num_entries", std::make_shared()}, + {"uncompressed_size", std::make_shared()}, + {"compressed_size", std::make_shared()}, + {"files_read", std::make_shared()}, + {"bytes_read", std::make_shared()}, }; return names_and_types; } @@ -37,28 +39,32 @@ void StorageSystemBackups::fillData(MutableColumns & res_columns, ContextPtr con auto & column_id = assert_cast(*res_columns[column_index++]); auto & column_name = assert_cast(*res_columns[column_index++]); auto & column_status = assert_cast(*res_columns[column_index++]); - auto & column_num_files = assert_cast(*res_columns[column_index++]); - auto & column_num_processed_files = assert_cast(*res_columns[column_index++]); - auto & column_processed_files_size = assert_cast(*res_columns[column_index++]); - auto & column_uncompressed_size = assert_cast(*res_columns[column_index++]); - auto & column_compressed_size = assert_cast(*res_columns[column_index++]); auto & column_error = assert_cast(*res_columns[column_index++]); auto & column_start_time = assert_cast(*res_columns[column_index++]); auto & column_end_time = assert_cast(*res_columns[column_index++]); + auto & column_num_files = assert_cast(*res_columns[column_index++]); + auto & column_total_size = assert_cast(*res_columns[column_index++]); + auto & column_num_entries = assert_cast(*res_columns[column_index++]); + auto & column_uncompressed_size = assert_cast(*res_columns[column_index++]); + auto & column_compressed_size = assert_cast(*res_columns[column_index++]); + auto & column_num_read_files = 
assert_cast(*res_columns[column_index++]); + auto & column_num_read_bytes = assert_cast(*res_columns[column_index++]); auto add_row = [&](const BackupsWorker::Info & info) { column_id.insertData(info.id.data(), info.id.size()); column_name.insertData(info.name.data(), info.name.size()); column_status.insertValue(static_cast(info.status)); - column_num_files.insertValue(info.num_files); - column_num_processed_files.insertValue(info.num_processed_files); - column_processed_files_size.insertValue(info.processed_files_size); - column_uncompressed_size.insertValue(info.uncompressed_size); - column_compressed_size.insertValue(info.compressed_size); column_error.insertData(info.error_message.data(), info.error_message.size()); column_start_time.insertValue(static_cast(std::chrono::system_clock::to_time_t(info.start_time))); column_end_time.insertValue(static_cast(std::chrono::system_clock::to_time_t(info.end_time))); + column_num_files.insertValue(info.num_files); + column_total_size.insertValue(info.total_size); + column_num_entries.insertValue(info.num_entries); + column_uncompressed_size.insertValue(info.uncompressed_size); + column_compressed_size.insertValue(info.compressed_size); + column_num_read_files.insertValue(info.num_read_files); + column_num_read_bytes.insertValue(info.num_read_bytes); }; for (const auto & entry : context->getBackupsWorker().getAllInfos()) diff --git a/src/Storages/System/StorageSystemBuildOptions.cpp.in b/src/Storages/System/StorageSystemBuildOptions.cpp.in index fd4e478004f..3465e47449b 100644 --- a/src/Storages/System/StorageSystemBuildOptions.cpp.in +++ b/src/Storages/System/StorageSystemBuildOptions.cpp.in @@ -51,6 +51,7 @@ const char * auto_config_build[] "USE_ROCKSDB", "@USE_ROCKSDB@", "USE_NURAFT", "@USE_NURAFT@", "USE_NLP", "@USE_NLP@", + "USE_LIBURING", "@USE_LIBURING@", "USE_SQLITE", "@USE_SQLITE@", "USE_LIBPQXX", "@USE_LIBPQXX@", "USE_AZURE_BLOB_STORAGE", "@USE_AZURE_BLOB_STORAGE@", diff --git a/src/Storages/System/StorageSystemProcesses.cpp b/src/Storages/System/StorageSystemProcesses.cpp index 213e3ed5dc0..e053f2e63ff 100644 --- a/src/Storages/System/StorageSystemProcesses.cpp +++ b/src/Storages/System/StorageSystemProcesses.cpp @@ -61,6 +61,7 @@ NamesAndTypesList StorageSystemProcesses::getNamesAndTypes() {"memory_usage", std::make_shared()}, {"peak_memory_usage", std::make_shared()}, {"query", std::make_shared()}, + {"query_kind", std::make_shared()}, {"thread_ids", std::make_shared(std::make_shared())}, {"ProfileEvents", std::make_shared(std::make_shared(), std::make_shared())}, @@ -119,7 +120,7 @@ void StorageSystemProcesses::fillData(MutableColumns & res_columns, ContextPtr c res_columns[i++]->insert(process.client_info.quota_key); res_columns[i++]->insert(process.client_info.distributed_depth); - res_columns[i++]->insert(static_cast(process.elapsed_microseconds) / 100000.0); + res_columns[i++]->insert(static_cast(process.elapsed_microseconds) / 1'000'000.0); res_columns[i++]->insert(process.is_cancelled); res_columns[i++]->insert(process.is_all_data_sent); res_columns[i++]->insert(process.read_rows); @@ -130,6 +131,7 @@ void StorageSystemProcesses::fillData(MutableColumns & res_columns, ContextPtr c res_columns[i++]->insert(process.memory_usage); res_columns[i++]->insert(process.peak_memory_usage); res_columns[i++]->insert(process.query); + res_columns[i++]->insert(magic_enum::enum_name(process.query_kind)); { Array threads_array; diff --git a/src/Storages/System/StorageSystemQueryResultCache.cpp b/src/Storages/System/StorageSystemQueryCache.cpp 
similarity index 68% rename from src/Storages/System/StorageSystemQueryResultCache.cpp rename to src/Storages/System/StorageSystemQueryCache.cpp index cb6349b6d47..2de8e4594b9 100644 --- a/src/Storages/System/StorageSystemQueryResultCache.cpp +++ b/src/Storages/System/StorageSystemQueryCache.cpp @@ -1,15 +1,15 @@ -#include "StorageSystemQueryResultCache.h" +#include "StorageSystemQueryCache.h" #include #include #include -#include +#include #include namespace DB { -NamesAndTypesList StorageSystemQueryResultCache::getNamesAndTypes() +NamesAndTypesList StorageSystemQueryCache::getNamesAndTypes() { return { {"query", std::make_shared()}, @@ -21,23 +21,23 @@ NamesAndTypesList StorageSystemQueryResultCache::getNamesAndTypes() }; } -StorageSystemQueryResultCache::StorageSystemQueryResultCache(const StorageID & table_id_) +StorageSystemQueryCache::StorageSystemQueryCache(const StorageID & table_id_) : IStorageSystemOneBlock(table_id_) { } -void StorageSystemQueryResultCache::fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo &) const +void StorageSystemQueryCache::fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo &) const { - auto query_result_cache = context->getQueryResultCache(); + auto query_cache = context->getQueryCache(); - if (!query_result_cache) + if (!query_cache) return; const String & username = context->getUserName(); - std::lock_guard lock(query_result_cache->mutex); + std::lock_guard lock(query_cache->mutex); - for (const auto & [key, result] : query_result_cache->cache) + for (const auto & [key, result] : query_cache->cache) { /// Showing other user's queries is considered a security risk if (key.username.has_value() && key.username != username) diff --git a/src/Storages/System/StorageSystemQueryResultCache.h b/src/Storages/System/StorageSystemQueryCache.h similarity index 52% rename from src/Storages/System/StorageSystemQueryResultCache.h rename to src/Storages/System/StorageSystemQueryCache.h index 4862878a31a..5ff5f0a0454 100644 --- a/src/Storages/System/StorageSystemQueryResultCache.h +++ b/src/Storages/System/StorageSystemQueryCache.h @@ -5,12 +5,12 @@ namespace DB { -class StorageSystemQueryResultCache final : public IStorageSystemOneBlock +class StorageSystemQueryCache final : public IStorageSystemOneBlock { public: - explicit StorageSystemQueryResultCache(const StorageID & table_id_); + explicit StorageSystemQueryCache(const StorageID & table_id_); - std::string getName() const override { return "SystemQueryResultCache"; } + std::string getName() const override { return "SystemQueryCache"; } static NamesAndTypesList getNamesAndTypes(); diff --git a/src/Storages/System/StorageSystemReplicationQueue.cpp b/src/Storages/System/StorageSystemReplicationQueue.cpp index 8acd192eac4..fd9f874052d 100644 --- a/src/Storages/System/StorageSystemReplicationQueue.cpp +++ b/src/Storages/System/StorageSystemReplicationQueue.cpp @@ -38,6 +38,7 @@ NamesAndTypesList StorageSystemReplicationQueue::getNamesAndTypes() { "is_currently_executing", std::make_shared() }, { "num_tries", std::make_shared() }, { "last_exception", std::make_shared() }, + { "last_exception_time", std::make_shared() }, { "last_attempt_time", std::make_shared() }, { "num_postponed", std::make_shared() }, { "postpone_reason", std::make_shared() }, @@ -141,7 +142,8 @@ void StorageSystemReplicationQueue::fillData(MutableColumns & res_columns, Conte res_columns[col_num++]->insert(entry.detach); res_columns[col_num++]->insert(entry.currently_executing); 
res_columns[col_num++]->insert(entry.num_tries); - res_columns[col_num++]->insert(entry.exception ? getExceptionMessage(entry.exception, false) : ""); + res_columns[col_num++]->insert(entry.exception ? getExceptionMessage(entry.exception, true) : ""); + res_columns[col_num++]->insert(UInt64(entry.last_exception_time)); res_columns[col_num++]->insert(UInt64(entry.last_attempt_time)); res_columns[col_num++]->insert(entry.num_postponed); res_columns[col_num++]->insert(entry.postpone_reason); diff --git a/src/Storages/System/attachSystemTables.cpp b/src/Storages/System/attachSystemTables.cpp index eeb08d7e3d4..07db151069f 100644 --- a/src/Storages/System/attachSystemTables.cpp +++ b/src/Storages/System/attachSystemTables.cpp @@ -73,7 +73,7 @@ #include #include #include -#include <Storages/System/StorageSystemQueryResultCache.h> +#include <Storages/System/StorageSystemQueryCache.h> #include #include #include @@ -176,7 +176,7 @@ void attachSystemTablesServer(ContextPtr context, IDatabase & system_database, b attach<StorageSystemPartMovesBetweenShards>(context, system_database, "part_moves_between_shards"); attach<StorageSystemAsynchronousInserts>(context, system_database, "asynchronous_inserts"); attach<StorageSystemFilesystemCache>(context, system_database, "filesystem_cache"); - attach<StorageSystemQueryResultCache>(context, system_database, "query_result_cache"); + attach<StorageSystemQueryCache>(context, system_database, "query_cache"); attach<StorageSystemRemoteDataPaths>(context, system_database, "remote_data_paths"); attach<StorageSystemCertificates>(context, system_database, "certificates"); attach<StorageSystemNamedCollections>(context, system_database, "named_collections"); diff --git a/src/Storages/getStructureOfRemoteTable.cpp b/src/Storages/getStructureOfRemoteTable.cpp index 0721cfaa9c4..b2737249166 100644 --- a/src/Storages/getStructureOfRemoteTable.cpp +++ b/src/Storages/getStructureOfRemoteTable.cpp @@ -79,7 +79,7 @@ ColumnsDescription getStructureOfRemoteTableInShard( ParserExpression expr_parser; - while (Block current = executor.read()) + while (Block current = executor.readBlock()) { ColumnPtr name = current.getByName("name").column; ColumnPtr type = current.getByName("type").column; @@ -187,7 +187,7 @@ ColumnsDescriptionByShardNum getExtendedObjectsOfRemoteTables( executor.setMainTable(remote_table_id); ColumnsDescription res; - while (auto block = executor.read()) + while (auto block = executor.readBlock()) { const auto & name_col = *block.getByName("name").column; const auto & type_col = *block.getByName("type").column; diff --git a/src/TableFunctions/TableFunctionFormat.cpp b/src/TableFunctions/TableFunctionFormat.cpp index f2a92b41560..1e37775f574 100644 --- a/src/TableFunctions/TableFunctionFormat.cpp +++ b/src/TableFunctions/TableFunctionFormat.cpp @@ -4,6 +4,7 @@ #include #include +#include <Interpreters/parseColumnsListForTableFunction.h> #include @@ -38,23 +39,29 @@ void TableFunctionFormat::parseArguments(const ASTPtr & ast_function, ContextPtr ASTs & args = args_func.at(0)->children; - if (args.size() != 2) - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Table function '{}' requires 2 arguments: format and data", getName()); + if (args.size() != 2 && args.size() != 3) + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Table function '{}' requires 2 or 3 arguments: format, [structure], data", getName()); for (auto & arg : args) arg = evaluateConstantExpressionOrIdentifierAsLiteral(arg, context); format = checkAndGetLiteralArgument<String>(args[0], "format"); - data = checkAndGetLiteralArgument<String>(args[1], "data"); + data = checkAndGetLiteralArgument<String>(args.back(), "data"); + if (args.size() == 3) + structure = checkAndGetLiteralArgument<String>(args[1], "structure"); } ColumnsDescription TableFunctionFormat::getActualTableStructure(ContextPtr context) const { - ReadBufferIterator read_buffer_iterator = [&](ColumnsDescription &) + if
(structure == "auto") { - return std::make_unique<ReadBufferFromString>(data); - }; - return readSchemaFromFormat(format, std::nullopt, read_buffer_iterator, false, context); + ReadBufferIterator read_buffer_iterator = [&](ColumnsDescription &) + { + return std::make_unique<ReadBufferFromString>(data); + }; + return readSchemaFromFormat(format, std::nullopt, read_buffer_iterator, false, context); + } + return parseColumnsListFromString(structure, context); } Block TableFunctionFormat::parseData(ColumnsDescription columns, ContextPtr context) const diff --git a/src/TableFunctions/TableFunctionFormat.h b/src/TableFunctions/TableFunctionFormat.h index c6db322343b..d64ab14cb64 100644 --- a/src/TableFunctions/TableFunctionFormat.h +++ b/src/TableFunctions/TableFunctionFormat.h @@ -28,6 +28,7 @@ private: String format; String data; + String structure = "auto"; }; } diff --git a/src/configure_config.cmake b/src/configure_config.cmake index 0e074b5d699..38ea803f5e9 100644 --- a/src/configure_config.cmake +++ b/src/configure_config.cmake @@ -132,6 +132,9 @@ if (TARGET ch_contrib::parquet) set(USE_ARROW 1) set(USE_ORC 1) endif() +if (TARGET ch_contrib::liburing) + set(USE_LIBURING 1) +endif () if (TARGET ch_contrib::protobuf) set(USE_PROTOBUF 1) endif() diff --git a/tests/ci/build_download_helper.py b/tests/ci/build_download_helper.py index bd96ea04c4c..c6136015316 100644 --- a/tests/ci/build_download_helper.py +++ b/tests/ci/build_download_helper.py @@ -5,7 +5,8 @@ import logging import os import sys import time -from typing import Any, List, Optional +from pathlib import Path +from typing import Any, Callable, List, Optional import requests # type: ignore @@ -56,21 +57,29 @@ def read_build_urls(build_name: str, reports_path: str) -> List[str]: return [] -def download_build_with_progress(url, path): +def download_build_with_progress(url: str, path: Path) -> None: logging.info("Downloading from %s to temp path %s", url, path) for i in range(DOWNLOAD_RETRIES_COUNT): try: + response = get_with_retries(url, retries=1, stream=True) + total_length = int(response.headers.get("content-length", 0)) + if path.is_file() and total_length and path.stat().st_size == total_length: + logging.info( + "The file %s already exists and has a proper size %s", + path, + total_length, + ) + return + with open(path, "wb") as f: - response = get_with_retries(url, retries=1, stream=True) - total_length = response.headers.get("content-length") - if total_length is None or int(total_length) == 0: + if total_length == 0: logging.info( "No content-length, will download file without progress" ) f.write(response.content) else: dl = 0 - total_length = int(total_length) + logging.info("Content length is %ld bytes", total_length) for data in response.iter_content(chunk_size=4096): dl += len(data) @@ -99,12 +108,14 @@ def download_build_with_progress(url, path): logging.info("Downloading finished") -def download_builds(result_path, build_urls, filter_fn): +def download_builds( + result_path: str, build_urls: List[str], filter_fn: Callable[[str], bool] +) -> None: for url in build_urls: if filter_fn(url): fname = os.path.basename(url.replace("%2B", "+").replace("%20", " ")) logging.info("Will download %s to %s", fname, result_path) - download_build_with_progress(url, os.path.join(result_path, fname)) + download_build_with_progress(url, Path(result_path) / fname) def download_builds_filter( diff --git a/tests/ci/cherry_pick.py b/tests/ci/cherry_pick.py index a7af807c57c..3e10e4a5698 100644 --- a/tests/ci/cherry_pick.py +++ b/tests/ci/cherry_pick.py @@ -208,6 +208,8 @@ Merge it
only if you intend to backport changes to the target branch, otherwise self.cherrypick_pr.add_to_labels(Labels.CHERRYPICK) self.cherrypick_pr.add_to_labels(Labels.DO_NOT_TEST) self._assign_new_pr(self.cherrypick_pr) + # update the cherry-pick PR to get the state for PR.mergeable + self.cherrypick_pr.update() def create_backport(self): assert self.cherrypick_pr is not None diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py index c77acfb679f..05742acb314 100644 --- a/tests/ci/ci_config.py +++ b/tests/ci/ci_config.py @@ -182,6 +182,12 @@ CI_CONFIG = { "tests_config": { # required_build - build name for artifacts # force_tests - force success status for tests + "Install packages (amd64)": { + "required_build": "package_release", + }, + "Install packages (arm64)": { + "required_build": "package_aarch64", + }, "Stateful tests (asan)": { "required_build": "package_asan", }, @@ -209,6 +215,26 @@ CI_CONFIG = { "Stateful tests (release, DatabaseReplicated)": { "required_build": "package_release", }, + # Stateful tests for parallel replicas + "Stateful tests (release, ParallelReplicas)": { + "required_build": "package_release", + }, + "Stateful tests (debug, ParallelReplicas)": { + "required_build": "package_debug", + }, + "Stateful tests (asan, ParallelReplicas)": { + "required_build": "package_asan", + }, + "Stateful tests (msan, ParallelReplicas)": { + "required_build": "package_msan", + }, + "Stateful tests (ubsan, ParallelReplicas)": { + "required_build": "package_ubsan", + }, + "Stateful tests (tsan, ParallelReplicas)": { + "required_build": "package_tsan", + }, + # End stateful tests for parallel replicas "Stateless tests (asan)": { "required_build": "package_asan", }, diff --git a/tests/ci/clickhouse_helper.py b/tests/ci/clickhouse_helper.py index f914bb42d99..d60a9e6afd1 100644 --- a/tests/ci/clickhouse_helper.py +++ b/tests/ci/clickhouse_helper.py @@ -183,6 +183,11 @@ def prepare_tests_results_for_clickhouse( current_row["test_duration_ms"] = int(test_time * 1000) current_row["test_name"] = test_name current_row["test_status"] = test_status + if test_result.raw_logs: + # Protect from too big blobs that contain garbage + current_row["test_context_raw"] = test_result.raw_logs[: 32 * 1024] + else: + current_row["test_context_raw"] = "" result.append(current_row) return result diff --git a/tests/ci/download_binary.py b/tests/ci/download_binary.py index b95c86aa0bd..c57780daa36 100755 --- a/tests/ci/download_binary.py +++ b/tests/ci/download_binary.py @@ -6,6 +6,7 @@ This file is needed to avoid circular import build_download_helper.py <=> env_helpe import argparse import logging import os +from pathlib import Path from build_download_helper import download_build_with_progress from ci_config import CI_CONFIG, BuildConfig @@ -57,14 +58,15 @@ def parse_args() -> argparse.Namespace: def main(): logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s") args = parse_args() - os.makedirs(TEMP_PATH, exist_ok=True) + temp_path = Path(TEMP_PATH) + temp_path.mkdir(parents=True, exist_ok=True) for build in args.build_names: # check if it's in CI_CONFIG config = CI_CONFIG["build_config"][build] # type: BuildConfig if args.rename: - path = os.path.join(TEMP_PATH, f"clickhouse-{config['static_binary_name']}") + path = temp_path / f"clickhouse-{config['static_binary_name']}" else: - path = os.path.join(TEMP_PATH, "clickhouse") + path = temp_path / "clickhouse" url = S3_ARTIFACT_DOWNLOAD_TEMPLATE.format( pr_or_release=f"{args.version.major}.{args.version.minor}", diff --git
a/tests/ci/functional_test_check.py b/tests/ci/functional_test_check.py index cf5f53afbf9..c33454d1d90 100644 --- a/tests/ci/functional_test_check.py +++ b/tests/ci/functional_test_check.py @@ -48,7 +48,8 @@ def get_additional_envs(check_name, run_by_hash_num, run_by_hash_total): result.append("USE_DATABASE_ORDINARY=1") if "wide parts enabled" in check_name: result.append("USE_POLYMORPHIC_PARTS=1") - + if "ParallelReplicas" in check_name: + result.append("USE_PARALLEL_REPLICAS=1") if "s3 storage" in check_name: result.append("USE_S3_STORAGE_FOR_MERGE_TREE=1") @@ -355,16 +356,34 @@ def main(): print(f"::notice:: {check_name} Report url: {report_url}") if args.post_commit_status == "commit_status": - post_commit_status( - gh, pr_info.sha, check_name_with_group, description, state, report_url - ) + if "parallelreplicas" in check_name.lower(): + post_commit_status( + gh, + pr_info.sha, + check_name_with_group, + description, + "success", + report_url, + ) + else: + post_commit_status( + gh, pr_info.sha, check_name_with_group, description, state, report_url + ) elif args.post_commit_status == "file": - post_commit_status_to_file( - post_commit_path, - description, - state, - report_url, - ) + if "parallelreplicas" in check_name.lower(): + post_commit_status_to_file( + post_commit_path, + description, + "success", + report_url, + ) + else: + post_commit_status_to_file( + post_commit_path, + description, + state, + report_url, + ) else: raise Exception( f'Unknown post_commit_status option "{args.post_commit_status}"' @@ -382,7 +401,11 @@ def main(): ch_helper.insert_events_into(db="default", table="checks", events=prepared_events) if state != "success": - if FORCE_TESTS_LABEL in pr_info.labels: + # Parallel replicas are always green for now + if ( + FORCE_TESTS_LABEL in pr_info.labels + or "parallelreplicas" in check_name.lower() + ): print(f"'{FORCE_TESTS_LABEL}' enabled, will report success") else: sys.exit(1) diff --git a/tests/ci/install_check.py b/tests/ci/install_check.py new file mode 100644 index 00000000000..1444759cea0 --- /dev/null +++ b/tests/ci/install_check.py @@ -0,0 +1,315 @@ +#!/usr/bin/env python3 + +import argparse + +import atexit +import logging +import sys +import subprocess +from pathlib import Path + +from typing import Dict + +from github import Github + +from build_download_helper import download_builds_filter +from clickhouse_helper import ( + ClickHouseHelper, + mark_flaky_tests, + prepare_tests_results_for_clickhouse, +) +from commit_status_helper import post_commit_status, update_mergeable_check +from docker_pull_helper import get_image_with_version, DockerImage +from env_helper import CI, TEMP_PATH as TEMP, REPORTS_PATH +from get_robot_token import get_best_robot_token +from pr_info import PRInfo +from report import TestResults, TestResult +from rerun_helper import RerunHelper +from s3_helper import S3Helper +from stopwatch import Stopwatch +from tee_popen import TeePopen +from upload_result_helper import upload_results + + +RPM_IMAGE = "clickhouse/install-rpm-test" +DEB_IMAGE = "clickhouse/install-deb-test" +TEMP_PATH = Path(TEMP) +SUCCESS = "success" +FAILURE = "failure" + + +def prepare_test_scripts(): + server_test = r"""#!/bin/bash +systemctl start clickhouse-server +clickhouse-client -q 'SELECT version()'""" + keeper_test = r"""#!/bin/bash +systemctl start clickhouse-keeper +for i in {1..20}; do + echo wait for clickhouse-keeper to be up + > /dev/tcp/127.0.0.1/9181 2>/dev/null && break || sleep 1 +done +for i in {1..5}; do + echo wait for clickhouse-keeper
to answer on mntr request + exec 13<>/dev/tcp/127.0.0.1/9181 + echo mntr >&13 + cat <&13 | grep zk_version && break || sleep 1 + exec 13>&- +done +exec 13>&-""" + binary_test = r"""#!/bin/bash +chmod +x /packages/clickhouse +/packages/clickhouse install +clickhouse-server start --daemon +for i in {1..5}; do + clickhouse-client -q 'SELECT version()' && break || sleep 1 +done +clickhouse-keeper start --daemon +for i in {1..20}; do + echo wait for clickhouse-keeper to be up + > /dev/tcp/127.0.0.1/9181 2>/dev/null && break || sleep 1 +done +for i in {1..5}; do + echo wait for clickhouse-keeper to answer on mntr request + exec 13<>/dev/tcp/127.0.0.1/9181 + echo mntr >&13 + cat <&13 | grep zk_version && break || sleep 1 + exec 13>&- +done +exec 13>&-""" + (TEMP_PATH / "server_test.sh").write_text(server_test, encoding="utf-8") + (TEMP_PATH / "keeper_test.sh").write_text(keeper_test, encoding="utf-8") + (TEMP_PATH / "binary_test.sh").write_text(binary_test, encoding="utf-8") + + +def test_install_deb(image: DockerImage) -> TestResults: + tests = { + "Install server deb": r"""#!/bin/bash -ex +apt-get install /packages/clickhouse-{server,client,common}*deb +bash -ex /packages/server_test.sh""", + "Install keeper deb": r"""#!/bin/bash -ex +apt-get install /packages/clickhouse-keeper*deb +bash -ex /packages/keeper_test.sh""", + "Install clickhouse binary in deb": r"bash -ex /packages/binary_test.sh", + } + return test_install(image, tests) + + +def test_install_rpm(image: DockerImage) -> TestResults: + # FIXME: I couldn't find why Type=notify is broken in centos:8 + # systemd just ignores the watchdog completely + tests = { + "Install server rpm": r"""#!/bin/bash -ex +yum localinstall --disablerepo=* -y /packages/clickhouse-{server,client,common}*rpm +echo CLICKHOUSE_WATCHDOG_ENABLE=0 > /etc/default/clickhouse-server +bash -ex /packages/server_test.sh""", + "Install keeper rpm": r"""#!/bin/bash -ex +yum localinstall --disablerepo=* -y /packages/clickhouse-keeper*rpm +bash -ex /packages/keeper_test.sh""", + "Install clickhouse binary in rpm": r"bash -ex /packages/binary_test.sh", + } + return test_install(image, tests) + + +def test_install_tgz(image: DockerImage) -> TestResults: + # FIXME: I couldn't find why Type=notify is broken in centos:8 + # systemd just ignores the watchdog completely + tests = { + f"Install server tgz in {image.name}": r"""#!/bin/bash -ex +[ -f /etc/debian_version ] && CONFIGURE=configure || CONFIGURE= +for pkg in /packages/clickhouse-{common,client,server}*tgz; do + package=${pkg%-*} + package=${package##*/} + tar xf "$pkg" + "/$package/install/doinst.sh" $CONFIGURE +done +[ -f /etc/yum.conf ] && echo CLICKHOUSE_WATCHDOG_ENABLE=0 > /etc/default/clickhouse-server +bash -ex /packages/server_test.sh""", + f"Install keeper tgz in {image.name}": r"""#!/bin/bash -ex +[ -f /etc/debian_version ] && CONFIGURE=configure || CONFIGURE= +for pkg in /packages/clickhouse-keeper*tgz; do + package=${pkg%-*} + package=${package##*/} + tar xf "$pkg" + "/$package/install/doinst.sh" $CONFIGURE +done +bash -ex /packages/keeper_test.sh""", + } + return test_install(image, tests) + + +def test_install(image: DockerImage, tests: Dict[str, str]) -> TestResults: + test_results = [] # type: TestResults + for name, command in tests.items(): + stopwatch = Stopwatch() + container_name = name.lower().replace(" ", "_").replace("/", "_") + log_file = TEMP_PATH / f"{container_name}.log" + run_command = ( + f"docker run --rm --privileged --detach --cap-add=SYS_PTRACE " + f"--volume={TEMP_PATH}:/packages
{image}" ) + logging.info("Running docker container: `%s`", run_command) + container_id = subprocess.check_output( + run_command, shell=True, encoding="utf-8" + ).strip() + (TEMP_PATH / "install.sh").write_text(command) + install_command = f"docker exec {container_id} bash -ex /packages/install.sh" + with TeePopen(install_command, log_file) as process: + retcode = process.wait() + if retcode == 0: + status = SUCCESS + else: + status = FAILURE + + subprocess.check_call(f"docker kill -s 9 {container_id}", shell=True) + test_results.append( + TestResult(name, status, stopwatch.duration_seconds, [log_file]) + ) + + return test_results + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + description="The script to check that the packages can be installed", + ) + + parser.add_argument( + "check_name", + help="check name, used to download the packages", + ) + parser.add_argument("--download", default=True, help=argparse.SUPPRESS) + parser.add_argument( + "--no-download", + dest="download", + action="store_false", + default=argparse.SUPPRESS, + help="if set, the packages won't be downloaded, useful for debug", + ) + parser.add_argument("--deb", default=True, help=argparse.SUPPRESS) + parser.add_argument( + "--no-deb", + dest="deb", + action="store_false", + default=argparse.SUPPRESS, + help="if set, the deb packages won't be checked", + ) + parser.add_argument("--rpm", default=True, help=argparse.SUPPRESS) + parser.add_argument( + "--no-rpm", + dest="rpm", + action="store_false", + default=argparse.SUPPRESS, + help="if set, the rpm packages won't be checked", + ) + parser.add_argument("--tgz", default=True, help=argparse.SUPPRESS) + parser.add_argument( + "--no-tgz", + dest="tgz", + action="store_false", + default=argparse.SUPPRESS, + help="if set, the tgz packages won't be checked", + ) + + return parser.parse_args() + + +def main(): + logging.basicConfig(level=logging.INFO) + + stopwatch = Stopwatch() + + args = parse_args() + + TEMP_PATH.mkdir(parents=True, exist_ok=True) + + pr_info = PRInfo() + + if CI: + gh = Github(get_best_robot_token(), per_page=100) + atexit.register(update_mergeable_check, gh, pr_info, args.check_name) + + rerun_helper = RerunHelper(gh, pr_info, args.check_name) + if rerun_helper.is_already_finished_by_status(): + logging.info( + "Check is already finished according to github status, exiting" + ) + sys.exit(0) + + docker_images = { + name: get_image_with_version(REPORTS_PATH, name) + for name in (RPM_IMAGE, DEB_IMAGE) + } + prepare_test_scripts() + + if args.download: + + def filter_artifacts(path: str) -> bool: + return ( + path.endswith(".deb") + or path.endswith(".rpm") + or path.endswith(".tgz") + or path.endswith("/clickhouse") + ) + + download_builds_filter( + args.check_name, REPORTS_PATH, TEMP_PATH, filter_artifacts + ) + + test_results = [] # type: TestResults + if args.deb: + test_results.extend(test_install_deb(docker_images[DEB_IMAGE])) + if args.rpm: + test_results.extend(test_install_rpm(docker_images[RPM_IMAGE])) + if args.tgz: + test_results.extend(test_install_tgz(docker_images[DEB_IMAGE])) + test_results.extend(test_install_tgz(docker_images[RPM_IMAGE])) + + state = SUCCESS + description = "Packages installed successfully" + if FAILURE in (result.status for result in test_results): + state = FAILURE + description = "Failed to install packages: " + ", ".join( + result.name for result in test_results if result.status == FAILURE + ) + + s3_helper = S3Helper() + + report_url = upload_results(
s3_helper, + pr_info.number, + pr_info.sha, + test_results, + [], + args.check_name, + ) + print(f"::notice ::Report url: {report_url}") + if not CI: + return + + ch_helper = ClickHouseHelper() + mark_flaky_tests(ch_helper, args.check_name, test_results) + + if len(description) >= 140: + description = description[:136] + "..." + + post_commit_status(gh, pr_info.sha, args.check_name, description, state, report_url) + + prepared_events = prepare_tests_results_for_clickhouse( + pr_info, + test_results, + state, + stopwatch.duration_seconds, + stopwatch.start_time_str, + report_url, + args.check_name, + ) + + ch_helper.insert_events_into(db="default", table="checks", events=prepared_events) + + if state == FAILURE: + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/tests/ci/report.py b/tests/ci/report.py index da04411632d..947fb33d905 100644 --- a/tests/ci/report.py +++ b/tests/ci/report.py @@ -211,7 +211,7 @@ def read_test_results(results_path: Path, with_raw_logs: bool = True) -> TestRes name = line[0] status = line[1] time = None - if len(line) >= 3 and line[2]: + if len(line) >= 3 and line[2] and line[2] != "\\N": # The value can be empty, but when it's not, # it's the time spent on the test try: @@ -224,7 +224,10 @@ def read_test_results(results_path: Path, with_raw_logs: bool = True) -> TestRes # The value can be empty, but when it's not, # the 4th value is a pythonic list, e.g. ['file1', 'file2'] if with_raw_logs: - result.set_raw_logs(line[3]) + # Python does not support TSV, so we unescape manually + result.set_raw_logs( + line[3].replace("\\t", "\t").replace("\\n", "\n") + ) else: result.set_log_files(line[3]) diff --git a/tests/ci/stress_check.py b/tests/ci/stress_check.py index c8edf44b16b..fb38969cb23 100644 --- a/tests/ci/stress_check.py +++ b/tests/ci/stress_check.py @@ -95,7 +95,7 @@ def process_results( try: results_path = Path(result_folder) / "test_results.tsv" - test_results = read_test_results(results_path, False) + test_results = read_test_results(results_path, True) if len(test_results) == 0: raise Exception("Empty results") except Exception as e: diff --git a/tests/ci/unit_tests_check.py b/tests/ci/unit_tests_check.py index 915a77f3d48..edc096908f4 100644 --- a/tests/ci/unit_tests_check.py +++ b/tests/ci/unit_tests_check.py @@ -108,13 +108,10 @@ def main(): stopwatch = Stopwatch() - temp_path = TEMP_PATH - reports_path = REPORTS_PATH - check_name = sys.argv[1] - if not os.path.exists(temp_path): - os.makedirs(temp_path) + if not os.path.exists(TEMP_PATH): + os.makedirs(TEMP_PATH) pr_info = PRInfo() @@ -127,14 +124,14 @@ def main(): logging.info("Check is already finished according to github status, exiting") sys.exit(0) - docker_image = get_image_with_version(reports_path, IMAGE_NAME) + docker_image = get_image_with_version(REPORTS_PATH, IMAGE_NAME) - download_unit_tests(check_name, reports_path, temp_path) + download_unit_tests(check_name, REPORTS_PATH, TEMP_PATH) - tests_binary_path = os.path.join(temp_path, "unit_tests_dbms") + tests_binary_path = os.path.join(TEMP_PATH, "unit_tests_dbms") os.chmod(tests_binary_path, 0o777) - test_output = os.path.join(temp_path, "test_output") + test_output = os.path.join(TEMP_PATH, "test_output") if not os.path.exists(test_output): os.makedirs(test_output) @@ -151,7 +148,7 @@ def main(): else: logging.info("Run failed") - subprocess.check_call(f"sudo chown -R ubuntu:ubuntu {temp_path}", shell=True) + subprocess.check_call(f"sudo chown -R ubuntu:ubuntu {TEMP_PATH}", shell=True) s3_helper = S3Helper() state,
description, test_results, additional_logs = process_results(test_output) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 72e6615e720..4361b64b62f 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -442,27 +442,38 @@ class FailureReason(enum.Enum): STRESS = "stress" BUILD = "not running for current build" BACKWARD_INCOMPATIBLE = "test is backward incompatible" + NO_PARALLEL_REPLICAS = "something is not supported with parallel replicas" # UNKNOWN reasons NO_REFERENCE = "no reference file" INTERNAL_ERROR = "Test internal error: " +def threshold_generator(always_on_prob, always_off_prob, min_val, max_val): + def gen(): + tmp = random.random() + if tmp <= always_on_prob: + return min_val + if tmp <= always_on_prob + always_off_prob: + return max_val + + if isinstance(min_val, int) and isinstance(max_val, int): + return random.randint(min_val, max_val) + else: + return random.uniform(min_val, max_val) + + return gen + + class SettingsRandomizer: settings = { "max_insert_threads": lambda: 0 if random.random() < 0.5 else random.randint(1, 16), - "group_by_two_level_threshold": lambda: 1 - if random.random() < 0.1 - else 2**60 - if random.random() < 0.11 - else 100000, - "group_by_two_level_threshold_bytes": lambda: 1 - if random.random() < 0.1 - else 2**60 - if random.random() < 0.11 - else 50000000, + "group_by_two_level_threshold": threshold_generator(0.2, 0.2, 1, 1000000), + "group_by_two_level_threshold_bytes": threshold_generator( + 0.2, 0.2, 1, 50000000 + ), "distributed_aggregation_memory_efficient": lambda: random.randint(0, 1), "fsync_metadata": lambda: random.randint(0, 1), "output_format_parallel_formatting": lambda: random.randint(0, 1), @@ -479,19 +490,17 @@ class SettingsRandomizer: "read_in_order_two_level_merge_threshold": lambda: random.randint(0, 100), "optimize_aggregation_in_order": lambda: random.randint(0, 1), "aggregation_in_order_max_block_bytes": lambda: random.randint(0, 50000000), + "min_compress_block_size": lambda: random.randint(1, 1048576 * 3), + "max_compress_block_size": lambda: random.randint(1, 1048576 * 3), "use_uncompressed_cache": lambda: random.randint(0, 1), - "min_bytes_to_use_direct_io": lambda: 0 - if random.random() < 0.5 - else 1 - if random.random() < 0.2 - else random.randint(1, 1024 * 1024 * 1024), - "min_bytes_to_use_mmap_io": lambda: 0 - if random.random() < 0.5 - else 1 - if random.random() < 0.2 - else random.randint(1, 1024 * 1024 * 1024), + "min_bytes_to_use_direct_io": threshold_generator( + 0.2, 0.5, 1, 10 * 1024 * 1024 * 1024 + ), + "min_bytes_to_use_mmap_io": threshold_generator( + 0.2, 0.5, 1, 10 * 1024 * 1024 * 1024 + ), "local_filesystem_read_method": lambda: random.choice( - ["read", "pread", "mmap", "pread_threadpool"] + ["read", "pread", "mmap", "pread_threadpool", "io_uring"] ), "remote_filesystem_read_method": lambda: random.choice(["read", "threadpool"]), "local_filesystem_read_prefetch": lambda: random.randint(0, 1), @@ -513,6 +522,39 @@ class SettingsRandomizer: return random_settings +class MergeTreeSettingsRandomizer: + settings = { + # Temporarily disabled due to a large number of failures. TODO: fix.
+ # "ratio_of_defaults_for_sparse_serialization": threshold_generator( + # 0.1, 0.6, 0.0, 1.0 + # ), + "prefer_fetch_merged_part_size_threshold": threshold_generator( + 0.2, 0.5, 1, 10 * 1024 * 1024 * 1024 + ), + "vertical_merge_algorithm_min_rows_to_activate": threshold_generator( + 0.4, 0.4, 1, 1000000 + ), + "vertical_merge_algorithm_min_columns_to_activate": threshold_generator( + 0.4, 0.4, 1, 100 + ), + "min_merge_bytes_to_use_direct_io": threshold_generator( + 0.25, 0.25, 1, 10 * 1024 * 1024 * 1024 + ), + "index_granularity_bytes": lambda: random.randint(1024, 30 * 1024 * 1024), + "merge_max_block_size": lambda: random.randint(1, 8192 * 3), + "index_granularity": lambda: random.randint(1, 65536), + "min_bytes_for_wide_part": threshold_generator(0.3, 0.3, 0, 1024 * 1024 * 1024), + } + + @staticmethod + def get_random_settings(args): + random_settings = [] + for setting, generator in MergeTreeSettingsRandomizer.settings.items(): + if setting not in args.changed_merge_tree_settings: + random_settings.append(f"{setting}={generator()}") + return random_settings + + class TestResult: def __init__( self, @@ -617,41 +659,48 @@ class TestCase: return testcase_args - def cli_random_settings(self) -> str: - return " ".join([f"--{setting}" for setting in self.random_settings]) + @staticmethod + def cli_format_settings(settings_list) -> str: + return " ".join([f"--{setting}" for setting in settings_list]) - def add_random_settings(self, args, client_options): - if self.tags and "no-random-settings" in self.tags: - return client_options - if args.no_random_settings: - return client_options + def has_show_create_table_in_test(self): + return not subprocess.call(["grep", "-iq", "show create", self.case_file]) - if len(self.base_url_params) == 0: - os.environ["CLICKHOUSE_URL_PARAMS"] = "&".join(self.random_settings) - else: - os.environ["CLICKHOUSE_URL_PARAMS"] = ( - self.base_url_params + "&" + "&".join(self.random_settings) + def add_random_settings(self, client_options): + new_options = "" + if self.randomize_settings: + if len(self.base_url_params) == 0: + os.environ["CLICKHOUSE_URL_PARAMS"] = "&".join(self.random_settings) + else: + os.environ["CLICKHOUSE_URL_PARAMS"] = ( + self.base_url_params + "&" + "&".join(self.random_settings) + ) + + new_options += f" {self.cli_format_settings(self.random_settings)}" + + if self.randomize_merge_tree_settings: + new_options += f" --allow_merge_tree_settings {self.cli_format_settings(self.merge_tree_random_settings)}" + + if new_options != "": + new_options += " --allow_repeated_settings" + + os.environ["CLICKHOUSE_CLIENT_OPT"] = ( + self.base_client_options + new_options + " " ) - new_options = f" --allow_repeated_settings {self.cli_random_settings()}" - os.environ["CLICKHOUSE_CLIENT_OPT"] = ( - self.base_client_options + new_options + " " - ) return client_options + new_options def remove_random_settings_from_env(self): os.environ["CLICKHOUSE_URL_PARAMS"] = self.base_url_params os.environ["CLICKHOUSE_CLIENT_OPT"] = self.base_client_options - def add_info_about_settings(self, args, description): - if self.tags and "no-random-settings" in self.tags: - return description - if args.no_random_settings: - return description + def add_info_about_settings(self, description): + if self.randomize_settings: + description += f"\nSettings used in the test: {self.cli_format_settings(self.random_settings)}" + if self.randomize_merge_tree_settings: + description += f"\n\nMergeTree settings used in test: {self.cli_format_settings(self.merge_tree_random_settings)}" - 
return ( - f"{description}\nSettings used in the test: {self.cli_random_settings()}\n" - ) + return description + "\n" def __init__(self, suite, case: str, args, is_concurrent: bool): self.case: str = case # case file name @@ -675,12 +724,40 @@ class TestCase: self.testcase_args = None self.runs_count = 0 - self.random_settings = SettingsRandomizer.get_random_settings() + has_no_random_settings_tag = self.tags and "no-random-settings" in self.tags + + self.randomize_settings = not ( + args.no_random_settings or has_no_random_settings_tag + ) + + has_no_random_merge_tree_settings_tag = ( + self.tags and "no-random-merge-tree-settings" in self.tags + ) + + # If test contains SHOW CREATE TABLE do not + # randomize merge tree settings, because + # they will be added to table definition and test will fail + self.randomize_merge_tree_settings = not ( + args.no_random_merge_tree_settings + or has_no_random_settings_tag + or has_no_random_merge_tree_settings_tag + or self.has_show_create_table_in_test() + ) + + if self.randomize_settings: + self.random_settings = SettingsRandomizer.get_random_settings() + + if self.randomize_merge_tree_settings: + self.merge_tree_random_settings = ( + MergeTreeSettingsRandomizer.get_random_settings(args) + ) + self.base_url_params = ( os.environ["CLICKHOUSE_URL_PARAMS"] if "CLICKHOUSE_URL_PARAMS" in os.environ else "" ) + self.base_client_options = ( os.environ["CLICKHOUSE_CLIENT_OPT"] if "CLICKHOUSE_CLIENT_OPT" in os.environ @@ -729,6 +806,9 @@ class TestCase: ): return FailureReason.DISABLED + elif "no-parallel-replicas" in tags and args.no_parallel_replicas: + return FailureReason.NO_PARALLEL_REPLICAS + elif args.skip and any(s in self.name for s in args.skip): return FailureReason.SKIP @@ -1132,7 +1212,7 @@ class TestCase: self.testcase_args = self.configure_testcase_args( args, self.case_file, suite.suite_tmp_path ) - client_options = self.add_random_settings(args, client_options) + client_options = self.add_random_settings(client_options) proc, stdout, stderr, debug_log, total_time = self.run_single_test( server_logs_level, client_options ) @@ -1145,9 +1225,7 @@ class TestCase: result.description = result.description.replace('\0', '') if result.status == TestStatus.FAIL: - result.description = self.add_info_about_settings( - args, result.description - ) + result.description = self.add_info_about_settings(result.description) return result except KeyboardInterrupt as e: raise e @@ -1158,7 +1236,7 @@ class TestCase: FailureReason.INTERNAL_QUERY_FAIL, 0.0, self.add_info_about_settings( - args, self.get_description_from_exception_info(sys.exc_info()) + self.get_description_from_exception_info(sys.exc_info()) ), ) except (ConnectionError, http.client.ImproperConnectionState): @@ -1168,7 +1246,7 @@ class TestCase: FailureReason.SERVER_DIED, 0.0, self.add_info_about_settings( - args, self.get_description_from_exception_info(sys.exc_info()) + self.get_description_from_exception_info(sys.exc_info()) ), ) except Exception: @@ -1676,6 +1754,19 @@ def collect_build_flags(args): return result +def collect_changed_merge_tree_settings(args): + changed_settings = ( + clickhouse_execute( + args, + "SELECT name FROM system.merge_tree_settings WHERE changed", + ) + .strip() + .splitlines() + ) + + return list(map(lambda s: s.decode(), changed_settings)) + + def check_table_column(args, database, table, column): return ( int( @@ -1980,6 +2071,7 @@ def main(args): raise Exception(msg) args.build_flags = collect_build_flags(args) + args.changed_merge_tree_settings = 
collect_changed_merge_tree_settings(args) args.suppport_system_processes_is_all_data_sent = check_table_column( args, "system", "processes", "is_all_data_sent" ) @@ -2324,7 +2416,12 @@ if __name__ == "__main__": default=False, help="Disable settings randomization", ) - + parser.add_argument( + "--no-random-merge-tree-settings", + action="store_true", + default=False, + help="Disable MergeTree settings randomization", + ) parser.add_argument( "--run-by-hash-num", type=int, @@ -2399,6 +2496,13 @@ if __name__ == "__main__": default=False, help="Report statistics about log messages", ) + parser.add_argument( + "--no-parallel-replicas", + action="store_true", + default=False, + help="Do not include tests that are not supported with the parallel replicas feature", + ) + args = parser.parse_args() if args.queries and not os.path.isdir(args.queries): diff --git a/tests/fuzz/dictionaries/functions.dict b/tests/fuzz/dictionaries/functions.dict index e77a2a779fd..877b2679846 100644 --- a/tests/fuzz/dictionaries/functions.dict +++ b/tests/fuzz/dictionaries/functions.dict @@ -953,6 +953,7 @@ "topKWeighted" "stochasticLinearRegression" "corr" +"corrMatrix" "uniqCombined64" "intervalLengthSum" "uniqCombined" @@ -967,6 +968,7 @@ "quantiles" "sum" "covarPop" +"covarPopMatrix" "row_number" "kurtPop" "kurtSamp" @@ -1021,6 +1023,7 @@ "quantilesTiming" "welchTTest" "covarSamp" +"covarSampMatrix" "varPopStable" "quantileTiming" "quantileExactInclusive" diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index fda31979363..0073c25e5d5 100644 @@ -2856,7 +2856,10 @@ class ClickHouseCluster: SANITIZER_SIGN, from_host=True, filename="stderr.log" ): sanitizer_assert_instance = instance.grep_in_log( - SANITIZER_SIGN, from_host=True, filename="stderr.log" + SANITIZER_SIGN, + from_host=True, + filename="stderr.log", + after=1000, ) logging.error( "Sanitizer in instance %s log %s", @@ -2897,8 +2900,8 @@ class ClickHouseCluster: if sanitizer_assert_instance is not None: raise Exception( - "Sanitizer assert found in {} for instance {}".format( - self.docker_logs_path, sanitizer_assert_instance + "Sanitizer assert found for instance {}".format( + sanitizer_assert_instance ) ) if fatal_log is not None: @@ -3652,15 +3655,21 @@ class ClickHouseInstance: ) return len(result) > 0 - def grep_in_log(self, substring, from_host=False, filename="clickhouse-server.log"): + def grep_in_log( + self, substring, from_host=False, filename="clickhouse-server.log", after=None + ): logging.debug(f"grep in log called %s", substring) + if after is not None: + after_opt = "-A{}".format(after) + else: + after_opt = "" if from_host: # We check first that the file exists, but want to look for all rotated logs as well result = subprocess_check_call( [ "bash", "-c", - f'[ -f {self.logs_dir}/{filename} ] && zgrep -a "{substring}" {self.logs_dir}/{filename}* || true', + f'[ -f {self.logs_dir}/{filename} ] && zgrep {after_opt} -a "{substring}" {self.logs_dir}/{filename}* || true', ] ) else: @@ -3668,7 +3677,7 @@ [ "bash", "-c", - f'[ -f /var/log/clickhouse-server/{filename} ] && zgrep -a "{substring}" /var/log/clickhouse-server/{filename}* || true', + f'[ -f /var/log/clickhouse-server/{filename} ] && zgrep {after_opt} -a "{substring}" /var/log/clickhouse-server/{filename}* || true', ] ) logging.debug("grep result %s", result) diff --git a/tests/integration/test_backup_restore_new/test.py
b/tests/integration/test_backup_restore_new/test.py index 322c3a0d9c4..2d88f15f2bb 100644 --- a/tests/integration/test_backup_restore_new/test.py +++ b/tests/integration/test_backup_restore_new/test.py @@ -1,11 +1,14 @@ import pytest import asyncio +import glob import re import random import os.path +from collections import namedtuple from helpers.cluster import ClickHouseCluster from helpers.test_tools import assert_eq_with_retry, TSV + cluster = ClickHouseCluster(__file__) instance = cluster.add_instance( "instance", @@ -62,6 +65,13 @@ def get_path_to_backup(backup_name): return os.path.join(instance.cluster.instances_dir, "backups", name) +def find_files_in_backup_folder(backup_name): + path = get_path_to_backup(backup_name) + files = [f for f in glob.glob(path + "/**", recursive=True) if os.path.isfile(f)] + files += [f for f in glob.glob(path + "/.**", recursive=True) if os.path.isfile(f)] + return files + + session_id_counter = 0 @@ -80,6 +90,63 @@ def has_mutation_in_backup(mutation_id, backup_name, database, table): ) +BackupInfo = namedtuple( + "BackupInfo", + "name id status error num_files total_size num_entries uncompressed_size compressed_size files_read bytes_read", +) + + +def get_backup_info_from_system_backups(by_id=None, by_name=None): + where_condition = "1" + if by_id: + where_condition = f"id = '{by_id}'" + elif by_name: + where_condition = f"name = '{by_name}'" + + [ + name, + id, + status, + error, + num_files, + total_size, + num_entries, + uncompressed_size, + compressed_size, + files_read, + bytes_read, + ] = ( + instance.query( + f"SELECT name, id, status, error, num_files, total_size, num_entries, uncompressed_size, compressed_size, files_read, bytes_read " + f"FROM system.backups WHERE {where_condition} LIMIT 1" + ) + .strip("\n") + .split("\t") + ) + + num_files = int(num_files) + total_size = int(total_size) + num_entries = int(num_entries) + uncompressed_size = int(uncompressed_size) + compressed_size = int(compressed_size) + files_read = int(files_read) + bytes_read = int(bytes_read) + + return BackupInfo( + name=name, + id=id, + status=status, + error=error, + num_files=num_files, + total_size=total_size, + num_entries=num_entries, + uncompressed_size=uncompressed_size, + compressed_size=compressed_size, + files_read=files_read, + bytes_read=bytes_read, + ) + + @pytest.mark.parametrize( "engine", ["MergeTree", "Log", "TinyLog", "StripeLog", "Memory"] ) @@ -195,96 +262,62 @@ def test_incremental_backup(): def test_increment_backup_without_changes(): backup_name = new_backup_name() incremental_backup_name = new_backup_name() + create_and_fill_table(n=1) - - system_backup_qry = "SELECT status, num_files, num_processed_files, processed_files_size, uncompressed_size, compressed_size, error FROM system.backups WHERE id='{id_backup}'" - assert instance.query("SELECT count(), sum(x) FROM test.table") == TSV([["1", "0"]]) # prepare first backup without base_backup - (id_backup, status) = instance.query( - f"BACKUP TABLE test.table TO {backup_name}" - ).split("\t") + id_backup = instance.query(f"BACKUP TABLE test.table TO {backup_name}").split("\t")[ + 0 + ] + backup_info = get_backup_info_from_system_backups(by_id=id_backup) - ( - backup_status, - num_files, - num_processed_files, - processed_files_size, - uncompressed_size, - compressed_size, - error, - ) = ( - instance.query(system_backup_qry.format(id_backup=id_backup)) - .strip("\n") - .split("\t") + assert backup_info.status == "BACKUP_CREATED" + assert backup_info.error == "" + assert backup_info.num_files > 
0 + assert backup_info.total_size > 0 + assert ( + 0 < backup_info.num_entries and backup_info.num_entries <= backup_info.num_files ) - - assert backup_status == "BACKUP_CREATED" - assert num_files == "11" - assert int(uncompressed_size) > 0 - assert int(compressed_size) > 0 - assert error == "" + assert backup_info.uncompressed_size > 0 + assert backup_info.compressed_size == backup_info.uncompressed_size # create second backup without changes based on the first one - (id_backup_wo_changes, status_backup_wo_changes) = instance.query( + id_backup2 = instance.query( f"BACKUP TABLE test.table TO {incremental_backup_name} SETTINGS base_backup = {backup_name}" - ).split("\t") + ).split("\t")[0] - ( - backup_status_wo_changes, - num_files_backup_wo_changes, - num_processed_files_backup_wo_changes, - processed_files_size_backup_wo_changes, - uncompressed_size_backup_wo_changes, - compressed_size_backup_wo_changes, - error_snd, - ) = ( - instance.query(system_backup_qry.format(id_backup=id_backup_wo_changes)) - .strip("\n") - .split("\t") - ) + backup2_info = get_backup_info_from_system_backups(by_id=id_backup2) - assert backup_status_wo_changes == "BACKUP_CREATED" - assert num_files_backup_wo_changes == "1" - assert num_processed_files_backup_wo_changes == "11" - assert int(processed_files_size_backup_wo_changes) > 0 - assert int(uncompressed_size_backup_wo_changes) > 0 - assert int(compressed_size_backup_wo_changes) > 0 - assert error_snd == "" + assert backup2_info.status == "BACKUP_CREATED" + assert backup2_info.error == "" + assert backup2_info.num_files == backup_info.num_files + assert backup2_info.total_size == backup_info.total_size + assert backup2_info.num_entries == 0 + assert backup2_info.uncompressed_size > 0 + assert backup2_info.compressed_size == backup2_info.uncompressed_size # restore the second backup # we expect to see all files in the meta info of the restore and a sum of uncompressed and compressed sizes - (id_restore, status_restore) = instance.query( + id_restore = instance.query( f"RESTORE TABLE test.table AS test.table2 FROM {incremental_backup_name}" - ).split("\t") + ).split("\t")[0] assert instance.query("SELECT count(), sum(x) FROM test.table2") == TSV( [["1", "0"]] ) - ( - restore_status, - restore_num_files, - restore_num_processed_files, - restore_processed_files_size, - restore_uncompressed_size, - restore_compressed_size, - restore_error, - ) = ( - instance.query(system_backup_qry.format(id_backup=id_restore)) - .strip("\n") - .split("\t") - ) + restore_info = get_backup_info_from_system_backups(by_id=id_restore) - assert restore_status == "RESTORED" - assert int(restore_num_files) == 1 - assert int(restore_num_processed_files) == int( - num_processed_files_backup_wo_changes - ) - assert int(restore_uncompressed_size) > 0 - assert int(restore_compressed_size) > 0 - assert restore_error == "" + assert restore_info.status == "RESTORED" + assert restore_info.error == "" + assert restore_info.num_files == backup2_info.num_files + assert restore_info.total_size == backup2_info.total_size + assert restore_info.num_entries == backup2_info.num_entries + assert restore_info.uncompressed_size == backup2_info.uncompressed_size + assert restore_info.compressed_size == backup2_info.compressed_size + assert restore_info.files_read == backup2_info.num_files + assert restore_info.bytes_read == backup2_info.total_size def test_incremental_backup_overflow(): @@ -1196,44 +1229,55 @@ def test_operation_id(): def test_system_backups(): + # Backup create_and_fill_table(n=30) 
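Stepping out of the test for a moment: the assertions in this function pin down the intended semantics of the new system.backups counters. num_files and total_size describe the backed-up data, num_entries counts the entries physically written into the backup (zero for an incremental backup with no changes), and a plain uncompressed backup reports compressed_size equal to uncompressed_size. A standalone sketch of the same invariants; only the column names come from this patch, the helper itself is illustrative:

    def assert_backup_counters_consistent(instance, backup_id):
        # Fetch the new counters for a single backup from system.backups.
        row = instance.query(
            "SELECT num_files, total_size, num_entries, uncompressed_size, compressed_size "
            f"FROM system.backups WHERE id = '{backup_id}'"
        ).strip().split("\t")
        num_files, total_size, num_entries, uncompressed, compressed = map(int, row)
        # Entries written can never exceed the number of backed-up files.
        assert 0 <= num_entries <= num_files
        # Without compression the stored size matches the raw size.
        assert compressed == uncompressed
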
backup_name = new_backup_name() - id = instance.query(f"BACKUP TABLE test.table TO {backup_name}").split("\t")[0] - [ - name, - status, - num_files, - num_processed_files, - processed_files_size, - uncompressed_size, - compressed_size, - error, - ] = ( - instance.query( - f"SELECT name, status, num_files, num_processed_files, processed_files_size, uncompressed_size, compressed_size, error FROM system.backups WHERE id='{id}'" - ) - .strip("\n") - .split("\t") + info = get_backup_info_from_system_backups(by_id=id) + escaped_backup_name = backup_name.replace("'", "\\'") + assert info.name == escaped_backup_name + assert info.status == "BACKUP_CREATED" + assert info.error == "" + assert info.num_files > 0 + assert info.total_size > 0 + assert 0 < info.num_entries and info.num_entries <= info.num_files + assert info.uncompressed_size > 0 + assert info.compressed_size == info.uncompressed_size + assert info.files_read == 0 + assert info.bytes_read == 0 + + files_in_backup_folder = find_files_in_backup_folder(backup_name) + assert info.num_entries == len(files_in_backup_folder) - 1 + assert info.uncompressed_size == sum( + os.path.getsize(f) for f in files_in_backup_folder ) - escaped_backup_name = backup_name.replace("'", "\\'") - num_files = int(num_files) - compressed_size = int(compressed_size) - uncompressed_size = int(uncompressed_size) - num_processed_files = int(num_processed_files) - processed_files_size = int(processed_files_size) - assert name == escaped_backup_name - assert status == "BACKUP_CREATED" - assert num_files > 1 - assert num_processed_files > 1 - assert processed_files_size > 1 - assert uncompressed_size > 1 - assert compressed_size == uncompressed_size - assert error == "" + # The concrete values can change, e.g.: + # info.num_files == 91 + # info.total_size == 4973 + # info.num_entries == 55 + # info.uncompressed_size == 19701 + instance.query("DROP TABLE test.table") + + # Restore + id = instance.query(f"RESTORE TABLE test.table FROM {backup_name}").split("\t")[0] + restore_info = get_backup_info_from_system_backups(by_id=id) + + assert restore_info.name == escaped_backup_name + assert restore_info.status == "RESTORED" + assert restore_info.error == "" + assert restore_info.num_files == info.num_files + assert restore_info.total_size == info.total_size + assert restore_info.num_entries == info.num_entries + assert restore_info.uncompressed_size == info.uncompressed_size + assert restore_info.compressed_size == info.compressed_size + assert restore_info.files_read == restore_info.num_files + assert restore_info.bytes_read == restore_info.total_size + + # Failed backup.
backup_name = new_backup_name() expected_error = "Table test.non_existent_table was not found" assert expected_error in instance.query_and_get_error( @@ -1241,34 +1285,17 @@ def test_system_backups(): ) escaped_backup_name = backup_name.replace("'", "\\'") - [ - status, - num_files, - num_processed_files, - processed_files_size, - uncompressed_size, - compressed_size, - error, - ] = ( - instance.query( - f"SELECT status, num_files, num_processed_files, processed_files_size, uncompressed_size, compressed_size, error FROM system.backups WHERE name='{escaped_backup_name}'" - ) - .strip("\n") - .split("\t") - ) + info = get_backup_info_from_system_backups(by_name=escaped_backup_name) - num_files = int(num_files) - compressed_size = int(compressed_size) - uncompressed_size = int(uncompressed_size) - num_processed_files = int(num_processed_files) - processed_files_size = int(processed_files_size) - assert status == "BACKUP_FAILED" - assert num_files == 0 - assert uncompressed_size == 0 - assert compressed_size == 0 - assert num_processed_files == 0 - assert processed_files_size == 0 - assert expected_error in error + assert info.status == "BACKUP_FAILED" + assert expected_error in info.error + assert info.num_files == 0 + assert info.total_size == 0 + assert info.num_entries == 0 + assert info.uncompressed_size == 0 + assert info.compressed_size == 0 + assert info.files_read == 0 + assert info.bytes_read == 0 def test_mutation(): diff --git a/tests/integration/test_backup_restore_on_cluster/_gen/cluster_for_concurrency_test.xml b/tests/integration/test_backup_restore_on_cluster/_gen/cluster_for_concurrency_test.xml new file mode 100644 index 00000000000..08684e34e45 --- /dev/null +++ b/tests/integration/test_backup_restore_on_cluster/_gen/cluster_for_concurrency_test.xml @@ -0,0 +1,48 @@ + + + + + + node0 + 9000 + + + node1 + 9000 + + + node2 + 9000 + + + node3 + 9000 + + + node4 + 9000 + + + node5 + 9000 + + + node6 + 9000 + + + node7 + 9000 + + + node8 + 9000 + + + node9 + 9000 + + + + + \ No newline at end of file diff --git a/tests/integration/test_concurrent_queries_restriction_by_query_kind/test.py b/tests/integration/test_concurrent_queries_restriction_by_query_kind/test.py index 97f39c60004..777c35f8b50 100644 --- a/tests/integration/test_concurrent_queries_restriction_by_query_kind/test.py +++ b/tests/integration/test_concurrent_queries_restriction_by_query_kind/test.py @@ -96,6 +96,16 @@ def test_select(started_cluster): 10, ) + # intersect and except are counted + common_pattern( + node_select, + "select", + "select sleep(1) INTERSECT select sleep(1) EXCEPT select sleep(1)", + "insert into test_concurrent_insert values (0)", + 2, + 10, + ) + def test_insert(started_cluster): common_pattern( diff --git a/tests/integration/test_mask_sensitive_info/configs/named_collections.xml b/tests/integration/test_mask_sensitive_info/configs/named_collections.xml new file mode 100644 index 00000000000..ee923a90171 --- /dev/null +++ b/tests/integration/test_mask_sensitive_info/configs/named_collections.xml @@ -0,0 +1,10 @@ + + + + + + + + + + diff --git a/tests/integration/test_mask_sensitive_info/test.py b/tests/integration/test_mask_sensitive_info/test.py index f938148e5a0..3f71b047213 100644 --- a/tests/integration/test_mask_sensitive_info/test.py +++ b/tests/integration/test_mask_sensitive_info/test.py @@ -4,7 +4,13 @@ from helpers.cluster import ClickHouseCluster from helpers.test_tools import TSV cluster = ClickHouseCluster(__file__) -node = cluster.add_instance("node", with_zookeeper=True) 
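Before the masking test below, a note on what is being verified: when DDL carries credentials, including the new named-collection key = value syntax, the statement must reach the logs and system.query_log with the password replaced by [HIDDEN]. A condensed sketch of that contract; the engine string is copied from the test below, while the secret and the surrounding code are illustrative:

    password = "mysecret"  # hypothetical secret, for illustration only
    node.query(
        f"CREATE TABLE table8 (x int) ENGINE = MySQL(named_collection_1, "
        f"host = 'mysql57', port = 3306, database = 'mysql_db', "
        f"table = 'mysql_table', user = 'mysql_user', password = '{password}')"
    )
    # The test's check_logs helper then requires:
    #   must_contain:     ... password = '[HIDDEN]')
    #   must_not_contain: the plaintext password
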
+node = cluster.add_instance( + "node", + main_configs=[ + "configs/named_collections.xml", + ], + with_zookeeper=True, +) @pytest.fixture(scope="module", autouse=True) @@ -116,6 +122,12 @@ def test_create_table(): f"S3('http://minio1:9001/root/data/test3.csv.gz', 'CSV', 'gzip')", f"S3('http://minio1:9001/root/data/test4.csv', 'minio', '{password}', 'CSV')", f"S3('http://minio1:9001/root/data/test5.csv.gz', 'minio', '{password}', 'CSV', 'gzip')", + f"MySQL(named_collection_1, host = 'mysql57', port = 3306, database = 'mysql_db', table = 'mysql_table', user = 'mysql_user', password = '{password}')", + f"MySQL(named_collection_2, database = 'mysql_db', host = 'mysql57', port = 3306, password = '{password}', table = 'mysql_table', user = 'mysql_user')", + f"MySQL(named_collection_3, database = 'mysql_db', host = 'mysql57', port = 3306, table = 'mysql_table')", + f"PostgreSQL(named_collection_4, host = 'postgres1', port = 5432, database = 'postgres_db', table = 'postgres_table', user = 'postgres_user', password = '{password}')", + f"MongoDB(named_collection_5, host = 'mongo1', port = 5432, database = 'mongo_db', collection = 'mongo_col', user = 'mongo_user', password = '{password}')", + f"S3(named_collection_6, url = 'http://minio1:9001/root/data/test8.csv', access_key_id = 'minio', secret_access_key = '{password}', format = 'CSV')", ] for i, table_engine in enumerate(table_engines): @@ -147,6 +159,12 @@ def test_create_table(): "CREATE TABLE table5 (x int) ENGINE = S3('http://minio1:9001/root/data/test3.csv.gz', 'CSV', 'gzip')", "CREATE TABLE table6 (`x` int) ENGINE = S3('http://minio1:9001/root/data/test4.csv', 'minio', '[HIDDEN]', 'CSV')", "CREATE TABLE table7 (`x` int) ENGINE = S3('http://minio1:9001/root/data/test5.csv.gz', 'minio', '[HIDDEN]', 'CSV', 'gzip')", + "CREATE TABLE table8 (`x` int) ENGINE = MySQL(named_collection_1, host = 'mysql57', port = 3306, database = 'mysql_db', table = 'mysql_table', user = 'mysql_user', password = '[HIDDEN]')", + "CREATE TABLE table9 (`x` int) ENGINE = MySQL(named_collection_2, database = 'mysql_db', host = 'mysql57', port = 3306, password = '[HIDDEN]', table = 'mysql_table', user = 'mysql_user')", + "CREATE TABLE table10 (x int) ENGINE = MySQL(named_collection_3, database = 'mysql_db', host = 'mysql57', port = 3306, table = 'mysql_table')", + "CREATE TABLE table11 (`x` int) ENGINE = PostgreSQL(named_collection_4, host = 'postgres1', port = 5432, database = 'postgres_db', table = 'postgres_table', user = 'postgres_user', password = '[HIDDEN]')", + "CREATE TABLE table12 (`x` int) ENGINE = MongoDB(named_collection_5, host = 'mongo1', port = 5432, database = 'mongo_db', collection = 'mongo_col', user = 'mongo_user', password = '[HIDDEN]'", + "CREATE TABLE table13 (`x` int) ENGINE = S3(named_collection_6, url = 'http://minio1:9001/root/data/test8.csv', access_key_id = 'minio', secret_access_key = '[HIDDEN]', format = 'CSV')", ], must_not_contain=[password], ) @@ -160,6 +178,7 @@ def test_create_database(): database_engines = [ f"MySQL('localhost:3306', 'mysql_db', 'mysql_user', '{password}') SETTINGS connect_timeout=1, connection_max_tries=1", + f"MySQL(named_collection_1, host = 'localhost', port = 3306, database = 'mysql_db', user = 'mysql_user', password = '{password}') SETTINGS connect_timeout=1, connection_max_tries=1", # f"PostgreSQL('localhost:5432', 'postgres_db', 'postgres_user', '{password}')", ] @@ -173,7 +192,8 @@ def test_create_database(): check_logs( must_contain=[ "CREATE DATABASE database0 ENGINE = MySQL('localhost:3306', 'mysql_db', 
'mysql_user', '[HIDDEN]')", - # "CREATE DATABASE database1 ENGINE = PostgreSQL('localhost:5432', 'postgres_db', 'postgres_user', '[HIDDEN]')", + "CREATE DATABASE database1 ENGINE = MySQL(named_collection_1, host = 'localhost', port = 3306, database = 'mysql_db', user = 'mysql_user', password = '[HIDDEN]')", + # "CREATE DATABASE database2 ENGINE = PostgreSQL('localhost:5432', 'postgres_db', 'postgres_user', '[HIDDEN]')", ], must_not_contain=[password], ) @@ -211,6 +231,11 @@ def test_table_functions(): f"remote('127.{{2..11}}', numbers(10), 'remote_user', '{password}', rand())", f"remoteSecure('127.{{2..11}}', 'default', 'remote_table', 'remote_user', '{password}')", f"remoteSecure('127.{{2..11}}', 'default', 'remote_table', 'remote_user', rand())", + f"mysql(named_collection_1, host = 'mysql57', port = 3306, database = 'mysql_db', table = 'mysql_table', user = 'mysql_user', password = '{password}')", + f"postgresql(named_collection_2, password = '{password}', host = 'postgres1', port = 5432, database = 'postgres_db', table = 'postgres_table', user = 'postgres_user')", + f"s3(named_collection_3, url = 'http://minio1:9001/root/data/test4.csv', access_key_id = 'minio', secret_access_key = '{password}')", + f"remote(named_collection_4, addresses_expr = '127.{{2..11}}', database = 'default', table = 'remote_table', user = 'remote_user', password = '{password}', sharding_key = rand())", + f"remoteSecure(named_collection_5, addresses_expr = '127.{{2..11}}', database = 'default', table = 'remote_table', user = 'remote_user', password = '{password}')", ] for i, table_function in enumerate(table_functions): @@ -259,6 +284,11 @@ def test_table_functions(): "CREATE TABLE tablefunc22 (`x` int) AS remote('127.{2..11}', numbers(10), 'remote_user', '[HIDDEN]', rand())", "CREATE TABLE tablefunc23 (`x` int) AS remoteSecure('127.{2..11}', 'default', 'remote_table', 'remote_user', '[HIDDEN]')", "CREATE TABLE tablefunc24 (x int) AS remoteSecure('127.{2..11}', 'default', 'remote_table', 'remote_user', rand())", + "CREATE TABLE tablefunc25 (`x` int) AS mysql(named_collection_1, host = 'mysql57', port = 3306, database = 'mysql_db', table = 'mysql_table', user = 'mysql_user', password = '[HIDDEN]')", + "CREATE TABLE tablefunc26 (`x` int) AS postgresql(named_collection_2, password = '[HIDDEN]', host = 'postgres1', port = 5432, database = 'postgres_db', table = 'postgres_table', user = 'postgres_user')", + "CREATE TABLE tablefunc27 (`x` int) AS s3(named_collection_3, url = 'http://minio1:9001/root/data/test4.csv', access_key_id = 'minio', secret_access_key = '[HIDDEN]')", + "CREATE TABLE tablefunc28 (`x` int) AS remote(named_collection_4, addresses_expr = '127.{2..11}', database = 'default', table = 'remote_table', user = 'remote_user', password = '[HIDDEN]', sharding_key = rand())", + "CREATE TABLE tablefunc29 (`x` int) AS remoteSecure(named_collection_5, addresses_expr = '127.{2..11}', database = 'default', table = 'remote_table', user = 'remote_user', password = '[HIDDEN]')", ], must_not_contain=[password], ) diff --git a/tests/integration/test_replicated_database/test.py b/tests/integration/test_replicated_database/test.py index d3fcc89561a..8160a6b47a7 100644 --- a/tests/integration/test_replicated_database/test.py +++ b/tests/integration/test_replicated_database/test.py @@ -212,6 +212,48 @@ def test_simple_alter_table(started_cluster, engine): competing_node.query("DROP DATABASE testdb SYNC") +@pytest.mark.parametrize("engine", ["MergeTree", "ReplicatedMergeTree"]) +def test_delete_from_table(started_cluster, 
engine): + main_node.query( + "CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');" + ) + dummy_node.query( + "CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard2', 'replica1');" + ) + + name = "testdb.delete_test_{}".format(engine) + main_node.query( + "CREATE TABLE {} " + "(id UInt64, value String) " + "ENGINE = {} PARTITION BY id%2 ORDER BY (id);".format(name, engine) + ) + main_node.query("INSERT INTO TABLE {} VALUES(1, 'aaaa');".format(name)) + main_node.query("INSERT INTO TABLE {} VALUES(2, 'aaaa');".format(name)) + dummy_node.query("INSERT INTO TABLE {} VALUES(1, 'bbbb');".format(name)) + dummy_node.query("INSERT INTO TABLE {} VALUES(2, 'bbbb');".format(name)) + + main_node.query( + "SET allow_experimental_lightweight_delete=1; DELETE FROM {} WHERE id=2;".format( + name + ) + ) + + expected = "1\taaaa\n1\tbbbb" + + table_for_select = name + if "Replicated" not in engine: + table_for_select = "cluster('testdb', {})".format(name) + for node in [main_node, dummy_node]: + assert_eq_with_retry( + node, + "SELECT * FROM {} ORDER BY id, value;".format(table_for_select), + expected, + ) + + main_node.query("DROP DATABASE testdb SYNC") + dummy_node.query("DROP DATABASE testdb SYNC") + + def get_table_uuid(database, name): return main_node.query( f"SELECT uuid FROM system.tables WHERE database = '{database}' and name = '{name}'" diff --git a/tests/integration/test_storage_rabbitmq/test.py b/tests/integration/test_storage_rabbitmq/test.py index 43c964d9d93..f354ae01714 100644 --- a/tests/integration/test_storage_rabbitmq/test.py +++ b/tests/integration/test_storage_rabbitmq/test.py @@ -1,10 +1,5 @@ import pytest -# FIXME This test is too flaky -# https://github.com/ClickHouse/ClickHouse/issues/45160 - -pytestmark = pytest.mark.skip - import json import os.path as p import random @@ -55,6 +50,7 @@ def rabbitmq_check_result(result, check=False, ref_file="test_rabbitmq_json.refe def wait_rabbitmq_to_start(rabbitmq_docker_id, timeout=180): + logging.getLogger("pika").propagate = False start = time.time() while time.time() - start < timeout: try: @@ -159,6 +155,7 @@ def test_rabbitmq_select_empty(rabbitmq_cluster): rabbitmq_exchange_name = 'empty', rabbitmq_commit_on_select = 1, rabbitmq_format = 'TSV', + rabbitmq_flush_interval_ms=1000, rabbitmq_row_delimiter = '\\n'; """.format( rabbitmq_cluster.rabbitmq_host ) @@ -175,6 +172,8 @@ def test_rabbitmq_json_without_delimiter(rabbitmq_cluster): ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = '{}:5672', rabbitmq_commit_on_select = 1, + rabbitmq_flush_interval_ms=1000, + rabbitmq_max_block_size=100, rabbitmq_exchange_name = 'json', rabbitmq_format = 'JSONEachRow' """.format( @@ -227,6 +226,8 @@ def test_rabbitmq_csv_with_delimiter(rabbitmq_cluster): rabbitmq_exchange_name = 'csv', rabbitmq_commit_on_select = 1, rabbitmq_format = 'CSV', + rabbitmq_flush_interval_ms=1000, + rabbitmq_max_block_size=100, rabbitmq_row_delimiter = '\\n'; """ ) @@ -268,6 +269,8 @@ def test_rabbitmq_tsv_with_delimiter(rabbitmq_cluster): rabbitmq_exchange_name = 'tsv', rabbitmq_format = 'TSV', rabbitmq_commit_on_select = 1, + rabbitmq_flush_interval_ms=1000, + rabbitmq_max_block_size=100, rabbitmq_queue_base = 'tsv', rabbitmq_row_delimiter = '\\n'; CREATE TABLE test.view (key UInt64, value UInt64) @@ -309,6 +312,8 @@ def test_rabbitmq_macros(rabbitmq_cluster): ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = '{rabbitmq_host}:{rabbitmq_port}', rabbitmq_commit_on_select = 1, + rabbitmq_flush_interval_ms=1000,
+ rabbitmq_max_block_size=100, rabbitmq_exchange_name = '{rabbitmq_exchange_name}', rabbitmq_format = '{rabbitmq_format}' """ @@ -348,6 +353,8 @@ def test_rabbitmq_materialized_view(rabbitmq_cluster): SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', rabbitmq_exchange_name = 'mv', rabbitmq_format = 'JSONEachRow', + rabbitmq_flush_interval_ms=1000, + rabbitmq_max_block_size=100, rabbitmq_row_delimiter = '\\n'; CREATE TABLE test.view (key UInt64, value UInt64) ENGINE = MergeTree() @@ -370,10 +377,11 @@ def test_rabbitmq_materialized_view(rabbitmq_cluster): connection = pika.BlockingConnection(parameters) channel = connection.channel() + instance.wait_for_log_line("Started streaming to 2 attached views") + messages = [] for i in range(50): - messages.append(json.dumps({"key": i, "value": i})) - for message in messages: + message = json.dumps({"key": i, "value": i}) channel.basic_publish(exchange="mv", routing_key="", body=message) time_limit_sec = 60 @@ -390,8 +398,10 @@ def test_rabbitmq_materialized_view(rabbitmq_cluster): while time.monotonic() < deadline: result = instance.query("SELECT * FROM test.view2 ORDER BY key") + print(f"Result: {result}") if rabbitmq_check_result(result): break + time.sleep(1) rabbitmq_check_result(result, True) connection.close() @@ -404,6 +414,8 @@ def test_rabbitmq_materialized_view_with_subquery(rabbitmq_cluster): ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', rabbitmq_exchange_name = 'mvsq', + rabbitmq_flush_interval_ms=1000, + rabbitmq_max_block_size=100, rabbitmq_format = 'JSONEachRow', rabbitmq_row_delimiter = '\\n'; CREATE TABLE test.view (key UInt64, value UInt64) @@ -447,6 +459,8 @@ def test_rabbitmq_many_materialized_views(rabbitmq_cluster): ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', rabbitmq_exchange_name = 'mmv', + rabbitmq_flush_interval_ms=1000, + rabbitmq_max_block_size=100, rabbitmq_format = 'JSONEachRow', rabbitmq_row_delimiter = '\\n'; CREATE TABLE test.view1 (key UInt64, value UInt64) @@ -469,6 +483,8 @@ def test_rabbitmq_many_materialized_views(rabbitmq_cluster): connection = pika.BlockingConnection(parameters) channel = connection.channel() + instance.wait_for_log_line("Started streaming to 2 attached views") + messages = [] for i in range(50): messages.append(json.dumps({"key": i, "value": i})) @@ -504,6 +520,8 @@ def test_rabbitmq_protobuf(rabbitmq_cluster): SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', rabbitmq_exchange_name = 'pb', rabbitmq_format = 'Protobuf', + rabbitmq_flush_interval_ms=1000, + rabbitmq_max_block_size=100, rabbitmq_schema = 'rabbitmq.proto:KeyValueProto'; CREATE TABLE test.view (key UInt64, value UInt64) ENGINE = MergeTree() @@ -578,6 +596,8 @@ def test_rabbitmq_big_message(rabbitmq_cluster): ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', rabbitmq_exchange_name = 'big', + rabbitmq_flush_interval_ms=1000, + rabbitmq_max_block_size=100, rabbitmq_format = 'JSONEachRow'; CREATE TABLE test.view (key UInt64, value String) ENGINE = MergeTree @@ -605,6 +625,7 @@ def test_rabbitmq_big_message(rabbitmq_cluster): def test_rabbitmq_sharding_between_queues_publish(rabbitmq_cluster): NUM_CONSUMERS = 10 NUM_QUEUES = 10 + logging.getLogger("pika").propagate = False instance.query( """ @@ -612,8 +633,10 @@ def test_rabbitmq_sharding_between_queues_publish(rabbitmq_cluster): ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', rabbitmq_exchange_name = 'test_sharding', - rabbitmq_num_queues = 10, + rabbitmq_num_queues = 5, rabbitmq_num_consumers = 10, + 
rabbitmq_max_block_size = 100, + rabbitmq_flush_interval_ms=500, rabbitmq_format = 'JSONEachRow', rabbitmq_row_delimiter = '\\n'; CREATE TABLE test.view (key UInt64, value UInt64, channel_id String) @@ -654,7 +677,7 @@ def test_rabbitmq_sharding_between_queues_publish(rabbitmq_cluster): connection.close() threads = [] - threads_num = 20 + threads_num = 10 for _ in range(threads_num): threads.append(threading.Thread(target=produce)) @@ -666,8 +689,10 @@ def test_rabbitmq_sharding_between_queues_publish(rabbitmq_cluster): while True: result1 = instance.query("SELECT count() FROM test.view") time.sleep(1) - if int(result1) == messages_num * threads_num: + expected = messages_num * threads_num + if int(result1) == expected: break + print(f"Result {result1} / {expected}") result2 = instance.query("SELECT count(DISTINCT channel_id) FROM test.view") @@ -683,6 +708,7 @@ def test_rabbitmq_sharding_between_queues_publish(rabbitmq_cluster): def test_rabbitmq_mv_combo(rabbitmq_cluster): NUM_MV = 5 NUM_CONSUMERS = 4 + logging.getLogger("pika").propagate = False instance.query( """ @@ -691,6 +717,8 @@ def test_rabbitmq_mv_combo(rabbitmq_cluster): SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', rabbitmq_exchange_name = 'combo', rabbitmq_queue_base = 'combo', + rabbitmq_max_block_size = 100, + rabbitmq_flush_interval_ms=1000, rabbitmq_num_consumers = 2, rabbitmq_num_queues = 5, rabbitmq_format = 'JSONEachRow', @@ -755,8 +783,10 @@ def test_rabbitmq_mv_combo(rabbitmq_cluster): result += int( instance.query("SELECT count() FROM test.combo_{0}".format(mv_id)) ) - if int(result) == messages_num * threads_num * NUM_MV: + expected = messages_num * threads_num * NUM_MV + if int(result) == expected: break + print(f"Result: {result} / {expected}") time.sleep(1) for thread in threads: @@ -784,6 +814,8 @@ def test_rabbitmq_insert(rabbitmq_cluster): ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', rabbitmq_exchange_name = 'insert', + rabbitmq_flush_interval_ms=1000, + rabbitmq_max_block_size=100, rabbitmq_exchange_type = 'direct', rabbitmq_routing_key_list = 'insert1', rabbitmq_format = 'TSV', @@ -841,6 +873,8 @@ def test_rabbitmq_insert_headers_exchange(rabbitmq_cluster): SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', rabbitmq_exchange_name = 'insert_headers', rabbitmq_exchange_type = 'headers', + rabbitmq_flush_interval_ms=1000, + rabbitmq_max_block_size=100, rabbitmq_routing_key_list = 'test=insert,topic=headers', rabbitmq_format = 'TSV', rabbitmq_row_delimiter = '\\n'; @@ -907,6 +941,8 @@ def test_rabbitmq_many_inserts(rabbitmq_cluster): rabbitmq_exchange_name = 'many_inserts', rabbitmq_exchange_type = 'direct', rabbitmq_routing_key_list = 'insert2', + rabbitmq_flush_interval_ms=1000, + rabbitmq_max_block_size=100, rabbitmq_format = 'TSV', rabbitmq_row_delimiter = '\\n'; CREATE TABLE test.rabbitmq_consume (key UInt64, value UInt64) @@ -915,6 +951,8 @@ def test_rabbitmq_many_inserts(rabbitmq_cluster): rabbitmq_exchange_name = 'many_inserts', rabbitmq_exchange_type = 'direct', rabbitmq_routing_key_list = 'insert2', + rabbitmq_flush_interval_ms=1000, + rabbitmq_max_block_size=100, rabbitmq_format = 'TSV', rabbitmq_row_delimiter = '\\n'; """ @@ -993,9 +1031,10 @@ def test_rabbitmq_overloaded_insert(rabbitmq_cluster): rabbitmq_exchange_name = 'over', rabbitmq_queue_base = 'over', rabbitmq_exchange_type = 'direct', - rabbitmq_num_consumers = 5, - rabbitmq_num_queues = 10, - rabbitmq_max_block_size = 10000, + rabbitmq_num_consumers = 3, + rabbitmq_flush_interval_ms=1000, + rabbitmq_max_block_size = 
100, + rabbitmq_num_queues = 2, rabbitmq_routing_key_list = 'over', rabbitmq_format = 'TSV', rabbitmq_row_delimiter = '\\n'; @@ -1005,6 +1044,8 @@ def test_rabbitmq_overloaded_insert(rabbitmq_cluster): rabbitmq_exchange_name = 'over', rabbitmq_exchange_type = 'direct', rabbitmq_routing_key_list = 'over', + rabbitmq_flush_interval_ms=1000, + rabbitmq_max_block_size = 100, rabbitmq_format = 'TSV', rabbitmq_row_delimiter = '\\n'; CREATE TABLE test.view_overload (key UInt64, value UInt64) @@ -1016,6 +1057,8 @@ def test_rabbitmq_overloaded_insert(rabbitmq_cluster): """ ) + instance.wait_for_log_line("Started streaming to 1 attached views") + messages_num = 100000 def insert(): @@ -1037,7 +1080,7 @@ def test_rabbitmq_overloaded_insert(rabbitmq_cluster): raise threads = [] - threads_num = 5 + threads_num = 3 for _ in range(threads_num): threads.append(threading.Thread(target=insert)) for thread in threads: @@ -1047,8 +1090,10 @@ def test_rabbitmq_overloaded_insert(rabbitmq_cluster): while True: result = instance.query("SELECT count() FROM test.view_overload") time.sleep(1) - if int(result) == messages_num * threads_num: + expected = messages_num * threads_num + if int(result) == expected: break + print(f"Result: {result} / {expected}") instance.query( """ @@ -1090,6 +1135,8 @@ def test_rabbitmq_direct_exchange(rabbitmq_cluster): SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', rabbitmq_num_consumers = 2, rabbitmq_num_queues = 2, + rabbitmq_flush_interval_ms=1000, + rabbitmq_max_block_size=100, rabbitmq_exchange_name = 'direct_exchange_testing', rabbitmq_exchange_type = 'direct', rabbitmq_routing_key_list = 'direct_{0}', @@ -1181,6 +1228,8 @@ def test_rabbitmq_fanout_exchange(rabbitmq_cluster): SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', rabbitmq_num_consumers = 2, rabbitmq_num_queues = 2, + rabbitmq_flush_interval_ms=1000, + rabbitmq_max_block_size=100, rabbitmq_routing_key_list = 'key_{0}', rabbitmq_exchange_name = 'fanout_exchange_testing', rabbitmq_exchange_type = 'fanout', @@ -1267,6 +1316,8 @@ def test_rabbitmq_topic_exchange(rabbitmq_cluster): SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', rabbitmq_num_consumers = 2, rabbitmq_num_queues = 2, + rabbitmq_flush_interval_ms=1000, + rabbitmq_max_block_size=100, rabbitmq_exchange_name = 'topic_exchange_testing', rabbitmq_exchange_type = 'topic', rabbitmq_routing_key_list = '*.{0}', @@ -1290,6 +1341,8 @@ def test_rabbitmq_topic_exchange(rabbitmq_cluster): SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', rabbitmq_num_consumers = 2, rabbitmq_num_queues = 2, + rabbitmq_flush_interval_ms=1000, + rabbitmq_max_block_size=100, rabbitmq_exchange_name = 'topic_exchange_testing', rabbitmq_exchange_type = 'topic', rabbitmq_routing_key_list = '*.logs', @@ -1391,6 +1444,7 @@ def test_rabbitmq_hash_exchange(rabbitmq_cluster): rabbitmq_exchange_type = 'consistent_hash', rabbitmq_exchange_name = 'hash_exchange_testing', rabbitmq_format = 'JSONEachRow', + rabbitmq_flush_interval_ms=1000, rabbitmq_row_delimiter = '\\n'; CREATE MATERIALIZED VIEW test.{0}_mv TO test.destination AS SELECT key, value, _channel_id AS channel_id FROM test.{0}; @@ -1488,6 +1542,8 @@ def test_rabbitmq_multiple_bindings(rabbitmq_cluster): rabbitmq_exchange_name = 'multiple_bindings_testing', rabbitmq_exchange_type = 'direct', rabbitmq_routing_key_list = 'key1,key2,key3,key4,key5', + rabbitmq_flush_interval_ms=1000, + rabbitmq_max_block_size=100, rabbitmq_format = 'JSONEachRow', rabbitmq_row_delimiter = '\\n'; CREATE MATERIALIZED VIEW test.bindings_mv TO test.destination AS @@ -1577,6 
+1633,8 @@ def test_rabbitmq_headers_exchange(rabbitmq_cluster): rabbitmq_num_consumers = 2, rabbitmq_exchange_name = 'headers_exchange_testing', rabbitmq_exchange_type = 'headers', + rabbitmq_flush_interval_ms=1000, + rabbitmq_max_block_size=100, rabbitmq_routing_key_list = 'x-match=all,format=logs,type=report,year=2020', rabbitmq_format = 'JSONEachRow', rabbitmq_row_delimiter = '\\n'; @@ -1601,6 +1659,8 @@ def test_rabbitmq_headers_exchange(rabbitmq_cluster): rabbitmq_exchange_type = 'headers', rabbitmq_routing_key_list = 'x-match=all,format=logs,type=report,year=2019', rabbitmq_format = 'JSONEachRow', + rabbitmq_flush_interval_ms=1000, + rabbitmq_max_block_size=100, rabbitmq_row_delimiter = '\\n'; CREATE MATERIALIZED VIEW test.headers_exchange_{0}_mv TO test.destination AS SELECT key, value FROM test.headers_exchange_{0}; @@ -1673,6 +1733,8 @@ def test_rabbitmq_virtual_columns(rabbitmq_cluster): ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', rabbitmq_exchange_name = 'virtuals', + rabbitmq_flush_interval_ms=1000, + rabbitmq_max_block_size=100, rabbitmq_format = 'JSONEachRow'; CREATE MATERIALIZED VIEW test.view Engine=Log AS SELECT value, key, _exchange_name, _channel_id, _delivery_tag, _redelivered FROM test.rabbitmq_virtuals; @@ -1741,6 +1803,8 @@ def test_rabbitmq_virtual_columns_with_materialized_view(rabbitmq_cluster): ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', rabbitmq_exchange_name = 'virtuals_mv', + rabbitmq_flush_interval_ms=1000, + rabbitmq_max_block_size=100, rabbitmq_format = 'JSONEachRow'; CREATE TABLE test.view (key UInt64, value UInt64, exchange_name String, channel_id String, delivery_tag UInt64, redelivered UInt8) ENGINE = MergeTree() @@ -1826,6 +1890,8 @@ def test_rabbitmq_many_consumers_to_each_queue(rabbitmq_cluster): rabbitmq_exchange_name = 'many_consumers', rabbitmq_num_queues = 2, rabbitmq_num_consumers = 2, + rabbitmq_flush_interval_ms=1000, + rabbitmq_max_block_size=100, rabbitmq_queue_base = 'many_consumers', rabbitmq_format = 'JSONEachRow', rabbitmq_row_delimiter = '\\n'; @@ -1915,6 +1981,8 @@ def test_rabbitmq_restore_failed_connection_without_losses_1(rabbitmq_cluster): CREATE TABLE test.consume (key UInt64, value UInt64) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', + rabbitmq_flush_interval_ms=500, + rabbitmq_max_block_size = 100, rabbitmq_exchange_name = 'producer_reconnect', rabbitmq_format = 'JSONEachRow', rabbitmq_num_consumers = 2, @@ -1927,6 +1995,7 @@ def test_rabbitmq_restore_failed_connection_without_losses_1(rabbitmq_cluster): SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', rabbitmq_exchange_name = 'producer_reconnect', rabbitmq_persistent = '1', + rabbitmq_flush_interval_ms=1000, rabbitmq_format = 'JSONEachRow', rabbitmq_row_delimiter = '\\n'; """ @@ -1982,7 +2051,9 @@ def test_rabbitmq_restore_failed_connection_without_losses_1(rabbitmq_cluster): ) +@pytest.mark.skip(reason="Timeout: FIXME") def test_rabbitmq_restore_failed_connection_without_losses_2(rabbitmq_cluster): + logging.getLogger("pika").propagate = False instance.query( """ CREATE TABLE test.consumer_reconnect (key UInt64, value UInt64) @@ -1990,6 +2061,8 @@ def test_rabbitmq_restore_failed_connection_without_losses_2(rabbitmq_cluster): SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', rabbitmq_exchange_name = 'consumer_reconnect', rabbitmq_num_consumers = 10, + rabbitmq_flush_interval_ms = 100, + rabbitmq_max_block_size = 100, rabbitmq_num_queues = 10, rabbitmq_format = 'JSONEachRow', rabbitmq_row_delimiter = '\\n'; @@ 
-2044,10 +2117,11 @@ def test_rabbitmq_restore_failed_connection_without_losses_2(rabbitmq_cluster): # revive_rabbitmq() while True: - result = instance.query("SELECT count(DISTINCT key) FROM test.view") - time.sleep(1) + result = instance.query("SELECT count(DISTINCT key) FROM test.view").strip() if int(result) == messages_num: break + print(f"Result: {result} / {messages_num}") + time.sleep(1) instance.query( """ @@ -2062,6 +2136,7 @@ def test_rabbitmq_restore_failed_connection_without_losses_2(rabbitmq_cluster): def test_rabbitmq_commit_on_block_write(rabbitmq_cluster): + logging.getLogger("pika").propagate = False instance.query( """ CREATE TABLE test.rabbitmq (key UInt64, value UInt64) @@ -2070,6 +2145,7 @@ def test_rabbitmq_commit_on_block_write(rabbitmq_cluster): rabbitmq_exchange_name = 'block', rabbitmq_format = 'JSONEachRow', rabbitmq_queue_base = 'block', + rabbitmq_flush_interval_ms=1000, rabbitmq_max_block_size = 100, rabbitmq_row_delimiter = '\\n'; CREATE TABLE test.view (key UInt64, value UInt64) @@ -2150,6 +2226,7 @@ def test_rabbitmq_no_connection_at_startup_1(rabbitmq_cluster): SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', rabbitmq_exchange_name = 'cs', rabbitmq_format = 'JSONEachRow', + rabbitmq_flush_interval_ms=1000, rabbitmq_num_consumers = '5', rabbitmq_row_delimiter = '\\n'; """ @@ -2166,6 +2243,8 @@ def test_rabbitmq_no_connection_at_startup_2(rabbitmq_cluster): rabbitmq_exchange_name = 'cs', rabbitmq_format = 'JSONEachRow', rabbitmq_num_consumers = '5', + rabbitmq_flush_interval_ms=1000, + rabbitmq_max_block_size=100, rabbitmq_row_delimiter = '\\n'; CREATE TABLE test.view (key UInt64, value UInt64) ENGINE = MergeTree @@ -2222,6 +2301,7 @@ def test_rabbitmq_format_factory_settings(rabbitmq_cluster): ) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', rabbitmq_exchange_name = 'format_settings', + rabbitmq_flush_interval_ms=1000, rabbitmq_format = 'JSONEachRow', date_time_input_format = 'best_effort'; """ @@ -2284,6 +2364,7 @@ def test_rabbitmq_vhost(rabbitmq_cluster): SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', rabbitmq_exchange_name = 'vhost', rabbitmq_format = 'JSONEachRow', + rabbitmq_flush_interval_ms=1000, rabbitmq_vhost = '/' """ ) @@ -2312,6 +2393,7 @@ def test_rabbitmq_drop_table_properly(rabbitmq_cluster): CREATE TABLE test.rabbitmq_drop (key UInt64, value UInt64) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', + rabbitmq_flush_interval_ms=1000, rabbitmq_exchange_name = 'drop', rabbitmq_format = 'JSONEachRow', rabbitmq_queue_base = 'rabbit_queue_drop' @@ -2358,6 +2440,7 @@ def test_rabbitmq_queue_settings(rabbitmq_cluster): ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', rabbitmq_exchange_name = 'rabbit_exchange', + rabbitmq_flush_interval_ms=1000, rabbitmq_format = 'JSONEachRow', rabbitmq_queue_base = 'rabbit_queue_settings', rabbitmq_queue_settings_list = 'x-max-length=10,x-overflow=reject-publish' @@ -2390,12 +2473,11 @@ def test_rabbitmq_queue_settings(rabbitmq_cluster): time.sleep(5) - result = instance.query( - "SELECT count() FROM test.rabbitmq_settings", ignore_error=True - ) - while int(result) != 10: - time.sleep(0.5) + while True: result = instance.query("SELECT count() FROM test.view", ignore_error=True) + if int(result) == 10: + break + time.sleep(0.5) instance.query("DROP TABLE test.rabbitmq_settings") @@ -2439,6 +2521,7 @@ def test_rabbitmq_queue_consume(rabbitmq_cluster): SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', rabbitmq_format = 'JSONEachRow', rabbitmq_queue_base = 
'rabbit_queue', + rabbitmq_flush_interval_ms=1000, rabbitmq_queue_consume = 1; CREATE TABLE test.view (key UInt64, value UInt64) ENGINE = MergeTree ORDER BY key; @@ -2473,6 +2556,7 @@ def test_rabbitmq_produce_consume_avro(rabbitmq_cluster): ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', rabbitmq_format = 'Avro', + rabbitmq_flush_interval_ms=1000, rabbitmq_exchange_name = 'avro', rabbitmq_exchange_type = 'direct', rabbitmq_routing_key_list = 'avro'; @@ -2481,6 +2565,7 @@ def test_rabbitmq_produce_consume_avro(rabbitmq_cluster): ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', rabbitmq_format = 'Avro', + rabbitmq_flush_interval_ms=1000, rabbitmq_exchange_name = 'avro', rabbitmq_exchange_type = 'direct', rabbitmq_routing_key_list = 'avro'; @@ -2523,6 +2608,7 @@ def test_rabbitmq_bad_args(rabbitmq_cluster): CREATE TABLE test.drop (key UInt64, value UInt64) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', + rabbitmq_flush_interval_ms=1000, rabbitmq_exchange_name = 'f', rabbitmq_format = 'JSONEachRow'; """ @@ -2535,6 +2621,7 @@ def test_rabbitmq_issue_30691(rabbitmq_cluster): CREATE TABLE test.rabbitmq_drop (json String) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', + rabbitmq_flush_interval_ms=1000, rabbitmq_exchange_name = '30691', rabbitmq_row_delimiter = '\\n', -- Works only if adding this setting rabbitmq_format = 'LineAsString', @@ -2595,6 +2682,7 @@ def test_rabbitmq_drop_mv(rabbitmq_cluster): SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', rabbitmq_exchange_name = 'mv', rabbitmq_format = 'JSONEachRow', + rabbitmq_flush_interval_ms=1000, rabbitmq_queue_base = 'drop_mv'; """ ) @@ -2654,10 +2742,12 @@ def test_rabbitmq_drop_mv(rabbitmq_cluster): result = instance.query("SELECT * FROM test.view ORDER BY key") if rabbitmq_check_result(result): break + time.sleep(1) rabbitmq_check_result(result, True) - instance.query("DROP VIEW test.consumer") + instance.query("DROP VIEW test.consumer NO DELAY") + time.sleep(10) for i in range(50, 60): channel.basic_publish( exchange="mv", routing_key="", body=json.dumps({"key": i, "value": i}) @@ -2685,6 +2775,7 @@ def test_rabbitmq_random_detach(rabbitmq_cluster): rabbitmq_exchange_name = 'random', rabbitmq_queue_base = 'random', rabbitmq_num_queues = 2, + rabbitmq_flush_interval_ms=1000, rabbitmq_num_consumers = 2, rabbitmq_format = 'JSONEachRow'; CREATE TABLE test.view (key UInt64, value UInt64, channel_id String) @@ -2749,7 +2840,9 @@ def test_rabbitmq_predefined_configuration(rabbitmq_cluster): instance.query( """ CREATE TABLE test.rabbitmq (key UInt64, value UInt64) - ENGINE = RabbitMQ(rabbit1, rabbitmq_vhost = '/') """ + ENGINE = RabbitMQ(rabbit1, rabbitmq_vhost = '/') + SETTINGS rabbitmq_flush_interval_ms=1000; + """ ) channel.basic_publish( @@ -2785,6 +2878,7 @@ def test_rabbitmq_msgpack(rabbitmq_cluster): settings rabbitmq_host_port = 'rabbitmq1:5672', rabbitmq_exchange_name = 'xhep', rabbitmq_format = 'MsgPack', + rabbitmq_flush_interval_ms=1000, rabbitmq_num_consumers = 1; create table rabbit_out (val String) @@ -2792,6 +2886,7 @@ def test_rabbitmq_msgpack(rabbitmq_cluster): settings rabbitmq_host_port = 'rabbitmq1:5672', rabbitmq_exchange_name = 'xhep', rabbitmq_format = 'MsgPack', + rabbitmq_flush_interval_ms=1000, rabbitmq_num_consumers = 1; set stream_like_engine_allow_direct_select=1; insert into rabbit_out select 'kek'; @@ -2827,12 +2922,14 @@ def test_rabbitmq_address(rabbitmq_cluster): SETTINGS rabbitmq_exchange_name = 'rxhep', rabbitmq_format = 'CSV', 
rabbitmq_num_consumers = 1, + rabbitmq_flush_interval_ms=1000, rabbitmq_address='amqp://root:clickhouse@rabbitmq1:5672/'; create table rabbit_out (val String) engine=RabbitMQ SETTINGS rabbitmq_exchange_name = 'rxhep', rabbitmq_format = 'CSV', rabbitmq_num_consumers = 1, + rabbitmq_flush_interval_ms=1000, rabbitmq_address='amqp://root:clickhouse@rabbitmq1:5672/'; set stream_like_engine_allow_direct_select=1; insert into rabbit_out select 'kek'; @@ -2856,6 +2953,7 @@ def test_rabbitmq_address(rabbitmq_cluster): instance2.query("drop table rabbit_out sync") +@pytest.mark.skip(reason="FIXME: flaky (something with channel.start_consuming())") def test_format_with_prefix_and_suffix(rabbitmq_cluster): instance.query( """ @@ -2887,11 +2985,14 @@ def test_format_with_prefix_and_suffix(rabbitmq_cluster): insert_messages = [] def onReceived(channel, method, properties, body): - insert_messages.append(body.decode()) + message = body.decode() + insert_messages.append(message) + print(f"Received {len(insert_messages)} message: {message}") if len(insert_messages) == 2: channel.stop_consuming() consumer.basic_consume(onReceived, queue_name) + consumer.start_consuming() consumer_connection.close() @@ -2901,6 +3002,7 @@ ) +@pytest.mark.skip(reason="FIXME: flaky (something with channel.start_consuming())") def test_max_rows_per_message(rabbitmq_cluster): num_rows = 5 @@ -2917,6 +3019,7 @@ def test_max_rows_per_message(rabbitmq_cluster): rabbitmq_exchange_type = 'direct', rabbitmq_routing_key_list = 'custom1', rabbitmq_max_rows_per_message = 3, + rabbitmq_flush_interval_ms = 1000, format_custom_result_before_delimiter = '\n', format_custom_result_after_delimiter = '\n'; @@ -2972,6 +3075,7 @@ def test_max_rows_per_message(rabbitmq_cluster): assert result == "0\t0\n10\t100\n20\t200\n30\t300\n40\t400\n" +@pytest.mark.skip(reason="FIXME: flaky (something with channel.start_consuming())") def test_row_based_formats(rabbitmq_cluster): num_rows = 10 @@ -3006,9 +3110,11 @@ def test_row_based_formats(rabbitmq_cluster): rabbitmq_format = '{format_name}', rabbitmq_exchange_name = '{format_name}', rabbitmq_exchange_type = 'direct', + rabbitmq_max_block_size = 100, + rabbitmq_flush_interval_ms = 1000, rabbitmq_routing_key_list = '{format_name}', rabbitmq_max_rows_per_message = 5; - + CREATE MATERIALIZED VIEW test.view Engine=Log AS SELECT key, value FROM test.rabbit; """ @@ -3066,6 +3172,7 @@ def test_row_based_formats(rabbitmq_cluster): assert result == expected +@pytest.mark.skip(reason="FIXME: flaky (something with channel.start_consuming())") def test_block_based_formats_1(rabbitmq_cluster): instance.query( """ @@ -3074,6 +3181,8 @@ def test_block_based_formats_1(rabbitmq_cluster): SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', rabbitmq_exchange_name = 'PrettySpace', rabbitmq_exchange_type = 'direct', + rabbitmq_max_block_size = 100, + rabbitmq_flush_interval_ms = 1000, rabbitmq_routing_key_list = 'PrettySpace', rabbitmq_format = 'PrettySpace'; """ @@ -3125,6 +3234,7 @@ ] +@pytest.mark.skip(reason="FIXME: flaky (something with channel.start_consuming())") def test_block_based_formats_2(rabbitmq_cluster): num_rows = 100 @@ -3150,8 +3260,10 @@ def test_block_based_formats_2(rabbitmq_cluster): rabbitmq_format = '{format_name}', rabbitmq_exchange_name = '{format_name}', rabbitmq_exchange_type = 'direct', + rabbitmq_max_block_size = 100, + rabbitmq_flush_interval_ms = 1000, rabbitmq_routing_key_list = '{format_name}'; - + CREATE MATERIALIZED VIEW test.view Engine=Log AS SELECT key, value FROM test.rabbit; """ @@ -3206,3 +3318,172 @@ def test_block_based_formats_2(rabbitmq_cluster): for i in range(num_rows): expected += str(i * 10) + "\t" + str(i * 100) + "\n" assert result == expected
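All of the tests above carry the same "flaky (something with channel.start_consuming())" skip. As a hedged sketch, not part of the test suite: pika's BlockingChannel.consume() generator accepts an inactivity_timeout, which bounds the wait instead of blocking indefinitely the way basic_consume() plus start_consuming() can; the helper name consume_n and its defaults are hypothetical.

    def consume_n(channel, queue_name, n, inactivity_timeout=5):
        # BlockingChannel.consume() yields (None, None, None) after
        # inactivity_timeout seconds with no delivery, so the loop can
        # give up instead of hanging inside start_consuming().
        received = []
        for method, properties, body in channel.consume(
            queue_name, inactivity_timeout=inactivity_timeout
        ):
            if method is None:
                break  # timed out waiting for a message
            channel.basic_ack(method.delivery_tag)
            received.append(body.decode())
            if len(received) == n:
                break
        channel.cancel()  # cancel the consumer opened by consume()
        return received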
+ + +def test_rabbitmq_flush_by_block_size(rabbitmq_cluster): + instance.query( + """ + DROP TABLE IF EXISTS test.view; + DROP TABLE IF EXISTS test.consumer; + + CREATE TABLE test.rabbitmq (key UInt64, value UInt64) + ENGINE = RabbitMQ + SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', + rabbitmq_exchange_name = 'flush_by_block', + rabbitmq_queue_base = 'flush_by_block', + rabbitmq_max_block_size = 100, + rabbitmq_flush_interval_ms = 640000, /* should not flush by time during test */ + rabbitmq_format = 'JSONEachRow'; + + CREATE TABLE test.view (key UInt64, value UInt64) + ENGINE = MergeTree() + ORDER BY key; + + CREATE MATERIALIZED VIEW test.consumer TO test.view AS + SELECT * FROM test.rabbitmq; + + SYSTEM STOP MERGES; + """ + ) + + cancel = threading.Event() + + def produce(): + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, + rabbitmq_cluster.rabbitmq_port, + "/", + credentials, + ) + connection = pika.BlockingConnection(parameters) + + while not cancel.is_set(): + try: + channel = connection.channel() + channel.basic_publish( + exchange="flush_by_block", + routing_key="", + body=json.dumps({"key": 0, "value": 0}), + ) + except Exception as e: + print(f"Got error: {str(e)}") + + produce_thread = threading.Thread(target=produce) + produce_thread.start() + + while 0 == int( + instance.query( + "SELECT count() FROM system.parts WHERE database = 'test' AND table = 'view' AND name = 'all_1_1_0'" + ) + ): + time.sleep(0.5) + + cancel.set() + produce_thread.join() + + # more flushes can happen during the test, we need to check only the result of the first flush (part named all_1_1_0). + result = instance.query("SELECT count() FROM test.view WHERE _part='all_1_1_0'") + # logging.debug(result) + + instance.query( + """ + DROP TABLE test.consumer; + DROP TABLE test.view; + DROP TABLE test.rabbitmq; + """ + ) + + # 100 = first poll should return 100 messages (and rows) + # not waiting for stream_flush_interval_ms + assert ( + int(result) == 100 + ), "Messages from rabbitmq should be flushed when block of size rabbitmq_max_block_size is formed!"
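Both flush tests (test_rabbitmq_flush_by_block_size above and test_rabbitmq_flush_by_time below) detect the first materialized flush by polling system.parts for a part named all_1_1_0. A minimal sketch of that busy-wait as a reusable helper, assuming instance behaves like the ClickHouseInstance fixture used throughout this file; wait_for_part and its defaults are hypothetical:

    import time

    def wait_for_part(instance, table, part_name="all_1_1_0", timeout=60):
        # Poll system.parts until the named part of test.<table> appears,
        # mirroring the loops in both flush tests.
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            count = int(
                instance.query(
                    "SELECT count() FROM system.parts "
                    f"WHERE database = 'test' AND table = '{table}' AND name = '{part_name}'"
                )
            )
            if count > 0:
                return
            time.sleep(0.5)
        raise TimeoutError(f"part {part_name} of test.{table} never appeared")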
+ + +def test_rabbitmq_flush_by_time(rabbitmq_cluster): + instance.query( + """ + DROP TABLE IF EXISTS test.view; + DROP TABLE IF EXISTS test.consumer; + + CREATE TABLE test.rabbitmq (key UInt64, value UInt64) + ENGINE = RabbitMQ + SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', + rabbitmq_exchange_name = 'flush_by_time', + rabbitmq_queue_base = 'flush_by_time', + rabbitmq_max_block_size = 100, + rabbitmq_flush_interval_ms = 5000, + rabbitmq_format = 'JSONEachRow'; + + CREATE TABLE test.view (key UInt64, value UInt64, ts DateTime64(3) MATERIALIZED now64(3)) + ENGINE = MergeTree() + ORDER BY key; + """ + ) + + cancel = threading.Event() + + def produce(): + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, + rabbitmq_cluster.rabbitmq_port, + "/", + credentials, + ) + connection = pika.BlockingConnection(parameters) + + while not cancel.is_set(): + try: + channel = connection.channel() + channel.basic_publish( + exchange="flush_by_time", + routing_key="", + body=json.dumps({"key": 0, "value": 0}), + ) + print("Produced a message") + time.sleep(0.8) + except Exception as e: + print(f"Got error: {str(e)}") + + produce_thread = threading.Thread(target=produce) + produce_thread.start() + + instance.query( + """ + CREATE MATERIALIZED VIEW test.consumer TO test.view AS + SELECT * FROM test.rabbitmq; + """ + ) + + while True: + time.sleep(0.2) + count = instance.query( + "SELECT count() FROM system.parts WHERE database = 'test' AND table = 'view'" + ) + print(f"kssenii total count: {count}") + count = int( + instance.query( + "SELECT count() FROM system.parts WHERE database = 'test' AND table = 'view' AND name = 'all_1_1_0'" + ) + ) + print(f"kssenii count: {count}") + if count > 0: + break + + time.sleep(12) + result = instance.query("SELECT uniqExact(ts) FROM test.view") + + cancel.set() + produce_thread.join() + + instance.query( + """ + DROP TABLE test.consumer; + DROP TABLE test.view; + DROP TABLE test.rabbitmq; + """ + ) + + assert int(result) == 3
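The closing assertion encodes the expected flush count: the block that produced part all_1_1_0 contributes one distinct ts value, and with rabbitmq_flush_interval_ms = 5000 the 12-second sleep should fit two more time-based flushes. A sketch of that arithmetic, under the assumption that flushes land on a fixed 5-second cadence:

    flush_interval_ms = 5000
    observation_window_ms = 12000

    # one flush created part all_1_1_0, then the observation window
    # fits 12000 // 5000 = 2 additional time-based flushes
    expected_distinct_ts = 1 + observation_window_ms // flush_interval_ms
    assert expected_distinct_ts == 3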
diff --git a/tests/performance/column_array_filter.xml b/tests/performance/column_array_filter.xml new file mode 100644 index 00000000000..ee57c0e17e4 --- /dev/null +++ b/tests/performance/column_array_filter.xml @@ -0,0 +1,12 @@ +<test> + <query>SELECT arr FROM (SELECT cast(range(number % 10) as Array(Int128)) AS arr FROM (SELECT * FROM system.numbers LIMIT 10000000) WHERE length(arr) <= 5) format Null</query> + <query>SELECT arr FROM (SELECT cast(range(number % 10) as Array(UInt128)) AS arr FROM (SELECT * FROM system.numbers LIMIT 10000000) WHERE length(arr) <= 5) format Null</query> + + <query>SELECT arr FROM (SELECT cast(range(number % 10) as Array(Int256)) AS arr FROM (SELECT * FROM system.numbers LIMIT 10000000) WHERE length(arr) <= 5) format Null</query> + <query>SELECT arr FROM (SELECT cast(range(number % 10) as Array(UInt256)) AS arr FROM (SELECT * FROM system.numbers LIMIT 10000000) WHERE length(arr) <= 5) format Null</query> + + <query>SELECT arr FROM (SELECT cast(range(number % 10) as Array(Decimal32(0))) AS arr FROM (SELECT * FROM system.numbers LIMIT 10000000) WHERE length(arr) <= 5) format Null</query> + <query>SELECT arr FROM (SELECT cast(range(number % 10) as Array(Decimal64(0))) AS arr FROM (SELECT * FROM system.numbers LIMIT 10000000) WHERE length(arr) <= 5) format Null</query> + <query>SELECT arr FROM (SELECT cast(range(number % 10) as Array(Decimal128(0))) AS arr FROM (SELECT * FROM system.numbers LIMIT 10000000) WHERE length(arr) <= 5) format Null</query> + <query>SELECT arr FROM (SELECT cast(range(number % 10) as Array(Decimal256(0))) AS arr FROM (SELECT * FROM system.numbers LIMIT 10000000) WHERE length(arr) <= 5) format Null</query> +</test> diff --git a/tests/performance/column_array_replicate.xml b/tests/performance/column_array_replicate.xml new file mode 100644 index 00000000000..c07e8452797 --- /dev/null +++ b/tests/performance/column_array_replicate.xml @@ -0,0 +1,12 @@ +<test> + <query>with cast([1,2,3,4] as Array(Int128)) as elem select arrayWithConstant(rand() % 10 + 5, materialize(elem)) from numbers(1000000) format Null</query> + <query>with cast([1,2,3,4] as Array(UInt128)) as elem select arrayWithConstant(rand() % 10 + 5, materialize(elem)) from numbers(1000000) format Null</query> + + <query>with cast([1,2,3,4] as Array(Int256)) as elem select arrayWithConstant(rand() % 10 + 5, materialize(elem)) from numbers(1000000) format Null</query> + <query>with cast([1,2,3,4] as Array(UInt256)) as elem select arrayWithConstant(rand() % 10 + 5, materialize(elem)) from numbers(1000000) format Null</query> + + <query>with cast([1,2,3,4] as Array(Decimal32(0))) as elem select arrayWithConstant(rand() % 10 + 5, materialize(elem)) from numbers(1000000) format Null</query> + <query>with cast([1,2,3,4] as Array(Decimal64(0))) as elem select arrayWithConstant(rand() % 10 + 5, materialize(elem)) from numbers(1000000) format Null</query> + <query>with cast([1,2,3,4] as Array(Decimal128(0))) as elem select arrayWithConstant(rand() % 10 + 5, materialize(elem)) from numbers(1000000) format Null</query> + <query>with cast([1,2,3,4] as Array(Decimal256(0))) as elem select arrayWithConstant(rand() % 10 + 5, materialize(elem)) from numbers(1000000) format Null</query> +</test> diff --git a/tests/performance/memory_bound_merging.xml b/tests/performance/memory_bound_merging.xml index 3b13400151c..15dc1b29fba 100644 --- a/tests/performance/memory_bound_merging.xml +++ b/tests/performance/memory_bound_merging.xml @@ -11,7 +11,5 @@ select avg(a) from remote('127.0.0.{{1,2}}', default, t_mbm) group by a format Null - select * from remote('127.0.0.{{1,2}}', default, t_mbm) group by a format Null settings allow_experimental_parallel_reading_from_replicas = 1, max_parallel_replicas = 2, use_hedged_requests = 0 - drop table t_mbm diff --git a/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql b/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql index e6c9ea50924..686d04013fe 100644 --- a/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql +++ b/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql @@ -44,7 +44,7 @@ select 100, max2((select count() from logs where level = 'Warning' and message_f group by message_format_string order by count() desc limit 1) / (select count() from logs), 0.005); -- Same as above for Error -select 110, max2((select count() from logs where level = 'Warning' group by message_format_string order by count() desc limit 1) / (select count() from logs), 0.01); +select 110, max2((select count() from logs where level = 'Error' group by message_format_string order by count() desc limit 1) / (select count() from logs), 0.01); -- Avoid too noisy messages: limit the number of messages with high frequency select 120, max2(count(), 3) from (select count() / (select count() from logs) as freq, message_format_string from logs group by message_format_string having freq > 0.10); diff --git a/tests/queries/0_stateless/00304_http_external_data.sh b/tests/queries/0_stateless/00304_http_external_data.sh index 4a097249cca..def17bc5cd1 100755 --- a/tests/queries/0_stateless/00304_http_external_data.sh +++ b/tests/queries/0_stateless/00304_http_external_data.sh @@ -6,4 +6,10 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" 
&& pwd) echo -ne '1,Hello\n2,World\n' | ${CLICKHOUSE_CURL} -sSF 'file=@-' "${CLICKHOUSE_URL}&query=SELECT+*+FROM+file&file_format=CSV&file_types=UInt8,String"; echo -ne '1@Hello\n2@World\n' | ${CLICKHOUSE_CURL} -sSF 'file=@-' "${CLICKHOUSE_URL}&query=SELECT+*+FROM+file&file_format=CSV&file_types=UInt8,String&format_csv_delimiter=@"; -echo -ne '\x01\x00\x00\x00\x02\x00\x00\x00' | ${CLICKHOUSE_CURL} -sSF "tmp=@-" "${CLICKHOUSE_URL}&query=SELECT+*+FROM+tmp&tmp_structure=TaskID+UInt32&tmp_format=RowBinary"; + +# use big-endian version of binary data for s390x +if [[ $(uname -a | grep s390x) ]]; then + echo -ne '\x00\x00\x00\x01\x00\x00\x00\x02' | ${CLICKHOUSE_CURL} -sSF "tmp=@-" "${CLICKHOUSE_URL}&query=SELECT+*+FROM+tmp&tmp_structure=TaskID+UInt32&tmp_format=RowBinary"; +else + echo -ne '\x01\x00\x00\x00\x02\x00\x00\x00' | ${CLICKHOUSE_CURL} -sSF "tmp=@-" "${CLICKHOUSE_URL}&query=SELECT+*+FROM+tmp&tmp_structure=TaskID+UInt32&tmp_format=RowBinary"; +fi diff --git a/tests/queries/0_stateless/00419_show_sql_queries.sh b/tests/queries/0_stateless/00419_show_sql_queries.sh index 1737e874ff2..607703b385a 100755 --- a/tests/queries/0_stateless/00419_show_sql_queries.sh +++ b/tests/queries/0_stateless/00419_show_sql_queries.sh @@ -7,3 +7,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT -q "SHOW PROCESSLIST" &>/dev/null $CLICKHOUSE_CLIENT -q "SHOW DATABASES" &>/dev/null $CLICKHOUSE_CLIENT -q "SHOW TABLES" &>/dev/null +$CLICKHOUSE_CLIENT -q "SHOW ENGINES" &>/dev/null diff --git a/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh b/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh index 399a4677a44..c184b58bf53 100755 --- a/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh +++ b/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# Tags: no-random-merge-tree-settings set -e @@ -7,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS preferred_block_size_bytes" -$CLICKHOUSE_CLIENT -q "CREATE TABLE preferred_block_size_bytes (p Date, s String) ENGINE = MergeTree PARTITION BY p ORDER BY p SETTINGS index_granularity=1, index_granularity_bytes=0, min_bytes_for_wide_part = 0" +$CLICKHOUSE_CLIENT -q "CREATE TABLE preferred_block_size_bytes (p Date, s String) ENGINE = MergeTree PARTITION BY p ORDER BY p SETTINGS index_granularity=1, index_granularity_bytes=0, min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0" $CLICKHOUSE_CLIENT -q "INSERT INTO preferred_block_size_bytes (s) SELECT '16_bytes_-_-_-_' AS s FROM system.numbers LIMIT 10, 90" $CLICKHOUSE_CLIENT -q "OPTIMIZE TABLE preferred_block_size_bytes" $CLICKHOUSE_CLIENT --preferred_block_size_bytes=26 -q "SELECT DISTINCT blockSize(), ignore(p, s) FROM preferred_block_size_bytes" @@ -18,7 +19,7 @@ $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS preferred_block_size_bytes" # PREWHERE using empty column $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS pbs" -$CLICKHOUSE_CLIENT -q "CREATE TABLE pbs (p Date, i UInt64, sa Array(String)) ENGINE = MergeTree PARTITION BY p ORDER BY p SETTINGS index_granularity=100, index_granularity_bytes=0, min_bytes_for_wide_part = 0" +$CLICKHOUSE_CLIENT -q "CREATE TABLE pbs (p Date, i UInt64, sa Array(String)) ENGINE = MergeTree PARTITION BY p ORDER BY p SETTINGS index_granularity=100, index_granularity_bytes=0, min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0" $CLICKHOUSE_CLIENT -q "INSERT INTO pbs (p, i, sa) SELECT toDate(i % 30) AS p, number AS i, ['a'] AS sa FROM system.numbers LIMIT 1000" $CLICKHOUSE_CLIENT -q "ALTER TABLE pbs ADD COLUMN s UInt8 DEFAULT 0" $CLICKHOUSE_CLIENT --preferred_block_size_bytes=100000 -q "SELECT count() FROM pbs PREWHERE s = 0" @@ -29,7 +30,7 @@ $CLICKHOUSE_CLIENT -q "DROP TABLE pbs" # Nullable PREWHERE $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS nullable_prewhere" -$CLICKHOUSE_CLIENT -q "CREATE TABLE nullable_prewhere (p Date, f Nullable(UInt64), d UInt64) ENGINE = MergeTree PARTITION BY p ORDER BY p SETTINGS index_granularity=8, index_granularity_bytes=0, min_bytes_for_wide_part = 0" +$CLICKHOUSE_CLIENT -q "CREATE TABLE nullable_prewhere (p Date, f Nullable(UInt64), d UInt64) ENGINE = MergeTree PARTITION BY p ORDER BY p SETTINGS index_granularity=8, index_granularity_bytes=0, min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0" $CLICKHOUSE_CLIENT -q "INSERT INTO nullable_prewhere SELECT toDate(0) AS p, if(number % 2 = 0, CAST(number AS Nullable(UInt64)), CAST(NULL AS Nullable(UInt64))) AS f, number as d FROM system.numbers LIMIT 1001" $CLICKHOUSE_CLIENT -q "SELECT sum(d), sum(f), max(d) FROM nullable_prewhere PREWHERE NOT isNull(f)" $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS nullable_prewhere" diff --git a/tests/queries/0_stateless/00626_in_syntax.reference b/tests/queries/0_stateless/00626_in_syntax.reference index bbde210382c..5fb054d6ddb 100644 --- a/tests/queries/0_stateless/00626_in_syntax.reference +++ b/tests/queries/0_stateless/00626_in_syntax.reference @@ -25,15 +25,9 @@ 1 1 0 -1 -0 -1 -1 -0 - 1 1 -1 - (1,2) ((1,2),(3,4)) 1 1 - diff --git a/tests/queries/0_stateless/00626_in_syntax.sql b/tests/queries/0_stateless/00626_in_syntax.sql index 8b5ba2bec37..ac7ec897701 100644 --- a/tests/queries/0_stateless/00626_in_syntax.sql +++ b/tests/queries/0_stateless/00626_in_syntax.sql @@ -28,16 +28,10 @@ select 1 in (0 + 1, 1, toInt8(sin(5))); select (0 + 1, 1, toInt8(sin(5))) in (0 + 1, 1, toInt8(sin(5))); select identity(tuple(1)) in 
(tuple(1), tuple(2)); select identity(tuple(1)) in (tuple(0), tuple(2)); -select identity(tuple(1)) in (identity(tuple(1)), tuple(2)); -select identity(tuple(1)) in (identity(tuple(0)), tuple(2)); -select identity(tuple(1)) in (identity(tuple(1)), identity(tuple(2))); -select identity(tuple(1)) in (identity(tuple(1)), identity(identity(tuple(2)))); -select identity(tuple(1)) in (identity(tuple(0)), identity(identity(tuple(2)))); select '-'; select identity((1, 2)) in (1, 2); select identity((1, 2)) in ((1, 2), (3, 4)); -select identity((1, 2)) in ((1, 2), identity((3, 4))); select '-'; select (1,2) as x, ((1,2),(3,4)) as y, 1 in x, x in y; @@ -50,4 +44,3 @@ select (1, 2) in (select (1, 2)); select identity(tuple(1)) in (select tuple(1)); select identity((1, 2)) in (select 1, 2); select identity((1, 2)) in (select (1, 2)); - diff --git a/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh b/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh index 00a7e3c5232..e10b2f86145 100755 --- a/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh +++ b/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# Tags: no-random-merge-tree-settings #-------------------------------------------- # Description of test result: diff --git a/tests/queries/0_stateless/00753_system_columns_and_system_tables_long.sql b/tests/queries/0_stateless/00753_system_columns_and_system_tables_long.sql index bbc7bedcb4f..f4c4110cd5b 100644 --- a/tests/queries/0_stateless/00753_system_columns_and_system_tables_long.sql +++ b/tests/queries/0_stateless/00753_system_columns_and_system_tables_long.sql @@ -1,4 +1,4 @@ --- Tags: long, no-s3-storage +-- Tags: long, no-s3-storage, no-random-merge-tree-settings DROP TABLE IF EXISTS check_system_tables; diff --git a/tests/queries/0_stateless/00804_test_delta_codec_compression.sql b/tests/queries/0_stateless/00804_test_delta_codec_compression.sql index ca9bb1b177e..4afd8e6d860 100644 --- a/tests/queries/0_stateless/00804_test_delta_codec_compression.sql +++ b/tests/queries/0_stateless/00804_test_delta_codec_compression.sql @@ -1,4 +1,4 @@ --- Tags: no-parallel +-- Tags: no-parallel, no-random-merge-tree-settings SET send_logs_level = 'fatal'; SET joined_subquery_requires_alias = 0; diff --git a/tests/queries/0_stateless/00926_adaptive_index_granularity_collapsing_merge_tree.sql b/tests/queries/0_stateless/00926_adaptive_index_granularity_collapsing_merge_tree.sql index 58b266f106f..42d5f60b8b2 100644 --- a/tests/queries/0_stateless/00926_adaptive_index_granularity_collapsing_merge_tree.sql +++ b/tests/queries/0_stateless/00926_adaptive_index_granularity_collapsing_merge_tree.sql @@ -1,3 +1,5 @@ +-- Tags: no-random-merge-tree-settings + ----- Group of very similar simple tests ------ DROP TABLE IF EXISTS zero_rows_per_granule; @@ -10,6 +12,7 @@ CREATE TABLE zero_rows_per_granule ( ) ENGINE CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes=20, min_index_granularity_bytes=10, write_final_mark = 0, min_bytes_for_wide_part = 0, + min_rows_for_wide_part = 0, enable_vertical_merge_algorithm=1, vertical_merge_algorithm_min_rows_to_activate=0, vertical_merge_algorithm_min_columns_to_activate=0; @@ -42,6 +45,7 @@ CREATE TABLE four_rows_per_granule ( ) ENGINE CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes=110, min_index_granularity_bytes=100, write_final_mark = 0, min_bytes_for_wide_part = 0, + 
min_rows_for_wide_part = 0, enable_vertical_merge_algorithm=1, vertical_merge_algorithm_min_rows_to_activate=0, vertical_merge_algorithm_min_columns_to_activate=0; diff --git a/tests/queries/0_stateless/00926_adaptive_index_granularity_merge_tree.sql b/tests/queries/0_stateless/00926_adaptive_index_granularity_merge_tree.sql index a29a1bf9d1c..e812a6cae5c 100644 --- a/tests/queries/0_stateless/00926_adaptive_index_granularity_merge_tree.sql +++ b/tests/queries/0_stateless/00926_adaptive_index_granularity_merge_tree.sql @@ -1,4 +1,4 @@ --- Tags: no-parallel +-- Tags: no-parallel, no-random-merge-tree-settings ----- Group of very similar simple tests ------ select '----HORIZONTAL MERGE TESTS----'; @@ -9,7 +9,7 @@ CREATE TABLE zero_rows_per_granule ( k UInt64, v1 UInt64, v2 Int64 -) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 20, min_index_granularity_bytes = 10, write_final_mark = 0, min_bytes_for_wide_part = 0; +) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 20, min_index_granularity_bytes = 10, write_final_mark = 0, min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0; INSERT INTO zero_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); @@ -36,7 +36,7 @@ CREATE TABLE two_rows_per_granule ( k UInt64, v1 UInt64, v2 Int64 -) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 40, min_index_granularity_bytes = 10, write_final_mark = 0, min_bytes_for_wide_part = 0; +) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 40, min_index_granularity_bytes = 10, write_final_mark = 0, min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0; INSERT INTO two_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); @@ -63,7 +63,7 @@ CREATE TABLE four_rows_per_granule ( k UInt64, v1 UInt64, v2 Int64 -) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 110, min_index_granularity_bytes = 10, write_final_mark = 0, min_bytes_for_wide_part = 0; +) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 110, min_index_granularity_bytes = 10, write_final_mark = 0, min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0; INSERT INTO four_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); @@ -97,7 +97,7 @@ CREATE TABLE huge_granularity_small_blocks ( k UInt64, v1 UInt64, v2 Int64 -) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 1000000, write_final_mark = 0, min_bytes_for_wide_part = 0; +) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 1000000, write_final_mark = 0, min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0; INSERT INTO huge_granularity_small_blocks (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); @@ -128,7 +128,7 @@ CREATE TABLE adaptive_granularity_alter ( k UInt64, v1 UInt64, v2 Int64 -) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 110, min_index_granularity_bytes = 100, write_final_mark = 0, 
min_bytes_for_wide_part = 0; +) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 110, min_index_granularity_bytes = 100, write_final_mark = 0, min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0; INSERT INTO adaptive_granularity_alter (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); @@ -188,7 +188,8 @@ CREATE TABLE zero_rows_per_granule ( enable_vertical_merge_algorithm=1, vertical_merge_algorithm_min_rows_to_activate=0, vertical_merge_algorithm_min_columns_to_activate=0, - min_bytes_for_wide_part = 0; + min_bytes_for_wide_part = 0, + min_rows_for_wide_part = 0; INSERT INTO zero_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); @@ -223,7 +224,8 @@ CREATE TABLE two_rows_per_granule ( enable_vertical_merge_algorithm=1, vertical_merge_algorithm_min_rows_to_activate=0, vertical_merge_algorithm_min_columns_to_activate=0, - min_bytes_for_wide_part = 0; + min_bytes_for_wide_part = 0, + min_rows_for_wide_part = 0; INSERT INTO two_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); @@ -257,7 +259,8 @@ CREATE TABLE four_rows_per_granule ( enable_vertical_merge_algorithm=1, vertical_merge_algorithm_min_rows_to_activate=0, vertical_merge_algorithm_min_columns_to_activate=0, - min_bytes_for_wide_part = 0; + min_bytes_for_wide_part = 0, + min_rows_for_wide_part = 0; INSERT INTO four_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); @@ -296,7 +299,8 @@ CREATE TABLE huge_granularity_small_blocks ( enable_vertical_merge_algorithm=1, vertical_merge_algorithm_min_rows_to_activate=0, vertical_merge_algorithm_min_columns_to_activate=0, - min_bytes_for_wide_part = 0; + min_bytes_for_wide_part = 0, + min_rows_for_wide_part = 0; INSERT INTO huge_granularity_small_blocks (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); @@ -334,7 +338,8 @@ CREATE TABLE adaptive_granularity_alter ( enable_vertical_merge_algorithm=1, vertical_merge_algorithm_min_rows_to_activate=0, vertical_merge_algorithm_min_columns_to_activate=0, - min_bytes_for_wide_part = 0; + min_bytes_for_wide_part = 0, + min_rows_for_wide_part = 0; INSERT INTO adaptive_granularity_alter (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); diff --git a/tests/queries/0_stateless/00926_adaptive_index_granularity_pk.sql b/tests/queries/0_stateless/00926_adaptive_index_granularity_pk.sql index 47e1d3fea0f..636cd6589ce 100644 --- a/tests/queries/0_stateless/00926_adaptive_index_granularity_pk.sql +++ b/tests/queries/0_stateless/00926_adaptive_index_granularity_pk.sql @@ -1,4 +1,4 @@ --- Tags: no-parallel +-- Tags: no-parallel, no-random-merge-tree-settings SET send_logs_level = 'fatal'; SELECT '----00489----'; @@ -77,7 +77,7 @@ CREATE TABLE large_alter_table_00926 ( somedate Date CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12)), id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC), data String CODEC(ZSTD(2), LZ4HC, NONE, LZ4, LZ4) -) ENGINE = MergeTree() PARTITION BY somedate ORDER BY id SETTINGS min_index_granularity_bytes=30, 
write_final_mark = 0, min_bytes_for_wide_part = '10M'; +) ENGINE = MergeTree() PARTITION BY somedate ORDER BY id SETTINGS min_index_granularity_bytes=30, write_final_mark = 0, min_bytes_for_wide_part = '10M', min_rows_for_wide_part = 0; INSERT INTO large_alter_table_00926 SELECT toDate('2019-01-01'), number, toString(number + rand()) FROM system.numbers LIMIT 300000; diff --git a/tests/queries/0_stateless/00926_adaptive_index_granularity_replacing_merge_tree.sql b/tests/queries/0_stateless/00926_adaptive_index_granularity_replacing_merge_tree.sql index 075548df6c3..a5170aa8058 100644 --- a/tests/queries/0_stateless/00926_adaptive_index_granularity_replacing_merge_tree.sql +++ b/tests/queries/0_stateless/00926_adaptive_index_granularity_replacing_merge_tree.sql @@ -1,4 +1,4 @@ --- Tags: no-parallel +-- Tags: no-parallel, no-random-merge-tree-settings ----- Group of very similar simple tests ------ DROP TABLE IF EXISTS zero_rows_per_granule; @@ -14,7 +14,9 @@ CREATE TABLE zero_rows_per_granule ( write_final_mark = 0, enable_vertical_merge_algorithm=1, vertical_merge_algorithm_min_rows_to_activate=0, - vertical_merge_algorithm_min_columns_to_activate=0, min_bytes_for_wide_part = 0; + vertical_merge_algorithm_min_columns_to_activate=0, + min_bytes_for_wide_part = 0, + min_rows_for_wide_part = 0; INSERT INTO zero_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); @@ -47,7 +49,9 @@ CREATE TABLE two_rows_per_granule ( write_final_mark = 0, enable_vertical_merge_algorithm=1, vertical_merge_algorithm_min_rows_to_activate=0, - vertical_merge_algorithm_min_columns_to_activate=0, min_bytes_for_wide_part = 0; + vertical_merge_algorithm_min_columns_to_activate=0, + min_bytes_for_wide_part = 0, + min_rows_for_wide_part = 0; INSERT INTO two_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); @@ -80,7 +84,9 @@ CREATE TABLE four_rows_per_granule ( write_final_mark = 0, enable_vertical_merge_algorithm=1, vertical_merge_algorithm_min_rows_to_activate=0, - vertical_merge_algorithm_min_columns_to_activate=0, min_bytes_for_wide_part = 0; + vertical_merge_algorithm_min_columns_to_activate=0, + min_bytes_for_wide_part = 0, + min_rows_for_wide_part = 0; INSERT INTO four_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); @@ -126,7 +132,9 @@ CREATE TABLE huge_granularity_small_blocks ( SETTINGS index_granularity_bytes=1000000, write_final_mark = 0, enable_vertical_merge_algorithm=1, vertical_merge_algorithm_min_rows_to_activate=0, - vertical_merge_algorithm_min_columns_to_activate=0, min_bytes_for_wide_part = 0; + vertical_merge_algorithm_min_columns_to_activate=0, + min_bytes_for_wide_part = 0, + min_rows_for_wide_part = 0; INSERT INTO huge_granularity_small_blocks (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); @@ -164,7 +172,9 @@ CREATE TABLE adaptive_granularity_alter ( write_final_mark = 0, enable_vertical_merge_algorithm=1, vertical_merge_algorithm_min_rows_to_activate=0, - vertical_merge_algorithm_min_columns_to_activate=0, min_bytes_for_wide_part = 0; + vertical_merge_algorithm_min_columns_to_activate=0, + min_bytes_for_wide_part = 0, + min_rows_for_wide_part = 0; INSERT INTO 
adaptive_granularity_alter (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); diff --git a/tests/queries/0_stateless/00926_adaptive_index_granularity_versioned_collapsing_merge_tree.sql b/tests/queries/0_stateless/00926_adaptive_index_granularity_versioned_collapsing_merge_tree.sql index 44dd0412aea..43495ea3697 100644 --- a/tests/queries/0_stateless/00926_adaptive_index_granularity_versioned_collapsing_merge_tree.sql +++ b/tests/queries/0_stateless/00926_adaptive_index_granularity_versioned_collapsing_merge_tree.sql @@ -1,3 +1,5 @@ +-- Tags: no-random-merge-tree-settings + ----- Group of very similar simple tests ------ DROP TABLE IF EXISTS zero_rows_per_granule; @@ -14,7 +16,9 @@ CREATE TABLE zero_rows_per_granule ( write_final_mark = 0, enable_vertical_merge_algorithm=1, vertical_merge_algorithm_min_rows_to_activate=0, - vertical_merge_algorithm_min_columns_to_activate=0, min_bytes_for_wide_part = 0; + vertical_merge_algorithm_min_columns_to_activate=0, + min_bytes_for_wide_part = 0, + min_rows_for_wide_part = 0; INSERT INTO zero_rows_per_granule (p, k, v1, v2, Sign, Version) VALUES ('2018-05-15', 1, 1000, 2000, 1, 1), ('2018-05-16', 2, 3000, 4000, 1, 1), ('2018-05-17', 3, 5000, 6000, 1, 1), ('2018-05-18', 4, 7000, 8000, 1, 1); @@ -48,7 +52,9 @@ CREATE TABLE four_rows_per_granule ( write_final_mark = 0, enable_vertical_merge_algorithm=1, vertical_merge_algorithm_min_rows_to_activate=0, - vertical_merge_algorithm_min_columns_to_activate=0, min_bytes_for_wide_part = 0; + vertical_merge_algorithm_min_columns_to_activate=0, + min_bytes_for_wide_part = 0, + min_rows_for_wide_part = 0; INSERT INTO four_rows_per_granule (p, k, v1, v2, Sign, Version) VALUES ('2018-05-15', 1, 1000, 2000, 1, 1), ('2018-05-16', 2, 3000, 4000, 1, 1), ('2018-05-17', 3, 5000, 6000, 1, 1), ('2018-05-18', 4, 7000, 8000, 1, 1); @@ -99,7 +105,9 @@ CREATE TABLE six_rows_per_granule ( write_final_mark = 0, enable_vertical_merge_algorithm=1, vertical_merge_algorithm_min_rows_to_activate=0, - vertical_merge_algorithm_min_columns_to_activate=0, min_bytes_for_wide_part = 0; + vertical_merge_algorithm_min_columns_to_activate=0, + min_bytes_for_wide_part = 0, + min_rows_for_wide_part = 0; INSERT INTO six_rows_per_granule (p, k, v1, v2, Sign, Version) VALUES ('2018-05-15', 1, 1000, 2000, 1, 1), ('2018-05-16', 1, 1000, 2000, -1, 2); diff --git a/tests/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree_long.sql b/tests/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree_long.sql index e6ca25a3dd6..123e81642e0 100644 --- a/tests/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree_long.sql +++ b/tests/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree_long.sql @@ -1,4 +1,4 @@ --- Tags: long, replica +-- Tags: long, replica, no-random-merge-tree-settings ----- Group of very similar simple tests ------ select '----HORIZONTAL MERGE TESTS----'; diff --git a/tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.sh b/tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.sh index df4c5e233e4..b7c2045c83d 100755 --- a/tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.sh +++ b/tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-parallel +# Tags: no-parallel, no-random-merge-tree-settings 
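Most hunks in this patch apply one of two fixes: a test gains the no-random-merge-tree-settings tag, or its CREATE TABLE pins both wide-part thresholds, because pinning only min_bytes_for_wide_part leaves a randomized min_rows_for_wide_part free to flip the part format. A minimal sketch of the pinning pattern and how to verify it (the table name is illustrative, not taken from this patch):

DROP TABLE IF EXISTS pin_wide_parts;
CREATE TABLE pin_wide_parts (k UInt64, v String)
ENGINE = MergeTree ORDER BY k
SETTINGS min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0; -- both thresholds pinned, so every part is written in the Wide format
INSERT INTO pin_wide_parts SELECT number, toString(number) FROM numbers(10);
SELECT part_type FROM system.parts WHERE database = currentDatabase() AND table = 'pin_wide_parts' AND active; -- expected: Wide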
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS small_table" -$CLICKHOUSE_CLIENT --query="CREATE TABLE small_table (a UInt64 default 0, n UInt64) ENGINE = MergeTree() PARTITION BY tuple() ORDER BY (a) SETTINGS min_bytes_for_wide_part = 0;" +$CLICKHOUSE_CLIENT --query="CREATE TABLE small_table (a UInt64 default 0, n UInt64) ENGINE = MergeTree() PARTITION BY tuple() ORDER BY (a) SETTINGS min_bytes_for_wide_part = 0" $CLICKHOUSE_CLIENT --query="INSERT INTO small_table (n) SELECT * from system.numbers limit 100000;" $CLICKHOUSE_CLIENT --query="OPTIMIZE TABLE small_table FINAL;" diff --git a/tests/queries/0_stateless/00933_ttl_replicated_zookeeper_long.reference b/tests/queries/0_stateless/00933_ttl_replicated_zookeeper.reference similarity index 100% rename from tests/queries/0_stateless/00933_ttl_replicated_zookeeper_long.reference rename to tests/queries/0_stateless/00933_ttl_replicated_zookeeper.reference diff --git a/tests/queries/0_stateless/00933_ttl_replicated_zookeeper.sh b/tests/queries/0_stateless/00933_ttl_replicated_zookeeper.sh new file mode 100755 index 00000000000..22d9e0690b3 --- /dev/null +++ b/tests/queries/0_stateless/00933_ttl_replicated_zookeeper.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash +# Tags: replica + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +function query_with_retry +{ + retry=0 + until [ $retry -ge 5 ] + do + result=$($CLICKHOUSE_CLIENT $2 --query="$1" 2>&1) + if [ "$?" == 0 ]; then + echo -n "$result" + return + else + retry=$(($retry + 1)) + sleep 3 + fi + done + echo "Query '$1' failed with '$result'" +} + +$CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS ttl_repl1" +$CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS ttl_repl2" + +$CLICKHOUSE_CLIENT --query="CREATE TABLE ttl_repl1(d Date, x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00933/ttl_repl', '1') PARTITION BY toDayOfMonth(d) ORDER BY x TTL d + INTERVAL 1 DAY;" +$CLICKHOUSE_CLIENT --query="CREATE TABLE ttl_repl2(d Date, x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00933/ttl_repl', '2') PARTITION BY toDayOfMonth(d) ORDER BY x TTL d + INTERVAL 1 DAY;" + +$CLICKHOUSE_CLIENT --query="INSERT INTO TABLE ttl_repl1 VALUES (toDate('2000-10-10 00:00:00'), 100)" +$CLICKHOUSE_CLIENT --query="INSERT INTO TABLE ttl_repl1 VALUES (toDate('2100-10-10 00:00:00'), 200)" + +$CLICKHOUSE_CLIENT --query="ALTER TABLE ttl_repl1 MODIFY TTL d + INTERVAL 1 DAY" +$CLICKHOUSE_CLIENT --query="SYSTEM SYNC REPLICA ttl_repl2" + +$CLICKHOUSE_CLIENT --query="INSERT INTO TABLE ttl_repl1 VALUES (toDate('2000-10-10 00:00:00'), 300)" +$CLICKHOUSE_CLIENT --query="INSERT INTO TABLE ttl_repl1 VALUES (toDate('2100-10-10 00:00:00'), 400)" + +$CLICKHOUSE_CLIENT --query="SYSTEM SYNC REPLICA ttl_repl2" + +query_with_retry "OPTIMIZE TABLE ttl_repl2 FINAL SETTINGS optimize_throw_if_noop = 1" + +$CLICKHOUSE_CLIENT --query="SELECT x FROM ttl_repl2 ORDER BY x" + +$CLICKHOUSE_CLIENT --query="SHOW CREATE TABLE ttl_repl2" + +$CLICKHOUSE_CLIENT --query="DROP TABLE ttl_repl1" +$CLICKHOUSE_CLIENT --query="DROP TABLE ttl_repl2" diff --git a/tests/queries/0_stateless/00933_ttl_replicated_zookeeper_long.sql b/tests/queries/0_stateless/00933_ttl_replicated_zookeeper_long.sql deleted file mode 100644 index 1728cc7458e..00000000000 --- 
a/tests/queries/0_stateless/00933_ttl_replicated_zookeeper_long.sql +++ /dev/null @@ -1,30 +0,0 @@ --- Tags: long, replica - -DROP TABLE IF EXISTS ttl_repl1; -DROP TABLE IF EXISTS ttl_repl2; - -CREATE TABLE ttl_repl1(d Date, x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00933/ttl_repl', '1') - PARTITION BY toDayOfMonth(d) ORDER BY x TTL d + INTERVAL 1 DAY; -CREATE TABLE ttl_repl2(d Date, x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00933/ttl_repl', '2') - PARTITION BY toDayOfMonth(d) ORDER BY x TTL d + INTERVAL 1 DAY; - -INSERT INTO TABLE ttl_repl1 VALUES (toDate('2000-10-10 00:00:00'), 100); -INSERT INTO TABLE ttl_repl1 VALUES (toDate('2100-10-10 00:00:00'), 200); - -ALTER TABLE ttl_repl1 MODIFY TTL d + INTERVAL 1 DAY; -SYSTEM SYNC REPLICA ttl_repl2; - -INSERT INTO TABLE ttl_repl1 VALUES (toDate('2000-10-10 00:00:00'), 300); -INSERT INTO TABLE ttl_repl1 VALUES (toDate('2100-10-10 00:00:00'), 400); - -SYSTEM SYNC REPLICA ttl_repl2; - -SELECT sleep(1) format Null; -- wait for probable merges after inserts - -OPTIMIZE TABLE ttl_repl2 FINAL; -SELECT x FROM ttl_repl2 ORDER BY x; - -SHOW CREATE TABLE ttl_repl2; - -DROP TABLE ttl_repl1; -DROP TABLE ttl_repl2; diff --git a/tests/queries/0_stateless/00950_test_double_delta_codec.sql b/tests/queries/0_stateless/00950_test_double_delta_codec.sql index 6bf9b2628ad..f6199a6e4ec 100644 --- a/tests/queries/0_stateless/00950_test_double_delta_codec.sql +++ b/tests/queries/0_stateless/00950_test_double_delta_codec.sql @@ -1,3 +1,5 @@ +-- Tags: no-random-merge-tree-settings + DROP TABLE IF EXISTS codecTest; CREATE TABLE codecTest ( diff --git a/tests/queries/0_stateless/00955_test_final_mark.sql b/tests/queries/0_stateless/00955_test_final_mark.sql index 6615c945ef8..44eb4a69c2d 100644 --- a/tests/queries/0_stateless/00955_test_final_mark.sql +++ b/tests/queries/0_stateless/00955_test_final_mark.sql @@ -1,4 +1,4 @@ --- Tags: no-parallel +-- Tags: no-parallel, no-random-merge-tree-settings SET send_logs_level = 'fatal'; diff --git a/tests/queries/0_stateless/00955_test_final_mark_use.sh b/tests/queries/0_stateless/00955_test_final_mark_use.sh index 2c3219fbffd..b1bccd2d6e6 100755 --- a/tests/queries/0_stateless/00955_test_final_mark_use.sh +++ b/tests/queries/0_stateless/00955_test_final_mark_use.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-parallel +# Tags: no-parallel, no-random-merge-tree-settings CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.sql b/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.sql index cf9fd3cad12..40da12baddc 100644 --- a/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.sql +++ b/tests/queries/0_stateless/00961_checksums_in_system_parts_columns_table.sql @@ -1,7 +1,10 @@ +-- Tags: no-random-merge-tree-settings + DROP TABLE IF EXISTS test_00961; CREATE TABLE test_00961 (d Date, a String, b UInt8, x String, y Int8, z UInt32) - ENGINE = MergeTree PARTITION BY d ORDER BY (a, b) SETTINGS index_granularity = 111, min_bytes_for_wide_part = 0, compress_marks=false, compress_primary_key=false; + ENGINE = MergeTree PARTITION BY d ORDER BY (a, b) + SETTINGS index_granularity = 111, min_bytes_for_wide_part = 0, compress_marks = 0, compress_primary_key = 0; INSERT INTO test_00961 VALUES ('2000-01-01', 'Hello, world!', 123, 'xxx yyy', -123, 123456789); diff --git 
a/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.sh b/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.sh
index a817acd88a6..f8527cd491e 100755
--- a/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.sh
+++ b/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.sh
@@ -1,4 +1,5 @@
 #!/usr/bin/env bash
+# Tags: no-random-merge-tree-settings
 
 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
diff --git a/tests/queries/0_stateless/00997_set_index_array.sql b/tests/queries/0_stateless/00997_set_index_array.sql
index a0cb4ec0547..34d0f0b71ec 100644
--- a/tests/queries/0_stateless/00997_set_index_array.sql
+++ b/tests/queries/0_stateless/00997_set_index_array.sql
@@ -1,3 +1,5 @@
+-- Tags: no-random-merge-tree-settings
+
 DROP TABLE IF EXISTS set_array;
 
 CREATE TABLE set_array
diff --git a/tests/queries/0_stateless/01039_mergetree_exec_time.sql b/tests/queries/0_stateless/01039_mergetree_exec_time.sql
index d3aade41cea..bb114c41ec8 100644
--- a/tests/queries/0_stateless/01039_mergetree_exec_time.sql
+++ b/tests/queries/0_stateless/01039_mergetree_exec_time.sql
@@ -1,5 +1,5 @@
 DROP TABLE IF EXISTS tab;
-create table tab (A Int64) Engine=MergeTree order by tuple() SETTINGS min_bytes_for_wide_part = 0;
+create table tab (A Int64) Engine=MergeTree order by tuple() SETTINGS min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0;
 insert into tab select cityHash64(number) from numbers(1000);
 select sum(sleep(0.1)) from tab settings max_block_size = 1, max_execution_time=1; -- { serverError 159 }
 DROP TABLE IF EXISTS tab;
diff --git a/tests/queries/0_stateless/01042_check_query_and_last_granule_size.sql b/tests/queries/0_stateless/01042_check_query_and_last_granule_size.sql
index c62fe25a041..b66aff8384d 100644
--- a/tests/queries/0_stateless/01042_check_query_and_last_granule_size.sql
+++ b/tests/queries/0_stateless/01042_check_query_and_last_granule_size.sql
@@ -1,7 +1,7 @@
 SET check_query_single_value_result = 0;
 DROP TABLE IF EXISTS check_query_test;
 
-CREATE TABLE check_query_test (SomeKey UInt64, SomeValue String) ENGINE = MergeTree() ORDER BY SomeKey SETTINGS min_bytes_for_wide_part = 0;
+CREATE TABLE check_query_test (SomeKey UInt64, SomeValue String) ENGINE = MergeTree() ORDER BY SomeKey SETTINGS min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0;
 
 -- Number of rows in the last granule should equal the granularity.
 -- Rows in this table are short, so granularity will be 8192.
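The check_query_test hunk above relies on CHECK TABLE reporting per-part results. A short sketch of the pattern, assuming the table created in the hunk above and the default index_granularity of 8192:

SET check_query_single_value_result = 0; -- report one row per part instead of a single 0/1 summary
CHECK TABLE check_query_test; -- each result row carries, roughly, a part path, a pass flag, and a message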
@@ -17,7 +17,7 @@ DROP TABLE IF EXISTS check_query_test; DROP TABLE IF EXISTS check_query_test_non_adaptive; -CREATE TABLE check_query_test_non_adaptive (SomeKey UInt64, SomeValue String) ENGINE = MergeTree() ORDER BY SomeKey SETTINGS index_granularity_bytes = 0, min_bytes_for_wide_part = 0; +CREATE TABLE check_query_test_non_adaptive (SomeKey UInt64, SomeValue String) ENGINE = MergeTree() ORDER BY SomeKey SETTINGS index_granularity_bytes = 0, min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0; INSERT INTO check_query_test_non_adaptive SELECT number, toString(number) FROM system.numbers LIMIT 81920; diff --git a/tests/queries/0_stateless/01045_order_by_pk_special_storages.sh b/tests/queries/0_stateless/01045_order_by_pk_special_storages.sh index bb76f3978cc..8bd3d5291e4 100755 --- a/tests/queries/0_stateless/01045_order_by_pk_special_storages.sh +++ b/tests/queries/0_stateless/01045_order_by_pk_special_storages.sh @@ -12,8 +12,8 @@ $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS m" $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS buf" $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS mv" -$CLICKHOUSE_CLIENT -q "CREATE TABLE s1 (a UInt32, s String) ENGINE = MergeTree ORDER BY a PARTITION BY a % 3 SETTINGS min_bytes_for_wide_part = 0" -$CLICKHOUSE_CLIENT -q "CREATE TABLE s2 (a UInt32, s String) ENGINE = MergeTree ORDER BY a PARTITION BY a % 3 SETTINGS min_bytes_for_wide_part = 0" +$CLICKHOUSE_CLIENT -q "CREATE TABLE s1 (a UInt32, s String) ENGINE = MergeTree ORDER BY a PARTITION BY a % 3 SETTINGS min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0" +$CLICKHOUSE_CLIENT -q "CREATE TABLE s2 (a UInt32, s String) ENGINE = MergeTree ORDER BY a PARTITION BY a % 3 SETTINGS min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0" $CLICKHOUSE_CLIENT -q "CREATE TABLE m (a UInt32, s String) engine = Merge('$CLICKHOUSE_DATABASE', 's[1,2]')" $CLICKHOUSE_CLIENT -q "INSERT INTO s1 select (number % 20) * 2 as n, toString(number * number) from numbers(100000)" @@ -46,7 +46,7 @@ else fi $CLICKHOUSE_CLIENT -q "SELECT '---MaterializedView---'" -$CLICKHOUSE_CLIENT -q "CREATE MATERIALIZED VIEW mv (a UInt32, s String) engine = MergeTree ORDER BY s SETTINGS min_bytes_for_wide_part = 0 POPULATE AS SELECT a, s FROM s1 WHERE a % 7 = 0" +$CLICKHOUSE_CLIENT -q "CREATE MATERIALIZED VIEW mv (a UInt32, s String) engine = MergeTree ORDER BY s SETTINGS min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0 POPULATE AS SELECT a, s FROM s1 WHERE a % 7 = 0" $CLICKHOUSE_CLIENT -q "SELECT a, s FROM mv ORDER BY s LIMIT 10" rows_read=$($CLICKHOUSE_CLIENT -q "SELECT a, s FROM mv ORDER BY s LIMIT 10 FORMAT JSON" --max_threads=1 --max_block_size=20 --optimize_read_in_order=1 | grep "rows_read" | sed 's/[^0-9]*//g') diff --git a/tests/queries/0_stateless/01055_compact_parts_granularity.sh b/tests/queries/0_stateless/01055_compact_parts_granularity.sh index b5c6609bed8..f3da33f6ccf 100755 --- a/tests/queries/0_stateless/01055_compact_parts_granularity.sh +++ b/tests/queries/0_stateless/01055_compact_parts_granularity.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# Tags: no-random-merge-tree-settings CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/01114_database_atomic.reference b/tests/queries/0_stateless/01114_database_atomic.reference index 05260106ab9..10a39087c57 100644 --- a/tests/queries/0_stateless/01114_database_atomic.reference +++ b/tests/queries/0_stateless/01114_database_atomic.reference @@ -4,14 +4,14 @@ CREATE DATABASE test_01114_3\nENGINE = Ordinary 
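The 01114_database_atomic reference values below line up with the row-count change in the companion shell script: 110 rows under PARTITION BY n % 5 give 5 parts of 22 rows each, so sleepEachRow(1.5) costs about 1.5 s * 22 = 33 s per stream, and the expected aggregate becomes 110 rows summing to 5995. A quick sanity check of that sum (plain arithmetic, runnable anywhere):

SELECT count(), sum(number) FROM numbers(110); -- 110, 5995 (= 109 * 110 / 2)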
test_01114_1 Atomic store 00001114-1000-4000-8000-000000000001 1 test_01114_2 Atomic store 00001114-1000-4000-8000-000000000002 1 test_01114_3 Ordinary test_01114_3 test_01114_3 1 -20 +110 100 CREATE TABLE test_01114_2.mt UUID \'00001114-0000-4000-8000-000000000002\'\n(\n `n` UInt64\n)\nENGINE = MergeTree\nPARTITION BY n % 5\nORDER BY tuple()\nSETTINGS index_granularity = 8192 mt 00001114-0000-4000-8000-000000000002 CREATE TABLE test_01114_2.mt (`n` UInt64) ENGINE = MergeTree PARTITION BY n % 5 ORDER BY tuple() SETTINGS index_granularity = 8192 -20 +110 CREATE TABLE test_01114_1.mt UUID \'00001114-0000-4000-8000-000000000001\'\n(\n `n` UInt64\n)\nENGINE = MergeTree\nPARTITION BY n % 5\nORDER BY tuple()\nSETTINGS index_granularity = 8192 CREATE TABLE test_01114_2.mt UUID \'00001114-0000-4000-8000-000000000002\'\n(\n `n` UInt64\n)\nENGINE = MergeTree\nPARTITION BY n % 5\nORDER BY tuple()\nSETTINGS index_granularity = 8192 5 dropped -20 190 +110 5995 30 435 diff --git a/tests/queries/0_stateless/01114_database_atomic.sh b/tests/queries/0_stateless/01114_database_atomic.sh index e31841b27a0..4a3d35e48b7 100755 --- a/tests/queries/0_stateless/01114_database_atomic.sh +++ b/tests/queries/0_stateless/01114_database_atomic.sh @@ -33,7 +33,7 @@ $CLICKHOUSE_CLIENT -nm -q " CREATE TABLE test_01114_1.mt_tmp (n UInt64) ENGINE=MergeTree() ORDER BY tuple(); INSERT INTO test_01114_1.mt_tmp SELECT * FROM numbers(100); CREATE TABLE test_01114_3.mt (n UInt64) ENGINE=MergeTree() ORDER BY tuple() PARTITION BY (n % 5); -INSERT INTO test_01114_3.mt SELECT * FROM numbers(20); +INSERT INTO test_01114_3.mt SELECT * FROM numbers(110); RENAME TABLE test_01114_1.mt_tmp TO test_01114_3.mt_tmp; /* move from Atomic to Ordinary */ RENAME TABLE test_01114_3.mt TO test_01114_1.mt; /* move from Ordinary to Atomic */ @@ -49,8 +49,8 @@ $CLICKHOUSE_CLIENT --show_table_uuid_in_table_create_query_if_not_nil=1 -q "SHOW $CLICKHOUSE_CLIENT -q "SELECT name, uuid, create_table_query FROM system.tables WHERE database='test_01114_2'" | sed "s/$explicit_uuid/00001114-0000-4000-8000-000000000002/g" -$CLICKHOUSE_CLIENT -q "SELECT count(col), sum(col) FROM (SELECT n + sleepEachRow(1.5) AS col FROM test_01114_1.mt)" & # 30s, result: 20, 190 -$CLICKHOUSE_CLIENT -q "INSERT INTO test_01114_2.mt SELECT number + sleepEachRow(1.5) FROM numbers(30)" & # 45s +$CLICKHOUSE_CLIENT -q "SELECT count(col), sum(col) FROM (SELECT n + sleepEachRow(1.5) AS col FROM test_01114_1.mt)" & # 33s (1.5s * 22 rows per partition), result: 110, 5995 +$CLICKHOUSE_CLIENT -q "INSERT INTO test_01114_2.mt SELECT number + sleepEachRow(1.5) FROM numbers(30)" & # 45s (1.5s * 30 rows) sleep 1 # SELECT and INSERT should start before the following RENAMEs $CLICKHOUSE_CLIENT -nm -q " @@ -74,7 +74,7 @@ INSERT INTO test_01114_1.mt SELECT 's' || toString(number) FROM numbers(5); SELECT count() FROM test_01114_1.mt " # result: 5 -$CLICKHOUSE_CLIENT -q "SELECT tuple(s, sleepEachRow(3)) FROM test_01114_1.mt" > /dev/null & # 15s +$CLICKHOUSE_CLIENT -q "SELECT tuple(s, sleepEachRow(3)) FROM test_01114_1.mt" > /dev/null & # 15s (3s * 5 rows) sleep 1 $CLICKHOUSE_CLIENT -q "DROP DATABASE test_01114_1" --database_atomic_wait_for_drop_and_detach_synchronously=0 && echo "dropped" diff --git a/tests/queries/0_stateless/01130_in_memory_parts.sql b/tests/queries/0_stateless/01130_in_memory_parts.sql index dca12a85841..2b15ae24763 100644 --- a/tests/queries/0_stateless/01130_in_memory_parts.sql +++ b/tests/queries/0_stateless/01130_in_memory_parts.sql @@ -1,3 +1,5 @@ +-- Tags: no-s3-storage + DROP 
TABLE IF EXISTS in_memory; CREATE TABLE in_memory (a UInt32, b UInt32) ENGINE = MergeTree ORDER BY a diff --git a/tests/queries/0_stateless/01130_in_memory_parts_check.sql b/tests/queries/0_stateless/01130_in_memory_parts_check.sql index 57cd1c83528..c2f5eba5949 100644 --- a/tests/queries/0_stateless/01130_in_memory_parts_check.sql +++ b/tests/queries/0_stateless/01130_in_memory_parts_check.sql @@ -1,4 +1,7 @@ +-- Tags: no-s3-storage + -- Part of 00961_check_table test, but with in-memory parts + SET check_query_single_value_result = 0; DROP TABLE IF EXISTS mt_table; CREATE TABLE mt_table (d Date, key UInt64, data String) ENGINE = MergeTree() PARTITION BY toYYYYMM(d) ORDER BY key diff --git a/tests/queries/0_stateless/01130_in_memory_parts_default.sql b/tests/queries/0_stateless/01130_in_memory_parts_default.sql index 61e20c84f3d..776d5f89fcf 100644 --- a/tests/queries/0_stateless/01130_in_memory_parts_default.sql +++ b/tests/queries/0_stateless/01130_in_memory_parts_default.sql @@ -1,3 +1,4 @@ +-- Tags: no-s3-storage -- Test 01266_default_prewhere_reqq, but with in-memory parts DROP TABLE IF EXISTS t1; diff --git a/tests/queries/0_stateless/01130_in_memory_parts_nested.sql b/tests/queries/0_stateless/01130_in_memory_parts_nested.sql index 76e5a6d7751..45e778b9f04 100644 --- a/tests/queries/0_stateless/01130_in_memory_parts_nested.sql +++ b/tests/queries/0_stateless/01130_in_memory_parts_nested.sql @@ -1,3 +1,4 @@ +-- Tags: no-s3-storage -- Test 00576_nested_and_prewhere, but with in-memory parts. DROP TABLE IF EXISTS nested; @@ -8,8 +9,8 @@ INSERT INTO nested SELECT number, number % 2, range(number % 10) FROM system.num ALTER TABLE nested ADD COLUMN n.b Array(UInt64); SELECT DISTINCT n.b FROM nested PREWHERE filter; -SELECT DISTINCT n.b FROM nested PREWHERE filter SETTINGS max_block_size = 10; SELECT DISTINCT n.b FROM nested PREWHERE filter SETTINGS max_block_size = 123; +SELECT DISTINCT n.b FROM nested PREWHERE filter SETTINGS max_block_size = 1234; ALTER TABLE nested ADD COLUMN n.c Array(UInt64) DEFAULT arrayMap(x -> x * 2, n.a); SELECT DISTINCT n.c FROM nested PREWHERE filter; diff --git a/tests/queries/0_stateless/01130_in_memory_parts_partitons.sql b/tests/queries/0_stateless/01130_in_memory_parts_partitons.sql index b1ba8bc5560..18da2d2bd30 100644 --- a/tests/queries/0_stateless/01130_in_memory_parts_partitons.sql +++ b/tests/queries/0_stateless/01130_in_memory_parts_partitons.sql @@ -1,4 +1,4 @@ --- Tags: no-parallel +-- Tags: no-parallel, no-s3-storage DROP TABLE IF EXISTS t2; diff --git a/tests/queries/0_stateless/01158_zookeeper_log_long.sql b/tests/queries/0_stateless/01158_zookeeper_log_long.sql index cabb0ffe294..45771494af6 100644 --- a/tests/queries/0_stateless/01158_zookeeper_log_long.sql +++ b/tests/queries/0_stateless/01158_zookeeper_log_long.sql @@ -1,4 +1,4 @@ --- Tags: long, zookeeper, no-replicated-database, no-polymorphic-parts +-- Tags: long, zookeeper, no-replicated-database, no-polymorphic-parts, no-random-merge-tree-settings -- Tag no-replicated-database: Fails due to additional replicas or shards SET insert_keeper_fault_injection_probability=0; -- disable fault injection; part ids are non-deterministic in case of insert retries diff --git a/tests/queries/0_stateless/01200_mutations_memory_consumption.sql b/tests/queries/0_stateless/01200_mutations_memory_consumption.sql index ff4918c0810..bca2286aa22 100644 --- a/tests/queries/0_stateless/01200_mutations_memory_consumption.sql +++ b/tests/queries/0_stateless/01200_mutations_memory_consumption.sql @@ -1,4 +1,4 
@@ --- Tags: no-debug, no-parallel, long, no-s3-storage +-- Tags: no-debug, no-parallel, long, no-s3-storage, no-random-merge-tree-settings DROP TABLE IF EXISTS table_with_single_pk; diff --git a/tests/queries/0_stateless/01213_alter_rename_compact_part.sql b/tests/queries/0_stateless/01213_alter_rename_compact_part.sql index 07188ece519..ebf93521d67 100644 --- a/tests/queries/0_stateless/01213_alter_rename_compact_part.sql +++ b/tests/queries/0_stateless/01213_alter_rename_compact_part.sql @@ -12,7 +12,8 @@ ENGINE = MergeTree() PARTITION BY date ORDER BY key settings index_granularity = 8, -min_rows_for_wide_part = 10; +min_rows_for_wide_part = 10, +min_bytes_for_wide_part = '10G'; INSERT INTO table_with_compact_parts SELECT toDate('2019-10-01') + number % 3, number, toString(number), toString(number), toString(number) from numbers(9); diff --git a/tests/queries/0_stateless/01271_show_privileges.reference b/tests/queries/0_stateless/01271_show_privileges.reference index 5d46fd8585f..6e94e142bc2 100644 --- a/tests/queries/0_stateless/01271_show_privileges.reference +++ b/tests/queries/0_stateless/01271_show_privileges.reference @@ -96,10 +96,11 @@ SYSTEM DROP DNS CACHE ['SYSTEM DROP DNS','DROP DNS CACHE','DROP DNS'] GLOBAL SYS SYSTEM DROP MARK CACHE ['SYSTEM DROP MARK','DROP MARK CACHE','DROP MARKS'] GLOBAL SYSTEM DROP CACHE SYSTEM DROP UNCOMPRESSED CACHE ['SYSTEM DROP UNCOMPRESSED','DROP UNCOMPRESSED CACHE','DROP UNCOMPRESSED'] GLOBAL SYSTEM DROP CACHE SYSTEM DROP MMAP CACHE ['SYSTEM DROP MMAP','DROP MMAP CACHE','DROP MMAP'] GLOBAL SYSTEM DROP CACHE -SYSTEM DROP QUERY RESULT CACHE ['SYSTEM DROP QUERY RESULT','DROP QUERY RESULT CACHE','DROP QUERY RESULT'] GLOBAL SYSTEM DROP CACHE +SYSTEM DROP QUERY CACHE ['SYSTEM DROP QUERY','DROP QUERY CACHE','DROP QUERY'] GLOBAL SYSTEM DROP CACHE SYSTEM DROP COMPILED EXPRESSION CACHE ['SYSTEM DROP COMPILED EXPRESSION','DROP COMPILED EXPRESSION CACHE','DROP COMPILED EXPRESSIONS'] GLOBAL SYSTEM DROP CACHE SYSTEM DROP FILESYSTEM CACHE ['SYSTEM DROP FILESYSTEM CACHE','DROP FILESYSTEM CACHE'] GLOBAL SYSTEM DROP CACHE SYSTEM DROP SCHEMA CACHE ['SYSTEM DROP SCHEMA CACHE','DROP SCHEMA CACHE'] GLOBAL SYSTEM DROP CACHE +SYSTEM DROP S3 CLIENT CACHE ['SYSTEM DROP S3 CLIENT','DROP S3 CLIENT CACHE'] GLOBAL SYSTEM DROP CACHE SYSTEM DROP CACHE ['DROP CACHE'] \N SYSTEM SYSTEM RELOAD CONFIG ['RELOAD CONFIG'] GLOBAL SYSTEM RELOAD SYSTEM RELOAD USERS ['RELOAD USERS'] GLOBAL SYSTEM RELOAD diff --git a/tests/queries/0_stateless/01357_version_collapsing_attach_detach_zookeeper.sql b/tests/queries/0_stateless/01357_version_collapsing_attach_detach_zookeeper.sql index c76e2650ae9..8dcb9319cce 100644 --- a/tests/queries/0_stateless/01357_version_collapsing_attach_detach_zookeeper.sql +++ b/tests/queries/0_stateless/01357_version_collapsing_attach_detach_zookeeper.sql @@ -1,4 +1,4 @@ --- Tags: zookeeper +-- Tags: zookeeper, no-random-merge-tree-settings DROP TABLE IF EXISTS versioned_collapsing_table; diff --git a/tests/queries/0_stateless/01375_compact_parts_codecs.sql b/tests/queries/0_stateless/01375_compact_parts_codecs.sql index 35f125515a2..1dd39e67876 100644 --- a/tests/queries/0_stateless/01375_compact_parts_codecs.sql +++ b/tests/queries/0_stateless/01375_compact_parts_codecs.sql @@ -1,13 +1,13 @@ --- Tags: no-parallel +-- Tags: no-parallel, no-random-merge-tree-settings DROP TABLE IF EXISTS codecs; -CREATE TABLE codecs (id UInt32, val UInt32, s String) +CREATE TABLE codecs (id UInt32, val UInt32, s String) ENGINE = MergeTree ORDER BY id SETTINGS min_rows_for_wide_part = 
10000; INSERT INTO codecs SELECT number, number, toString(number) FROM numbers(1000); -SELECT sum(data_compressed_bytes), sum(data_uncompressed_bytes) - FROM system.parts +SELECT sum(data_compressed_bytes), sum(data_uncompressed_bytes) + FROM system.parts WHERE table = 'codecs' AND database = currentDatabase(); SELECT sum(id), sum(val), max(s) FROM codecs; @@ -19,12 +19,12 @@ SELECT sum(id), sum(val), max(s) FROM codecs; DROP TABLE codecs; -CREATE TABLE codecs (id UInt32 CODEC(NONE), val UInt32 CODEC(NONE), s String CODEC(NONE)) +CREATE TABLE codecs (id UInt32 CODEC(NONE), val UInt32 CODEC(NONE), s String CODEC(NONE)) ENGINE = MergeTree ORDER BY id SETTINGS min_rows_for_wide_part = 10000; INSERT INTO codecs SELECT number, number, toString(number) FROM numbers(1000); -SELECT sum(data_compressed_bytes), sum(data_uncompressed_bytes) - FROM system.parts +SELECT sum(data_compressed_bytes), sum(data_uncompressed_bytes) + FROM system.parts WHERE table = 'codecs' AND database = currentDatabase(); SELECT sum(id), sum(val), max(s) FROM codecs; @@ -36,12 +36,12 @@ SELECT sum(id), sum(val), max(s) FROM codecs; DROP TABLE codecs; -CREATE TABLE codecs (id UInt32, val UInt32 CODEC(Delta, ZSTD), s String CODEC(ZSTD)) +CREATE TABLE codecs (id UInt32, val UInt32 CODEC(Delta, ZSTD), s String CODEC(ZSTD)) ENGINE = MergeTree ORDER BY id SETTINGS min_rows_for_wide_part = 10000; INSERT INTO codecs SELECT number, number, toString(number) FROM numbers(1000); -SELECT sum(data_compressed_bytes), sum(data_uncompressed_bytes) - FROM system.parts +SELECT sum(data_compressed_bytes), sum(data_uncompressed_bytes) + FROM system.parts WHERE table = 'codecs' AND database = currentDatabase(); SELECT sum(id), sum(val), max(s) FROM codecs; diff --git a/tests/queries/0_stateless/01443_merge_truncate_long.sh b/tests/queries/0_stateless/01443_merge_truncate_long.sh index 40cfd0fc627..00abc48493a 100755 --- a/tests/queries/0_stateless/01443_merge_truncate_long.sh +++ b/tests/queries/0_stateless/01443_merge_truncate_long.sh @@ -12,7 +12,7 @@ CLICKHOUSE_CLIENT=$(echo ${CLICKHOUSE_CLIENT} | sed 's/'"--send_logs_level=${CLI ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS t" ${CLICKHOUSE_CLIENT} --query="CREATE TABLE t (x Int8) ENGINE = MergeTree ORDER BY tuple()" -for _ in {1..100}; do +for _ in {1..70}; do ${CLICKHOUSE_CLIENT} --query="INSERT INTO t VALUES (0)" ${CLICKHOUSE_CLIENT} --query="INSERT INTO t VALUES (0)" ${CLICKHOUSE_CLIENT} --query="OPTIMIZE TABLE t FINAL" 2>/dev/null & diff --git a/tests/queries/0_stateless/01475_read_subcolumns_storages.sh b/tests/queries/0_stateless/01475_read_subcolumns_storages.sh index be22b1b4185..d770d5118ac 100755 --- a/tests/queries/0_stateless/01475_read_subcolumns_storages.sh +++ b/tests/queries/0_stateless/01475_read_subcolumns_storages.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# Tags: no-s3-storage CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/01502_log_tinylog_deadlock_race.reference b/tests/queries/0_stateless/01502_log_tinylog_deadlock_race.reference deleted file mode 100644 index c62a2b18918..00000000000 --- a/tests/queries/0_stateless/01502_log_tinylog_deadlock_race.reference +++ /dev/null @@ -1,2 +0,0 @@ -Testing Memory -Done Memory diff --git a/tests/queries/0_stateless/01502_log_tinylog_deadlock_race.sh b/tests/queries/0_stateless/01502_log_tinylog_deadlock_race.sh deleted file mode 100755 index b659d550fa4..00000000000 --- a/tests/queries/0_stateless/01502_log_tinylog_deadlock_race.sh +++ /dev/null 
@@ -1,91 +0,0 @@ -#!/usr/bin/env bash -# Tags: deadlock - -set -e - -CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=fatal - -CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# shellcheck source=../shell_config.sh -. "$CURDIR"/../shell_config.sh - - -function thread_create { - while true; do - $CLICKHOUSE_CLIENT --query "CREATE TABLE IF NOT EXISTS $1 (x UInt64, s Array(Nullable(String))) ENGINE = $2" - sleep 0.0$RANDOM - done -} - -function thread_drop { - while true; do - $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS $1" - sleep 0.0$RANDOM - done -} - -function thread_rename { - while true; do - $CLICKHOUSE_CLIENT --query "RENAME TABLE $1 TO $2" 2>&1 | grep -v -e 'Received exception from server' -e '^(query: ' | grep -v -P 'Code: (60|57)' - sleep 0.0$RANDOM - done -} - -function thread_select { - while true; do - $CLICKHOUSE_CLIENT --query "SELECT * FROM $1 FORMAT Null" 2>&1 | grep -v -e 'Received exception from server' -e '^(query: ' | grep -v -P 'Code: (60|218)' - sleep 0.0$RANDOM - done -} - -function thread_insert { - while true; do - $CLICKHOUSE_CLIENT --query "INSERT INTO $1 SELECT rand64(1), [toString(rand64(2))] FROM numbers($2)" 2>&1 | grep -v -e 'Received exception from server' -e '^(query: '| grep -v -P 'Code: (60|218)' - sleep 0.0$RANDOM - done -} - -function thread_insert_select { - while true; do - $CLICKHOUSE_CLIENT --query "INSERT INTO $1 SELECT * FROM $2" 2>&1 | grep -v -e 'Received exception from server' -e '^(query: ' | grep -v -P 'Code: (60|218)' - sleep 0.0$RANDOM - done -} - -export -f thread_create -export -f thread_drop -export -f thread_rename -export -f thread_select -export -f thread_insert -export -f thread_insert_select - - -# Do randomized queries and expect nothing extraordinary happens. - -function test_with_engine { - echo "Testing $1" - - timeout 10 bash -c "thread_create t1 $1" & - timeout 10 bash -c "thread_create t2 $1" & - timeout 10 bash -c 'thread_drop t1' & - timeout 10 bash -c 'thread_drop t2' & - timeout 10 bash -c 'thread_rename t1 t2' & - timeout 10 bash -c 'thread_rename t2 t1' & - timeout 10 bash -c 'thread_select t1' & - timeout 10 bash -c 'thread_select t2' & - timeout 10 bash -c 'thread_insert t1 5' & - timeout 10 bash -c 'thread_insert t2 10' & - timeout 10 bash -c 'thread_insert_select t1 t2' & - timeout 10 bash -c 'thread_insert_select t2 t1' & - - wait - echo "Done $1" -} - -#test_with_engine TinyLog -#test_with_engine StripeLog -#test_with_engine Log -test_with_engine Memory - -$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS t1" -$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS t2" diff --git a/tests/queries/0_stateless/01502_long_log_tinylog_deadlock_race.sh b/tests/queries/0_stateless/01502_long_log_tinylog_deadlock_race.sh index 3cf94a0b2bd..b8efee89b4a 100755 --- a/tests/queries/0_stateless/01502_long_log_tinylog_deadlock_race.sh +++ b/tests/queries/0_stateless/01502_long_log_tinylog_deadlock_race.sh @@ -33,7 +33,7 @@ function thread_rename { function thread_select { while true; do - $CLICKHOUSE_CLIENT --query "SELECT * FROM $1 FORMAT Null" 2>&1 | grep -v -e 'Received exception from server' -e '^(query: ' | grep -v -P 'Code: (60|218)' + $CLICKHOUSE_CLIENT --local_filesystem_read_method pread --query "SELECT * FROM $1 FORMAT Null" 2>&1 | grep -v -e 'Received exception from server' -e '^(query: ' | grep -v -P 'Code: (60|218)' sleep 0.0$RANDOM done } @@ -47,7 +47,7 @@ function thread_insert { function thread_insert_select { while true; do - $CLICKHOUSE_CLIENT --query "INSERT INTO $1 SELECT * FROM $2" 2>&1 | grep -v -e 'Received exception from 
server' -e '^(query: ' | grep -v -P 'Code: (60|218)' + $CLICKHOUSE_CLIENT --local_filesystem_read_method pread --query "INSERT INTO $1 SELECT * FROM $2" 2>&1 | grep -v -e 'Received exception from server' -e '^(query: ' | grep -v -P 'Code: (60|218)' sleep 0.0$RANDOM done } diff --git a/tests/queries/0_stateless/01508_partition_pruning_long.sh b/tests/queries/0_stateless/01508_partition_pruning_long.sh index ddf63bf4808..c1f2d6562ab 100755 --- a/tests/queries/0_stateless/01508_partition_pruning_long.sh +++ b/tests/queries/0_stateless/01508_partition_pruning_long.sh @@ -1,6 +1,5 @@ #!/usr/bin/env bash -# Tags: long, no-polymorphic-parts -# Tag no-polymorphic-parts: bug, shoud be fixed +# Tags: long, no-polymorphic-parts, no-random-merge-tree-settings # Description of test result: # Test the correctness of the partition pruning diff --git a/tests/queries/0_stateless/01508_query_obfuscator.reference b/tests/queries/0_stateless/01508_query_obfuscator.reference index 7d910734dbd..9268b444d90 100644 --- a/tests/queries/0_stateless/01508_query_obfuscator.reference +++ b/tests/queries/0_stateless/01508_query_obfuscator.reference @@ -1,16 +1,16 @@ -SELECT 116, 'Qqfu://2020-02-10isqkc1203 sp 2000-05-27T18:38:01', 13e100, Obsidian_id_diverTeam, sweets(Workplace), avgIf(remote('128.0.0.1')) -SELECT treasury_mammoth_hazelnut between nutmeg and span, case when chive >= 116 then switching else null end +SELECT 116, 'Qqfu://2020-02-10isqkc1203 sp 2000-05-27T18:38:01', 13e100, Jewelry_id_studyBeast, algebra(Stable), avgIf(remote('128.0.0.1')) +SELECT surfboard_solitaire_crunch between understanding and populist, case when instrument >= 116 then poverty else null end SELECT - EarthquakeID, - Workout.ID, Workout.CoupleThrill, - MedalEMPIRE, - HOPE.ListingName, HOPE.ListingBomb, HOPE.ListingRamen, HOPE.ListingResult, HOPE.CoupleThrill, HOPE.Smile -FROM merge.marsh_agreement + BugleID, + Reliability.ID, Reliability.ExperiencePrevalence, + DepressiveTURKEY, + SPARK.RainmakerName, SPARK.RainmakerReligion, SPARK.RainmakerMisfit, SPARK.RainmakerAardvark, SPARK.ExperiencePrevalence, SPARK.Type +FROM merge.invader_schizophrenic WHERE - RecapitulationLeaver >= '2020-10-13' AND RecapitulationLeaver <= '2020-10-21' - AND MasonryID = 30750384 - AND intHash32(EyeballID) = 448362928 AND intHash64(EyeballID) = 12572659331310383983 - AND EarthquakeID IN (8195672321757027078, 7079643623150622129, 5057006826979676478, 7886875230160484653, 7494974311229040743) - AND Aide = 1 + PortraitInvasion >= '2020-10-13' AND PortraitInvasion <= '2020-10-21' + AND FrownID = 30750384 + AND intHash32(HaversackID) = 448362928 AND intHash64(HaversackID) = 12572659331310383983 + AND BugleID IN (8195672321757027078, 7079643623150622129, 5057006826979676478, 7886875230160484653, 7494974311229040743) + AND Hostel = 1 diff --git a/tests/queries/0_stateless/01508_race_condition_rename_clear_zookeeper_long.sh b/tests/queries/0_stateless/01508_race_condition_rename_clear_zookeeper_long.sh index 7db9f22951c..efe24aa3a88 100755 --- a/tests/queries/0_stateless/01508_race_condition_rename_clear_zookeeper_long.sh +++ b/tests/queries/0_stateless/01508_race_condition_rename_clear_zookeeper_long.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: race, zookeeper +# Tags: race, zookeeper, no-s3-storage CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/01533_multiple_nested.sql b/tests/queries/0_stateless/01533_multiple_nested.sql index 94d81c110cb..f39b56bbaa2 100644 --- 
a/tests/queries/0_stateless/01533_multiple_nested.sql +++ b/tests/queries/0_stateless/01533_multiple_nested.sql @@ -1,4 +1,4 @@ --- Tags: no-s3-storage +-- Tags: no-s3-storage, no-random-merge-tree-settings -- no-s3 because read FileOpen metric DROP TABLE IF EXISTS nested; diff --git a/tests/queries/0_stateless/01600_log_queries_with_extensive_info.reference b/tests/queries/0_stateless/01600_log_queries_with_extensive_info.reference index 701e72b3b8e..bb6a99fa522 100644 --- a/tests/queries/0_stateless/01600_log_queries_with_extensive_info.reference +++ b/tests/queries/0_stateless/01600_log_queries_with_extensive_info.reference @@ -16,7 +16,7 @@ alter table test_log_queries.logtable rename column j to x, rename column k to y alter table test_log_queries.logtable2 add column x int, add column y int 1199561338572582360 Alter ['test_log_queries'] ['test_log_queries.logtable2'] ['test_log_queries.logtable2.x','test_log_queries.logtable2.y'] alter table test_log_queries.logtable3 drop column i, drop column k 340702370038862784 Alter ['test_log_queries'] ['test_log_queries.logtable3'] ['test_log_queries.logtable3.i','test_log_queries.logtable3.k'] rename table test_log_queries.logtable2 to test_log_queries.logtable4, test_log_queries.logtable3 to test_log_queries.logtable5 17256232154191063008 Rename ['test_log_queries'] ['test_log_queries.logtable2','test_log_queries.logtable3','test_log_queries.logtable4','test_log_queries.logtable5'] [] -optimize table test_log_queries.logtable 12932884188099170316 ['test_log_queries'] ['test_log_queries.logtable'] [] +optimize table test_log_queries.logtable 12932884188099170316 Optimize ['test_log_queries'] ['test_log_queries.logtable'] [] drop table if exists test_log_queries.logtable 9614905142075064664 Drop ['test_log_queries'] ['test_log_queries.logtable'] [] drop table if exists test_log_queries.logtable2 5276868561533661466 Drop ['test_log_queries'] ['test_log_queries.logtable2'] [] drop table if exists test_log_queries.logtable3 4776768361842582387 Drop ['test_log_queries'] ['test_log_queries.logtable3'] [] diff --git a/tests/queries/0_stateless/01600_log_queries_with_extensive_info.sh b/tests/queries/0_stateless/01600_log_queries_with_extensive_info.sh index 46fea5001a1..50a16250ea8 100755 --- a/tests/queries/0_stateless/01600_log_queries_with_extensive_info.sh +++ b/tests/queries/0_stateless/01600_log_queries_with_extensive_info.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-parallel +# Tags: no-parallel, no-random-merge-tree-settings set -ue diff --git a/tests/queries/0_stateless/01600_parts_types_metrics_long.sh b/tests/queries/0_stateless/01600_parts_types_metrics_long.sh index 6e4ccbdd325..05edf02f7ed 100755 --- a/tests/queries/0_stateless/01600_parts_types_metrics_long.sh +++ b/tests/queries/0_stateless/01600_parts_types_metrics_long.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: long +# Tags: long, no-s3-storage CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/01605_adaptive_granularity_block_borders.sql b/tests/queries/0_stateless/01605_adaptive_granularity_block_borders.sql index 7654be4eb29..85ad7636201 100644 --- a/tests/queries/0_stateless/01605_adaptive_granularity_block_borders.sql +++ b/tests/queries/0_stateless/01605_adaptive_granularity_block_borders.sql @@ -1,3 +1,5 @@ +-- Tags: no-random-merge-tree-settings + SET use_uncompressed_cache = 0; DROP TABLE IF EXISTS adaptive_table; @@ -11,7 +13,10 @@ CREATE TABLE adaptive_table( value String ) ENGINE 
MergeTree() ORDER BY key -SETTINGS index_granularity_bytes=1048576, min_bytes_for_wide_part = 0, enable_vertical_merge_algorithm = 0; +SETTINGS index_granularity_bytes=1048576, +min_bytes_for_wide_part = 0, +min_rows_for_wide_part = 0, +enable_vertical_merge_algorithm = 0; SET max_block_size=900; diff --git a/tests/queries/0_stateless/01606_merge_from_wide_to_compact.sql b/tests/queries/0_stateless/01606_merge_from_wide_to_compact.sql index 15596d3f6db..0f2fbcaa76d 100644 --- a/tests/queries/0_stateless/01606_merge_from_wide_to_compact.sql +++ b/tests/queries/0_stateless/01606_merge_from_wide_to_compact.sql @@ -4,7 +4,8 @@ CREATE TABLE wide_to_comp (a Int, b Int, c Int) ENGINE = MergeTree ORDER BY a settings vertical_merge_algorithm_min_rows_to_activate = 1, vertical_merge_algorithm_min_columns_to_activate = 1, - min_bytes_for_wide_part = 0; + min_bytes_for_wide_part = 0, + min_rows_for_wide_part = 0; SYSTEM STOP merges wide_to_comp; diff --git a/tests/queries/0_stateless/01623_constraints_column_swap.sql b/tests/queries/0_stateless/01623_constraints_column_swap.sql index 0fb0d417a43..c81b37c8428 100644 --- a/tests/queries/0_stateless/01623_constraints_column_swap.sql +++ b/tests/queries/0_stateless/01623_constraints_column_swap.sql @@ -1,3 +1,5 @@ +-- Tags: no-random-merge-tree-settings + SET convert_query_to_cnf = 1; SET optimize_using_constraints = 1; SET optimize_move_to_prewhere = 1; diff --git a/tests/queries/0_stateless/01632_tinylog_read_write.sh b/tests/queries/0_stateless/01632_tinylog_read_write.sh index e45fdd91ff6..69f985a9d0d 100755 --- a/tests/queries/0_stateless/01632_tinylog_read_write.sh +++ b/tests/queries/0_stateless/01632_tinylog_read_write.sh @@ -12,7 +12,7 @@ $CLICKHOUSE_CLIENT --multiquery --query "DROP TABLE IF EXISTS test; CREATE TABLE function thread_select { while true; do - $CLICKHOUSE_CLIENT --query "SELECT * FROM test FORMAT Null" + $CLICKHOUSE_CLIENT --local_filesystem_read_method pread --query "SELECT * FROM test FORMAT Null" sleep 0.0$RANDOM done } diff --git a/tests/queries/0_stateless/01640_marks_corruption_regression.sql b/tests/queries/0_stateless/01640_marks_corruption_regression.sql index 7ccd8741dda..c75c26f3165 100644 --- a/tests/queries/0_stateless/01640_marks_corruption_regression.sql +++ b/tests/queries/0_stateless/01640_marks_corruption_regression.sql @@ -1,3 +1,5 @@ +-- Tags: no-random-merge-tree-settings + DROP TABLE IF EXISTS adaptive_table; CREATE TABLE adaptive_table( diff --git a/tests/queries/0_stateless/01641_memory_tracking_insert_optimize.sql b/tests/queries/0_stateless/01641_memory_tracking_insert_optimize.sql index 1c29ea83efc..363f88c5ec9 100644 --- a/tests/queries/0_stateless/01641_memory_tracking_insert_optimize.sql +++ b/tests/queries/0_stateless/01641_memory_tracking_insert_optimize.sql @@ -1,4 +1,4 @@ --- Tags: no-replicated-database +-- Tags: no-replicated-database, no-random-merge-tree-settings drop table if exists data_01641; diff --git a/tests/queries/0_stateless/01643_merge_tree_fsync_smoke.sql b/tests/queries/0_stateless/01643_merge_tree_fsync_smoke.sql index 598e1ef3c34..ad0dfca0db2 100644 --- a/tests/queries/0_stateless/01643_merge_tree_fsync_smoke.sql +++ b/tests/queries/0_stateless/01643_merge_tree_fsync_smoke.sql @@ -1,3 +1,5 @@ +-- Tags: no-s3-storage + drop table if exists data_01643; select 'default'; diff --git a/tests/queries/0_stateless/01643_replicated_merge_tree_fsync_smoke.sql b/tests/queries/0_stateless/01643_replicated_merge_tree_fsync_smoke.sql index dadd7eaba6c..bcce87e11db 100644 --- 
a/tests/queries/0_stateless/01643_replicated_merge_tree_fsync_smoke.sql +++ b/tests/queries/0_stateless/01643_replicated_merge_tree_fsync_smoke.sql @@ -1,4 +1,4 @@ --- Tags: no-parallel +-- Tags: no-parallel, no-s3-storage -- no-parallel -- for flaky check and to avoid "Removing leftovers from table" (for other tables) -- Temporarily skip warning 'table was created by another server at the same moment, will retry' diff --git a/tests/queries/0_stateless/01655_plan_optimizations_optimize_read_in_window_order_long.sh b/tests/queries/0_stateless/01655_plan_optimizations_optimize_read_in_window_order_long.sh index bfb4601e62b..e5e57ddb78a 100755 --- a/tests/queries/0_stateless/01655_plan_optimizations_optimize_read_in_window_order_long.sh +++ b/tests/queries/0_stateless/01655_plan_optimizations_optimize_read_in_window_order_long.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: long +# Tags: long, no-random-merge-tree-settings CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/01702_system_query_log.reference b/tests/queries/0_stateless/01702_system_query_log.reference index 4b9eeb139f4..c653021aa5a 100644 --- a/tests/queries/0_stateless/01702_system_query_log.reference +++ b/tests/queries/0_stateless/01702_system_query_log.reference @@ -10,27 +10,27 @@ Misc queries ACTUAL LOG CONTENT: Select SELECT \'DROP queries and also a cleanup before the test\'; Drop DROP DATABASE IF EXISTS sqllt SYNC; - DROP USER IF EXISTS sqllt_user; - DROP ROLE IF EXISTS sqllt_role; - DROP POLICY IF EXISTS sqllt_policy ON sqllt.table, sqllt.view, sqllt.dictionary; - DROP ROW POLICY IF EXISTS sqllt_row_policy ON sqllt.table, sqllt.view, sqllt.dictionary; - DROP QUOTA IF EXISTS sqllt_quota; - DROP SETTINGS PROFILE IF EXISTS sqllt_settings_profile; +Drop DROP USER IF EXISTS sqllt_user; +Drop DROP ROLE IF EXISTS sqllt_role; +Drop DROP POLICY IF EXISTS sqllt_policy ON sqllt.table, sqllt.view, sqllt.dictionary; +Drop DROP ROW POLICY IF EXISTS sqllt_row_policy ON sqllt.table, sqllt.view, sqllt.dictionary; +Drop DROP QUOTA IF EXISTS sqllt_quota; +Drop DROP SETTINGS PROFILE IF EXISTS sqllt_settings_profile; Select SELECT \'CREATE queries\'; Create CREATE DATABASE sqllt; Create CREATE TABLE sqllt.table\n(\n i UInt8, s String\n)\nENGINE = MergeTree PARTITION BY tuple() ORDER BY tuple(); Create CREATE VIEW sqllt.view AS SELECT i, s FROM sqllt.table; Create CREATE DICTIONARY sqllt.dictionary (key UInt64, value UInt64) PRIMARY KEY key SOURCE(CLICKHOUSE(DB \'sqllt\' TABLE \'table\' HOST \'localhost\' PORT 9001)) LIFETIME(0) LAYOUT(FLAT()); - CREATE USER sqllt_user IDENTIFIED WITH plaintext_password - CREATE ROLE sqllt_role; - CREATE POLICY sqllt_policy ON sqllt.table, sqllt.view, sqllt.dictionary AS PERMISSIVE TO ALL; - CREATE POLICY sqllt_row_policy ON sqllt.table, sqllt.view, sqllt.dictionary AS PERMISSIVE TO ALL; - CREATE QUOTA sqllt_quota KEYED BY user_name TO sqllt_role; - CREATE SETTINGS PROFILE sqllt_settings_profile SETTINGS interactive_delay = 200000; +Create CREATE USER sqllt_user IDENTIFIED WITH plaintext_password +Create CREATE ROLE sqllt_role; +Create CREATE POLICY sqllt_policy ON sqllt.table, sqllt.view, sqllt.dictionary AS PERMISSIVE TO ALL; +Create CREATE POLICY sqllt_row_policy ON sqllt.table, sqllt.view, sqllt.dictionary AS PERMISSIVE TO ALL; +Create CREATE QUOTA sqllt_quota KEYED BY user_name TO sqllt_role; +Create CREATE SETTINGS PROFILE sqllt_settings_profile SETTINGS interactive_delay = 200000; Grant GRANT sqllt_role TO sqllt_user; 
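The 01702_system_query_log reference changes in this region reflect that query_kind is now recorded for DROP/CREATE/SET/SHOW and similar statements rather than left empty. A sketch of the kind of query that produces such lines (the columns are real system.query_log columns; the filters are illustrative):

SYSTEM FLUSH LOGS; -- assumes log_queries = 1, so the statements have actually been logged
SELECT query_kind, query
FROM system.query_log
WHERE type = 'QueryFinish' AND current_database = currentDatabase()
ORDER BY event_time_microseconds;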
Select SELECT \'SET queries\'; - SET log_profile_events=false; - SET DEFAULT ROLE sqllt_role TO sqllt_user; +Set SET log_profile_events=false; +Set SET DEFAULT ROLE sqllt_role TO sqllt_user; Select -- SET ROLE sqllt_role; -- tests are executed by user `default` which is defined in XML and is impossible to update.\n\nSELECT \'ALTER TABLE queries\'; Alter ALTER TABLE sqllt.table ADD COLUMN new_col UInt32 DEFAULT 123456789; Alter ALTER TABLE sqllt.table COMMENT COLUMN new_col \'dummy column with a comment\'; @@ -54,19 +54,19 @@ System SYSTEM START FETCHES sqllt.table System SYSTEM STOP REPLICATED SENDS sqllt.table System SYSTEM START REPLICATED SENDS sqllt.table Select -- SYSTEM RELOAD DICTIONARY sqllt.dictionary; -- temporary out of order: Code: 210, Connection refused (localhost:9001) (version 21.3.1.1)\n-- DROP REPLICA\n-- haha, no\n-- SYSTEM KILL;\n-- SYSTEM SHUTDOWN;\n\n-- Since we don\'t really care about the actual output, suppress it with `FORMAT Null`.\nSELECT \'SHOW queries\'; - SHOW CREATE TABLE sqllt.table FORMAT Null; - SHOW CREATE DICTIONARY sqllt.dictionary FORMAT Null; - SHOW DATABASES LIKE \'sqllt\' FORMAT Null; - SHOW TABLES FROM sqllt FORMAT Null; - SHOW DICTIONARIES FROM sqllt FORMAT Null; - SHOW GRANTS FORMAT Null; - SHOW GRANTS FOR sqllt_user FORMAT Null; - SHOW CREATE USER sqllt_user FORMAT Null; - SHOW CREATE ROLE sqllt_role FORMAT Null; - SHOW CREATE POLICY sqllt_policy FORMAT Null; - SHOW CREATE ROW POLICY sqllt_row_policy FORMAT Null; - SHOW CREATE QUOTA sqllt_quota FORMAT Null; - SHOW CREATE SETTINGS PROFILE sqllt_settings_profile FORMAT Null; +Show SHOW CREATE TABLE sqllt.table FORMAT Null; +Show SHOW CREATE DICTIONARY sqllt.dictionary FORMAT Null; +Show SHOW DATABASES LIKE \'sqllt\' FORMAT Null; +Show SHOW TABLES FROM sqllt FORMAT Null; +Show SHOW DICTIONARIES FROM sqllt FORMAT Null; +Show SHOW GRANTS FORMAT Null; +Show SHOW GRANTS FOR sqllt_user FORMAT Null; +Show SHOW CREATE USER sqllt_user FORMAT Null; +Show SHOW CREATE ROLE sqllt_role FORMAT Null; +Show SHOW CREATE POLICY sqllt_policy FORMAT Null; +Show SHOW CREATE ROW POLICY sqllt_row_policy FORMAT Null; +Show SHOW CREATE QUOTA sqllt_quota FORMAT Null; +Show SHOW CREATE SETTINGS PROFILE sqllt_settings_profile FORMAT Null; Select SELECT \'GRANT queries\'; Grant GRANT SELECT ON sqllt.table TO sqllt_user; Grant GRANT DROP ON sqllt.view TO sqllt_user; @@ -74,13 +74,13 @@ Select SELECT \'REVOKE queries\'; Revoke REVOKE SELECT ON sqllt.table FROM sqllt_user; Revoke REVOKE DROP ON sqllt.view FROM sqllt_user; Select SELECT \'Misc queries\'; - DESCRIBE TABLE sqllt.table FORMAT Null; - CHECK TABLE sqllt.table FORMAT Null; +Describe DESCRIBE TABLE sqllt.table FORMAT Null; +Check CHECK TABLE sqllt.table FORMAT Null; Drop DETACH TABLE sqllt.table; Create ATTACH TABLE sqllt.table; Rename RENAME TABLE sqllt.table TO sqllt.table_new; Rename RENAME TABLE sqllt.table_new TO sqllt.table; Drop TRUNCATE TABLE sqllt.table; Drop DROP TABLE sqllt.table SYNC; - SET log_comment=\'\'; +Set SET log_comment=\'\'; DROP queries and also a cleanup after the test diff --git a/tests/queries/0_stateless/01710_minmax_count_projection.sql b/tests/queries/0_stateless/01710_minmax_count_projection.sql index f01e5915393..f7645414187 100644 --- a/tests/queries/0_stateless/01710_minmax_count_projection.sql +++ b/tests/queries/0_stateless/01710_minmax_count_projection.sql @@ -58,7 +58,7 @@ select min(dt), max(dt), count(toDate(dt) >= '2021-10-25') from d where toDate(d select count() from d group by toDate(dt); -- fuzz crash -SELECT min(dt), 
count(ignore(ignore(ignore(tupleElement(_partition_value, NULL) = NULL), NULL, NULL, NULL), 0, '10485.76', NULL)), max(dt), count(toDate(dt) >= '2021-10-25') FROM d WHERE toDate(dt) >= '2021-10-25';
+SELECT min(dt), count(ignore(ignore(ignore(tupleElement(_partition_value, 'xxxx', NULL) = NULL), NULL, NULL, NULL), 0, '10485.76', NULL)), max(dt), count(toDate(dt) >= '2021-10-25') FROM d WHERE toDate(dt) >= '2021-10-25';
 
 -- fuzz crash
 SELECT pointInEllipses(min(j), NULL), max(dt), count('0.0000000007') FROM d WHERE toDate(dt) >= '2021-10-25';
diff --git a/tests/queries/0_stateless/01710_projection_group_by_order_by.sql b/tests/queries/0_stateless/01710_projection_group_by_order_by.sql
index d93339aedef..780162e0284 100644
--- a/tests/queries/0_stateless/01710_projection_group_by_order_by.sql
+++ b/tests/queries/0_stateless/01710_projection_group_by_order_by.sql
@@ -1,3 +1,7 @@
+-- Tags: no-random-merge-tree-settings
+-- Tag no-random-merge-tree-settings: bug in formatting of projections.
+-- https://github.com/ClickHouse/ClickHouse/issues/44318
+
 DROP TABLE IF EXISTS t;
 
 drop table if exists tp;
diff --git a/tests/queries/0_stateless/01785_parallel_formatting_memory.sh b/tests/queries/0_stateless/01785_parallel_formatting_memory.sh
index 6d081c61fd3..1cfe89d191f 100755
--- a/tests/queries/0_stateless/01785_parallel_formatting_memory.sh
+++ b/tests/queries/0_stateless/01785_parallel_formatting_memory.sh
@@ -4,5 +4,5 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CUR_DIR"/../shell_config.sh
 
-$CLICKHOUSE_CLIENT --compress 0 --max_memory_usage 1G --query "SELECT range(65535) FROM system.one ARRAY JOIN range(65536) AS number" 2>&1 | grep -oF 'Code: 241' | head -n1
-$CLICKHOUSE_CLIENT --compress 1 --max_memory_usage 1G --query "SELECT range(65535) FROM system.one ARRAY JOIN range(65536) AS number" 2>&1 | grep -oF 'Code: 241' | head -n1
+$CLICKHOUSE_CLIENT --compression 0 --max_memory_usage 1G --query "SELECT range(65535) FROM system.one ARRAY JOIN range(65536) AS number" 2>&1 | grep -oF 'Code: 241' | head -n1
+$CLICKHOUSE_CLIENT --compression 1 --max_memory_usage 1G --query "SELECT range(65535) FROM system.one ARRAY JOIN range(65536) AS number" 2>&1 | grep -oF 'Code: 241' | head -n1
diff --git a/tests/queries/0_stateless/01786_explain_merge_tree.sh b/tests/queries/0_stateless/01786_explain_merge_tree.sh
index 138905c65e7..15f8821d80d 100755
--- a/tests/queries/0_stateless/01786_explain_merge_tree.sh
+++ b/tests/queries/0_stateless/01786_explain_merge_tree.sh
@@ -1,4 +1,5 @@
 #!/usr/bin/env bash
+# Tags: no-random-merge-tree-settings
 
 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
diff --git a/tests/queries/0_stateless/01824_move_to_prewhere_many_columns.sql b/tests/queries/0_stateless/01824_move_to_prewhere_many_columns.sql
index c4ef5516fc8..2987c541aef 100644
--- a/tests/queries/0_stateless/01824_move_to_prewhere_many_columns.sql
+++ b/tests/queries/0_stateless/01824_move_to_prewhere_many_columns.sql
@@ -1,3 +1,5 @@
+-- Tags: no-random-merge-tree-settings
+
 SET optimize_move_to_prewhere = 1;
 SET convert_query_to_cnf = 0;
 
diff --git a/tests/queries/0_stateless/01834_alias_columns_laziness_filimonov.reference b/tests/queries/0_stateless/01834_alias_columns_laziness_filimonov.reference
index 91c37a277a1..d8e51196fc7 100644
--- a/tests/queries/0_stateless/01834_alias_columns_laziness_filimonov.reference
+++ b/tests/queries/0_stateless/01834_alias_columns_laziness_filimonov.reference
@@ -1,2 +1,2 @@
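The reference hunk that follows drops from 3 sleep calls to 1, matching the shortened comment in the companion .sh hunk: the ALIAS expression is no longer re-evaluated by the query-planning passes. A sketch of the pattern being measured (the CREATE TABLE is approximated from the script, not copied from this patch):

CREATE TABLE aliases_lazyness (x UInt8, y UInt8 ALIAS sleepEachRow(0.1)) ENGINE = MergeTree ORDER BY x;
INSERT INTO aliases_lazyness (x) SELECT * FROM numbers(100);
-- Should now invoke sleepEachRow once (~0.1 s total), not once per row or per planning pass:
SELECT x, y FROM aliases_lazyness WHERE x = 1 FORMAT Null;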
-SleepFunctionCalls: 3 (increment) -SleepFunctionMicroseconds: 300000 (increment) +SleepFunctionCalls: 1 (increment) +SleepFunctionMicroseconds: 100000 (increment) diff --git a/tests/queries/0_stateless/01834_alias_columns_laziness_filimonov.sh b/tests/queries/0_stateless/01834_alias_columns_laziness_filimonov.sh index 26fcfd38c48..bf8bb716a9f 100755 --- a/tests/queries/0_stateless/01834_alias_columns_laziness_filimonov.sh +++ b/tests/queries/0_stateless/01834_alias_columns_laziness_filimonov.sh @@ -12,11 +12,7 @@ insert into aliases_lazyness(x) select * from numbers(100); # In very old ClickHouse versions alias column was calculated for every row. # If it works this way, the query will take at least 0.1 * 100 = 10 seconds. -# If the issue does not exist, the query should call sleepEachRow() "only" 4 times: -# - from MergeTreeData::getQueryProcessingStageWithAggregateProjection() -> MergeTreeWhereOptimizer -> getBlockWithConstants() -# - from MergeTreeWhereOptimizer -> getBlockWithConstants() -# - ReadFromMergeTree::selectRangesToRead() -> getBlockWithConstants() -# - Pipeline +# If the issue does not exist, the query should call sleepEachRow() only 1 time. ${CLICKHOUSE_CLIENT} --profile-events-delay-ms=-1 --print-profile-events --query "SELECT x, y FROM aliases_lazyness WHERE x = 1 FORMAT Null" |& grep -o -e "SleepFunctionMicroseconds.*" -e "SleepFunctionCalls.*" ${CLICKHOUSE_CLIENT} --query "drop table aliases_lazyness" diff --git a/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.sql b/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.sql index 7aa1b0112a6..2eec08635eb 100644 --- a/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.sql +++ b/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.sql @@ -1,3 +1,5 @@ +-- Tags: no-random-merge-tree-settings + CREATE TABLE test_extract(str String, arr Array(Array(String)) ALIAS extractAllGroupsHorizontal(str, '\\W(\\w+)=("[^"]*?"|[^",}]*)')) ENGINE=MergeTree() PARTITION BY tuple() ORDER BY tuple(); INSERT INTO test_extract (str) WITH range(8) as range_arr, arrayMap(x-> concat(toString(x),'Id'), range_arr) as key, arrayMap(x -> rand() % 8, range_arr) as val, arrayStringConcat(arrayMap((x,y) -> concat(x,'=',toString(y)), key, val),',') as str SELECT str FROM numbers(500000); diff --git a/tests/queries/0_stateless/01913_exact_rows_before_limit.sql b/tests/queries/0_stateless/01913_exact_rows_before_limit.sql index f6b02d0a510..6a354f575be 100644 --- a/tests/queries/0_stateless/01913_exact_rows_before_limit.sql +++ b/tests/queries/0_stateless/01913_exact_rows_before_limit.sql @@ -1,4 +1,5 @@ --- Tags: no-parallel +-- Tags: no-parallel, no-random-merge-tree-settings + drop table if exists test_rows_compact_part; create table test_rows_compact_part(f1 int,f2 int) engine=MergeTree partition by f1 order by f2 settings min_bytes_for_wide_part=10485760; insert into test_rows_compact_part select 0,arrayJoin(range(10000)) ; diff --git a/tests/queries/0_stateless/01934_constexpr_aggregate_function_parameters.sql b/tests/queries/0_stateless/01934_constexpr_aggregate_function_parameters.sql index 3ab969ca256..defc3d7b686 100644 --- a/tests/queries/0_stateless/01934_constexpr_aggregate_function_parameters.sql +++ b/tests/queries/0_stateless/01934_constexpr_aggregate_function_parameters.sql @@ -5,7 +5,7 @@ SELECT groupArray()(number) FROM numbers(10); -- { serverError 36 } SELECT groupArray(NULL)(number) FROM numbers(10); -- { serverError 36 } SELECT 
groupArray(NULL + NULL)(number) FROM numbers(10); -- { serverError 36 } SELECT groupArray([])(number) FROM numbers(10); -- { serverError 36 } -SELECT groupArray(throwIf(1))(number) FROM numbers(10); -- { serverError 395 } +SELECT groupArray(throwIf(1))(number) FROM numbers(10); -- { serverError 134 } -- Not the best error message, can be improved. SELECT groupArray(number)(number) FROM numbers(10); -- { serverError 47 } diff --git a/tests/queries/0_stateless/02016_aggregation_spark_bar.reference b/tests/queries/0_stateless/02016_aggregation_spark_bar.reference index 118d42a62d4..534942fc1d5 100644 --- a/tests/queries/0_stateless/02016_aggregation_spark_bar.reference +++ b/tests/queries/0_stateless/02016_aggregation_spark_bar.reference @@ -1,26 +1,69 @@ -▁ -▁█ -▃█▁ -▄▅█▁ -▄▄█▇▁ -▃▄▅█▃▁ -▂▅▃▇█▁▂ -▂▅▃▅██ ▁ -▁▅▄▃██▅ ▁ -▁▄▄▂▅▇█▂ ▂ +-- { echoOn } + +SELECT sparkbar(2)(event_date,cnt) FROM spark_bar_test; +▅█ +SELECT sparkbar(3)(event_date,cnt) FROM spark_bar_test; +▄█▃ +SELECT sparkbar(4)(event_date,cnt) FROM spark_bar_test; +▄▅█▃ +SELECT sparkbar(5)(event_date,cnt) FROM spark_bar_test; +▃▂▆█▂ +SELECT sparkbar(6)(event_date,cnt) FROM spark_bar_test; +▃▄▆█ ▃ +SELECT sparkbar(7)(event_date,cnt) FROM spark_bar_test; +▂▃▃▆█ ▂ +SELECT sparkbar(8)(event_date,cnt) FROM spark_bar_test; +▂▅▂▇▆█ ▂ +SELECT sparkbar(9)(event_date,cnt) FROM spark_bar_test; +▂▅▂▃▆█ ▂ +SELECT sparkbar(10)(event_date,cnt) FROM spark_bar_test; +▂▅▂▃▇▆█ ▂ +SELECT sparkbar(11)(event_date,cnt) FROM spark_bar_test; ▁▄▅▂▃▇▆█ ▂ +SELECT sparkbar(11,2,5)(event_date,cnt) FROM spark_bar_test; +SELECT sparkbar(11,3,7)(event_date,cnt) FROM spark_bar_test; +SELECT sparkbar(11,4,11)(event_date,cnt) FROM spark_bar_test; + +SELECT sparkbar(11,toDate('2020-01-02'),toDate('2020-01-05'))(event_date,cnt) FROM spark_bar_test; +▆ █ ▃ ▅ +SELECT sparkbar(11,toDate('2020-01-03'),toDate('2020-01-07'))(event_date,cnt) FROM spark_bar_test; +▆ ▃ ▄ █ ▇ +SELECT sparkbar(11,toDate('2020-01-04'),toDate('2020-01-11'))(event_date,cnt) FROM spark_bar_test; +▂▃▇ ▆█ ▂ +SELECT sparkbar(2,toDate('2020-01-01'),toDate('2020-01-08'))(event_date,cnt) FROM spark_bar_test; +▄█ +SELECT sparkbar(2,toDate('2020-01-02'),toDate('2020-01-09'))(event_date,cnt) FROM spark_bar_test; +▄█ +SELECT sparkbar(3,toDate('2020-01-01'),toDate('2020-01-09'))(event_date,cnt) FROM spark_bar_test; +▄▅█ +SELECT sparkbar(3,toDate('2020-01-01'),toDate('2020-01-10'))(event_date,cnt) FROM spark_bar_test; +▃▅█ +SELECT sparkbar(4,toDate('2020-01-01'),toDate('2020-01-08'))(event_date,cnt) FROM spark_bar_test; +▃▄▆█ +SELECT sparkbar(5,toDate('2020-01-01'),toDate('2020-01-10'))(event_date,cnt) FROM spark_bar_test; +▃▄▆█ +SELECT sparkbar(9,toDate('2020-01-01'),toDate('2020-01-10'))(event_date,cnt) FROM spark_bar_test; +▂▅▂▃▇▆█ +WITH number DIV 50 AS k, number % 50 AS value SELECT k, sparkbar(50, 0, 99)(number, value) FROM numbers(100) GROUP BY k ORDER BY k; +0 ▁▁▁▁▂▂▂▃▃▃▃▄▄▄▅▅▅▅▆▆▆▇▇▇█ +1 ▁▁▁▁▂▂▂▃▃▃▃▄▄▄▅▅▅▅▆▆▆▇▇▇█ +SELECT sparkbar(128, 0, 9223372036854775806)(toUInt64(9223372036854775806), number % 65535) FROM numbers(100); + █ +SELECT sparkbar(128)(toUInt64(9223372036854775806), number % 65535) FROM numbers(100); +█ +SELECT sparkbar(9)(x, y) FROM (SELECT * FROM Values('x UInt64, y UInt8', (18446744073709551615,255), (0,0), (0,0), (4036797895307271799,254))); + ▇ █ +SELECT sparkbar(8, 0, 7)((number + 1) % 8, 1), sparkbar(8, 0, 7)((number + 2) % 8, 1), sparkbar(8, 0, 7)((number + 3) % 8, 1) FROM numbers(7); + ███████ █ ██████ ██ █████ +SELECT sparkbar(2)(number, -number) FROM numbers(10); + +SELECT 
sparkbar(10)(number, number - 7) FROM numbers(10); + ▄█ +SELECT sparkbar(1024)(number, number) FROM numbers(1024); + ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇█ +SELECT sparkbar(1024)(number, 1) FROM numbers(1024); +████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ +SELECT sparkbar(1024)(number, 0) FROM numbers(1024); -▁ -▆█▁▃ -▅▁▂█▇ -▁▂▇▆█ ▁ -▁█ -▁█ -▁▄█ -▂█▁ -▁▃▅█ -▁▃▅█ -0 ▁▁▂▂▂▂▃▃▃▄▄▄▅▅▅▅▆▆▆▇▇▇▇██ -1 ▁▁▂▂▂▂▃▃▃▄▄▄▅▅▅▅▆▆▆▇▇▇▇██ diff --git a/tests/queries/0_stateless/02016_aggregation_spark_bar.sql b/tests/queries/0_stateless/02016_aggregation_spark_bar.sql index 8b5b62305ec..2100a3dd4a6 100644 --- a/tests/queries/0_stateless/02016_aggregation_spark_bar.sql +++ b/tests/queries/0_stateless/02016_aggregation_spark_bar.sql @@ -4,7 +4,8 @@ CREATE TABLE spark_bar_test (`cnt` UInt64,`event_date` Date) ENGINE = MergeTree INSERT INTO spark_bar_test VALUES(1,'2020-01-01'),(4,'2020-01-02'),(5,'2020-01-03'),(2,'2020-01-04'),(3,'2020-01-05'),(7,'2020-01-06'),(6,'2020-01-07'),(8,'2020-01-08'),(2,'2020-01-11'); -SELECT sparkbar(1)(event_date,cnt) FROM spark_bar_test; +-- { echoOn } + SELECT sparkbar(2)(event_date,cnt) FROM spark_bar_test; SELECT sparkbar(3)(event_date,cnt) FROM spark_bar_test; SELECT sparkbar(4)(event_date,cnt) FROM spark_bar_test; @@ -20,7 +21,6 @@ SELECT sparkbar(11,2,5)(event_date,cnt) FROM spark_bar_test; SELECT sparkbar(11,3,7)(event_date,cnt) FROM spark_bar_test; SELECT sparkbar(11,4,11)(event_date,cnt) FROM spark_bar_test; -SELECT sparkbar(11,toDate('2020-01-02'),toDate('2020-01-02'))(event_date,cnt) FROM spark_bar_test; SELECT 
sparkbar(11,toDate('2020-01-02'),toDate('2020-01-05'))(event_date,cnt) FROM spark_bar_test; SELECT sparkbar(11,toDate('2020-01-03'),toDate('2020-01-07'))(event_date,cnt) FROM spark_bar_test; SELECT sparkbar(11,toDate('2020-01-04'),toDate('2020-01-11'))(event_date,cnt) FROM spark_bar_test; @@ -31,7 +31,32 @@ SELECT sparkbar(3,toDate('2020-01-01'),toDate('2020-01-09'))(event_date,cnt) FRO SELECT sparkbar(3,toDate('2020-01-01'),toDate('2020-01-10'))(event_date,cnt) FROM spark_bar_test; SELECT sparkbar(4,toDate('2020-01-01'),toDate('2020-01-08'))(event_date,cnt) FROM spark_bar_test; SELECT sparkbar(5,toDate('2020-01-01'),toDate('2020-01-10'))(event_date,cnt) FROM spark_bar_test; - -DROP TABLE IF EXISTS spark_bar_test; +SELECT sparkbar(9,toDate('2020-01-01'),toDate('2020-01-10'))(event_date,cnt) FROM spark_bar_test; WITH number DIV 50 AS k, number % 50 AS value SELECT k, sparkbar(50, 0, 99)(number, value) FROM numbers(100) GROUP BY k ORDER BY k; + +SELECT sparkbar(128, 0, 9223372036854775806)(toUInt64(9223372036854775806), number % 65535) FROM numbers(100); +SELECT sparkbar(128)(toUInt64(9223372036854775806), number % 65535) FROM numbers(100); +SELECT sparkbar(9)(x, y) FROM (SELECT * FROM Values('x UInt64, y UInt8', (18446744073709551615,255), (0,0), (0,0), (4036797895307271799,254))); + +SELECT sparkbar(8, 0, 7)((number + 1) % 8, 1), sparkbar(8, 0, 7)((number + 2) % 8, 1), sparkbar(8, 0, 7)((number + 3) % 8, 1) FROM numbers(7); + +SELECT sparkbar(2)(number, -number) FROM numbers(10); +SELECT sparkbar(10)(number, number - 7) FROM numbers(10); +SELECT sparkbar(1024)(number, number) FROM numbers(1024); +SELECT sparkbar(1024)(number, 1) FROM numbers(1024); +SELECT sparkbar(1024)(number, 0) FROM numbers(1024); + +-- { echoOff } + +SELECT sparkbar(0)(number, number) FROM numbers(10); -- { serverError BAD_ARGUMENTS } +SELECT sparkbar(1)(number, number) FROM numbers(10); -- { serverError BAD_ARGUMENTS } +SELECT sparkbar(1025)(number, number) FROM numbers(10); -- { serverError BAD_ARGUMENTS } +SELECT sparkbar(2, 10, 9)(number, number) FROM numbers(10); -- { serverError BAD_ARGUMENTS } +SELECT sparkbar(2, -5, -1)(number, number) FROM numbers(10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT sparkbar(2, -5, 1)(number, number) FROM numbers(10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT sparkbar(2)(toInt32(number), number) FROM numbers(10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT sparkbar(2, 0)(number, number) FROM numbers(10); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT sparkbar(2, 0, 5, 8)(number, number) FROM numbers(10); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +DROP TABLE IF EXISTS spark_bar_test; diff --git a/tests/queries/0_stateless/02051_read_settings.reference.j2 b/tests/queries/0_stateless/02051_read_settings.reference.j2 index 391cf3adf35..ee6c4bdd918 100644 --- a/tests/queries/0_stateless/02051_read_settings.reference.j2 +++ b/tests/queries/0_stateless/02051_read_settings.reference.j2 @@ -1,5 +1,5 @@ {% for index_granularity_bytes in [0, 10 * 1024 * 1024] -%} -{% for read_method in ['read', 'mmap', 'pread_threadpool', 'pread_fake_async'] -%} +{% for read_method in ['read', 'mmap', 'io_uring', 'pread_threadpool', 'pread_fake_async'] -%} {% for direct_io in [0, 1] -%} {% for prefetch in [0, 1] -%} {% for priority in [0, 1] -%} diff --git a/tests/queries/0_stateless/02051_read_settings.sql.j2 b/tests/queries/0_stateless/02051_read_settings.sql.j2 index 69dd3c264ba..1f121b0c268 100644 --- 
a/tests/queries/0_stateless/02051_read_settings.sql.j2 +++ b/tests/queries/0_stateless/02051_read_settings.sql.j2 @@ -19,7 +19,7 @@ settings as select number, repeat(toString(number), 5) from numbers(1e6); {# check each local_filesystem_read_method #} -{% for read_method in ['read', 'mmap', 'pread_threadpool', 'pread_fake_async'] %} +{% for read_method in ['read', 'mmap', 'io_uring', 'pread_threadpool', 'pread_fake_async'] %} {# check w/ O_DIRECT and w/o (min_bytes_to_use_direct_io) #} {% for direct_io in [0, 1] %} {# check local_filesystem_read_prefetch (just a smoke test) #} diff --git a/tests/queries/0_stateless/02117_show_create_table_system.reference b/tests/queries/0_stateless/02117_show_create_table_system.reference index beff51fb294..dc7cdddf8ec 100644 --- a/tests/queries/0_stateless/02117_show_create_table_system.reference +++ b/tests/queries/0_stateless/02117_show_create_table_system.reference @@ -288,7 +288,7 @@ CREATE TABLE system.grants ( `user_name` Nullable(String), `role_name` Nullable(String), - `access_type` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE FUNCTION' = 53, 'CREATE NAMED COLLECTION' = 54, 'CREATE' = 55, 'DROP DATABASE' = 56, 'DROP TABLE' = 57, 'DROP VIEW' = 58, 'DROP DICTIONARY' = 59, 'DROP FUNCTION' = 60, 'DROP NAMED COLLECTION' = 61, 'DROP' = 62, 'TRUNCATE' = 63, 'OPTIMIZE' = 64, 'BACKUP' = 65, 'KILL QUERY' = 66, 'KILL TRANSACTION' = 67, 'MOVE PARTITION BETWEEN SHARDS' = 68, 'CREATE USER' = 69, 'ALTER USER' = 70, 'DROP USER' = 71, 'CREATE ROLE' = 72, 'ALTER ROLE' = 73, 'DROP ROLE' = 74, 'ROLE ADMIN' = 75, 'CREATE ROW POLICY' = 76, 'ALTER ROW POLICY' = 77, 'DROP ROW POLICY' = 78, 'CREATE QUOTA' = 79, 'ALTER QUOTA' = 80, 'DROP QUOTA' = 81, 'CREATE SETTINGS PROFILE' = 82, 'ALTER SETTINGS PROFILE' = 83, 'DROP SETTINGS PROFILE' = 84, 'SHOW USERS' = 85, 'SHOW ROLES' = 86, 'SHOW ROW POLICIES' = 87, 'SHOW QUOTAS' = 88, 'SHOW SETTINGS PROFILES' = 89, 'SHOW ACCESS' = 90, 'SHOW NAMED COLLECTIONS' = 91, 'ACCESS MANAGEMENT' = 92, 'SYSTEM SHUTDOWN' = 93, 'SYSTEM DROP DNS CACHE' = 94, 'SYSTEM DROP MARK CACHE' = 95, 'SYSTEM DROP UNCOMPRESSED CACHE' = 96, 'SYSTEM DROP MMAP CACHE' = 97, 'SYSTEM DROP QUERY RESULT CACHE' = 98, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 99, 'SYSTEM DROP FILESYSTEM 
CACHE' = 100, 'SYSTEM DROP SCHEMA CACHE' = 101, 'SYSTEM DROP CACHE' = 102, 'SYSTEM RELOAD CONFIG' = 103, 'SYSTEM RELOAD USERS' = 104, 'SYSTEM RELOAD SYMBOLS' = 105, 'SYSTEM RELOAD DICTIONARY' = 106, 'SYSTEM RELOAD MODEL' = 107, 'SYSTEM RELOAD FUNCTION' = 108, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 109, 'SYSTEM RELOAD' = 110, 'SYSTEM RESTART DISK' = 111, 'SYSTEM MERGES' = 112, 'SYSTEM TTL MERGES' = 113, 'SYSTEM FETCHES' = 114, 'SYSTEM MOVES' = 115, 'SYSTEM DISTRIBUTED SENDS' = 116, 'SYSTEM REPLICATED SENDS' = 117, 'SYSTEM SENDS' = 118, 'SYSTEM REPLICATION QUEUES' = 119, 'SYSTEM DROP REPLICA' = 120, 'SYSTEM SYNC REPLICA' = 121, 'SYSTEM RESTART REPLICA' = 122, 'SYSTEM RESTORE REPLICA' = 123, 'SYSTEM WAIT LOADING PARTS' = 124, 'SYSTEM SYNC DATABASE REPLICA' = 125, 'SYSTEM SYNC TRANSACTION LOG' = 126, 'SYSTEM SYNC FILE CACHE' = 127, 'SYSTEM FLUSH DISTRIBUTED' = 128, 'SYSTEM FLUSH LOGS' = 129, 'SYSTEM FLUSH' = 130, 'SYSTEM THREAD FUZZER' = 131, 'SYSTEM UNFREEZE' = 132, 'SYSTEM' = 133, 'dictGet' = 134, 'addressToLine' = 135, 'addressToLineWithInlines' = 136, 'addressToSymbol' = 137, 'demangle' = 138, 'INTROSPECTION' = 139, 'FILE' = 140, 'URL' = 141, 'REMOTE' = 142, 'MONGO' = 143, 'MEILISEARCH' = 144, 'MYSQL' = 145, 'POSTGRES' = 146, 'SQLITE' = 147, 'ODBC' = 148, 'JDBC' = 149, 'HDFS' = 150, 'S3' = 151, 'HIVE' = 152, 'SOURCES' = 153, 'CLUSTER' = 154, 'ALL' = 155, 'NONE' = 156), + `access_type` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE FUNCTION' = 53, 'CREATE NAMED COLLECTION' = 54, 'CREATE' = 55, 'DROP DATABASE' = 56, 'DROP TABLE' = 57, 'DROP VIEW' = 58, 'DROP DICTIONARY' = 59, 'DROP FUNCTION' = 60, 'DROP NAMED COLLECTION' = 61, 'DROP' = 62, 'TRUNCATE' = 63, 'OPTIMIZE' = 64, 'BACKUP' = 65, 'KILL QUERY' = 66, 'KILL TRANSACTION' = 67, 'MOVE PARTITION BETWEEN SHARDS' = 68, 'CREATE USER' = 69, 'ALTER USER' = 70, 'DROP USER' = 71, 'CREATE ROLE' = 72, 'ALTER ROLE' = 73, 'DROP ROLE' = 74, 'ROLE ADMIN' = 75, 'CREATE ROW POLICY' = 76, 'ALTER ROW POLICY' = 77, 'DROP ROW POLICY' = 78, 'CREATE QUOTA' = 79, 'ALTER QUOTA' = 80, 'DROP QUOTA' = 81, 'CREATE SETTINGS PROFILE' = 82, 'ALTER SETTINGS PROFILE' = 83, 'DROP SETTINGS PROFILE' = 84, 'SHOW USERS' = 85, 'SHOW ROLES' = 86, 'SHOW ROW POLICIES' = 87, 'SHOW QUOTAS' = 88, 'SHOW 
SETTINGS PROFILES' = 89, 'SHOW ACCESS' = 90, 'SHOW NAMED COLLECTIONS' = 91, 'ACCESS MANAGEMENT' = 92, 'SYSTEM SHUTDOWN' = 93, 'SYSTEM DROP DNS CACHE' = 94, 'SYSTEM DROP MARK CACHE' = 95, 'SYSTEM DROP UNCOMPRESSED CACHE' = 96, 'SYSTEM DROP MMAP CACHE' = 97, 'SYSTEM DROP QUERY CACHE' = 98, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 99, 'SYSTEM DROP FILESYSTEM CACHE' = 100, 'SYSTEM DROP SCHEMA CACHE' = 101, 'SYSTEM DROP S3 CLIENT CACHE' = 102, 'SYSTEM DROP CACHE' = 103, 'SYSTEM RELOAD CONFIG' = 104, 'SYSTEM RELOAD USERS' = 105, 'SYSTEM RELOAD SYMBOLS' = 106, 'SYSTEM RELOAD DICTIONARY' = 107, 'SYSTEM RELOAD MODEL' = 108, 'SYSTEM RELOAD FUNCTION' = 109, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 110, 'SYSTEM RELOAD' = 111, 'SYSTEM RESTART DISK' = 112, 'SYSTEM MERGES' = 113, 'SYSTEM TTL MERGES' = 114, 'SYSTEM FETCHES' = 115, 'SYSTEM MOVES' = 116, 'SYSTEM DISTRIBUTED SENDS' = 117, 'SYSTEM REPLICATED SENDS' = 118, 'SYSTEM SENDS' = 119, 'SYSTEM REPLICATION QUEUES' = 120, 'SYSTEM DROP REPLICA' = 121, 'SYSTEM SYNC REPLICA' = 122, 'SYSTEM RESTART REPLICA' = 123, 'SYSTEM RESTORE REPLICA' = 124, 'SYSTEM WAIT LOADING PARTS' = 125, 'SYSTEM SYNC DATABASE REPLICA' = 126, 'SYSTEM SYNC TRANSACTION LOG' = 127, 'SYSTEM SYNC FILE CACHE' = 128, 'SYSTEM FLUSH DISTRIBUTED' = 129, 'SYSTEM FLUSH LOGS' = 130, 'SYSTEM FLUSH' = 131, 'SYSTEM THREAD FUZZER' = 132, 'SYSTEM UNFREEZE' = 133, 'SYSTEM' = 134, 'dictGet' = 135, 'addressToLine' = 136, 'addressToLineWithInlines' = 137, 'addressToSymbol' = 138, 'demangle' = 139, 'INTROSPECTION' = 140, 'FILE' = 141, 'URL' = 142, 'REMOTE' = 143, 'MONGO' = 144, 'MEILISEARCH' = 145, 'MYSQL' = 146, 'POSTGRES' = 147, 'SQLITE' = 148, 'ODBC' = 149, 'JDBC' = 150, 'HDFS' = 151, 'S3' = 152, 'HIVE' = 153, 'SOURCES' = 154, 'CLUSTER' = 155, 'ALL' = 156, 'NONE' = 157), `database` Nullable(String), `table` Nullable(String), `column` Nullable(String), @@ -569,10 +569,10 @@ ENGINE = SystemPartsColumns COMMENT 'SYSTEM TABLE is built on the fly.' 
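(The bulky Enum16 rewrites in `02117_show_create_table_system.reference` reduce to two access-type changes, repeated for `system.grants` and both enum columns of `system.privileges`: `'SYSTEM DROP QUERY RESULT CACHE'` is renamed to `'SYSTEM DROP QUERY CACHE'`, and a new `'SYSTEM DROP S3 CLIENT CACHE'` entry is inserted before `'SYSTEM DROP CACHE'`, shifting every subsequent value up by one — e.g. `'NONE'` moves from 156 to 157. A sketch for eyeballing the renumbered entries on a live server; the exact row set naturally depends on the build:

```sql
-- privilege is an Enum16, so convert it to String before pattern matching.
SELECT toString(privilege) AS name, parent_group
FROM system.privileges
WHERE toString(privilege) LIKE 'SYSTEM DROP%CACHE%'
ORDER BY name;
```
)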
CREATE TABLE system.privileges ( - `privilege` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE FUNCTION' = 53, 'CREATE NAMED COLLECTION' = 54, 'CREATE' = 55, 'DROP DATABASE' = 56, 'DROP TABLE' = 57, 'DROP VIEW' = 58, 'DROP DICTIONARY' = 59, 'DROP FUNCTION' = 60, 'DROP NAMED COLLECTION' = 61, 'DROP' = 62, 'TRUNCATE' = 63, 'OPTIMIZE' = 64, 'BACKUP' = 65, 'KILL QUERY' = 66, 'KILL TRANSACTION' = 67, 'MOVE PARTITION BETWEEN SHARDS' = 68, 'CREATE USER' = 69, 'ALTER USER' = 70, 'DROP USER' = 71, 'CREATE ROLE' = 72, 'ALTER ROLE' = 73, 'DROP ROLE' = 74, 'ROLE ADMIN' = 75, 'CREATE ROW POLICY' = 76, 'ALTER ROW POLICY' = 77, 'DROP ROW POLICY' = 78, 'CREATE QUOTA' = 79, 'ALTER QUOTA' = 80, 'DROP QUOTA' = 81, 'CREATE SETTINGS PROFILE' = 82, 'ALTER SETTINGS PROFILE' = 83, 'DROP SETTINGS PROFILE' = 84, 'SHOW USERS' = 85, 'SHOW ROLES' = 86, 'SHOW ROW POLICIES' = 87, 'SHOW QUOTAS' = 88, 'SHOW SETTINGS PROFILES' = 89, 'SHOW ACCESS' = 90, 'SHOW NAMED COLLECTIONS' = 91, 'ACCESS MANAGEMENT' = 92, 'SYSTEM SHUTDOWN' = 93, 'SYSTEM DROP DNS CACHE' = 94, 'SYSTEM DROP MARK CACHE' = 95, 'SYSTEM DROP UNCOMPRESSED CACHE' = 96, 'SYSTEM DROP MMAP CACHE' = 97, 'SYSTEM DROP QUERY RESULT CACHE' = 98, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 99, 'SYSTEM DROP FILESYSTEM CACHE' = 100, 'SYSTEM DROP SCHEMA CACHE' = 101, 'SYSTEM DROP CACHE' = 102, 'SYSTEM RELOAD CONFIG' = 103, 'SYSTEM RELOAD USERS' = 104, 'SYSTEM RELOAD SYMBOLS' = 105, 'SYSTEM RELOAD DICTIONARY' = 106, 'SYSTEM RELOAD MODEL' = 107, 'SYSTEM RELOAD FUNCTION' = 108, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 109, 'SYSTEM RELOAD' = 110, 'SYSTEM RESTART DISK' = 111, 'SYSTEM MERGES' = 112, 'SYSTEM TTL MERGES' = 113, 'SYSTEM FETCHES' = 114, 'SYSTEM MOVES' = 115, 'SYSTEM DISTRIBUTED SENDS' = 116, 'SYSTEM REPLICATED SENDS' = 117, 'SYSTEM SENDS' = 118, 'SYSTEM REPLICATION QUEUES' = 119, 'SYSTEM DROP REPLICA' = 120, 'SYSTEM SYNC REPLICA' = 121, 'SYSTEM RESTART REPLICA' = 122, 'SYSTEM RESTORE REPLICA' = 123, 'SYSTEM WAIT LOADING PARTS' = 124, 'SYSTEM SYNC DATABASE REPLICA' = 125, 'SYSTEM SYNC TRANSACTION LOG' = 126, 'SYSTEM SYNC FILE CACHE' = 127, 'SYSTEM FLUSH DISTRIBUTED' = 128, 'SYSTEM FLUSH LOGS' = 129, 'SYSTEM FLUSH' = 130, 'SYSTEM THREAD FUZZER' = 131, 'SYSTEM UNFREEZE' = 132, 'SYSTEM' = 133, 
'dictGet' = 134, 'addressToLine' = 135, 'addressToLineWithInlines' = 136, 'addressToSymbol' = 137, 'demangle' = 138, 'INTROSPECTION' = 139, 'FILE' = 140, 'URL' = 141, 'REMOTE' = 142, 'MONGO' = 143, 'MEILISEARCH' = 144, 'MYSQL' = 145, 'POSTGRES' = 146, 'SQLITE' = 147, 'ODBC' = 148, 'JDBC' = 149, 'HDFS' = 150, 'S3' = 151, 'HIVE' = 152, 'SOURCES' = 153, 'CLUSTER' = 154, 'ALL' = 155, 'NONE' = 156), + `privilege` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE FUNCTION' = 53, 'CREATE NAMED COLLECTION' = 54, 'CREATE' = 55, 'DROP DATABASE' = 56, 'DROP TABLE' = 57, 'DROP VIEW' = 58, 'DROP DICTIONARY' = 59, 'DROP FUNCTION' = 60, 'DROP NAMED COLLECTION' = 61, 'DROP' = 62, 'TRUNCATE' = 63, 'OPTIMIZE' = 64, 'BACKUP' = 65, 'KILL QUERY' = 66, 'KILL TRANSACTION' = 67, 'MOVE PARTITION BETWEEN SHARDS' = 68, 'CREATE USER' = 69, 'ALTER USER' = 70, 'DROP USER' = 71, 'CREATE ROLE' = 72, 'ALTER ROLE' = 73, 'DROP ROLE' = 74, 'ROLE ADMIN' = 75, 'CREATE ROW POLICY' = 76, 'ALTER ROW POLICY' = 77, 'DROP ROW POLICY' = 78, 'CREATE QUOTA' = 79, 'ALTER QUOTA' = 80, 'DROP QUOTA' = 81, 'CREATE SETTINGS PROFILE' = 82, 'ALTER SETTINGS PROFILE' = 83, 'DROP SETTINGS PROFILE' = 84, 'SHOW USERS' = 85, 'SHOW ROLES' = 86, 'SHOW ROW POLICIES' = 87, 'SHOW QUOTAS' = 88, 'SHOW SETTINGS PROFILES' = 89, 'SHOW ACCESS' = 90, 'SHOW NAMED COLLECTIONS' = 91, 'ACCESS MANAGEMENT' = 92, 'SYSTEM SHUTDOWN' = 93, 'SYSTEM DROP DNS CACHE' = 94, 'SYSTEM DROP MARK CACHE' = 95, 'SYSTEM DROP UNCOMPRESSED CACHE' = 96, 'SYSTEM DROP MMAP CACHE' = 97, 'SYSTEM DROP QUERY CACHE' = 98, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 99, 'SYSTEM DROP FILESYSTEM CACHE' = 100, 'SYSTEM DROP SCHEMA CACHE' = 101, 'SYSTEM DROP S3 CLIENT CACHE' = 102, 'SYSTEM DROP CACHE' = 103, 'SYSTEM RELOAD CONFIG' = 104, 'SYSTEM RELOAD USERS' = 105, 'SYSTEM RELOAD SYMBOLS' = 106, 'SYSTEM RELOAD DICTIONARY' = 107, 'SYSTEM RELOAD MODEL' = 108, 'SYSTEM RELOAD FUNCTION' = 109, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 110, 'SYSTEM RELOAD' = 111, 'SYSTEM RESTART DISK' = 112, 'SYSTEM MERGES' = 113, 'SYSTEM TTL MERGES' = 114, 'SYSTEM FETCHES' = 115, 'SYSTEM MOVES' = 116, 'SYSTEM DISTRIBUTED SENDS' = 117, 'SYSTEM REPLICATED SENDS' = 118, 'SYSTEM SENDS' = 119, 'SYSTEM REPLICATION QUEUES' = 120, 'SYSTEM DROP REPLICA' = 121, 
'SYSTEM SYNC REPLICA' = 122, 'SYSTEM RESTART REPLICA' = 123, 'SYSTEM RESTORE REPLICA' = 124, 'SYSTEM WAIT LOADING PARTS' = 125, 'SYSTEM SYNC DATABASE REPLICA' = 126, 'SYSTEM SYNC TRANSACTION LOG' = 127, 'SYSTEM SYNC FILE CACHE' = 128, 'SYSTEM FLUSH DISTRIBUTED' = 129, 'SYSTEM FLUSH LOGS' = 130, 'SYSTEM FLUSH' = 131, 'SYSTEM THREAD FUZZER' = 132, 'SYSTEM UNFREEZE' = 133, 'SYSTEM' = 134, 'dictGet' = 135, 'addressToLine' = 136, 'addressToLineWithInlines' = 137, 'addressToSymbol' = 138, 'demangle' = 139, 'INTROSPECTION' = 140, 'FILE' = 141, 'URL' = 142, 'REMOTE' = 143, 'MONGO' = 144, 'MEILISEARCH' = 145, 'MYSQL' = 146, 'POSTGRES' = 147, 'SQLITE' = 148, 'ODBC' = 149, 'JDBC' = 150, 'HDFS' = 151, 'S3' = 152, 'HIVE' = 153, 'SOURCES' = 154, 'CLUSTER' = 155, 'ALL' = 156, 'NONE' = 157), `aliases` Array(String), `level` Nullable(Enum8('GLOBAL' = 0, 'DATABASE' = 1, 'TABLE' = 2, 'DICTIONARY' = 3, 'VIEW' = 4, 'COLUMN' = 5)), - `parent_group` Nullable(Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE FUNCTION' = 53, 'CREATE NAMED COLLECTION' = 54, 'CREATE' = 55, 'DROP DATABASE' = 56, 'DROP TABLE' = 57, 'DROP VIEW' = 58, 'DROP DICTIONARY' = 59, 'DROP FUNCTION' = 60, 'DROP NAMED COLLECTION' = 61, 'DROP' = 62, 'TRUNCATE' = 63, 'OPTIMIZE' = 64, 'BACKUP' = 65, 'KILL QUERY' = 66, 'KILL TRANSACTION' = 67, 'MOVE PARTITION BETWEEN SHARDS' = 68, 'CREATE USER' = 69, 'ALTER USER' = 70, 'DROP USER' = 71, 'CREATE ROLE' = 72, 'ALTER ROLE' = 73, 'DROP ROLE' = 74, 'ROLE ADMIN' = 75, 'CREATE ROW POLICY' = 76, 'ALTER ROW POLICY' = 77, 'DROP ROW POLICY' = 78, 'CREATE QUOTA' = 79, 'ALTER QUOTA' = 80, 'DROP QUOTA' = 81, 'CREATE SETTINGS PROFILE' = 82, 'ALTER SETTINGS PROFILE' = 83, 'DROP SETTINGS PROFILE' = 84, 'SHOW USERS' = 85, 'SHOW ROLES' = 86, 'SHOW ROW POLICIES' = 87, 'SHOW QUOTAS' = 88, 'SHOW SETTINGS PROFILES' = 89, 'SHOW ACCESS' = 90, 'SHOW NAMED COLLECTIONS' = 91, 'ACCESS MANAGEMENT' = 92, 'SYSTEM SHUTDOWN' = 93, 'SYSTEM DROP DNS CACHE' = 94, 'SYSTEM DROP MARK CACHE' = 95, 'SYSTEM DROP UNCOMPRESSED CACHE' = 96, 'SYSTEM DROP MMAP CACHE' = 97, 'SYSTEM DROP QUERY RESULT CACHE' = 98, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 99, 'SYSTEM DROP FILESYSTEM CACHE' = 100, 'SYSTEM DROP SCHEMA CACHE' = 101, 'SYSTEM DROP CACHE' = 102, 'SYSTEM RELOAD CONFIG' 
= 103, 'SYSTEM RELOAD USERS' = 104, 'SYSTEM RELOAD SYMBOLS' = 105, 'SYSTEM RELOAD DICTIONARY' = 106, 'SYSTEM RELOAD MODEL' = 107, 'SYSTEM RELOAD FUNCTION' = 108, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 109, 'SYSTEM RELOAD' = 110, 'SYSTEM RESTART DISK' = 111, 'SYSTEM MERGES' = 112, 'SYSTEM TTL MERGES' = 113, 'SYSTEM FETCHES' = 114, 'SYSTEM MOVES' = 115, 'SYSTEM DISTRIBUTED SENDS' = 116, 'SYSTEM REPLICATED SENDS' = 117, 'SYSTEM SENDS' = 118, 'SYSTEM REPLICATION QUEUES' = 119, 'SYSTEM DROP REPLICA' = 120, 'SYSTEM SYNC REPLICA' = 121, 'SYSTEM RESTART REPLICA' = 122, 'SYSTEM RESTORE REPLICA' = 123, 'SYSTEM WAIT LOADING PARTS' = 124, 'SYSTEM SYNC DATABASE REPLICA' = 125, 'SYSTEM SYNC TRANSACTION LOG' = 126, 'SYSTEM SYNC FILE CACHE' = 127, 'SYSTEM FLUSH DISTRIBUTED' = 128, 'SYSTEM FLUSH LOGS' = 129, 'SYSTEM FLUSH' = 130, 'SYSTEM THREAD FUZZER' = 131, 'SYSTEM UNFREEZE' = 132, 'SYSTEM' = 133, 'dictGet' = 134, 'addressToLine' = 135, 'addressToLineWithInlines' = 136, 'addressToSymbol' = 137, 'demangle' = 138, 'INTROSPECTION' = 139, 'FILE' = 140, 'URL' = 141, 'REMOTE' = 142, 'MONGO' = 143, 'MEILISEARCH' = 144, 'MYSQL' = 145, 'POSTGRES' = 146, 'SQLITE' = 147, 'ODBC' = 148, 'JDBC' = 149, 'HDFS' = 150, 'S3' = 151, 'HIVE' = 152, 'SOURCES' = 153, 'CLUSTER' = 154, 'ALL' = 155, 'NONE' = 156)) + `parent_group` Nullable(Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE FUNCTION' = 53, 'CREATE NAMED COLLECTION' = 54, 'CREATE' = 55, 'DROP DATABASE' = 56, 'DROP TABLE' = 57, 'DROP VIEW' = 58, 'DROP DICTIONARY' = 59, 'DROP FUNCTION' = 60, 'DROP NAMED COLLECTION' = 61, 'DROP' = 62, 'TRUNCATE' = 63, 'OPTIMIZE' = 64, 'BACKUP' = 65, 'KILL QUERY' = 66, 'KILL TRANSACTION' = 67, 'MOVE PARTITION BETWEEN SHARDS' = 68, 'CREATE USER' = 69, 'ALTER USER' = 70, 'DROP USER' = 71, 'CREATE ROLE' = 72, 'ALTER ROLE' = 73, 'DROP ROLE' = 74, 'ROLE ADMIN' = 75, 'CREATE ROW POLICY' = 76, 'ALTER ROW POLICY' = 77, 'DROP ROW POLICY' = 78, 'CREATE QUOTA' = 79, 'ALTER QUOTA' = 80, 'DROP QUOTA' = 81, 'CREATE SETTINGS PROFILE' = 82, 'ALTER SETTINGS PROFILE' = 83, 'DROP SETTINGS PROFILE' = 84, 'SHOW USERS' = 85, 'SHOW ROLES' = 86, 'SHOW ROW POLICIES' = 87, 'SHOW QUOTAS' = 88, 'SHOW SETTINGS PROFILES' = 89, 'SHOW ACCESS' = 90, 'SHOW NAMED COLLECTIONS' = 91, 'ACCESS 
MANAGEMENT' = 92, 'SYSTEM SHUTDOWN' = 93, 'SYSTEM DROP DNS CACHE' = 94, 'SYSTEM DROP MARK CACHE' = 95, 'SYSTEM DROP UNCOMPRESSED CACHE' = 96, 'SYSTEM DROP MMAP CACHE' = 97, 'SYSTEM DROP QUERY CACHE' = 98, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 99, 'SYSTEM DROP FILESYSTEM CACHE' = 100, 'SYSTEM DROP SCHEMA CACHE' = 101, 'SYSTEM DROP S3 CLIENT CACHE' = 102, 'SYSTEM DROP CACHE' = 103, 'SYSTEM RELOAD CONFIG' = 104, 'SYSTEM RELOAD USERS' = 105, 'SYSTEM RELOAD SYMBOLS' = 106, 'SYSTEM RELOAD DICTIONARY' = 107, 'SYSTEM RELOAD MODEL' = 108, 'SYSTEM RELOAD FUNCTION' = 109, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 110, 'SYSTEM RELOAD' = 111, 'SYSTEM RESTART DISK' = 112, 'SYSTEM MERGES' = 113, 'SYSTEM TTL MERGES' = 114, 'SYSTEM FETCHES' = 115, 'SYSTEM MOVES' = 116, 'SYSTEM DISTRIBUTED SENDS' = 117, 'SYSTEM REPLICATED SENDS' = 118, 'SYSTEM SENDS' = 119, 'SYSTEM REPLICATION QUEUES' = 120, 'SYSTEM DROP REPLICA' = 121, 'SYSTEM SYNC REPLICA' = 122, 'SYSTEM RESTART REPLICA' = 123, 'SYSTEM RESTORE REPLICA' = 124, 'SYSTEM WAIT LOADING PARTS' = 125, 'SYSTEM SYNC DATABASE REPLICA' = 126, 'SYSTEM SYNC TRANSACTION LOG' = 127, 'SYSTEM SYNC FILE CACHE' = 128, 'SYSTEM FLUSH DISTRIBUTED' = 129, 'SYSTEM FLUSH LOGS' = 130, 'SYSTEM FLUSH' = 131, 'SYSTEM THREAD FUZZER' = 132, 'SYSTEM UNFREEZE' = 133, 'SYSTEM' = 134, 'dictGet' = 135, 'addressToLine' = 136, 'addressToLineWithInlines' = 137, 'addressToSymbol' = 138, 'demangle' = 139, 'INTROSPECTION' = 140, 'FILE' = 141, 'URL' = 142, 'REMOTE' = 143, 'MONGO' = 144, 'MEILISEARCH' = 145, 'MYSQL' = 146, 'POSTGRES' = 147, 'SQLITE' = 148, 'ODBC' = 149, 'JDBC' = 150, 'HDFS' = 151, 'S3' = 152, 'HIVE' = 153, 'SOURCES' = 154, 'CLUSTER' = 155, 'ALL' = 156, 'NONE' = 157)) ) ENGINE = SystemPrivileges COMMENT 'SYSTEM TABLE is built on the fly.' @@ -612,6 +612,7 @@ CREATE TABLE system.processes `memory_usage` Int64, `peak_memory_usage` Int64, `query` String, + `query_kind` String, `thread_ids` Array(UInt64), `ProfileEvents` Map(String, UInt64), `Settings` Map(String, String), @@ -922,6 +923,7 @@ CREATE TABLE system.replication_queue `is_currently_executing` UInt8, `num_tries` UInt32, `last_exception` String, + `last_exception_time` DateTime, `last_attempt_time` DateTime, `num_postponed` UInt32, `postpone_reason` String, diff --git a/tests/queries/0_stateless/02234_cast_to_ip_address.reference b/tests/queries/0_stateless/02234_cast_to_ip_address.reference index 96aae2a978c..9023b36a9bf 100644 --- a/tests/queries/0_stateless/02234_cast_to_ip_address.reference +++ b/tests/queries/0_stateless/02234_cast_to_ip_address.reference @@ -31,6 +31,9 @@ IPv6 functions ::ffff:127.0.0.1 ::ffff:127.0.0.1 ::ffff:127.0.0.1 +:: +\N +100000000 -- ::ffff:127.0.0.1 -- diff --git a/tests/queries/0_stateless/02234_cast_to_ip_address.sql b/tests/queries/0_stateless/02234_cast_to_ip_address.sql index 436f232e441..6c65fe86cc9 100644 --- a/tests/queries/0_stateless/02234_cast_to_ip_address.sql +++ b/tests/queries/0_stateless/02234_cast_to_ip_address.sql @@ -56,6 +56,12 @@ SELECT toIPv6('::ffff:127.0.0.1'); SELECT toIPv6OrDefault('::ffff:127.0.0.1'); SELECT toIPv6OrNull('::ffff:127.0.0.1'); +SELECT toIPv6('::.1.2.3'); --{serverError CANNOT_PARSE_IPV6} +SELECT toIPv6OrDefault('::.1.2.3'); +SELECT toIPv6OrNull('::.1.2.3'); + +SELECT count() FROM numbers_mt(100000000) WHERE NOT ignore(toIPv6OrZero(randomString(8))); + SELECT '--'; SELECT cast('test' , 'IPv6'); --{serverError CANNOT_PARSE_IPV6} diff --git a/tests/queries/0_stateless/02245_weird_partitions_pruning.sql 
b/tests/queries/0_stateless/02245_weird_partitions_pruning.sql index 6273a9f3d59..8b0208da6aa 100644 --- a/tests/queries/0_stateless/02245_weird_partitions_pruning.sql +++ b/tests/queries/0_stateless/02245_weird_partitions_pruning.sql @@ -1,9 +1,11 @@ --- We use a hack - partition by ignore(d1). In some cases there are two columns +-- Tags: no-random-merge-tree-settings + +-- We use a hack - partition by ignore(d1). In some cases there are two columns -- not fully correlated (<1) (date_begin - date_end or datetime - datetime_in_TZ_with_DST) -- If we partition by these columns instead of one it will be twice more partitions. --- Partition by (.., ignore(d1)) allows to partition by the first column but build +-- Partition by (.., ignore(d1)) allows to partition by the first column but build -- min_max indexes for both column, so partition pruning works for both columns. --- It's very similar to min_max skip index but gives bigger performance boost, +-- It's very similar to min_max skip index but gives bigger performance boost, -- because partition pruning happens on very early query stage. diff --git a/tests/queries/0_stateless/02286_parallel_final.sh b/tests/queries/0_stateless/02286_parallel_final.sh index d31450a0482..de0cca0e966 100755 --- a/tests/queries/0_stateless/02286_parallel_final.sh +++ b/tests/queries/0_stateless/02286_parallel_final.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# Tags: no-random-merge-tree-settings CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/02293_compatibility_ignore_auto_increment_in_create_table.sql b/tests/queries/0_stateless/02293_compatibility_ignore_auto_increment_in_create_table.sql index a671826ccf5..5d9b711ae9c 100644 --- a/tests/queries/0_stateless/02293_compatibility_ignore_auto_increment_in_create_table.sql +++ b/tests/queries/0_stateless/02293_compatibility_ignore_auto_increment_in_create_table.sql @@ -1,3 +1,7 @@ +-- Tags: no-random-merge-tree-settings +-- Tag no-random-merge-tree-settings: query is rewritten in parser +-- while adding merge tree settings + select 'disable AUTO_INCREMENT compatibility mode'; set compatibility_ignore_auto_increment_in_create_table=false; diff --git a/tests/queries/0_stateless/02293_selected_rows_and_merges.sh b/tests/queries/0_stateless/02293_selected_rows_and_merges.sh index 33bfdd45810..9d1483f5bf7 100755 --- a/tests/queries/0_stateless/02293_selected_rows_and_merges.sh +++ b/tests/queries/0_stateless/02293_selected_rows_and_merges.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# Tags: no-random-merge-tree-settings set -ue CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) diff --git a/tests/queries/0_stateless/02361_fsync_profile_events.sh b/tests/queries/0_stateless/02361_fsync_profile_events.sh index 85f82c59c71..44a1bd58d36 100755 --- a/tests/queries/0_stateless/02361_fsync_profile_events.sh +++ b/tests/queries/0_stateless/02361_fsync_profile_events.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-s3-storage +# Tags: no-s3-storage, no-random-merge-tree-settings # Tag no-s3-storage: s3 does not have fsync CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) diff --git a/tests/queries/0_stateless/02381_compress_marks_and_primary_key.sql b/tests/queries/0_stateless/02381_compress_marks_and_primary_key.sql index 0f1b4f638cb..7eaa46242ae 100644 --- a/tests/queries/0_stateless/02381_compress_marks_and_primary_key.sql +++ b/tests/queries/0_stateless/02381_compress_marks_and_primary_key.sql @@ -1,4 +1,4 @@ --- Tags: 
no-backward-compatibility-check +-- Tags: no-backward-compatibility-check, no-random-merge-tree-settings drop table if exists test_02381; create table test_02381(a UInt64, b UInt64) ENGINE = MergeTree order by (a, b) SETTINGS compress_marks=false, compress_primary_key=false; diff --git a/tests/queries/0_stateless/02404_memory_bound_merging.reference b/tests/queries/0_stateless/02404_memory_bound_merging.reference index 47d3470ef6e..98e53cd50ab 100644 --- a/tests/queries/0_stateless/02404_memory_bound_merging.reference +++ b/tests/queries/0_stateless/02404_memory_bound_merging.reference @@ -98,8 +98,9 @@ select a, count() from dist_t_different_dbs group by a, b order by a limit 5 off 502 2000 503 2000 504 2000 +1000000 -- { echoOn } -- -explain pipeline select a from dist_pr_t group by a order by a limit 5 offset 500; +explain pipeline select a from pr_t group by a order by a limit 5 offset 500; (Expression) ExpressionTransform (Limit) @@ -112,28 +113,29 @@ ExpressionTransform (Expression) ExpressionTransform × 4 (MergingAggregated) - MergingAggregatedBucketTransform × 4 - Resize 1 → 4 - FinishAggregatingInOrderTransform 3 → 1 - (Union) - (Aggregating) - SortingAggregatedForMemoryBoundMergingTransform 4 → 1 - MergingAggregatedBucketTransform × 4 - Resize 1 → 4 - FinishAggregatingInOrderTransform 4 → 1 - AggregatingInOrderTransform × 4 - (Expression) - ExpressionTransform × 4 - (ReadFromMergeTree) - MergeTreeInOrder × 4 0 → 1 - (ReadFromRemoteParallelReplicas) -select a, count() from dist_pr_t group by a order by a limit 5 offset 500; + Resize 1 → 4 + SortingAggregatedTransform 4 → 1 + MergingAggregatedBucketTransform × 4 + Resize 1 → 4 + GroupingAggregatedTransform 6 → 1 + (Union) + (Aggregating) + MergingAggregatedBucketTransform × 4 + Resize 1 → 4 + FinishAggregatingInOrderTransform 4 → 1 + AggregatingInOrderTransform × 4 + (Expression) + ExpressionTransform × 4 + (ReadFromMergeTree) + MergeTreeInOrder × 4 0 → 1 + (ReadFromRemoteParallelReplicas) +select a, count() from pr_t group by a order by a limit 5 offset 500; 500 1000 501 1000 502 1000 503 1000 504 1000 -select a, count() from dist_pr_t group by a, b order by a limit 5 offset 500; +select a, count() from pr_t group by a, b order by a limit 5 offset 500; 500 1000 501 1000 502 1000 diff --git a/tests/queries/0_stateless/02404_memory_bound_merging.sql b/tests/queries/0_stateless/02404_memory_bound_merging.sql index f4a1e75e398..b6299de9aae 100644 --- a/tests/queries/0_stateless/02404_memory_bound_merging.sql +++ b/tests/queries/0_stateless/02404_memory_bound_merging.sql @@ -1,13 +1,13 @@ --- Tags: no-parallel +-- Tags: no-parallel, no-random-merge-tree-settings drop table if exists pr_t; -drop table if exists dist_pr_t; drop table if exists dist_t_different_dbs; drop table if exists shard_1.t_different_dbs; drop table if exists t_different_dbs; drop table if exists dist_t; drop table if exists t; + create table t(a UInt64, b UInt64) engine=MergeTree order by a; system stop merges t; insert into t select number, number from numbers_mt(1e6); @@ -15,6 +15,7 @@ insert into t select number, number from numbers_mt(1e6); set enable_memory_bound_merging_of_aggregation_results = 1; set max_threads = 4; set optimize_aggregation_in_order = 1; +set optimize_read_in_order = 1; set prefer_localhost_replica = 1; -- slightly different transforms will be generated by reading steps if we let settings randomisation to change this setting value -- @@ -56,26 +57,28 @@ select a, count() from dist_t_different_dbs group by a, b order by a limit 5 off -- { 
echoOff } -- +create table pr_t(a UInt64, b UInt64) engine=MergeTree order by a; +insert into pr_t select number % 1000, number % 1000 from numbers_mt(1e6); + set allow_experimental_parallel_reading_from_replicas = 1; set max_parallel_replicas = 3; set use_hedged_requests = 0; +set cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost'; +set distributed_aggregation_memory_efficient=1; -create table pr_t(a UInt64, b UInt64) engine=MergeTree order by a; -insert into pr_t select number % 1000, number % 1000 from numbers_mt(1e6); -create table dist_pr_t as pr_t engine = Distributed(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), pr_t); +select count() from pr_t; -- { echoOn } -- -explain pipeline select a from dist_pr_t group by a order by a limit 5 offset 500; +explain pipeline select a from pr_t group by a order by a limit 5 offset 500; -select a, count() from dist_pr_t group by a order by a limit 5 offset 500; -select a, count() from dist_pr_t group by a, b order by a limit 5 offset 500; +select a, count() from pr_t group by a order by a limit 5 offset 500; +select a, count() from pr_t group by a, b order by a limit 5 offset 500; -- { echoOff } -- -drop table pr_t; -drop table dist_pr_t; -drop table dist_t_different_dbs; -drop table shard_1.t_different_dbs; -drop table t_different_dbs; -drop table dist_t; -drop table t; +drop table if exists pr_t; +drop table if exists dist_t_different_dbs; +drop table if exists shard_1.t_different_dbs; +drop table if exists t_different_dbs; +drop table if exists dist_t; +drop table if exists t; diff --git a/tests/queries/0_stateless/02410_inmemory_wal_cleanup.sql b/tests/queries/0_stateless/02410_inmemory_wal_cleanup.sql index 0228852a115..7f832d980ba 100644 --- a/tests/queries/0_stateless/02410_inmemory_wal_cleanup.sql +++ b/tests/queries/0_stateless/02410_inmemory_wal_cleanup.sql @@ -1,3 +1,5 @@ +-- Tags: no-s3-storage + -- { echo } DROP TABLE IF EXISTS in_memory; diff --git a/tests/queries/0_stateless/02423_drop_memory_parts.sql b/tests/queries/0_stateless/02423_drop_memory_parts.sql index fad81d46e68..9326f159b0c 100644 --- a/tests/queries/0_stateless/02423_drop_memory_parts.sql +++ b/tests/queries/0_stateless/02423_drop_memory_parts.sql @@ -1,3 +1,5 @@ +-- Tags: no-s3-storage + DROP TABLE IF EXISTS table_in_memory; CREATE TABLE table_in_memory diff --git a/tests/queries/0_stateless/02480_s3_support_wildcard.reference b/tests/queries/0_stateless/02480_s3_support_wildcard.reference index c6b63f647f8..94151ea87a1 100644 --- a/tests/queries/0_stateless/02480_s3_support_wildcard.reference +++ b/tests/queries/0_stateless/02480_s3_support_wildcard.reference @@ -1,43 +1,43 @@ -- { echo } -drop table if exists test_02480_write; -drop table if exists test_02480_write2; -create table test_02480_write (a UInt64, b String) engine = S3(s3_conn, filename='test_02480_{_partition_id}', format=Parquet) partition by a; +drop table if exists test_02480_support_wildcard_write; +drop table if exists test_02480_support_wildcard_write2; +create table test_02480_support_wildcard_write (a UInt64, b String) engine = S3(s3_conn, filename='test_02480_support_wildcard_{_partition_id}', format=Parquet) partition by a; set s3_truncate_on_insert=1; -insert into test_02480_write values (1, 'a'), (22, 'b'), (333, 'c'); -select a, b from s3(s3_conn, filename='test_02480_*', format=Parquet) order by a; +insert into test_02480_support_wildcard_write values (1, 'a'), (22, 'b'), (333, 'c'); +select a, b from s3(s3_conn, 
filename='test_02480_support_wildcard_*', format=Parquet) order by a; 1 a 22 b 333 c -select a, b from s3(s3_conn, filename='test_02480_?', format=Parquet) order by a; +select a, b from s3(s3_conn, filename='test_02480_support_wildcard_?', format=Parquet) order by a; 1 a -select a, b from s3(s3_conn, filename='test_02480_??', format=Parquet) order by a; +select a, b from s3(s3_conn, filename='test_02480_support_wildcard_??', format=Parquet) order by a; 22 b -select a, b from s3(s3_conn, filename='test_02480_?*?', format=Parquet) order by a; +select a, b from s3(s3_conn, filename='test_02480_support_wildcard_?*?', format=Parquet) order by a; 22 b 333 c -select a, b from s3(s3_conn, filename='test_02480_{1,333}', format=Parquet) order by a; +select a, b from s3(s3_conn, filename='test_02480_support_wildcard_{1,333}', format=Parquet) order by a; 1 a 333 c -select a, b from s3(s3_conn, filename='test_02480_{1..333}', format=Parquet) order by a; +select a, b from s3(s3_conn, filename='test_02480_support_wildcard_{1..333}', format=Parquet) order by a; 1 a 22 b 333 c -create table test_02480_write2 (a UInt64, b String) engine = S3(s3_conn, filename='prefix/test_02480_{_partition_id}', format=Parquet) partition by a; +create table test_02480_support_wildcard_write2 (a UInt64, b String) engine = S3(s3_conn, filename='prefix/test_02480_support_wildcard_{_partition_id}', format=Parquet) partition by a; set s3_truncate_on_insert=1; -insert into test_02480_write2 values (4, 'd'), (55, 'f'), (666, 'g'); -select a, b from s3(s3_conn, filename='*/test_02480_*', format=Parquet) order by a; +insert into test_02480_support_wildcard_write2 values (4, 'd'), (55, 'f'), (666, 'g'); +select a, b from s3(s3_conn, filename='*/test_02480_support_wildcard_*', format=Parquet) order by a; 4 d 55 f 666 g -select a, b from s3(s3_conn, filename='*/test_02480_?', format=Parquet) order by a; +select a, b from s3(s3_conn, filename='*/test_02480_support_wildcard_?', format=Parquet) order by a; 4 d -select a, b from s3(s3_conn, filename='prefix/test_02480_??', format=Parquet) order by a; +select a, b from s3(s3_conn, filename='prefix/test_02480_support_wildcard_??', format=Parquet) order by a; 55 f -select a, b from s3(s3_conn, filename='prefi?/test_02480_*', format=Parquet) order by a; +select a, b from s3(s3_conn, filename='prefi?/test_02480_support_wildcard_*', format=Parquet) order by a; 4 d 55 f 666 g -select a, b from s3(s3_conn, filename='p?*/test_02480_{56..666}', format=Parquet) order by a; +select a, b from s3(s3_conn, filename='p?*/test_02480_support_wildcard_{56..666}', format=Parquet) order by a; 666 g -drop table test_02480_write; -drop table test_02480_write2; +drop table test_02480_support_wildcard_write; +drop table test_02480_support_wildcard_write2; diff --git a/tests/queries/0_stateless/02480_s3_support_wildcard.sql b/tests/queries/0_stateless/02480_s3_support_wildcard.sql index 9da5a022dc4..6078dd13983 100644 --- a/tests/queries/0_stateless/02480_s3_support_wildcard.sql +++ b/tests/queries/0_stateless/02480_s3_support_wildcard.sql @@ -2,28 +2,28 @@ -- Tag no-fasttest: Depends on AWS -- { echo } -drop table if exists test_02480_write; -drop table if exists test_02480_write2; -create table test_02480_write (a UInt64, b String) engine = S3(s3_conn, filename='test_02480_{_partition_id}', format=Parquet) partition by a; +drop table if exists test_02480_support_wildcard_write; +drop table if exists test_02480_support_wildcard_write2; +create table test_02480_support_wildcard_write (a UInt64, b String) engine = 
S3(s3_conn, filename='test_02480_support_wildcard_{_partition_id}', format=Parquet) partition by a; set s3_truncate_on_insert=1; -insert into test_02480_write values (1, 'a'), (22, 'b'), (333, 'c'); +insert into test_02480_support_wildcard_write values (1, 'a'), (22, 'b'), (333, 'c'); -select a, b from s3(s3_conn, filename='test_02480_*', format=Parquet) order by a; -select a, b from s3(s3_conn, filename='test_02480_?', format=Parquet) order by a; -select a, b from s3(s3_conn, filename='test_02480_??', format=Parquet) order by a; -select a, b from s3(s3_conn, filename='test_02480_?*?', format=Parquet) order by a; -select a, b from s3(s3_conn, filename='test_02480_{1,333}', format=Parquet) order by a; -select a, b from s3(s3_conn, filename='test_02480_{1..333}', format=Parquet) order by a; +select a, b from s3(s3_conn, filename='test_02480_support_wildcard_*', format=Parquet) order by a; +select a, b from s3(s3_conn, filename='test_02480_support_wildcard_?', format=Parquet) order by a; +select a, b from s3(s3_conn, filename='test_02480_support_wildcard_??', format=Parquet) order by a; +select a, b from s3(s3_conn, filename='test_02480_support_wildcard_?*?', format=Parquet) order by a; +select a, b from s3(s3_conn, filename='test_02480_support_wildcard_{1,333}', format=Parquet) order by a; +select a, b from s3(s3_conn, filename='test_02480_support_wildcard_{1..333}', format=Parquet) order by a; -create table test_02480_write2 (a UInt64, b String) engine = S3(s3_conn, filename='prefix/test_02480_{_partition_id}', format=Parquet) partition by a; +create table test_02480_support_wildcard_write2 (a UInt64, b String) engine = S3(s3_conn, filename='prefix/test_02480_support_wildcard_{_partition_id}', format=Parquet) partition by a; set s3_truncate_on_insert=1; -insert into test_02480_write2 values (4, 'd'), (55, 'f'), (666, 'g'); +insert into test_02480_support_wildcard_write2 values (4, 'd'), (55, 'f'), (666, 'g'); -select a, b from s3(s3_conn, filename='*/test_02480_*', format=Parquet) order by a; -select a, b from s3(s3_conn, filename='*/test_02480_?', format=Parquet) order by a; -select a, b from s3(s3_conn, filename='prefix/test_02480_??', format=Parquet) order by a; -select a, b from s3(s3_conn, filename='prefi?/test_02480_*', format=Parquet) order by a; -select a, b from s3(s3_conn, filename='p?*/test_02480_{56..666}', format=Parquet) order by a; +select a, b from s3(s3_conn, filename='*/test_02480_support_wildcard_*', format=Parquet) order by a; +select a, b from s3(s3_conn, filename='*/test_02480_support_wildcard_?', format=Parquet) order by a; +select a, b from s3(s3_conn, filename='prefix/test_02480_support_wildcard_??', format=Parquet) order by a; +select a, b from s3(s3_conn, filename='prefi?/test_02480_support_wildcard_*', format=Parquet) order by a; +select a, b from s3(s3_conn, filename='p?*/test_02480_support_wildcard_{56..666}', format=Parquet) order by a; -drop table test_02480_write; -drop table test_02480_write2; +drop table test_02480_support_wildcard_write; +drop table test_02480_support_wildcard_write2; diff --git a/tests/queries/0_stateless/02481_parquet_int_list_multiple_chunks.sh b/tests/queries/0_stateless/02481_parquet_int_list_multiple_chunks.sh index c2c6f689851..5c7c9701a67 100755 --- a/tests/queries/0_stateless/02481_parquet_int_list_multiple_chunks.sh +++ b/tests/queries/0_stateless/02481_parquet_int_list_multiple_chunks.sh @@ -37,6 +37,6 @@ DATA_FILE=$CUR_DIR/data_parquet/int-list-zero-based-chunked-array.parquet ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS 
parquet_load" ${CLICKHOUSE_CLIENT} --query="CREATE TABLE parquet_load (arr Array(Int64)) ENGINE = Memory" cat "$DATA_FILE" | ${CLICKHOUSE_CLIENT} -q "INSERT INTO parquet_load FORMAT Parquet" -${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_load" | md5sum +${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_load SETTINGS max_threads=1" | md5sum ${CLICKHOUSE_CLIENT} --query="SELECT count() FROM parquet_load" ${CLICKHOUSE_CLIENT} --query="drop table parquet_load" \ No newline at end of file diff --git a/tests/queries/0_stateless/02481_s3_throw_if_mismatch_files.reference b/tests/queries/0_stateless/02481_s3_throw_if_mismatch_files.reference new file mode 100644 index 00000000000..02650f92607 --- /dev/null +++ b/tests/queries/0_stateless/02481_s3_throw_if_mismatch_files.reference @@ -0,0 +1,7 @@ +-- { echo } +drop table if exists test_02481_mismatch_files; +create table test_02481_mismatch_files (a UInt64, b String) engine = S3(s3_conn, filename='test_02481_mismatch_files_{_partition_id}', format=Parquet) partition by a; +set s3_truncate_on_insert=1; +insert into test_02481_mismatch_files values (1, 'a'), (22, 'b'), (333, 'c'); +select a, b from s3(s3_conn, filename='test_02481_mismatch_filesxxx*', format=Parquet); -- { serverError 636 } +select a, b from s3(s3_conn, filename='test_02481_mismatch_filesxxx*', format=Parquet) settings s3_throw_on_zero_files_match=1; -- { serverError 107 } diff --git a/tests/queries/0_stateless/02481_s3_throw_if_mismatch_files.sql b/tests/queries/0_stateless/02481_s3_throw_if_mismatch_files.sql new file mode 100644 index 00000000000..6e6f456bfad --- /dev/null +++ b/tests/queries/0_stateless/02481_s3_throw_if_mismatch_files.sql @@ -0,0 +1,12 @@ +-- Tags: no-parallel, no-fasttest +-- Tag no-fasttest: Depends on AWS + +-- { echo } +drop table if exists test_02481_mismatch_files; +create table test_02481_mismatch_files (a UInt64, b String) engine = S3(s3_conn, filename='test_02481_mismatch_files_{_partition_id}', format=Parquet) partition by a; +set s3_truncate_on_insert=1; +insert into test_02481_mismatch_files values (1, 'a'), (22, 'b'), (333, 'c'); + +select a, b from s3(s3_conn, filename='test_02481_mismatch_filesxxx*', format=Parquet); -- { serverError 636 } + +select a, b from s3(s3_conn, filename='test_02481_mismatch_filesxxx*', format=Parquet) settings s3_throw_on_zero_files_match=1; -- { serverError 107 } diff --git a/tests/queries/0_stateless/02493_max_streams_for_merge_tree_reading.sql b/tests/queries/0_stateless/02493_max_streams_for_merge_tree_reading.sql index bf124092b41..c8643b5c758 100644 --- a/tests/queries/0_stateless/02493_max_streams_for_merge_tree_reading.sql +++ b/tests/queries/0_stateless/02493_max_streams_for_merge_tree_reading.sql @@ -1,3 +1,5 @@ +-- Tags: no-random-merge-tree-settings + drop table if exists t; create table t (x UInt64) engine = MergeTree order by x; insert into t select number from numbers_mt(10000000) settings max_insert_threads=8; diff --git a/tests/queries/0_stateless/02494_query_result_cache_case_agnostic_matching.reference b/tests/queries/0_stateless/02494_query_cache_case_agnostic_matching.reference similarity index 100% rename from tests/queries/0_stateless/02494_query_result_cache_case_agnostic_matching.reference rename to tests/queries/0_stateless/02494_query_cache_case_agnostic_matching.reference diff --git a/tests/queries/0_stateless/02494_query_cache_case_agnostic_matching.sql b/tests/queries/0_stateless/02494_query_cache_case_agnostic_matching.sql new file mode 100644 index 00000000000..9440a1fd9c0 --- 
/dev/null +++ b/tests/queries/0_stateless/02494_query_cache_case_agnostic_matching.sql @@ -0,0 +1,29 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +SET allow_experimental_query_cache = true; + +-- Start with empty query cache (QC) and query log +SYSTEM DROP QUERY CACHE; +DROP TABLE system.query_log SYNC; + +-- Insert an entry into the query cache. +SELECT 1 SETTINGS use_query_cache = true; +-- Check that entry in QC exists +SELECT COUNT(*) FROM system.query_cache; + +-- Run the same SELECT but with different case (--> select). We want its result to be served from the QC. +SELECT '---'; +select 1 SETTINGS use_query_cache = true; + +-- There should still be just one entry in the QC +SELECT COUNT(*) FROM system.query_cache; + +-- The second query should cause a QC hit. +SYSTEM FLUSH LOGS; +SELECT ProfileEvents['QueryCacheHits'], ProfileEvents['QueryCacheMisses'] +FROM system.query_log +WHERE type = 'QueryFinish' + AND query = 'select 1 SETTINGS use_query_cache = true;'; + +SYSTEM DROP QUERY CACHE; diff --git a/tests/queries/0_stateless/02494_query_result_cache_drop_cache.reference b/tests/queries/0_stateless/02494_query_cache_drop_cache.reference similarity index 100% rename from tests/queries/0_stateless/02494_query_result_cache_drop_cache.reference rename to tests/queries/0_stateless/02494_query_cache_drop_cache.reference diff --git a/tests/queries/0_stateless/02494_query_cache_drop_cache.sql b/tests/queries/0_stateless/02494_query_cache_drop_cache.sql new file mode 100644 index 00000000000..1f61472fcb0 --- /dev/null +++ b/tests/queries/0_stateless/02494_query_cache_drop_cache.sql @@ -0,0 +1,12 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +SET allow_experimental_query_cache = true; + +-- Cache query result in query cache +SELECT 1 SETTINGS use_query_cache = true; +SELECT count(*) FROM system.query_cache; + +-- No query results are cached after DROP +SYSTEM DROP QUERY CACHE; +SELECT count(*) FROM system.query_cache; diff --git a/tests/queries/0_stateless/02494_query_result_cache_eligible_queries.reference b/tests/queries/0_stateless/02494_query_cache_eligible_queries.reference similarity index 100% rename from tests/queries/0_stateless/02494_query_result_cache_eligible_queries.reference rename to tests/queries/0_stateless/02494_query_cache_eligible_queries.reference diff --git a/tests/queries/0_stateless/02494_query_cache_eligible_queries.sql b/tests/queries/0_stateless/02494_query_cache_eligible_queries.sql new file mode 100644 index 00000000000..b4bc9e2c258 --- /dev/null +++ b/tests/queries/0_stateless/02494_query_cache_eligible_queries.sql @@ -0,0 +1,68 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +SET allow_experimental_query_cache = true; + +SYSTEM DROP QUERY CACHE; +DROP TABLE IF EXISTS eligible_test; +DROP TABLE IF EXISTS eligible_test2; + +-- enable query cache session-wide but also force it individually in each of the statements below +SET use_query_cache = true; + +-- check that SELECT statements create entries in the query cache ... +SELECT 1 SETTINGS use_query_cache = true; +SELECT COUNT(*) FROM system.query_cache; + +SYSTEM DROP QUERY CACHE; + +-- ... 
and that all other statements do not create entries: + +-- CREATE +CREATE TABLE eligible_test (a String) ENGINE=MergeTree ORDER BY a; -- SETTINGS use_query_cache = true; -- SETTINGS rejected as unknown +SELECT COUNT(*) FROM system.query_cache; + +-- ALTER +ALTER TABLE eligible_test ADD COLUMN b String SETTINGS use_query_cache = true; +SELECT COUNT(*) FROM system.query_cache; + +-- INSERT +INSERT INTO eligible_test VALUES('a', 'b'); -- SETTINGS use_query_cache = true; -- SETTINGS rejected as unknown +SELECT COUNT(*) FROM system.query_cache; +INSERT INTO eligible_test SELECT * FROM eligible_test SETTINGS use_query_cache = true; +SELECT COUNT(*) FROM system.query_cache; + +-- SHOW +SHOW TABLES SETTINGS use_query_cache = true; +SELECT COUNT(*) FROM system.query_cache; + +-- CHECK +CHECK TABLE eligible_test SETTINGS use_query_cache = true; +SELECT COUNT(*) FROM system.query_cache; + +-- DESCRIBE +DESCRIBE TABLE eligible_test SETTINGS use_query_cache = true; +SELECT COUNT(*) FROM system.query_cache; + +-- EXISTS +EXISTS TABLE eligible_test SETTINGS use_query_cache = true; +SELECT COUNT(*) FROM system.query_cache; + +-- KILL +KILL QUERY WHERE query_id='3-857d-4a57-9ee0-3c7da5d60a90' SETTINGS use_query_cache = true; +SELECT COUNT(*) FROM system.query_cache; + +-- OPTIMIZE +OPTIMIZE TABLE eligible_test FINAL SETTINGS use_query_cache = true; +SELECT COUNT(*) FROM system.query_cache; + +-- TRUNCATE +TRUNCATE TABLE eligible_test SETTINGS use_query_cache = true; +SELECT COUNT(*) FROM system.query_cache; + +-- RENAME +RENAME TABLE eligible_test TO eligible_test2 SETTINGS use_query_cache = true; +SELECT COUNT(*) FROM system.query_cache; + +SYSTEM DROP QUERY CACHE; +DROP TABLE eligible_test2; diff --git a/tests/queries/0_stateless/02494_query_result_cache_events.reference b/tests/queries/0_stateless/02494_query_cache_events.reference similarity index 100% rename from tests/queries/0_stateless/02494_query_result_cache_events.reference rename to tests/queries/0_stateless/02494_query_cache_events.reference diff --git a/tests/queries/0_stateless/02494_query_cache_events.sql b/tests/queries/0_stateless/02494_query_cache_events.sql new file mode 100644 index 00000000000..d775467d525 --- /dev/null +++ b/tests/queries/0_stateless/02494_query_cache_events.sql @@ -0,0 +1,32 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +SET allow_experimental_query_cache = true; + +-- Start with empty query cache (QC) and query log +SYSTEM DROP QUERY CACHE; +DROP TABLE system.query_log SYNC; + +-- Run a query with QC on. The first execution is a QC miss. 
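+-- (Aside, a minimal sketch rather than part of the checked reference output: assuming the usual system.query_log columns, one way to eyeball the hit/miss counters of the most recent run after SYSTEM FLUSH LOGS. Kept as a comment so the reference file stays unchanged.) +-- SELECT ProfileEvents['QueryCacheHits'] AS hits, ProfileEvents['QueryCacheMisses'] AS misses +-- FROM system.query_log +-- WHERE type = 'QueryFinish' +-- ORDER BY event_time_microseconds DESC +-- LIMIT 1;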
+SELECT '---'; +SELECT 1 SETTINGS use_query_cache = true; + +SYSTEM FLUSH LOGS; +SELECT ProfileEvents['QueryCacheHits'], ProfileEvents['QueryCacheMisses'] +FROM system.query_log +WHERE type = 'QueryFinish' + AND query = 'SELECT 1 SETTINGS use_query_cache = true;'; + + +-- Run previous query again with query cache on +SELECT '---'; +SELECT 1 SETTINGS use_query_cache = true; + +DROP TABLE system.query_log SYNC; +SYSTEM FLUSH LOGS; +SELECT ProfileEvents['QueryCacheHits'], ProfileEvents['QueryCacheMisses'] +FROM system.query_log +WHERE type = 'QueryFinish' + AND query = 'SELECT 1 SETTINGS use_query_cache = true;'; + +SYSTEM DROP QUERY CACHE; diff --git a/tests/queries/0_stateless/02494_query_result_cache_exception_handling.reference b/tests/queries/0_stateless/02494_query_cache_exception_handling.reference similarity index 100% rename from tests/queries/0_stateless/02494_query_result_cache_exception_handling.reference rename to tests/queries/0_stateless/02494_query_cache_exception_handling.reference diff --git a/tests/queries/0_stateless/02494_query_cache_exception_handling.sql b/tests/queries/0_stateless/02494_query_cache_exception_handling.sql new file mode 100644 index 00000000000..4d686d81ed3 --- /dev/null +++ b/tests/queries/0_stateless/02494_query_cache_exception_handling.sql @@ -0,0 +1,12 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +SET allow_experimental_query_cache = true; + +SYSTEM DROP QUERY CACHE; + +-- If an exception is thrown during query execution, no entry must be created in the query cache +SELECT throwIf(1) SETTINGS use_query_cache = true; -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } +SELECT COUNT(*) FROM system.query_cache; + +SYSTEM DROP QUERY CACHE; diff --git a/tests/queries/0_stateless/02494_query_result_cache_explain.reference b/tests/queries/0_stateless/02494_query_cache_explain.reference similarity index 100% rename from tests/queries/0_stateless/02494_query_result_cache_explain.reference rename to tests/queries/0_stateless/02494_query_cache_explain.reference diff --git a/tests/queries/0_stateless/02494_query_result_cache_explain.sql b/tests/queries/0_stateless/02494_query_cache_explain.sql similarity index 54% rename from tests/queries/0_stateless/02494_query_result_cache_explain.sql rename to tests/queries/0_stateless/02494_query_cache_explain.sql index 0daed9df151..67717efde13 100644 --- a/tests/queries/0_stateless/02494_query_result_cache_explain.sql +++ b/tests/queries/0_stateless/02494_query_cache_explain.sql @@ -1,23 +1,23 @@ -- Tags: no-parallel -- Tag no-parallel: Messes with internal cache -SET allow_experimental_query_result_cache = true; +SET allow_experimental_query_cache = true; -SYSTEM DROP QUERY RESULT CACHE; +SYSTEM DROP QUERY CACHE; --- Run a silly query with a non-trivial plan and put the result into the query result cache (QRC) -SELECT 1 + number from system.numbers LIMIT 1 SETTINGS use_query_result_cache = true; -SELECT count(*) FROM system.query_result_cache; +-- Run a silly query with a non-trivial plan and put the result into the query cache (QC) +SELECT 1 + number from system.numbers LIMIT 1 SETTINGS use_query_cache = true; +SELECT count(*) FROM system.query_cache; --- EXPLAIN PLAN should show the same regardless if the result is calculated or read from the QRC +-- EXPLAIN PLAN should show the same regardless of whether the result is calculated or read from the QC EXPLAIN PLAN SELECT 1 + number from system.numbers LIMIT 1; -EXPLAIN PLAN SELECT 1 + number from system.numbers LIMIT 1 SETTINGS 
use_query_result_cache = true; -- (*) +EXPLAIN PLAN SELECT 1 + number from system.numbers LIMIT 1 SETTINGS use_query_cache = true; -- (*) --- EXPLAIN PIPELINE should show the same regardless if the result is calculated or read from the QRC +-- EXPLAIN PIPELINE should show the same regardless of whether the result is calculated or read from the QC EXPLAIN PIPELINE SELECT 1 + number from system.numbers LIMIT 1; -EXPLAIN PIPELINE SELECT 1 + number from system.numbers LIMIT 1 SETTINGS use_query_result_cache = true; -- (*) +EXPLAIN PIPELINE SELECT 1 + number from system.numbers LIMIT 1 SETTINGS use_query_cache = true; -- (*) --- Statements (*) must not cache their results into the QRC -SELECT count(*) FROM system.query_result_cache; +-- Statements (*) must not cache their results into the QC +SELECT count(*) FROM system.query_cache; -SYSTEM DROP QUERY RESULT CACHE; +SYSTEM DROP QUERY CACHE; diff --git a/tests/queries/0_stateless/02494_query_result_cache_min_query_duration.reference b/tests/queries/0_stateless/02494_query_cache_min_query_duration.reference similarity index 100% rename from tests/queries/0_stateless/02494_query_result_cache_min_query_duration.reference rename to tests/queries/0_stateless/02494_query_cache_min_query_duration.reference diff --git a/tests/queries/0_stateless/02494_query_cache_min_query_duration.sql b/tests/queries/0_stateless/02494_query_cache_min_query_duration.sql new file mode 100644 index 00000000000..7d759c86130 --- /dev/null +++ b/tests/queries/0_stateless/02494_query_cache_min_query_duration.sql @@ -0,0 +1,20 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +SET allow_experimental_query_cache = true; + +SYSTEM DROP QUERY CACHE; + +-- This creates an entry in the query cache ... +SELECT 1 SETTINGS use_query_cache = true; +SELECT COUNT(*) FROM system.query_cache; + +SYSTEM DROP QUERY CACHE; + +SELECT '---'; + +-- ... 
but this does not because the query executes much faster than the specified minimum query duration for caching the result +SELECT 1 SETTINGS use_query_cache = true, query_cache_min_query_duration = 10000; +SELECT COUNT(*) FROM system.query_cache; + +SYSTEM DROP QUERY CACHE; diff --git a/tests/queries/0_stateless/02494_query_result_cache_min_query_runs.reference b/tests/queries/0_stateless/02494_query_cache_min_query_runs.reference similarity index 100% rename from tests/queries/0_stateless/02494_query_result_cache_min_query_runs.reference rename to tests/queries/0_stateless/02494_query_cache_min_query_runs.reference diff --git a/tests/queries/0_stateless/02494_query_cache_min_query_runs.sql b/tests/queries/0_stateless/02494_query_cache_min_query_runs.sql new file mode 100644 index 00000000000..2401beeab13 --- /dev/null +++ b/tests/queries/0_stateless/02494_query_cache_min_query_runs.sql @@ -0,0 +1,34 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +SET allow_experimental_query_cache = true; + +SYSTEM DROP QUERY CACHE; + +-- Cache the query result after the 1st query invocation +SELECT 1 SETTINGS use_query_cache = true, query_cache_min_query_runs = 0; +SELECT COUNT(*) FROM system.query_cache; + +SELECT '---'; + +SYSTEM DROP QUERY CACHE; + +-- Cache the query result after the 2nd query invocation +SELECT 1 SETTINGS use_query_cache = true, query_cache_min_query_runs = 1; +SELECT COUNT(*) FROM system.query_cache; +SELECT 1 SETTINGS use_query_cache = true, query_cache_min_query_runs = 1; +SELECT COUNT(*) FROM system.query_cache; + +SELECT '---'; + +SYSTEM DROP QUERY CACHE; + +-- Cache the query result after the 3rd query invocation +SELECT 1 SETTINGS use_query_cache = true, query_cache_min_query_runs = 2; +SELECT COUNT(*) FROM system.query_cache; +SELECT 1 SETTINGS use_query_cache = true, query_cache_min_query_runs = 2; +SELECT COUNT(*) FROM system.query_cache; +SELECT 1 SETTINGS use_query_cache = true, query_cache_min_query_runs = 2; +SELECT COUNT(*) FROM system.query_cache; + +SYSTEM DROP QUERY CACHE; diff --git a/tests/queries/0_stateless/02494_query_result_cache_nondeterministic_functions.reference b/tests/queries/0_stateless/02494_query_cache_nondeterministic_functions.reference similarity index 100% rename from tests/queries/0_stateless/02494_query_result_cache_nondeterministic_functions.reference rename to tests/queries/0_stateless/02494_query_cache_nondeterministic_functions.reference diff --git a/tests/queries/0_stateless/02494_query_cache_nondeterministic_functions.sql b/tests/queries/0_stateless/02494_query_cache_nondeterministic_functions.sql new file mode 100644 index 00000000000..534d63aa427 --- /dev/null +++ b/tests/queries/0_stateless/02494_query_cache_nondeterministic_functions.sql @@ -0,0 +1,18 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +SET allow_experimental_query_cache = true; + +SYSTEM DROP QUERY CACHE; + +-- rand() is non-deterministic; with default settings, no entry in the query cache should be created +SELECT COUNT(rand(1)) SETTINGS use_query_cache = true; +SELECT COUNT(*) FROM system.query_cache; + +SELECT '---'; + +-- But an entry can be forced using a setting +SELECT COUNT(RAND(1)) SETTINGS use_query_cache = true, query_cache_store_results_of_queries_with_nondeterministic_functions = true; +SELECT COUNT(*) FROM system.query_cache; + +SYSTEM DROP QUERY CACHE; diff --git a/tests/queries/0_stateless/02494_query_result_cache_normalize_ast.reference b/tests/queries/0_stateless/02494_query_cache_normalize_ast.reference 
similarity index 100% rename from tests/queries/0_stateless/02494_query_result_cache_normalize_ast.reference rename to tests/queries/0_stateless/02494_query_cache_normalize_ast.reference diff --git a/tests/queries/0_stateless/02494_query_cache_normalize_ast.sql b/tests/queries/0_stateless/02494_query_cache_normalize_ast.sql new file mode 100644 index 00000000000..7e3cd273312 --- /dev/null +++ b/tests/queries/0_stateless/02494_query_cache_normalize_ast.sql @@ -0,0 +1,31 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +SET allow_experimental_query_cache = true; + +-- Start with empty query cache (QC) and query log. +SYSTEM DROP QUERY CACHE; +DROP TABLE system.query_log SYNC; + +-- Run query whose result gets cached in the query cache. +-- Besides "use_query_cache", pass two more knobs (one QC-specific knob and one non-QC-specific knob). We just care +-- *that* they are passed and not about their effect. +SELECT 1 SETTINGS use_query_cache = true, query_cache_store_results_of_queries_with_nondeterministic_functions = true, max_threads = 16; + +-- Check that entry in QC exists +SELECT COUNT(*) FROM system.query_cache; + +-- Run the same SELECT but with different SETTINGS. We want its result to be served from the QC (--> passive mode, achieve it by +-- disabling active mode) +SELECT '---'; +SELECT 1 SETTINGS use_query_cache = true, enable_writes_to_query_cache = false, max_threads = 16; + +-- Technically, both SELECT queries have different ASTs, leading to different QC keys. QC does some AST normalization (erase all +-- QC-related settings) such that the keys match regardless. Verify by checking that the second query caused a QC hit. +SYSTEM FLUSH LOGS; +SELECT ProfileEvents['QueryCacheHits'], ProfileEvents['QueryCacheMisses'] +FROM system.query_log +WHERE type = 'QueryFinish' + AND query = 'SELECT 1 SETTINGS use_query_cache = true, enable_writes_to_query_cache = false, max_threads = 16;'; + +SYSTEM DROP QUERY CACHE; diff --git a/tests/queries/0_stateless/02494_query_result_cache_passive_usage.reference b/tests/queries/0_stateless/02494_query_cache_passive_usage.reference similarity index 100% rename from tests/queries/0_stateless/02494_query_result_cache_passive_usage.reference rename to tests/queries/0_stateless/02494_query_cache_passive_usage.reference diff --git a/tests/queries/0_stateless/02494_query_cache_passive_usage.sql b/tests/queries/0_stateless/02494_query_cache_passive_usage.sql new file mode 100644 index 00000000000..8f1e3972b6d --- /dev/null +++ b/tests/queries/0_stateless/02494_query_cache_passive_usage.sql @@ -0,0 +1,41 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +SET allow_experimental_query_cache = true; + +-- Start with empty query cache (QC). +SYSTEM DROP QUERY CACHE; + +-- By default, don't write query result into QC. +SELECT 1; +SELECT COUNT(*) FROM system.query_cache; + +SELECT '-----'; + +-- Try to retrieve query from empty QC using the passive mode. Do this by disabling the active mode. The cache should still be empty (no insert). +SELECT 1 SETTINGS use_query_cache = true, enable_writes_to_query_cache = false; +SELECT COUNT(*) FROM system.query_cache; + +SELECT '-----'; + +-- Put query into cache. +SELECT 1 SETTINGS use_query_cache = true; +SELECT COUNT(*) FROM system.query_cache; + +SELECT '-----'; + +-- Run same query with passive mode again. There must still be one entry in the QC and we must have a QC hit. 
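+-- (Aside, a hedged sketch of the mode split this test relies on: use_query_cache = true turns the cache on for a query, and enable_writes_to_query_cache = false restricts it to passive reads. A complementary enable_reads_from_query_cache knob for write-only runs is an assumption here and is not exercised by this test; the line is kept as a comment so the reference output stays unchanged.) +-- SELECT 1 SETTINGS use_query_cache = true, enable_reads_from_query_cache = false; -- hypothetical write-only run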
+ +-- Get rid of log of previous SELECT +DROP TABLE system.query_log SYNC; + +SELECT 1 SETTINGS use_query_cache = true, enable_writes_to_query_cache = false; +SELECT COUNT(*) FROM system.query_cache; + +SYSTEM FLUSH LOGS; +SELECT ProfileEvents['QueryCacheHits'], ProfileEvents['QueryCacheMisses'] +FROM system.query_log +WHERE type = 'QueryFinish' + AND query = 'SELECT 1 SETTINGS use_query_cache = true, enable_writes_to_query_cache = false;'; + +SYSTEM DROP QUERY CACHE; diff --git a/tests/queries/0_stateless/02494_query_result_cache_secrets.reference b/tests/queries/0_stateless/02494_query_cache_secrets.reference similarity index 100% rename from tests/queries/0_stateless/02494_query_result_cache_secrets.reference rename to tests/queries/0_stateless/02494_query_cache_secrets.reference diff --git a/tests/queries/0_stateless/02494_query_cache_secrets.sql b/tests/queries/0_stateless/02494_query_cache_secrets.sql new file mode 100644 index 00000000000..99a972b003c --- /dev/null +++ b/tests/queries/0_stateless/02494_query_cache_secrets.sql @@ -0,0 +1,15 @@ +-- Tags: no-parallel, no-fasttest +-- Tag no-fasttest: Depends on OpenSSL +-- Tag no-parallel: Messes with internal cache + +SET allow_experimental_query_cache = true; + +SYSTEM DROP QUERY CACHE; + +-- Cache a result of a query with secret in the query cache +SELECT hex(encrypt('aes-128-ecb', 'plaintext', 'passwordpassword')) SETTINGS use_query_cache = true; + +-- The secret should not be revealed in system.query_cache +SELECT query FROM system.query_cache; + +SYSTEM DROP QUERY CACHE; diff --git a/tests/queries/0_stateless/02494_query_result_cache_ttl_long.reference b/tests/queries/0_stateless/02494_query_cache_ttl_long.reference similarity index 100% rename from tests/queries/0_stateless/02494_query_result_cache_ttl_long.reference rename to tests/queries/0_stateless/02494_query_cache_ttl_long.reference diff --git a/tests/queries/0_stateless/02494_query_cache_ttl_long.sql b/tests/queries/0_stateless/02494_query_cache_ttl_long.sql new file mode 100644 index 00000000000..135ddf2195c --- /dev/null +++ b/tests/queries/0_stateless/02494_query_cache_ttl_long.sql @@ -0,0 +1,31 @@ +-- Tags: no-fasttest, no-parallel, long +-- Tag no-fasttest: Test runtime is > 6 sec +-- Tag long: Test runtime is > 6 sec +-- Tag no-parallel: Messes with internal cache + +SET allow_experimental_query_cache = true; + +SYSTEM DROP QUERY CACHE; + +-- Cache query result into query cache with a TTL of 3 sec +SELECT 1 SETTINGS use_query_cache = true, query_cache_ttl = 3; + +-- Expect one non-stale cache entry +SELECT COUNT(*) FROM system.query_cache; +SELECT stale FROM system.query_cache; + +-- Wait until entry is expired +SELECT sleep(3); +SELECT sleep(3); +SELECT stale FROM system.query_cache; + +SELECT '---'; + +-- Run same query as before +SELECT 1 SETTINGS use_query_cache = true, query_cache_ttl = 3; + +-- The entry should have been refreshed (non-stale) +SELECT COUNT(*) FROM system.query_cache; +SELECT stale FROM system.query_cache; + +SYSTEM DROP QUERY CACHE; diff --git a/tests/queries/0_stateless/02494_query_result_cache_case_agnostic_matching.sql b/tests/queries/0_stateless/02494_query_result_cache_case_agnostic_matching.sql deleted file mode 100644 index e37c0a9cb3f..00000000000 --- a/tests/queries/0_stateless/02494_query_result_cache_case_agnostic_matching.sql +++ /dev/null @@ -1,29 +0,0 @@ --- Tags: no-parallel --- Tag no-parallel: Messes with internal cache - -SET allow_experimental_query_result_cache = true; - --- Start with empty query result cache (QRC) and 
query log -SYSTEM DROP QUERY RESULT CACHE; -DROP TABLE system.query_log SYNC; - --- Insert an entry into the query result cache. -SELECT 1 SETTINGS use_query_result_cache = true; --- Check that entry in QRC exists -SELECT COUNT(*) FROM system.query_result_cache; - --- Run the same SELECT but with different case (--> select). We want its result to be served from the QRC. -SELECT '---'; -select 1 SETTINGS use_query_result_cache = true; - --- There should still be just one entry in the QRC -SELECT COUNT(*) FROM system.query_result_cache; - --- The second query should cause a QRC hit. -SYSTEM FLUSH LOGS; -SELECT ProfileEvents['QueryResultCacheHits'], ProfileEvents['QueryResultCacheMisses'] -FROM system.query_log -WHERE type = 'QueryFinish' - AND query = 'select 1 SETTINGS use_query_result_cache = true;'; - -SYSTEM DROP QUERY RESULT CACHE; diff --git a/tests/queries/0_stateless/02494_query_result_cache_drop_cache.sql b/tests/queries/0_stateless/02494_query_result_cache_drop_cache.sql deleted file mode 100644 index 49e4298bd76..00000000000 --- a/tests/queries/0_stateless/02494_query_result_cache_drop_cache.sql +++ /dev/null @@ -1,12 +0,0 @@ --- Tags: no-parallel --- Tag no-parallel: Messes with internal cache - -SET allow_experimental_query_result_cache = true; - --- Cache query result in query result cache -SELECT 1 SETTINGS use_query_result_cache = true; -SELECT count(*) FROM system.query_result_cache; - --- No query results are cached after DROP -SYSTEM DROP QUERY RESULT CACHE; -SELECT count(*) FROM system.query_result_cache; diff --git a/tests/queries/0_stateless/02494_query_result_cache_eligible_queries.sql b/tests/queries/0_stateless/02494_query_result_cache_eligible_queries.sql deleted file mode 100644 index 23a869f9df7..00000000000 --- a/tests/queries/0_stateless/02494_query_result_cache_eligible_queries.sql +++ /dev/null @@ -1,68 +0,0 @@ --- Tags: no-parallel --- Tag no-parallel: Messes with internal cache - -SET allow_experimental_query_result_cache = true; - -SYSTEM DROP QUERY RESULT CACHE; -DROP TABLE IF EXISTS eligible_test; -DROP TABLE IF EXISTS eligible_test2; - --- enable query result cache session-wide but also force it individually in each of below statements -SET use_query_result_cache = true; - --- check that SELECT statements create entries in the query result cache ... -SELECT 1 SETTINGS use_query_result_cache = true; -SELECT COUNT(*) FROM system.query_result_cache; - -SYSTEM DROP QUERY RESULT CACHE; - --- ... 
and all other statements also should not create entries: - --- CREATE -CREATE TABLE eligible_test (a String) ENGINE=MergeTree ORDER BY a; -- SETTINGS use_query_result_cache = true; -- SETTINGS rejected as unknown -SELECT COUNT(*) FROM system.query_result_cache; - --- ALTER -ALTER TABLE eligible_test ADD COLUMN b String SETTINGS use_query_result_cache = true; -SELECT COUNT(*) FROM system.query_result_cache; - --- INSERT -INSERT INTO eligible_test VALUES('a', 'b'); -- SETTINGS use_query_result_cache = true; -- SETTINGS rejected as unknown -SELECT COUNT(*) FROM system.query_result_cache; -INSERT INTO eligible_test SELECT * FROM eligible_test SETTINGS use_query_result_cache = true; -SELECT COUNT(*) FROM system.query_result_cache; - --- SHOW -SHOW TABLES SETTINGS use_query_result_cache = true; -SELECT COUNT(*) FROM system.query_result_cache; - --- CHECK -CHECK TABLE eligible_test SETTINGS use_query_result_cache = true; -SELECT COUNT(*) FROM system.query_result_cache; - --- DESCRIBE -DESCRIBE TABLE eligible_test SETTINGS use_query_result_cache = true; -SELECT COUNT(*) FROM system.query_result_cache; - --- EXISTS -EXISTS TABLE eligible_test SETTINGS use_query_result_cache = true; -SELECT COUNT(*) FROM system.query_result_cache; - --- KILL -KILL QUERY WHERE query_id='3-857d-4a57-9ee0-3c7da5d60a90' SETTINGS use_query_result_cache = true; -SELECT COUNT(*) FROM system.query_result_cache; - --- OPTIMIZE -OPTIMIZE TABLE eligible_test FINAL SETTINGS use_query_result_cache = true; -SELECT COUNT(*) FROM system.query_result_cache; - --- TRUNCATE -TRUNCATE TABLE eligible_test SETTINGS use_query_result_cache = true; -SELECT COUNT(*) FROM system.query_result_cache; - --- RENAME -RENAME TABLE eligible_test TO eligible_test2 SETTINGS use_query_result_cache = true; -SELECT COUNT(*) FROM system.query_result_cache; - -SYSTEM DROP QUERY RESULT CACHE; -DROP TABLE eligible_test2; diff --git a/tests/queries/0_stateless/02494_query_result_cache_events.sql b/tests/queries/0_stateless/02494_query_result_cache_events.sql deleted file mode 100644 index 73f95ef8f36..00000000000 --- a/tests/queries/0_stateless/02494_query_result_cache_events.sql +++ /dev/null @@ -1,32 +0,0 @@ --- Tags: no-parallel --- Tag no-parallel: Messes with internal cache - -SET allow_experimental_query_result_cache = true; - --- Start with empty query result cache (QRC) and query log -SYSTEM DROP QUERY RESULT CACHE; -DROP TABLE system.query_log SYNC; - --- Run a query with QRC on. The first execution is a QRC miss. 
-SELECT '---'; -SELECT 1 SETTINGS use_query_result_cache = true; - -SYSTEM FLUSH LOGS; -SELECT ProfileEvents['QueryResultCacheHits'], ProfileEvents['QueryResultCacheMisses'] -FROM system.query_log -WHERE type = 'QueryFinish' - AND query = 'SELECT 1 SETTINGS use_query_result_cache = true;'; - - --- Run previous query again with query result cache on -SELECT '---'; -SELECT 1 SETTINGS use_query_result_cache = true; - -DROP TABLE system.query_log SYNC; -SYSTEM FLUSH LOGS; -SELECT ProfileEvents['QueryResultCacheHits'], ProfileEvents['QueryResultCacheMisses'] -FROM system.query_log -WHERE type = 'QueryFinish' - AND query = 'SELECT 1 SETTINGS use_query_result_cache = true;'; - -SYSTEM DROP QUERY RESULT CACHE; diff --git a/tests/queries/0_stateless/02494_query_result_cache_exception_handling.sql b/tests/queries/0_stateless/02494_query_result_cache_exception_handling.sql deleted file mode 100644 index 4ba3b73ad2f..00000000000 --- a/tests/queries/0_stateless/02494_query_result_cache_exception_handling.sql +++ /dev/null @@ -1,12 +0,0 @@ --- Tags: no-parallel --- Tag no-parallel: Messes with internal cache - -SET allow_experimental_query_result_cache = true; - -SYSTEM DROP QUERY RESULT CACHE; - --- If an exception is thrown during query execution, no entry must be created in the query result cache -SELECT throwIf(1) SETTINGS use_query_result_cache = true; -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } -SELECT COUNT(*) FROM system.query_result_cache; - -SYSTEM DROP QUERY RESULT CACHE; diff --git a/tests/queries/0_stateless/02494_query_result_cache_min_query_duration.sql b/tests/queries/0_stateless/02494_query_result_cache_min_query_duration.sql deleted file mode 100644 index 37cd4590396..00000000000 --- a/tests/queries/0_stateless/02494_query_result_cache_min_query_duration.sql +++ /dev/null @@ -1,20 +0,0 @@ --- Tags: no-parallel --- Tag no-parallel: Messes with internal cache - -SET allow_experimental_query_result_cache = true; - -SYSTEM DROP QUERY RESULT CACHE; - --- This creates an entry in the query result cache ... -SELECT 1 SETTINGS use_query_result_cache = true; -SELECT COUNT(*) FROM system.query_result_cache; - -SYSTEM DROP QUERY RESULT CACHE; - -SELECT '---'; - --- ... 
but this does not because the query executes much faster than the specified minumum query duration for caching the result -SELECT 1 SETTINGS use_query_result_cache = true, query_result_cache_min_query_duration = 10000; -SELECT COUNT(*) FROM system.query_result_cache; - -SYSTEM DROP QUERY RESULT CACHE; diff --git a/tests/queries/0_stateless/02494_query_result_cache_min_query_runs.sql b/tests/queries/0_stateless/02494_query_result_cache_min_query_runs.sql deleted file mode 100644 index 4a93ee507ab..00000000000 --- a/tests/queries/0_stateless/02494_query_result_cache_min_query_runs.sql +++ /dev/null @@ -1,34 +0,0 @@ --- Tags: no-parallel --- Tag no-parallel: Messes with internal cache - -SET allow_experimental_query_result_cache = true; - -SYSTEM DROP QUERY RESULT CACHE; - --- Cache the query result after the 1st query invocation -SELECT 1 SETTINGS use_query_result_cache = true, query_result_cache_min_query_runs = 0; -SELECT COUNT(*) FROM system.query_result_cache; - -SELECT '---'; - -SYSTEM DROP QUERY RESULT CACHE; - --- Cache the query result after the 2nd query invocation -SELECT 1 SETTINGS use_query_result_cache = true, query_result_cache_min_query_runs = 1; -SELECT COUNT(*) FROM system.query_result_cache; -SELECT 1 SETTINGS use_query_result_cache = true, query_result_cache_min_query_runs = 1; -SELECT COUNT(*) FROM system.query_result_cache; - -SELECT '---'; - -SYSTEM DROP QUERY RESULT CACHE; - --- Cache the query result after the 3rd query invocation -SELECT 1 SETTINGS use_query_result_cache = true, query_result_cache_min_query_runs = 2; -SELECT COUNT(*) FROM system.query_result_cache; -SELECT 1 SETTINGS use_query_result_cache = true, query_result_cache_min_query_runs = 2; -SELECT COUNT(*) FROM system.query_result_cache; -SELECT 1 SETTINGS use_query_result_cache = true, query_result_cache_min_query_runs = 2; -SELECT COUNT(*) FROM system.query_result_cache; - -SYSTEM DROP QUERY RESULT CACHE; diff --git a/tests/queries/0_stateless/02494_query_result_cache_nondeterministic_functions.sql b/tests/queries/0_stateless/02494_query_result_cache_nondeterministic_functions.sql deleted file mode 100644 index 87dc04c9919..00000000000 --- a/tests/queries/0_stateless/02494_query_result_cache_nondeterministic_functions.sql +++ /dev/null @@ -1,18 +0,0 @@ --- Tags: no-parallel --- Tag no-parallel: Messes with internal cache - -SET allow_experimental_query_result_cache = true; - -SYSTEM DROP QUERY RESULT CACHE; - --- rand() is non-deterministic, with default settings no entry in the query result cache should be created -SELECT COUNT(rand(1)) SETTINGS use_query_result_cache = true; -SELECT COUNT(*) FROM system.query_result_cache; - -SELECT '---'; - --- But an entry can be forced using a setting -SELECT COUNT(RAND(1)) SETTINGS use_query_result_cache = true, query_result_cache_store_results_of_queries_with_nondeterministic_functions = true; -SELECT COUNT(*) FROM system.query_result_cache; - -SYSTEM DROP QUERY RESULT CACHE; diff --git a/tests/queries/0_stateless/02494_query_result_cache_normalize_ast.sql b/tests/queries/0_stateless/02494_query_result_cache_normalize_ast.sql deleted file mode 100644 index 15bab7e5584..00000000000 --- a/tests/queries/0_stateless/02494_query_result_cache_normalize_ast.sql +++ /dev/null @@ -1,31 +0,0 @@ --- Tags: no-parallel --- Tag no-parallel: Messes with internal cache - -SET allow_experimental_query_result_cache = true; - --- Start with empty query result cache (QRC) and query log. 
-SYSTEM DROP QUERY RESULT CACHE; -DROP TABLE system.query_log SYNC; - --- Run query whose result gets cached in the query result cache. --- Besides "use_query_result_cache", pass two more knobs (one QRC-specific knob and one non-QRC-specific knob). We just care --- *that* they are passed and not about their effect. -SELECT 1 SETTINGS use_query_result_cache = true, query_result_cache_store_results_of_queries_with_nondeterministic_functions = true, max_threads = 16; - --- Check that entry in QRC exists -SELECT COUNT(*) FROM system.query_result_cache; - --- Run the same SELECT but with different SETTINGS. We want its result to be served from the QRC (--> passive mode, achieve it by --- disabling active mode) -SELECT '---'; -SELECT 1 SETTINGS use_query_result_cache = true, enable_writes_to_query_result_cache = false, max_threads = 16; - --- Technically, both SELECT queries have different ASTs, leading to different QRC keys. QRC does some AST normalization (erase all --- QRC-related settings) such that the keys match regardless. Verify by checking that the second query caused a QRC hit. -SYSTEM FLUSH LOGS; -SELECT ProfileEvents['QueryResultCacheHits'], ProfileEvents['QueryResultCacheMisses'] -FROM system.query_log -WHERE type = 'QueryFinish' - AND query = 'SELECT 1 SETTINGS use_query_result_cache = true, enable_writes_to_query_result_cache = false, max_threads = 16;'; - -SYSTEM DROP QUERY RESULT CACHE; diff --git a/tests/queries/0_stateless/02494_query_result_cache_passive_usage.sql b/tests/queries/0_stateless/02494_query_result_cache_passive_usage.sql deleted file mode 100644 index 86c06461463..00000000000 --- a/tests/queries/0_stateless/02494_query_result_cache_passive_usage.sql +++ /dev/null @@ -1,41 +0,0 @@ --- Tags: no-parallel --- Tag no-parallel: Messes with internal cache - -SET allow_experimental_query_result_cache = true; - --- Start with empty query result cache (QRC). -SYSTEM DROP QUERY RESULT CACHE; - --- By default, don't write query result into query result cache (QRC). -SELECT 1; -SELECT COUNT(*) FROM system.query_result_cache; - -SELECT '-----'; - --- Try to retrieve query result from empty QRC using the passive mode. Do this by disabling the active mode. The cache should still be empty (no insert). -SELECT 1 SETTINGS use_query_result_cache = true, enable_writes_to_query_result_cache = false; -SELECT COUNT(*) FROM system.query_result_cache; - -SELECT '-----'; - --- Put query result into cache. -SELECT 1 SETTINGS use_query_result_cache = true; -SELECT COUNT(*) FROM system.query_result_cache; - -SELECT '-----'; - --- Run same query with passive mode again. There must still be one entry in the QRC and we must have a QRC hit. 
- --- Get rid of log of previous SELECT -DROP TABLE system.query_log SYNC; - -SELECT 1 SETTINGS use_query_result_cache = true, enable_writes_to_query_result_cache = false; -SELECT COUNT(*) FROM system.query_result_cache; - -SYSTEM FLUSH LOGS; -SELECT ProfileEvents['QueryResultCacheHits'], ProfileEvents['QueryResultCacheMisses'] -FROM system.query_log -WHERE type = 'QueryFinish' - AND query = 'SELECT 1 SETTINGS use_query_result_cache = true, enable_writes_to_query_result_cache = false;'; - -SYSTEM DROP QUERY RESULT CACHE; diff --git a/tests/queries/0_stateless/02494_query_result_cache_secrets.sql b/tests/queries/0_stateless/02494_query_result_cache_secrets.sql deleted file mode 100644 index b45db639efb..00000000000 --- a/tests/queries/0_stateless/02494_query_result_cache_secrets.sql +++ /dev/null @@ -1,15 +0,0 @@ --- Tags: no-parallel, no-fasttest --- Tag no-fasttest: Depends on OpenSSL --- Tag no-parallel: Messes with internal cache - -SET allow_experimental_query_result_cache = true; - -SYSTEM DROP QUERY RESULT CACHE; - --- Cache a result of a query with secret in the query result cache -SELECT hex(encrypt('aes-128-ecb', 'plaintext', 'passwordpassword')) SETTINGS use_query_result_cache = true; - --- The secret should not be revealed in system.query_result_cache -SELECT query FROM system.query_result_cache; - -SYSTEM DROP QUERY RESULT CACHE; diff --git a/tests/queries/0_stateless/02494_query_result_cache_ttl_long.sql b/tests/queries/0_stateless/02494_query_result_cache_ttl_long.sql deleted file mode 100644 index 7acee9b2a5b..00000000000 --- a/tests/queries/0_stateless/02494_query_result_cache_ttl_long.sql +++ /dev/null @@ -1,31 +0,0 @@ --- Tags: no-fasttest, no-parallel, long --- Tag no-fasttest: Test runtime is > 6 sec --- Tag long: Test runtime is > 6 sec --- Tag no-parallel: Messes with internal cache - -SET allow_experimental_query_result_cache = true; - -SYSTEM DROP QUERY RESULT CACHE; - --- Cache query result into query result cache with a TTL of 3 sec -SELECT 1 SETTINGS use_query_result_cache = true, query_result_cache_ttl = 3; - --- Expect one non-stale cache entry -SELECT COUNT(*) FROM system.query_result_cache; -SELECT stale FROM system.query_result_cache; - --- Wait until entry is expired -SELECT sleep(3); -SELECT sleep(3); -SELECT stale FROM system.query_result_cache; - -SELECT '---'; - --- Run same query as before -SELECT 1 SETTINGS use_query_result_cache = true, query_result_cache_ttl = 3; - --- The entry should have been refreshed (non-stale) -SELECT COUNT(*) FROM system.query_result_cache; -SELECT stale FROM system.query_result_cache; - -SYSTEM DROP QUERY RESULT CACHE; diff --git a/tests/queries/0_stateless/02499_monotonicity_toUnixTimestamp64.sh b/tests/queries/0_stateless/02499_monotonicity_toUnixTimestamp64.sh index d3cbc6ec861..1223d7957b5 100755 --- a/tests/queries/0_stateless/02499_monotonicity_toUnixTimestamp64.sh +++ b/tests/queries/0_stateless/02499_monotonicity_toUnixTimestamp64.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# Tags: no-random-merge-tree-settings # shellcheck disable=SC2154 diff --git a/tests/queries/0_stateless/02504_regexp_dictionary_ua_parser.reference b/tests/queries/0_stateless/02504_regexp_dictionary_ua_parser.reference new file mode 100644 index 00000000000..b161b099eef --- /dev/null +++ b/tests/queries/0_stateless/02504_regexp_dictionary_ua_parser.reference @@ -0,0 +1,793 @@ +AppleTV Other 0.0 ATV OS X 0.0.0 +LG-M150 Firefox Mobile 68.0 Android 7.0.0 +Generic Smartphone Firefox Mobile 68.0 Android 8.0.0 +Generic Tablet Firefox Mobile 68.0 Android 
8.1.0 +Generic Smartphone Firefox Mobile 68.0 Android 9.0.0 +PH-1 Chrome Mobile 77.0 Android 10.0.0 +Pixel 2 XL Chrome Mobile 77.0 Android 10.0.0 +Pixel 2 Chrome Mobile 77.0 Android 10.0.0 +Pixel 3 Facebook 240.0 Android 10.0.0 +Pixel XL Chrome Mobile WebView 77.0 Android 10.0.0 +Pixel XL Chrome Mobile 77.0 Android 10.0.0 +HTC Sensation 4G Chrome Mobile 42.0 Android 4.0.3 +Kindle Amazon Silk 73.7 Android 4.0.3 +Samsung GT-I9152 Chrome Mobile 42.0 Android 4.2.2 +Samsung GT-N5110 Chrome 76.0 Android 4.4.2 +RCT6773W22 Chrome 77.0 Android 4.4.2 +Samsung SM-T217S Chrome 77.0 Android 4.4.2 +Samsung SM-T530NU Chrome 77.0 Android 4.4.2 +TegraNote-P1640 Chrome 69.0 Android 4.4.2 +Kindle Amazon Silk 76.3 Android 4.4.3 +Samsung SM-A500H Chrome Mobile 73.0 Android 5.0.2 +Samsung SM-T357T Chrome 77.0 Android 5.0.2 +Samsung SM-T530NU Chrome 76.0 Android 5.0.2 +Samsung SM-T530NU Chrome 77.0 Android 5.0.2 +RCT6213W87DK Yandex Browser 19.4 Android 5.0.0 +Samsung SM-N900T Facebook 229.0 Android 5.0.0 +Generic Smartphone Chrome Mobile WebView 70.0 Android 5.1.1 +Kindle Amazon Silk 76.3 Android 5.1.1 +AFTT Chrome Mobile WebView 70.0 Android 5.1.1 +Kindle Amazon Silk 76.3 Android 5.1.1 +Kindle Amazon Silk 76.3 Android 5.1.1 +Kindle Amazon Silk 71.2 Android 5.1.1 +Kindle Amazon Silk 76.3 Android 5.1.1 +Kindle Amazon Silk 76.3 Android 5.1.1 +Kindle Amazon Silk 76.3 Android 5.1.1 +Kindle Amazon Silk 76.3 Android 5.1.1 +Kindle Amazon Silk 77.1 Android 5.1.1 +LG-AS330 Chrome Mobile 77.0 Android 5.1.1 +LGL43AL Chrome Mobile 77.0 Android 5.1.1 +Samsung SM-G530R7 Samsung Internet 9.2 Android 5.1.1 +Samsung SM-T377P Samsung Internet 10.1 Android 5.1.1 +Samsung SM-T900 Samsung Internet 10.1 Android 5.1.1 +Samsung SM-T337A Chrome 69.0 Android 5.1.1 +Samsung SM-G360T1 Chrome Mobile 67.0 Android 5.1.1 +Samsung SM-J320FN Chrome Mobile 74.0 Android 5.1.1 +SM-T280 Chrome 74.0 Android 5.1.1 +Samsung SM-T330NU Chrome 71.0 Android 5.1.1 +SM-T670 Chrome 76.0 Android 5.1.1 +SM-T670 Chrome 77.0 Android 5.1.1 +Vodafone Smart ultra 6 Chrome Mobile WebView 74.0 Android 5.1.1 +BLU Advance 5.0 Chrome Mobile 66.0 Android 5.1.0 +HTC Desire 626s Chrome Mobile 77.0 Android 5.1.0 +HUAWEI LUA-L22 Chrome Mobile 50.0 Android 5.1.0 +NX16A11264 Chrome 77.0 Android 5.1.0 +XT1526 Chrome Mobile 73.0 Android 5.1.0 +Oppo CPH1613 Chrome Mobile 77.0 Android 6.0.1 +LG-M153 Chrome Mobile WebView 55.0 Android 6.0.1 +LG-M153 Chrome Mobile 77.0 Android 6.0.1 +LGLS676 Chrome Mobile 77.0 Android 6.0.1 +N9136 Chrome Mobile 74.0 Android 6.0.1 +Asus Nexus 7 Chrome 44.0 Android 6.0.1 +Samsung SM-G900I Samsung Internet 10.1 Android 6.0.1 +Samsung SM-G900P Samsung Internet 7.2 Android 6.0.1 +Samsung SM-J700M Samsung Internet 10.1 Android 6.0.1 +Samsung SM-S327VL Samsung Internet 10.1 Android 6.0.1 +Samsung SM-T377A Chrome 77.0 Android 6.0.1 +Samsung SM-G532M Chrome Mobile 55.0 Android 6.0.1 +Samsung SM-G532M Facebook 240.0 Android 6.0.1 +Samsung SM-G532M Chrome Mobile 77.0 Android 6.0.1 +Samsung SM-G550T Chrome Mobile 76.0 Android 6.0.1 +Samsung SM-G550T Chrome Mobile 77.0 Android 6.0.1 +Samsung SM-G550T1 Chrome Mobile 76.0 Android 6.0.1 +Samsung SM-G900V Chrome Mobile 73.0 Android 6.0.1 +Samsung SM-G920A Chrome Mobile 77.0 Android 6.0.1 +Samsung SM-J327P Chrome Mobile 77.0 Android 6.0.1 +Samsung SM-N910S Chrome Mobile 75.0 Android 6.0.1 +Samsung SM-N920V Chrome Mobile 76.0 Android 6.0.1 +Samsung SM-T350 Chrome 59.0 Android 6.0.1 +Samsung SM-T560NU Chrome 77.0 Android 6.0.1 +SM-T800 Chrome 77.0 Android 6.0.1 +XT1254 Chrome Mobile 77.0 Android 6.0.1 +Z798BL Chrome 
Mobile 67.0 Android 6.0.1 +Z799VL Chrome Mobile WebView 45.0 Android 6.0.1 +5010X Chrome Mobile 76.0 Android 6.0.0 +Huawei CAM-L21 Chrome Mobile 77.0 Android 6.0.0 +F3313 Chrome Mobile 77.0 Android 6.0.0 +RCT6603W47M7 Chrome 77.0 Android 6.0.0 +5049Z Chrome Mobile 56.0 Android 7.0.0 +Asus A002A Chrome Mobile 77.0 Android 7.0.0 +Alcatel_5044C Chrome Mobile 77.0 Android 7.0.0 +Astra Young Pro Chrome Mobile WebView 59.0 Android 7.0.0 +Infinix X571 Chrome Mobile 77.0 Android 7.0.0 +LG-H872 Chrome Mobile 64.0 Android 7.0.0 +LG-K425 Chrome Mobile 55.0 Android 7.0.0 +LG-LS777 Chrome Mobile 77.0 Android 7.0.0 +LG-M210 Chrome Mobile 77.0 Android 7.0.0 +LG-M430 Chrome Mobile 77.0 Android 7.0.0 +LG-TP260 Chrome Mobile WebView 64.0 Android 7.0.0 +LG-TP260 Chrome Mobile 77.0 Android 7.0.0 +LG-TP450 Chrome Mobile 64.0 Android 7.0.0 +LG-V521 Chrome 75.0 Android 7.0.0 +LG-V521 Chrome 77.0 Android 7.0.0 +LGMP260 Chrome Mobile 58.0 Android 7.0.0 +LGMS210 Chrome Mobile 55.0 Android 7.0.0 +LGMS210 Chrome Mobile 77.0 Android 7.0.0 +P00I Chrome 77.0 Android 7.0.0 +RS988 Chrome Mobile 77.0 Android 7.0.0 +Samsung SM-J701F Samsung Internet 10.1 Android 7.0.0 +Samsung SM-J710F Samsung Internet 10.1 Android 7.0.0 +Samsung SM-N920T Samsung Internet 9.2 Android 7.0.0 +Samsung SM-G920A Chrome Mobile 77.0 Android 7.0.0 +Samsung SM-G920P Flipboard 4.2 Android 7.0.0 +Samsung SM-G920V Chrome Mobile 76.0 Android 7.0.0 +Samsung SM-G928V Chrome Mobile 77.0 Android 7.0.0 +Samsung SM-G950U Chrome Mobile 77.0 Android 7.0.0 +Samsung SM-G955U Chrome Mobile 77.0 Android 7.0.0 +Samsung SM-J327T Chrome Mobile 74.0 Android 7.0.0 +Samsung SM-J327T Chrome Mobile 77.0 Android 7.0.0 +Samsung SM-J327T1 Chrome Mobile 64.0 Android 7.0.0 +Samsung SM-J327T1 Chrome Mobile 75.0 Android 7.0.0 +Samsung SM-J327T1 Chrome Mobile 77.0 Android 7.0.0 +Samsung SM-N9208 Chrome Mobile 73.0 Android 7.0.0 +Samsung SM-N920P Chrome Mobile 74.0 Android 7.0.0 +Samsung SM-N920T Chrome Mobile 77.0 Android 7.0.0 +SM-T585 Chrome 77.0 Android 7.0.0 +SM-T810 Chrome 75.0 Android 7.0.0 +SM-T810 Chrome 76.0 Android 7.0.0 +SM-T810 Chrome 77.0 Android 7.0.0 +SM-T813 Chrome 76.0 Android 7.0.0 +SM-T813 Chrome 76.0 Android 7.0.0 +Trekstor ST1009X Chrome 75.0 Android 7.0.0 +XT1663 Chrome Mobile 77.0 Android 7.0.0 +Generic Smartphone Chrome Mobile 58.0 Android 7.0.0 +A574BL Chrome Mobile WebView 77.0 Android 7.1.1 +A574BL Chrome Mobile 77.0 Android 7.1.1 +Oppo CPH1729 Facebook 240.0 Android 7.1.1 +3632A Chrome Mobile 74.0 Android 7.1.1 +General Mobile 4G Dual Chrome Mobile 77.0 Android 7.1.1 +Moto E (4) Plus Chrome Mobile WebView 76.0 Android 7.1.1 +Moto E (4) Chrome Mobile 70.0 Android 7.1.1 +Moto E (4) Chrome Mobile 76.0 Android 7.1.1 +Moto E (4) Chrome Mobile 77.0 Android 7.1.1 +Moto E (4) Chrome Mobile 77.0 Android 7.1.1 +NX591J Chrome Mobile 77.0 Android 7.1.1 +REVVLPLUS C3701A Chrome Mobile 64.0 Android 7.1.1 +Samsung SM-J320A Samsung Internet 10.1 Android 7.1.1 +Samsung SM-T550 Samsung Internet 10.1 Android 7.1.1 +Samsung SM-T377A Chrome 64.0 Android 7.1.1 +Samsung SM-J250F Chrome Mobile 76.0 Android 7.1.1 +Samsung SM-J700T Chrome Mobile 77.0 Android 7.1.1 +SM-T350 Chrome 77.0 Android 7.1.1 +Samsung SM-T377T Chrome 77.0 Android 7.1.1 +Samsung SM-T550 Chrome 69.0 Android 7.1.1 +SM-T550 Chrome 77.0 Android 7.1.1 +Samsung SM-T560NU Chrome 77.0 Android 7.1.1 +X20 Chrome Mobile WebView 52.0 Android 7.1.1 +Z851M Chrome Mobile 58.0 Android 7.1.1 +Z899VL Chrome Mobile WebView 74.0 Android 7.1.1 +Z982 Chrome Mobile WebView 75.0 Android 7.1.1 +Z982 Chrome Mobile 77.0 Android 7.1.1 
+Generic Smartphone Chrome Mobile WebView 70.0 Android 7.1.2 +AFTKMST12 Chrome Mobile WebView 70.0 Android 7.1.2 +Kindle Amazon Silk 76.3 Android 7.1.2 +AFTMM Chrome Mobile WebView 70.0 Android 7.1.2 +AFTN Chrome Mobile WebView 70.0 Android 7.1.2 +KFKAWI Chrome Mobile WebView 59.0 Android 7.1.2 +Kindle Amazon Silk 76.3 Android 7.1.2 +Kindle Amazon Silk 76.3 Android 7.1.2 +LG-SP200 Chrome Mobile 75.0 Android 7.1.2 +LG-SP200 Chrome Mobile 76.0 Android 7.1.2 +LM-X210(G) Chrome Mobile 76.0 Android 7.1.2 +LM-X210 Chrome Mobile 76.0 Android 7.1.2 +RCT6973W43R Chrome 77.0 Android 7.1.2 +XiaoMi Redmi 4 Chrome Mobile 77.0 Android 7.1.2 +Generic Smartphone Chrome Mobile WebView 76.0 Android 8.0.0 +Asus Z01FD Chrome Mobile 77.0 Android 8.0.0 +Huawei AUM-L29 Chrome Mobile 77.0 Android 8.0.0 +BRAVIA 4K GB Chrome Mobile WebView 77.0 Android 8.0.0 +CMR-W09 Chrome 77.0 Android 8.0.0 +EVA-AL00 Chrome Mobile 77.0 Android 8.0.0 +G3223 Chrome Mobile 77.0 Android 8.0.0 +LG-H910 Chrome Mobile 77.0 Android 8.0.0 +LG-H931 Chrome Mobile 76.0 Android 8.0.0 +LG-H932 Chrome Mobile 77.0 Android 8.0.0 +Samsung SM-A520F Samsung Internet 10.1 Android 8.0.0 +Samsung SM-G891A Samsung Internet 8.2 Android 8.0.0 +Samsung SM-G935T Samsung Internet 10.1 Android 8.0.0 +Samsung SM-G955U Samsung Internet 10.1 Android 8.0.0 +Samsung SM-J337T Samsung Internet 9.2 Android 8.0.0 +Samsung SM-J737P Samsung Internet 10.1 Android 8.0.0 +Samsung SM-N950F Samsung Internet 10.1 Android 8.0.0 +Samsung SM-G891A Chrome Mobile 72.0 Android 8.0.0 +Samsung SM-G935A Chrome Mobile 76.0 Android 8.0.0 +Samsung SM-A720F Chrome Mobile 77.0 Android 8.0.0 +Samsung SM-G570F Facebook 231.0 Android 8.0.0 +Samsung SM-G570Y Chrome Mobile 77.0 Android 8.0.0 +Samsung SM-G930T Chrome Mobile WebView 77.0 Android 8.0.0 +Samsung SM-G930V Chrome Mobile 64.0 Android 8.0.0 +Samsung SM-G930VL Chrome Mobile 74.0 Android 8.0.0 +Samsung SM-G935F Chrome Mobile 75.0 Android 8.0.0 +Samsung SM-G935P Chrome Mobile 77.0 Android 8.0.0 +Samsung SM-G935T Facebook 240.0 Android 8.0.0 +Samsung SM-G935T Chrome Mobile 77.0 Android 8.0.0 +Samsung SM-G950U Chrome Mobile 77.0 Android 8.0.0 +Samsung SM-G955U Chrome Mobile 74.0 Android 8.0.0 +Samsung SM-G955U Chrome Mobile 77.0 Android 8.0.0 +Samsung SM-J330G Chrome Mobile 77.0 Android 8.0.0 +Samsung SM-J337T Chrome Mobile 77.0 Android 8.0.0 +Samsung SM-J737A Chrome Mobile 77.0 Android 8.0.0 +Samsung SM-J737T1 Chrome Mobile 66.0 Android 8.0.0 +Samsung SM-J737T1 Chrome Mobile 77.0 Android 8.0.0 +Samsung SM-N950F Chrome Mobile 66.0 Android 8.0.0 +Samsung SM-N950U Chrome Mobile 76.0 Android 8.0.0 +Samsung SM-N950U Chrome Mobile 77.0 Android 8.0.0 +Samsung SM-N950U1 Chrome Mobile 77.0 Android 8.0.0 +Samsung SM-S367VL Chrome Mobile 77.0 Android 8.0.0 +VS995 Chrome Mobile 77.0 Android 8.0.0 +XT1635-02 Chrome Mobile 77.0 Android 8.0.0 +moto e5 play Chrome Mobile 76.0 Android 8.0.0 +moto e5 play Chrome Mobile 77.0 Android 8.0.0 +moto e5 supra Chrome Mobile 76.0 Android 8.0.0 +moto g(6) Chrome Mobile 77.0 Android 8.0.0 +5041C Chrome Mobile 77.0 Android 8.1.0 +6062W Chrome Mobile 77.0 Android 8.1.0 +A502DL Chrome Mobile 67.0 Android 8.1.0 +A502DL Chrome Mobile 76.0 Android 8.1.0 +Huawei BKK-LX2 Chrome Mobile 76.0 Android 8.1.0 +C4 Chrome Mobile 70.0 Android 8.1.0 +3310A Chrome Mobile 77.0 Android 8.1.0 +Infinix X604 Chrome Mobile 64.0 Android 8.1.0 +Joy 1 Chrome Mobile 77.0 Android 8.1.0 +LAVA LE9820 Chrome Mobile 77.0 Android 8.1.0 +LG-Q710AL Chrome Mobile 77.0 Android 8.1.0 +LM-Q610(FGN) Chrome Mobile 77.0 Android 8.1.0 +LM-Q710(FGN) Facebook 
235.0 Android 8.1.0 +LM-Q710(FGN) Chrome Mobile 70.0 Android 8.1.0 +LM-Q710(FGN) Chrome Mobile 76.0 Android 8.1.0 +LM-Q710(FGN) Chrome Mobile 76.0 Android 8.1.0 +LM-Q710(FGN) Chrome Mobile 77.0 Android 8.1.0 +LM-V405 Chrome Mobile 77.0 Android 8.1.0 +LM-X210(G) UC Browser 11.6 Android 8.1.0 +LM-X210(G) Chrome Mobile 70.0 Android 8.1.0 +LM-X210(G) Chrome Mobile 72.0 Android 8.1.0 +LM-X210(G) Chrome Mobile 77.0 Android 8.1.0 +LM-X212(G) Chrome Mobile 77.0 Android 8.1.0 +LM-X220 Chrome Mobile 70.0 Android 8.1.0 +LM-X220 Chrome Mobile 76.0 Android 8.1.0 +LM-X220PM Chrome Mobile WebView 77.0 Android 8.1.0 +LM-X410(FG) Chrome Mobile 70.0 Android 8.1.0 +LM-X410(FG) Chrome Mobile 76.0 Android 8.1.0 +LM-X410(FG) Chrome Mobile 77.0 Android 8.1.0 +LM-X410.FGN Chrome Mobile 68.0 Android 8.1.0 +LML414DL Chrome Mobile 76.0 Android 8.1.0 +LML713DL Chrome Mobile 77.0 Android 8.1.0 +Moto G (5S) Plus Chrome Mobile 77.0 Android 8.1.0 +HTC One Chrome Mobile WebView 70.0 Android 8.1.0 +RCT6873W42BMF8KC Chrome Mobile 77.0 Android 8.1.0 +REVVL 2 Chrome Mobile 67.0 Android 8.1.0 +REVVL 2 Chrome Mobile 76.0 Android 8.1.0 +Samsung SM-J727T Samsung Internet 10.1 Android 8.1.0 +Samsung SM-J727T1 Samsung Internet 9.4 Android 8.1.0 +Samsung SM-J727T1 Samsung Internet 10.1 Android 8.1.0 +Samsung SM-T580 Samsung Internet 9.4 Android 8.1.0 +Samsung SM-J727A Facebook 240.0 Android 8.1.0 +Samsung SM-G610F Chrome Mobile 77.0 Android 8.1.0 +Samsung SM-J260T1 Chrome Mobile 76.0 Android 8.1.0 +Samsung SM-J260T1 Chrome Mobile 76.0 Android 8.1.0 +Samsung SM-J260T1 Chrome Mobile 77.0 Android 8.1.0 +Samsung SM-J410F Chrome Mobile 77.0 Android 8.1.0 +Samsung SM-J727P Chrome Mobile 68.0 Android 8.1.0 +Samsung SM-J727T Chrome Mobile 66.0 Android 8.1.0 +Samsung SM-J727T1 Chrome Mobile 76.0 Android 8.1.0 +Samsung SM-J727T1 Chrome Mobile 77.0 Android 8.1.0 +Samsung SM-J727T1 Chrome Mobile 77.0 Android 8.1.0 +Samsung SM-J727V Chrome Mobile 70.0 Android 8.1.0 +Samsung SM-J727V Chrome Mobile 77.0 Android 8.1.0 +SM-P580 Chrome 77.0 Android 8.1.0 +SM-T380 Chrome 75.0 Android 8.1.0 +SM-T580 Edge Mobile 42.0 Android 8.1.0 +SM-T580 Chrome 76.0 Android 8.1.0 +SM-T580 Chrome 76.0 Android 8.1.0 +SM-T580 Chrome 77.0 Android 8.1.0 +Samsung SM-T837T Chrome 77.0 Android 8.1.0 +TECNO CF8 Facebook 239.0 Android 8.1.0 +V1818CA Chrome Mobile 75.0 Android 8.1.0 +meizu C9 Chrome Mobile 68.0 Android 8.1.0 +vivo 1724 Chrome Mobile 76.0 Android 8.1.0 +vivo 1814 Chrome Mobile 77.0 Android 8.1.0 +Generic Smartphone DuckDuckGo Mobile 5.0 Android 9.0.0 +1825 Chrome Mobile 70.0 Android 9.0.0 +ANE-LX2 Facebook 236.0 Android 9.0.0 +BLA-A09 Chrome Mobile 77.0 Android 9.0.0 +Huawei CLT-L04 Chrome Mobile 77.0 Android 9.0.0 +Oppo CPH1911 Facebook 239.0 Android 9.0.0 +Oppo CPH1923 Chrome Mobile WebView 76.0 Android 9.0.0 +Huawei ELE-L29 Chrome Mobile 77.0 Android 9.0.0 +G8142 Chrome Mobile 77.0 Android 9.0.0 +GM1911 Chrome Mobile 76.0 Android 9.0.0 +GM1917 Chrome Mobile 77.0 Android 9.0.0 +Huawei INE-LX2 Chrome Mobile 76.0 Android 9.0.0 +LM-G710 Chrome Mobile WebView 77.0 Android 9.0.0 +LM-Q720 Chrome Mobile 77.0 Android 9.0.0 +LM-V405 Chrome Mobile WebView 77.0 Android 9.0.0 +LM-V405 Chrome Mobile 76.0 Android 9.0.0 +LM-V500N Chrome Mobile 77.0 Android 9.0.0 +LM-X420 Chrome Mobile 72.0 Android 9.0.0 +LM-X420 Chrome Mobile 77.0 Android 9.0.0 +MAR-LX1A Chrome Mobile 77.0 Android 9.0.0 +XiaoMi MI 9 Chrome Mobile 77.0 Android 9.0.0 +XiaoMi Mi A2 Chrome Mobile 77.0 Android 9.0.0 +Moto Z (2) Chrome Mobile 77.0 Android 9.0.0 +Nokia 6 Chrome Mobile 77.0 Android 9.0.0 +OnePlus 
ONEPLUS A6000 Chrome Mobile 77.0 Android 9.0.0 +OnePlus ONEPLUS A6003 Chrome Mobile 77.0 Android 9.0.0 +OnePlus ONEPLUS A6013 Chrome Mobile WebView 77.0 Android 9.0.0 +OnePlus ONEPLUS A6013 Chrome Mobile 74.0 Android 9.0.0 +OnePlus ONEPLUS A6013 Chrome Mobile 77.0 Android 9.0.0 +PAR-AL00 Facebook 235.0 Android 9.0.0 +Pixel 2 XL Chrome Mobile 77.0 Android 9.0.0 +Pixel 3 Chrome Mobile WebView 77.0 Android 9.0.0 +Pixel 3 Chrome Mobile 76.0 Android 9.0.0 +Pixel 3 Chrome Mobile 77.0 Android 9.0.0 +Pixel 3a XL Chrome Mobile 77.0 Android 9.0.0 +REVVLRY Chrome Mobile 73.0 Android 9.0.0 +Oppo RMX1801 Chrome Mobile 75.0 Android 9.0.0 +XiaoMi Redmi 7 Chrome Mobile 77.0 Android 9.0.0 +XiaoMi Redmi Note 7 Chrome Mobile 76.0 Android 9.0.0 +Samsung SM-A102U Samsung Internet 10.1 Android 9.0.0 +Samsung SM-A505FN Samsung Internet 10.1 Android 9.0.0 +Samsung SM-A505GN Samsung Internet 10.1 Android 9.0.0 +Samsung SM-G892U Samsung Internet 10.1 Android 9.0.0 +Samsung SM-G950U Samsung Internet 10.1 Android 9.0.0 +Samsung SM-G955F Samsung Internet 9.4 Android 9.0.0 +Samsung SM-G955U Samsung Internet 10.1 Android 9.0.0 +Samsung SM-G9600 Samsung Internet 9.4 Android 9.0.0 +Samsung SM-G960U Samsung Internet 10.1 Android 9.0.0 +Samsung SM-G965U Samsung Internet 10.1 Android 9.0.0 +Samsung SM-G970F Samsung Internet 10.1 Android 9.0.0 +Samsung SM-G970U Samsung Internet 10.1 Android 9.0.0 +Samsung SM-G973U Samsung Internet 9.4 Android 9.0.0 +Samsung SM-G973U Samsung Internet 10.1 Android 9.0.0 +Samsung SM-G975U Samsung Internet 10.1 Android 9.0.0 +Samsung SM-J415F Samsung Internet 10.1 Android 9.0.0 +Samsung SM-J730F Samsung Internet 10.1 Android 9.0.0 +Samsung SM-J737P Samsung Internet 10.1 Android 9.0.0 +Samsung SM-J737T Samsung Internet 9.0 Android 9.0.0 +Samsung SM-N950U Samsung Internet 10.1 Android 9.0.0 +Samsung SM-N960F Samsung Internet 10.1 Android 9.0.0 +Samsung SM-N960U Samsung Internet 10.1 Android 9.0.0 +Samsung SM-N960U1 Samsung Internet 9.2 Android 9.0.0 +Samsung SM-N970U Samsung Internet 10.1 Android 9.0.0 +Samsung SM-N975U Samsung Internet 10.1 Android 9.0.0 +Samsung SM-N975U1 Samsung Internet 10.1 Android 9.0.0 +Samsung SM-T510 Samsung Internet 10.1 Android 9.0.0 +Samsung SM-T720 Samsung Internet 10.1 Android 9.0.0 +SHIELD Android TV Chrome Mobile WebView 77.0 Android 9.0.0 +Samsung SM-A102U Chrome Mobile 72.0 Android 9.0.0 +Samsung SM-A102U Chrome Mobile 77.0 Android 9.0.0 +Samsung SM-A105M Facebook 237.0 Android 9.0.0 +Samsung SM-A205G Chrome Mobile 77.0 Android 9.0.0 +Samsung SM-A205U Chrome Mobile 77.0 Android 9.0.0 +Samsung SM-A505F Chrome Mobile 77.0 Android 9.0.0 +Samsung SM-A530F Facebook 240.0 Android 9.0.0 +Samsung SM-A530N Chrome Mobile WebView 77.0 Android 9.0.0 +Samsung SM-A600T Chrome Mobile 77.0 Android 9.0.0 +Samsung SM-A605F Chrome Mobile 77.0 Android 9.0.0 +Samsung SM-A920F Chrome Mobile 77.0 Android 9.0.0 +Samsung SM-G892A Chrome Mobile 74.0 Android 9.0.0 +Samsung SM-G950F Chrome Mobile 77.0 Android 9.0.0 +Samsung SM-G950U Chrome Mobile WebView 77.0 Android 9.0.0 +Samsung SM-G950U Chrome Mobile 71.0 Android 9.0.0 +Samsung SM-G950U Chrome Mobile 76.0 Android 9.0.0 +Samsung SM-G950U Chrome Mobile 76.0 Android 9.0.0 +Samsung SM-G950U Chrome Mobile 77.0 Android 9.0.0 +Samsung SM-G950U1 Chrome Mobile 77.0 Android 9.0.0 +Samsung SM-G955F Chrome Mobile 77.0 Android 9.0.0 +Samsung SM-G955U Facebook 240.0 Android 9.0.0 +Samsung SM-G955U Chrome Mobile 77.0 Android 9.0.0 +Samsung SM-G9600 Chrome Mobile 77.0 Android 9.0.0 +Samsung SM-G960U Facebook 233.0 Android 9.0.0 +Samsung SM-G960U Chrome 
Mobile WebView 77.0 Android 9.0.0 +Samsung SM-G960U Chrome Mobile 71.0 Android 9.0.0 +Samsung SM-G960U Chrome Mobile 74.0 Android 9.0.0 +Samsung SM-G960U Chrome Mobile 77.0 Android 9.0.0 +Samsung SM-G960U1 Facebook 240.0 Android 9.0.0 +Samsung SM-G960U1 Chrome Mobile 77.0 Android 9.0.0 +Samsung SM-G965F Chrome Mobile 77.0 Android 9.0.0 +Samsung SM-G965U Chrome Mobile 74.0 Android 9.0.0 +Samsung SM-G965U Chrome Mobile 77.0 Android 9.0.0 +Samsung SM-G965U Chrome Mobile 79.0 Android 9.0.0 +Samsung SM-G965U1 Chrome Mobile 77.0 Android 9.0.0 +Samsung SM-G970U Facebook 240.0 Android 9.0.0 +Samsung SM-G970U Chrome Mobile 75.0 Android 9.0.0 +Samsung SM-G970U Chrome Mobile 77.0 Android 9.0.0 +Samsung SM-G970U1 Chrome Mobile WebView 77.0 Android 9.0.0 +Samsung SM-G973U Chrome Mobile 74.0 Android 9.0.0 +Samsung SM-G973U Chrome Mobile 77.0 Android 9.0.0 +Samsung SM-G973U1 Chrome Mobile 77.0 Android 9.0.0 +Samsung SM-G975U Chrome Mobile 75.0 Android 9.0.0 +Samsung SM-G975U Chrome Mobile 76.0 Android 9.0.0 +Samsung SM-G975U Chrome Mobile 77.0 Android 9.0.0 +Samsung SM-G975U1 Chrome Mobile 77.0 Android 9.0.0 +Samsung SM-J260A Chrome Mobile 77.0 Android 9.0.0 +Samsung SM-J337P Chrome Mobile 76.0 Android 9.0.0 +Samsung SM-J600FN Chrome Mobile 75.0 Android 9.0.0 +Samsung SM-J600G Facebook 238.0 Android 9.0.0 +Samsung SM-J730F Chrome Mobile 77.0 Android 9.0.0 +Samsung SM-J737A Chrome Mobile WebView 77.0 Android 9.0.0 +Samsung SM-J737A Chrome Mobile 74.0 Android 9.0.0 +Samsung SM-J737V Pinterest 0.0 Android 9.0.0 +Samsung SM-J737V Chrome Mobile 77.0 Android 9.0.0 +Samsung SM-J810M Chrome Mobile 77.0 Android 9.0.0 +Samsung SM-N950U Facebook 240.0 Android 9.0.0 +Samsung SM-N950U Chrome Mobile 72.0 Android 9.0.0 +Samsung SM-N950U Chrome Mobile 75.0 Android 9.0.0 +Samsung SM-N950U Chrome Mobile 77.0 Android 9.0.0 +Samsung SM-N950U Chrome Mobile 77.0 Android 9.0.0 +Samsung SM-N960F Chrome Mobile 76.0 Android 9.0.0 +Samsung SM-N960U Facebook 240.0 Android 9.0.0 +Samsung SM-N960U Chrome Mobile WebView 77.0 Android 9.0.0 +Samsung SM-N960U Chrome Mobile 74.0 Android 9.0.0 +Samsung SM-N960U Chrome Mobile 75.0 Android 9.0.0 +Samsung SM-N960U Chrome Mobile 76.0 Android 9.0.0 +Samsung SM-N960U Chrome Mobile 77.0 Android 9.0.0 +Samsung SM-N960U1 Chrome Mobile 77.0 Android 9.0.0 +Samsung SM-N975U Chrome Mobile WebView 77.0 Android 9.0.0 +Samsung SM-N975U Chrome Mobile WebView 77.0 Android 9.0.0 +Samsung SM-N975U Chrome Mobile 77.0 Android 9.0.0 +Samsung SM-N976V Facebook 240.0 Android 9.0.0 +Samsung SM-S367VL Chrome Mobile 77.0 Android 9.0.0 +Samsung SM-S767VL Chrome Mobile 76.0 Android 9.0.0 +Samsung SM-T597P Chrome 77.0 Android 9.0.0 +SM-T720 Chrome 77.0 Android 9.0.0 +TECNO KC8 Chrome Mobile 77.0 Android 9.0.0 +Huawei VOG-L29 Chrome Mobile 77.0 Android 9.0.0 +cp3705A Chrome Mobile 74.0 Android 9.0.0 +moto g(6) Chrome Mobile WebView 77.0 Android 9.0.0 +moto g(6) play Chrome Mobile 77.0 Android 9.0.0 +moto g(7) play Facebook 235.0 Android 9.0.0 +moto g(7) play Chrome Mobile 70.0 Android 9.0.0 +moto g(7) power Chrome Mobile 75.0 Android 9.0.0 +moto g(7) power Chrome Mobile 77.0 Android 9.0.0 +moto z4 Chrome Mobile 73.0 Android 9.0.0 +moto z4 Chrome Mobile 77.0 Android 9.0.0 +Samsung GT-P3113 Android 4.1 Android 4.1.1 +Samsung GT-I8160 Android 4.1 Android 4.1.2 +Asus Nexus 7 Android 4.2 Android 4.2.2 +Samsung SM-E500H Android 4.4 Android 4.4.0 +LGMS550 Chrome Mobile WebView 43.0 Android 6.0.1 +Samsung SM-J737T1 Chrome Mobile WebView 43.0 Android 6.0.1 +TECNO CA6 Opera Mobile 5.3 Android 7.0.0 +XiaoMi Redmi 5A MiuiBrowser 9.5 
Android 7.1.2 +Oppo CPH1911 Chrome Mobile WebView 70.0 Android 9.0.0 +vivo 1904 Opera Mobile 44.1 Android 9.0.0 +Mac Firefox 68.0 Mac OS X 10.11.0 +Mac Firefox 69.0 Mac OS X 10.13.0 +Mac Firefox 67.0 Mac OS X 10.14.0 +Mac Firefox 68.0 Mac OS X 10.14.0 +Mac Firefox 69.0 Mac OS X 10.14.0 +Mac Firefox 70.0 Mac OS X 10.14.0 +Mac Chrome 76.0 Mac OS X 10.10.5 +Mac Chrome 77.0 Mac OS X 10.10.5 +Mac Safari 10.1 Mac OS X 10.10.5 +Mac Chrome 76.0 Mac OS X 10.11.4 +Mac Chrome 72.0 Mac OS X 10.11.6 +Mac Chrome 76.0 Mac OS X 10.11.6 +Mac Chrome 76.0 Mac OS X 10.11.6 +Mac Chrome 77.0 Mac OS X 10.11.6 +Mac Safari 9.1 Mac OS X 10.11.6 +Mac Safari 10.0 Mac OS X 10.11.6 +Mac Safari 11.1 Mac OS X 10.11.6 +Mac Chrome 77.0 Mac OS X 10.12.1 +Mac Safari 10.0 Mac OS X 10.12.3 +Mac Chrome 75.0 Mac OS X 10.12.6 +Mac Chrome 76.0 Mac OS X 10.12.6 +Mac Chrome 76.0 Mac OS X 10.12.6 +Mac Chrome 77.0 Mac OS X 10.12.6 +Mac Safari 12.1 Mac OS X 10.12.6 +Mac Safari 11.0 Mac OS X 10.13.0 +Mac Chrome 77.0 Mac OS X 10.13.1 +Mac Chrome 77.0 Mac OS X 10.13.2 +Mac Chrome 76.0 Mac OS X 10.13.4 +Mac Chrome 76.0 Mac OS X 10.13.4 +Mac Chrome 76.0 Mac OS X 10.13.5 +Mac Chrome 75.0 Mac OS X 10.13.6 +Mac Chrome 76.0 Mac OS X 10.13.6 +Mac Chrome 77.0 Mac OS X 10.13.6 +Mac Safari 12.0 Mac OS X 10.13.6 +Mac Safari 12.1 Mac OS X 10.13.6 +Mac Safari 12.1 Mac OS X 10.13.6 +Mac Safari 13.0 Mac OS X 10.13.6 +Mac Safari 13.0 Mac OS X 10.13.6 +Mac Chrome 75.0 Mac OS X 10.14.0 +Mac Chrome 76.0 Mac OS X 10.14.0 +Mac Chrome 77.0 Mac OS X 10.14.0 +Mac Chrome 77.0 Mac OS X 10.14.1 +Mac Chrome 76.0 Mac OS X 10.14.2 +Mac Chrome 69.0 Mac OS X 10.14.3 +Mac Safari 12.0 Mac OS X 10.14.3 +Mac Chrome 75.0 Mac OS X 10.14.4 +Mac Chrome 77.0 Mac OS X 10.14.4 +Mac Safari 12.1 Mac OS X 10.14.4 +Mac Chrome 76.0 Mac OS X 10.14.5 +Mac Chrome 77.0 Mac OS X 10.14.5 +Mac Safari 12.1 Mac OS X 10.14.5 +Mac Chrome 75.0 Mac OS X 10.14.6 +Mac Chrome 76.0 Mac OS X 10.14.6 +Mac Chrome 76.0 Mac OS X 10.14.6 +Mac Chrome 77.0 Mac OS X 10.14.6 +Mac Chrome 77.0 Mac OS X 10.14.6 +Mac Safari 12.1 Mac OS X 10.14.6 +Mac Safari 13.0 Mac OS X 10.14.6 +Mac Chrome 65.0 Mac OS X 10.9.5 +Mac Chrome 66.0 Mac OS X 10.9.5 +Mac Chrome 67.0 Mac OS X 10.9.5 +PlayStation 4 Apple Mail 605.1 Other 0.0.0 +Samsung SMART-TV Safari 3.0 Tizen 3.0.0 +Samsung SMART-TV Samsung Internet 2.0 Tizen 3.0.0 +Samsung SMART-TV Samsung Internet 2.1 Tizen 4.0.0 +Samsung SMART-TV Samsung Internet 2.2 Tizen 5.0.0 +Other Edge 17.17134 Windows 10.0.0 +Other Edge 18.17763 Windows 10.0.0 +Other Chrome 77.0 Windows 10.0.0 +Other Maxthon 5.2 Windows 10.0.0 +Other Chrome 73.1 Windows 10.0.0 +Other Chrome 76.0 Windows 10.0.0 +Other Opera 63.0 Windows 10.0.0 +Other Chrome 77.0 Windows 10.0.0 +Other Chrome 77.0 Windows 10.0.0 +Other Coc Coc 82.0 Windows 10.0.0 +Other IE 11.0 Windows 10.0.0 +Other Firefox 59.0 Windows 10.0.0 +Other Firefox 60.0 Windows 10.0.0 +Other Edge 15.15063 Windows 10.0.0 +Other Edge 16.16299 Windows 10.0.0 +Other Edge 17.17134 Windows 10.0.0 +Other Edge 18.17763 Windows 10.0.0 +Other Chrome 65.0 Windows 10.0.0 +Other Chrome 70.0 Windows 10.0.0 +Other Edge 18.18362 Windows 10.0.0 +Other Edge 18.18995 Windows 10.0.0 +Other Edge 18.19493 Windows 10.0.0 +Other Chrome 70.0 Windows 10.0.0 +Other Chrome 71.0 Windows 10.0.0 +Other Chrome 73.0 Windows 10.0.0 +Other Chrome 74.0 Windows 10.0.0 +Other Chrome 75.0 Windows 10.0.0 +Other Chrome 76.0 Windows 10.0.0 +Other Vivaldi 2.7 Windows 10.0.0 +Other Chrome 76.0 Windows 10.0.0 +Other Opera 63.0 Windows 10.0.0 +Other Chrome 77.0 Windows 10.0.0 +Other Chrome 77.0 
Windows 10.0.0 +Other Edge 79.0 Windows 10.0.0 +Other Edge 18.18362 Windows 10.0.0 +Other Edge 18.18363 Windows 10.0.0 +Other Edge 18.18362 Windows 10.0.0 +Other Firefox 61.0 Windows 10.0.0 +Other Firefox 63.0 Windows 10.0.0 +Other Firefox 67.0 Windows 10.0.0 +Other Firefox 68.0 Windows 10.0.0 +Other Firefox 69.0 Windows 10.0.0 +Other Firefox 69.0 Windows 10.0.0 +Other Chrome 49.0 Windows XP.0.0 +Other Chrome 49.0 Windows Vista.0.0 +Other Chrome 49.0 Windows Vista.0.0 +Other Chrome 76.0 Windows 7.0.0 +Other Chrome 77.0 Windows 7.0.0 +Other Chrome 77.0 Windows 7.0.0 +Other Coc Coc 80.0 Windows 7.0.0 +Other Coc Coc 82.0 Windows 7.0.0 +Other IE 11.0 Windows 7.0.0 +Other Chrome 67.0 Windows 7.0.0 +Other Chrome 70.0 Windows 7.0.0 +Other Chrome 72.0 Windows 7.0.0 +Other Chrome 74.0 Windows 7.0.0 +Other Chrome 75.0 Windows 7.0.0 +Other Chrome 76.0 Windows 7.0.0 +Other Chrome 76.0 Windows 7.0.0 +Other Chrome 77.0 Windows 7.0.0 +Other Waterfox 56.2 Windows 7.0.0 +Other Firefox 60.0 Windows 7.0.0 +Other Firefox 63.0 Windows 7.0.0 +Other Firefox 68.0 Windows 7.0.0 +Other Firefox 69.0 Windows 7.0.0 +Other Firefox 69.0 Windows 7.0.0 +Other Chrome 77.0 Windows 8.0.0 +Other Firefox 69.0 Windows 8.0.0 +Other Chrome 77.0 Windows 8.1.0 +Other IE 11.0 Windows RT 8.1.0 +Other IE 11.0 Windows 8.1.0 +Other IE 11.0 Windows 8.1.0 +Other Chrome 63.0 Windows 8.1.0 +Other Chrome 64.0 Windows 8.1.0 +Other Chrome 76.0 Windows 8.1.0 +Other Chrome 76.0 Windows 8.1.0 +Other Chrome 77.0 Windows 8.1.0 +Other Firefox 69.0 Windows 8.1.0 +Other Firefox 69.0 Windows 8.1.0 +Other Chrome 72.0 Windows 10.0.0 +Other Chrome 77.0 Chrome OS 12371.75.0 +Other Chrome 76.0 Chrome OS 12239.92.0 +Other Chrome 69.0 Chrome OS 10895.78.0 +Other Chrome 70.0 Chrome OS 11021.81.0 +Other Chrome 74.0 Chrome OS 11895.118.0 +Other Chrome 76.0 Chrome OS 12239.92.0 +Other Chrome 76.0 Chrome OS 12239.92.1 +Other Chrome 76.0 Chrome OS 12239.92.4 +Other Chrome 77.0 Chrome OS 12371.46.0 +Other Chrome 77.0 Chrome OS 12371.65.0 +Other Chrome 75.0 Linux 0.0.0 +Other Chrome 77.0 Linux 0.0.0 +Other Samsung Internet 10.1 Linux 0.0.0 +Other Chrome 66.0 Linux 0.0.0 +Other Chrome 66.0 Linux 0.0.0 +Other Chrome 66.0 Linux 0.0.0 +Other Chrome 66.0 Linux 0.0.0 +Other Chrome 66.0 Linux 0.0.0 +Other Firefox 65.0 Ubuntu 0.0.0 +Other Firefox 66.0 Ubuntu 0.0.0 +Other Firefox 67.0 Ubuntu 0.0.0 +iPad Google 22.0 iOS 10.3.3 +iPad Chrome Mobile iOS 71.0 iOS 10.3.3 +iPad Firefox iOS 14.0 iOS 10.3.3 +iPad Mobile Safari UI/WKWebView 0.0 iOS 10.3.3 +iPad Facebook 240.0 iOS 10.3.3 +iPad Mobile Safari 10.0 iOS 10.3.3 +iPad Mobile Safari 10.0 iOS 10.3.4 +iPad Chrome Mobile iOS 76.0 iOS 11.1.0 +iPad Chrome Mobile iOS 76.0 iOS 11.1.2 +iPad Mobile Safari 11.0 iOS 11.2.1 +iPad Mobile Safari 11.0 iOS 11.2.2 +iPad Mobile Safari 11.0 iOS 11.2.6 +iPad Mobile Safari 11.0 iOS 11.3.0 +iPad Mobile Safari 11.0 iOS 11.4.0 +iPad Mobile Safari UI/WKWebView 0.0 iOS 11.4.1 +iPad Mobile Safari 11.0 iOS 11.4.1 +iPad Google 83.0 iOS 12.0.0 +iPad Mobile Safari 12.0 iOS 12.0.0 +iPad Chrome Mobile iOS 75.0 iOS 12.1.0 +iPad Chrome Mobile iOS 76.0 iOS 12.1.0 +iPad Mobile Safari UI/WKWebView 0.0 iOS 12.1.0 +iPad Mobile Safari 12.0 iOS 12.1.0 +iPad Mobile Safari 12.0 iOS 12.1.1 +iPad Google 48.0 iOS 12.1.4 +iPad Mobile Safari UI/WKWebView 0.0 iOS 12.1.4 +iPad Mobile Safari 12.0 iOS 12.1.4 +iPad Chrome Mobile iOS 76.0 iOS 12.2.0 +iPad Mobile Safari UI/WKWebView 0.0 iOS 12.2.0 +iPad Mobile Safari 12.1 iOS 12.2.0 +iPad Chrome Mobile iOS 77.0 iOS 12.3.0 +iPad Google 83.0 iOS 12.3.0 +iPad Mobile Safari 12.1 iOS 
12.3.0 +iPad Mobile Safari UI/WKWebView 0.0 iOS 12.3.1 +iPad Mobile Safari 12.1 iOS 12.3.1 +iPad Chrome Mobile iOS 76.0 iOS 12.4.0 +iPad Chrome Mobile iOS 76.0 iOS 12.4.0 +iPad Chrome Mobile iOS 77.0 iOS 12.4.0 +iPad Chrome Mobile iOS 77.0 iOS 12.4.0 +iPad Chrome Mobile iOS 77.0 iOS 12.4.0 +iPad Google 74.0 iOS 12.4.0 +iPad Google 83.0 iOS 12.4.0 +iPad Mobile Safari UI/WKWebView 0.0 iOS 12.4.0 +iPad Mobile Safari 12.1 iOS 12.4.0 +iPad Chrome Mobile iOS 67.0 iOS 12.4.1 +iPad Firefox iOS 19.0 iOS 12.4.1 +iPad Mobile Safari UI/WKWebView 0.0 iOS 12.4.1 +iPad Facebook 0.0 iOS 12.4.1 +iPad Facebook 0.0 iOS 12.4.1 +iPad Facebook 0.0 iOS 12.4.1 +iPad Facebook 0.0 iOS 12.4.1 +iPad Mobile Safari 12.1 iOS 12.4.1 +iPad Mobile Safari 6.0 iOS 6.1.3 +iPad Mobile Safari 8.0 iOS 8.0.0 +iPad Mobile Safari 8.0 iOS 8.2.0 +iPad Google 23.1 iOS 8.4.0 +iPad Mobile Safari 9.0 iOS 9.3.2 +iPad Mobile Safari 9.0 iOS 9.3.5 +iPhone Mobile Safari 10.0 iOS 10.2.0 +iPhone Facebook 0.0 iOS 10.3.3 +iPhone Google 68.0 iOS 10.3.4 +iPhone Mobile Safari 10.0 iOS 10.3.4 +iPhone Mobile Safari 11.0 iOS 11.0.3 +iPhone Mobile Safari 11.0 iOS 11.1.1 +iPhone Mobile Safari 11.0 iOS 11.1.2 +iPhone Mobile Safari 11.0 iOS 11.2.1 +iPhone Facebook 207.0 iOS 11.2.6 +iPhone Chrome Mobile iOS 76.0 iOS 11.3.0 +iPhone Facebook 0.0 iOS 11.3.0 +iPhone Mobile Safari 11.0 iOS 11.3.0 +iPhone Google 83.0 iOS 11.4.0 +iPhone Mobile Safari 11.0 iOS 11.4.0 +iPhone Google 74.1 iOS 11.4.1 +iPhone Mobile Safari 11.0 iOS 11.4.1 +iPhone Mobile Safari 12.0 iOS 12.0.0 +iPhone Mobile Safari 12.0 iOS 12.1.0 +iPhone Mobile Safari 12.0 iOS 12.1.1 +iPhone Google 74.1 iOS 12.1.2 +iPhone Facebook 0.0 iOS 12.1.2 +iPhone Mobile Safari 12.0 iOS 12.1.2 +iPhone Mobile Safari 12.0 iOS 12.1.3 +iPhone Google 74.1 iOS 12.1.4 +iPhone Mobile Safari 12.0 iOS 12.1.4 +iPhone Chrome Mobile iOS 72.0 iOS 12.2.0 +iPhone Chrome Mobile iOS 76.0 iOS 12.2.0 +iPhone Chrome Mobile iOS 77.0 iOS 12.2.0 +iPhone Facebook 0.0 iOS 12.2.0 +iPhone Facebook 0.0 iOS 12.2.0 +iPhone Mobile Safari 12.1 iOS 12.2.0 +iPhone Chrome Mobile iOS 77.0 iOS 12.3.0 +iPhone Google 83.0 iOS 12.3.0 +iPhone Mobile Safari 12.1 iOS 12.3.0 +iPhone Google 79.0 iOS 12.3.1 +iPhone Mobile Safari UI/WKWebView 0.0 iOS 12.3.1 +iPhone DuckDuckGo Mobile 7.0 iOS 12.3.1 +iPhone Facebook 0.0 iOS 12.3.1 +iPhone Facebook 0.0 iOS 12.3.1 +iPhone Facebook 0.0 iOS 12.3.1 +iPhone Mobile Safari 12.1 iOS 12.3.1 +iPhone Mobile Safari 12.1 iOS 12.3.2 +iPhone Chrome Mobile iOS 69.0 iOS 12.4.0 +iPhone Chrome Mobile iOS 73.0 iOS 12.4.0 +iPhone Chrome Mobile iOS 75.0 iOS 12.4.0 +iPhone Chrome Mobile iOS 76.0 iOS 12.4.0 +iPhone Chrome Mobile iOS 77.0 iOS 12.4.0 +iPhone Chrome Mobile iOS 77.0 iOS 12.4.0 +iPhone Google 81.0 iOS 12.4.0 +iPhone Google 82.1 iOS 12.4.0 +iPhone Google 83.0 iOS 12.4.0 +iPhone Facebook 0.0 iOS 12.4.0 +iPhone Facebook 0.0 iOS 12.4.0 +iPhone Facebook 0.0 iOS 12.4.0 +iPhone Facebook 0.0 iOS 12.4.0 +iPhone Mobile Safari 12.1 iOS 12.4.0 +iPhone Google 74.1 iOS 12.4.1 +iPhone Mobile Safari UI/WKWebView 0.0 iOS 12.4.1 +iPhone Instagram 89.0 iOS 12.4.1 +iPhone Facebook 240.0 iOS 12.4.1 +iPhone Facebook 0.0 iOS 12.4.1 +iPhone Facebook 0.0 iOS 12.4.1 +iPhone Facebook 0.0 iOS 12.4.1 +iPhone Facebook 0.0 iOS 12.4.1 +iPhone Facebook 0.0 iOS 12.4.1 +iPhone Facebook 0.0 iOS 12.4.1 +iPhone Facebook 0.0 iOS 12.4.1 +iPhone Facebook 0.0 iOS 12.4.1 +iPhone Facebook 0.0 iOS 12.4.1 +iPhone Facebook 0.0 iOS 12.4.1 +iPhone Facebook 0.0 iOS 12.4.1 +iPhone Facebook 0.0 iOS 12.4.1 +iPhone Facebook 0.0 iOS 12.4.1 +iPhone Facebook 0.0 iOS 12.4.1 +iPhone 
Facebook 0.0 iOS 12.4.1 +iPhone Facebook 0.0 iOS 12.4.1 +iPhone Facebook 0.0 iOS 12.4.1 +iPhone Facebook 0.0 iOS 12.4.1 +iPhone Facebook 0.0 iOS 12.4.1 +iPhone Facebook 0.0 iOS 12.4.1 +iPhone Facebook 0.0 iOS 12.4.1 +iPhone Mobile Safari 12.1 iOS 12.4.1 +iPhone Mobile Safari 12.4 iOS 12.4.1 +iPhone Mobile Safari UI/WKWebView 0.0 iOS 12.4.2 +iPhone Mobile Safari 12.1 iOS 12.4.2 +iPhone Chrome Mobile iOS 77.0 iOS 13.0.0 +iPhone Facebook 0.0 iOS 13.0.0 +iPhone Facebook 0.0 iOS 13.0.0 +iPhone Facebook 0.0 iOS 13.0.0 +iPhone Facebook 0.0 iOS 13.0.0 +iPhone Facebook 0.0 iOS 13.0.0 +iPhone Facebook 0.0 iOS 13.0.0 +iPhone Mobile Safari 13.0 iOS 13.0.0 +iPhone Chrome Mobile iOS 76.0 iOS 13.1.0 +iPhone Chrome Mobile iOS 77.0 iOS 13.1.0 +iPhone Chrome Mobile iOS 77.0 iOS 13.1.0 +iPhone Firefox iOS 8.1 iOS 13.1.0 +iPhone Google 83.0 iOS 13.1.0 +iPhone Mobile Safari UI/WKWebView 0.0 iOS 13.1.0 +iPhone DuckDuckGo Mobile 7.0 iOS 13.1.0 +iPhone Facebook 0.0 iOS 13.1.0 +iPhone Facebook 0.0 iOS 13.1.0 +iPhone Facebook 0.0 iOS 13.1.0 +iPhone Facebook 0.0 iOS 13.1.0 +iPhone Facebook 0.0 iOS 13.1.0 +iPhone Mobile Safari 13.0 iOS 13.1.0 +iPhone Mobile Safari UI/WKWebView 0.0 iOS 13.1.1 +iPhone Facebook 0.0 iOS 13.1.1 +iPhone Facebook 0.0 iOS 13.1.1 +iPhone Facebook 0.0 iOS 13.1.1 +iPhone Facebook 0.0 iOS 13.1.1 +iPhone Mobile Safari 13.0 iOS 13.1.1 +iPhone Mobile Safari UI/WKWebView 0.0 iOS 13.1.2 +iPhone Facebook 0.0 iOS 13.1.2 +iPhone Mobile Safari 13.0 iOS 13.1.2 diff --git a/tests/queries/0_stateless/02504_regexp_dictionary_ua_parser.sh b/tests/queries/0_stateless/02504_regexp_dictionary_ua_parser.sh new file mode 100755 index 00000000000..8e361a191d6 --- /dev/null +++ b/tests/queries/0_stateless/02504_regexp_dictionary_ua_parser.sh @@ -0,0 +1,87 @@ +#!/usr/bin/env bash + +# Tags: no-fasttest, no-parallel + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +user_files_path=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') + +mkdir -p $user_files_path/test_02504 + +cp $CURDIR/data_ua_parser/os.yaml ${user_files_path}/test_02504/ +cp $CURDIR/data_ua_parser/browser.yaml ${user_files_path}/test_02504/ +cp $CURDIR/data_ua_parser/device.yaml ${user_files_path}/test_02504/ + +$CLICKHOUSE_CLIENT -n --query=" +drop dictionary if exists regexp_os; +drop dictionary if exists regexp_browser; +drop dictionary if exists regexp_device; +drop table if exists user_agents; +create dictionary regexp_os +( + regex String, + os_replacement String default 'Other', + os_v1_replacement String default '0', + os_v2_replacement String default '0', + os_v3_replacement String default '0', + os_v4_replacement String default '0' +) +PRIMARY KEY(regex) +SOURCE(YAMLRegExpTree(PATH '${user_files_path}/test_02504/os.yaml')) +LIFETIME(0) +LAYOUT(regexp_tree); + +create dictionary regexp_browser +( + regex String, + family_replacement String default 'Other', + v1_replacement String default '0', + v2_replacement String default '0' +) +PRIMARY KEY(regex) +SOURCE(YAMLRegExpTree(PATH '${user_files_path}/test_02504/browser.yaml')) +LIFETIME(0) +LAYOUT(regexp_tree); + +create dictionary regexp_device +( + regex String, + device_replacement String default 'Other', + brand_replacement String, + model_replacement String +) +PRIMARY KEY(regex) +SOURCE(YAMLRegExpTree(PATH '${user_files_path}/test_02504/device.yaml')) +LIFETIME(0) +LAYOUT(regexp_tree); + +create table user_agents +( + ua String +) +Engine = Log(); +" + +$CLICKHOUSE_CLIENT -n --query=" +insert into user_agents select ua from input('ua String') FORMAT LineAsString" < $CURDIR/data_ua_parser/useragents.txt + +$CLICKHOUSE_CLIENT -n --query=" +select device, +concat(tupleElement(browser, 1), ' ', tupleElement(browser, 2), '.', tupleElement(browser, 3)) as browser , +concat(tupleElement(os, 1), ' ', tupleElement(os, 2), '.', tupleElement(os, 3), '.', tupleElement(os, 4)) as os +from ( + select dictGet('regexp_os', ('os_replacement', 'os_v1_replacement', 'os_v2_replacement', 'os_v3_replacement'), ua) os, + dictGet('regexp_browser', ('family_replacement', 'v1_replacement', 'v2_replacement'), ua) as browser, + dictGet('regexp_device', 'device_replacement', ua) device from user_agents); +" + +$CLICKHOUSE_CLIENT -n --query=" +drop dictionary if exists regexp_os; +drop dictionary if exists regexp_browser; +drop dictionary if exists regexp_device; +drop table if exists user_agents; +" + +rm -rf "$user_files_path/test_02504" diff --git a/tests/queries/0_stateless/02504_regexp_dictionary_yaml_source.reference b/tests/queries/0_stateless/02504_regexp_dictionary_yaml_source.reference index d4e23bf981c..dfcd170e8f4 100644 --- a/tests/queries/0_stateless/02504_regexp_dictionary_yaml_source.reference +++ b/tests/queries/0_stateless/02504_regexp_dictionary_yaml_source.reference @@ -1,5 +1,7 @@ ('TencentOS','123.45.67') -('Andriod','12') -('Andriod','default') -('Andriod','default') +('Android','12') +('Android','default') +('Android','default') +('BlackBerry WebKit','10.0') +('BlackBerry WebKit','1.0') (true,'61f0c404-5cb3-11e7-907b-a6006ad3dba0','2023-01-01','2023-01-01 01:01:01',[1,2,3,-1,-2,-3]) diff --git a/tests/queries/0_stateless/02504_regexp_dictionary_yaml_source.sh b/tests/queries/0_stateless/02504_regexp_dictionary_yaml_source.sh index f09409bb90b..1b5a9cdeea4 100755 --- 
a/tests/queries/0_stateless/02504_regexp_dictionary_yaml_source.sh +++ b/tests/queries/0_stateless/02504_regexp_dictionary_yaml_source.sh @@ -18,7 +18,7 @@ cat > "$yaml" < "$yaml" < "$yaml" < "$yaml" < arrayMap(y -> round(y, 5), x), corrMatrix(a_value, b_value, c_value, d_value)) FROM fh; + +SELECT round(abs(corr(x1,x2) - corrMatrix(x1,x2)[1][2]), 5), round(abs(corr(x1,x1) - corrMatrix(x1,x2)[1][1]), 5), round(abs(corr(x2,x2) - corrMatrix(x1,x2)[2][2]), 5) from (select randNormal(100, 1) as x1, randNormal(100,5) as x2 from numbers(100000)); + +SELECT covarSampMatrix(a_value) FROM (select a_value from fh limit 0); + +SELECT covarSampMatrix(a_value) FROM (select a_value from fh limit 1); + +SELECT covarSampMatrix(a_value, b_value, c_value, d_value) FROM (select a_value, b_value, c_value, d_value from fh limit 0); + +SELECT covarSampMatrix(a_value, b_value, c_value, d_value) FROM (select a_value, b_value, c_value, d_value from fh limit 1); + +SELECT arrayMap(x -> arrayMap(y -> round(y, 5), x), covarSampMatrix(a_value, b_value, c_value, d_value)) FROM fh; + +SELECT round(abs(covarSamp(x1,x2) - covarSampMatrix(x1,x2)[1][2]), 5), round(abs(covarSamp(x1,x1) - covarSampMatrix(x1,x2)[1][1]), 5), round(abs(covarSamp(x2,x2) - covarSampMatrix(x1,x2)[2][2]), 5) from (select randNormal(100, 1) as x1, randNormal(100,5) as x2 from numbers(100000)); + +SELECT covarPopMatrix(a_value) FROM (select a_value from fh limit 0); + +SELECT covarPopMatrix(a_value) FROM (select a_value from fh limit 1); + +SELECT covarPopMatrix(a_value, b_value, c_value, d_value) FROM (select a_value, b_value, c_value, d_value from fh limit 0); + +SELECT covarPopMatrix(a_value, b_value, c_value, d_value) FROM (select a_value, b_value, c_value, d_value from fh limit 1); + +SELECT arrayMap(x -> arrayMap(y -> round(y, 5), x), covarPopMatrix(a_value, b_value, c_value, d_value)) FROM fh; + +SELECT round(abs(covarPop(x1,x2) - covarPopMatrix(x1,x2)[1][2]), 5), round(abs(covarPop(x1,x1) - covarPopMatrix(x1,x2)[1][1]), 5), round(abs(covarPop(x2,x2) - covarPopMatrix(x1,x2)[2][2]), 5) from (select randNormal(100, 1) as x1, randNormal(100,5) as x2 from numbers(100000)); diff --git a/tests/queries/0_stateless/02531_two_level_aggregation_bug.sh b/tests/queries/0_stateless/02531_two_level_aggregation_bug.sh index 8c47471ad3c..d93fe59134f 100755 --- a/tests/queries/0_stateless/02531_two_level_aggregation_bug.sh +++ b/tests/queries/0_stateless/02531_two_level_aggregation_bug.sh @@ -4,7 +4,13 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -query_id=$(echo "select queryID() from (select sum(s), k from remote('127.0.0.{1,2}', view(select sum(number) s, bitAnd(number, 3) k from numbers_mt(1000000) group by k)) group by k) limit 1 settings group_by_two_level_threshold=1, max_threads=3, prefer_localhost_replica=1" | ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" --data-binary @- 2>&1) +# It is totally OK if some of the query processing threads occasionally did not process any data, since all the data may have been processed by the other threads. +# Check that at least once all 6 threads converted their aggregation data into a two-level hash table.
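+# For that reason the query is retried in the loop below until system.text_log reports all 6 conversions (max_threads=3 on each of the two replicas) for a single query_id.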
-${CLICKHOUSE_CLIENT} --query="system flush logs" -${CLICKHOUSE_CLIENT} --query="select count() from system.text_log where event_date >= today() - 1 and query_id = '${query_id}' and message like '%Converting aggregation data to two-level%'" +while true +do + query_id=$(echo "select queryID() from (select sum(s), k from remote('127.0.0.{1,2}', view(select sum(number) s, bitAnd(number, 3) k from numbers_mt(1000000) group by k)) group by k) limit 1 settings group_by_two_level_threshold=1, max_threads=3, prefer_localhost_replica=1" | ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" --data-binary @- 2>&1) + + ${CLICKHOUSE_CLIENT} --query="system flush logs" + ${CLICKHOUSE_CLIENT} --query="select count() from system.text_log where event_date >= yesterday() and query_id = '${query_id}' and message like '%Converting aggregation data to two-level%'" | grep -P '^6$' && break; +done diff --git a/tests/queries/0_stateless/025334_keyed_siphash.reference b/tests/queries/0_stateless/02534_keyed_siphash.reference similarity index 100% rename from tests/queries/0_stateless/025334_keyed_siphash.reference rename to tests/queries/0_stateless/02534_keyed_siphash.reference diff --git a/tests/queries/0_stateless/025334_keyed_siphash.sql b/tests/queries/0_stateless/02534_keyed_siphash.sql similarity index 100% rename from tests/queries/0_stateless/025334_keyed_siphash.sql rename to tests/queries/0_stateless/02534_keyed_siphash.sql diff --git a/tests/queries/0_stateless/02535_analyzer_limit_offset.reference b/tests/queries/0_stateless/02535_analyzer_limit_offset.reference new file mode 100644 index 00000000000..ea7e98aa8ab --- /dev/null +++ b/tests/queries/0_stateless/02535_analyzer_limit_offset.reference @@ -0,0 +1,10 @@ +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 diff --git a/tests/queries/0_stateless/02535_analyzer_limit_offset.sql b/tests/queries/0_stateless/02535_analyzer_limit_offset.sql new file mode 100644 index 00000000000..8f98d823e5c --- /dev/null +++ b/tests/queries/0_stateless/02535_analyzer_limit_offset.sql @@ -0,0 +1,3 @@ +SET allow_experimental_analyzer = 1; + +SELECT number FROM numbers(100) LIMIT 10 OFFSET 10; diff --git a/tests/queries/0_stateless/02536_date_from_number_inference_fix.reference b/tests/queries/0_stateless/02536_date_from_number_inference_fix.reference new file mode 100644 index 00000000000..3fb7eba4357 --- /dev/null +++ b/tests/queries/0_stateless/02536_date_from_number_inference_fix.reference @@ -0,0 +1,3 @@ +x Nullable(Int64) +20000101 +19000101 diff --git a/tests/queries/0_stateless/02536_date_from_number_inference_fix.sql b/tests/queries/0_stateless/02536_date_from_number_inference_fix.sql new file mode 100644 index 00000000000..912057265e7 --- /dev/null +++ b/tests/queries/0_stateless/02536_date_from_number_inference_fix.sql @@ -0,0 +1,4 @@ +desc format(JSONEachRow, '{"x" : "20000101"}'); +select * from format(JSONEachRow, '{"x" : "20000101"}'); +select * from format(JSONEachRow, '{"x" : "19000101"}'); + diff --git a/tests/queries/0_stateless/02540_duplicate_primary_key.sql b/tests/queries/0_stateless/02540_duplicate_primary_key.sql index 5934f597334..322b6d74845 100644 --- a/tests/queries/0_stateless/02540_duplicate_primary_key.sql +++ b/tests/queries/0_stateless/02540_duplicate_primary_key.sql @@ -4,103 +4,102 @@ set allow_suspicious_low_cardinality_types = 1; CREATE TABLE test ( - `timestamp` DateTime, - `latitude` Nullable(Float32) CODEC(Gorilla, ZSTD(1)), - `longitude` Nullable(Float32) CODEC(Gorilla, ZSTD(1)), - `m_registered` UInt8, - `m_mcc` Nullable(Int16), - `m_mnc` 
Nullable(Int16), - `m_ci` Nullable(Int32), - `m_tac` Nullable(Int32), - `enb_id` Nullable(Int32), - `ci` Nullable(Int32), - `m_earfcn` Int32, - `rsrp` Nullable(Int16), - `rsrq` Nullable(Int16), - `cqi` Nullable(Int16), - `source` String, - `gps_accuracy` Nullable(Float32), - `operator_name` String, - `band` Nullable(String), - `NAME_2` String, - `NAME_1` String, - `quadkey_19_key` FixedString(19), - `quadkey_17_key` FixedString(17), - `manipulation` UInt8, - `ss_rsrp` Nullable(Int16), - `ss_rsrq` Nullable(Int16), - `ss_sinr` Nullable(Int16), - `csi_rsrp` Nullable(Int16), - `csi_rsrq` Nullable(Int16), - `csi_sinr` Nullable(Int16), - `altitude` Nullable(Float32), - `access_technology` Nullable(String), - `buildingtype` String, - `LocationType` String, - `carrier_name` Nullable(String), - `CustomPolygonName` String, - `h3_10_pixel` UInt64, - `stc_cluster` Nullable(String), - PROJECTION cumsum_projection_simple + `coverage` DateTime, + `haunt` Nullable(Float32) CODEC(Gorilla, ZSTD(1)), + `sail` Nullable(Float32) CODEC(Gorilla, ZSTD(1)), + `empowerment_turnstile` UInt8, + `empowerment_haversack` Nullable(Int16), + `empowerment_function` Nullable(Int16), + `empowerment_guidance` Nullable(Int32), + `empowerment_high` Nullable(Int32), + `trading_id` Nullable(Int32), + `guidance` Nullable(Int32), + `empowerment_rawhide` Int32, + `memo` Nullable(Int16), + `oeuvre` Nullable(Int16), + `bun` Nullable(Int16), + `tramp` String, + `anthropology_total` Nullable(Float32), + `situation_name` String, + `timing` Nullable(String), + `NAME_cockroach` String, + `NAME_toe` String, + `business_error_methane` FixedString(110), + `business_instrumentation_methane` FixedString(15), + `market` UInt8, + `crew_memo` Nullable(Int16), + `crew_oeuvre` Nullable(Int16), + `crew_fortnight` Nullable(Int16), + `princess_memo` Nullable(Int16), + `princess_oeuvre` Nullable(Int16), + `princess_fortnight` Nullable(Int16), + `emerald` Nullable(Float32), + `cannon_crate` Nullable(String), + `thinking` String, + `SectorMen` String, + `rage_name` Nullable(String), + `DevelopmentalLigandName` String, + `chard_heavy_quadrant` UInt64, + `poster_effective` Nullable(String), + PROJECTION chrysalis_trapezium_ham ( SELECT - m_registered, - toStartOfInterval(timestamp, toIntervalMonth(1)), - toStartOfWeek(timestamp, 8), - toStartOfInterval(timestamp, toIntervalDay(1)), - NAME_1, - NAME_2, - operator_name, - rsrp, - rsrq, - ss_rsrp, - ss_rsrq, - cqi, - sum(multiIf(ss_rsrp IS NULL, 0, 1)), - sum(multiIf(ss_rsrq IS NULL, 0, 1)), - sum(multiIf(ss_sinr IS NULL, 0, 1)), - max(toStartOfInterval(timestamp, toIntervalDay(1))), - max(CAST(CAST(toStartOfInterval(timestamp, toIntervalDay(1)), 'Nullable(DATE)'), 'Nullable(TIMESTAMP)')), - min(toStartOfInterval(timestamp, toIntervalDay(1))), - min(CAST(CAST(toStartOfInterval(timestamp, toIntervalDay(1)), 'Nullable(DATE)'), 'Nullable(TIMESTAMP)')), + empowerment_turnstile, + toStartOfInterval(coverage, toIntervalMonth(1)), + toStartOfWeek(coverage, 10), + toStartOfInterval(coverage, toIntervalDay(1)), + NAME_toe, + NAME_cockroach, + situation_name, + memo, + oeuvre, + crew_memo, + crew_oeuvre, + bun, + sum(multiIf(crew_memo IS NULL, 0, 1)), + sum(multiIf(crew_oeuvre IS NULL, 0, 1)), + sum(multiIf(crew_fortnight IS NULL, 0, 1)), + max(toStartOfInterval(coverage, toIntervalDay(1))), + max(CAST(CAST(toStartOfInterval(coverage, toIntervalDay(1)), 'Nullable(DATE)'), 'Nullable(TIMESTAMP)')), + min(toStartOfInterval(coverage, toIntervalDay(1))), + min(CAST(CAST(toStartOfInterval(coverage, toIntervalDay(1)), 
'Nullable(DATE)'), 'Nullable(TIMESTAMP)')), count(), sum(1) GROUP BY - m_registered, - toStartOfInterval(timestamp, toIntervalMonth(1)), - toStartOfWeek(timestamp, 8), - toStartOfInterval(timestamp, toIntervalDay(1)), - m_registered, - toStartOfInterval(timestamp, toIntervalMonth(1)), - toStartOfWeek(timestamp, 8), - toStartOfInterval(timestamp, toIntervalDay(1)), - NAME_1, - NAME_2, - operator_name, - rsrp, - rsrq, - ss_rsrp, - ss_rsrq, - cqi + empowerment_turnstile, + toStartOfInterval(coverage, toIntervalMonth(1)), + toStartOfWeek(coverage, 10), + toStartOfInterval(coverage, toIntervalDay(1)), + empowerment_turnstile, + toStartOfInterval(coverage, toIntervalMonth(1)), + toStartOfWeek(coverage, 10), + toStartOfInterval(coverage, toIntervalDay(1)), + NAME_toe, + NAME_cockroach, + situation_name, + memo, + oeuvre, + crew_memo, + crew_oeuvre, + bun ) ) ENGINE = MergeTree -PARTITION BY toYYYYMM(timestamp) -ORDER BY (timestamp, operator_name, NAME_1, NAME_2) -SETTINGS index_granularity = 8192; +PARTITION BY toYYYYMM(coverage) +ORDER BY (coverage, situation_name, NAME_toe, NAME_cockroach); insert into test select * from generateRandom() limit 10; -with tt as ( - select cast(toStartOfInterval(timestamp, INTERVAL 1 day) as Date) as dd, count() as samples +with dissonance as ( + Select cast(toStartOfInterval(coverage, INTERVAL 1 day) as Date) as flour, count() as regulation from test - group by dd having dd >= toDate(now())-100 + group by flour having flour >= toDate(now())-100 ), -tt2 as ( - select dd, samples from tt - union distinct - select toDate(now())-1, ifnull((select samples from tt where dd = toDate(now())-1),0) as samples -) -select dd, samples from tt2 order by dd with fill step 1 limit 100 format Null; +cheetah as ( + Select flour, regulation from dissonance + union distinct + Select toDate(now())-1, ifnull((select regulation from dissonance where flour = toDate(now())-1),0) as regulation +) +Select flour, regulation from cheetah order by flour with fill step 1 limit 100 format Null; drop table test; diff --git a/tests/queries/0_stateless/02541_arrow_duration_type.reference b/tests/queries/0_stateless/02541_arrow_duration_type.reference new file mode 100644 index 00000000000..acd96eda1e7 --- /dev/null +++ b/tests/queries/0_stateless/02541_arrow_duration_type.reference @@ -0,0 +1,2 @@ +duration Nullable(Int64) +10000 diff --git a/tests/queries/0_stateless/02541_arrow_duration_type.sh b/tests/queries/0_stateless/02541_arrow_duration_type.sh new file mode 100755 index 00000000000..93f4f66bbf0 --- /dev/null +++ b/tests/queries/0_stateless/02541_arrow_duration_type.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +# Tags: no-parallel, no-fasttest + +set -e + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +DATA_DIR=$CUR_DIR/data_arrow + +$CLICKHOUSE_LOCAL -q "desc file('$DATA_DIR/duration.arrow')" +$CLICKHOUSE_LOCAL -q "select count() from file('$DATA_DIR/duration.arrow')" diff --git a/tests/queries/0_stateless/02541_lightweight_delete_on_cluster.reference b/tests/queries/0_stateless/02541_lightweight_delete_on_cluster.reference new file mode 100644 index 00000000000..02801a64d21 --- /dev/null +++ b/tests/queries/0_stateless/02541_lightweight_delete_on_cluster.reference @@ -0,0 +1,13 @@ +localhost 9000 0 0 0 +-- { echoOn } + +SELECT * FROM t1_local ORDER BY tc1, tc2; +partition1 1 1 +partition2 1 2 +partition1 2 3 +partition2 2 4 +DELETE FROM t1_local ON CLUSTER test_shard_localhost WHERE tc1 = 1; +localhost 9000 0 0 0 +SELECT * FROM t1_local ORDER BY tc1, tc2; +partition1 2 3 +partition2 2 4 diff --git a/tests/queries/0_stateless/02541_lightweight_delete_on_cluster.sql b/tests/queries/0_stateless/02541_lightweight_delete_on_cluster.sql new file mode 100644 index 00000000000..5d3da88d727 --- /dev/null +++ b/tests/queries/0_stateless/02541_lightweight_delete_on_cluster.sql @@ -0,0 +1,23 @@ +-- Tags: distributed, no-replicated-database +-- Tag no-replicated-database: ON CLUSTER is not allowed + +SET distributed_ddl_output_mode='throw'; + +CREATE TABLE t1_local ON CLUSTER test_shard_localhost(partition_col_1 String, tc1 int,tc2 int) ENGINE=MergeTree() PARTITION BY partition_col_1 ORDER BY tc1; + +INSERT INTO t1_local VALUES('partition1', 1,1); +INSERT INTO t1_local VALUES('partition2', 1,2); +INSERT INTO t1_local VALUES('partition1', 2,3); +INSERT INTO t1_local VALUES('partition2', 2,4); + +SET allow_experimental_lightweight_delete=1; + +-- { echoOn } + +SELECT * FROM t1_local ORDER BY tc1, tc2; + +DELETE FROM t1_local ON CLUSTER test_shard_localhost WHERE tc1 = 1; + +SELECT * FROM t1_local ORDER BY tc1, tc2; + +-- { echoOff } diff --git a/tests/queries/0_stateless/02541_tuple_element_with_null.reference b/tests/queries/0_stateless/02541_tuple_element_with_null.reference new file mode 100644 index 00000000000..7afa6248576 --- /dev/null +++ b/tests/queries/0_stateless/02541_tuple_element_with_null.reference @@ -0,0 +1,2 @@ +1 1 2 2 +\N \N 3 3 diff --git a/tests/queries/0_stateless/02541_tuple_element_with_null.sql b/tests/queries/0_stateless/02541_tuple_element_with_null.sql new file mode 100644 index 00000000000..d2062b60d49 --- /dev/null +++ b/tests/queries/0_stateless/02541_tuple_element_with_null.sql @@ -0,0 +1,19 @@ +DROP TABLE IF EXISTS test_tuple_element; +CREATE TABLE test_tuple_element +( + tuple Tuple(k1 Nullable(UInt64), k2 UInt64) +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS index_granularity = 8192; + +INSERT INTO test_tuple_element VALUES (tuple(1,2)), (tuple(NULL, 3)); + +SELECT + tupleElement(tuple, 'k1', 0) fine_k1_with_0, + tupleElement(tuple, 'k1', NULL) k1_with_null, + tupleElement(tuple, 'k2', 0) k2_with_0, + tupleElement(tuple, 'k2', NULL) k2_with_null +FROM test_tuple_element; + +DROP TABLE test_tuple_element; diff --git a/tests/queries/0_stateless/02542_table_function_format.reference b/tests/queries/0_stateless/02542_table_function_format.reference new file mode 100644 index 00000000000..c8488967144 --- /dev/null +++ b/tests/queries/0_stateless/02542_table_function_format.reference @@ -0,0 +1,22 @@ +a Nullable(String) +b Nullable(Int64) +a String +b Int64 +Hello 111 +World 123 +Hello 111 +World 123 +c1 Nullable(Int64) +c2 Nullable(Int64) +c3 Array(Nullable(Int64)) +c4 Array(Array(Nullable(String))) +a1 Int32 +a2 UInt64 +a3 
Array(Int32) +a4 Array(Array(String)) +1 2 [1,2,3] [['abc'],[],['d','e']] +1 2 [1,2,3] [['abc'],[],['d','e']] +20210129005809043707 +123456789 +987654321 +cust_id UInt128 diff --git a/tests/queries/0_stateless/02542_table_function_format.sql b/tests/queries/0_stateless/02542_table_function_format.sql new file mode 100644 index 00000000000..e32e9001b9f --- /dev/null +++ b/tests/queries/0_stateless/02542_table_function_format.sql @@ -0,0 +1,36 @@ +desc format(JSONEachRow, +$$ +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +$$); + +desc format(JSONEachRow, 'a String, b Int64', +$$ +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +$$); + +select * from format(JSONEachRow, 'a String, b Int64', +$$ +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +$$); + +desc format(CSV, '1,2,"[1,2,3]","[[\'abc\'], [], [\'d\', \'e\']]"'); +desc format(CSV, 'a1 Int32, a2 UInt64, a3 Array(Int32), a4 Array(Array(String))', '1,2,"[1,2,3]","[[\'abc\'], [], [\'d\', \'e\']]"'); +select * from format(CSV, '1,2,"[1,2,3]","[[\'abc\'], [], [\'d\', \'e\']]"'); +select * from format(CSV, 'a1 Int32, a2 UInt64, a3 Array(Int32), a4 Array(Array(String))', '1,2,"[1,2,3]","[[\'abc\'], [], [\'d\', \'e\']]"'); + +drop table if exists test; + +create table test as format(TSV, 'cust_id UInt128', '20210129005809043707\n123456789\n987654321'); + +select * from test; +desc table test; +drop table test; diff --git a/tests/queries/0_stateless/02551_ipv4_implicit_uint64.reference b/tests/queries/0_stateless/02551_ipv4_implicit_uint64.reference new file mode 100644 index 00000000000..ece1759ef64 --- /dev/null +++ b/tests/queries/0_stateless/02551_ipv4_implicit_uint64.reference @@ -0,0 +1,2 @@ +85.85.85.85 +138.68.230.86 diff --git a/tests/queries/0_stateless/02551_ipv4_implicit_uint64.sql b/tests/queries/0_stateless/02551_ipv4_implicit_uint64.sql new file mode 100644 index 00000000000..ff04f553851 --- /dev/null +++ b/tests/queries/0_stateless/02551_ipv4_implicit_uint64.sql @@ -0,0 +1,4 @@ +CREATE TABLE ip4test (ip IPv4) ENGINE=Memory; +INSERT INTO ip4test VALUES (22906492245), (2319771222); +SELECT * FROM ip4test; +DROP TABLE ip4test; diff --git a/tests/queries/0_stateless/02551_obfuscator_keywords.reference b/tests/queries/0_stateless/02551_obfuscator_keywords.reference new file mode 100644 index 00000000000..fc1d5e81bc8 --- /dev/null +++ b/tests/queries/0_stateless/02551_obfuscator_keywords.reference @@ -0,0 +1,3 @@ +select 1 order by 1 with fill step 1 +SELECT id, untuple(id) FROM id +SELECT 1 IS NULL diff --git a/tests/queries/0_stateless/02551_obfuscator_keywords.sh b/tests/queries/0_stateless/02551_obfuscator_keywords.sh new file mode 100755 index 00000000000..a17ad670f0a --- /dev/null +++ b/tests/queries/0_stateless/02551_obfuscator_keywords.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +obf="$CLICKHOUSE_FORMAT --obfuscate" + +echo "select 1 order by 1 with fill step 1" | $obf +echo "SELECT id, untuple(id) FROM id" | $obf +echo "SELECT 1 IS NULL" | $obf diff --git a/tests/queries/0_stateless/02552_regression_crash.reference b/tests/queries/0_stateless/02552_regression_crash.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02552_regression_crash.sql b/tests/queries/0_stateless/02552_regression_crash.sql new file mode 100644 index 00000000000..af272dbcebc --- /dev/null +++ b/tests/queries/0_stateless/02552_regression_crash.sql @@ -0,0 +1,14 @@ +CREATE TABLE store_sales +( + `ss_sold_date_sk` Float64, + `ss_sold_time_sk` Float64, + `ss_customer_sk` Float64, + `ss_cdemo_sk` Float64, + `ss_hdemo_sk` Float64 +) +ENGINE = Memory; + +insert into store_sales values (-3.273, -1.452, 4.267, 20.0, 40.0),(0.121, -0.615, 4.290, 20.0, 40.0), (-1.099, 2.755, -3.060, 20.0, 40.0),(1.090, 2.945, -2.346, 20.0, 40.0), (0.305, 2.179, -1.205, 20.0, 40.0),(-0.925, 0.702, 1.134, 20.0, 40.0), (3.178, -1.316, 7.221, 20.0, 40.0),(-2.756, -0.473, 2.569, 20.0, 40.0), (3.665, 2.303, 0.226, 20.0, 40.0),(1.662, 1.951, -0.070, 20.0, 40.0), (2.869, 0.593, 3.249, 20.0, 40.0),(0.818, -0.593, 4.594, 20.0, 40.0), (-1.917, 0.916, 0.209, 20.0, 40.0),(2.706, 1.523, 1.307, 20.0, 40.0), (0.219, 2.162, -1.214, 20.0, 40.0),(-4.510, 1.376, -2.007, 20.0, 40.0), (4.284, -0.515, 6.173, 20.0, 40.0),(-1.101, 2.810, -3.170, 20.0, 40.0), (-1.810, -1.117, 4.329, 20.0, 40.0),(0.055, 1.115, 0.797, 20.0, 40.0), (-2.178, 2.904, -3.898, 20.0, 40.0),(-3.494, -1.814, 4.882, 20.0, 40.0), (3.027, 0.476, 3.562, 20.0, 40.0),(-1.434, 1.151, -0.018, 20.0, 40.0), (1.180, 0.992, 1.606, 20.0, 40.0),(0.015, 0.971, 1.067, 20.0, 40.0), (-0.511, -0.875, 4.495, 20.0, 40.0),(0.961, 2.348, -1.216, 20.0, 40.0), (-2.279, 0.038, 1.785, 20.0, 40.0),(-1.568, -0.248, 2.712, 20.0, 40.0), (-0.496, 0.366, 2.020, 20.0, 40.0),(1.177, -1.401, 6.390, 20.0, 40.0), (2.882, -1.442, 7.325, 20.0, 40.0),(-1.066, 1.817, -1.167, 20.0, 40.0), (-2.144, 2.791, -3.655, 20.0, 40.0),(-4.370, 2.228, -3.642, 20.0, 40.0), (3.996, 2.775, -0.553, 20.0, 40.0),(0.289, 2.055, -0.965, 20.0, 40.0), (-0.588, -1.601, 5.908, 20.0, 40.0),(-1.801, 0.417, 1.265, 20.0, 40.0), (4.375, -1.499, 8.186, 20.0, 40.0),(-2.618, 0.038, 1.615, 20.0, 40.0), (3.616, -0.833, 6.475, 20.0, 40.0),(-4.045, -1.558, 4.094, 20.0, 40.0), (-3.962, 0.636, -0.253, 20.0, 40.0),(3.505, 2.625, -0.497, 20.0, 40.0), (3.029, -0.523, 5.560, 20.0, 40.0),(-3.520, -0.474, 2.188, 20.0, 40.0), (2.430, -1.469, 7.154, 20.0, 40.0),(1.547, -1.654, 7.082, 20.0, 40.0), (-1.370, 0.575, 1.165, 20.0, 40.0),(-1.869, -1.555, 5.176, 20.0, 40.0), (3.536, 2.841, -0.913, 20.0, 40.0),(-3.810, 1.220, -1.344, 20.0, 40.0), (-1.971, 1.462, -0.910, 20.0, 40.0),(-0.243, 0.167, 2.545, 20.0, 40.0), (-1.403, 2.645, -2.991, 20.0, 40.0),(0.532, -0.114, 3.494, 20.0, 40.0), (-1.678, 0.975, 0.212, 20.0, 40.0),(-0.656, 2.140, -1.609, 20.0, 40.0), (1.743, 2.631, -1.390, 20.0, 40.0),(2.586, 2.943, -1.593, 20.0, 40.0), (-0.512, 2.969, -3.195, 20.0, 40.0),(2.283, -0.100, 4.342, 20.0, 40.0), (-4.293, 0.872, -0.890, 20.0, 40.0),(3.411, 1.300, 2.106, 20.0, 40.0), (-0.281, 2.951, -3.042, 20.0, 40.0),(-4.442, 0.384, 0.012, 20.0, 40.0), (1.194, 1.746, 0.104, 20.0, 40.0),(-1.152, 1.862, -1.300, 20.0, 40.0), (1.362, -1.341, 6.363, 20.0, 40.0),(-4.488, 2.618, -4.481, 20.0, 40.0), (3.419, -0.564, 5.837, 20.0, 40.0),(-3.392, 0.396, 0.512, 20.0, 40.0), (-1.629, -0.909, 4.003, 20.0, 
40.0),(4.447, -1.088, 7.399, 20.0, 40.0), (-1.232, 1.699, -1.014, 20.0, 40.0),(-1.286, -0.609, 3.575, 20.0, 40.0), (2.437, 2.796, -1.374, 20.0, 40.0),(-4.864, 1.989, -3.410, 20.0, 40.0), (-1.716, -1.399, 4.940, 20.0, 40.0),(-3.084, 1.858, -2.259, 20.0, 40.0), (2.828, -0.319, 5.053, 20.0, 40.0),(-1.226, 2.586, -2.786, 20.0, 40.0), (2.456, 0.092, 4.044, 20.0, 40.0),(-0.989, 2.375, -2.245, 20.0, 40.0), (3.268, 0.935, 2.765, 20.0, 40.0),(-4.128, -1.995, 4.927, 20.0, 40.0), (-1.083, 2.197, -1.935, 20.0, 40.0),(-3.471, -1.198, 3.660, 20.0, 40.0), (4.617, -1.136, 7.579, 20.0, 40.0),(2.054, -1.675, 7.378, 20.0, 40.0), (4.106, 2.326, 0.402, 20.0, 40.0),(1.558, 0.310, 3.158, 20.0, 40.0), (0.792, 0.900, 1.596, 20.0, 40.0),(-3.229, 0.300, 0.785, 20.0, 40.0), (3.787, -0.793, 6.479, 20.0, 40.0),(1.786, 2.288, -0.684, 20.0, 40.0), (2.643, 0.223, 3.875, 20.0, 40.0),(-3.592, 2.122, -3.040, 20.0, 40.0), (4.519, -1.760, 8.779, 20.0, 40.0),(3.221, 2.255, 0.101, 20.0, 40.0), (4.151, 1.788, 1.500, 20.0, 40.0),(-1.033, -1.195, 4.874, 20.0, 40.0), (-1.636, -1.037, 4.257, 20.0, 40.0),(-3.548, 1.911, -2.596, 20.0, 40.0), (4.829, -0.293, 6.001, 20.0, 40.0),(-4.684, -1.664, 3.986, 20.0, 40.0), (4.531, -0.503, 6.271, 20.0, 40.0),(-3.503, -1.606, 4.460, 20.0, 40.0), (-2.036, -1.522, 5.027, 20.0, 40.0),(-0.473, -0.617, 3.997, 20.0, 40.0), (-1.554, -1.630, 5.483, 20.0, 40.0),(-3.567, -1.043, 3.302, 20.0, 40.0), (-2.038, 0.579, 0.823, 20.0, 40.0),(-3.040, 0.857, -0.233, 20.0, 40.0), (4.610, 0.562, 4.181, 20.0, 40.0),(-3.323, -1.938, 5.215, 20.0, 40.0), (4.314, 1.720, 1.717, 20.0, 40.0),(-1.220, 0.615, 1.161, 20.0, 40.0), (-2.556, 1.120, -0.519, 20.0, 40.0),(-3.717, -0.108, 1.358, 20.0, 40.0), (4.689, -1.826, 8.996, 20.0, 40.0),(3.452, 0.506, 3.713, 20.0, 40.0), (2.472, 0.612, 3.012, 20.0, 40.0),(3.452, 0.450, 3.826, 20.0, 40.0), (1.207, 2.585, -1.567, 20.0, 40.0),(-4.826, 1.090, -1.593, 20.0, 40.0), (3.116, -1.118, 6.794, 20.0, 40.0),(0.448, 2.732, -2.240, 20.0, 40.0), (-1.096, -0.525, 3.503, 20.0, 40.0),(-4.680, -0.238, 1.137, 20.0, 40.0), (2.552, -1.403, 7.082, 20.0, 40.0),(0.719, 2.997, -2.635, 20.0, 40.0), (0.347, -1.966, 7.105, 20.0, 40.0),(2.958, -0.404, 5.288, 20.0, 40.0), (0.722, -1.950, 7.261, 20.0, 40.0),(-2.851, -0.986, 3.546, 20.0, 40.0), (-4.316, -0.439, 1.721, 20.0, 40.0),(-1.685, -0.201, 2.560, 20.0, 40.0), (1.856, 0.190, 3.549, 20.0, 40.0),(-2.052, 0.206, 1.562, 20.0, 40.0), (-2.504, -0.646, 3.041, 20.0, 40.0),(3.235, 0.882, 2.854, 20.0, 40.0), (-1.366, -1.573, 5.463, 20.0, 40.0),(-3.447, 2.419, -3.562, 20.0, 40.0), (4.155, 2.092, 0.893, 20.0, 40.0),(-0.935, 0.209, 2.116, 20.0, 40.0), (3.117, -1.821, 8.201, 20.0, 40.0),(3.759, 0.577, 3.725, 20.0, 40.0), (-0.938, 2.992, -3.453, 20.0, 40.0),(-0.525, 2.341, -1.945, 20.0, 40.0), (4.540, 2.625, 0.019, 20.0, 40.0),(-2.097, 1.190, -0.429, 20.0, 40.0), (-2.672, 1.983, -2.302, 20.0, 40.0),(-3.038, -1.490, 4.460, 20.0, 40.0), (-0.943, 2.149, -1.770, 20.0, 40.0),(0.739, 1.598, 0.174, 20.0, 40.0), (1.828, 1.853, 0.208, 20.0, 40.0),(4.856, 0.137, 5.153, 20.0, 40.0), (-1.617, 0.468, 1.255, 20.0, 40.0),(-1.972, 2.053, -2.092, 20.0, 40.0), (-4.633, 1.389, -2.094, 20.0, 40.0),(-3.628, -1.156, 3.498, 20.0, 40.0), (3.597, 1.034, 2.731, 20.0, 40.0),(-1.488, -0.002, 2.261, 20.0, 40.0), (0.749, 1.921, -0.468, 20.0, 40.0),(1.304, -1.371, 6.394, 20.0, 40.0), (4.587, 2.936, -0.579, 20.0, 40.0),(-2.241, 1.791, -1.703, 20.0, 40.0), (-2.945, 1.372, -1.216, 20.0, 40.0),(1.375, 0.395, 2.898, 20.0, 40.0), (-1.281, -0.641, 3.642, 20.0, 40.0),(2.178, 0.895, 2.299, 20.0, 40.0), (3.031, 
-0.786, 6.087, 20.0, 40.0),(-1.385, -0.375, 3.058, 20.0, 40.0), (4.041, -0.431, 5.882, 20.0, 40.0),(0.480, -0.507, 4.254, 20.0, 40.0), (-3.797, 0.140, 0.822, 20.0, 40.0),(2.355, 2.502, -0.827, 20.0, 40.0), (1.376, -1.583, 6.854, 20.0, 40.0),(0.164, 1.405, 0.273, 20.0, 40.0), (-1.273, 1.471, -0.579, 20.0, 40.0),(0.770, 2.246, -1.107, 20.0, 40.0), (4.552, 2.904, -0.533, 20.0, 40.0),(4.259, -1.772, 8.674, 20.0, 40.0), (-0.309, 1.159, 0.528, 20.0, 40.0),(3.581, 2.700, -0.610, 20.0, 40.0), (-3.202, 0.346, 0.707, 20.0, 40.0),(-1.575, 1.242, -0.271, 20.0, 40.0), (-1.584, -0.493, 3.194, 20.0, 40.0),(-3.778, 0.150, 0.810, 20.0, 40.0), (-4.675, 1.749, -2.835, 20.0, 40.0),(3.567, -0.792, 6.367, 20.0, 40.0), (-0.417, 1.399, -0.006, 20.0, 40.0),(-4.672, 2.007, -3.349, 20.0, 40.0), (-1.034, 0.196, 2.090, 20.0, 40.0),(-3.796, 2.496, -3.890, 20.0, 40.0), (3.532, -0.497, 5.759, 20.0, 40.0),(4.868, -1.359, 8.151, 20.0, 40.0), (-0.769, 0.302, 2.011, 20.0, 40.0),(4.475, 2.612, 0.014, 20.0, 40.0), (-3.532, -0.395, 2.024, 20.0, 40.0),(0.322, 0.675, 1.812, 20.0, 40.0), (-2.028, -1.942, 5.870, 20.0, 40.0),(1.810, -1.244, 6.392, 20.0, 40.0), (-0.783, 1.242, 0.124, 20.0, 40.0),(-4.745, -1.300, 3.227, 20.0, 40.0), (1.902, 1.973, 0.005, 20.0, 40.0),(-3.453, -1.429, 4.132, 20.0, 40.0), (1.559, 0.986, 1.808, 20.0, 40.0),(0.128, 2.754, -2.443, 20.0, 40.0), (2.759, 1.727, 0.926, 20.0, 40.0),(-4.468, 1.690, -2.614, 20.0, 40.0), (-2.368, -1.922, 5.659, 20.0, 40.0),(-2.766, 2.128, -2.640, 20.0, 40.0), (0.967, -1.825, 7.133, 20.0, 40.0),(-2.854, 2.855, -4.136, 20.0, 40.0), (-2.944, 1.875, -2.222, 20.0, 40.0),(-2.632, -0.983, 3.649, 20.0, 40.0), (2.427, 2.239, -0.266, 20.0, 40.0),(-1.726, -0.838, 3.812, 20.0, 40.0), (0.007, -0.903, 4.809, 20.0, 40.0),(-2.013, 1.092, -0.191, 20.0, 40.0), (-0.449, 0.970, 0.836, 20.0, 40.0),(1.396, 0.411, 2.876, 20.0, 40.0), (-1.115, -1.790, 6.023, 20.0, 40.0),(3.748, 1.917, 1.039, 20.0, 40.0), (2.978, 1.043, 2.404, 20.0, 40.0),(-3.969, 2.514, -4.013, 20.0, 40.0), (4.455, -0.050, 5.328, 20.0, 40.0),(-3.065, -0.846, 3.160, 20.0, 40.0), (-1.069, 2.167, -1.869, 20.0, 40.0),(3.016, -1.393, 7.294, 20.0, 40.0), (0.045, -1.928, 6.879, 20.0, 40.0),(-2.555, -0.984, 3.690, 20.0, 40.0), (-1.995, -0.054, 2.111, 20.0, 40.0),(4.600, -0.509, 6.318, 20.0, 40.0), (-1.942, 1.215, -0.402, 20.0, 40.0),(1.262, 2.765, -1.899, 20.0, 40.0), (2.617, -1.106, 6.521, 20.0, 40.0),(1.737, 0.554, 2.761, 20.0, 40.0), (-2.197, 0.632, 0.638, 20.0, 40.0),(4.768, 2.618, 0.147, 20.0, 40.0), (-3.737, -0.939, 3.010, 20.0, 40.0),(-2.623, 0.595, 0.499, 20.0, 40.0), (4.752, -0.340, 6.057, 20.0, 40.0),(2.333, -1.037, 6.240, 20.0, 40.0), (4.234, -1.882, 8.881, 20.0, 40.0),(-3.393, -0.812, 2.927, 20.0, 40.0), (0.885, 1.383, 0.678, 20.0, 40.0),(0.123, 2.937, -2.812, 20.0, 40.0), (2.969, 0.760, 2.964, 20.0, 40.0),(-4.929, 1.251, -1.967, 20.0, 40.0), (1.916, 2.223, -0.488, 20.0, 40.0),(-0.020, -1.740, 6.469, 20.0, 40.0), (0.702, -1.272, 5.895, 20.0, 40.0),(2.496, 2.648, -1.048, 20.0, 40.0), (4.067, -1.475, 7.984, 20.0, 40.0),(-3.717, 1.851, -2.561, 20.0, 40.0), (1.678, -0.624, 5.088, 20.0, 40.0),(1.073, 0.695, 2.146, 20.0, 40.0), (1.842, -0.749, 5.419, 20.0, 40.0),(-3.518, 1.909, -2.578, 20.0, 40.0), (2.229, 1.189, 1.737, 20.0, 40.0),(4.987, 2.893, -0.292, 20.0, 40.0), (-4.809, 1.043, -1.490, 20.0, 40.0),(-0.241, -0.728, 4.334, 20.0, 40.0), (-3.331, 0.590, 0.156, 20.0, 40.0),(-0.455, 2.621, -2.470, 20.0, 40.0), (1.492, 1.223, 1.301, 20.0, 40.0),(3.948, 2.841, -0.709, 20.0, 40.0), (0.732, 0.446, 2.475, 20.0, 40.0),(2.400, 2.390, -0.579, 20.0, 
40.0), (-2.718, 1.427, -1.213, 20.0, 40.0),(-1.826, 1.451, -0.815, 20.0, 40.0), (1.125, 0.438, 2.686, 20.0, 40.0),(-4.918, 1.880, -3.219, 20.0, 40.0), (3.068, -0.442, 5.418, 20.0, 40.0),(1.982, 1.201, 1.589, 20.0, 40.0), (0.701, -1.709, 6.768, 20.0, 40.0),(-1.496, 2.564, -2.877, 20.0, 40.0), (-3.812, 0.974, -0.853, 20.0, 40.0),(-3.405, 2.018, -2.739, 20.0, 40.0), (2.211, 2.889, -1.674, 20.0, 40.0),(-2.481, 2.931, -4.103, 20.0, 40.0), (-3.721, 2.765, -4.391, 20.0, 40.0),(-1.768, -1.292, 4.699, 20.0, 40.0), (-4.462, 1.058, -1.347, 20.0, 40.0),(-3.516, -1.942, 5.126, 20.0, 40.0), (0.485, 2.420, -1.597, 20.0, 40.0),(-0.492, 0.242, 2.270, 20.0, 40.0), (4.245, 1.689, 1.744, 20.0, 40.0),(2.234, 0.364, 3.389, 20.0, 40.0), (2.629, 2.224, -0.134, 20.0, 40.0),(-4.375, 1.221, -1.630, 20.0, 40.0), (-0.618, 1.374, -0.057, 20.0, 40.0),(-2.580, -1.604, 4.918, 20.0, 40.0), (0.159, 1.104, 0.871, 20.0, 40.0),(-3.597, 0.975, -0.749, 20.0, 40.0); +INSERT INTO store_sales (ss_sold_time_sk) VALUES (1); +INSERT INTO store_sales (ss_cdemo_sk) VALUES (0.1); +select stochasticLinearRegressionState(0.03, 0.00001, 2, 'Momentum')(ss_sold_time_sk, ss_sold_time_sk, ss_sold_time_sk) as ss_wholesale_cost from store_sales format Null; diff --git a/tests/queries/0_stateless/02552_sparse_columns_intersect.reference b/tests/queries/0_stateless/02552_sparse_columns_intersect.reference new file mode 100644 index 00000000000..7f0c43eab59 --- /dev/null +++ b/tests/queries/0_stateless/02552_sparse_columns_intersect.reference @@ -0,0 +1,2 @@ +0 +2000 diff --git a/tests/queries/0_stateless/02552_sparse_columns_intersect.sql b/tests/queries/0_stateless/02552_sparse_columns_intersect.sql new file mode 100644 index 00000000000..cdad50583c5 --- /dev/null +++ b/tests/queries/0_stateless/02552_sparse_columns_intersect.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS t_sparse_intersect; + +CREATE TABLE t_sparse_intersect (a UInt64, c Int64) ENGINE = MergeTree +ORDER BY tuple() SETTINGS ratio_of_defaults_for_sparse_serialization = 0.8; + +SYSTEM STOP MERGES t_sparse_intersect; + +INSERT INTO t_sparse_intersect SELECT if (number % 10 = 0, number, 0), number FROM numbers(1000); +INSERT INTO t_sparse_intersect SELECT number, number FROM numbers(1000); + +SELECT count() FROM (SELECT * FROM t_sparse_intersect EXCEPT SELECT * FROM t_sparse_intersect); +SELECT count() FROM (SELECT * FROM t_sparse_intersect INTERSECT SELECT * FROM t_sparse_intersect); + +DROP TABLE t_sparse_intersect; diff --git a/tests/queries/0_stateless/02553_type_json_attach_partition.reference b/tests/queries/0_stateless/02553_type_json_attach_partition.reference new file mode 100644 index 00000000000..611090b5207 --- /dev/null +++ b/tests/queries/0_stateless/02553_type_json_attach_partition.reference @@ -0,0 +1 @@ +{"b":"1","c":{"k1":[1,2]}} diff --git a/tests/queries/0_stateless/02553_type_json_attach_partition.sql b/tests/queries/0_stateless/02553_type_json_attach_partition.sql new file mode 100644 index 00000000000..9225106f767 --- /dev/null +++ b/tests/queries/0_stateless/02553_type_json_attach_partition.sql @@ -0,0 +1,14 @@ +SET allow_experimental_object_type = 1; + +DROP TABLE IF EXISTS t_json_attach_partition; + +CREATE TABLE t_json_attach_partition(b UInt64, c JSON) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO t_json_attach_partition FORMAT JSONEachRow {"b": 1, "c" : {"k1": 1}}; +ALTER TABLE t_json_attach_partition DETACH PARTITION tuple(); +INSERT INTO t_json_attach_partition FORMAT JSONEachRow {"b": 1, "c" : {"k1": [1, 2]}}; + +ALTER TABLE t_json_attach_partition 
ATTACH PARTITION tuple(); -- { serverError TYPE_MISMATCH } +SELECT * FROM t_json_attach_partition FORMAT JSONEachRow; + +DROP TABLE t_json_attach_partition; diff --git a/tests/queries/0_stateless/02554_format_json_columns_for_empty.reference b/tests/queries/0_stateless/02554_format_json_columns_for_empty.reference new file mode 100644 index 00000000000..4155c5d1a7a --- /dev/null +++ b/tests/queries/0_stateless/02554_format_json_columns_for_empty.reference @@ -0,0 +1,4 @@ +{ + "n": [], + "s": [] +} diff --git a/tests/queries/0_stateless/02554_format_json_columns_for_empty.sql b/tests/queries/0_stateless/02554_format_json_columns_for_empty.sql new file mode 100644 index 00000000000..55179ebac85 --- /dev/null +++ b/tests/queries/0_stateless/02554_format_json_columns_for_empty.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS json_columns; + +CREATE TABLE json_columns (n UInt32, s String) ENGINE = MergeTree order by n; + +SELECT * FROM json_columns FORMAT JSONColumns; diff --git a/tests/queries/0_stateless/02558_system_processes_elapsed.reference b/tests/queries/0_stateless/02558_system_processes_elapsed.reference new file mode 100644 index 00000000000..d86bac9de59 --- /dev/null +++ b/tests/queries/0_stateless/02558_system_processes_elapsed.reference @@ -0,0 +1 @@ +OK diff --git a/tests/queries/0_stateless/02558_system_processes_elapsed.sh b/tests/queries/0_stateless/02558_system_processes_elapsed.sh new file mode 100755 index 00000000000..891ac3cf7bc --- /dev/null +++ b/tests/queries/0_stateless/02558_system_processes_elapsed.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +while :; do + $CLICKHOUSE_CLIENT -q "select sleepEachRow(0.1) from numbers(100) settings max_block_size=1 format Null" & + pid=$! 
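+ # The background query sleeps 0.1s per row for 100 rows (~10s in total), so after the 1.5s pause it should still be running and floor(elapsed) is expected to be 1.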
+    sleep 1.5
+    duration="$($CLICKHOUSE_CLIENT -q "select floor(elapsed) from system.processes where current_database = currentDatabase() and query not like '%system.processes%'")"
+    kill -INT $pid
+    wait
+    $CLICKHOUSE_CLIENT -q "kill query where current_database = currentDatabase() sync format Null"
+    if [[ $duration -eq 1 ]]; then
+        echo "OK"
+        break
+    fi
+done
diff --git a/tests/queries/0_stateless/02559_ip_types_bloom.reference b/tests/queries/0_stateless/02559_ip_types_bloom.reference
new file mode 100644
index 00000000000..7a76aeeff8e
--- /dev/null
+++ b/tests/queries/0_stateless/02559_ip_types_bloom.reference
@@ -0,0 +1 @@
+1 1.1.1.1 ::1
diff --git a/tests/queries/0_stateless/02559_ip_types_bloom.sql b/tests/queries/0_stateless/02559_ip_types_bloom.sql
new file mode 100644
index 00000000000..b3fc16debd9
--- /dev/null
+++ b/tests/queries/0_stateless/02559_ip_types_bloom.sql
@@ -0,0 +1,18 @@
+DROP TABLE IF EXISTS ip_bloom;
+
+CREATE TABLE ip_bloom
+(
+    `a` UInt32,
+    `ip4` Nullable(IPv4),
+    `ip6` Nullable(IPv6),
+    INDEX x4 ip4 TYPE bloom_filter(0.1) GRANULARITY 3,
+    INDEX x6 ip6 TYPE bloom_filter(0.1) GRANULARITY 3
+)
+ENGINE = MergeTree
+ORDER BY a;
+
+INSERT INTO ip_bloom VALUES (1, '1.1.1.1', '::1');
+
+SELECT * FROM ip_bloom;
+
+DROP TABLE ip_bloom;
diff --git a/tests/queries/0_stateless/add-test b/tests/queries/0_stateless/add-test
index 39f6742f71c..e8e68cf174e 100755
--- a/tests/queries/0_stateless/add-test
+++ b/tests/queries/0_stateless/add-test
@@ -27,5 +27,12 @@ set -x
 touch ${TESTS_PATH}/${NEW_TEST_NO}_${FILENAME}.${FILEEXT}
 if [[ $FILEEXT == "sh" ]] ; then
     chmod +x ${TESTS_PATH}/${NEW_TEST_NO}_${FILENAME}.${FILEEXT}
+    # shellcheck disable=SC2016
+    echo '#!/usr/bin/env bash
+
+CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CUR_DIR"/../shell_config.sh
+' >> ${TESTS_PATH}/${NEW_TEST_NO}_${FILENAME}.${FILEEXT}
 fi
 touch ${TESTS_PATH}/${NEW_TEST_NO}_${FILENAME}.reference
diff --git a/tests/queries/0_stateless/data_arrow/duration.arrow b/tests/queries/0_stateless/data_arrow/duration.arrow
new file mode 100644
index 00000000000..abbdae772ed
Binary files /dev/null and b/tests/queries/0_stateless/data_arrow/duration.arrow differ
diff --git a/tests/queries/0_stateless/data_ua_parser/browser.yaml b/tests/queries/0_stateless/data_ua_parser/browser.yaml
new file mode 100644
index 00000000000..3596589c94e
--- /dev/null
+++ b/tests/queries/0_stateless/data_ua_parser/browser.yaml
@@ -0,0 +1,1409 @@
+- family_replacement: $1
+  regex: (GeoEvent Server) (\d+)(?:\.(\d+)(?:\.(\d+)|)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: '(ArcGIS Pro)(?: (\d+)\.(\d+)\.([^ ]+)|)'
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: ArcMap
+  regex: ArcGIS Client Using WinInet
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Operations Dashboard for ArcGIS
+  regex: (OperationsDashboard)-(?:Windows)-(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: ArcGIS Earth
+  regex: (arcgisearth)/(\d+)\.(\d+)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: ArcGIS Earth
+  regex: com.esri.(earth).phone/(\d+)\.(\d+)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Explorer for ArcGIS
+  regex: (arcgis-explorer)/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Collector for ArcGIS
+  regex: arcgis-(collector|aurora)/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Workforce for ArcGIS
+  regex: (arcgis-workforce)/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1 for ArcGIS
+  regex: (Collector|Explorer|Workforce)-(?:Android|iOS)-(\d+)\.(\d+)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1 for ArcGIS
+  regex: (Explorer|Collector)/(\d+) CFNetwork
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: ArcGIS Runtime SDK for $1
+  regex: ArcGISRuntime-(Android|iOS|NET|Qt)/(\d+)\.(\d+)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: ArcGIS Runtime SDK for $1
+  regex: ArcGIS\.?(iOS|Android|NET|Qt)(?:-|\.)(\d+)\.(\d+)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: ArcGIS Runtime SDK for $1
+  regex: ArcGIS\.Runtime\.(Qt)\.(\d+)\.(\d+)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ^(Luminary)[Stage]+/(\d+) CFNetwork
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (ESPN)[%20| ]+Radio/(\d+)\.(\d+)\.(\d+) CFNetwork
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: AntennaPod
+  regex: (Antenna)/(\d+) CFNetwork
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (TopPodcasts)Pro/(\d+) CFNetwork
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (MusicDownloader)Lite/(\d+)\.(\d+)\.(\d+) CFNetwork
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ^(.{0,200})-iPad\/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)(?:\.(\d+)|) CFNetwork
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ^(.{0,200})-iPhone/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)(?:\.(\d+)|) CFNetwork
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ^(.{0,200})/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)(?:\.(\d+)|) CFNetwork
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ^(Luminary)/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: ESPN
+  regex: (espn\.go)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: ESPN
+  regex: (espnradio\.com)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: ESPN
+  regex: ESPN APP$
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: AudioBoom
+  regex: (audioboom\.com)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ' (Rivo) RHYTHM'
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: CFNetwork
+  regex: (CFNetwork)(?:/(\d+)\.(\d+)(?:\.(\d+)|)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: PingdomBot
+  regex: (Pingdom\.com_bot_version_)(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: PingdomBot
+  regex: (PingdomTMS)/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: WebPageTest.org bot
+  regex: ' (PTST)/(\d+)(?:\.(\d+)|)$'
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: X11; (Datanyze); Linux
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: NewRelicPingerBot
+  regex: (NewRelicPinger)/(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Tableau
+  regex: (Tableau)/(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Adobe CreativeCloud
+  regex: AppleWebKit/\d{1,10}\.\d{1,10}.{0,200} Safari.{0,200} (CreativeCloud)/(\d+)\.(\d+).(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Salesforce)(?:.)\/(\d+)\.(\d?)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: StatusCakeBot
+  regex: (\(StatusCake\))
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: FacebookBot
+  regex: (facebookexternalhit)/(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: GooglePlusBot
+  regex: Google.{0,50}/\+/web/snippet
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: GmailImageProxy
+  regex: via ggpht\.com GoogleImageProxy
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: YahooMailProxy
+  regex: YahooMailProxy; https://help\.yahoo\.com/kb/yahoo-mail-proxy-SLN28749\.html
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Twitterbot
+  regex: (Twitterbot)/(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: /((?:Ant-|)Nutch|[A-z]+[Bb]ot|[A-z]+[Ss]pider|Axtaris|fetchurl|Isara|ShopSalad|Tailsweep)[
+    \-](\d+)(?:\.(\d+)|)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: \b(008|Altresium|Argus|BaiduMobaider|BoardReader|DNSGroup|DataparkSearch|EDI|Goodzer|Grub|INGRID|Infohelfer|LinkedInBot|LOOQ|Nutch|OgScrper|Pandora|PathDefender|Peew|PostPost|Steeler|Twitterbot|VSE|WebCrunch|WebZIP|Y!J-BR[A-Z]|YahooSeeker|envolk|sproose|wminer)/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: MSIECrawler
+  regex: (MSIE) (\d+)\.(\d+)([a-z]\d|[a-z]|);.{0,200} MSIECrawler
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (DAVdroid)/(\d+)\.(\d+)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Google-HTTP-Java-Client|Apache-HttpClient|PostmanRuntime|Go-http-client|scalaj-http|http%20client|Python-urllib|HttpMonitor|TLSProber|WinHTTP|JNLP|okhttp|aihttp|reqwest|axios|unirest-(?:java|python|ruby|nodejs|php|net))(?:[
+    /](\d+)(?:\.(\d+)|)(?:\.(\d+)|)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Pinterestbot
+  regex: (Pinterest(?:bot|))/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)[;\s(]+\+https://www.pinterest.com/bot.html
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: '(CSimpleSpider|Cityreview Robot|CrawlDaddy|CrawlFire|Finderbots|Index crawler|Job
+    Roboter|KiwiStatus Spider|Lijit Crawler|QuerySeekerSpider|ScollSpider|Trends Crawler|USyd-NLP-Spider|SiteCat
+    Webbot|BotName\/\$BotVersion|123metaspider-Bot|1470\.net crawler|50\.nu|8bo Crawler
+    Bot|Aboundex|Accoona-[A-z]{1,30}-Agent|AdsBot-Google(?:-[a-z]{1,30}|)|altavista|AppEngine-Google|archive.{0,30}\.org_bot|archiver|Ask
+    Jeeves|[Bb]ai[Dd]u[Ss]pider(?:-[A-Za-z]{1,30})(?:-[A-Za-z]{1,30}|)|bingbot|BingPreview|blitzbot|BlogBridge|Bloglovin|BoardReader
+    Blog Indexer|BoardReader Favicon Fetcher|boitho.com-dc|BotSeer|BUbiNG|\b\w{0,30}favicon\w{0,30}\b|\bYeti(?:-[a-z]{1,30}|)|Catchpoint(?:
+    bot|)|[Cc]harlotte|Checklinks|clumboot|Comodo HTTP\(S\) Crawler|Comodo-Webinspector-Crawler|ConveraCrawler|CRAWL-E|CrawlConvera|Daumoa(?:-feedfetcher|)|Feed
+    Seeker Bot|Feedbin|findlinks|Flamingo_SearchEngine|FollowSite Bot|furlbot|Genieo|gigabot|GomezAgent|gonzo1|(?:[a-zA-Z]{1,30}-|)Googlebot(?:-[a-zA-Z]{1,30}|)|Google
+    SketchUp|grub-client|gsa-crawler|heritrix|HiddenMarket|holmes|HooWWWer|htdig|ia_archiver|ICC-Crawler|Icarus6j|ichiro(?:/mobile|)|IconSurf|IlTrovatore(?:-Setaccio|)|InfuzApp|Innovazion
+    Crawler|InternetArchive|IP2[a-z]{1,30}Bot|jbot\b|KaloogaBot|Kraken|Kurzor|larbin|LEIA|LesnikBot|Linguee
+    Bot|LinkAider|LinkedInBot|Lite Bot|Llaut|lycos|Mail\.RU_Bot|masscan|masidani_bot|Mediapartners-Google|Microsoft
+    .{0,30} Bot|mogimogi|mozDex|MJ12bot|msnbot(?:-media {0,2}|)|msrbot|Mtps Feed Aggregation
+    System|netresearch|Netvibes|NewsGator[^/]{0,30}|^NING|Nutch[^/]{0,30}|Nymesis|ObjectsSearch|OgScrper|Orbiter|OOZBOT|PagePeeker|PagesInventory|PaxleFramework|Peeplo
+    Screenshot Bot|PHPCrawl|PlantyNet_WebRobot|Pompos|Qwantify|Read%20Later|Reaper|RedCarpet|Retreiver|Riddler|Rival
+    IQ|scooter|Scrapy|Scrubby|searchsight|seekbot|semanticdiscovery|SemrushBot|Simpy|SimplePie|SEOstats|SimpleRSS|SiteCon|Slackbot-LinkExpanding|Slack-ImgProxy|Slurp|snappy|Speedy
+    Spider|Squrl Java|Stringer|TheUsefulbot|ThumbShotsBot|Thumbshots\.ru|Tiny Tiny
+    RSS|Twitterbot|WhatsApp|URL2PNG|Vagabondo|VoilaBot|^vortex|Votay bot|^voyager|WASALive.Bot|Web-sniffer|WebThumb|WeSEE:[A-z]{1,30}|WhatWeb|WIRE|WordPress|Wotbox|www\.almaden\.ibm\.com|Xenu(?:.s|)
+    Link Sleuth|Xerka [A-z]{1,30}Bot|yacy(?:bot|)|YahooSeeker|Yahoo! Slurp|Yandex\w{1,30}|YodaoBot(?:-[A-z]{1,30}|)|YottaaMonitor|Yowedo|^Zao|^Zao-Crawler|ZeBot_www\.ze\.bz|ZooShot|ZyBorg|ArcGIS
+    Hub Indexer)(?:[ /]v?(\d+)(?:\.(\d+)(?:\.(\d+)|)|)|)'
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: \b(Boto3?|JetS3t|aws-(?:cli|sdk-(?:cpp|go|java|nodejs|ruby2?|dotnet-(?:\d{1,2}|core)))|s3fs)/(\d+)\.(\d+)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (FME)\/(\d+\.\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (QGIS)\/(\d)\.?0?(\d{1,2})\.?0?(\d{1,2})
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (JOSM)/(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Tygron Platform) \((\d+)\.(\d+)\.(\d+(?:\.\d+| RC \d+\.\d+))
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Facebook Messenger
+  regex: \[(FBAN/MessengerForiOS|FB_IAB/MESSENGER);FBAV/(\d+)(?:\.(\d+)(?:\.(\d+)(?:\.(\d+)|)|)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Facebook
+  regex: \[FB.{0,300};(FBAV)/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Facebook
+  regex: \[FB.{0,300};
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ^.{0,200}?(?:\/[A-Za-z0-9\.]{0,50}|) {0,2}([A-Za-z0-9 \-_\!\[\]:]{0,50}(?:[Aa]rchiver|[Ii]ndexer|[Ss]craper|[Bb]ot|[Ss]pider|[Cc]rawl[a-z]{0,50}))[/
+    ](\d+)(?:\.(\d+)(?:\.(\d+)|)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ^.{0,200}?((?:[A-Za-z][A-Za-z0-9 -]{0,50}|)[^C][^Uu][Bb]ot)\b(?:(?:[ /]|
+    v)(\d+)(?:\.(\d+)|)(?:\.(\d+)|)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: '^.{0,200}?((?:[A-z0-9]{1,50}|[A-z\-]{1,50} ?|)(?: the |)(?:[Ss][Pp][Ii][Dd][Ee][Rr]|[Ss]crape|[Cc][Rr][Aa][Ww][Ll])[A-z0-9]{0,50})(?:(?:[
+    /]| v)(\d+)(?:\.(\d+)|)(?:\.(\d+)|)|)'
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (HbbTV)/(\d+)\.(\d+)\.(\d+) \(
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Chimera|SeaMonkey|Camino|Waterfox)/(\d+)\.(\d+)\.?([ab]?\d+[a-z]*|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Sailfish Browser
+  regex: (SailfishBrowser)/(\d+)\.(\d+)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: \[(Pinterest)/[^\]]{1,50}\]
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: '(Pinterest)(?: for Android(?: Tablet|)|)/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)'
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: Mozilla.{1,200}Mobile.{1,100}(Instagram).(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: Mozilla.{1,200}Mobile.{1,100}(Flipboard).(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: Mozilla.{1,200}Mobile.{1,100}(Flipboard-Briefing).(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: Mozilla.{1,200}Mobile.{1,100}(Onefootball)\/Android.(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Snapchat)\/(\d+)\.(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Twitter
+  regex: (Twitter for (?:iPhone|iPad)|TwitterAndroid)(?:\/(\d+)\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Spider
+  regex: Mozilla.{1,100}Mobile.{1,100}AspiegelBot
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Spider
+  regex: AspiegelBot
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Basilisk
+  regex: (Firefox)/(\d+)\.(\d+) Basilisk/(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Pale Moon
+  regex: (PaleMoon)/(\d+)\.(\d+)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Firefox Mobile
+  regex: (Fennec)/(\d+)\.(\d+)\.?([ab]?\d+[a-z]*)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Firefox Mobile
+  regex: (Fennec)/(\d+)\.(\d+)(pre)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Firefox Mobile
+  regex: (Fennec)/(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Firefox Mobile
+  regex: (?:Mobile|Tablet);.{0,200}(Firefox)/(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Firefox ($1)
+  regex: (Namoroka|Shiretoko|Minefield)/(\d+)\.(\d+)\.(\d+(?:pre|))
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Firefox Alpha
+  regex: (Firefox)/(\d+)\.(\d+)(a\d+[a-z]*)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Firefox Beta
+  regex: (Firefox)/(\d+)\.(\d+)(b\d+[a-z]*)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Firefox Alpha
+  regex: (Firefox)-(?:\d+\.\d+|)/(\d+)\.(\d+)(a\d+[a-z]*)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Firefox Beta
+  regex: (Firefox)-(?:\d+\.\d+|)/(\d+)\.(\d+)(b\d+[a-z]*)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Firefox ($1)
+  regex: (Namoroka|Shiretoko|Minefield)/(\d+)\.(\d+)([ab]\d+[a-z]*|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: MicroB
+  regex: (Firefox).{0,200}Tablet browser (\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (MozillaDeveloperPreview)/(\d+)\.(\d+)([ab]\d+[a-z]*|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Firefox iOS
+  regex: (FxiOS)/(\d+)\.(\d+)(\.(\d+)|)(\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Flock)/(\d+)\.(\d+)(b\d+?)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (RockMelt)/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Netscape
+  regex: (Navigator)/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Netscape
+  regex: (Navigator)/(\d+)\.(\d+)([ab]\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Netscape
+  regex: (Netscape6)/(\d+)\.(\d+)\.?([ab]?\d+|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: My Internet Browser
+  regex: (MyIBrow)/(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: UC Browser
+  regex: (UC? ?Browser|UCWEB|U3)[ /]?(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Opera Tablet).{0,200}Version/(\d+)\.(\d+)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Opera Mini)(?:/att|)/?(\d+|)(?:\.(\d+)|)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Opera Mobile
+  regex: (Opera)/.{1,100}Opera Mobi.{1,100}Version/(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Opera Mobile
+  regex: (Opera)/(\d+)\.(\d+).{1,100}Opera Mobi
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Opera Mobile
+  regex: Opera Mobi.{1,100}(Opera)(?:/|\s+)(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Opera Mobile
+  regex: Opera Mobi
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Opera)/9.80.{0,200}Version/(\d+)\.(\d+)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Opera Mobile
+  regex: (?:Mobile Safari).{1,300}(OPR)/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Opera
+  regex: (?:Chrome).{1,300}(OPR)/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Opera Coast
+  regex: (Coast)/(\d+).(\d+).(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Opera Mini
+  regex: (OPiOS)/(\d+).(\d+).(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Opera Neon
+  regex: Chrome/.{1,200}( MMS)/(\d+).(\d+).(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: webOS Browser
+  regex: (hpw|web)OS/(\d+)\.(\d+)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: LuaKit
+  regex: (luakit)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Snowshoe)/(\d+)\.(\d+).(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: Gecko/\d+ (Lightning)/(\d+)\.(\d+)\.?((?:[ab]?\d+[a-z]*)|(?:\d*))
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Swiftfox
+  regex: (Firefox)/(\d+)\.(\d+)\.(\d+(?:pre|)) \(Swiftfox\)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Swiftfox
+  regex: (Firefox)/(\d+)\.(\d+)([ab]\d+[a-z]*|) \(Swiftfox\)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Rekonq
+  regex: (rekonq)/(\d+)\.(\d+)(?:\.(\d+)|) Safari
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Rekonq
+  regex: rekonq
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Conkeror
+  regex: (conkeror|Conkeror)/(\d+)\.(\d+)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Konqueror
+  regex: (konqueror)/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (WeTab)-Browser
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Comodo Dragon
+  regex: (Comodo_Dragon)/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Symphony) (\d+).(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: NetFront NX
+  regex: PLAYSTATION 3.{1,200}WebKit
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: NetFront
+  regex: PLAYSTATION 3
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: NetFront
+  regex: (PlayStation Portable)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: NetFront NX
+  regex: (PlayStation Vita)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: NetFront NX
+  regex: AppleWebKit.{1,200} (NX)/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: NetFront NX
+  regex: (Nintendo 3DS)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Amazon Silk
+  regex: (Silk)/(\d+)\.(\d+)(?:\.([0-9\-]+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Puffin)/(\d+)\.(\d+)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Edge Mobile
+  regex: Windows Phone .{0,200}(Edge)/(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Edge Mobile
+  regex: (EdgiOS|EdgA)/(\d+)\.(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Samsung Internet
+  regex: (SamsungBrowser)/(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: "Seznam prohl\xED\u017Ee\u010D"
+  regex: (SznProhlizec)/(\d+)\.(\d+)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Coc Coc
+  regex: (coc_coc_browser)/(\d+)\.(\d+)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Baidu Browser
+  regex: (baidubrowser)[/\s](\d+)(?:\.(\d+)|)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Baidu Explorer
+  regex: (FlyFlow)/(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Maxthon
+  regex: (MxBrowser)/(\d+)\.(\d+)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Crosswalk)/(\d+)\.(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: LINE
+  regex: (Line)/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: MiuiBrowser
+  regex: (MiuiBrowser)/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Mint Browser
+  regex: (Mint Browser)/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: TopBuzz
+  regex: (TopBuzz)/(\d+).(\d+).(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Google
+  regex: Mozilla.{1,200}Android.{1,200}(GSA)/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: QQ Browser Mini
+  regex: (MQQBrowser/Mini)(?:(\d+)(?:\.(\d+)|)(?:\.(\d+)|)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: QQ Browser Mobile
+  regex: (MQQBrowser)(?:/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: QQ Browser
+  regex: (QQBrowser)(?:/(\d+)(?:\.(\d+)\.(\d+)(?:\.(\d+)|)|)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: DuckDuckGo Mobile
+  regex: Mobile.{0,200}(DuckDuckGo)/(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Tenta Browser
+  regex: (Tenta/)(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Chrome Mobile WebView
+  regex: Version/.{1,300}(Chrome)/(\d+)\.(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Chrome Mobile WebView
+  regex: ; wv\).{1,300}(Chrome)/(\d+)\.(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Chrome Mobile
+  regex: (CrMo)/(\d+)\.(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Chrome Mobile iOS
+  regex: (CriOS)/(\d+)\.(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Chrome Mobile
+  regex: (Chrome)/(\d+)\.(\d+)\.(\d+)\.(\d+) Mobile(?:[ /]|$)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Chrome Mobile
+  regex: ' Mobile .{1,300}(Chrome)/(\d+)\.(\d+)\.(\d+)\.(\d+)'
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Chrome Frame
+  regex: (chromeframe)/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Tizen Browser
+  regex: (SLP Browser)/(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Sogou Explorer
+  regex: (SE 2\.X) MetaSr (\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: RackspaceBot
+  regex: (Rackspace Monitoring)/(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (PRTG Network Monitor)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (PyAMF)/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Yandex Browser
+  regex: (YaBrowser)/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Mail.ru Chromium Browser
+  regex: (Chrome)/(\d+)\.(\d+)\.(\d+).{0,100} MRCHROME
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (AOL) (\d+)\.(\d+); AOLBuild (\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (PodCruncher|Downcast)[ /]?(\d+)(?:\.(\d+)|)(?:\.(\d+)|)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ' (BoxNotes)/(\d+)\.(\d+)\.(\d+)'
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Whale
+  regex: (Whale)/(\d+)\.(\d+)\.(\d+)\.(\d+) Mobile(?:[ /]|$)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Whale
+  regex: (Whale)/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (1Password)/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Ghost)/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: PAN (GlobalProtect)/(\d+)\.(\d+)\.(\d+) .{1,100} \(X11; Linux x86_64\)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Slack Desktop Client
+  regex: (Slack_SSB)/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: HipChat Desktop Client
+  regex: (HipChat)/?(\d+|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: \b(MobileIron|FireWeb|Jasmine|ANTGalio|Midori|Fresco|Lobo|PaleMoon|Maxthon|Lynx|OmniWeb|Dillo|Camino|Demeter|Fluid|Fennec|Epiphany|Shiira|Sunrise|Spotify|Flock|Netscape|Lunascape|WebPilot|NetFront|Netfront|Konqueror|SeaMonkey|Kazehakase|Vienna|Iceape|Iceweasel|IceWeasel|Iron|K-Meleon|Sleipnir|Galeon|GranParadiso|Opera
+    Mini|iCab|NetNewsWire|ThunderBrowse|Iris|UP\.Browser|Bunjalloo|Google Earth|Raven
+    for Mac|Openwave|MacOutlook|Electron|OktaMobile)/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Outlook
+  regex: Microsoft Office Outlook 12\.\d+\.\d+|MSOffice 12
+  v1_replacement: '2007'
+  v2_replacement: $3
+- family_replacement: Outlook
+  regex: Microsoft Outlook 14\.\d+\.\d+|MSOffice 14
+  v1_replacement: '2010'
+  v2_replacement: $3
+- family_replacement: Outlook
+  regex: Microsoft Outlook 15\.\d+\.\d+
+  v1_replacement: '2013'
+  v2_replacement: $3
+- family_replacement: Outlook
+  regex: Microsoft Outlook (?:Mail )?16\.\d+\.\d+|MSOffice 16
+  v1_replacement: '2016'
+  v2_replacement: $3
+- family_replacement: $1
+  regex: Microsoft Office (Word) 2014
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Windows Live Mail
+  regex: Outlook-Express\/7\.0
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Airmail) (\d+)\.(\d+)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Thunderbird
+  regex: (Thunderbird)/(\d+)\.(\d+)(?:\.(\d+(?:pre|))|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Postbox
+  regex: (Postbox)/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Barca
+  regex: (Barca(?:Pro)?)/(\d+)\.(\d+)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Lotus Notes
+  regex: (Lotus-Notes)/(\d+)\.(\d+)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Superhuman
+  regex: Superhuman
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Vivaldi)/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Edge
+  regex: (Edge?)/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Brave
+  regex: (brave)/(\d+)\.(\d+)\.(\d+) Chrome
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Iron
+  regex: (Chrome)/(\d+)\.(\d+)\.(\d+)[\d.]{0,100} Iron[^/]
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: '\b(Dolphin)(?: |HDCN/|/INT\-)(\d+)\.(\d+)(?:\.(\d+)|)'
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (HeadlessChrome)(?:/(\d+)\.(\d+)\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Evolution)/(\d+)\.(\d+)\.(\d+\.\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (RCM CardDAV plugin)/(\d+)\.(\d+)\.(\d+(?:-dev|))
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (bingbot|Bolt|AdobeAIR|Jasmine|IceCat|Skyfire|Midori|Maxthon|Lynx|Arora|IBrowse|Dillo|Camino|Shiira|Fennec|Phoenix|Flock|Netscape|Lunascape|Epiphany|WebPilot|Opera
+    Mini|Opera|NetFront|Netfront|Konqueror|Googlebot|SeaMonkey|Kazehakase|Vienna|Iceape|Iceweasel|IceWeasel|Iron|K-Meleon|Sleipnir|Galeon|GranParadiso|iCab|iTunes|MacAppStore|NetNewsWire|Space
+    Bison|Stainless|Orca|Dolfin|BOLT|Minimo|Tizen Browser|Polaris|Abrowser|Planetweb|ICE
+    Browser|mDolphin|qutebrowser|Otter|QupZilla|MailBar|kmail2|YahooMobileMail|ExchangeWebServices|ExchangeServicesClient|Dragon|Outlook-iOS-Android)/(\d+)\.(\d+)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Chromium|Chrome)/(\d+)\.(\d+)(?:\.(\d+)|)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: IE Mobile
+  regex: (IEMobile)[ /](\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (BacaBerita App)\/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ^(bPod|Pocket Casts|Player FM)$
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ^(AlexaMediaPlayer|VLC)/(\d+)\.(\d+)\.([^.\s]+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ^(AntennaPod|WMPlayer|Zune|Podkicker|Radio|ExoPlayerDemo|Overcast|PocketTunes|NSPlayer|okhttp|DoggCatcher|QuickNews|QuickTime|Peapod|Podcasts|GoldenPod|VLC|Spotify|Miro|MediaGo|Juice|iPodder|gPodder|Banshee)/(\d+)\.(\d+)(?:\.(\d+)|)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ^(Peapod|Liferea)/([^.\s]+)\.([^.\s]+|)\.?([^.\s]+|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ^(bPod|Player FM) BMID/(\S+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: '^(Podcast ?Addict)/v(\d+) '
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: PodcastAddict
+  regex: '^(Podcast ?Addict) '
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Replay) AV
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (VOX) Music Player
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (CITA) RSS Aggregator/(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Pocket Casts)$
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Player FM)$
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (LG Player|Doppler|FancyMusic|MediaMonkey|Clementine) (\d+)\.(\d+)\.?([^.\s]+|)\.?([^.\s]+|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (philpodder)/(\d+)\.(\d+)\.?([^.\s]+|)\.?([^.\s]+|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Player FM|Pocket Casts|DoggCatcher|Spotify|MediaMonkey|MediaGo|BashPodder)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (QuickTime)\.(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Kinoma)(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: FancyMusic
+  regex: (Fancy) Cloud Music (\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: ESPN
+  regex: EspnDownloadManager
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: '(ESPN) Radio (\d+)\.(\d+)(?:\.(\d+)|) ?(?:rv:(\d+)|) '
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (podracer|jPodder) v ?(\d+)\.(\d+)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (ZDM)/(\d+)\.(\d+)[; ]?
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Zune|BeyondPod) (\d+)(?:\.(\d+)|)[\);]
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (WMPlayer)/(\d+)\.(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: WMPlayer
+  regex: ^(Lavf)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ^(RSSRadio)[ /]?(\d+|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: RSSRadio
+  regex: (RSS_Radio) (\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Podkicker
+  regex: (Podkicker) \S+/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ^(HTC) Streaming Player \S+ / \S+ / \S+ / (\d+)\.(\d+)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ^(Stitcher)/iOS
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ^(Stitcher)/Android
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ^(VLC) .{0,200}version (\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ' (VLC) for'
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: VLC
+  regex: (vlc)/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ^(foobar)\S{1,10}/(\d+)\.(\d+|)\.?([\da-z]+|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ^(Clementine)\S{1,10} (\d+)\.(\d+|)\.?(\d+|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Amarok
+  regex: (amarok)/(\d+)\.(\d+|)\.?(\d+|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Custom)-Feed Reader
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (iRider|Crazy Browser|SkipStone|iCab|Lunascape|Sleipnir|Maemo Browser) (\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (iCab|Lunascape|Opera|Android|Jasmine|Polaris|Microsoft SkyDriveSync|The
+    Bat!) (\d+)(?:\.(\d+)|)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Kindle)/(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Android) Donut
+  v1_replacement: '1'
+  v2_replacement: '2'
+- family_replacement: $1
+  regex: (Android) Eclair
+  v1_replacement: '2'
+  v2_replacement: '1'
+- family_replacement: $1
+  regex: (Android) Froyo
+  v1_replacement: '2'
+  v2_replacement: '2'
+- family_replacement: $1
+  regex: (Android) Gingerbread
+  v1_replacement: '2'
+  v2_replacement: '3'
+- family_replacement: $1
+  regex: (Android) Honeycomb
+  v1_replacement: '3'
+  v2_replacement: $3
+- family_replacement: IE Large Screen
+  regex: (MSIE) (\d+)\.(\d+).{0,100}XBLWP7
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Nextcloud)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (mirall)/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Owncloud
+  regex: (ownCloud-android)/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Skype
+  regex: (OC)/(\d+)\.(\d+)\.(\d+)\.(\d+) \(Skype for Business\)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: OpenVAS Scanner
+  regex: (OpenVAS)(?:-VT)?(?:[ \/](\d+)(?:\.(\d+)|)(?:\.(\d+)|)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (AnyConnect)\/(\d+)(?:\.(\d+)(?:\.(\d+)|)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Monitis
+  regex: compatible; monitis
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Obigo)InternetBrowser
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Obigo)\-Browser
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Obigo
+  regex: (Obigo|OBIGO)[^\d]*(\d+)(?:.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Maxthon
+  regex: (MAXTHON|Maxthon) (\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Maxthon|MyIE2|Uzbl|Shiira)
+  v1_replacement: '0'
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (BrowseX) \((\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: NCSA Mosaic
+  regex: (NCSA_Mosaic)/(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Polaris
+  regex: (POLARIS)/(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Polaris
+  regex: (Embider)/(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Bon Echo
+  regex: (BonEcho)/(\d+)\.(\d+)\.?([ab]?\d+|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: TopBuzz
+  regex: (TopBuzz) com.alex.NewsMaster/(\d+).(\d+).(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: TopBuzz
+  regex: (TopBuzz) com.mobilesrepublic.newsrepublic/(\d+).(\d+).(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: TopBuzz
+  regex: (TopBuzz) com.topbuzz.videoen/(\d+).(\d+).(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Google
+  regex: (iPod|iPhone|iPad).{1,200}GSA/(\d+)\.(\d+)\.(\d+)(?:\.(\d+)|) Mobile
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Mobile Safari
+  regex: (iPod|iPhone|iPad).{1,200}Version/(\d+)\.(\d+)(?:\.(\d+)|).{1,200}[ +]Safari
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Mobile Safari UI/WKWebView
+  regex: (iPod|iPod touch|iPhone|iPad);.{0,30}CPU.{0,30}OS[ +](\d+)_(\d+)(?:_(\d+)|).{0,30}
+    AppleNews\/\d+\.\d+(?:\.\d+|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Mobile Safari UI/WKWebView
+  regex: (iPod|iPhone|iPad).{1,200}Version/(\d+)\.(\d+)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Mobile Safari
+  regex: (iPod|iPod touch|iPhone|iPad).{0,200} Safari
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Mobile Safari UI/WKWebView
+  regex: (iPod|iPod touch|iPhone|iPad)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Apple $1 App
+  regex: (Watch)(\d+),(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Outlook-iOS)/\d+\.\d+\.prod\.iphone \((\d+)\.(\d+)\.(\d+)\)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (AvantGo) (\d+).(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: ONE Browser
+  regex: (OneBrowser)/(\d+).(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Avant)
+  v1_replacement: '1'
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (QtCarBrowser)
+  v1_replacement: '1'
+  v2_replacement: $3
+- family_replacement: iBrowser Mini
+  regex: ^(iBrowser/Mini)(\d+).(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ^(iBrowser|iRAPP)/(\d+).(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Nokia Services (WAP) Browser
+  regex: ^(Nokia)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Nokia Browser
+  regex: (NokiaBrowser)/(\d+)\.(\d+).(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Nokia Browser
+  regex: (NokiaBrowser)/(\d+)\.(\d+).(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Nokia Browser
+  regex: (NokiaBrowser)/(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Nokia Browser
+  regex: (BrowserNG)/(\d+)\.(\d+).(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Nokia Browser
+  regex: (Series60)/5\.0
+  v1_replacement: '7'
+  v2_replacement: '0'
+- family_replacement: Nokia OSS Browser
+  regex: (Series60)/(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Ovi Browser
+  regex: (S40OviBrowser)/(\d+)\.(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Nokia)[EN]?(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: BlackBerry WebKit
+  regex: (PlayBook).{1,200}RIM Tablet OS (\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: BlackBerry WebKit
+  regex: (Black[bB]erry|BB10).{1,200}Version/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: BlackBerry
+  regex: (Black[bB]erry)\s?(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (OmniWeb)/v(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Palm Blazer
+  regex: (Blazer)/(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Palm Pre
+  regex: (Pre)/(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (ELinks)/(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (ELinks) \((\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Links) \((\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (QtWeb) Internet Browser/(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (PhantomJS)/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: WebKit Nightly
+  regex: (AppleWebKit)/(\d+)(?:\.(\d+)|)\+ .{0,200} Safari
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Safari
+  regex: (Version)/(\d+)\.(\d+)(?:\.(\d+)|).{0,100}Safari/
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Safari)/\d+
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (OLPC)/Update(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (OLPC)/Update()\.(\d+)
+  v1_replacement: '0'
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (SEMC\-Browser)/(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Teleca Browser
+  regex: (Teleca)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Phantom Browser
+  regex: (Phantom)/V(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: IE
+  regex: (Trident)/(7|8)\.(0)
+  v1_replacement: '11'
+  v2_replacement: $3
+- family_replacement: IE
+  regex: (Trident)/(6)\.(0)
+  v1_replacement: '10'
+  v2_replacement: $3
+- family_replacement: IE
+  regex: (Trident)/(5)\.(0)
+  v1_replacement: '9'
+  v2_replacement: $3
+- family_replacement: IE
+  regex: (Trident)/(4)\.(0)
+  v1_replacement: '8'
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Espial)/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Apple Mail
+  regex: (AppleWebKit)/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Firefox)/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Firefox)/(\d+)\.(\d+)(pre|[ab]\d+[a-z]*|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: IE
+  regex: ([MS]?IE) (\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Python Requests
+  regex: (python-requests)/(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: \b(Windows-Update-Agent|WindowsPowerShell|Microsoft-CryptoAPI|SophosUpdateManager|SophosAgent|Debian
+    APT-HTTP|Ubuntu APT-HTTP|libcurl-agent|libwww-perl|urlgrabber|curl|PycURL|Wget|wget2|aria2|Axel|OpenBSD
+    ftp|lftp|jupdate|insomnia|fetch libfetch|akka-http|got|CloudCockpitBackend|ReactorNetty|axios|Jersey|Vert.x-WebClient|Apache-CXF|Go-CF-client|go-resty|AHC)(?:[
+    /](\d+)(?:\.(\d+)|)(?:\.(\d+)|)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: CloudFoundry
+  regex: ^(cf)\/(\d+)\.(\d+)\.(\S+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ^(sap-leonardo-iot-sdk-nodejs) \/ (\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ^(SAP NetWeaver Application Server) \(1.0;(\d{1})(\d{2})\)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: HTTPClient
+  regex: ^(\w+-HTTPClient)\/(\d+)\.(\d+)-(\S+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ^(go-cli)\s(\d+)\.(\d+).(\S+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ^(Java-EurekaClient|Java-EurekaClient-Replication|HTTPClient|lua-resty-http)\/v?(\d+)\.(\d+)\.?(\d*)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ^(ping-service|sap xsuaa|Node-oauth|Site24x7|SAP CPI|JAEGER_SECURITY)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Python aiohttp
+  regex: (Python/3\.\d{1,3} aiohttp)/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Java)[/ ]?\d+\.(\d+)\.(\d+)[_-]*([a-zA-Z0-9]+|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (minio-go)/v(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ^(ureq)[/ ](\d+)\.(\d+).(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ^(http\.rb)/(\d+)\.(\d+).(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ^(GuzzleHttp)/(\d+)\.(\d+).(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ^(grab)\b
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ^(Cyberduck)/(\d+)\.(\d+)\.(\d+)(?:\.\d+|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ^(S3 Browser) (\d+)[.-](\d+)[.-](\d+)(?:\s*https?://s3browser\.com|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (S3Gof3r)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: \b(ibm-cos-sdk-(?:core|java|js|python))/(\d+)\.(\d+)(?:\.(\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ^(rusoto)/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ^(rclone)/v(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: ^(Roku)/DVP-(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: Kurio App
+  regex: (Kurio)\/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: '^(Box(?: Sync)?)/(\d+)\.(\d+)\.(\d+)'
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: ViaFree
+  regex: ^(ViaFree|Viafree)-(?:tvOS-)?[A-Z]{2}/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Transmit)/(\d+)\.(\d+)\.(\d+)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: (Download Master)
+  v1_replacement: $2
+  v2_replacement: $3
+- family_replacement: $1
+  regex: \b(HTTrack) (\d+)\.(\d+)(?:[\.\-](\d+)|)
+  v1_replacement: $2
+  v2_replacement: $3
diff --git a/tests/queries/0_stateless/data_ua_parser/device.yaml b/tests/queries/0_stateless/data_ua_parser/device.yaml
new file mode 100644
index 00000000000..bf5cfcc84cd
--- /dev/null
+++ b/tests/queries/0_stateless/data_ua_parser/device.yaml
@@ -0,0 +1,2651 @@
+- brand_replacement: Spider
+  device_replacement: Spider
+  model_replacement: Smartphone
+  regex: ^.{0,100}?(?:(?:iPhone|Windows CE|Windows Phone|Android).{0,300}(?:(?:Bot|Yeti)-Mobile|YRSpider|BingPreview|bots?/\d|(?:bot|spider)\.html)|AdsBot-Google-Mobile.{0,200}iPhone)
+  regex_flag: i
+- brand_replacement: Spider
+  device_replacement: Spider
+  model_replacement: Feature Phone
+  regex: ^.{0,100}?(?:DoCoMo|\bMOT\b|\bLG\b|Nokia|Samsung|SonyEricsson).{0,200}(?:(?:Bot|Yeti)-Mobile|bots?/\d|(?:bot|crawler)\.html|(?:jump|google|Wukong)bot|ichiro/mobile|/spider|YahooSeeker)
+  regex_flag: i
+- brand_replacement: Spider
+  device_replacement: Spider
+  model_replacement: $1
+  regex: ' PTST/\d+(?:\.\d+|)$'
+- brand_replacement: Spider
+  device_replacement: Spider
+  model_replacement: $1
+  regex: X11; Datanyze; Linux
+- brand_replacement: Spider
+  device_replacement: Spider
+  model_replacement: Smartphone
+  regex: Mozilla.{1,100}Mobile.{1,100}AspiegelBot
+- brand_replacement: Spider
+  device_replacement: Spider
+  model_replacement: Desktop
+  regex: Mozilla.{0,200}AspiegelBot
+- brand_replacement: $1
+  device_replacement: $1 $2
+  model_replacement: $2
+  regex: \bSmartWatch {0,2}\( {0,2}([^;]{1,200}) {0,2}; {0,2}([^;]{1,200}) {0,2};
+- brand_replacement: $1$2
+  device_replacement: $1 $2
+  model_replacement: $3
+  regex: 'Android Application[^\-]{1,300} - (Sony) ?(Ericsson|) (.{1,200}) \w{1,20}
+    - '
+- brand_replacement: $1
+  device_replacement: $1 $2
+  model_replacement: $2
+  regex: 'Android Application[^\-]{1,300} - (?:HTC|HUAWEI|LGE|LENOVO|MEDION|TCT) (HTC|HUAWEI|LG|LENOVO|MEDION|ALCATEL)[
+    _\-](.{1,200}) \w{1,20} - '
+  regex_flag: i
+- brand_replacement: $1
+  device_replacement: $1 $2
+  model_replacement: $2
+  regex: 'Android Application[^\-]{1,300} - ([^ ]+) (.{1,200}) \w{1,20} - '
+- brand_replacement: 3Q
+  device_replacement: 3Q $1
+  model_replacement: $1
+  regex: '; {0,2}([BLRQ]C\d{4}[A-Z]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: 3Q
+  device_replacement: 3Q $1
+  model_replacement: $1
+  regex: '; {0,2}(?:3Q_)([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Acer
+  device_replacement: $1
+  model_replacement: $1
+  regex: 'Android [34].{0,200}; {0,2}(A100|A101|A110|A200|A210|A211|A500|A501|A510|A511|A700(?:
+    Lite| 3G|)|A701|B1-A71|A1-\d{3}|B1-\d{3}|V360|V370|W500|W500P|W501|W501P|W510|W511|W700|Slider
+    SL101|DA22[^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Acer
+  device_replacement: $1
+  model_replacement: $1
+  regex: '; {0,2}Acer Iconia Tab ([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Acer
+  device_replacement: $1
+  model_replacement: $1
+  regex: '; {0,2}(Z1[1235]0|E320[^/]{0,10}|S500|S510|Liquid[^;/]{0,30}|Iconia A\d+)(?:
+    Build|\) AppleWebKit)'
+- brand_replacement: Acer
+  device_replacement: $1$2
+  model_replacement: $2
+  regex: '; {0,2}(Acer |ACER )([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Advent
+  device_replacement: $1$2
+  model_replacement: $2
+  regex: '; {0,2}(Advent |)(Vega(?:Bean|Comb|)).{0,200}?(?: Build|\) AppleWebKit)'
+- brand_replacement: Ainol
+  device_replacement: $1$2
+  model_replacement: $2
+  regex: '; {0,2}(Ainol |)((?:NOVO|[Nn]ovo)[^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Airis
+  device_replacement: $1
+  model_replacement: $1
+  regex: ; {0,2}AIRIS[ _\-]?([^/;\)]+) {0,2}(?:;|\)|Build)
+  regex_flag: i
+- brand_replacement: Airis
+  device_replacement: $1
+  model_replacement: $1
+  regex: '; {0,2}(OnePAD[^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+  regex_flag: i
+- brand_replacement: Airpad
+  device_replacement: Airpad $1
+  model_replacement: $1
+  regex: '; {0,2}Airpad[ \-]([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Alcatel
+  device_replacement: Alcatel One Touch $2
+  model_replacement: One Touch $2
+  regex: '; {0,2}(one ?touch) (EVO7|T10|T20)(?: Build|\) AppleWebKit)'
+- brand_replacement: Alcatel
+  device_replacement: Alcatel One Touch $1
+  model_replacement: One Touch $1
+  regex: '; {0,2}(?:alcatel[ _]|)(?:(?:one[ _]?touch[ _])|ot[ \-])([^;/]{1,100}?)(?:
+    Build|\) AppleWebKit)'
+  regex_flag: i
+- brand_replacement: $1
+  device_replacement: $1 $2
+  model_replacement: $2
+  regex: '; {0,2}(TCL)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Alcatel
+  device_replacement: Alcatel $1
+  model_replacement: $1
+  regex: '; {0,2}(Vodafone Smart II|Optimus_Madrid)(?: Build|\) AppleWebKit)'
+- brand_replacement: Alcatel
+  device_replacement: Alcatel One Touch 998
+  model_replacement: One Touch 998
+  regex: '; {0,2}BASE_Lutea_3(?: Build|\) AppleWebKit)'
+- brand_replacement: Alcatel
+  device_replacement: Alcatel One Touch 918D
+  model_replacement: One Touch 918D
+  regex: '; {0,2}BASE_Varia(?: Build|\) AppleWebKit)'
+- brand_replacement: Allfine
+  device_replacement: $1
+  model_replacement: $1
+  regex: '; {0,2}((?:FINE|Fine)\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Allview
+  device_replacement: $1$2
+  model_replacement: $2
+  regex: '; {0,2}(ALLVIEW[ _]?|Allview[ _]?)((?:Speed|SPEED).{0,200}?)(?: Build|\)
+    AppleWebKit)'
+- brand_replacement: Allview
+  device_replacement: $1$2
+  model_replacement: $2
+  regex: '; {0,2}(ALLVIEW[ _]?|Allview[ _]?|)(AX1_Shine|AX2_Frenzy)(?: Build|\) AppleWebKit)'
+- brand_replacement: Allview
+  device_replacement: $1$2
+  model_replacement: $2
+  regex: '; {0,2}(ALLVIEW[ _]?|Allview[ _]?)([^;/]*?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Allwinner
+  device_replacement: $1
+  model_replacement: $1
+  regex: '; {0,2}(A13-MID)(?: Build|\) AppleWebKit)'
+- brand_replacement: Allwinner
+  device_replacement: $1 $2
+  model_replacement: $1
+  regex: '; {0,2}(Allwinner)[ _\-]?([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Amaway
+  device_replacement: $1
+  model_replacement: $1
+  regex: '; {0,2}(A651|A701B?|A702|A703|A705|A706|A707|A711|A712|A713|A717|A722|A785|A801|A802|A803|A901|A902|A1002|A1003|A1006|A1007|A9701|A9703|Q710|Q80)(?:
+    Build|\) AppleWebKit)'
+- brand_replacement: Amoi
+  device_replacement: Amoi $1
+  model_replacement: $1
+  regex: '; {0,2}(?:AMOI|Amoi)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Amoi
+  device_replacement: Amoi $1
+  model_replacement: $1
+  regex: ^(?:AMOI|Amoi)[ _]([^;/]{1,100}?) Linux
+- brand_replacement: Aoc
+  device_replacement: $1
+  model_replacement: $1
+  regex: '; {0,2}(MW(?:0[789]|10)[^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Aoson
+  device_replacement: Aoson $1
+  model_replacement: $1
+  regex: '; {0,2}(G7|M1013|M1015G|M11[CG]?|M-?12[B]?|M15|M19[G]?|M30[ACQ]?|M31[GQ]|M32|M33[GQ]|M36|M37|M38|M701T|M710|M712B|M713|M715G|M716G|M71(?:G|GS|T|)|M72[T]?|M73[T]?|M75[GT]?|M77G|M79T|M7L|M7LN|M81|M810|M81T|M82|M92|M92KS|M92S|M717G|M721|M722G|M723|M725G|M739|M785|M791|M92SK|M93D)(?:
+    Build|\) AppleWebKit)'
+- brand_replacement: Aoson
+  device_replacement: Aoson $1
+  model_replacement: $1
+  regex: '; {0,2}Aoson ([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+  regex_flag: i
+- brand_replacement: Apanda
+  device_replacement: Apanda $1
+  model_replacement: $1
+  regex: '; {0,2}[Aa]panda[ _\-]([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Archos
+  device_replacement: Archos $1
+  model_replacement: $1
+  regex: '; {0,2}(?:ARCHOS|Archos) ?(GAMEPAD.{0,200}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Archos
+  device_replacement: Archos $1
+  model_replacement: $1
+  regex: ARCHOS; GOGI; ([^;]{1,200});
+- brand_replacement: Archos
+  device_replacement: Archos $1
+  model_replacement: $1
+  regex: '(?:ARCHOS|Archos)[ _]?(.{0,200}?)(?: Build|[;/\(\)\-]|$)'
+- brand_replacement: Archos
+  device_replacement: Archos $1
+  model_replacement: $1
+  regex: '; {0,2}(AN(?:7|8|9|10|13)[A-Z0-9]{1,4})(?: Build|\) AppleWebKit)'
+- brand_replacement: Archos
+  device_replacement: Archos $1
+  model_replacement: $1
+  regex: '; {0,2}(A28|A32|A43|A70(?:BHT|CHT|HB|S|X)|A101(?:B|C|IT)|A7EB|A7EB-WK|101G9|80G9)(?:
+    Build|\) AppleWebKit)'
+- brand_replacement: Arival
+  device_replacement: $1
+  model_replacement: $1
+  regex: '; {0,2}(PAD-FMD[^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Arival
+  device_replacement: $1 $2
+  model_replacement: $1 $2
+  regex: '; {0,2}(BioniQ) ?([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Arnova
+  device_replacement: Arnova $1
+  model_replacement: $1
+  regex: '; {0,2}(AN\d[^;/]{1,100}|ARCHM\d+)(?: Build|\) AppleWebKit)'
+- brand_replacement: Arnova
+  device_replacement: Arnova $1
+  model_replacement: $1
+  regex: '; {0,2}(?:ARNOVA|Arnova) ?([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Assistant
+  device_replacement: Assistant $1-$2
+  model_replacement: $1-$2
+  regex: '; {0,2}(?:ASSISTANT |)(AP)-?([1789]\d{2}[A-Z]{0,2}|80104)(?: Build|\) AppleWebKit)'
+- brand_replacement: Asus
+  device_replacement: Asus $1
+  model_replacement: $1
+  regex: '; {0,2}(ME17\d[^;/]*|ME3\d{2}[^;/]{1,100}|K00[A-Z]|Nexus 10|Nexus 7(?: 2013|)|PadFone[^;/]*|Transformer[^;/]*|TF\d{3}[^;/]*|eeepc)(?:
+    Build|\) AppleWebKit)'
+- brand_replacement: Asus
+  device_replacement: Asus $1
+  model_replacement: $1
+  regex: '; {0,2}ASUS[ _]{0,10}([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Garmin-Asus
+  device_replacement: Garmin-Asus $1
+  model_replacement: $1
+  regex: '; {0,2}Garmin-Asus ([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Garmin-Asus
+  device_replacement: Garmin $1
+  model_replacement: $1
+  regex: '; {0,2}(Garminfone)(?: Build|\) AppleWebKit)'
+- brand_replacement: Attab
+  device_replacement: $1
+  model_replacement: $1
+  regex: '; (@TAB-[^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Audiosonic
+  device_replacement: $1
+  model_replacement: $1
+  regex: '; {0,2}(T-(?:07|[^0]\d)[^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Axioo
+  device_replacement: Axioo $1$2 $3
+  model_replacement: $1$2 $3
+  regex: '; {0,2}(?:Axioo[ _\-]([^;/]{1,100}?)|(picopad)[ _\-]([^;/]{1,100}?))(?:
+    Build|\) AppleWebKit)'
+  regex_flag: i
+- brand_replacement: Azend
+  device_replacement: $1
+  model_replacement: $1
+  regex: '; {0,2}(V(?:100|700|800)[^;/]*)(?: Build|\) AppleWebKit)'
+- brand_replacement: Bak
+  device_replacement: $1
+  model_replacement: $1
+  regex: '; {0,2}(IBAK\-[^;/]*)(?: Build|\) AppleWebKit)'
+  regex_flag: i
+- brand_replacement: Bedove
+  device_replacement: Bedove $1
+  model_replacement: $1
+  regex: '; {0,2}(HY5001|HY6501|X12|X21|I5)(?: Build|\) AppleWebKit)'
+- brand_replacement: Benss
+  device_replacement: Benss $1
+  model_replacement: $1
+  regex: '; {0,2}(JC-[^;/]*)(?: Build|\) AppleWebKit)'
+- brand_replacement: Blackberry
+  device_replacement: $1 $2
+  model_replacement: $2
+  regex: '; {0,2}(BB) ([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: $1
+  device_replacement: $1 $2
+  model_replacement: $2
+  regex: '; {0,2}(BlackBird)[ _](I8.{0,200}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: $1
+  device_replacement: $1 $2
+  model_replacement: $2
+  regex: '; {0,2}(BlackBird)[ _](.{0,200}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Blaupunkt
+  device_replacement: Blaupunkt $1
+  model_replacement: $1
+  regex: '; {0,2}([0-9]+BP[EM][^;/]*|Endeavour[^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Blu
+  device_replacement: $1$2
+  model_replacement: $2
+  regex: '; {0,2}((?:BLU|Blu)[ _\-])([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Blu
+  device_replacement: $1
+  model_replacement: $1
+  regex: '; {0,2}(?:BMOBILE )?(Blu|BLU|DASH [^;/]{1,100}|VIVO 4\.3|TANK 4\.5)(?: Build|\)
+    AppleWebKit)'
+- brand_replacement: Blusens
+  device_replacement: $1
+  model_replacement: $1
+  regex: '; {0,2}(TOUCH\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Bmobile
+  device_replacement: $1
+  model_replacement: $1
+  regex: '; {0,2}(AX5\d+)(?: Build|\) AppleWebKit)'
+- brand_replacement: bq
+  device_replacement: $1 $2
+  model_replacement: $2
+  regex: '; {0,2}([Bb]q) ([^;/]{1,100}?);?(?: Build|\) AppleWebKit)'
+- brand_replacement: bq
+  device_replacement: $1
+  model_replacement: $1
+  regex: '; {0,2}(Maxwell [^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Braun
+  device_replacement: $1
+  model_replacement: $1
+  regex: '; {0,2}((?:B-Tab|B-TAB) ?\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: $1
+  device_replacement: $1 $2
+  model_replacement: $2
+  regex: '; {0,2}(Broncho) ([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Captiva
+  device_replacement: Captiva $1
+  model_replacement: $1
+  regex: '; {0,2}CAPTIVA ([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Casio
+  device_replacement: $1
+  model_replacement: $1
+  regex: '; {0,2}(C771|CAL21|IS11CA)(?: Build|\) AppleWebKit)'
+- brand_replacement: Cat
+  device_replacement: Cat $1
+  model_replacement: $1
+  regex: '; {0,2}(?:Cat|CAT) ([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Cat
+  device_replacement: Cat $1
+  model_replacement: $1
+  regex: '; {0,2}(?:Cat)(Nova.{0,200}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Cat
+  device_replacement: $1
+  model_replacement: Tablet PHOENIX 8.1J0
+  regex: '; {0,2}(INM8002KP|ADM8000KP_[AB])(?: Build|\) AppleWebKit)'
+- brand_replacement: Celkon
+  device_replacement: $1
+  model_replacement: $1
+  regex: ; {0,2}(?:[Cc]elkon[ _\*]|CELKON[ _\*])([^;/\)]+) ?(?:Build|;|\))
+- brand_replacement: Celkon
+  device_replacement: $1
+  model_replacement: $1
+  regex: Build/(?:[Cc]elkon)+_?([^;/_\)]+)
+- brand_replacement: Celkon
+  device_replacement: $1$2
+  model_replacement: $1$2
+  regex: '; {0,2}(CT)-?(\d+)(?: Build|\) AppleWebKit)'
+- brand_replacement: Celkon
+  device_replacement: $1
+  model_replacement: $1
+  regex: ; {0,2}(A19|A19Q|A105|A107[^;/\)]*) ?(?:Build|;|\))
+- brand_replacement: ChangJia
+  device_replacement: $1
+  model_replacement: $1
+  regex: '; {0,2}(TPC[0-9]{4,5})(?: Build|\) AppleWebKit)'
+- brand_replacement: Cloudfone
+  device_replacement: $1 $2 $3
+  model_replacement: $1 $2 $3
+  regex: '; {0,2}(Cloudfone)[ _](Excite)([^ ][^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Cloudfone
+  device_replacement: Cloudfone $1 $2
+  model_replacement: Cloudfone $1 $2
+  regex: '; {0,2}(Excite|ICE)[ _](\d+[^;/]{0,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Cloudfone
+  device_replacement: $1 $2
+  model_replacement: $1 $2
+  regex: '; {0,2}(Cloudfone|CloudPad)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Cmx
+  device_replacement: $1
+  model_replacement: $1
+  regex: '; {0,2}((?:Aquila|Clanga|Rapax)[^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+  regex_flag: i
+- brand_replacement: CobyKyros
+  device_replacement: CobyKyros $1$2
+  model_replacement: $1$2
+  regex: ; {0,2}(?:CFW-|Kyros )?(MID[0-9]{4}(?:[ABC]|SR|TV)?)(\(3G\)-4G| GB 8K| 3G|
+    8K| GB)? {0,2}(?:Build|[;\)])
+- brand_replacement: Coolpad
+  device_replacement: $1$2
+  model_replacement: $1$2
+  regex: '; {0,2}([^;/]{0,50})Coolpad[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Cube
+  device_replacement: $1$2
+  model_replacement: $2
+  regex: '; {0,2}(CUBE[ _])?([KU][0-9]+ ?GT.{0,200}?|A5300)(?: Build|\) AppleWebKit)'
+  regex_flag: i
+- brand_replacement: Cubot
+  device_replacement: $1
+  model_replacement: $1
+  regex: '; {0,2}CUBOT ([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+  regex_flag: i
+- brand_replacement: Cubot
+  device_replacement: $1
+  model_replacement: $1
+  regex: '; {0,2}(BOBBY)(?: Build|\) AppleWebKit)'
+  regex_flag: i
+- brand_replacement: Danew
+  device_replacement: $1
+  model_replacement: $1
+  regex: '; {0,2}(Dslide [^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Dell
+  device_replacement: Dell $1$2
+  model_replacement: $1$2
+  regex: '; {0,2}(XCD)[ _]?(28|35)(?: Build|\) AppleWebKit)'
+- brand_replacement: Dell
+  device_replacement: Dell $1
+  model_replacement: Streak
+  regex: '; {0,2}(001DL)(?: Build|\) AppleWebKit)'
+- brand_replacement: Dell
+  device_replacement: Dell $1
+  model_replacement: Streak
+  regex: '; {0,2}(?:Dell|DELL) (Streak)(?: Build|\) AppleWebKit)'
+- brand_replacement: Dell
+  device_replacement: Dell $1
+  model_replacement: Streak Pro
+  regex: '; {0,2}(101DL|GS01|Streak Pro[^;/]{0,100})(?: Build|\) AppleWebKit)'
+- brand_replacement: Dell
+  device_replacement: Dell $1
+  model_replacement: Streak 7
+  regex: '; {0,2}([Ss]treak ?7)(?: Build|\) AppleWebKit)'
+- brand_replacement: Dell
+  device_replacement: Dell $1
+  model_replacement: $1
+  regex: '; {0,2}(Mini-3iX)(?: Build|\) AppleWebKit)'
+- brand_replacement: Dell
+  device_replacement: Dell $1
+  model_replacement: $1
+  regex: '; {0,2}(?:Dell|DELL)[ _](Aero|Venue|Thunder|Mini.{0,200}?|Streak[ _]Pro)(?:
+    Build|\) AppleWebKit)'
+- brand_replacement: Dell
+  device_replacement: Dell $1
+  model_replacement: $1
+  regex: '; {0,2}Dell[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Dell
+  device_replacement: Dell $1
+  model_replacement: $1
+  regex: '; {0,2}Dell ([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Denver
+  device_replacement: $1
+  model_replacement: $1
+  regex: '; {0,2}(TA[CD]-\d+[^;/]{0,100})(?: Build|\) AppleWebKit)'
+- brand_replacement: Dex
+  device_replacement: $1
+  model_replacement: $1
+  regex: '; {0,2}(iP[789]\d{2}(?:-3G)?|IP10\d{2}(?:-8GB)?)(?: Build|\) AppleWebKit)'
+- brand_replacement: DNS
+  device_replacement: $1 $2
+  model_replacement: $1 $2
+  regex: '; {0,2}(AirTab)[ _\-]([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Fujitsu
+  device_replacement: $1
+  model_replacement: $1
+  regex: '; {0,2}(F\-\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: HTC
+  device_replacement: $1
+  model_replacement: Magic
+  regex: '; {0,2}(HT-03A)(?: Build|\) AppleWebKit)'
+- brand_replacement: HTC
+  device_replacement: $1
+  model_replacement: $1
+  regex: '; {0,2}(HT\-\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: LG
+  device_replacement: $1
+  model_replacement: $1
+  regex: '; {0,2}(L\-\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Nec
+  device_replacement: $1
+  model_replacement: $1
+  regex: '; {0,2}(N\-\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Panasonic
+  device_replacement: $1
+  model_replacement: $1
+  regex: '; {0,2}(P\-\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+- brand_replacement: Samsung
+  device_replacement: $1
+  model_replacement: $1
+
regex: '; {0,2}(SC\-\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Sharp + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(SH\-\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: SonyEricsson + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(SO\-\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Toshiba + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(T\-0[12][^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: DOOV + device_replacement: $1 $2 + model_replacement: $2 + regex: '; {0,2}(DOOV)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Enot + device_replacement: $1 $2 + model_replacement: $2 + regex: '; {0,2}(Enot|ENOT)[ -]?([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Evercoss + device_replacement: CROSS $1 + model_replacement: Cross $1 + regex: ; {0,2}[^;/]{1,100} Build/(?:CROSS|Cross)+[ _\-]([^\)]+) +- brand_replacement: Evercoss + device_replacement: $1 $2 + model_replacement: Cross $2 + regex: '; {0,2}(CROSS|Cross)[ _\-]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Explay + device_replacement: $1 + model_replacement: $1 + regex: ; {0,2}Explay[_ ](.{1,200}?)(?:[\)]| Build) +- brand_replacement: Fly + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(IQ.{0,200}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Fly + device_replacement: $1 $2 + model_replacement: $2 + regex: '; {0,2}(Fly|FLY)[ _](IQ[^;]{1,100}?|F[34]\d+[^;]{0,100}?);?(?: Build|\) + AppleWebKit)' +- brand_replacement: Fujitsu + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(M532|Q572|FJL21)(?: Build|\) AppleWebKit)' +- brand_replacement: Galapad + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(G1)(?: Build|\) AppleWebKit)' +- brand_replacement: $1 + device_replacement: $1 $2 + model_replacement: $2 + regex: '; {0,2}(Geeksphone) ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Gfive + device_replacement: $1 $2 + model_replacement: $2 + regex: '; {0,2}(G[^F]?FIVE) ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Gionee + device_replacement: $1 $2 + model_replacement: $2 + regex: '; {0,2}(Gionee)[ _\-]([^;/]{1,100}?)(?:/[^;/]{1,100}|)(?: Build|\) AppleWebKit)' + regex_flag: i +- brand_replacement: Gionee + device_replacement: Gionee $1 + model_replacement: $1 + regex: '; {0,2}(GN\d+[A-Z]?|INFINITY_PASSION|Ctrl_V1)(?: Build|\) AppleWebKit)' +- brand_replacement: Gionee + device_replacement: Gionee $1 + model_replacement: $1 + regex: ; {0,2}(E3) Build/JOP40D +- brand_replacement: Gionee + device_replacement: Gionee $1 + model_replacement: $1 + regex: \sGIONEE[-\s_](\w*) + regex_flag: i +- brand_replacement: GoClever + device_replacement: GoClever $1 + model_replacement: $1 + regex: '; {0,2}((?:FONE|QUANTUM|INSIGNIA) \d+[^;/]{0,100}|PLAYTAB)(?: Build|\) AppleWebKit)' +- brand_replacement: GoClever + device_replacement: GoClever $1 + model_replacement: $1 + regex: '; {0,2}GOCLEVER ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Google + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(Glass \d+)(?: Build|\) AppleWebKit)' +- brand_replacement: Google + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(Pixel.{0,200}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Gigabyte + device_replacement: $1 $2 + model_replacement: $1 $2 + regex: '; {0,2}(GSmart)[ -]([^/]{1,50})(?: Build|\) AppleWebKit)' +- brand_replacement: Freescale + 
device_replacement: Freescale $1 + model_replacement: $1 + regex: '; {0,2}(imx5[13]_[^/]{1,50})(?: Build|\) AppleWebKit)' +- brand_replacement: Haier + device_replacement: Haier $1 + model_replacement: $1 + regex: '; {0,2}Haier[ _\-]([^/]{1,50})(?: Build|\) AppleWebKit)' +- brand_replacement: Haipad + device_replacement: Haipad $1 + model_replacement: $1 + regex: '; {0,2}(PAD1016)(?: Build|\) AppleWebKit)' +- brand_replacement: Haipad + device_replacement: Haipad $1 + model_replacement: $1 + regex: '; {0,2}(M701|M7|M8|M9)(?: Build|\) AppleWebKit)' +- brand_replacement: Hannspree + device_replacement: Hannspree $1 + model_replacement: $1 + regex: '; {0,2}(SN\d+T[^;\)/]*)(?: Build|[;\)])' +- brand_replacement: HCLme + device_replacement: HCLme $1 + model_replacement: $1 + regex: Build/HCL ME Tablet ([^;\)]{1,3})[\);] +- brand_replacement: HCLme + device_replacement: HCLme $1 + model_replacement: $1 + regex: ; {0,2}([^;\/]+) Build/HCL +- brand_replacement: Hena + device_replacement: Hena $1 + model_replacement: $1 + regex: '; {0,2}(MID-?\d{4}C[EM])(?: Build|\) AppleWebKit)' +- brand_replacement: Hisense + device_replacement: Hisense $1 + model_replacement: $1 + regex: '; {0,2}(EG\d{2,}|HS-[^;/]{1,100}|MIRA[^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Hisense + device_replacement: Hisense $1 + model_replacement: $1 + regex: '; {0,2}(andromax[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: i +- brand_replacement: hitech + device_replacement: AMAZE $1$2 + model_replacement: AMAZE $1$2 + regex: '; {0,2}(?:AMAZE[ _](S\d+)|(S\d+)[ _]AMAZE)(?: Build|\) AppleWebKit)' +- brand_replacement: HP + device_replacement: HP $1 + model_replacement: $1 + regex: '; {0,2}(PlayBook)(?: Build|\) AppleWebKit)' +- brand_replacement: HP + device_replacement: HP $1 + model_replacement: $1 + regex: '; {0,2}HP ([^/]{1,50})(?: Build|\) AppleWebKit)' +- brand_replacement: HP + device_replacement: HP TouchPad + model_replacement: TouchPad + regex: '; {0,2}([^/]{1,30}_tenderloin)(?: Build|\) AppleWebKit)' +- brand_replacement: Huawei + device_replacement: $1$2 + model_replacement: $2 + regex: ; {0,2}(HUAWEI |Huawei-|)([UY][^;/]{1,100}) Build/(?:Huawei|HUAWEI)([UY][^\);]+)\) +- brand_replacement: Huawei + device_replacement: $1 + model_replacement: $2 + regex: ; {0,2}([^;/]{1,100}) Build[/ ]Huawei(MT1-U06|[A-Z]{1,50}\d+[^\);]{1,50})\) +- brand_replacement: Huawei + device_replacement: $1 + model_replacement: $1 + regex: ; {0,2}(S7|M860) Build +- brand_replacement: Huawei + device_replacement: $1$2 + model_replacement: $2 + regex: ; {0,2}((?:HUAWEI|Huawei)[ \-]?)(MediaPad) Build +- brand_replacement: Huawei + device_replacement: $1$2 + model_replacement: $2 + regex: ; {0,2}((?:HUAWEI[ _]?|Huawei[ _]|)Ascend[ _])([^;/]{1,100}) Build +- brand_replacement: Huawei + device_replacement: $1$2 + model_replacement: $2 + regex: ; {0,2}((?:HUAWEI|Huawei)[ _\-]?)((?:G700-|MT-)[^;/]{1,100}) Build +- brand_replacement: Huawei + device_replacement: $1$2 + model_replacement: $2 + regex: ; {0,2}((?:HUAWEI|Huawei)[ _\-]?)([^;/]{1,100}) Build +- brand_replacement: Huawei + device_replacement: $1 + model_replacement: $1 + regex: ; {0,2}(MediaPad[^;]{1,200}|SpringBoard) Build/Huawei +- brand_replacement: Huawei + device_replacement: $1 + model_replacement: $1 + regex: ; {0,2}([^;]{1,200}) Build/(?:Huawei|HUAWEI) +- brand_replacement: Huawei + device_replacement: $1$2 + model_replacement: U$2 + regex: ; {0,2}([Uu])([89]\d{3}) Build +- brand_replacement: Huawei + device_replacement: Huawei Ideos$1 + model_replacement: 
Ideos$1 + regex: ; {0,2}(?:Ideos |IDEOS )(S7) Build +- brand_replacement: Huawei + device_replacement: Huawei Ideos$1 + model_replacement: Ideos$1 + regex: ; {0,2}(?:Ideos |IDEOS )([^;/]{1,50}\s{0,5}|\s{0,5})Build +- brand_replacement: Huawei + device_replacement: Huawei $1 + model_replacement: $1 + regex: ; {0,2}(Orange Daytona|Pulse|Pulse Mini|Vodafone 858|C8500|C8600|C8650|C8660|Nexus + 6P|ATH-.{1,200}?) Build[/ ] +- brand_replacement: Huawei + device_replacement: Huawei $1 + model_replacement: $1 + regex: ; {0,2}((?:[A-Z]{3})\-L[A-Za0-9]{2})[\)] +- brand_replacement: Huawei + device_replacement: Huawei Honor $1 + model_replacement: Honor $1 + regex: ; {0,2}([^;]{1,200}) Build/(HONOR|Honor) +- brand_replacement: HTC + device_replacement: HTC $1 + model_replacement: $1 + regex: ; {0,2}HTC[ _]([^;]{1,200}); Windows Phone +- brand_replacement: HTC + device_replacement: HTC $1 + model_replacement: $1 + regex: '; {0,2}(?:HTC[ _/])+([^ _/]+)(?:[/\\]1\.0 | V|/| +)\d+\.\d[\d\.]*(?: {0,2}Build|\))' +- brand_replacement: HTC + device_replacement: HTC $1 $2 + model_replacement: $1 $2 + regex: '; {0,2}(?:HTC[ _/])+([^ _/]+)(?:[ _/]([^ _/]+)|)(?:[/\\]1\.0 | V|/| +)\d+\.\d[\d\.]*(?: + {0,2}Build|\))' +- brand_replacement: HTC + device_replacement: HTC $1 $2 $3 + model_replacement: $1 $2 $3 + regex: '; {0,2}(?:HTC[ _/])+([^ _/]+)(?:[ _/]([^ _/]+)(?:[ _/]([^ _/]+)|)|)(?:[/\\]1\.0 + | V|/| +)\d+\.\d[\d\.]*(?: {0,2}Build|\))' +- brand_replacement: HTC + device_replacement: HTC $1 $2 $3 $4 + model_replacement: $1 $2 $3 $4 + regex: '; {0,2}(?:HTC[ _/])+([^ _/]+)(?:[ _/]([^ _/]+)(?:[ _/]([^ _/]+)(?:[ _/]([^ + _/]+)|)|)|)(?:[/\\]1\.0 | V|/| +)\d+\.\d[\d\.]*(?: {0,2}Build|\))' +- brand_replacement: HTC + device_replacement: HTC $1 + model_replacement: $1 + regex: '; {0,2}(?:(?:HTC|htc)(?:_blocked|)[ _/])+([^ _/;]+)(?: {0,2}Build|[;\)]| + - )' +- brand_replacement: HTC + device_replacement: HTC $1 $2 + model_replacement: $1 $2 + regex: '; {0,2}(?:(?:HTC|htc)(?:_blocked|)[ _/])+([^ _/]+)(?:[ _/]([^ _/;\)]+)|)(?: + {0,2}Build|[;\)]| - )' +- brand_replacement: HTC + device_replacement: HTC $1 $2 $3 + model_replacement: $1 $2 $3 + regex: '; {0,2}(?:(?:HTC|htc)(?:_blocked|)[ _/])+([^ _/]+)(?:[ _/]([^ _/]+)(?:[ + _/]([^ _/;\)]+)|)|)(?: {0,2}Build|[;\)]| - )' +- brand_replacement: HTC + device_replacement: HTC $1 $2 $3 $4 + model_replacement: $1 $2 $3 $4 + regex: '; {0,2}(?:(?:HTC|htc)(?:_blocked|)[ _/])+([^ _/]+)(?:[ _/]([^ _/]+)(?:[ + _/]([^ _/]+)(?:[ _/]([^ /;]+)|)|)|)(?: {0,2}Build|[;\)]| - )' +- brand_replacement: HTC + device_replacement: HTC $1 + model_replacement: $1 + regex: HTC Streaming Player [^\/]{0,30}/[^\/]{0,10}/ htc_([^/]{1,10}) / +- brand_replacement: HTC + device_replacement: HTC $1 + model_replacement: $1 + regex: '(?:[;,] {0,2}|^)(?:htccn_chs-|)HTC[ _-]?([^;]{1,200}?)(?: {0,2}Build|clay|Android|-?Mozilla| + Opera| Profile| UNTRUSTED|[;/\(\)]|$)' + regex_flag: i +- brand_replacement: HTC + device_replacement: HTC $1 + model_replacement: $1 + regex: '; {0,2}(A6277|ADR6200|ADR6300|ADR6350|ADR6400[A-Z]*|ADR6425[A-Z]*|APX515CKT|ARIA|Desire[^_ + ]*|Dream|EndeavorU|Eris|Evo|Flyer|HD2|Hero|HERO200|Hero CDMA|HTL21|Incredible|Inspire[A-Z0-9]*|Legend|Liberty|Nexus + ?(?:One|HD2)|One|One S C2|One[ _]?(?:S|V|X\+?)\w*|PC36100|PG06100|PG86100|S31HT|Sensation|Wildfire)(?: + Build|[/;\(\)])' + regex_flag: i +- brand_replacement: HTC + device_replacement: HTC $1 $2 + model_replacement: $1 $2 + regex: ; 
{0,2}(ADR6200|ADR6400L|ADR6425LVW|Amaze|DesireS?|EndeavorU|Eris|EVO|Evo\d[A-Z]+|HD2|IncredibleS?|Inspire[A-Z0-9]*|Inspire[A-Z0-9]*|Sensation[A-Z0-9]*|Wildfire)[ + _-](.{1,200}?)(?:[/;\)]|Build|MIUI|1\.0) + regex_flag: i +- brand_replacement: Hyundai + device_replacement: Hyundai $1 + model_replacement: $1 + regex: '; {0,2}HYUNDAI (T\d[^/]{0,10})(?: Build|\) AppleWebKit)' +- brand_replacement: Hyundai + device_replacement: Hyundai $1 + model_replacement: $1 + regex: '; {0,2}HYUNDAI ([^;/]{1,10}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Hyundai + device_replacement: Hyundai $1 + model_replacement: $1 + regex: '; {0,2}(X700|Hold X|MB-6900)(?: Build|\) AppleWebKit)' +- brand_replacement: iBall + device_replacement: $1 $2 + model_replacement: $1 $2 + regex: '; {0,2}(?:iBall[ _\-]|)(Andi)[ _]?(\d[^;/]*)(?: Build|\) AppleWebKit)' + regex_flag: i +- brand_replacement: iBall + device_replacement: $1 $2 + model_replacement: $2 + regex: '; {0,2}(IBall)(?:[ _]([^;/]{1,100}?)|)(?: Build|\) AppleWebKit)' + regex_flag: i +- brand_replacement: IconBIT + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(NT-\d+[^ ;/]{0,50}|Net[Tt]AB [^;/]{1,50}|Mercury [A-Z]{1,50}|iconBIT)(?: + S/N:[^;/]{1,50}|)(?: Build|\) AppleWebKit)' +- brand_replacement: IMO + device_replacement: $1 $2 + model_replacement: $2 + regex: '; {0,2}(IMO)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: i +- brand_replacement: imobile + device_replacement: i-mobile $1 + model_replacement: $1 + regex: '; {0,2}i-?mobile[ _]([^/]{1,50})(?: Build|\) AppleWebKit)' + regex_flag: i +- brand_replacement: imobile + device_replacement: i-mobile $1 + model_replacement: $1 + regex: '; {0,2}(i-(?:style|note)[^/]{0,10})(?: Build|\) AppleWebKit)' + regex_flag: i +- brand_replacement: Impression + device_replacement: $1 $2 + model_replacement: $1 $2 + regex: '; {0,2}(ImPAD) ?(\d+(?:.){0,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Infinix + device_replacement: $1 $2 + model_replacement: $2 + regex: '; {0,2}(Infinix)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Informer + device_replacement: $1 $2 + model_replacement: $2 + regex: '; {0,2}(Informer)[ \-]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Intenso + device_replacement: Intenso $1 + model_replacement: $1 $2 + regex: '; {0,2}(TAB) ?([78][12]4)(?: Build|\) AppleWebKit)' +- brand_replacement: Intex + device_replacement: $1$2$3 + model_replacement: $1 $3 + regex: ; {0,2}(?:Intex[ _]|)(AQUA|Aqua)([ _\.\-])([^;/]{1,100}?) {0,2}(?:Build|;) +- brand_replacement: Intex + device_replacement: $1 $2 + model_replacement: $1 $2 + regex: ; {0,2}(?:INTEX|Intex)(?:[_ ]([^\ _;/]+))(?:[_ ]([^\ _;/]+)|) {0,2}(?:Build|;) +- brand_replacement: Intex + device_replacement: $1 $2 $3 + model_replacement: iBuddy $2 $3 + regex: ; {0,2}([iI]Buddy)[ _]?(Connect)(?:_|\?_| |)([^;/]{0,50}) {0,2}(?:Build|;) +- brand_replacement: Intex + device_replacement: $1 $2 + model_replacement: iBuddy $2 + regex: ; {0,2}(I-Buddy)[ _]([^;/]{1,100}?) 
{0,2}(?:Build|;) +- brand_replacement: iOCEAN + device_replacement: $1 $2 + model_replacement: $2 + regex: '; {0,2}(iOCEAN) ([^/]{1,50})(?: Build|\) AppleWebKit)' + regex_flag: i +- brand_replacement: ionik + device_replacement: ionik $1 + model_replacement: $1 + regex: '; {0,2}(TP\d+(?:\.\d+|)\-\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Iru + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(M702pro)(?: Build|\) AppleWebKit)' +- brand_replacement: Itel + device_replacement: Itel $1 + model_replacement: $1 + regex: '; {0,2}itel ([^;/]*)(?: Build|\) AppleWebKit)' +- brand_replacement: Ivio + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(DE88Plus|MD70)(?: Build|\) AppleWebKit)' +- brand_replacement: Ivio + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}IVIO[_\-]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Jaytech + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(TPC-\d+|JAY-TECH)(?: Build|\) AppleWebKit)' +- brand_replacement: Jiayu + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(JY-[^;/]{1,100}|G[234]S?)(?: Build|\) AppleWebKit)' +- brand_replacement: JXD + device_replacement: $1 $2 + model_replacement: $2 + regex: '; {0,2}(JXD)[ _\-]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Karbonn + device_replacement: $1 + model_replacement: $1 + regex: ; {0,2}Karbonn[ _]?([^;/]{1,100}) {0,2}(?:Build|;) + regex_flag: i +- brand_replacement: Karbonn + device_replacement: $1 + model_replacement: $1 + regex: ; {0,2}([^;]{1,200}) Build/Karbonn +- brand_replacement: Karbonn + device_replacement: $1 + model_replacement: $1 + regex: ; {0,2}(A11|A39|A37|A34|ST8|ST10|ST7|Smart Tab3|Smart Tab2|Titanium S\d) + +Build +- brand_replacement: Sharp + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(IS01|IS03|IS05|IS\d{2}SH)(?: Build|\) AppleWebKit)' +- brand_replacement: Regza + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(IS04)(?: Build|\) AppleWebKit)' +- brand_replacement: Pantech + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(IS06|IS\d{2}PT)(?: Build|\) AppleWebKit)' +- brand_replacement: SonyEricsson + device_replacement: $1 + model_replacement: Xperia Acro + regex: '; {0,2}(IS11S)(?: Build|\) AppleWebKit)' +- brand_replacement: Casio + device_replacement: $1 + model_replacement: GzOne $1 + regex: '; {0,2}(IS11CA)(?: Build|\) AppleWebKit)' +- brand_replacement: LG + device_replacement: $1 + model_replacement: Optimus X + regex: '; {0,2}(IS11LG)(?: Build|\) AppleWebKit)' +- brand_replacement: Medias + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(IS11N)(?: Build|\) AppleWebKit)' +- brand_replacement: Pantech + device_replacement: $1 + model_replacement: MIRACH + regex: '; {0,2}(IS11PT)(?: Build|\) AppleWebKit)' +- brand_replacement: Fujitsu + device_replacement: $1 + model_replacement: Arrows ES + regex: '; {0,2}(IS12F)(?: Build|\) AppleWebKit)' +- brand_replacement: Motorola + device_replacement: $1 + model_replacement: XT909 + regex: '; {0,2}(IS12M)(?: Build|\) AppleWebKit)' +- brand_replacement: SonyEricsson + device_replacement: $1 + model_replacement: Xperia Acro HD + regex: '; {0,2}(IS12S)(?: Build|\) AppleWebKit)' +- brand_replacement: Fujitsu + device_replacement: $1 + model_replacement: Arrowz Z + regex: '; {0,2}(ISW11F)(?: Build|\) AppleWebKit)' +- brand_replacement: HTC + device_replacement: $1 + model_replacement: EVO + regex: '; {0,2}(ISW11HT)(?: Build|\) AppleWebKit)' +- 
brand_replacement: Kyocera + device_replacement: $1 + model_replacement: DIGNO + regex: '; {0,2}(ISW11K)(?: Build|\) AppleWebKit)' +- brand_replacement: Motorola + device_replacement: $1 + model_replacement: Photon + regex: '; {0,2}(ISW11M)(?: Build|\) AppleWebKit)' +- brand_replacement: Samsung + device_replacement: $1 + model_replacement: GALAXY S II WiMAX + regex: '; {0,2}(ISW11SC)(?: Build|\) AppleWebKit)' +- brand_replacement: HTC + device_replacement: $1 + model_replacement: EVO 3D + regex: '; {0,2}(ISW12HT)(?: Build|\) AppleWebKit)' +- brand_replacement: HTC + device_replacement: $1 + model_replacement: J + regex: '; {0,2}(ISW13HT)(?: Build|\) AppleWebKit)' +- brand_replacement: KDDI + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(ISW?[0-9]{2}[A-Z]{0,2})(?: Build|\) AppleWebKit)' +- brand_replacement: KDDI + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(INFOBAR [^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Kingcom + device_replacement: $1 $2 + model_replacement: $1 $2 + regex: '; {0,2}(JOYPAD|Joypad)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Kobo + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(Vox|VOX|Arc|K080)(?: Build|\) AppleWebKit)' + regex_flag: i +- brand_replacement: Kobo + device_replacement: $1 + model_replacement: $1 + regex: \b(Kobo Touch)\b +- brand_replacement: Ktouch + device_replacement: $1 $2 + model_replacement: $2 + regex: '; {0,2}(K-Touch)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: i +- brand_replacement: KTtech + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}((?:EV|KM)-S\d+[A-Z]?)(?: Build|\) AppleWebKit)' + regex_flag: i +- brand_replacement: Kyocera + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(Zio|Hydro|Torque|Event|EVENT|Echo|Milano|Rise|URBANO PROGRESSO|WX04K|WX06K|WX10K|KYL21|101K|C5[12]\d{2})(?: + Build|\) AppleWebKit)' +- brand_replacement: Lava + device_replacement: Iris $1 + model_replacement: Iris $1 + regex: ; {0,2}(?:LAVA[ _]|)IRIS[ _\-]?([^/;\)]+) {0,2}(?:;|\)|Build) + regex_flag: i +- brand_replacement: Lava + device_replacement: $1 + model_replacement: $1 + regex: ; {0,2}LAVA[ _]([^;/]{1,100}) Build +- brand_replacement: Lemon + device_replacement: Lemon $1$2 + model_replacement: $1$2 + regex: '; {0,2}(?:(Aspire A1)|(?:LEMON|Lemon)[ _]([^;/]{1,100}))_?(?: Build|\) AppleWebKit)' +- brand_replacement: Lenco + device_replacement: Lenco $1 + model_replacement: $1 + regex: '; {0,2}(TAB-1012)(?: Build|\) AppleWebKit)' +- brand_replacement: Lenco + device_replacement: Lenco $1 + model_replacement: $1 + regex: '; Lenco ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Lenovo + device_replacement: $1 + model_replacement: $1 + regex: ; {0,2}(A1_07|A2107A-H|S2005A-H|S1-37AH0) Build +- brand_replacement: Lenovo + device_replacement: Lenovo $1 $2 + model_replacement: $1 $2 + regex: ; {0,2}(Idea[Tp]ab)[ _]([^;/]{1,100});? 
Build +- brand_replacement: Lenovo + device_replacement: Lenovo $1 $2 + model_replacement: $1 $2 + regex: ; {0,2}(Idea(?:Tab|pad)) ?([^;/]{1,100}) Build +- brand_replacement: Lenovo + device_replacement: Lenovo $1 $2 + model_replacement: $1 $2 + regex: ; {0,2}(ThinkPad) ?(Tablet) Build/ +- brand_replacement: Lenovo + device_replacement: Lenovo $1 + model_replacement: $1 + regex: ; {0,2}(?:LNV-|)(?:=?[Ll]enovo[ _\-]?|LENOVO[ _])(.{1,200}?)(?:Build|[;/\)]) +- brand_replacement: Lenovo + device_replacement: Lenovo $1 $2 $3 + model_replacement: $1 $2 $3 + regex: '[;,] (?:Vodafone |)(SmartTab) ?(II) ?(\d+) Build/' +- brand_replacement: Lenovo + device_replacement: Lenovo Ideapad K1 + model_replacement: Ideapad K1 + regex: ; {0,2}(?:Ideapad |)K1 Build/ +- brand_replacement: Lenovo + device_replacement: $1 + model_replacement: $1 + regex: ; {0,2}(3GC101|3GW10[01]|A390) Build/ +- brand_replacement: Lenovo + device_replacement: Lenovo $1 + model_replacement: $1 + regex: \b(?:Lenovo|LENOVO)+[ _\-]?([^,;:/ ]+) +- brand_replacement: Lexibook + device_replacement: $1$2 + model_replacement: $1$2 + regex: '; {0,2}(MFC\d+)[A-Z]{2}([^;,/]*),?(?: Build|\) AppleWebKit)' +- brand_replacement: LG + device_replacement: $1 + model_replacement: $1 + regex: ; {0,2}(E[34][0-9]{2}|LS[6-8][0-9]{2}|VS[6-9][0-9]+[^;/]{1,30}|Nexus 4|Nexus + 5X?|GT540f?|Optimus (?:2X|G|4X HD)|OptimusX4HD) {0,2}(?:Build|;) +- brand_replacement: LG + device_replacement: $1 + model_replacement: $1 + regex: '[;:] {0,2}(L-\d+[A-Z]|LGL\d+[A-Z]?)(?:/V\d+|) {0,2}(?:Build|[;\)])' +- brand_replacement: LG + device_replacement: $1$2 + model_replacement: $2 + regex: ; {0,2}(LG-)([A-Z]{1,2}\d{2,}[^,;/\)\(]*?)(?:Build| V\d+|[,;/\)\(]|$) +- brand_replacement: LG + device_replacement: $1$2 + model_replacement: $2 + regex: ; {0,2}(LG[ \-]|LG)([^;/]{1,100})[;/]? 
Build +- brand_replacement: LG + device_replacement: $1 $2 + model_replacement: $2 + regex: ^(LG)-([^;/]{1,100})/ Mozilla/.{0,200}; Android +- brand_replacement: LG + device_replacement: LG $1 $2 + model_replacement: $1 $2 + regex: (Web0S); Linux/(SmartTV) +- brand_replacement: Malata + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}((?:SMB|smb)[^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Malata + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(?:Malata|MALATA) ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Manta + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(MS[45][0-9]{3}|MID0[568][NS]?|MID[1-9]|MID[78]0[1-9]|MID970[1-9]|MID100[1-9])(?: + Build|\) AppleWebKit)' +- brand_replacement: Match + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(M1052|M806|M9000|M9100|M9701|MID100|MID120|MID125|MID130|MID135|MID140|MID701|MID710|MID713|MID727|MID728|MID731|MID732|MID733|MID735|MID736|MID737|MID760|MID800|MID810|MID820|MID830|MID833|MID835|MID860|MID900|MID930|MID933|MID960|MID980)(?: + Build|\) AppleWebKit)' +- brand_replacement: Maxx + device_replacement: Maxx $1 + model_replacement: $1 + regex: '; {0,2}(GenxDroid7|MSD7.{0,200}?|AX\d.{0,200}?|Tab 701|Tab 722)(?: Build|\) + AppleWebKit)' +- brand_replacement: Mediacom + device_replacement: Mediacom $1 + model_replacement: $1 + regex: '; {0,2}(M-PP[^;/]{1,30}|PhonePad ?\d{2,}[^;/]{1,30}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Mediacom + device_replacement: Mediacom $1 + model_replacement: $1 + regex: '; {0,2}(M-MP[^;/]{1,30}|SmartPad ?\d{2,}[^;/]{1,30}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Medion + device_replacement: Medion Lifetab $1 + model_replacement: Lifetab $1 + regex: '; {0,2}(?:MD_|)LIFETAB[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: i +- brand_replacement: Medion + device_replacement: Medion $1 + model_replacement: $1 + regex: '; {0,2}MEDION ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Meizu + device_replacement: Meizu $1 + model_replacement: $1 + regex: '; {0,2}(M030|M031|M035|M040|M065|m9)(?: Build|\) AppleWebKit)' +- brand_replacement: Meizu + device_replacement: Meizu $1 + model_replacement: $1 + regex: ; {0,2}(?:meizu_|MEIZU )(.{1,200}?) {0,2}(?:Build|[;\)]) +- brand_replacement: Micromax + device_replacement: Micromax $1$2 + model_replacement: $1$2 + regex: ; {0,2}(?:Micromax[ _](A111|A240)|(A111|A240)) Build + regex_flag: i +- brand_replacement: Micromax + device_replacement: Micromax $1 + model_replacement: $1 + regex: ; {0,2}Micromax[ _](A\d{2,3}[^;/]*) Build + regex_flag: i +- brand_replacement: Micromax + device_replacement: Micromax $1 + model_replacement: $1 + regex: ; {0,2}(A\d{2}|A[12]\d{2}|A90S|A110Q) Build + regex_flag: i +- brand_replacement: Micromax + device_replacement: Micromax $1 + model_replacement: $1 + regex: ; {0,2}Micromax[ _](P\d{3}[^;/]*) Build + regex_flag: i +- brand_replacement: Micromax + device_replacement: Micromax $1 + model_replacement: $1 + regex: ; {0,2}(P\d{3}|P\d{3}\(Funbook\)) Build + regex_flag: i +- brand_replacement: Mito + device_replacement: $1 $2 + model_replacement: $2 + regex: '; {0,2}(MITO)[ _\-]?([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: i +- brand_replacement: Mobistel + device_replacement: $1 $2 + model_replacement: $1 $2 + regex: ; {0,2}(Cynus)[ _](F5|T\d|.{1,200}?) 
{0,2}(?:Build|[;/\)]) + regex_flag: i +- brand_replacement: Modecom + device_replacement: $1$2 $3 + model_replacement: $2 $3 + regex: '; {0,2}(MODECOM |)(FreeTab) ?([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: i +- brand_replacement: Modecom + device_replacement: $1 $2 + model_replacement: $2 + regex: '; {0,2}(MODECOM )([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: i +- brand_replacement: Motorola + device_replacement: Motorola $1 + model_replacement: $1 + regex: ; {0,2}(MZ\d{3}\+?|MZ\d{3} 4G|Xoom|XOOM[^;/]*) Build +- brand_replacement: Motorola + device_replacement: Motorola $1$2 + model_replacement: $2 + regex: ; {0,2}(Milestone )(XT[^;/]*) Build +- brand_replacement: Motorola + device_replacement: Motorola $1 + model_replacement: DROID X + regex: ; {0,2}(Motoroi ?x|Droid X|DROIDX) Build + regex_flag: i +- brand_replacement: Motorola + device_replacement: Motorola $1 + model_replacement: $1 + regex: ; {0,2}(Droid[^;/]*|DROID[^;/]*|Milestone[^;/]*|Photon|Triumph|Devour|Titanium) + Build +- brand_replacement: Motorola + device_replacement: $1 + model_replacement: $1 + regex: ; {0,2}(A555|A85[34][^;/]*|A95[356]|ME[58]\d{2}\+?|ME600|ME632|ME722|MB\d{3}\+?|MT680|MT710|MT870|MT887|MT917|WX435|WX453|WX44[25]|XT\d{3,4}[A-Z\+]*|CL[iI]Q|CL[iI]Q + XT) Build +- brand_replacement: Motorola + device_replacement: $1$2 + model_replacement: $2 + regex: ; {0,2}(Motorola MOT-|Motorola[ _\-]|MOT\-?)([^;/]{1,100}) Build +- brand_replacement: Motorola + device_replacement: $1$2 + model_replacement: $2 + regex: ; {0,2}(Moto[_ ]?|MOT\-)([^;/]{1,100}) Build +- brand_replacement: Mpman + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}((?:MP[DQ]C|MPG\d{1,4}|MP\d{3,4}|MID(?:(?:10[234]|114|43|7[247]|8[24]|7)C|8[01]1))[^;/]*)(?: + Build|\) AppleWebKit)' +- brand_replacement: Msi + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(?:MSI[ _]|)(Primo\d+|Enjoy[ _\-][^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: i +- brand_replacement: Multilaser + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}Multilaser[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: MyPhone + device_replacement: $1$2 $3 + model_replacement: $1$2 $3 + regex: '; {0,2}(My)[_]?(Pad)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: MyPhone + device_replacement: $1$2 $3 + model_replacement: $3 + regex: '; {0,2}(My)\|?(Phone)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: MyPhone + device_replacement: $1 $2 + model_replacement: $1 $2 + regex: '; {0,2}(A\d+)[ _](Duo|)(?: Build|\) AppleWebKit)' + regex_flag: i +- brand_replacement: Mytab + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(myTab[^;/]*)(?: Build|\) AppleWebKit)' +- brand_replacement: Nabi + device_replacement: $1$2 + model_replacement: $2 + regex: '; {0,2}(NABI2?-)([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Nec + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(N-\d+[CDE])(?: Build|\) AppleWebKit)' +- brand_replacement: Nec + device_replacement: $1$2 + model_replacement: $2 + regex: '; ?(NEC-)(.{0,200}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Nec + device_replacement: $1 + model_replacement: Lifetouch Note + regex: '; {0,2}(LT-NA7)(?: Build|\) AppleWebKit)' +- brand_replacement: Nextbook + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(NXM\d+[A-Za-z0-9_]{0,50}|Next\d[A-Za-z0-9_ \-]{0,50}|NEXT\d[A-Za-z0-9_ + \-]{0,50}|Nextbook [A-Za-z0-9_ ]{0,50}|DATAM803HC|M805)(?: Build|[\);])' +- 
brand_replacement: Nokia + device_replacement: $1$2$3 + model_replacement: $3 + regex: ; {0,2}(Nokia)([ _\-]{0,5})([^;/]{0,50}) Build + regex_flag: i +- brand_replacement: Nokia + device_replacement: Nokia $1 + model_replacement: $1 + regex: '; {0,2}(TA\-\d{4})(?: Build|\) AppleWebKit)' +- brand_replacement: Nook + device_replacement: $1$2 + model_replacement: $2 + regex: '; {0,2}(Nook ?|Barnes & Noble Nook |BN )([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Nook + device_replacement: $1$2 + model_replacement: $2 + regex: '; {0,2}(NOOK |)(BNRV200|BNRV200A|BNTV250|BNTV250A|BNTV400|BNTV600|LogicPD + Zoom2)(?: Build|\) AppleWebKit)' +- brand_replacement: Nook + device_replacement: $1 + model_replacement: Tablet + regex: ; Build/(Nook) +- brand_replacement: Olivetti + device_replacement: Olivetti $1 + model_replacement: $1 + regex: '; {0,2}(OP110|OliPad[^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Omega + device_replacement: Omega $1 + model_replacement: $1 + regex: '; {0,2}OMEGA[ _\-](MID[^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Omega + device_replacement: Omega $1 + model_replacement: $1 + regex: ^(MID7500|MID\d+) Mozilla/5\.0 \(iPad; +- brand_replacement: Openpeak + device_replacement: Openpeak $1 + model_replacement: $1 + regex: '; {0,2}((?:CIUS|cius)[^;/]*)(?: Build|\) AppleWebKit)' +- brand_replacement: Oppo + device_replacement: Oppo $1 + model_replacement: $1 + regex: '; {0,2}(Find ?(?:5|7a)|R8[012]\d{1,2}|T703\d?|U70\d{1,2}T?|X90\d{1,2}|[AFR]\d{1,2}[a-z]{1,2})(?: + Build|\) AppleWebKit)' +- brand_replacement: Oppo + device_replacement: Oppo $1 + model_replacement: $1 + regex: '; {0,2}OPPO ?([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Oppo + device_replacement: Oppo $1 + model_replacement: $1 + regex: '; {0,2}(CPH\d{1,4}|RMX\d{1,4}|P[A-Z]{3}\d{2})(?: Build|\) AppleWebKit)' +- brand_replacement: Oppo + device_replacement: Oppo F1s + model_replacement: $1 + regex: '; {0,2}(A1601)(?: Build|\) AppleWebKit)' +- brand_replacement: Odys + device_replacement: Odys $1 + model_replacement: $1 + regex: '; {0,2}(?:Odys\-|ODYS\-|ODYS )([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Odys + device_replacement: Odys $1 $2 + model_replacement: $1 $2 + regex: '; {0,2}(SELECT) ?(7)(?: Build|\) AppleWebKit)' +- brand_replacement: Odys + device_replacement: Odys $1 $2 $3 + model_replacement: $1 $2 $3 + regex: '; {0,2}(PEDI)_(PLUS)_(W)(?: Build|\) AppleWebKit)' +- brand_replacement: Odys + device_replacement: Odys $1 + model_replacement: $1 + regex: '; {0,2}(AEON|BRAVIO|FUSION|FUSION2IN1|Genio|EOS10|IEOS[^;/]*|IRON|Loox|LOOX|LOOX + Plus|Motion|NOON|NOON_PRO|NEXT|OPOS|PEDI[^;/]*|PRIME[^;/]*|STUDYTAB|TABLO|Tablet-PC-4|UNO_X8|XELIO[^;/]*|Xelio + ?\d+ ?[Pp]ro|XENO10|XPRESS PRO)(?: Build|\) AppleWebKit)' +- brand_replacement: OnePlus + device_replacement: OnePlus $1 + model_replacement: $1 + regex: '; (ONE [a-zA-Z]\d+)(?: Build|\) AppleWebKit)' +- brand_replacement: OnePlus + device_replacement: OnePlus $1 + model_replacement: $1 + regex: '; (ONEPLUS [a-zA-Z]\d+)(?: Build|\) AppleWebKit)' +- brand_replacement: Orion + device_replacement: Orion $1 + model_replacement: $1 + regex: '; {0,2}(TP-\d+)(?: Build|\) AppleWebKit)' +- brand_replacement: PackardBell + device_replacement: PackardBell $1 + model_replacement: $1 + regex: '; {0,2}(G100W?)(?: Build|\) AppleWebKit)' +- brand_replacement: $1 + device_replacement: $1 $2 + model_replacement: $2 + regex: '; {0,2}(Panasonic)[_ ]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- 
brand_replacement: Panasonic + device_replacement: Panasonic $1 + model_replacement: $1 + regex: '; {0,2}(FZ-A1B|JT-B1)(?: Build|\) AppleWebKit)' +- brand_replacement: Panasonic + device_replacement: Panasonic $1 + model_replacement: $1 + regex: '; {0,2}(dL1|DL1)(?: Build|\) AppleWebKit)' +- brand_replacement: Pantech + device_replacement: Pantech $1$2 + model_replacement: $1$2 + regex: ; {0,2}(SKY[ _]|)(IM\-[AT]\d{3}[^;/]{1,100}).{0,30} Build/ +- brand_replacement: Pantech + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}((?:ADR8995|ADR910L|ADR930L|ADR930VW|PTL21|P8000)(?: 4G|)) Build/' +- brand_replacement: Pantech + device_replacement: Pantech $1 + model_replacement: $1 + regex: ; {0,2}Pantech([^;/]{1,30}).{0,200}? Build/ +- brand_replacement: Papyre + device_replacement: $1 $2 + model_replacement: $2 + regex: '; {0,2}(papyre)[ _\-]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: i +- brand_replacement: Pearl + device_replacement: Pearl $1 + model_replacement: $1 + regex: '; {0,2}(?:Touchlet )?(X10\.[^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Phicomm + device_replacement: Phicomm $1 + model_replacement: $1 + regex: '; PHICOMM (i800)(?: Build|\) AppleWebKit)' +- brand_replacement: Phicomm + device_replacement: Phicomm $1 + model_replacement: $1 + regex: '; PHICOMM ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Phicomm + device_replacement: Phicomm $1 + model_replacement: $1 + regex: '; {0,2}(FWS\d{3}[^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Philips + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(D633|D822|D833|T539|T939|V726|W335|W336|W337|W3568|W536|W5510|W626|W632|W6350|W6360|W6500|W732|W736|W737|W7376|W820|W832|W8355|W8500|W8510|W930)(?: + Build|\) AppleWebKit)' +- brand_replacement: Philips + device_replacement: Philips $1 + model_replacement: $1 + regex: '; {0,2}(?:Philips|PHILIPS)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Pipo + device_replacement: Pipo $1$2 + model_replacement: $1$2 + regex: 'Android 4\..{0,200}; {0,2}(M[12356789]|U[12368]|S[123])\ ?(pro)?(?: Build|\) + AppleWebKit)' +- brand_replacement: Ployer + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(MOMO[^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Polaroid + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(?:Polaroid[ _]|)((?:MIDC\d{3,}|PMID\d{2,}|PTAB\d{3,})[^;/]{0,30}?)(\/[^;/]{0,30}|)(?: + Build|\) AppleWebKit)' +- brand_replacement: Polaroid + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(?:Polaroid )(Tablet)(?: Build|\) AppleWebKit)' +- brand_replacement: Pomp + device_replacement: $1 $2 + model_replacement: $2 + regex: ; {0,2}(POMP)[ _\-](.{1,200}?) 
{0,2}(?:Build|[;/\)]) +- brand_replacement: Positivo + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(TB07STA|TB10STA|TB07FTA|TB10FTA)(?: Build|\) AppleWebKit)' +- brand_replacement: Positivo + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(?:Positivo |)((?:YPY|Ypy)[^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: POV + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(MOB-[^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: POV + device_replacement: POV $1 + model_replacement: $1 + regex: '; {0,2}POV[ _\-]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: POV + device_replacement: POV $1 + model_replacement: $1 + regex: '; {0,2}((?:TAB-PLAYTAB|TAB-PROTAB|PROTAB|PlayTabPro|Mobii[ _\-]|TAB-P)[^;/]*)(?: + Build|\) AppleWebKit)' +- brand_replacement: Prestigio + device_replacement: Prestigio $1 + model_replacement: $1 + regex: '; {0,2}(?:Prestigio |)((?:PAP|PMP)\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Proscan + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(PLT[0-9]{4}.{0,200}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Qmobile + device_replacement: $1 $2 + model_replacement: $1 $2 + regex: '; {0,2}(A2|A5|A8|A900)_?(Classic|)(?: Build|\) AppleWebKit)' +- brand_replacement: Qmobile + device_replacement: Qmobile $2 $3 + model_replacement: $2 $3 + regex: '; {0,2}(Q[Mm]obile)_([^_]+)_([^_]+?)(?: Build|\) AppleWebKit)' +- brand_replacement: Qmobile + device_replacement: Qmobile $2 + model_replacement: $2 + regex: '; {0,2}(Q\-?[Mm]obile)[_ ](A[^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Qmobilevn + device_replacement: $1 $2 + model_replacement: $2 + regex: '; {0,2}(Q\-Smart)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Qmobilevn + device_replacement: $1 $2 + model_replacement: $2 + regex: '; {0,2}(Q\-?[Mm]obile)[ _\-](S[^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Quanta + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(TA1013)(?: Build|\) AppleWebKit)' +- brand_replacement: RCA + device_replacement: $1 + model_replacement: $1 + regex: '; (RCT\w+)(?: Build|\) AppleWebKit)' +- brand_replacement: RCA + device_replacement: RCA $1 + model_replacement: $1 + regex: '; RCA (\w+)(?: Build|\) AppleWebKit)' +- brand_replacement: Rockchip + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(RK\d+),?(?: Build|\) AppleWebKit)' +- brand_replacement: Rockchip + device_replacement: $1 + model_replacement: $1 + regex: ' Build/(RK\d+)' +- brand_replacement: Samsung + device_replacement: Samsung $1$2 + model_replacement: $2 + regex: ; {0,2}(SAMSUNG |Samsung |)((?:Galaxy (?:Note II|S\d)|GT-I9082|GT-I9205|GT-N7\d{3}|SM-N9005)[^;/]{0,100})\/?[^;/]{0,50} + Build/ +- brand_replacement: Samsung + device_replacement: Samsung $1$2 + model_replacement: $2 + regex: '; {0,2}(Google |)(Nexus [Ss](?: 4G|)) Build/' +- brand_replacement: Samsung + device_replacement: Samsung $2 + model_replacement: $2 + regex: ; {0,2}(SAMSUNG |Samsung )([^\/]{0,50})\/[^ ]{0,50} Build/ +- brand_replacement: Samsung + device_replacement: Samsung $1 + model_replacement: $1 + regex: '; {0,2}(Galaxy(?: Ace| Nexus| S ?II+|Nexus S| with MCR 1.2| Mini Plus 4G|)) + Build/' +- brand_replacement: Samsung + device_replacement: Samsung $2 + model_replacement: $2 + regex: ; {0,2}(SAMSUNG[ _\-]|)(?:SAMSUNG[ _\-])([^;/]{1,100}) Build +- brand_replacement: Samsung + device_replacement: Samsung $1$2$3 + model_replacement: $2 + regex: ; 
{0,2}(SAMSUNG-|)(GT\-[BINPS]\d{4}[^\/]{0,50})(\/[^ ]{0,50}) Build +- brand_replacement: Samsung + device_replacement: Samsung $1 + model_replacement: $1 + regex: (?:; {0,2}|^)((?:GT\-[BIiNPS]\d{4}|I9\d{2}0[A-Za-z\+]?\b)[^;/\)]*?)(?:Build|Linux|MIUI|[;/\)]) +- brand_replacement: Samsung + device_replacement: Samsung $1$2 + model_replacement: $2 + regex: ; (SAMSUNG-)([A-Za-z0-9\-]{0,50}).{0,200} Build/ +- brand_replacement: Samsung + device_replacement: Samsung $1 + model_replacement: $1 + regex: ; {0,2}((?:SCH|SGH|SHV|SHW|SPH|SC|SM)\-[A-Za-z0-9 ]{1,50})(/?[^ ]*|) Build +- brand_replacement: Samsung + device_replacement: Samsung $1 + model_replacement: $1 + regex: ; {0,2}((?:SC)\-[A-Za-z0-9 ]{1,50})(/?[^ ]*|)\) +- brand_replacement: Samsung + device_replacement: Samsung $1 + model_replacement: $1 + regex: ' ((?:SCH)\-[A-Za-z0-9 ]{1,50})(/?[^ ]*|) Build' +- brand_replacement: Samsung + device_replacement: Samsung $1 + model_replacement: $1 + regex: ; {0,2}(Behold ?(?:2|II)|YP\-G[^;/]{1,100}|EK-GC100|SCL21|I9300) Build +- brand_replacement: Samsung + device_replacement: Samsung $1 + model_replacement: $1 + regex: ; {0,2}((?:SCH|SGH|SHV|SHW|SPH|SC|SM)\-[A-Za-z0-9]{5,6})[\)] +- brand_replacement: Sharp + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(SH\-?\d\d[^;/]{1,100}|SBM\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Sharp + device_replacement: $1$2 + model_replacement: $2 + regex: '; {0,2}(SHARP[ -])([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Simvalley + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(SPX[_\-]\d[^;/]*)(?: Build|\) AppleWebKit)' +- brand_replacement: Simvalley + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(SX7\-PEARL\.GmbH)(?: Build|\) AppleWebKit)' +- brand_replacement: Simvalley + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(SP[T]?\-\d{2}[^;/]*)(?: Build|\) AppleWebKit)' +- brand_replacement: SKtelesys + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(SK\-.{0,200}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Skytex + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(?:SKYTEX|SX)-([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Skytex + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(IMAGINE [^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: $1 + device_replacement: $1 $2 + model_replacement: $2 + regex: '; {0,2}(SmartQ) ?([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Smartbitt + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(WF7C|WF10C|SBT[^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Sharp + device_replacement: $1 + model_replacement: $1 + regex: ; {0,2}(SBM(?:003SH|005SH|006SH|007SH|102SH)) Build +- brand_replacement: Panasonic + device_replacement: $1 + model_replacement: $1 + regex: ; {0,2}(003P|101P|101P11C|102P) Build +- brand_replacement: ZTE + device_replacement: $1 + model_replacement: $1 + regex: ; {0,2}(00\dZ) Build/ +- brand_replacement: HTC + device_replacement: $1 + model_replacement: $1 + regex: ; HTC(X06HT) Build +- brand_replacement: HTC + device_replacement: $1 + model_replacement: $1 + regex: ; {0,2}(001HT|X06HT) Build +- brand_replacement: Motorola + device_replacement: $1 + model_replacement: XT902 + regex: ; {0,2}(201M) Build +- brand_replacement: Trekstor + device_replacement: Trekstor $1 + model_replacement: $1 + regex: ; {0,2}(ST\d{4}.{0,200})Build/ST +- brand_replacement: Trekstor + device_replacement: 
Trekstor $1 + model_replacement: $1 + regex: '; {0,2}(ST\d{4}.{0,200}?)(?: Build|\) AppleWebKit)' +- brand_replacement: SonyEricsson + device_replacement: $1$2 + model_replacement: $2 + regex: ; {0,2}(Sony ?Ericsson ?)([^;/]{1,100}) Build +- brand_replacement: SonyEricsson + device_replacement: $1 + model_replacement: $1 + regex: ; {0,2}((?:SK|ST|E|X|LT|MK|MT|WT)\d{2}[a-z0-9]*(?:-o|)|R800i|U20i) Build +- brand_replacement: SonyEricsson + device_replacement: $1 + model_replacement: $1 + regex: ; {0,2}(Xperia (?:A8|Arc|Acro|Active|Live with Walkman|Mini|Neo|Play|Pro|Ray|X\d+)[^;/]{0,50}) + Build + regex_flag: i +- brand_replacement: Sony + device_replacement: Sony $1 + model_replacement: $1 + regex: '; Sony (Tablet[^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Sony + device_replacement: Sony $1 + model_replacement: $1 + regex: '; Sony ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: $1 + device_replacement: $1 $2 + model_replacement: $2 + regex: '; {0,2}(Sony)([A-Za-z0-9\-]+)(?: Build|\) AppleWebKit)' +- brand_replacement: Sony + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(Xperia [^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Sony + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(C(?:1[0-9]|2[0-9]|53|55|6[0-9])[0-9]{2}|D[25]\d{3}|D6[56]\d{2})(?: + Build|\) AppleWebKit)' +- brand_replacement: Sony + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(SGP\d{3}|SGPT\d{2})(?: Build|\) AppleWebKit)' +- brand_replacement: Sony + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(NW-Z1000Series)(?: Build|\) AppleWebKit)' +- brand_replacement: Sony + device_replacement: PlayStation 3 + model_replacement: PlayStation 3 + regex: PLAYSTATION 3 +- brand_replacement: Sony + device_replacement: $1 + model_replacement: $1 + regex: (PlayStation (?:Portable|Vita|\d+)) +- brand_replacement: Spice + device_replacement: $1$2$3$4 + model_replacement: Mi$4 + regex: '; {0,2}((?:CSL_Spice|Spice|SPICE|CSL)[ _\-]?|)([Mm][Ii])([ _\-]|)(\d{3}[^;/]*)(?: + Build|\) AppleWebKit)' +- brand_replacement: Sprint + device_replacement: $1$2 + model_replacement: $2 + regex: ; {0,2}(Sprint )(.{1,200}?) 
{0,2}(?:Build|[;/]) +- brand_replacement: Sprint + device_replacement: $1$2 + model_replacement: $2 + regex: '\b(Sprint)[: ]([^;,/ ]+)' +- brand_replacement: Tagi + device_replacement: $1$2$3 + model_replacement: $2$3 + regex: '; {0,2}(TAGI[ ]?)(MID) ?([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Tecmobile + device_replacement: Tecmobile $1 + model_replacement: $1 + regex: '; {0,2}(Oyster500|Opal 800)(?: Build|\) AppleWebKit)' +- brand_replacement: Tecno + device_replacement: $1$2 + model_replacement: $2 + regex: '; {0,2}(TECNO[ _])([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: $1 + device_replacement: $1 $2 + model_replacement: $2 + regex: '; {0,2}Android for (Telechips|Techvision) ([^ ]+) ' + regex_flag: i +- brand_replacement: Telstra + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(T-Hub2)(?: Build|\) AppleWebKit)' +- brand_replacement: Terra + device_replacement: Terra $1$2 + model_replacement: $1$2 + regex: '; {0,2}(PAD) ?(100[12])(?: Build|\) AppleWebKit)' +- brand_replacement: Texet + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(T[BM]-\d{3}[^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Thalia + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(tolino [^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Thalia + device_replacement: $1 + model_replacement: Tolino Shine + regex: ; {0,2}Build/.{0,200} (TOLINO_BROWSER) +- brand_replacement: Thl + device_replacement: $1 $2 + model_replacement: $2 + regex: '; {0,2}(?:CJ[ -])?(ThL|THL)[ -]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Thl + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(T100|T200|T5|W100|W200|W8s)(?: Build|\) AppleWebKit)' +- brand_replacement: HTC + device_replacement: $1 + model_replacement: Hero + regex: ; {0,2}(T-Mobile[ _]G2[ _]Touch) Build +- brand_replacement: HTC + device_replacement: $1 + model_replacement: Desire Z + regex: ; {0,2}(T-Mobile[ _]G2) Build +- brand_replacement: Huawei + device_replacement: $1 + model_replacement: U8730 + regex: ; {0,2}(T-Mobile myTouch Q) Build +- brand_replacement: Huawei + device_replacement: $1 + model_replacement: U8680 + regex: ; {0,2}(T-Mobile myTouch) Build +- brand_replacement: HTC + device_replacement: $1 + model_replacement: Espresso + regex: ; {0,2}(T-Mobile_Espresso) Build +- brand_replacement: HTC + device_replacement: $1 + model_replacement: Dream + regex: ; {0,2}(T-Mobile G1) Build +- brand_replacement: HTC + device_replacement: $1$2 $3 $4 + model_replacement: $2 $3 $4 + regex: \b(T-Mobile ?|)(myTouch)[ _]?([34]G)[ _]?([^\/]*) (?:Mozilla|Build) +- brand_replacement: Tmobile + device_replacement: $1 $2 $3 + model_replacement: $2 $3 + regex: \b(T-Mobile)_([^_]+)_(.{0,200}) Build +- brand_replacement: Tmobile + device_replacement: $1 $2 + model_replacement: $2 + regex: \b(T-Mobile)[_ ]?(.{0,200}?)Build +- brand_replacement: Tomtec + device_replacement: $1 + model_replacement: $1 + regex: ' (ATP[0-9]{4})(?: Build|\) AppleWebKit)' +- brand_replacement: Tooky + device_replacement: $1 $2 + model_replacement: $2 + regex: ' ?(TOOKY)[ _\-]([^;/]{1,100}) ?(?:Build|;)' + regex_flag: i +- brand_replacement: Toshiba + device_replacement: $1 + model_replacement: Folio 100 + regex: \b(TOSHIBA_AC_AND_AZ|TOSHIBA_FOLIO_AND_A|FOLIO_AND_A) +- brand_replacement: Toshiba + device_replacement: $1 + model_replacement: Folio 100 + regex: '; {0,2}([Ff]olio ?100)(?: Build|\) AppleWebKit)' +- brand_replacement: Toshiba + device_replacement: Toshiba 
$1 + model_replacement: $1 + regex: '; {0,2}(AT[0-9]{2,3}(?:\-A|LE\-A|PE\-A|SE|a|)|AT7-A|AT1S0|Hikari-iFrame/WDPF-[^;/]{1,100}|THRiVE|Thrive)(?: + Build|\) AppleWebKit)' +- brand_replacement: Touchmate + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(TM-MID\d+[^;/]{1,50}|TOUCHMATE|MID-750)(?: Build|\) AppleWebKit)' +- brand_replacement: Touchmate + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(TM-SM\d+[^;/]{1,50}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Treq + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(A10 [Bb]asic2?)(?: Build|\) AppleWebKit)' +- brand_replacement: Treq + device_replacement: $1$2 + model_replacement: $2 + regex: '; {0,2}(TREQ[ _\-])([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: i +- brand_replacement: Umeox + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(X-?5|X-?3)(?: Build|\) AppleWebKit)' +- brand_replacement: Umeox + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(A502\+?|A936|A603|X1|X2)(?: Build|\) AppleWebKit)' +- brand_replacement: Vernee + device_replacement: Thor + model_replacement: Thor + regex: ; thor Build/ +- brand_replacement: Vernee + device_replacement: Thor $1 + model_replacement: Thor + regex: ; Thor (E)? Build/ +- brand_replacement: Vernee + device_replacement: Apollo Lite + model_replacement: Apollo + regex: ; Apollo Lite Build/ +- brand_replacement: Versus + device_replacement: Versus $1 + model_replacement: $1 + regex: '(TOUCH(?:TAB|PAD).{1,200}?)(?: Build|\) AppleWebKit)' + regex_flag: i +- brand_replacement: Vertu + device_replacement: $1 $2 + model_replacement: $2 + regex: '(VERTU) ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Videocon + device_replacement: $1 $2 + model_replacement: $2 + regex: ; {0,2}(Videocon)[ _\-]([^;/]{1,100}?) 
{0,2}(?:Build|;) +- brand_replacement: Videocon + device_replacement: $1 + model_replacement: $1 + regex: ' (VT\d{2}[A-Za-z]*)(?: Build|\) AppleWebKit)' +- brand_replacement: Viewsonic + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}((?:ViewPad|ViewPhone|VSD)[^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Viewsonic + device_replacement: $1$2 + model_replacement: $2 + regex: '; {0,2}(ViewSonic-)([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Viewsonic + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(GTablet.{0,200}?)(?: Build|\) AppleWebKit)' +- brand_replacement: vivo + device_replacement: $1 $2 + model_replacement: $2 + regex: '; {0,2}([Vv]ivo)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: $1 + device_replacement: $1 $2 + model_replacement: $2 + regex: '(Vodafone) (.{0,200}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Walton + device_replacement: Walton $1 + model_replacement: $1 + regex: '; {0,2}(?:Walton[ _\-]|)(Primo[ _\-][^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: i +- brand_replacement: Wiko + device_replacement: Wiko $1 + model_replacement: $1 + regex: '; {0,2}(?:WIKO[ \-]|)(CINK\+?|BARRY|BLOOM|DARKFULL|DARKMOON|DARKNIGHT|DARKSIDE|FIZZ|HIGHWAY|IGGY|OZZY|RAINBOW|STAIRWAY|SUBLIM|WAX|CINK + [^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: i +- brand_replacement: Wellcom + device_replacement: Wellcom $1 + model_replacement: $1 + regex: '; {0,2}WellcoM-([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: WeTab + device_replacement: $1 + model_replacement: WeTab + regex: (?:(WeTab)-Browser|; (wetab) Build) +- brand_replacement: Wolfgang + device_replacement: Wolfgang $1 + model_replacement: $1 + regex: '; {0,2}(AT-AS[^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Woxter + device_replacement: Woxter $1 + model_replacement: $1 + regex: '; {0,2}(?:Woxter|Wxt) ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' +- brand_replacement: Yarvik + device_replacement: Yarvik $1 + model_replacement: $1 + regex: '; {0,2}(?:Xenta |Luna |)(TAB[234][0-9]{2}|TAB0[78]-\d{3}|TAB0?9-\d{3}|TAB1[03]-\d{3}|SMP\d{2}-\d{3})(?: + Build|\) AppleWebKit)' +- brand_replacement: Yifang + device_replacement: Yifang $1$2$3 + model_replacement: $2 + regex: '; {0,2}([A-Z]{2,4})(M\d{3,}[A-Z]{2})([^;\)\/]*)(?: Build|[;\)])' +- brand_replacement: XiaoMi + device_replacement: XiaoMi $1 + model_replacement: $1 + regex: ; {0,2}((Mi|MI|HM|MI-ONE|Redmi)[ -](NOTE |Note |)[^;/]*) (Build|MIUI)/ +- brand_replacement: XiaoMi + device_replacement: XiaoMi $1 + model_replacement: $1 + regex: ; {0,2}((Mi|MI|HM|MI-ONE|Redmi)[ -](NOTE |Note |)[^;/\)]*) +- brand_replacement: XiaoMi + device_replacement: XiaoMi $1 + model_replacement: $1 + regex: ; {0,2}(MIX) (Build|MIUI)/ +- brand_replacement: XiaoMi + device_replacement: XiaoMi $1 + model_replacement: $1 + regex: ; {0,2}((MIX) ([^;/]*)) (Build|MIUI)/ +- brand_replacement: Xolo + device_replacement: Xolo $1 + model_replacement: $1 + regex: '; {0,2}XOLO[ _]([^;/]{0,30}tab.{0,30})(?: Build|\) AppleWebKit)' + regex_flag: i +- brand_replacement: Xolo + device_replacement: Xolo $1 + model_replacement: $1 + regex: '; {0,2}XOLO[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: i +- brand_replacement: Xolo + device_replacement: Xolo $1 + model_replacement: $1 + regex: '; {0,2}(q\d0{2,3}[a-z]?)(?: Build|\) AppleWebKit)' + regex_flag: i +- brand_replacement: Xoro + device_replacement: Xoro $1 + model_replacement: $1 + regex: '; {0,2}(PAD 
?[79]\d+[^;/]{0,50}|TelePAD\d+[^;/])(?: Build|\) AppleWebKit)' +- brand_replacement: Zopo + device_replacement: $1$2$3 + model_replacement: $1$2$3 + regex: '; {0,2}(?:(?:ZOPO|Zopo)[ _]([^;/]{1,100}?)|(ZP ?(?:\d{2}[^;/]{1,100}|C2))|(C[2379]))(?: + Build|\) AppleWebKit)' +- brand_replacement: ZiiLabs + device_replacement: $1 $2 + model_replacement: $2 + regex: '; {0,2}(ZiiLABS) (Zii[^;/]*)(?: Build|\) AppleWebKit)' +- brand_replacement: ZiiLabs + device_replacement: $1 $2 + model_replacement: $2 + regex: '; {0,2}(Zii)_([^;/]*)(?: Build|\) AppleWebKit)' +- brand_replacement: ZTE + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(ARIZONA|(?:ATLAS|Atlas) W|D930|Grand (?:[SX][^;]{0,200}?|Era|Memo[^;]{0,200}?)|JOE|(?:Kis|KIS)\b[^;]{0,200}?|Libra|Light + [^;]{0,200}?|N8[056][01]|N850L|N8000|N9[15]\d{2}|N9810|NX501|Optik|(?:Vip )Racer[^;]{0,200}?|RacerII|RACERII|San + Francisco[^;]{0,200}?|V9[AC]|V55|V881|Z[679][0-9]{2}[A-z]?)(?: Build|\) AppleWebKit)' +- brand_replacement: ZTE + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}([A-Z]\d+)_USA_[^;]{0,200}(?: Build|\) AppleWebKit)' +- brand_replacement: ZTE + device_replacement: $1 + model_replacement: $1 + regex: '; {0,2}(SmartTab\d+)[^;]{0,50}(?: Build|\) AppleWebKit)' +- brand_replacement: ZTE + device_replacement: ZTE Blade$1 + model_replacement: Blade$1 + regex: '; {0,2}(?:Blade|BLADE|ZTE-BLADE)([^;/]*)(?: Build|\) AppleWebKit)' +- brand_replacement: ZTE + device_replacement: ZTE Skate$1 + model_replacement: Skate$1 + regex: '; {0,2}(?:Skate|SKATE|ZTE-SKATE)([^;/]*)(?: Build|\) AppleWebKit)' +- brand_replacement: ZTE + device_replacement: $1$2 + model_replacement: $1$2 + regex: '; {0,2}(Orange |Optimus )(Monte Carlo|San Francisco)(?: Build|\) AppleWebKit)' +- brand_replacement: ZTE + device_replacement: ZTE $1 + model_replacement: $1 + regex: '; {0,2}(?:ZXY-ZTE_|ZTE\-U |ZTE[\- _]|ZTE-C[_ ])([^;/]{1,100}?)(?: Build|\) + AppleWebKit)' +- brand_replacement: ZTE + device_replacement: $1 $2 + model_replacement: $1 $2 + regex: '; (BASE) (lutea|Lutea 2|Tab[^;]{0,200}?)(?: Build|\) AppleWebKit)' +- brand_replacement: ZTE + device_replacement: $1 + model_replacement: $1 + regex: '; (Avea inTouch 2|soft stone|tmn smart a7|Movistar[ _]Link)(?: Build|\) + AppleWebKit)' + regex_flag: i +- brand_replacement: ZTE + device_replacement: $1 + model_replacement: $1 + regex: ; {0,2}(vp9plus)\) +- brand_replacement: Zync + device_replacement: $1 + model_replacement: $1 + regex: '; ?(Cloud[ _]Z5|z1000|Z99 2G|z99|z930|z999|z990|z909|Z919|z900)(?: Build|\) + AppleWebKit)' +- brand_replacement: Amazon + device_replacement: Kindle Fire + model_replacement: Kindle Fire + regex: ; ?(KFOT|Kindle Fire) Build\b +- brand_replacement: Amazon + device_replacement: Kindle Fire 2 + model_replacement: Kindle Fire 2 + regex: ; ?(KFOTE|Amazon Kindle Fire2) Build\b +- brand_replacement: Amazon + device_replacement: Kindle Fire HD + model_replacement: Kindle Fire HD 7" + regex: ; ?(KFTT) Build\b +- brand_replacement: Amazon + device_replacement: Kindle Fire HD 8.9" WiFi + model_replacement: Kindle Fire HD 8.9" WiFi + regex: ; ?(KFJWI) Build\b +- brand_replacement: Amazon + device_replacement: Kindle Fire HD 8.9" 4G + model_replacement: Kindle Fire HD 8.9" 4G + regex: ; ?(KFJWA) Build\b +- brand_replacement: Amazon + device_replacement: Kindle Fire HD 7" WiFi + model_replacement: Kindle Fire HD 7" WiFi + regex: ; ?(KFSOWI) Build\b +- brand_replacement: Amazon + device_replacement: Kindle Fire HDX 7" WiFi + model_replacement: Kindle Fire HDX 7" WiFi + 
regex: ; ?(KFTHWI) Build\b +- brand_replacement: Amazon + device_replacement: Kindle Fire HDX 7" 4G + model_replacement: Kindle Fire HDX 7" 4G + regex: ; ?(KFTHWA) Build\b +- brand_replacement: Amazon + device_replacement: Kindle Fire HDX 8.9" WiFi + model_replacement: Kindle Fire HDX 8.9" WiFi + regex: ; ?(KFAPWI) Build\b +- brand_replacement: Amazon + device_replacement: Kindle Fire HDX 8.9" 4G + model_replacement: Kindle Fire HDX 8.9" 4G + regex: ; ?(KFAPWA) Build\b +- brand_replacement: Amazon + device_replacement: $1 + model_replacement: $1 + regex: ; ?Amazon ([^;/]{1,100}) Build\b +- brand_replacement: Amazon + device_replacement: Kindle + model_replacement: Kindle + regex: ; ?(Kindle) Build\b +- brand_replacement: Amazon + device_replacement: Kindle Fire + model_replacement: Kindle Fire$2 + regex: ; ?(Silk)/(\d+)\.(\d+)(?:\.([0-9\-]+)|) Build\b +- brand_replacement: Amazon + device_replacement: Kindle + model_replacement: $1 $2 + regex: ' (Kindle)/(\d+\.\d+)' +- brand_replacement: Amazon + device_replacement: Kindle + model_replacement: Kindle + regex: ' (Silk|Kindle)/(\d+)\.' +- brand_replacement: $1 + device_replacement: $1 $2 + model_replacement: $2 + regex: (sprd)\-([^/]{1,50})/ +- brand_replacement: Hero + device_replacement: $1 + model_replacement: $1 + regex: ; {0,2}(H\d{2}00\+?) Build +- brand_replacement: Xianghe + device_replacement: Xianghe $1 + model_replacement: $1 + regex: ; {0,2}(iphone|iPhone5) Build/ +- brand_replacement: Xianghe + device_replacement: Xianghe $1 + model_replacement: $1 + regex: ; {0,2}(e\d{4}[a-z]?_?v\d+|v89_[^;/]{1,100})[^;/]{1,30} Build/ +- brand_replacement: Cellular + device_replacement: $1 + model_replacement: $1 + regex: \bUSCC[_\-]?([^ ;/\)]+) +- brand_replacement: Alcatel + device_replacement: Alcatel $1 + model_replacement: $1 + regex: Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; + ?|Touch; ?|)(?:ALCATEL)[^;]{0,200}; {0,2}([^;,\)]+) +- brand_replacement: Asus + device_replacement: Asus $1 + model_replacement: $1 + regex: Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; + ?|Touch; ?|WpsLondonTest; ?|)(?:ASUS|Asus)[^;]{0,200}; {0,2}([^;,\)]+) +- brand_replacement: Dell + device_replacement: Dell $1 + model_replacement: $1 + regex: Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; + ?|Touch; ?|)(?:DELL|Dell)[^;]{0,200}; {0,2}([^;,\)]+) +- brand_replacement: HTC + device_replacement: HTC $1 + model_replacement: $1 + regex: Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; + ?|Touch; ?|WpsLondonTest; ?|)(?:HTC|Htc|HTC_blocked[^;]{0,200})[^;]{0,200}; {0,2}(?:HTC|)([^;,\)]+) +- brand_replacement: Huawei + device_replacement: Huawei $1 + model_replacement: $1 + regex: Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; + ?|Touch; ?|)(?:HUAWEI)[^;]{0,200}; {0,2}(?:HUAWEI |)([^;,\)]+) +- brand_replacement: LG + device_replacement: LG $1 + model_replacement: $1 + regex: Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; + ?|Touch; ?|)(?:LG|Lg)[^;]{0,200}; {0,2}(?:LG[ \-]|)([^;,\)]+) +- brand_replacement: Nokia + device_replacement: Lumia $1 + model_replacement: Lumia $1 + regex: Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; + ?|Touch; ?|)(?:rv:11; |)(?:NOKIA|Nokia)[^;]{0,200}; {0,2}(?:NOKIA ?|Nokia ?|LUMIA + ?|[Ll]umia ?|)(\d{3,10}[^;\)]*) +- brand_replacement: Nokia + device_replacement: Nokia $1 + model_replacement: $1 + regex: Windows Phone [^;]{1,30}; 
.{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; + ?|Touch; ?|)(?:NOKIA|Nokia)[^;]{0,200}; {0,2}(RM-\d{3,}) +- brand_replacement: Nokia + device_replacement: Nokia $1 + model_replacement: $1 + regex: (?:Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)]|WPDesktop;) ?(?:ARM; + ?Touch; ?|Touch; ?|)(?:NOKIA|Nokia)[^;]{0,200}; {0,2}(?:NOKIA ?|Nokia ?|LUMIA + ?|[Ll]umia ?|)([^;\)]+) +- brand_replacement: Microsoft + device_replacement: Microsoft $1 + model_replacement: $1 + regex: 'Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; + ?|Touch; ?|)(?:Microsoft(?: Corporation|))[^;]{0,200}; {0,2}([^;,\)]+)' +- brand_replacement: Samsung + device_replacement: Samsung $1 + model_replacement: $1 + regex: Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; + ?|Touch; ?|WpsLondonTest; ?|)(?:SAMSUNG)[^;]{0,200}; {0,2}(?:SAMSUNG |)([^;,\.\)]+) +- brand_replacement: Toshiba + device_replacement: Toshiba $1 + model_replacement: $1 + regex: Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; + ?|Touch; ?|WpsLondonTest; ?|)(?:TOSHIBA|FujitsuToshibaMobileCommun)[^;]{0,200}; + {0,2}([^;,\)]+) +- brand_replacement: $1 + device_replacement: $1 $2 + model_replacement: $2 + regex: Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; + ?|Touch; ?|WpsLondonTest; ?|)([^;]{1,200}); {0,2}([^;,\)]+) +- brand_replacement: Samsung + device_replacement: Samsung $1 + model_replacement: $1 + regex: (?:^|; )SAMSUNG\-([A-Za-z0-9\-]{1,50}).{0,200} Bada/ +- brand_replacement: Alcatel + device_replacement: Alcatel $1 $2 $3 + model_replacement: One Touch $3 + regex: \(Mobile; ALCATEL ?(One|ONE) ?(Touch|TOUCH) ?([^;/]{1,100}?)(?:/[^;]{1,200}|); + rv:[^\)]{1,200}\) Gecko/[^\/]{1,200} Firefox/ +- brand_replacement: ZTE + device_replacement: ZTE $1$2 + model_replacement: $1$2 + regex: \(Mobile; (?:ZTE([^;]{1,200})|(OpenC)); rv:[^\)]{1,200}\) Gecko/[^\/]{1,200} + Firefox/ +- brand_replacement: Alcatel + device_replacement: Alcatel $1 + model_replacement: $1 + regex: \(Mobile; ALCATEL([A-Za-z0-9\-]+); rv:[^\)]{1,200}\) Gecko/[^\/]{1,200} Firefox/[^\/]{1,200} + KaiOS/ +- brand_replacement: LYF + device_replacement: LYF $1 + model_replacement: $1 + regex: \(Mobile; LYF\/([A-Za-z0-9\-]{1,100})\/.{0,100};.{0,100}rv:[^\)]{1,100}\) + Gecko/[^\/]{1,100} Firefox/[^\/]{1,100} KAIOS/ +- brand_replacement: Nokia + device_replacement: Nokia $1 + model_replacement: $1 + regex: \(Mobile; Nokia_([A-Za-z0-9\-]{1,100})_.{1,100}; rv:[^\)]{1,100}\) Gecko/[^\/]{1,100} + Firefox/[^\/]{1,100} KAIOS/ +- brand_replacement: Nokia + device_replacement: Nokia $1 + model_replacement: $1$2 + regex: Nokia(N[0-9]+)([A-Za-z_\-][A-Za-z0-9_\-]*) +- brand_replacement: Nokia + device_replacement: Nokia $1$2$3 + model_replacement: $1$2$3 + regex: (?:NOKIA|Nokia)(?:\-| {0,2})(?:([A-Za-z0-9]+)\-[0-9a-f]{32}|([A-Za-z0-9\-]+)(?:UCBrowser)|([A-Za-z0-9\-]+)) +- brand_replacement: Nokia + device_replacement: Lumia $1 + model_replacement: Lumia $1 + regex: Lumia ([A-Za-z0-9\-]+) +- brand_replacement: $1 + device_replacement: $1 $2 + model_replacement: $2 + regex: \(Symbian; U; S60 V5; [A-z]{2}\-[A-z]{2}; (SonyEricsson|Samsung|Nokia|LG)([^;/]{1,100}?)\) +- brand_replacement: Nokia + device_replacement: Nokia $1 + model_replacement: $1 + regex: \(Symbian(?:/3|); U; ([^;]{1,200}); +- brand_replacement: BlackBerry + device_replacement: BlackBerry $1 + model_replacement: $1 + regex: BB10; ([A-Za-z0-9\- ]+)\) +- brand_replacement: BlackBerry + device_replacement: BlackBerry Playbook + 
model_replacement: Playbook + regex: Play[Bb]ook.{1,200}RIM Tablet OS +- brand_replacement: BlackBerry + device_replacement: BlackBerry $1 + model_replacement: $1 + regex: Black[Bb]erry ([0-9]+); +- brand_replacement: BlackBerry + device_replacement: BlackBerry $1 + model_replacement: $1 + regex: Black[Bb]erry([0-9]+) +- brand_replacement: BlackBerry + device_replacement: BlackBerry + model_replacement: $1 + regex: Black[Bb]erry; +- brand_replacement: Palm + device_replacement: Palm $1 + model_replacement: $1 + regex: (Pre|Pixi)/\d+\.\d+ +- brand_replacement: Palm + device_replacement: Palm $1 + model_replacement: $1 + regex: Palm([0-9]+) +- brand_replacement: Palm + device_replacement: Palm Treo $1 + model_replacement: Treo $1 + regex: Treo([A-Za-z0-9]+) +- brand_replacement: HP + device_replacement: HP Veer + model_replacement: Veer + regex: webOS.{0,200}(P160U(?:NA|))/(\d+).(\d+) +- brand_replacement: HP + device_replacement: HP TouchPad + model_replacement: TouchPad + regex: (Touch[Pp]ad)/\d+\.\d+ +- brand_replacement: HP + device_replacement: HP iPAQ $1 + model_replacement: iPAQ $1 + regex: HPiPAQ([A-Za-z0-9]{1,20})/\d+\.\d+ +- brand_replacement: Sony + device_replacement: $1 + model_replacement: $1 $2 + regex: PDA; (PalmOS)/sony/model ([a-z]+)/Revision +- brand_replacement: Apple + device_replacement: AppleTV + model_replacement: AppleTV + regex: (Apple\s?TV) +- brand_replacement: Tesla + device_replacement: Tesla Model S + model_replacement: Model S + regex: (QtCarBrowser) +- brand_replacement: Apple + device_replacement: $1 + model_replacement: $1$2 + regex: (iPhone|iPad|iPod)(\d+,\d+) +- brand_replacement: Apple + device_replacement: $1 + model_replacement: $1 + regex: (iPad)(?:;| Simulator;) +- brand_replacement: Apple + device_replacement: $1 + model_replacement: $1 + regex: (iPod)(?:;| touch;| Simulator;) +- brand_replacement: Apple + device_replacement: $1 + model_replacement: $1 + regex: (iPhone)(?:;| Simulator;) +- brand_replacement: Apple + device_replacement: Apple $1 + model_replacement: $1$2 + regex: (Watch)(\d+,\d+) +- brand_replacement: Apple + device_replacement: $1 + model_replacement: $1 + regex: (Apple Watch)(?:;| Simulator;) +- brand_replacement: Apple + device_replacement: $1 + model_replacement: $1 + regex: (HomePod)(?:;| Simulator;) +- brand_replacement: Apple + device_replacement: iPhone + model_replacement: iPhone + regex: iPhone +- brand_replacement: Apple + device_replacement: $1$2,$3 + model_replacement: $1$2,$3 + regex: CFNetwork/.{0,100} Darwin/\d.{0,100}\(((?:Mac|iMac|PowerMac|PowerBook)[^\d]*)(\d+)(?:,|%2C)(\d+) +- brand_replacement: Apple + device_replacement: Mac + model_replacement: Mac + regex: CFNetwork/.{0,100} Darwin/\d+\.\d+\.\d+ \(x86_64\) +- brand_replacement: Apple + device_replacement: iOS-Device + model_replacement: iOS-Device + regex: CFNetwork/.{0,100} Darwin/\d +- brand_replacement: Apple + device_replacement: iPhone + model_replacement: iPhone + regex: Outlook-(iOS)/\d+\.\d+\.prod\.iphone +- brand_replacement: Acer + device_replacement: Acer $1 + model_replacement: $1 + regex: acer_([A-Za-z0-9]+)_ +- brand_replacement: Alcatel + device_replacement: Alcatel $1 + model_replacement: $1 + regex: (?:ALCATEL|Alcatel)-([A-Za-z0-9\-]+) +- brand_replacement: Amoi + device_replacement: Amoi $1 + model_replacement: $1 + regex: (?:Amoi|AMOI)\-([A-Za-z0-9]+) +- brand_replacement: Asus + device_replacement: Asus $1 + model_replacement: $1 + regex: (?:; |\/|^)((?:Transformer (?:Pad|Prime) |Transformer |PadFone[ _]?)[A-Za-z0-9]*) +- brand_replacement: 
Asus + device_replacement: Asus $1 + model_replacement: $1 + regex: (?:asus.{0,200}?ASUS|Asus|ASUS|asus)[\- ;]*((?:Transformer (?:Pad|Prime) + |Transformer |Padfone |Nexus[ _]|)[A-Za-z0-9]+) +- brand_replacement: Asus + device_replacement: Asus $1 + model_replacement: $1 + regex: (?:ASUS)_([A-Za-z0-9\-]+) +- brand_replacement: Bird + device_replacement: Bird $1 + model_replacement: $1 + regex: \bBIRD[ \-\.]([A-Za-z0-9]+) +- brand_replacement: Dell + device_replacement: Dell $1 + model_replacement: $1 + regex: \bDell ([A-Za-z0-9]+) +- brand_replacement: DoCoMo + device_replacement: DoCoMo $1 + model_replacement: $1 + regex: DoCoMo/2\.0 ([A-Za-z0-9]+) +- brand_replacement: DoCoMo + device_replacement: DoCoMo $1 + model_replacement: $1 + regex: ^.{0,50}?([A-Za-z0-9]{1,30})_W;FOMA +- brand_replacement: DoCoMo + device_replacement: DoCoMo $1 + model_replacement: $1 + regex: ^.{0,50}?([A-Za-z0-9]{1,30});FOMA +- brand_replacement: HTC + device_replacement: HTC $1 + model_replacement: $1 + regex: \b(?:HTC/|HTC/[a-z0-9]{1,20}/|)HTC[ _\-;]? {0,2}(.{0,200}?)(?:-?Mozilla|fingerPrint|[;/\(\)]|$) +- brand_replacement: Huawei + device_replacement: Huawei $1 + model_replacement: $1 + regex: Huawei([A-Za-z0-9]+) +- brand_replacement: Huawei + device_replacement: Huawei $1 + model_replacement: $1 + regex: HUAWEI-([A-Za-z0-9]+) +- brand_replacement: Huawei + device_replacement: Huawei $1 + model_replacement: $1 + regex: HUAWEI ([A-Za-z0-9\-]+) +- brand_replacement: Huawei + device_replacement: Huawei Vodafone $1 + model_replacement: Vodafone $1 + regex: vodafone([A-Za-z0-9]+) +- brand_replacement: i-mate + device_replacement: i-mate $1 + model_replacement: $1 + regex: i\-mate ([A-Za-z0-9]+) +- brand_replacement: Kyocera + device_replacement: Kyocera $1 + model_replacement: $1 + regex: Kyocera\-([A-Za-z0-9]+) +- brand_replacement: Kyocera + device_replacement: Kyocera $1 + model_replacement: $1 + regex: KWC\-([A-Za-z0-9]+) +- brand_replacement: Lenovo + device_replacement: Lenovo $1 + model_replacement: $1 + regex: Lenovo[_\-]([A-Za-z0-9]+) +- brand_replacement: $2 + device_replacement: $1 + model_replacement: $3 + regex: (HbbTV)/[0-9]+\.[0-9]+\.[0-9]+ \( ?;(LG)E ?;([^;]{0,30}) +- brand_replacement: Thomson + device_replacement: $1 + model_replacement: $4 + regex: (HbbTV)/1\.1\.1.{0,200}CE-HTML/1\.\d;(Vendor/|)(THOM[^;]{0,200}?)[;\s].{0,30}(LF[^;]{1,200});? 
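For reference, device entries like the ones above follow the uap-core evaluation model: rules are tried top to bottom, the first regex that matches the user agent wins, and the $1-$9 placeholders in the brand/device/model *_replacement templates are filled from that regex's capture groups — which is why a custom substitution step is needed instead of Python's native \1 backreference syntax. Below is a minimal sketch of that loop, assuming PyYAML and a device.yaml path alongside the os.yaml added later in this patch (the device file's actual name sits outside this hunk); the '$1' fallback is a simplification, since every entry in this file carries explicit replacement templates.

import re
import yaml

def expand(template, match):
    # Fill $1..$9 from capture groups; groups that are missing or did
    # not participate in the match become the empty string.
    def repl(m):
        try:
            return match.group(int(m.group(1))) or ''
        except IndexError:
            return ''
    return re.sub(r'\$(\d)', repl, template).strip()

def parse_device(ua, rules):
    for rule in rules:  # first matching rule wins
        flags = re.IGNORECASE if rule.get('regex_flag') == 'i' else 0
        m = re.search(rule['regex'], ua, flags)
        if m is None:
            continue
        # '$1' fallback is a simplification of uap-core's defaults; the
        # entries in this file always define all three templates.
        return {key: expand(rule.get(key + '_replacement') or '$1', m)
                for key in ('brand', 'device', 'model')}
    return {'brand': '', 'device': 'Other', 'model': ''}

# Hypothetical usage (path assumed, not confirmed by this hunk):
with open('tests/queries/0_stateless/data_ua_parser/device.yaml') as f:
    rules = yaml.safe_load(f)

ua = ('Mozilla/5.0 (Linux; U; Android 4.0.3; en-us; KFTT Build/IML74K) '
      'AppleWebKit/535.19 (KHTML, like Gecko) Silk/3.4 Mobile Safari/535.19')
print(parse_device(ua, rules))
# The '; ?(KFTT) Build\b' rule above should yield:
# {'brand': 'Amazon', 'device': 'Kindle Fire HD', 'model': 'Kindle Fire HD 7"'}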
+- brand_replacement: $2 + device_replacement: $1 + model_replacement: $3 + regex: '(HbbTV)(?:/1\.1\.1|) ?(?: \(;;;;;\)|); {0,2}CE-HTML(?:/1\.\d|); {0,2}([^ + ]{1,30}) ([^;]{1,200});' +- brand_replacement: Samsung + device_replacement: $1 + model_replacement: $1 + regex: (HbbTV)/1\.1\.1 \(;;;;;\) Maple_2011 +- brand_replacement: $2$3 + device_replacement: $1 + model_replacement: $4 + regex: (HbbTV)/[0-9]+\.[0-9]+\.[0-9]+ \([^;]{0,30}; ?(?:CUS:([^;]{0,200})|([^;]{1,200})) + ?; ?([^;]{0,30}) +- device_replacement: $1 + model_replacement: $1 + regex: (HbbTV)/[0-9]+\.[0-9]+\.[0-9]+ +- brand_replacement: LG + device_replacement: NetCast$2 + model_replacement: $1 + regex: LGE; (?:Media\/|)([^;]{0,200});[^;]{0,200};[^;]{0,200};?\); "?LG NetCast(\.TV|\.Media|)-\d+ +- brand_replacement: $1 + device_replacement: Inettv + model_replacement: $2 + regex: InettvBrowser/[0-9]{1,30}\.[0-9A-Z]{1,30} \([^;]{0,200};(Sony)([^;]{0,200});[^;]{0,200};[^\)]{0,10}\) +- brand_replacement: Generic_Inettv + device_replacement: Inettv + model_replacement: $1 + regex: InettvBrowser/[0-9]{1,30}\.[0-9A-Z]{1,30} \([^;]{0,200};([^;]{0,200});[^;]{0,200};[^\)]{0,10}\) +- brand_replacement: Generic_Inettv + device_replacement: Inettv + model_replacement: $1 + regex: (?:InettvBrowser|TSBNetTV|NETTV|HBBTV) +- brand_replacement: $1 + device_replacement: $1 $2 + model_replacement: $2 + regex: Series60/\d\.\d (LG)[\-]?([A-Za-z0-9 \-]+) +- brand_replacement: LG + device_replacement: LG $1 + model_replacement: $1 + regex: \b(?:LGE[ \-]LG\-(?:AX|)|LGE |LGE?-LG|LGE?[ \-]|LG[ /\-]|lg[\-])([A-Za-z0-9]+)\b +- brand_replacement: LG + device_replacement: LG $1 + model_replacement: $1 + regex: (?:^LG[\-]?|^LGE[\-/]?)([A-Za-z]+[0-9]+[A-Za-z]*) +- brand_replacement: LG + device_replacement: LG $1 + model_replacement: $1 + regex: ^LG([0-9]+[A-Za-z]*) +- brand_replacement: Microsoft + device_replacement: Microsoft $1 + model_replacement: $1 + regex: (KIN\.[^ ]+) (\d+)\.(\d+) +- brand_replacement: Microsoft + device_replacement: $1 + model_replacement: $1 + regex: (?:MSIE|XBMC).{0,200}\b(Xbox)\b +- brand_replacement: Microsoft + device_replacement: Microsoft Surface RT + model_replacement: Surface RT + regex: ; ARM; Trident/6\.0; Touch[\);] +- brand_replacement: Motorola + device_replacement: Motorola $1 + model_replacement: $1 + regex: Motorola\-([A-Za-z0-9]+) +- brand_replacement: Motorola + device_replacement: Motorola $1 + model_replacement: $1 + regex: MOTO\-([A-Za-z0-9]+) +- brand_replacement: Motorola + device_replacement: Motorola $1 + model_replacement: $1 + regex: MOT\-([A-z0-9][A-z0-9\-]*) +- brand_replacement: Nintendo + device_replacement: Nintendo Wii U + model_replacement: Wii U + regex: Nintendo WiiU +- brand_replacement: Nintendo + device_replacement: Nintendo $1 + model_replacement: $1 + regex: Nintendo (DS|3DS|DSi|Wii); +- brand_replacement: Pantech + device_replacement: Pantech $1 + model_replacement: $1 + regex: (?:Pantech|PANTECH)[ _-]?([A-Za-z0-9\-]+) +- brand_replacement: Philips + device_replacement: Philips $1 + model_replacement: $1 + regex: Philips([A-Za-z0-9]+) +- brand_replacement: Philips + device_replacement: Philips $1 + model_replacement: $1 + regex: Philips ([A-Za-z0-9]+) +- brand_replacement: Samsung + device_replacement: Samsung $1 + model_replacement: $1 + regex: '(SMART-TV); .{0,200} Tizen ' +- brand_replacement: Samsung + device_replacement: Samsung $1 + model_replacement: $1 + regex: SymbianOS/9\.\d.{0,200} Samsung[/\-]([A-Za-z0-9 \-]+) +- brand_replacement: $1 + device_replacement: $1 $2$3 + 
model_replacement: $2-$3 + regex: (Samsung)(SGH)(i[0-9]+) +- brand_replacement: Samsung + device_replacement: $1 + model_replacement: $1 + regex: SAMSUNG-ANDROID-MMS/([^;/]{1,100}) +- brand_replacement: Samsung + device_replacement: Samsung $1 + model_replacement: $1 + regex: SAMSUNG(?:; |[ -/])([A-Za-z0-9\-]+) + regex_flag: i +- brand_replacement: Sega + device_replacement: Sega $1 + model_replacement: $1 + regex: (Dreamcast) +- brand_replacement: Siemens + device_replacement: Siemens $1 + model_replacement: $1 + regex: ^SIE-([A-Za-z0-9]+) +- brand_replacement: Softbank + device_replacement: Softbank $1 + model_replacement: $1 + regex: Softbank/[12]\.0/([A-Za-z0-9]+) +- brand_replacement: SonyEricsson + device_replacement: Ericsson $1 + model_replacement: $1 + regex: SonyEricsson ?([A-Za-z0-9\-]+) +- brand_replacement: $2 + device_replacement: $2 $1 + model_replacement: $1 + regex: Android [^;]{1,200}; ([^ ]+) (Sony)/ +- brand_replacement: $1 + device_replacement: $1 $2 + model_replacement: $2 + regex: (Sony)(?:BDP\/|\/|)([^ /;\)]+)[ /;\)] +- brand_replacement: Apple + device_replacement: iPad + model_replacement: iPad + regex: Puffin/[\d\.]+IT +- brand_replacement: Apple + device_replacement: iPhone + model_replacement: iPhone + regex: Puffin/[\d\.]+IP +- brand_replacement: Generic + device_replacement: Generic Tablet + model_replacement: Tablet + regex: Puffin/[\d\.]+AT +- brand_replacement: Generic + device_replacement: Generic Smartphone + model_replacement: Smartphone + regex: Puffin/[\d\.]+AP +- brand_replacement: Generic_Android + device_replacement: $1 + model_replacement: $1 + regex: Android[\- ][\d]+\.[\d]+; [A-Za-z]{2}\-[A-Za-z]{0,2}; WOWMobile (.{1,200})( + Build[/ ]|\)) +- brand_replacement: Generic_Android + device_replacement: $1 + model_replacement: $1 + regex: Android[\- ][\d]+\.[\d]+\-update1; [A-Za-z]{2}\-[A-Za-z]{0,2} {0,2}; {0,2}(.{1,200}?)( + Build[/ ]|\)) +- brand_replacement: Generic_Android + device_replacement: $1 + model_replacement: $1 + regex: Android[\- ][\d]+(?:\.[\d]+)(?:\.[\d]+|); {0,2}[A-Za-z]{2}[_\-][A-Za-z]{0,2}\-? + {0,2}; {0,2}(.{1,200}?)( Build[/ ]|\)) +- brand_replacement: Generic_Android + device_replacement: $1 + model_replacement: $1 + regex: Android[\- ][\d]+(?:\.[\d]+)(?:\.[\d]+|); {0,2}[A-Za-z]{0,2}\- {0,2}; {0,2}(.{1,200}?)( + Build[/ ]|\)) +- brand_replacement: Generic + device_replacement: Generic Smartphone + model_replacement: Smartphone + regex: Android[\- ][\d]+(?:\.[\d]+)(?:\.[\d]+|); {0,2}[a-z]{0,2}[_\-]?[A-Za-z]{0,2};?( + Build[/ ]|\)) +- brand_replacement: Generic_Android + device_replacement: $1 + model_replacement: $1 + regex: Android[\- ][\d]+(?:\.[\d]+)(?:\.[\d]+|); {0,3}\-?[A-Za-z]{2}; {0,2}(.{1,50}?)( + Build[/ ]|\)) +- brand_replacement: Generic_Android + device_replacement: $1 + model_replacement: $1 + regex: 'Android \d+?(?:\.\d+|)(?:\.\d+|); ([^;]{1,100}?)(?: Build|\) AppleWebKit).{1,200}? + Mobile Safari' +- brand_replacement: Generic_Android_Tablet + device_replacement: $1 + model_replacement: $1 + regex: 'Android \d+?(?:\.\d+|)(?:\.\d+|); ([^;]{1,100}?)(?: Build|\) AppleWebKit).{1,200}? 
+ Safari' +- brand_replacement: Generic_Android + device_replacement: $1 + model_replacement: $1 + regex: 'Android \d+?(?:\.\d+|)(?:\.\d+|); ([^;]{1,100}?)(?: Build|\))' +- brand_replacement: Generic_Inettv + device_replacement: $1 + model_replacement: $1 + regex: (GoogleTV) +- brand_replacement: Generic_Inettv + device_replacement: $1 + model_replacement: $1 + regex: (WebTV)/\d+.\d+ +- brand_replacement: Generic_Inettv + device_replacement: $1 + model_replacement: $1 + regex: ^(Roku)/DVP-\d+\.\d+ +- brand_replacement: Generic + device_replacement: Generic Tablet + model_replacement: Tablet + regex: (Android 3\.\d|Opera Tablet|Tablet; .{1,100}Firefox/|Android.{0,100}(?:Tab|Pad)) + regex_flag: i +- brand_replacement: Generic + device_replacement: Generic Smartphone + model_replacement: Smartphone + regex: (Symbian|\bS60(Version|V\d)|\bS60\b|\((Series 60|Windows Mobile|Palm OS|Bada); + Opera Mini|Windows CE|Opera Mobi|BREW|Brew|Mobile; .{1,200}Firefox/|iPhone OS|Android|MobileSafari|Windows + {0,2}Phone|\(webOS/|PalmOS) +- brand_replacement: Generic + device_replacement: Generic Smartphone + model_replacement: Smartphone + regex: (hiptop|avantgo|plucker|xiino|blazer|elaine) + regex_flag: i +- brand_replacement: Spider + device_replacement: Spider + model_replacement: Desktop + regex: ^.{0,100}(bot|BUbiNG|zao|borg|DBot|oegp|silk|Xenu|zeal|^NING|CCBot|crawl|htdig|lycos|slurp|teoma|voila|yahoo|Sogou|CiBra|Nutch|^Java/|^JNLP/|Daumoa|Daum|Genieo|ichiro|larbin|pompos|Scrapy|snappy|speedy|spider|msnbot|msrbot|vortex|^vortex|crawler|favicon|indexer|Riddler|scooter|scraper|scrubby|WhatWeb|WinHTTP|bingbot|BingPreview|openbot|gigabot|furlbot|polybot|seekbot|^voyager|archiver|Icarus6j|mogimogi|Netvibes|blitzbot|altavista|charlotte|findlinks|Retreiver|TLSProber|WordPress|SeznamBot|ProoXiBot|wsr\-agent|Squrl + Java|EtaoSpider|PaperLiBot|SputnikBot|A6\-Indexer|netresearch|searchsight|baiduspider|YisouSpider|ICC\-Crawler|http%20client|Python-urllib|dataparksearch|converacrawler|Screaming + Frog|AppEngine-Google|YahooCacheSystem|fast\-webcrawler|Sogou Pic Spider|semanticdiscovery|Innovazion + Crawler|facebookexternalhit|Google.{0,200}/\+/web/snippet|Google-HTTP-Java-Client|BlogBridge|IlTrovatore-Setaccio|InternetArchive|GomezAgent|WebThumbnail|heritrix|NewsGator|PagePeeker|Reaper|ZooShot|holmes|NL-Crawler|Pingdom|StatusCake|WhatsApp|masscan|Google + Web Preview|Qwantify|Yeti|OgScrper) + regex_flag: i +- brand_replacement: Generic + device_replacement: Generic Feature Phone + model_replacement: Feature Phone + regex: ^(1207|3gso|4thp|501i|502i|503i|504i|505i|506i|6310|6590|770s|802s|a wa|acer|acs\-|airn|alav|asus|attw|au\-m|aur + |aus |abac|acoo|aiko|alco|alca|amoi|anex|anny|anyw|aptu|arch|argo|bmobile|bell|bird|bw\-n|bw\-u|beck|benq|bilb|blac|c55/|cdm\-|chtm|capi|comp|cond|dall|dbte|dc\-s|dica|ds\-d|ds12|dait|devi|dmob|doco|dopo|dorado|el(?:38|39|48|49|50|55|58|68)|el[3456]\d{2}dual|erk0|esl8|ex300|ez40|ez60|ez70|ezos|ezze|elai|emul|eric|ezwa|fake|fly\-|fly_|g\-mo|g1 + u|g560|gf\-5|grun|gene|go.w|good|grad|hcit|hd\-m|hd\-p|hd\-t|hei\-|hp i|hpip|hs\-c|htc + |htc\-|htca|htcg) + regex_flag: i +- brand_replacement: Generic + device_replacement: Generic Feature Phone + model_replacement: Feature Phone + regex: ^(htcp|htcs|htct|htc_|haie|hita|huaw|hutc|i\-20|i\-go|i\-ma|i\-mobile|i230|iac|iac\-|iac/|ig01|im1k|inno|iris|jata|kddi|kgt|kgt/|kpt + |kwc\-|klon|lexi|lg 
g|lg\-a|lg\-b|lg\-c|lg\-d|lg\-f|lg\-g|lg\-k|lg\-l|lg\-m|lg\-o|lg\-p|lg\-s|lg\-t|lg\-u|lg\-w|lg/k|lg/l|lg/u|lg50|lg54|lge\-|lge/|leno|m1\-w|m3ga|m50/|maui|mc01|mc21|mcca|medi|meri|mio8|mioa|mo01|mo02|mode|modo|mot + |mot\-|mt50|mtp1|mtv |mate|maxo|merc|mits|mobi|motv|mozz|n100|n101|n102|n202|n203|n300|n302|n500|n502|n505|n700|n701|n710|nec\-|nem\-|newg|neon) + regex_flag: i +- brand_replacement: Generic + device_replacement: Generic Feature Phone + model_replacement: Feature Phone + regex: ^(netf|noki|nzph|o2 x|o2\-x|opwv|owg1|opti|oran|ot\-s|p800|pand|pg\-1|pg\-2|pg\-3|pg\-6|pg\-8|pg\-c|pg13|phil|pn\-2|pt\-g|palm|pana|pire|pock|pose|psio|qa\-a|qc\-2|qc\-3|qc\-5|qc\-7|qc07|qc12|qc21|qc32|qc60|qci\-|qwap|qtek|r380|r600|raks|rim9|rove|s55/|sage|sams|sc01|sch\-|scp\-|sdk/|se47|sec\-|sec0|sec1|semc|sgh\-|shar|sie\-|sk\-0|sl45|slid|smb3|smt5|sp01|sph\-|spv + |spv\-|sy01|samm|sany|sava|scoo|send|siem|smar|smit|soft|sony|t\-mo|t218|t250|t600|t610|t618|tcl\-|tdg\-|telm|tim\-|ts70|tsm\-|tsm3|tsm5|tx\-9|tagt) + regex_flag: i +- brand_replacement: Generic + device_replacement: Generic Feature Phone + model_replacement: Feature Phone + regex: ^(talk|teli|topl|tosh|up.b|upg1|utst|v400|v750|veri|vk\-v|vk40|vk50|vk52|vk53|vm40|vx98|virg|vertu|vite|voda|vulc|w3c + |w3c\-|wapj|wapp|wapu|wapm|wig |wapi|wapr|wapv|wapy|wapa|waps|wapt|winc|winw|wonu|x700|xda2|xdag|yas\-|your|zte\-|zeto|aste|audi|avan|blaz|brew|brvw|bumb|ccwa|cell|cldc|cmd\-|dang|eml2|fetc|hipt|http|ibro|idea|ikom|ipaq|jbro|jemu|jigs|keji|kyoc|kyok|libw|m\-cr|midp|mmef|moto|mwbp|mywa|newt|nok6|o2im|pant|pdxg|play|pluc|port|prox|rozo|sama|seri|smal|symb|treo|upsi|vx52|vx53|vx60|vx61|vx70|vx80|vx81|vx83|vx85|wap\-|webc|whit|wmlb|xda\-|xda_) + regex_flag: i +- brand_replacement: Generic + device_replacement: Generic Feature Phone + model_replacement: Feature Phone + regex: ^(Ice)$ +- brand_replacement: Generic + device_replacement: Generic Feature Phone + model_replacement: Feature Phone + regex: (wap[\-\ ]browser|maui|netfront|obigo|teleca|up\.browser|midp|Opera Mini) + regex_flag: i +- brand_replacement: Apple + device_replacement: Mac + model_replacement: Mac + regex: Mac OS diff --git a/tests/queries/0_stateless/data_ua_parser/os.yaml b/tests/queries/0_stateless/data_ua_parser/os.yaml new file mode 100644 index 00000000000..13394fd50e2 --- /dev/null +++ b/tests/queries/0_stateless/data_ua_parser/os.yaml @@ -0,0 +1,1075 @@ +- os_replacement: $1 + os_v1_replacement: '2013' + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: HbbTV/\d+\.\d+\.\d+ \( ;(LG)E ;NetCast 4.0 +- os_replacement: $1 + os_v1_replacement: '2012' + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: HbbTV/\d+\.\d+\.\d+ \( ;(LG)E ;NetCast 3.0 +- os_replacement: Samsung + os_v1_replacement: '2011' + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: HbbTV/1.1.1 \(;;;;;\) Maple_2011 +- os_replacement: $1 + os_v1_replacement: $2 + os_v2_replacement: UE40F7000 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: HbbTV/\d+\.\d+\.\d+ \(;(Samsung);SmartTV([0-9]{4});.{0,200}FXPDEUC +- os_replacement: $1 + os_v1_replacement: $2 + os_v2_replacement: UE32F4500 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: HbbTV/\d+\.\d+\.\d+ \(;(Samsung);SmartTV([0-9]{4});.{0,200}MST12DEUC +- os_replacement: $1 + os_v1_replacement: '2013' + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: HbbTV/1\.1\.1 \(; (Philips);.{0,200}NETTV/4 +- os_replacement: $1 + os_v1_replacement: 
'2012' + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: HbbTV/1\.1\.1 \(; (Philips);.{0,200}NETTV/3 +- os_replacement: $1 + os_v1_replacement: '2011' + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: HbbTV/1\.1\.1 \(; (Philips);.{0,200}NETTV/2 +- os_replacement: FireHbbTV + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: HbbTV/\d+\.\d+\.\d+.{0,100}(firetv)-firefox-plugin (\d+).(\d+).(\d+) +- os_replacement: $1 + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: HbbTV/\d+\.\d+\.\d+ \(.{0,30}; ?([a-zA-Z]+) ?;.{0,30}(201[1-9]).{0,30}\) +- os_replacement: Other + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: AspiegelBot +- os_replacement: $1 + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (Windows Phone) (?:OS[ /])?(\d+)\.(\d+) +- os_replacement: iOS + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (CPU[ +]OS|iPhone[ +]OS|CPU[ +]iPhone)[ +]+(\d+)[_\.](\d+)(?:[_\.](\d+)|).{0,100}Outlook-iOS-Android +- os_replacement: $1 + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: ArcGIS\.?(iOS|Android)-\d+\.\d+(?:\.\d+|)(?:[^\/]{1,50}|)\/(\d+)(?:\.(\d+)(?:\.(\d+)|)|) +- os_replacement: $1 + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: ArcGISRuntime-(?:Android|iOS)\/\d+\.\d+(?:\.\d+|) \((Android|iOS) (\d+)(?:\.(\d+)(?:\.(\d+)|)|); +- os_replacement: $1 + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (Android)[ \-/](\d+)(?:\.(\d+)|)(?:[.\-]([a-z0-9]+)|) +- os_replacement: $1 + os_v1_replacement: '1' + os_v2_replacement: '2' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (Android) Donut +- os_replacement: $1 + os_v1_replacement: '2' + os_v2_replacement: '1' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (Android) Eclair +- os_replacement: $1 + os_v1_replacement: '2' + os_v2_replacement: '2' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (Android) Froyo +- os_replacement: $1 + os_v1_replacement: '2' + os_v2_replacement: '3' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (Android) Gingerbread +- os_replacement: $1 + os_v1_replacement: '3' + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (Android) Honeycomb +- os_replacement: $1 + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (Android) (\d+); +- os_replacement: Android + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: ^UCWEB.{0,200}; (Adr) (\d+)\.(\d+)(?:[.\-]([a-z0-9]{1,100})|); +- os_replacement: iOS + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: ^UCWEB.{0,200}; (iPad|iPh|iPd) OS (\d+)_(\d+)(?:_(\d+)|); +- os_replacement: Windows Phone + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: ^UCWEB.{0,200}; (wds) (\d+)\.(\d+)(?:\.(\d+)|); +- os_replacement: Android + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: ^(JUC).{0,200}; ?U; ?(?:Android|)(\d+)\.(\d+)(?:[\.\-]([a-z0-9]{1,100})|) +- os_replacement: Android + 
os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (android)\s(?:mobile\/)(\d+)(?:\.(\d+)(?:\.(\d+)|)|) +- os_replacement: Android + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (Silk-Accelerated=[a-z]{4,5}) +- os_replacement: Chrome OS + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (x86_64|aarch64)\ (\d+)\.(\d+)\.(\d+).{0,100}Chrome.{0,100}(?:CitrixChromeApp)$ +- os_replacement: Windows Phone + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (XBLWP7) +- os_replacement: Windows Mobile + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (Windows ?Mobile) +- os_replacement: Windows + os_v1_replacement: '10' + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (Windows 10) +- os_replacement: Windows + os_v1_replacement: XP + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (Windows (?:NT 5\.2|NT 5\.1)) +- os_replacement: Windows + os_v1_replacement: '7' + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (Win(?:dows NT |32NT\/)6\.1) +- os_replacement: Windows + os_v1_replacement: Vista + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (Win(?:dows NT |32NT\/)6\.0) +- os_replacement: Windows + os_v1_replacement: ME + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (Win 9x 4\.90) +- os_replacement: Windows + os_v1_replacement: RT + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (Windows NT 6\.2; ARM;) +- os_replacement: Windows + os_v1_replacement: '8' + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (Win(?:dows NT |32NT\/)6\.2) +- os_replacement: Windows + os_v1_replacement: RT 8 + os_v2_replacement: '1' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (Windows NT 6\.3; ARM;) +- os_replacement: Windows + os_v1_replacement: '8' + os_v2_replacement: '1' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (Win(?:dows NT |32NT\/)6\.3) +- os_replacement: Windows + os_v1_replacement: '10' + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (Win(?:dows NT |32NT\/)6\.4) +- os_replacement: Windows + os_v1_replacement: '10' + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (Windows NT 10\.0) +- os_replacement: Windows + os_v1_replacement: '2000' + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (Windows NT 5\.0) +- os_replacement: Windows + os_v1_replacement: NT 4.0 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (WinNT4.0) +- os_replacement: Windows + os_v1_replacement: CE + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (Windows ?CE) +- os_replacement: Windows + os_v1_replacement: $1 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: Win(?:dows)? 
?(95|98|3.1|NT|ME|2000|XP|Vista|7|CE) +- os_replacement: Windows + os_v1_replacement: '3.1' + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: Win16 +- os_replacement: Windows + os_v1_replacement: '95' + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: Win32 +- os_replacement: Windows + os_v1_replacement: $1 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: ^Box.{0,200}Windows/([\d.]+); +- os_replacement: $1 + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (Tizen)[/ ](\d+)\.(\d+) +- os_replacement: Mac OS X + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: ((?:Mac[ +]?|; )OS[ +]X)[\s+/](?:(\d+)[_.](\d+)(?:[_.](\d+)|)|Mach-O) +- os_replacement: Mac OS X + os_v1_replacement: $1 + os_v2_replacement: $2 + os_v3_replacement: $3 + os_v4_replacement: $5 + regex: Mac OS X\s.{1,50}\s(\d+).(\d+).(\d+) +- os_replacement: Mac OS X + os_v1_replacement: '10' + os_v2_replacement: '5' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: ' (Dar)(win)/(9).(\d+).{0,100}\((?:i386|x86_64|Power Macintosh)\)' +- os_replacement: Mac OS X + os_v1_replacement: '10' + os_v2_replacement: '6' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: ' (Dar)(win)/(10).(\d+).{0,100}\((?:i386|x86_64)\)' +- os_replacement: Mac OS X + os_v1_replacement: '10' + os_v2_replacement: '7' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: ' (Dar)(win)/(11).(\d+).{0,100}\((?:i386|x86_64)\)' +- os_replacement: Mac OS X + os_v1_replacement: '10' + os_v2_replacement: '8' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: ' (Dar)(win)/(12).(\d+).{0,100}\((?:i386|x86_64)\)' +- os_replacement: Mac OS X + os_v1_replacement: '10' + os_v2_replacement: '9' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: ' (Dar)(win)/(13).(\d+).{0,100}\((?:i386|x86_64)\)' +- os_replacement: Mac OS + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: Mac_PowerPC +- os_replacement: $1 + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (?:PPC|Intel) (Mac OS X) +- os_replacement: Mac OS X + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: ^Box.{0,200};(Darwin)/(10)\.(1\d)(?:\.(\d+)|) +- os_replacement: ATV OS X + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (Apple\s?TV)(?:/(\d+)\.(\d+)|) +- os_replacement: iOS + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (CPU[ +]OS|iPhone[ +]OS|CPU[ +]iPhone|CPU IPhone OS|CPU iPad OS)[ +]+(\d+)[_\.](\d+)(?:[_\.](\d+)|) +- os_replacement: iOS + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (iPhone|iPad|iPod); Opera +- os_replacement: iOS + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (iPhone|iPad|iPod).{0,100}Mac OS X.{0,100}Version/(\d+)\.(\d+) +- os_replacement: iOS + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (CFNetwork)/(5)48\.0\.3.{0,100} Darwin/11\.0\.0 +- os_replacement: iOS + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (CFNetwork)/(5)48\.(0)\.4.{0,100} Darwin/(1)1\.0\.0 +- os_replacement: iOS + 
os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (CFNetwork)/(5)48\.(1)\.4 +- os_replacement: iOS + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (CFNetwork)/(4)85\.1(3)\.9 +- os_replacement: iOS + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (CFNetwork)/(6)09\.(1)\.4 +- os_replacement: iOS + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (CFNetwork)/(6)(0)9 +- os_replacement: iOS + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (CFNetwork)/6(7)2\.(1)\.13 +- os_replacement: iOS + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (CFNetwork)/6(7)2\.(1)\.(1)4 +- os_replacement: iOS + os_v1_replacement: '7' + os_v2_replacement: '1' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (CF)(Network)/6(7)(2)\.1\.15 +- os_replacement: iOS + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (CFNetwork)/6(7)2\.(0)\.(?:2|8) +- os_replacement: iOS + os_v1_replacement: '8' + os_v2_replacement: 0.b5 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (CFNetwork)/709\.1 +- os_replacement: iOS + os_v1_replacement: '8' + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (CF)(Network)/711\.(\d) +- os_replacement: Mac OS X + os_v1_replacement: '10' + os_v2_replacement: '10' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (CF)(Network)/(720)\.(\d) +- os_replacement: Mac OS X + os_v1_replacement: '10' + os_v2_replacement: '11' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (CF)(Network)/(760)\.(\d) +- os_replacement: iOS + os_v1_replacement: '9' + os_v2_replacement: '3' + os_v3_replacement: '1' + os_v4_replacement: $5 + regex: CFNetwork/7.{0,100} Darwin/15\.4\.\d+ +- os_replacement: iOS + os_v1_replacement: '9' + os_v2_replacement: '3' + os_v3_replacement: '2' + os_v4_replacement: $5 + regex: CFNetwork/7.{0,100} Darwin/15\.5\.\d+ +- os_replacement: iOS + os_v1_replacement: '9' + os_v2_replacement: '3' + os_v3_replacement: '5' + os_v4_replacement: $5 + regex: CFNetwork/7.{0,100} Darwin/15\.6\.\d+ +- os_replacement: iOS + os_v1_replacement: '9' + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (CF)(Network)/758\.(\d) +- os_replacement: iOS + os_v1_replacement: '10' + os_v2_replacement: '2' + os_v3_replacement: '1' + os_v4_replacement: $5 + regex: CFNetwork/808\.3 Darwin/16\.3\.\d+ +- os_replacement: iOS + os_v1_replacement: '10' + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (CF)(Network)/808\.(\d) +- os_replacement: Mac OS X + os_v1_replacement: '10' + os_v2_replacement: '13' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: CFNetwork/.{0,100} Darwin/17\.\d+.{0,100}\(x86_64\) +- os_replacement: Mac OS X + os_v1_replacement: '10' + os_v2_replacement: '12' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: CFNetwork/.{0,100} Darwin/16\.\d+.{0,100}\(x86_64\) +- os_replacement: Mac OS X + os_v1_replacement: '10' + os_v2_replacement: '11' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: CFNetwork/8.{0,100} Darwin/15\.\d+.{0,100}\(x86_64\) +- os_replacement: iOS + os_v1_replacement: '1' + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: CFNetwork/.{0,100} 
Darwin/(9)\.\d+ +- os_replacement: iOS + os_v1_replacement: '4' + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: CFNetwork/.{0,100} Darwin/(10)\.\d+ +- os_replacement: iOS + os_v1_replacement: '5' + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: CFNetwork/.{0,100} Darwin/(11)\.\d+ +- os_replacement: iOS + os_v1_replacement: '6' + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: CFNetwork/.{0,100} Darwin/(13)\.\d+ +- os_replacement: iOS + os_v1_replacement: '7' + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: CFNetwork/6.{0,100} Darwin/(14)\.\d+ +- os_replacement: iOS + os_v1_replacement: '8' + os_v2_replacement: '0' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: CFNetwork/7.{0,100} Darwin/(14)\.\d+ +- os_replacement: iOS + os_v1_replacement: '9' + os_v2_replacement: '0' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: CFNetwork/7.{0,100} Darwin/(15)\.\d+ +- os_replacement: iOS + os_v1_replacement: '10' + os_v2_replacement: '3' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: CFNetwork/8.{0,100} Darwin/16\.5\.\d+ +- os_replacement: iOS + os_v1_replacement: '10' + os_v2_replacement: '3' + os_v3_replacement: '2' + os_v4_replacement: $5 + regex: CFNetwork/8.{0,100} Darwin/16\.6\.\d+ +- os_replacement: iOS + os_v1_replacement: '10' + os_v2_replacement: '3' + os_v3_replacement: '3' + os_v4_replacement: $5 + regex: CFNetwork/8.{0,100} Darwin/16\.7\.\d+ +- os_replacement: iOS + os_v1_replacement: '10' + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: CFNetwork/8.{0,100} Darwin/(16)\.\d+ +- os_replacement: iOS + os_v1_replacement: '11' + os_v2_replacement: '0' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: CFNetwork/8.{0,100} Darwin/17\.0\.\d+ +- os_replacement: iOS + os_v1_replacement: '11' + os_v2_replacement: '1' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: CFNetwork/8.{0,100} Darwin/17\.2\.\d+ +- os_replacement: iOS + os_v1_replacement: '11' + os_v2_replacement: '2' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: CFNetwork/8.{0,100} Darwin/17\.3\.\d+ +- os_replacement: iOS + os_v1_replacement: '11' + os_v2_replacement: '2' + os_v3_replacement: '6' + os_v4_replacement: $5 + regex: CFNetwork/8.{0,100} Darwin/17\.4\.\d+ +- os_replacement: iOS + os_v1_replacement: '11' + os_v2_replacement: '3' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: CFNetwork/8.{0,100} Darwin/17\.5\.\d+ +- os_replacement: iOS + os_v1_replacement: '11' + os_v2_replacement: '4' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: CFNetwork/9.{0,100} Darwin/17\.6\.\d+ +- os_replacement: iOS + os_v1_replacement: '11' + os_v2_replacement: '4' + os_v3_replacement: '1' + os_v4_replacement: $5 + regex: CFNetwork/9.{0,100} Darwin/17\.7\.\d+ +- os_replacement: iOS + os_v1_replacement: '11' + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: CFNetwork/8.{0,100} Darwin/(17)\.\d+ +- os_replacement: iOS + os_v1_replacement: '12' + os_v2_replacement: '0' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: CFNetwork/9.{0,100} Darwin/18\.0\.\d+ +- os_replacement: iOS + os_v1_replacement: '12' + os_v2_replacement: '1' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: CFNetwork/9.{0,100} Darwin/18\.2\.\d+ +- os_replacement: iOS + os_v1_replacement: '12' + os_v2_replacement: '2' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: CFNetwork/9.{0,100} Darwin/18\.5\.\d+ +- 
os_replacement: iOS + os_v1_replacement: '12' + os_v2_replacement: '3' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: CFNetwork/9.{0,100} Darwin/18\.6\.\d+ +- os_replacement: iOS + os_v1_replacement: '12' + os_v2_replacement: '4' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: CFNetwork/9.{0,100} Darwin/18\.7\.\d+ +- os_replacement: iOS + os_v1_replacement: '12' + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: CFNetwork/9.{0,100} Darwin/(18)\.\d+ +- os_replacement: iOS + os_v1_replacement: '13' + os_v2_replacement: '3' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: CFNetwork/11.{0,100} Darwin/19\.2\.\d+ +- os_replacement: iOS + os_v1_replacement: '13' + os_v2_replacement: '3' + os_v3_replacement: '1' + os_v4_replacement: $5 + regex: CFNetwork/11.{0,100} Darwin/19\.3\.\d+ +- os_replacement: iOS + os_v1_replacement: '13' + os_v2_replacement: '4' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: CFNetwork/11.{0,100} Darwin/19\.4\.\d+ +- os_replacement: iOS + os_v1_replacement: '13' + os_v2_replacement: '5' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: CFNetwork/11.{0,100} Darwin/19\.5\.\d+ +- os_replacement: iOS + os_v1_replacement: '13' + os_v2_replacement: '6' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: CFNetwork/11.{0,100} Darwin/19\.6\.\d+ +- os_replacement: iOS + os_v1_replacement: '13' + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: CFNetwork/1[01].{0,100} Darwin/19\.\d+ +- os_replacement: iOS + os_v1_replacement: '14' + os_v2_replacement: '2' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: CFNetwork/12.{0,100} Darwin/20\.1\.\d+ +- os_replacement: iOS + os_v1_replacement: '14' + os_v2_replacement: '3' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: CFNetwork/12.{0,100} Darwin/20\.2\.\d+ +- os_replacement: iOS + os_v1_replacement: '14' + os_v2_replacement: '4' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: CFNetwork/12.{0,100} Darwin/20\.3\.\d+ +- os_replacement: iOS + os_v1_replacement: '14' + os_v2_replacement: '5' + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: CFNetwork/12.{0,100} Darwin/20\.4\.\d+ +- os_replacement: iOS + os_v1_replacement: '14' + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: CFNetwork/.{0,100} Darwin/(20)\.\d+ +- os_replacement: iOS + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: CFNetwork/.{0,100} Darwin/ +- os_replacement: iOS + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: '\b(iOS[ /]|iOS; |iPhone(?:/| v|[ _]OS[/,]|; | OS : |\d,\d/|\d,\d; )|iPad/)(\d{1,2})[_\.](\d{1,2})(?:[_\.](\d+)|)' +- os_replacement: $1 + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: \((iOS); +- os_replacement: WatchOS + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (watchOS)[/ ](\d+)\.(\d+)(?:\.(\d+)|) +- os_replacement: $1 + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: Outlook-(iOS)/\d+\.\d+\.prod\.iphone +- os_replacement: iOS + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (iPod|iPhone|iPad) +- os_replacement: tvOS + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (tvOS)[/ ](\d+)\.(\d+)(?:\.(\d+)|) +- 
os_replacement: Chrome OS + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (CrOS) [a-z0-9_]+ (\d+)\.(\d+)(?:\.(\d+)|) +- os_replacement: Debian + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: ([Dd]ebian) +- os_replacement: $1 + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (Linux Mint)(?:/(\d+)|) +- os_replacement: $1 + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: '(Mandriva)(?: Linux|)/(?:[\d.-]+m[a-z]{2}(\d+).(\d)|)' +- os_replacement: Symbian OS + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (Symbian[Oo][Ss])[/ ](\d+)\.(\d+) +- os_replacement: Symbian^3 Anna + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (Symbian/3).{1,200}NokiaBrowser/7\.3 +- os_replacement: Symbian^3 Belle + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (Symbian/3).{1,200}NokiaBrowser/7\.4 +- os_replacement: Symbian^3 + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (Symbian/3) +- os_replacement: Symbian OS + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: \b(Series 60|SymbOS|S60Version|S60V\d|S60\b) +- os_replacement: $1 + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (MeeGo) +- os_replacement: Symbian OS + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: Symbian [Oo][Ss] +- os_replacement: Nokia Series 40 + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: Series40; +- os_replacement: Nokia Series 30 Plus + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: Series30Plus; +- os_replacement: BlackBerry OS + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (BB10);.{1,200}Version/(\d+)\.(\d+)\.(\d+) +- os_replacement: BlackBerry OS + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (Black[Bb]erry)[0-9a-z]+/(\d+)\.(\d+)\.(\d+)(?:\.(\d+)|) +- os_replacement: BlackBerry OS + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (Black[Bb]erry).{1,200}Version/(\d+)\.(\d+)\.(\d+)(?:\.(\d+)|) +- os_replacement: BlackBerry Tablet OS + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (RIM Tablet OS) (\d+)\.(\d+)\.(\d+) +- os_replacement: BlackBerry Tablet OS + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (Play[Bb]ook) +- os_replacement: BlackBerry OS + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (Black[Bb]erry) +- os_replacement: KaiOS + os_v1_replacement: $2 + os_v2_replacement: $3 + os_v3_replacement: $4 + os_v4_replacement: $5 + regex: (K[Aa][Ii]OS)\/(\d+)\.(\d+)(?:\.(\d+)|) +- os_replacement: Firefox OS + os_v1_replacement: '1' + os_v2_replacement: '0' + os_v3_replacement: '1' + os_v4_replacement: $5 + regex: \((?:Mobile|Tablet);.{1,200}Gecko/18.0 Firefox/\d+\.\d+ +- os_replacement: 
Firefox OS
+  os_v1_replacement: '1'
+  os_v2_replacement: '1'
+  os_v3_replacement: $4
+  os_v4_replacement: $5
+  regex: \((?:Mobile|Tablet);.{1,200}Gecko/18.1 Firefox/\d+\.\d+
+- os_replacement: Firefox OS
+  os_v1_replacement: '1'
+  os_v2_replacement: '2'
+  os_v3_replacement: $4
+  os_v4_replacement: $5
+  regex: \((?:Mobile|Tablet);.{1,200}Gecko/26.0 Firefox/\d+\.\d+
+- os_replacement: Firefox OS
+  os_v1_replacement: '1'
+  os_v2_replacement: '3'
+  os_v3_replacement: $4
+  os_v4_replacement: $5
+  regex: \((?:Mobile|Tablet);.{1,200}Gecko/28.0 Firefox/\d+\.\d+
+- os_replacement: Firefox OS
+  os_v1_replacement: '1'
+  os_v2_replacement: '4'
+  os_v3_replacement: $4
+  os_v4_replacement: $5
+  regex: \((?:Mobile|Tablet);.{1,200}Gecko/30.0 Firefox/\d+\.\d+
+- os_replacement: Firefox OS
+  os_v1_replacement: '2'
+  os_v2_replacement: '0'
+  os_v3_replacement: $4
+  os_v4_replacement: $5
+  regex: \((?:Mobile|Tablet);.{1,200}Gecko/32.0 Firefox/\d+\.\d+
+- os_replacement: Firefox OS
+  os_v1_replacement: '2'
+  os_v2_replacement: '1'
+  os_v3_replacement: $4
+  os_v4_replacement: $5
+  regex: \((?:Mobile|Tablet);.{1,200}Gecko/34.0 Firefox/\d+\.\d+
+- os_replacement: Firefox OS
+  os_v1_replacement: $2
+  os_v2_replacement: $3
+  os_v3_replacement: $4
+  os_v4_replacement: $5
+  regex: \((?:Mobile|Tablet);.{1,200}Firefox/\d+\.\d+
+- os_replacement: $1
+  os_v1_replacement: $2
+  os_v2_replacement: $3
+  os_v3_replacement: $4
+  os_v4_replacement: $5
+  regex: (BREW)[ /](\d+)\.(\d+)\.(\d+)
+- os_replacement: $1
+  os_v1_replacement: $2
+  os_v2_replacement: $3
+  os_v3_replacement: $4
+  os_v4_replacement: $5
+  regex: (BREW);
+- os_replacement: Brew MP
+  os_v1_replacement: $2
+  os_v2_replacement: $3
+  os_v3_replacement: $4
+  os_v4_replacement: $5
+  regex: (Brew MP|BMP)[ /](\d+)\.(\d+)\.(\d+)
+- os_replacement: Brew MP
+  os_v1_replacement: $2
+  os_v2_replacement: $3
+  os_v3_replacement: $4
+  os_v4_replacement: $5
+  regex: BMP;
+- os_replacement: $1
+  os_v1_replacement: $2
+  os_v2_replacement: $3
+  os_v3_replacement: $4
+  os_v4_replacement: $5
+  regex: '(GoogleTV)(?: (\d+)\.(\d+)(?:\.(\d+)|)|/[\da-z]+)'
+- os_replacement: $1
+  os_v1_replacement: $2
+  os_v2_replacement: $3
+  os_v3_replacement: $4
+  os_v4_replacement: $5
+  regex: (WebTV)/(\d+).(\d+)
+- os_replacement: Chromecast
+  os_v1_replacement: $2
+  os_v2_replacement: $3
+  os_v3_replacement: $4
+  os_v4_replacement: $5
+  regex: (CrKey)(?:[/](\d+)\.(\d+)(?:\.(\d+)|)|)
+- os_replacement: webOS
+  os_v1_replacement: $2
+  os_v2_replacement: $3
+  os_v3_replacement: $4
+  os_v4_replacement: $5
+  regex: (hpw|web)OS/(\d+)\.(\d+)(?:\.(\d+)|)
+- os_replacement: $1
+  os_v1_replacement: $2
+  os_v2_replacement: $3
+  os_v3_replacement: $4
+  os_v4_replacement: $5
+  regex: (VRE);
+- os_replacement: $1
+  os_v1_replacement: $2
+  os_v2_replacement: $3
+  os_v3_replacement: $4
+  os_v4_replacement: $5
+  regex: (Fedora|Red Hat|PCLinuxOS|Puppy|Ubuntu|Kindle|Bada|Sailfish|Lubuntu|BackTrack|Slackware|(?:Free|Open|Net|\b)BSD)[/
+    ](\d+)\.(\d+)(?:\.(\d+)|)(?:\.(\d+)|)
+- os_replacement: Gentoo
+  os_v1_replacement: $2
+  os_v2_replacement: $3
+  os_v3_replacement: $4
+  os_v4_replacement: $5
+  regex: (Linux)[ /](\d+)\.(\d+)(?:\.(\d+)|).{0,100}gentoo
+- os_replacement: $1
+  os_v1_replacement: $2
+  os_v2_replacement: $3
+  os_v3_replacement: $4
+  os_v4_replacement: $5
+  regex: \((Bada);
+- os_replacement: $1
+  os_v1_replacement: $2
+  os_v2_replacement: $3
+  os_v3_replacement: $4
+  os_v4_replacement: $5
+  regex: (Windows|Android|WeTab|Maemo|Web0S)
+- os_replacement: $1
+  os_v1_replacement: $2
+  os_v2_replacement: $3
+  os_v3_replacement: $4
+  os_v4_replacement: $5
+  regex: (Ubuntu|Kubuntu|Arch Linux|CentOS|Slackware|Gentoo|openSUSE|SUSE|Red Hat|Fedora|PCLinuxOS|Mageia|(?:Free|Open|Net|\b)BSD)
+- os_replacement: $1
+  os_v1_replacement: $2
+  os_v2_replacement: $3
+  os_v3_replacement: $4
+  os_v4_replacement: $5
+  regex: (Linux)(?:[ /](\d+)\.(\d+)(?:\.(\d+)|)|)
+- os_replacement: Solaris
+  os_v1_replacement: $2
+  os_v2_replacement: $3
+  os_v3_replacement: $4
+  os_v4_replacement: $5
+  regex: SunOS
+- os_replacement: Linux
+  os_v1_replacement: $2
+  os_v2_replacement: $3
+  os_v3_replacement: $4
+  os_v4_replacement: $5
+  regex: \(linux-gnu\)
+- os_replacement: Red Hat
+  os_v1_replacement: $2
+  os_v2_replacement: $3
+  os_v3_replacement: $4
+  os_v4_replacement: $5
+  regex: \(x86_64-redhat-linux-gnu\)
+- os_replacement: FreeBSD
+  os_v1_replacement: $2
+  os_v2_replacement: $3
+  os_v3_replacement: $4
+  os_v4_replacement: $5
+  regex: \((freebsd)(\d+)\.(\d+)\)
+- os_replacement: Linux
+  os_v1_replacement: $2
+  os_v2_replacement: $3
+  os_v3_replacement: $4
+  os_v4_replacement: $5
+  regex: linux
+- os_replacement: $1
+  os_v1_replacement: $2
+  os_v2_replacement: $3
+  os_v3_replacement: $4
+  os_v4_replacement: $5
+  regex: ^(Roku)/DVP-(\d+)\.(\d+)
diff --git a/tests/queries/0_stateless/data_ua_parser/useragents.txt b/tests/queries/0_stateless/data_ua_parser/useragents.txt
new file mode 100644
index 00000000000..98bc7e31d73
--- /dev/null
+++ b/tests/queries/0_stateless/data_ua_parser/useragents.txt
@@ -0,0 +1,793 @@
+AppleCoreMedia/1.0.0.12B466 (Apple TV; U; CPU OS 8_1_3 like Mac OS X; en_us)
+Mozilla/5.0 (Android 7.0; Mobile; LG-M150; rv:68.0) Gecko/68.0 Firefox/68.0
+Mozilla/5.0 (Android 8.0.0; Mobile; rv:68.0) Gecko/68.0 Firefox/68.0
+Mozilla/5.0 (Android 8.1.0; Tablet; rv:68.0) Gecko/68.0 Firefox/68.0
+Mozilla/5.0 (Android 9; Mobile; rv:68.0) Gecko/68.0 Firefox/68.0
+Mozilla/5.0 (Linux; Android 10; PH-1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 10; Pixel 2 XL) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 10; Pixel 2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 10; Pixel 3 Build/QP1A.190711.020.C3; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36 [FB_IAB/FB4A;FBAV/240.0.0.38.121;]
+Mozilla/5.0 (Linux; Android 10; Pixel XL Build/QP1A.190711.020; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 10; Pixel XL) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 4.0.3; HTC Sensation 4G Build/IML74K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.111 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 4.0.3; KFTT) AppleWebKit/537.36 (KHTML, like Gecko) Silk/73.7.5 like Chrome/73.0.3683.90 Safari/537.36
+Mozilla/5.0 (Linux; Android 4.2.2; GT-I9152 Build/JDQ39) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.111 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 4.4.2; GT-N5110) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36
+Mozilla/5.0 (Linux; Android 4.4.2; RCT6773W22) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Safari/537.36
+Mozilla/5.0 (Linux; Android 4.4.2; SM-T217S) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Safari/537.36
+Mozilla/5.0 (Linux; Android 4.4.2; SM-T530NU)
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Safari/537.36 +Mozilla/5.0 (Linux; Android 4.4.2; TegraNote-P1640 Build/KOT49H; en-us) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36 +Mozilla/5.0 (Linux; Android 4.4.3; KFTHWI) AppleWebKit/537.36 (KHTML, like Gecko) Silk/76.3.6 like Chrome/76.0.3809.132 Safari/537.36 +Mozilla/5.0 (Linux; Android 5.0.2; SM-A500H) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.90 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 5.0.2; SM-T357T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Safari/537.36 +Mozilla/5.0 (Linux; Android 5.0.2; SM-T530NU) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36 +Mozilla/5.0 (Linux; Android 5.0.2; SM-T530NU) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Safari/537.36 +Mozilla/5.0 (Linux; Android 5.0; RCT6213W87DK) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 YaBrowser/19.4.1.454.01 Safari/537.36 +Mozilla/5.0 (Linux; Android 5.0; SM-N900T Build/LRX21V; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36 [FB_IAB/FB4A;FBAV/229.0.0.35.117;] +Mozilla/5.0 (Linux; Android 5.1.1) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Focus/4.4.1 Chrome/70.0.3538.110 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 5.1.1; AFTB) AppleWebKit/537.36 (KHTML, like Gecko) Silk/76.3.16 like Chrome/76.0.3809.132 Safari/537.36 +Mozilla/5.0 (Linux; Android 5.1.1; AFTT Build/LVY48F; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/70.0.3538.110 Mobile Safari/537.36;dailymotion-player-sdk-android 0.1.26 +Mozilla/5.0 (Linux; Android 5.1.1; AFTT) AppleWebKit/537.36 (KHTML, like Gecko) Silk/76.3.16 like Chrome/76.0.3809.132 Safari/537.36 +Mozilla/5.0 (Linux; Android 5.1.1; KFAUWI) AppleWebKit/537.36 (KHTML, like Gecko) Silk/76.3.6 like Chrome/76.0.3809.132 Safari/537.36 +Mozilla/5.0 (Linux; Android 5.1.1; KFDOWI) AppleWebKit/537.36 (KHTML, like Gecko) Silk/71.2.4 like Chrome/71.0.3578.98 Safari/537.36 +Mozilla/5.0 (Linux; Android 5.1.1; KFDOWI) AppleWebKit/537.36 (KHTML, like Gecko) Silk/76.3.6 like Chrome/76.0.3809.132 Safari/537.36 +Mozilla/5.0 (Linux; Android 5.1.1; KFFOWI) AppleWebKit/537.36 (KHTML, like Gecko) Silk/76.3.6 like Chrome/76.0.3809.132 Safari/537.36 +Mozilla/5.0 (Linux; Android 5.1.1; KFGIWI) AppleWebKit/537.36 (KHTML, like Gecko) Silk/76.3.6 like Chrome/76.0.3809.132 Safari/537.36 +Mozilla/5.0 (Linux; Android 5.1.1; KFSUWI) AppleWebKit/537.36 (KHTML, like Gecko) Silk/76.3.6 like Chrome/76.0.3809.132 Safari/537.36 +Mozilla/5.0 (Linux; Android 5.1.1; KFSUWI) AppleWebKit/537.36 (KHTML, like Gecko) Silk/77.1.127 like Chrome/77.0.3865.92 Safari/537.36 +Mozilla/5.0 (Linux; Android 5.1.1; LG-AS330) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 5.1.1; LGL43AL) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 5.1.1; SAMSUNG SM-G530R7 Build/LMY47X) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/9.2 Chrome/67.0.3396.87 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 5.1.1; SAMSUNG SM-T377P) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Safari/537.36 +Mozilla/5.0 (Linux; Android 5.1.1; SAMSUNG SM-T900) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Safari/537.36 +Mozilla/5.0 (Linux; Android 5.1.1; SAMSUNG-SM-T337A Build/LMY47X) AppleWebKit/537.36 (KHTML, like Gecko) 
Chrome/69.0.3497.100 Safari/537.36 +Mozilla/5.0 (Linux; Android 5.1.1; SM-G360T1 Build/LMY47X) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.81 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 5.1.1; SM-J320FN) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 5.1.1; SM-T280) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Safari/537.36 +Mozilla/5.0 (Linux; Android 5.1.1; SM-T330NU) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36 +Mozilla/5.0 (Linux; Android 5.1.1; SM-T670) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36 +Mozilla/5.0 (Linux; Android 5.1.1; SM-T670) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Safari/537.36 +Mozilla/5.0 (Linux; Android 5.1.1; Vodafone Smart ultra 6 Build/LMY47V; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/74.0.3729.136 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 5.1; BLU Advance 5.0 Build/LMY47I) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.158 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 5.1; HTC Desire 626s) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 5.1; HUAWEI LUA-L22 Build/HUAWEILUA-L22) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.89 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 5.1; NX16A11264) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Safari/537.36 +Mozilla/5.0 (Linux; Android 5.1; XT1526) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.90 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 6.0.1; CPH1613) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 6.0.1; LG-M153 Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/55.0.2883.91 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 6.0.1; LG-M153) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 6.0.1; LGLS676) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 6.0.1; N9136) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 6.0.1; Nexus 7 Build/MOB30X) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.133 Safari/537.36 +Mozilla/5.0 (Linux; Android 6.0.1; SAMSUNG SM-G900I) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 6.0.1; SAMSUNG SM-G900P Build/MMB29M) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/7.2 Chrome/59.0.3071.125 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 6.0.1; SAMSUNG SM-J700M) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 6.0.1; SAMSUNG SM-S327VL) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 6.0.1; SAMSUNG-SM-T377A) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Safari/537.36 +Mozilla/5.0 (Linux; Android 6.0.1; SM-G532M Build/MMB29T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.91 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 6.0.1; SM-G532M Build/MMB29T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/75.0.3770.101 Mobile Safari/537.36 [FB_IAB/FB4A;FBAV/240.0.0.38.121;] +Mozilla/5.0 (Linux; Android 6.0.1; SM-G532M) AppleWebKit/537.36 
(KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 6.0.1; SM-G550T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 6.0.1; SM-G550T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 6.0.1; SM-G550T1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 6.0.1; SM-G900V) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.75 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 6.0.1; SM-G920A) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 6.0.1; SM-J327P) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 6.0.1; SM-N910S) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.143 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 6.0.1; SM-N920V) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.89 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 6.0.1; SM-T350 Build/MMB29M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.125 Safari/537.36 +Mozilla/5.0 (Linux; Android 6.0.1; SM-T560NU) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Safari/537.36 +Mozilla/5.0 (Linux; Android 6.0.1; SM-T800) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Safari/537.36 +Mozilla/5.0 (Linux; Android 6.0.1; XT1254) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 6.0.1; Z798BL Build/MMB29M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 6.0.1; Z799VL Build/MMB29M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/45.0.2454.95 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 6.0; 5010X) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.89 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 6.0; CAM-L21) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 6.0; F3313) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 6.0; RCT6603W47M7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; 5049Z Build/NRD90M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; ASUS_A002A) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; Alcatel_5044C) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; Astra Young Pro Build/NRD90M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/59.0.3071.125 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; Infinix X571) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; LG-H872 Build/NRD90U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.123 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; LG-K425 Build/NRD90U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.91 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; LG-LS777) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; LG-M210) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 
(Linux; Android 7.0; LG-M430) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; LG-TP260 Build/NRD90U; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/64.0.3282.137 Mobile Safari/537.36;dailymotion-player-sdk-android 0.1.31 +Mozilla/5.0 (Linux; Android 7.0; LG-TP260) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; LG-TP450 Build/NRD90U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.137 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; LG-V521) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.143 Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; LG-V521) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; LGMP260 Build/NRD90U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.83 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; LGMS210 Build/NRD90U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.91 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; LGMS210) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; P00I) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; RS988) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; SAMSUNG SM-J701F) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; SAMSUNG SM-J710F) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; SAMSUNG SM-N920T Build/NRD90M) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/9.2 Chrome/67.0.3396.87 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; SAMSUNG-SM-G920A) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; SM-G920P Build/NRD90M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36 Flipboard/4.2.23/4722,4.2.23.4722 +Mozilla/5.0 (Linux; Android 7.0; SM-G920V) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; SM-G928V) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; SM-G950U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; SM-G955U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; SM-J327T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; SM-J327T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; SM-J327T1 Build/NRD90M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.137 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; SM-J327T1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.101 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; SM-J327T1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; SM-N9208) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.90 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; SM-N920P) AppleWebKit/537.36 (KHTML, like 
Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; SM-N920T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; SM-T585) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; SM-T810) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.143 Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; SM-T810) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.111 Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; SM-T810) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; SM-T813) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.111 Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; SM-T813) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; ST1009X) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.143 Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0; XT1663) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.0;) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.96 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.1.1; A574BL Build/NMF26F; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36;dailymotion-player-sdk-android 0.1.31 +Mozilla/5.0 (Linux; Android 7.1.1; A574BL) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.1.1; CPH1729 Build/N6F26Q; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36 [FB_IAB/FB4A;FBAV/240.0.0.38.121;] +Mozilla/5.0 (Linux; Android 7.1.1; Coolpad 3632A) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.1.1; General Mobile 4G Dual) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.1.1; Moto E (4) Plus Build/NCRS26.58-44-20; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/76.0.3809.111 Mobile Safari/537.36;dailymotion-player-sdk-android 0.1.31 +Mozilla/5.0 (Linux; Android 7.1.1; Moto E (4)) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.80 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.1.1; Moto E (4)) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.1.1; Moto E (4)) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.73 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.1.1; Moto E (4)) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.1.1; NX591J) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.1.1; REVVLPLUS C3701A Build/143.54.190611.3701A-TMO) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.137 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.1.1; SAMSUNG SM-J320A) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.1.1; SAMSUNG SM-T550) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Safari/537.36 +Mozilla/5.0 (Linux; Android 7.1.1; SAMSUNG-SM-T377A Build/NMF26X) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.137 Safari/537.36 +Mozilla/5.0 (Linux; Android 7.1.1; SM-J250F) AppleWebKit/537.36 (KHTML, like Gecko) 
Chrome/76.0.3809.132 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.1.1; SM-J700T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.1.1; SM-T350) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Safari/537.36 +Mozilla/5.0 (Linux; Android 7.1.1; SM-T377T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.73 Safari/537.36 +Mozilla/5.0 (Linux; Android 7.1.1; SM-T550 Build/NMF26X) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36 +Mozilla/5.0 (Linux; Android 7.1.1; SM-T550) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Safari/537.36 +Mozilla/5.0 (Linux; Android 7.1.1; SM-T560NU) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Safari/537.36 +Mozilla/5.0 (Linux; Android 7.1.1; X20 Build/N6F26Q; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/52.0.2743.100 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.1.1; Z851M Build/NMF26V) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.83 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.1.1; Z899VL Build/NMF26V; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/74.0.3729.136 Mobile Safari/537.36;dailymotion-player-sdk-android 0.1.31 +Mozilla/5.0 (Linux; Android 7.1.1; Z982 Build/NMF26V; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/75.0.3770.143 Mobile Safari/537.36;dailymotion-player-sdk-android 0.1.31 +Mozilla/5.0 (Linux; Android 7.1.1; Z982) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.1.2) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Focus/4.4.1 Chrome/70.0.3538.110 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.1.2; AFTKMST12 Build/NS6265; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/70.0.3538.110 Mobile Safari/537.36;dailymotion-player-sdk-android 0.1.26 +Mozilla/5.0 (Linux; Android 7.1.2; AFTKMST12) AppleWebKit/537.36 (KHTML, like Gecko) Silk/76.3.16 like Chrome/76.0.3809.132 Safari/537.36 +Mozilla/5.0 (Linux; Android 7.1.2; AFTMM Build/NS6265; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/70.0.3538.110 Mobile Safari/537.36;dailymotion-player-sdk-android 0.1.26 +Mozilla/5.0 (Linux; Android 7.1.2; AFTN Build/NS6265; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/70.0.3538.110 Mobile Safari/537.36;dailymotion-player-sdk-android 0.1.26 +Mozilla/5.0 (Linux; Android 7.1.2; KFKAWI Build/NS6301; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/59.0.3071.125 Safari/537.36 +Mozilla/5.0 (Linux; Android 7.1.2; KFKAWI) AppleWebKit/537.36 (KHTML, like Gecko) Silk/76.3.6 like Chrome/76.0.3809.132 Safari/537.36 +Mozilla/5.0 (Linux; Android 7.1.2; KFMUWI) AppleWebKit/537.36 (KHTML, like Gecko) Silk/76.3.6 like Chrome/76.0.3809.132 Safari/537.36 +Mozilla/5.0 (Linux; Android 7.1.2; LG-SP200) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.143 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.1.2; LG-SP200) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.1.2; LM-X210(G)) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.111 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.1.2; LM-X210) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 7.1.2; RCT6973W43R) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Safari/537.36 +Mozilla/5.0 (Linux; Android 7.1.2; Redmi 4) AppleWebKit/537.36 (KHTML, like Gecko) 
Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/76.0.3809.132 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; ASUS_Z01FD) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; AUM-L29) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; BRAVIA 4K GB Build/OPR2.170623.027.S25; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36;dailymotion-player-sdk-android 0.1.31 +Mozilla/5.0 (Linux; Android 8.0.0; CMR-W09) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; EVA-AL00) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.73 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; G3223) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; LG-H910) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; LG-H931) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; LG-H932) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; SAMSUNG SM-A520F) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; SAMSUNG SM-G891A Build/R16NW) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/8.2 Chrome/63.0.3239.111 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; SAMSUNG SM-G935T) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; SAMSUNG SM-G955U) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; SAMSUNG SM-J337T Build/R16NW) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/9.2 Chrome/67.0.3396.87 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; SAMSUNG SM-J737P) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; SAMSUNG SM-N950F) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; SAMSUNG-SM-G891A) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; SAMSUNG-SM-G935A) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; SM-A720F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; SM-G570F Build/R16NW; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/73.0.3683.90 Mobile Safari/537.36 [FB_IAB/FB4A;FBAV/231.0.0.39.113;] +Mozilla/5.0 (Linux; Android 8.0.0; SM-G570Y) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; SM-G930T Build/R16NW; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36;dailymotion-player-sdk-android 0.1.31 +Mozilla/5.0 (Linux; Android 8.0.0; SM-G930V Build/R16NW) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.137 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; 
SM-G930VL) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; SM-G935F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.143 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; SM-G935P) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; SM-G935T Build/R16NW; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36 [FB_IAB/FB4A;FBAV/240.0.0.38.121;] +Mozilla/5.0 (Linux; Android 8.0.0; SM-G935T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; SM-G950U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; SM-G955U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.112 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; SM-G955U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; SM-J330G) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; SM-J337T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; SM-J737A) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; SM-J737T1 Build/R16NW) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.126 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; SM-J737T1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; SM-N950F Build/R16NW) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.126 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; SM-N950U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; SM-N950U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; SM-N950U1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; SM-S367VL Build/R16NW) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 OPT/1.22.80 +Mozilla/5.0 (Linux; Android 8.0.0; VS995) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; XT1635-02) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; moto e5 play) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.111 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; moto e5 play) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; moto e5 supra) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.0.0; moto g(6)) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; 5041C) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; 6062W) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; A502DL Build/OPM1.171019.011) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; 
A502DL) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; BKK-LX2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; C4 Build/OPM2.171019.012) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; Coolpad 3310A Build/3310A.SPRINT.190213.0S) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; Infinix X604 Build/O11019) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.137 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; Joy 1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; LAVA LE9820) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; LG-Q710AL) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; LM-Q610(FGN)) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; LM-Q710(FGN) Build/OPM1.171019.019; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36 [FB_IAB/FB4A;FBAV/235.0.0.38.118;] +Mozilla/5.0 (Linux; Android 8.1.0; LM-Q710(FGN)) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; LM-Q710(FGN)) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.111 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; LM-Q710(FGN)) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; LM-Q710(FGN)) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; LM-V405) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; LM-X210(G) Build/OPM1.171019.026; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36 agentweb/4.0.2 UCBrowser/11.6.4.950 +Mozilla/5.0 (Linux; Android 8.1.0; LM-X210(G)) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; LM-X210(G)) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.105 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; LM-X210(G)) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; LM-X212(G)) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; LM-X220) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; LM-X220) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; LM-X220PM Build/O11019; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36;dailymotion-player-sdk-android 0.1.31 +Mozilla/5.0 (Linux; Android 8.1.0; LM-X410(FG)) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; LM-X410(FG)) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.111 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; LM-X410(FG)) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 
Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; LM-X410.FGN Build/OPM1.171019.019) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.91 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; LML414DL) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; LML713DL) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; Moto G (5S) Plus) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; One) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/70.0.3538.110 Mobile Safari/537.36/TansoDL +Mozilla/5.0 (Linux; Android 8.1.0; RCT6873W42BMF8KC Build/O11019) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; REVVL 2 Build/OPM1.171019.011) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; REVVL 2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; SAMSUNG SM-J727T) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; SAMSUNG SM-J727T1 Build/M1AJQ) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/9.4 Chrome/67.0.3396.87 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; SAMSUNG SM-J727T1) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; SAMSUNG SM-T580 Build/M1AJQ) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/9.4 Chrome/67.0.3396.87 Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; SAMSUNG-SM-J727A Build/M1AJQ; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36 [FB_IAB/FB4A;FBAV/240.0.0.38.121;] +Mozilla/5.0 (Linux; Android 8.1.0; SM-G610F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; SM-J260T1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.111 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; SM-J260T1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; SM-J260T1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; SM-J410F Build/M1AJB) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; SM-J727P Build/M1AJQ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.91 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; SM-J727T Build/M1AJQ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.126 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; SM-J727T1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.111 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; SM-J727T1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.73 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; SM-J727T1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; SM-J727V) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; SM-J727V) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; SM-P580) 
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; SM-T380) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.143 Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; SM-T580) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.90 Safari/537.36 EdgA/42.0.2.3928 +Mozilla/5.0 (Linux; Android 8.1.0; SM-T580) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; SM-T580) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.89 Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; SM-T580) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; SM-T837T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; TECNO CF8 Build/O11019; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36 [FB_IAB/FB4A;FBAV/239.0.0.41.152;] +Mozilla/5.0 (Linux; Android 8.1.0; V1818CA) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.101 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; meizu C9 Build/OPM2.171019.012) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.91 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; vivo 1724) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 8.1.0; vivo 1814) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36 DuckDuckGo/5 +Mozilla/5.0 (Linux; Android 9; 1825) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; ANE-LX2 Build/HUAWEIANE-L22; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/76.0.3809.132 Mobile Safari/537.36 [FB_IAB/FB4A;FBAV/236.0.0.40.117;] +Mozilla/5.0 (Linux; Android 9; BLA-A09) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; CLT-L04) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; CPH1911 Build/PPR1.180610.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36 [FB_IAB/FB4A;FBAV/239.0.0.41.152;] +Mozilla/5.0 (Linux; Android 9; CPH1923 Build/PPR1.180610.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/76.0.3809.111 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; ELE-L29) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; G8142) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; GM1911) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.111 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; GM1917) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; INE-LX2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.111 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; LM-G710 Build/PKQ1.181105.001; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; LM-Q720) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; LM-V405 Build/PKQ1.190202.001; wv) AppleWebKit/537.36 (KHTML, like Gecko) 
Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36;dailymotion-player-sdk-android 0.1.15 +Mozilla/5.0 (Linux; Android 9; LM-V405) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.111 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; LM-V500N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; LM-X420) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; LM-X420) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; MAR-LX1A) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; MI 9) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; Mi A2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; Moto Z (2)) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; Nokia 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; ONEPLUS A6000) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; ONEPLUS A6003) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; ONEPLUS A6013 Build/PKQ1.180716.001; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; ONEPLUS A6013) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; ONEPLUS A6013) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; PAR-AL00 Build/HUAWEIPAR-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36 [FB_IAB/FB4A;FBAV/235.0.0.38.118;] +Mozilla/5.0 (Linux; Android 9; Pixel 2 XL) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; Pixel 3 Build/PQ1A.190105.004; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; Pixel 3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.111 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; Pixel 3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; Pixel 3a XL) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; REVVLRY ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.90 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; RMX1801) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.101 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; Redmi 7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; Redmi Note 7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.89 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SAMSUNG SM-A102U) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SAMSUNG SM-A505FN) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SAMSUNG SM-A505GN) 
AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SAMSUNG SM-G892U) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SAMSUNG SM-G950U) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SAMSUNG SM-G955F Build/PPR1.180610.011) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/9.4 Chrome/67.0.3396.87 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SAMSUNG SM-G955U) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SAMSUNG SM-G9600 Build/PPR1.180610.011) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/9.4 Chrome/67.0.3396.87 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SAMSUNG SM-G960U) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SAMSUNG SM-G965U) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SAMSUNG SM-G970F) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SAMSUNG SM-G970U) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SAMSUNG SM-G973U Build/PPR1.180610.011) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/9.4 Chrome/67.0.3396.87 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SAMSUNG SM-G973U) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SAMSUNG SM-G975U) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SAMSUNG SM-J415F) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SAMSUNG SM-J730F) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SAMSUNG SM-J737P) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SAMSUNG SM-J737T Build/PPR1.180610.011) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/9.0 Chrome/67.0.3396.87 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SAMSUNG SM-N950U) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SAMSUNG SM-N960F) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SAMSUNG SM-N960U) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SAMSUNG SM-N960U1 Build/PPR1.180610.011) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/9.2 Chrome/67.0.3396.87 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SAMSUNG SM-N970U) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SAMSUNG SM-N975U) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SAMSUNG 
SM-N975U1) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SAMSUNG SM-T510) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SAMSUNG SM-T720) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SHIELD Android TV Build/PPR1.180610.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36;dailymotion-player-sdk-android 0.1.31 +Mozilla/5.0 (Linux; Android 9; SM-A102U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SM-A102U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SM-A105M Build/PPR1.180610.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36 [FB_IAB/FB4A;FBAV/237.0.0.44.120;] +Mozilla/5.0 (Linux; Android 9; SM-A205G) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SM-A205U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SM-A505F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SM-A530F Build/PPR1.180610.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36 [FB_IAB/FB4A;FBAV/240.0.0.38.121;] +Mozilla/5.0 (Linux; Android 9; SM-A530N Build/PPR1.180610.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36;KAKAOTALK 1908560 +Mozilla/5.0 (Linux; Android 9; SM-A600T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SM-A605F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SM-A920F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SM-G892A) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.136 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SM-G950F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SM-G950U Build/PPR1.180610.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36;dailymotion-player-sdk-android 0.1.31 +Mozilla/5.0 (Linux; Android 9; SM-G950U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.99 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SM-G950U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.111 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SM-G950U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SM-G950U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SM-G950U1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SM-G955F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36 +Mozilla/5.0 (Linux; Android 9; SM-G955U Build/PPR1.180610.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.73 Mobile Safari/537.36 [FB_IAB/FB4A;FBAV/240.0.0.38.121;] +Mozilla/5.0 (Linux; Android 9; 
SM-G955U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-G9600) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-G960U Build/PPR1.180610.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/73.0.3683.90 Mobile Safari/537.36 [FB_IAB/FB4A;FBAV/233.0.0.36.117;]
+Mozilla/5.0 (Linux; Android 9; SM-G960U Build/PPR1.180610.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36;dailymotion-player-sdk-android 0.1.31
+Mozilla/5.0 (Linux; Android 9; SM-G960U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.99 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-G960U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.136 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-G960U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-G960U1 Build/PPR1.180610.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36 [FB_IAB/FB4A;FBAV/240.0.0.38.121;]
+Mozilla/5.0 (Linux; Android 9; SM-G960U1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-G965F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-G965U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.136 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-G965U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-G965U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3921.2 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-G965U1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-G970U Build/PPR1.180610.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36 [FB_IAB/FB4A;FBAV/240.0.0.38.121;]
+Mozilla/5.0 (Linux; Android 9; SM-G970U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.101 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-G970U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-G970U1 Build/PPR1.180610.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-G973U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.136 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-G973U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-G973U1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-G975U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.101 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-G975U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-G975U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-G975U1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-J260A) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-J337P) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-J600FN) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.89 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-J600G Build/PPR1.180610.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.73 Mobile Safari/537.36 [FB_IAB/FB4A;FBAV/238.0.0.41.116;]
+Mozilla/5.0 (Linux; Android 9; SM-J730F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-J737A Build/PPR1.180610.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36;dailymotion-player-sdk-android 0.1.31
+Mozilla/5.0 (Linux; Android 9; SM-J737A) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.136 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-J737V Build/PPR1.180610.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/75.0.3770.101 Mobile Safari/537.36 [Pinterest/Android]
+Mozilla/5.0 (Linux; Android 9; SM-J737V) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-J810M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-N950U Build/PPR1.180610.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36 [FB_IAB/FB4A;FBAV/240.0.0.38.121;]
+Mozilla/5.0 (Linux; Android 9; SM-N950U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-N950U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.101 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-N950U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.73 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-N950U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-N960F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-N960U Build/PPR1.180610.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/74.0.3729.136 Mobile Safari/537.36 [FB_IAB/FB4A;FBAV/240.0.0.38.121;]
+Mozilla/5.0 (Linux; Android 9; SM-N960U Build/PPR1.180610.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36;dailymotion-player-sdk-android 0.1.31
+Mozilla/5.0 (Linux; Android 9; SM-N960U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-N960U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.143 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-N960U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-N960U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-N960U1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-N975U Build/PPR1.180610.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.73 Mobile Safari/537.36;dailymotion-player-sdk-android 0.1.31
+Mozilla/5.0 (Linux; Android 9; SM-N975U Build/PPR1.180610.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36;dailymotion-player-sdk-android 0.1.31
+Mozilla/5.0 (Linux; Android 9; SM-N975U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-N976V Build/PPR1.180610.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36 [FB_IAB/FB4A;FBAV/240.0.0.38.121;]
+Mozilla/5.0 (Linux; Android 9; SM-S367VL) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-S767VL) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-T597P) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Safari/537.36
+Mozilla/5.0 (Linux; Android 9; SM-T720) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Safari/537.36
+Mozilla/5.0 (Linux; Android 9; TECNO KC8) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; VOG-L29) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; cp3705A) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.136 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; moto g(6) Build/PPS29.118-15-11; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.92 Mobile Safari/537.36;dailymotion-player-sdk-android 0.1.31
+Mozilla/5.0 (Linux; Android 9; moto g(6) play) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; moto g(7) play Build/PCYS29.105-134-1; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/76.0.3809.132 Mobile Safari/537.36 [FB_IAB/FB4A;FBAV/235.0.0.38.118;]
+Mozilla/5.0 (Linux; Android 9; moto g(7) play) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.80 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; moto g(7) power) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.101 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; moto g(7) power) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.92 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; moto z4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.90 Mobile Safari/537.36
+Mozilla/5.0 (Linux; Android 9; moto z4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.73 Mobile Safari/537.36
+Mozilla/5.0 (Linux; U; Android 4.1.1; en-us; GT-P3113 Build/JRO03C) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Safari/534.30
+Mozilla/5.0 (Linux; U; Android 4.1.2; ar-ae; GT-I8160 Build/JZO54K) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30
+Mozilla/5.0 (Linux; U; Android 4.2.2; en-us; Nexus 7 Build/JDQ39) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Safari/534.30; DailymotionEmbedSDK 1.0
+Mozilla/5.0 (Linux; U; Android 4.4; en-us; SM-E500H Build/JOP24G) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30
+Mozilla/5.0 (Linux; U; Android 6.0.1; en-us; LGMS550 Build/JOP24G) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Chrome/43.0.2357.65 Mobile Safari/534.30
+Mozilla/5.0 (Linux; U; Android 6.0.1; en-us; SM-J737T1 Build/JOP24G) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Chrome/43.0.2357.65 Mobile Safari/534.30
+Mozilla/5.0 (Linux; U; Android 7.0; TECNO CA6 Build/NRD90M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/64.0.3282.137 Mobile Safari/537.36 OPR/5.3.2254.135058
+Mozilla/5.0 (Linux; U; Android 7.1.2; id-id; Redmi 5A Build/N2G47H) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.146 Mobile Safari/537.36 XiaoMi/MiuiBrowser/9.5.6
+Mozilla/5.0 (Linux; U; Android 9; in-id; CPH1911 Build/PPR1.180610.011) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/70.0.3538.80 Mobile Safari/537.36 OppoBrowser/25.6.0.0.5beta
+Mozilla/5.0 (Linux; U; Android 9; vivo 1904 Build/PPR1.180610.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/74.0.3729.136 Mobile Safari/537.36 OPR/44.1.2254.143214
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:68.0) Gecko/20100101 Firefox/68.0
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:69.0) Gecko/20100101 Firefox/69.0
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:67.0) Gecko/20100101 Firefox/67.0
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:68.0) Gecko/20100101 Firefox/68.0
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:69.0) Gecko/20100101 Firefox/69.0
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:70.0) Gecko/20100101 Firefox/70.0
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/601.7.7 (KHTML, like Gecko) Version/9.1.2 Safari/601.7.7
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/602.2.14 (KHTML, like Gecko) Version/10.0.1 Safari/602.2.14
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1.2 Safari/605.1.15
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/602.4.8 (KHTML, like Gecko) Version/10.0.3 Safari/602.4.8
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.2 Safari/605.1.15
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Safari/604.1.38
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0.3 Safari/605.1.15
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.1 Safari/605.1.15
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.2 Safari/605.1.15
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0 Safari/605.1.15
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.1 Safari/605.1.15
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0.3 Safari/605.1.15
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1 Safari/605.1.15
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.1 Safari/605.1.15
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.75 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.2 Safari/605.1.15
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.1 Safari/605.1.15
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36
+Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36
+Mozilla/5.0 (PlayStation 4 6.72) AppleWebKit/605.1.15 (KHTML, like Gecko)
+Mozilla/5.0 (SMART-TV; LINUX; Tizen 3.0) AppleWebKit/538.1 (KHTML, like Gecko) Version/3.0 TV Safari/538.1
+Mozilla/5.0 (SMART-TV; Linux; Tizen 3.0) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/2.0 Chrome/47.0.2526.69 TV safari/537.36
+Mozilla/5.0 (SMART-TV; Linux; Tizen 4.0) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/2.1 Chrome/56.0.2924.0 TV Safari/537.36
+Mozilla/5.0 (SMART-TV; Linux; Tizen 5.0) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/2.2 Chrome/63.0.3239.84 TV Safari/537.36
+Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/17.17134
+Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/18.17763
+Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.75 Safari/537.36
+Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36 Maxthon/5.2.7.5000
+Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.1.3683.41 Safari/537.36
+Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36
+Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36 OPR/63.0.3368.94
+Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.75 Safari/537.36
+Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36
+Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) coc_coc_browser/82.0.144 Chrome/76.0.3809.144 Safari/537.36
+Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko
+Mozilla/5.0 (Windows NT 10.0; WOW64; rv:59.0) Gecko/20100101 Firefox/59.0
+Mozilla/5.0 (Windows NT 10.0; WOW64; rv:60.0) Gecko/20100101 Firefox/60.0
+Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36 Edge/15.15063
+Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 Edge/16.16299
+Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/17.17134
+Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/18.17763
+Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.162 Safari/537.36
+Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36
+Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18362
+Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18995
+Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19493
+Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36
+Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36
+Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.75 Safari/537.36
+Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36
+Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.80 Safari/537.36
+Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36
+Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.110 Safari/537.36 Vivaldi/2.7.1628.30
+Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36
+Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36 OPR/63.0.3368.94
+Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.75 Safari/537.36
+Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36
+Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3907.0 Safari/537.36 Edg/79.0.279.0
+Mozilla/5.0 (Windows NT 10.0; Win64; x64; Xbox; Xbox One) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18362
+Mozilla/5.0 (Windows NT 10.0; Win64; x64; Xbox; Xbox One) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18363
+Mozilla/5.0 (Windows NT 10.0; Win64; x64; Xbox; Xbox One; WebView/3.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18362
+Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0
+Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:63.0) Gecko/20100101 Firefox/63.0
+Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:67.0) Gecko/20100101 Firefox/67.0
+Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:68.0) Gecko/20100101 Firefox/68.0
+Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:69.0) Gecko/20100101 Firefox/69.0
+Mozilla/5.0 (Windows NT 10.0; rv:69.0) Gecko/20100101 Firefox/69.0
+Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.112 Safari/537.36
+Mozilla/5.0 (Windows NT 6.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.112 Safari/537.36
+Mozilla/5.0 (Windows NT 6.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.112 Safari/537.36
+Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36
+Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36
+Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36
+Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) coc_coc_browser/80.0.180 Chrome/74.0.3729.180 Safari/537.36
+Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) coc_coc_browser/82.0.144 Chrome/76.0.3809.144 Safari/537.36
+Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko
+Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36
+Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36
+Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36
+Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36
+Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36
+Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36
+Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36
+Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36
+Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:56.0) Gecko/20100101 Firefox/56.0 Waterfox/56.2.14
+Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:60.0) Gecko/20100101 Firefox/60.0
+Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:63.0) Gecko/20100101 Firefox/63.0
+Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:68.0) Gecko/20100101 Firefox/68.0
+Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:69.0) Gecko/20100101 Firefox/69.0
+Mozilla/5.0 (Windows NT 6.1; rv:69.0) Gecko/20100101 Firefox/69.0
+Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36
+Mozilla/5.0 (Windows NT 6.2; Win64; x64; rv:69.0) Gecko/20100101 Firefox/69.0
+Mozilla/5.0 (Windows NT 6.3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36
+Mozilla/5.0 (Windows NT 6.3; ARM; Trident/7.0; Touch; rv:11.0) like Gecko
+Mozilla/5.0 (Windows NT 6.3; Trident/7.0; Touch; rv:11.0) like Gecko
+Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko
+Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36
+Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36
+Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36
+Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36
+Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36
+Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:69.0) Gecko/20100101 Firefox/69.0
+Mozilla/5.0 (Windows NT 6.3; rv:69.0) Gecko/20100101 Firefox/69.0
+Mozilla/5.0 (Windows; U; Windows NT 10.0; en-US; Valve Steam GameOverlay/1568860339; ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36
+Mozilla/5.0 (X11; CrOS aarch64 12371.75.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.105 Safari/537.36
+Mozilla/5.0 (X11; CrOS armv7l 12239.92.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.136 Safari/537.36
+Mozilla/5.0 (X11; CrOS x86_64 10895.78.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.120 Safari/537.36
+Mozilla/5.0 (X11; CrOS x86_64 11021.81.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36
+Mozilla/5.0 (X11; CrOS x86_64 11895.118.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.159 Safari/537.36
+Mozilla/5.0 (X11; CrOS x86_64 12239.92.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.136 Safari/537.36
+Mozilla/5.0 (X11; CrOS x86_64 12239.92.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.136 Safari/537.36
+Mozilla/5.0 (X11; CrOS x86_64 12239.92.4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.136 Safari/537.36
+Mozilla/5.0 (X11; CrOS x86_64 12371.46.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.63 Safari/537.36
+Mozilla/5.0 (X11; CrOS x86_64 12371.65.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.93 Safari/537.36
+Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36
+Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.75 Safari/537.36
+Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/10.1 Chrome/71.0.3578.99 Safari/537.36
+Mozilla/5.0 (X11; U; U; Linux x86_64; in-id) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36
+Mozilla/5.0 (X11; U; U; Linux x86_64; pt-pt) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36
+Mozilla/5.0 (X11; U; U; Linux x86_64; th-th) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36
+Mozilla/5.0 (X11; U; U; Linux x86_64; vi-vn) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36
+Mozilla/5.0 (X11; U; U; Linux x86_64; zh-cn) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36
+Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:65.0) Gecko/20100101 Firefox/65.0
+Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:66.0) Gecko/20100101 Firefox/66.0
+Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:67.0) Gecko/20100101 Firefox/67.0
+Mozilla/5.0 (iPad; CPU OS 10_3_3 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/22.0.141836113 Mobile/14G60 Safari/600.1.4
+Mozilla/5.0 (iPad; CPU OS 10_3_3 like Mac OS X) AppleWebKit/603.1.30 (KHTML, like Gecko) CriOS/71.0.3578.89 Mobile/14G60 Safari/602.1
+Mozilla/5.0 (iPad; CPU OS 10_3_3 like Mac OS X) AppleWebKit/603.3.8 (KHTML, like Gecko) FxiOS/14.0b12646 Mobile/14G60 Safari/603.3.8
+Mozilla/5.0 (iPad; CPU OS 10_3_3 like Mac OS X) AppleWebKit/603.3.8 (KHTML, like Gecko) Mobile/14G60
+Mozilla/5.0 (iPad; CPU OS 10_3_3 like Mac OS X) AppleWebKit/603.3.8 (KHTML, like Gecko) Mobile/14G60 [FBAN/FBIOS;FBAV/240.0.0.55.117;FBBV/174195427;FBDV/iPad5,3;FBMD/iPad;FBSN/iOS;FBSV/10.3.3;FBSS/2;FBID/tablet;FBLC/zh_TW;FBOP/5;FBRV/175353135;FBCR/]
+Mozilla/5.0 (iPad; CPU OS 10_3_3 like Mac OS X) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.0 Mobile/14G60 Safari/602.1
+Mozilla/5.0 (iPad; CPU OS 10_3_4 like Mac OS X) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.0 Mobile/14G61 Safari/602.1
+Mozilla/5.0 (iPad; CPU OS 11_1 like Mac OS X) AppleWebKit/604.1.34 (KHTML, like Gecko) CriOS/76.0.3809.123 Mobile/15B101 Safari/604.1
+Mozilla/5.0 (iPad; CPU OS 11_1_2 like Mac OS X) AppleWebKit/604.1.34 (KHTML, like Gecko) CriOS/76.0.3809.123 Mobile/15B202 Safari/604.1
+Mozilla/5.0 (iPad; CPU OS 11_2_1 like Mac OS X) AppleWebKit/604.4.7 (KHTML, like Gecko) Version/11.0 Mobile/15C153 Safari/604.1
+Mozilla/5.0 (iPad; CPU OS 11_2_2 like Mac OS X) AppleWebKit/604.4.7 (KHTML, like Gecko) Version/11.0 Mobile/15C202 Safari/604.1
+Mozilla/5.0 (iPad; CPU OS 11_2_6 like Mac OS X) AppleWebKit/604.5.6 (KHTML, like Gecko) Version/11.0 Mobile/15D100 Safari/604.1
+Mozilla/5.0 (iPad; CPU OS 11_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.0 Mobile/15E148 Safari/604.1
+Mozilla/5.0 (iPad; CPU OS 11_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.0 Mobile/15E148 Safari/604.1
+Mozilla/5.0 (iPad; CPU OS 11_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15G77
+Mozilla/5.0 (iPad; CPU OS 11_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.0 Mobile/15E148 Safari/604.1
+Mozilla/5.0 (iPad; CPU OS 12_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) GSA/83.0.268992909 Mobile/15E148 Safari/605.1
+Mozilla/5.0 (iPad; CPU OS 12_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1
+Mozilla/5.0 (iPad; CPU OS 12_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/75.0.3770.103 Mobile/15E148 Safari/605.1
+Mozilla/5.0 (iPad; CPU OS 12_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/76.0.3809.123 Mobile/15E148 Safari/605.1
+Mozilla/5.0 (iPad; CPU OS 12_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/16B92
+Mozilla/5.0 (iPad; CPU OS 12_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1
+Mozilla/5.0 (iPad; CPU OS 12_1_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1
+Mozilla/5.0 (iPad; CPU OS 12_1_4 like Mac OS X) AppleWebKit/604.1.34 (KHTML, like Gecko) GSA/48.0.193557427 Mobile/16D57 Safari/604.1
+Mozilla/5.0 (iPad; CPU OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/16D57
+Mozilla/5.0 (iPad; CPU OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1
+Mozilla/5.0 (iPad; CPU OS 12_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/76.0.3809.123 Mobile/15E148 Safari/605.1
+Mozilla/5.0 (iPad; CPU OS 12_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148
+Mozilla/5.0 (iPad; CPU OS 12_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1 Mobile/15E148 Safari/604.1
+Mozilla/5.0 (iPad; CPU OS 12_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/77.0.3865.93 Mobile/15E148 Safari/605.1
+Mozilla/5.0 (iPad; CPU OS 12_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) GSA/83.0.268992909 Mobile/15E148 Safari/605.1
+Mozilla/5.0 (iPad; CPU OS 12_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.1 Mobile/15E148 Safari/604.1
+Mozilla/5.0 (iPad; CPU OS 12_3_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148
+Mozilla/5.0 (iPad; CPU OS 12_3_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.1 Mobile/15E148 Safari/604.1
+Mozilla/5.0 (iPad; CPU OS 12_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/76.0.3809.123 Mobile/15E148 Safari/605.1
+Mozilla/5.0 (iPad; CPU OS 12_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/76.0.3809.81 Mobile/15E148 Safari/605.1
+Mozilla/5.0 (iPad; CPU OS 12_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/77.0.3865.103 Mobile/15E148 Safari/605.1
+Mozilla/5.0 (iPad; CPU OS 12_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/77.0.3865.69 Mobile/15E148 Safari/605.1
+Mozilla/5.0 (iPad; CPU OS 12_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/77.0.3865.93 Mobile/15E148 Safari/605.1
+Mozilla/5.0 (iPad; CPU OS 12_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) GSA/74.0.248026584 Mobile/15E148 Safari/605.1
+Mozilla/5.0 (iPad; CPU OS 12_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) GSA/83.0.268992909 Mobile/15E148 Safari/605.1
+Mozilla/5.0 (iPad; CPU OS 12_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148
+Mozilla/5.0 (iPad; CPU OS 12_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.2 Mobile/15E148 Safari/604.1
+Mozilla/5.0 (iPad; CPU OS 12_4_1 like Mac OS X) AppleWebKit/604.1.34 (KHTML, like Gecko) CriOS/67.0.3396.87 Mobile/16G102 Safari/604.1
+Mozilla/5.0 (iPad; CPU OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) FxiOS/19.0b16042 Mobile/15E148 Safari/605.1.15
+Mozilla/5.0 (iPad; CPU OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148
+Mozilla/5.0 (iPad; CPU OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPad4,7;FBMD/iPad;FBSN/iOS;FBSV/12.4.1;FBSS/2;FBID/tablet;FBLC/vi_VN;FBOP/5;FBCR/]
+Mozilla/5.0 (iPad; CPU OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPad5,1;FBMD/iPad;FBSN/iOS;FBSV/12.4.1;FBSS/2;FBCR/;FBID/tablet;FBLC/en_US;FBOP/5]
+Mozilla/5.0 (iPad; CPU OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPad6,11;FBMD/iPad;FBSN/iOS;FBSV/12.4.1;FBSS/2;FBID/tablet;FBLC/en_US;FBOP/5;FBCR/]
+Mozilla/5.0 (iPad; CPU OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPad7,5;FBMD/iPad;FBSN/iOS;FBSV/12.4.1;FBSS/2;FBID/tablet;FBLC/en_US;FBOP/5;FBCR/]
+Mozilla/5.0 (iPad; CPU OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.2 Mobile/15E148 Safari/604.1
+Mozilla/5.0 (iPad; CPU OS 6_1_3 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10B329 Safari/8536.25
+Mozilla/5.0 (iPad; CPU OS 8_0 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) Version/8.0 Mobile/12A365 Safari/600.1.4
+Mozilla/5.0 (iPad; CPU OS 8_2 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) Version/8.0 Mobile/12D508 Safari/600.1.4
+Mozilla/5.0 (iPad; CPU OS 8_4 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/23.1.148956103 Mobile/12H143 Safari/600.1.4
+Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13F69 Safari/601.1
+Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13G36 Safari/601.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 10_2 like Mac OS X) AppleWebKit/602.3.12 (KHTML, like Gecko) Version/10.0 Mobile/14C92 Safari/602.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_3 like Mac OS X) AppleWebKit/603.3.8 (KHTML, like Gecko) Mobile/14G60 [FBAN/FBIOS;FBDV/iPhone7,1;FBMD/iPhone;FBSN/iOS;FBSV/10.3.3;FBSS/3;FBID/phone;FBLC/en_US;FBOP/5;FBCR/Verizon]
+Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_4 like Mac OS X) AppleWebKit/603.1.30 (KHTML, like Gecko) GSA/68.0.234683655 Mobile/14G61 Safari/602.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_4 like Mac OS X) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.0 Mobile/14G61 Safari/602.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 11_0_3 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A432 Safari/604.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 11_1_1 like Mac OS X) AppleWebKit/604.3.5 (KHTML, like Gecko) Version/11.0 Mobile/15B150 Safari/604.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 11_1_2 like Mac OS X) AppleWebKit/604.3.5 (KHTML, like Gecko) Version/11.0 Mobile/15B202 Safari/604.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 11_2_1 like Mac OS X) AppleWebKit/604.4.7 (KHTML, like Gecko) Version/11.0 Mobile/15C153 Safari/604.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 11_2_6 like Mac OS X) AppleWebKit/604.5.6 (KHTML, like Gecko) Mobile/15D100 [FBAN/FBIOS;FBAV/207.0.0.48.100;FBBV/141048683;FBDV/iPhone9,3;FBMD/iPhone;FBSN/iOS;FBSV/11.2.6;FBSS/2;FBCR/SFR;FBID/phone;FBLC/fr_FR;FBOP/5;FBRV/142061404]
+Mozilla/5.0 (iPhone; CPU iPhone OS 11_3 like Mac OS X) AppleWebKit/604.1.34 (KHTML, like Gecko) CriOS/76.0.3809.123 Mobile/15E148 Safari/604.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 11_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E302 [FBAN/FBIOS;FBDV/iPhone7,2;FBMD/iPhone;FBSN/iOS;FBSV/11.3.1;FBSS/2;FBID/phone;FBLC/fr_FR;FBOP/5;FBCR/VINI]
+Mozilla/5.0 (iPhone; CPU iPhone OS 11_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.0 Mobile/15E148 Safari/604.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 11_4 like Mac OS X) AppleWebKit/604.1.34 (KHTML, like Gecko) GSA/83.0.268992909 Mobile/15E148 Safari/604.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 11_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.0 Mobile/15E148 Safari/604.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 11_4_1 like Mac OS X) AppleWebKit/604.1.34 (KHTML, like Gecko) GSA/74.1.250942683 Mobile/15G77 Safari/604.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 11_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.0 Mobile/15E148 Safari/604.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) GSA/74.1.250942683 Mobile/16C101 Safari/604.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/16C101 [FBAN/FBIOS;FBDV/iPhone9,3;FBMD/iPhone;FBSN/iOS;FBSV/12.1.2;FBSS/2;FBCR/Free;FBID/phone;FBLC/fr_FR;FBOP/5]
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) GSA/74.1.250942683 Mobile/16D57 Safari/604.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/72.0.3626.101 Mobile/15E148 Safari/605.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/76.0.3809.123 Mobile/15E148 Safari/605.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/77.0.3865.69 Mobile/15E148 Safari/605.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone10,2;FBMD/iPhone;FBSN/iOS;FBSV/12.2;FBSS/3;FBID/phone;FBLC/en_US;FBOP/5;FBCR/T-Mobile]
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone10,5;FBMD/iPhone;FBSN/iOS;FBSV/12.2;FBSS/3;FBCR/AT&T;FBID/phone;FBLC/en_US;FBOP/5]
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1 Mobile/15E148 Safari/604.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/77.0.3865.69 Mobile/15E148 Safari/605.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) GSA/83.0.268992909 Mobile/15E148 Safari/605.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.1 Mobile/15E148 Safari/604.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_3_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) GSA/79.0.259819395 Mobile/16F203 Safari/604.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_3_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_3_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 DuckDuckGo/7
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_3_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone10,6;FBMD/iPhone;FBSN/iOS;FBSV/12.3.1;FBSS/3;FBID/phone;FBLC/en_US;FBOP/5;FBCR/AT&T]
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_3_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone11,6;FBMD/iPhone;FBSN/iOS;FBSV/12.3.1;FBSS/3;FBID/phone;FBLC/en_US;FBOP/5;FBCR/AT&T]
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_3_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone9,3;FBMD/iPhone;FBSN/iOS;FBSV/12.3.1;FBSS/2;FBID/phone;FBLC/es_LA;FBOP/5;FBCR/AT&T]
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_3_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.1 Mobile/15E148 Safari/604.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_3_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.1 Mobile/15E148 Safari/604.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/69.0.3497.105 Mobile/15E148 Safari/605.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/73.0.3683.68 Mobile/15E148 Safari/605.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/75.0.3770.103 Mobile/15E148 Safari/605.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/76.0.3809.123 Mobile/15E148 Safari/605.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/77.0.3865.103 Mobile/15E148 Safari/605.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/77.0.3865.69 Mobile/15E148 Safari/605.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) GSA/81.0.264749124 Mobile/15E148 Safari/605.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) GSA/82.1.267240167 Mobile/15E148 Safari/605.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) GSA/83.0.268992909 Mobile/15E148 Safari/605.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone10,1;FBMD/iPhone;FBSN/iOS;FBSV/12.4;FBSS/2;FBID/phone;FBLC/en_US;FBOP/5;FBCR/Verizon]
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone10,5;FBMD/iPhone;FBSN/iOS;FBSV/12.4;FBSS/3;FBID/phone;FBLC/en_US;FBOP/5;FBCR/T-Mobile]
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone11,6;FBMD/iPhone;FBSN/iOS;FBSV/12.4;FBSS/3;FBID/phone;FBLC/en_US;FBOP/5;FBCR/Sprint]
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone9,4;FBMD/iPhone;FBSN/iOS;FBSV/12.4;FBSS/3;FBID/phone;FBLC/en_US;FBOP/5;FBCR/T-Mobile]
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.2 Mobile/15E148 Safari/604.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) GSA/74.1.250942683 Mobile/16G102 Safari/604.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 Instagram 89.0.0.14.100 (iPhone11,6; iOS 12_4_1; en_US; en-US; scale=3.00; gamut=normal; 1242x2688; 149781277)
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBAV/240.0.0.55.117;FBBV/174195427;FBDV/iPhone7,2;FBMD/iPhone;FBSN/iOS;FBSV/12.4.1;FBSS/2;FBID/phone;FBLC/es_LA;FBOP/5;FBRV/175040728;FBCR/AT&T]
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone10,2;FBMD/iPhone;FBSN/iOS;FBSV/12.4.1;FBSS/3;FBID/phone;FBLC/en_US;FBOP/5;FBCR/Sprint]
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone10,3;FBMD/iPhone;FBSN/iOS;FBSV/12.4.1;FBSS/3;FBID/phone;FBLC/en_US;FBOP/5;FBCR/T-Mobile]
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone10,3;FBMD/iPhone;FBSN/iOS;FBSV/12.4.1;FBSS/3;FBID/phone;FBLC/en_US;FBOP/5;FBCR/Verizon]
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone10,4;FBMD/iPhone;FBSN/iOS;FBSV/12.4.1;FBSS/2;FBID/phone;FBLC/es_LA;FBOP/5;FBCR/T-Mobile]
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone10,4;FBMD/iPhone;FBSN/iOS;FBSV/12.4.1;FBSS/2;FBID/phone;FBLC/fr_FR;FBOP/5;FBCR/AT&T]
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone10,5;FBMD/iPhone;FBSN/iOS;FBSV/12.4.1;FBSS/3;FBCR/T-Mobile;FBID/phone;FBLC/es_LA;FBOP/5]
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone10,5;FBMD/iPhone;FBSN/iOS;FBSV/12.4.1;FBSS/3;FBID/phone;FBLC/en_US;FBOP/5;FBCR/T-Mobile]
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone10,5;FBMD/iPhone;FBSN/iOS;FBSV/12.4.1;FBSS/3;FBID/phone;FBLC/es_LA;FBOP/5;FBCR/T-Mobile]
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone10,6;FBMD/iPhone;FBSN/iOS;FBSV/12.4.1;FBSS/3;FBID/phone;FBLC/en_US;FBOP/5;FBCR/AT&T]
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone11,2;FBMD/iPhone;FBSN/iOS;FBSV/12.4.1;FBSS/3;FBID/phone;FBLC/en_US;FBOP/5;FBCR/Verizon]
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone11,6;FBMD/iPhone;FBSN/iOS;FBSV/12.4.1;FBSS/3;FBID/phone;FBLC/en_US;FBOP/5;FBCR/AT&T]
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone11,6;FBMD/iPhone;FBSN/iOS;FBSV/12.4.1;FBSS/3;FBID/phone;FBLC/en_US;FBOP/5;FBCR/Verizon]
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone11,6;FBMD/iPhone;FBSN/iOS;FBSV/12.4.1;FBSS/3;FBID/phone;FBLC/fr_FR;FBOP/5;FBCR/SFR]
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone11,8;FBMD/iPhone;FBSN/iOS;FBSV/12.4.1;FBSS/2;FBID/phone;FBLC/en_US;FBOP/5;FBCR/AT&T]
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone11,8;FBMD/iPhone;FBSN/iOS;FBSV/12.4.1;FBSS/2;FBID/phone;FBLC/en_US;FBOP/5;FBCR/Verizon]
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone11,8;FBMD/iPhone;FBSN/iOS;FBSV/12.4.1;FBSS/2;FBID/phone;FBLC/fr_FR;FBOP/5;FBCR/Carrier]
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone7,2;FBMD/iPhone;FBSN/iOS;FBSV/12.4.1;FBSS/2;FBID/phone;FBLC/en_US;FBOP/5;FBCR/T-Mobile]
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone8,1;FBMD/iPhone;FBSN/iOS;FBSV/12.4.1;FBSS/2;FBID/phone;FBLC/en_US;FBOP/5;FBCR/MetroPCS]
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone9,2;FBMD/iPhone;FBSN/iOS;FBSV/12.4.1;FBSS/3;FBID/phone;FBLC/en_US;FBOP/5;FBCR/cricket]
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone9,4;FBMD/iPhone;FBSN/iOS;FBSV/12.4.1;FBSS/3;FBID/phone;FBLC/en_US;FBOP/5;FBCR/AT&T]
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone9,4;FBMD/iPhone;FBSN/iOS;FBSV/12.4.1;FBSS/3;FBID/phone;FBLC/fr_FR;FBOP/5;FBCR/T-Mobile]
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.2 Mobile/15E148 Safari/604.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.4.1 Mobile/15E148 Safari/605.1.15
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148
+Mozilla/5.0 (iPhone; CPU iPhone OS 12_4_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.2 Mobile/15E148 Safari/604.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 13_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/77.0.3865.69 Mobile/15E148 Safari/605.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 13_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone10,5;FBMD/iPhone;FBSN/iOS;FBSV/13.0;FBSS/3;FBID/phone;FBLC/en_US;FBOP/5;FBCR/AT&T]
+Mozilla/5.0 (iPhone; CPU iPhone OS 13_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone10,6;FBMD/iPhone;FBSN/iOS;FBSV/13.0;FBSS/3;FBID/phone;FBLC/en_US;FBOP/5;FBCR/T-Mobile]
+Mozilla/5.0 (iPhone; CPU iPhone OS 13_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone11,2;FBMD/iPhone;FBSN/iOS;FBSV/13.0;FBSS/3;FBID/phone;FBLC/en_US;FBOP/5;FBCR/Verizon]
+Mozilla/5.0 (iPhone; CPU iPhone OS 13_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone11,8;FBMD/iPhone;FBSN/iOS;FBSV/13.0;FBSS/2;FBID/phone;FBLC/en_US;FBOP/5;FBCR/T-Mobile]
+Mozilla/5.0 (iPhone; CPU iPhone OS 13_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone11,8;FBMD/iPhone;FBSN/iOS;FBSV/13.0;FBSS/2;FBID/phone;FBLC/en_US;FBOP/5;FBCR/Verizon]
+Mozilla/5.0 (iPhone; CPU iPhone OS 13_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone9,4;FBMD/iPhone;FBSN/iOS;FBSV/13.0;FBSS/3;FBID/phone;FBLC/fr_FR;FBOP/5;FBCR/Orange France]
+Mozilla/5.0 (iPhone; CPU iPhone OS 13_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0 Mobile/15E148 Safari/604.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 13_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/76.0.3809.123 Mobile/15E148 Safari/605.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 13_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/77.0.3865.69 Mobile/15E148 Safari/605.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 13_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/77.0.3865.93 Mobile/15E148 Safari/605.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 13_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) FxiOS/8.1.3 Mobile/15E148 Safari/605.1.15
+Mozilla/5.0 (iPhone; CPU iPhone OS 13_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) GSA/83.0.268992909 Mobile/15E148 Safari/605.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 13_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148
+Mozilla/5.0 (iPhone; CPU iPhone OS 13_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 DuckDuckGo/7
+Mozilla/5.0 (iPhone; CPU iPhone OS 13_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone10,1;FBMD/iPhone;FBSN/iOS;FBSV/13.1;FBSS/2;FBID/phone;FBLC/en_US;FBOP/5;FBCR/AT&T]
+Mozilla/5.0 (iPhone; CPU iPhone OS 13_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone10,6;FBMD/iPhone;FBSN/iOS;FBSV/13.1;FBSS/3;FBID/phone;FBLC/en_US;FBOP/5;FBCR/AT&T]
+Mozilla/5.0 (iPhone; CPU iPhone OS 13_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone10,6;FBMD/iPhone;FBSN/iOS;FBSV/13.1;FBSS/3;FBID/phone;FBLC/en_US;FBOP/5;FBCR/T-Mobile]
+Mozilla/5.0 (iPhone; CPU iPhone OS 13_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone11,8;FBMD/iPhone;FBSN/iOS;FBSV/13.1;FBSS/2;FBID/phone;FBLC/en_US;FBOP/5;FBCR/AT&T]
+Mozilla/5.0 (iPhone; CPU iPhone OS 13_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone9,4;FBMD/iPhone;FBSN/iOS;FBSV/13.1;FBSS/3;FBID/phone;FBLC/es_LA;FBOP/5;FBCR/Telcel]
+Mozilla/5.0 (iPhone; CPU iPhone OS 13_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.1 Mobile/15E148 Safari/604.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 13_1_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148
+Mozilla/5.0 (iPhone; CPU iPhone OS 13_1_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone10,1;FBMD/iPhone;FBSN/iOS;FBSV/13.1.1;FBSS/2;FBID/phone;FBLC/en_US;FBOP/5;FBCR/Union]
+Mozilla/5.0 (iPhone; CPU iPhone OS 13_1_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone10,3;FBMD/iPhone;FBSN/iOS;FBSV/13.1.1;FBSS/3;FBID/phone;FBLC/en_US;FBOP/5;FBCR/Verizon]
+Mozilla/5.0 (iPhone; CPU iPhone OS 13_1_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone11,6;FBMD/iPhone;FBSN/iOS;FBSV/13.1.1;FBSS/3;FBID/phone;FBLC/en_US;FBOP/5;FBCR/T-Mobile]
+Mozilla/5.0 (iPhone; CPU iPhone OS 13_1_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone11,6;FBMD/iPhone;FBSN/iOS;FBSV/13.1.1;FBSS/3;FBID/phone;FBLC/en_US;FBOP/5;FBCR/Verizon]
+Mozilla/5.0 (iPhone; CPU iPhone OS 13_1_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.1 Mobile/15E148 Safari/604.1
+Mozilla/5.0 (iPhone; CPU iPhone OS 13_1_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148
+Mozilla/5.0 (iPhone; CPU iPhone OS 13_1_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone11,2;FBMD/iPhone;FBSN/iOS;FBSV/13.1.2;FBSS/3;FBID/phone;FBLC/en_US;FBOP/5;FBCR/AT&T]
+Mozilla/5.0 (iPhone; CPU iPhone OS 13_1_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.1 Mobile/15E148 Safari/604.1
diff --git a/tests/queries/1_stateful/00009_uniq_distributed.sql b/tests/queries/1_stateful/00009_uniq_distributed.sql
index f78604fd401..352514cd059 100644
--- a/tests/queries/1_stateful/00009_uniq_distributed.sql
+++ b/tests/queries/1_stateful/00009_uniq_distributed.sql
@@ -1,3 +1,4 @@
 -- Tags: distributed
+
 SELECT uniq(UserID), uniqIf(UserID, CounterID = 800784), uniqIf(FUniqID, RegionID = 213) FROM remote('127.0.0.{1,2}', test, hits)
 
diff --git a/tests/queries/1_stateful/00012_sorting_distributed.sql b/tests/queries/1_stateful/00012_sorting_distributed.sql
index 2f852af1dba..afbaf89d9ae 100644
--- a/tests/queries/1_stateful/00012_sorting_distributed.sql
+++ b/tests/queries/1_stateful/00012_sorting_distributed.sql
@@ -1,3 +1,4 @@
 -- Tags: distributed
+
 SELECT EventTime::DateTime('Asia/Dubai') FROM remote('127.0.0.{1,2}', test, hits) ORDER BY EventTime DESC LIMIT 10
 
diff --git a/tests/queries/1_stateful/00013_sorting_of_nested.sql b/tests/queries/1_stateful/00013_sorting_of_nested.sql
index 44f7684d746..f97120e2b98 100644
--- a/tests/queries/1_stateful/00013_sorting_of_nested.sql
+++ b/tests/queries/1_stateful/00013_sorting_of_nested.sql
@@ -1,2 +1,4 @@
+-- Tags: no-parallel-replicas
+
 SELECT ParsedParams.Key1 FROM test.visits FINAL WHERE VisitID != 0 AND notEmpty(ParsedParams.Key1) ORDER BY VisitID LIMIT 10
 
diff --git a/tests/queries/1_stateful/00016_any_if_distributed_cond_always_false.sql b/tests/queries/1_stateful/00016_any_if_distributed_cond_always_false.sql
index 2afe28639f2..50a3402244e 100644
--- a/tests/queries/1_stateful/00016_any_if_distributed_cond_always_false.sql
+++ b/tests/queries/1_stateful/00016_any_if_distributed_cond_always_false.sql
@@ -1,3 +1,4 @@
 -- Tags: distributed
+
 SELECT anyIf(SearchPhrase, CounterID = -1) FROM remote('127.0.0.{1,2}:9000', test, hits)
 
diff --git a/tests/queries/1_stateful/00022_merge_prewhere.sql b/tests/queries/1_stateful/00022_merge_prewhere.sql
index 74a3677b68e..400a896d5a8 100644
--- a/tests/queries/1_stateful/00022_merge_prewhere.sql
+++ b/tests/queries/1_stateful/00022_merge_prewhere.sql
@@ -1,3 +1,5 @@
+-- Tags: no-parallel-replicas
+
 DROP TABLE IF EXISTS test.merge_hits;
 CREATE TABLE IF NOT EXISTS test.merge_hits AS test.hits ENGINE = Merge(test, '^hits$');
 SELECT count() FROM test.merge_hits WHERE AdvEngineID = 2;
diff --git a/tests/queries/1_stateful/00042_any_left_join.sql b/tests/queries/1_stateful/00042_any_left_join.sql
index b87cf88f007..c7c0f0f987a 100644
--- a/tests/queries/1_stateful/00042_any_left_join.sql
+++ b/tests/queries/1_stateful/00042_any_left_join.sql
@@ -1,3 +1,5 @@
+-- Tags: no-parallel-replicas
+
 SELECT
     EventDate,
     hits,
diff --git a/tests/queries/1_stateful/00043_any_left_join.sql b/tests/queries/1_stateful/00043_any_left_join.sql
index 704d38f727a..6b8cce54051 100644
--- a/tests/queries/1_stateful/00043_any_left_join.sql
+++ b/tests/queries/1_stateful/00043_any_left_join.sql
@@ -1,3 +1,5 @@
+-- Tags: no-parallel-replicas
+
 SELECT
     EventDate,
     count() AS hits,
diff --git a/tests/queries/1_stateful/00044_any_left_join_string.sql b/tests/queries/1_stateful/00044_any_left_join_string.sql
index a4f2e9e1b96..ceb7a1c1783 100644
--- a/tests/queries/1_stateful/00044_any_left_join_string.sql
+++ b/tests/queries/1_stateful/00044_any_left_join_string.sql
@@ -1,3 +1,5 @@
+-- Tags: no-parallel-replicas
+
 SELECT
     domain,
     hits,
diff --git a/tests/queries/1_stateful/00063_loyalty_joins.sql b/tests/queries/1_stateful/00063_loyalty_joins.sql
index 1e7011ea909..44f0767a87a 100644
--- a/tests/queries/1_stateful/00063_loyalty_joins.sql
+++ b/tests/queries/1_stateful/00063_loyalty_joins.sql
@@ -1,15 +1,17 @@
+-- Tags: no-parallel-replicas
+
 SET any_join_distinct_right_table_keys = 1;
 SET joined_subquery_requires_alias = 0;
 
 SELECT
-    loyalty, 
+    loyalty,
     count()
-FROM test.hits ANY LEFT JOIN 
+FROM test.hits ANY LEFT JOIN
 (
     SELECT
-        UserID, 
-        sum(SearchEngineID = 2) AS yandex, 
-        sum(SearchEngineID = 3) AS google, 
+        UserID,
+        sum(SearchEngineID = 2) AS yandex,
+        sum(SearchEngineID = 3) AS google,
         toInt8(if(yandex > google, yandex / (yandex + google), -google / (yandex + google)) * 10) AS loyalty
     FROM test.hits
     WHERE (SearchEngineID = 2) OR (SearchEngineID = 3)
@@ -21,18 +23,18 @@
 ORDER BY loyalty ASC;
 
 SELECT
-    loyalty, 
+    loyalty,
     count()
 FROM
 (
     SELECT UserID
     FROM test.hits
-) ANY LEFT JOIN 
+) ANY LEFT JOIN
 (
     SELECT
-        UserID, 
-        sum(SearchEngineID = 2) AS yandex, 
-        sum(SearchEngineID = 3) AS google, 
+        UserID,
+        sum(SearchEngineID = 2) AS yandex,
+        sum(SearchEngineID = 3) AS google,
         toInt8(if(yandex > google, yandex / (yandex + google), -google / (yandex + google)) * 10) AS loyalty
     FROM test.hits
     WHERE (SearchEngineID = 2) OR (SearchEngineID = 3)
@@ -44,23 +46,23 @@
 ORDER BY loyalty ASC;
 
 SELECT
-    loyalty, 
+    loyalty,
     count()
 FROM
 (
     SELECT
-        loyalty, 
+        loyalty,
         UserID
     FROM
     (
         SELECT UserID
         FROM test.hits
-    ) ANY LEFT JOIN 
+    ) ANY LEFT JOIN
     (
         SELECT
-            UserID, 
-            sum(SearchEngineID = 2) AS yandex, 
-            sum(SearchEngineID = 3) AS google, 
+            UserID,
+            sum(SearchEngineID = 2) AS yandex,
+            sum(SearchEngineID = 3) AS google,
             toInt8(if(yandex > google, yandex / (yandex + google), -google / (yandex + google)) * 10) AS loyalty
         FROM test.hits
         WHERE (SearchEngineID = 2) OR (SearchEngineID = 3)
@@ -73,19 +75,19 @@
 ORDER BY loyalty ASC;
 
 SELECT
-    loyalty, 
-    count() AS c, 
+    loyalty,
+    count() AS c,
     bar(log(c + 1) * 1000, 0, log(3000000) * 1000, 80)
-FROM test.hits ANY INNER JOIN 
+FROM test.hits ANY INNER JOIN
 (
     SELECT
-        UserID, 
+        UserID,
         toInt8(if(yandex > google, yandex / (yandex + google), -google / (yandex + google)) * 10) AS loyalty
     FROM
     (
         SELECT
-            UserID, 
-            sum(SearchEngineID = 2) AS yandex, 
+            UserID,
+            sum(SearchEngineID = 2) AS yandex,
             sum(SearchEngineID = 3) AS google
         FROM test.hits
         WHERE (SearchEngineID = 2) OR (SearchEngineID = 3)
diff --git a/tests/queries/1_stateful/00065_loyalty_with_storage_join.sql b/tests/queries/1_stateful/00065_loyalty_with_storage_join.sql
index 515a2410583..35f0c7b60b9 100644
--- a/tests/queries/1_stateful/00065_loyalty_with_storage_join.sql
+++ b/tests/queries/1_stateful/00065_loyalty_with_storage_join.sql
@@ -1,3 +1,5 @@
+-- Tags: no-parallel-replicas
+
 USE test;
 
 DROP TABLE IF EXISTS join;
@@ -7,7 +9,7 @@ INSERT INTO join
 SELECT
     UserID,
     toInt8(if((sum(SearchEngineID = 2) AS yandex) > (sum(SearchEngineID = 3) AS google),
-        yandex / (yandex + google), 
+        yandex / (yandex + google),
         -google / (yandex + google)) * 10) AS loyalty
 FROM hits
 WHERE (SearchEngineID = 2) OR (SearchEngineID = 3)
diff --git a/tests/queries/1_stateful/00066_sorting_distributed_many_replicas.sql b/tests/queries/1_stateful/00066_sorting_distributed_many_replicas.sql
index c7a34c493c9..c60e342dd41 100644
--- a/tests/queries/1_stateful/00066_sorting_distributed_many_replicas.sql
+++ b/tests/queries/1_stateful/00066_sorting_distributed_many_replicas.sql
@@ -1,4 +1,5 @@
 -- Tags: replica, distributed, no-random-settings
+
 SET max_parallel_replicas = 2;
 
 SELECT EventTime::DateTime('Asia/Dubai') FROM remote('127.0.0.{1|2}', test, hits) ORDER BY EventTime DESC LIMIT 10
diff --git a/tests/queries/1_stateful/00074_full_join.sql b/tests/queries/1_stateful/00074_full_join.sql
index f049be2a74d..c1d9e4be1a4 100644
--- a/tests/queries/1_stateful/00074_full_join.sql
+++ b/tests/queries/1_stateful/00074_full_join.sql
@@ -1,3 +1,5 @@
+-- Tags: no-parallel-replicas
+
 set any_join_distinct_right_table_keys = 1;
 set joined_subquery_requires_alias = 0;
 
diff --git a/tests/queries/1_stateful/00075_left_array_join.sql b/tests/queries/1_stateful/00075_left_array_join.sql
index 1fd045a26bf..3540d791157 100644
--- a/tests/queries/1_stateful/00075_left_array_join.sql
+++ b/tests/queries/1_stateful/00075_left_array_join.sql
@@ -1,2 +1,4 @@
+-- Tags: no-parallel-replicas
+
 SELECT UserID, EventTime::DateTime('Asia/Dubai'), pp.Key1, pp.Key2, ParsedParams.Key1 FROM test.hits ARRAY JOIN ParsedParams AS pp WHERE CounterID = 1704509 ORDER BY UserID, EventTime, pp.Key1, pp.Key2 LIMIT 100;
 SELECT UserID, EventTime::DateTime('Asia/Dubai'), pp.Key1, pp.Key2, ParsedParams.Key1 FROM test.hits LEFT ARRAY JOIN ParsedParams AS pp WHERE CounterID = 1704509 ORDER BY UserID, EventTime, pp.Key1, pp.Key2 LIMIT 100;
diff --git a/tests/queries/1_stateful/00079_array_join_not_used_joined_column.sql b/tests/queries/1_stateful/00079_array_join_not_used_joined_column.sql
index 8e6742bb1e1..9431e1cf596 100644
--- a/tests/queries/1_stateful/00079_array_join_not_used_joined_column.sql
+++ b/tests/queries/1_stateful/00079_array_join_not_used_joined_column.sql
@@ -1,3 +1,5 @@
+-- Tags: no-parallel-replicas
+
 SELECT PP.Key1 AS `ym:s:paramsLevel1`, sum(arrayAll(`x_1` -> `x_1`= '', ParsedParams.Key2)) AS `ym:s:visits` FROM test.hits ARRAY JOIN ParsedParams AS `PP` WHERE CounterID = 1704509 GROUP BY `ym:s:paramsLevel1` ORDER BY PP.Key1, `ym:s:visits` LIMIT 0, 100;
 SELECT PP.Key1 AS x1, ParsedParams.Key2 AS x2 FROM test.hits ARRAY JOIN ParsedParams AS PP WHERE CounterID = 1704509 ORDER BY x1, x2 LIMIT 10;
 SELECT ParsedParams.Key2 AS x FROM test.hits ARRAY JOIN ParsedParams AS PP ORDER BY x DESC LIMIT 10;
diff --git a/tests/queries/1_stateful/00080_array_join_and_union.sql b/tests/queries/1_stateful/00080_array_join_and_union.sql
index d9aa1cc17cc..2f2e5e9324f 100644
--- a/tests/queries/1_stateful/00080_array_join_and_union.sql
+++ b/tests/queries/1_stateful/00080_array_join_and_union.sql
@@ -1 +1,3 @@
+-- Tags: no-parallel-replicas
+
 SELECT count() FROM (SELECT Goals.ID FROM test.visits ARRAY JOIN Goals WHERE CounterID = 842440 LIMIT 10 UNION ALL SELECT Goals.ID FROM test.visits ARRAY JOIN Goals WHERE CounterID = 842440 LIMIT 10);
diff --git a/tests/queries/1_stateful/00084_external_aggregation.sql b/tests/queries/1_stateful/00084_external_aggregation.sql
index 816d95f4b8b..330aa158cf7 100644
--- a/tests/queries/1_stateful/00084_external_aggregation.sql
+++ b/tests/queries/1_stateful/00084_external_aggregation.sql
@@ -1,4 +1,4 @@
--- Tags: no-random-settings
+-- Tags: no-random-settings, no-parallel-replicas
 
 SET max_bytes_before_external_group_by = 200000000;
 
diff --git a/tests/queries/1_stateful/00091_prewhere_two_conditions.sql b/tests/queries/1_stateful/00091_prewhere_two_conditions.sql
index 1e476d3a27d..745bb125c2b 100644
--- a/tests/queries/1_stateful/00091_prewhere_two_conditions.sql
+++ b/tests/queries/1_stateful/00091_prewhere_two_conditions.sql
@@ -1,3 +1,6 @@
+-- Tags: no-parallel-replicas
+-- Requires investigation (max_bytes_to_read is not respected)
+
 SET max_bytes_to_read = 600000000;
 SET optimize_move_to_prewhere = 1;
 
diff --git a/tests/queries/1_stateful/00092_obfuscator.sh b/tests/queries/1_stateful/00092_obfuscator.sh
index 85f476c6ae5..f19473f01ac 100755
--- a/tests/queries/1_stateful/00092_obfuscator.sh
+++ b/tests/queries/1_stateful/00092_obfuscator.sh
@@ -1,4 +1,6 @@
 #!/usr/bin/env bash
+# Tags: no-parallel-replicas
+# clickhouse-local may not work with parallel replicas
 
 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
diff --git a/tests/queries/1_stateful/00096_obfuscator_save_load.sh b/tests/queries/1_stateful/00096_obfuscator_save_load.sh
index a88dfcdb9b9..1bb212e1bba 100755
--- a/tests/queries/1_stateful/00096_obfuscator_save_load.sh
+++ b/tests/queries/1_stateful/00096_obfuscator_save_load.sh
@@ -1,4 +1,6 @@
 #!/usr/bin/env bash
+# Tags: no-parallel-replicas
+# clickhouse-local may not work with parallel replicas
 
 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
diff --git a/tests/queries/1_stateful/00146_aggregate_function_uniq.sql b/tests/queries/1_stateful/00146_aggregate_function_uniq.sql
index fd3fde7636d..2cab6e70d22 100644
--- a/tests/queries/1_stateful/00146_aggregate_function_uniq.sql
+++ b/tests/queries/1_stateful/00146_aggregate_function_uniq.sql
@@ -1,3 +1,5 @@
+-- Tags: no-parallel-replicas
+
 SELECT RegionID, uniqHLL12(WatchID) AS X FROM remote('127.0.0.{1,2}', test, hits) GROUP BY RegionID HAVING X > 100000 ORDER BY RegionID ASC;
 SELECT RegionID, uniqCombined(WatchID) AS X FROM remote('127.0.0.{1,2}', test, hits) GROUP BY RegionID HAVING X > 100000 ORDER BY RegionID ASC;
 SELECT abs(uniq(WatchID) - uniqExact(WatchID)) FROM test.hits;
diff --git a/tests/queries/1_stateful/00149_quantiles_timing_distributed.sql b/tests/queries/1_stateful/00149_quantiles_timing_distributed.sql
index 6f910646fb7..5d2476226ba 100644
--- a/tests/queries/1_stateful/00149_quantiles_timing_distributed.sql
+++ b/tests/queries/1_stateful/00149_quantiles_timing_distributed.sql
@@ -1,4 +1,4 @@
--- Tags: distributed
+-- Tags: distributed, no-parallel-replicas
 
 SELECT sum(cityHash64(*)) FROM (SELECT CounterID, quantileTiming(0.5)(SendTiming), count() FROM remote('127.0.0.{1,2,3,4,5,6,7,8,9,10}', test.hits) WHERE SendTiming != -1 GROUP BY CounterID);
 SELECT sum(cityHash64(*)) FROM (SELECT CounterID, quantileTiming(0.5)(SendTiming), count() FROM remote('127.0.0.{1,2,3,4,5,6,7,8,9,10}', test.hits) WHERE SendTiming != -1 GROUP BY CounterID) SETTINGS optimize_aggregation_in_order = 1;
diff --git a/tests/queries/1_stateful/00152_insert_different_granularity.sql b/tests/queries/1_stateful/00152_insert_different_granularity.sql
index 294d71b384b..35483149498 100644 --- a/tests/queries/1_stateful/00152_insert_different_granularity.sql +++ b/tests/queries/1_stateful/00152_insert_different_granularity.sql @@ -1,4 +1,4 @@ --- Tags: no-tsan, no-replicated-database, no-parallel +-- Tags: no-tsan, no-replicated-database, no-parallel, no-parallel-replicas -- Tag no-replicated-database: Fails due to additional replicas or shards DROP TABLE IF EXISTS fixed_granularity_table; diff --git a/tests/queries/1_stateful/00156_max_execution_speed_sample_merge.sql b/tests/queries/1_stateful/00156_max_execution_speed_sample_merge.sql index e325c18200b..32079111f6c 100644 --- a/tests/queries/1_stateful/00156_max_execution_speed_sample_merge.sql +++ b/tests/queries/1_stateful/00156_max_execution_speed_sample_merge.sql @@ -1,3 +1,6 @@ +-- Tags: no-parallel-replicas +-- Merge tables doesn't work with parallel replicas currently + SET max_execution_speed = 4000000, timeout_before_checking_execution_speed = 0; CREATE TEMPORARY TABLE times (t DateTime); diff --git a/tests/queries/1_stateful/00166_explain_estimate.sql b/tests/queries/1_stateful/00166_explain_estimate.sql index c4071271736..abac92ecb2e 100644 --- a/tests/queries/1_stateful/00166_explain_estimate.sql +++ b/tests/queries/1_stateful/00166_explain_estimate.sql @@ -1,4 +1,4 @@ --- Tags: no-replicated-database +-- Tags: no-replicated-database, no-parallel-replicas -- Tag no-replicated-database: Requires investigation EXPLAIN ESTIMATE SELECT count() FROM test.hits WHERE CounterID = 29103473; diff --git a/tests/queries/1_stateful/00170_s3_cache.sql b/tests/queries/1_stateful/00170_s3_cache.sql index b03b2a16bf0..81592255428 100644 --- a/tests/queries/1_stateful/00170_s3_cache.sql +++ b/tests/queries/1_stateful/00170_s3_cache.sql @@ -1,4 +1,4 @@ --- Tags: no-parallel, no-random-settings +-- Tags: no-parallel, no-random-settings, no-parallel-replicas -- { echo } diff --git a/tests/queries/1_stateful/00171_grouping_aggregated_transform_bug.sql b/tests/queries/1_stateful/00171_grouping_aggregated_transform_bug.sql index 7068780a1b1..07788af927e 100644 --- a/tests/queries/1_stateful/00171_grouping_aggregated_transform_bug.sql +++ b/tests/queries/1_stateful/00171_grouping_aggregated_transform_bug.sql @@ -1,4 +1,4 @@ --- Tags: distributed +-- Tags: distributed, no-parallel-replicas SELECT sum(cityHash64(*)) FROM (SELECT CounterID, quantileTiming(0.5)(SendTiming), count() FROM remote('127.0.0.{1,2,3,4,5,6,7,8,9,10}', test.hits) WHERE SendTiming != -1 GROUP BY CounterID) SETTINGS max_block_size = 63169; SELECT sum(cityHash64(*)) FROM (SELECT CounterID, quantileTiming(0.5)(SendTiming), count() FROM remote('127.0.0.{1,2,3,4,5,6,7,8,9,10}', test.hits) WHERE SendTiming != -1 GROUP BY CounterID) SETTINGS optimize_aggregation_in_order = 1, max_block_size = 63169; diff --git a/tests/queries/1_stateful/00172_early_constant_folding.sql b/tests/queries/1_stateful/00172_early_constant_folding.sql index cc3d2274ecd..b31e418b492 100644 --- a/tests/queries/1_stateful/00172_early_constant_folding.sql +++ b/tests/queries/1_stateful/00172_early_constant_folding.sql @@ -1 +1,3 @@ +-- Tags: no-parallel-replicas + EXPLAIN PIPELINE SELECT count(JavaEnable) FROM test.hits WHERE WatchID = 1 OR Title = 'next' OR URL = 'prev' OR URL = '???' 
OR 1; diff --git a/tests/queries/1_stateful/00172_hits_joins.sql.j2 b/tests/queries/1_stateful/00172_hits_joins.sql.j2 index 4599d1d5a5d..4617fe5aef8 100644 --- a/tests/queries/1_stateful/00172_hits_joins.sql.j2 +++ b/tests/queries/1_stateful/00172_hits_joins.sql.j2 @@ -1,3 +1,4 @@ +-- Tags: no-parallel-replicas {% for join_algorithm in ['hash', 'parallel_hash', 'full_sorting_merge', 'grace_hash'] -%} SET max_rows_in_join = '{% if join_algorithm == 'grace_hash' %}10K{% else %}0{% endif %}'; diff --git a/tests/queries/1_stateful/00175_obfuscator_schema_inference.sh b/tests/queries/1_stateful/00175_obfuscator_schema_inference.sh index 771c7ab5436..0b308c65061 100755 --- a/tests/queries/1_stateful/00175_obfuscator_schema_inference.sh +++ b/tests/queries/1_stateful/00175_obfuscator_schema_inference.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# Tags: no-parallel-replicas CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index 1ee45f5874d..75c693bc1a8 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -127,8 +127,8 @@ PrettySpaceNoEscapesMonoBlock Protobuf ProtobufSingle QTCreator -QueryResultCacheHits -QueryResultCacheMisses +QueryCacheHits +QueryCacheMisses RBAC RawBLOB RedHat diff --git a/utils/keeper-data-dumper/main.cpp b/utils/keeper-data-dumper/main.cpp index 0ea6371b49f..e82b21079fe 100644 --- a/utils/keeper-data-dumper/main.cpp +++ b/utils/keeper-data-dumper/main.cpp @@ -69,7 +69,8 @@ int main(int argc, char *argv[]) LOG_INFO(logger, "Last committed index: {}", last_commited_index); - DB::KeeperLogStore changelog(argv[2], 10000000, true, settings->compress_logs); + DB::KeeperLogStore changelog( + argv[2], LogFileSettings{.force_sync = true, .compress_logs = settings->compress_logs, .rotate_interval = 10000000}); changelog.init(last_commited_index, 10000000000UL); /// collect all logs if (changelog.size() == 0) LOG_INFO(logger, "Changelog empty"); diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 4535eeaf243..e09a39ff463 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,3 +1,4 @@ +v23.1.3.5-stable 2023-02-03 v23.1.2.9-stable 2023-01-29 v23.1.1.3077-stable 2023-01-25 v22.12.3.5-stable 2023-01-10