diff --git a/.github/ISSUE_TEMPLATE/96_installation-issues.md b/.github/ISSUE_TEMPLATE/96_installation-issues.md new file mode 100644 index 00000000000..c322ccc92ce --- /dev/null +++ b/.github/ISSUE_TEMPLATE/96_installation-issues.md @@ -0,0 +1,29 @@ +--- +name: Installation issue +about: Issue with ClickHouse installation from https://clickhouse.com/docs/en/install/ +title: '' +labels: comp-install +assignees: '' + +--- + +**Installation type** + +Packages, docker, single binary, curl? + +**Source of the ClickHouse** + +A link to the source. Or the command you've tried + +**Expected result** + +What you expected + +**The actual result** + +What you get + +**How to reproduce** + +* For Linux-based operating systems: provide a script for clear docker container from the official image +* For anything else: steps to reproduce on as much as possible clear system diff --git a/.github/workflows/backport_branches.yml b/.github/workflows/backport_branches.yml index 4c8d023f2ec..30a77a9b27f 100644 --- a/.github/workflows/backport_branches.yml +++ b/.github/workflows/backport_branches.yml @@ -466,6 +466,7 @@ jobs: - BuilderDebTsan - BuilderDebDebug runs-on: [self-hosted, style-checker] + if: ${{ success() || failure() }} steps: - name: Set envs run: | @@ -504,6 +505,7 @@ jobs: - BuilderBinDarwin - BuilderBinDarwinAarch64 runs-on: [self-hosted, style-checker] + if: ${{ success() || failure() }} steps: - name: Set envs run: | diff --git a/.github/workflows/cancel.yml b/.github/workflows/cancel.yml index cb06d853219..3c2be767ad2 100644 --- a/.github/workflows/cancel.yml +++ b/.github/workflows/cancel.yml @@ -6,7 +6,7 @@ env: on: # yamllint disable-line rule:truthy workflow_run: - workflows: ["PullRequestCI", "ReleaseCI", "DocsCheck", "BackportPR"] + workflows: ["PullRequestCI", "ReleaseBranchCI", "DocsCheck", "BackportPR"] types: - requested jobs: diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index 3d22cb984dd..fba8a975ca6 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -974,6 +974,7 @@ jobs: - BuilderDebTsan - BuilderDebUBsan runs-on: [self-hosted, style-checker] + if: ${{ success() || failure() }} steps: - name: Set envs run: | @@ -1021,6 +1022,7 @@ jobs: - BuilderBinClangTidy - BuilderDebShared runs-on: [self-hosted, style-checker] + if: ${{ success() || failure() }} steps: - name: Set envs run: | diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 801f7eda94a..612bb1f8f9b 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -122,3 +122,58 @@ jobs: docker ps --quiet | xargs --no-run-if-empty docker kill ||: docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" + SonarCloud: + runs-on: [self-hosted, builder] + env: + SONAR_SCANNER_VERSION: 4.7.0.2747 + SONAR_SERVER_URL: "https://sonarcloud.io" + BUILD_WRAPPER_OUT_DIR: build_wrapper_output_directory # Directory where build-wrapper output will be placed + CC: clang-15 + CXX: clang++-15 + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: 0 # Shallow clones should be disabled for a better relevancy of analysis + submodules: true + - name: Set up JDK 11 + uses: actions/setup-java@v1 + with: + java-version: 11 + - name: Download and set up sonar-scanner + env: + SONAR_SCANNER_DOWNLOAD_URL: https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-${{ env.SONAR_SCANNER_VERSION }}-linux.zip + run: | + mkdir -p "$HOME/.sonar" + curl -sSLo 
"$HOME/.sonar/sonar-scanner.zip" "${{ env.SONAR_SCANNER_DOWNLOAD_URL }}" + unzip -o "$HOME/.sonar/sonar-scanner.zip" -d "$HOME/.sonar/" + echo "$HOME/.sonar/sonar-scanner-${{ env.SONAR_SCANNER_VERSION }}-linux/bin" >> "$GITHUB_PATH" + - name: Download and set up build-wrapper + env: + BUILD_WRAPPER_DOWNLOAD_URL: ${{ env.SONAR_SERVER_URL }}/static/cpp/build-wrapper-linux-x86.zip + run: | + curl -sSLo "$HOME/.sonar/build-wrapper-linux-x86.zip" "${{ env.BUILD_WRAPPER_DOWNLOAD_URL }}" + unzip -o "$HOME/.sonar/build-wrapper-linux-x86.zip" -d "$HOME/.sonar/" + echo "$HOME/.sonar/build-wrapper-linux-x86" >> "$GITHUB_PATH" + - name: Set Up Build Tools + run: | + sudo apt-get update + sudo apt-get install -yq git cmake ccache python3 ninja-build + sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)" + - name: Run build-wrapper + run: | + mkdir build + cd build + cmake .. + cd .. + build-wrapper-linux-x86-64 --out-dir ${{ env.BUILD_WRAPPER_OUT_DIR }} cmake --build build/ + - name: Run sonar-scanner + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} + run: | + sonar-scanner \ + --define sonar.host.url="${{ env.SONAR_SERVER_URL }}" \ + --define sonar.cfamily.build-wrapper-output="${{ env.BUILD_WRAPPER_OUT_DIR }}" \ + --define sonar.projectKey="ClickHouse_ClickHouse" \ + --define sonar.organization="clickhouse-java" \ + --define sonar.exclusions="**/*.java,**/*.ts,**/*.js,**/*.css,**/*.sql" diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 2795dc62d6d..23245c16374 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -112,7 +112,7 @@ jobs: StyleCheck: needs: DockerHubPush runs-on: [self-hosted, style-checker] - if: ${{ success() || failure() }} + if: ${{ success() || failure() || always() }} steps: - name: Set envs run: | @@ -2023,6 +2023,7 @@ jobs: docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: sudo rm -fr "$TEMP_PATH" TestsBugfixCheck: + needs: [CheckLabels, StyleCheck] runs-on: [self-hosted, stress-tester] steps: - name: Set envs diff --git a/.github/workflows/release_branches.yml b/.github/workflows/release_branches.yml index 8f42ca92646..abe85d3e72d 100644 --- a/.github/workflows/release_branches.yml +++ b/.github/workflows/release_branches.yml @@ -541,6 +541,7 @@ jobs: - BuilderDebMsan - BuilderDebDebug runs-on: [self-hosted, style-checker] + if: ${{ success() || failure() }} steps: - name: Set envs run: | @@ -580,6 +581,7 @@ jobs: - BuilderBinDarwin - BuilderBinDarwinAarch64 runs-on: [self-hosted, style-checker] + if: ${{ success() || failure() }} steps: - name: Set envs run: | diff --git a/.gitignore b/.gitignore index dd632eba85d..5b8f2ca452d 100644 --- a/.gitignore +++ b/.gitignore @@ -80,6 +80,7 @@ core vgcore* *.deb +*.tar.zst *.build *.upload *.changes diff --git a/.gitmodules b/.gitmodules index abd29c38846..293029ad171 100644 --- a/.gitmodules +++ b/.gitmodules @@ -287,3 +287,6 @@ [submodule "contrib/corrosion"] path = contrib/corrosion url = https://github.com/corrosion-rs/corrosion.git +[submodule "contrib/morton-nd"] + path = contrib/morton-nd + url = https://github.com/morton-nd/morton-nd diff --git a/.snyk b/.snyk new file mode 100644 index 00000000000..7acc6b9fbf5 --- /dev/null +++ b/.snyk @@ -0,0 +1,4 @@ +# Snyk (https://snyk.io) policy file +exclude: + global: + - tests/** diff --git a/CHANGELOG.md b/CHANGELOG.md index 56d117d05dd..68767612892 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,7 @@ ### Table of Contents 
+**[ClickHouse release v22.10, 2022-10-26](#2210)**<br/>
**[ClickHouse release v22.9, 2022-09-22](#229)**<br/>
-**[ClickHouse release v22.8, 2022-08-18](#228)**<br/>
+**[ClickHouse release v22.8-lts, 2022-08-18](#228)**<br/>
**[ClickHouse release v22.7, 2022-07-21](#227)**<br/>
**[ClickHouse release v22.6, 2022-06-16](#226)**<br/>
**[ClickHouse release v22.5, 2022-05-19](#225)**<br/>
@@ -10,10 +11,143 @@ **[ClickHouse release v22.1, 2022-01-18](#221)**<br/>
**[Changelog for 2021](https://clickhouse.com/docs/en/whats-new/changelog/2021/)**<br/>
+### ClickHouse release 22.10, 2022-10-26 + +#### Backward Incompatible Change +* Rename cache commands: `show caches` -> `show filesystem caches`, `describe cache` -> `describe filesystem cache`. [#41508](https://github.com/ClickHouse/ClickHouse/pull/41508) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Remove support for the `WITH TIMEOUT` section for `LIVE VIEW`. This closes [#40557](https://github.com/ClickHouse/ClickHouse/issues/40557). [#42173](https://github.com/ClickHouse/ClickHouse/pull/42173) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Remove support for the `{database}` macro from the client's prompt. It was displayed incorrectly if the database was unspecified and it was not updated on `USE` statements. This closes [#25891](https://github.com/ClickHouse/ClickHouse/issues/25891). [#42508](https://github.com/ClickHouse/ClickHouse/pull/42508) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### New Feature +* Composable protocol configuration is added. Now different protocols can be set up with different listen hosts. Protocol wrappers such as PROXYv1 can be set up over any other protocols (TCP, TCP secure, MySQL, Postgres). [#41198](https://github.com/ClickHouse/ClickHouse/pull/41198) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Add `S3` as a new type of the destination of backups. Support BACKUP to S3 with as-is path/data structure. [#42333](https://github.com/ClickHouse/ClickHouse/pull/42333) ([Vitaly Baranov](https://github.com/vitlibar)), [#42232](https://github.com/ClickHouse/ClickHouse/pull/42232) ([Azat Khuzhin](https://github.com/azat)). +* Added functions (`randUniform`, `randNormal`, `randLogNormal`, `randExponential`, `randChiSquared`, `randStudentT`, `randFisherF`, `randBernoulli`, `randBinomial`, `randNegativeBinomial`, `randPoisson`) to generate random values according to the specified distributions. This closes [#21834](https://github.com/ClickHouse/ClickHouse/issues/21834). [#42411](https://github.com/ClickHouse/ClickHouse/pull/42411) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* An improvement for ClickHouse Keeper: add support for uploading snapshots to S3. S3 information can be defined inside `keeper_server.s3_snapshot`. [#41342](https://github.com/ClickHouse/ClickHouse/pull/41342) ([Antonio Andelic](https://github.com/antonio2368)). +* Added an aggregate function `analysisOfVariance` (`anova`) to perform a statistical test over several groups of normally distributed observations to find out whether all groups have the same mean or not. Original PR [#37872](https://github.com/ClickHouse/ClickHouse/issues/37872). [#42131](https://github.com/ClickHouse/ClickHouse/pull/42131) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Support limiting of temporary data stored on disk using settings `max_temporary_data_on_disk_size_for_user`/`max_temporary_data_on_disk_size_for_query` . [#40893](https://github.com/ClickHouse/ClickHouse/pull/40893) ([Vladimir C](https://github.com/vdimir)). +* Add setting `format_json_object_each_row_column_for_object_name` to write/parse object name as column value in JSONObjectEachRow format. [#41703](https://github.com/ClickHouse/ClickHouse/pull/41703) ([Kruglov Pavel](https://github.com/Avogar)). +* Add BLAKE3 hash-function to SQL. [#33435](https://github.com/ClickHouse/ClickHouse/pull/33435) ([BoloniniD](https://github.com/BoloniniD)). +* The function `javaHash` has been extended to integers. 
[#41131](https://github.com/ClickHouse/ClickHouse/pull/41131) ([JackyWoo](https://github.com/JackyWoo)). +* Add OpenTelemetry support to ON CLUSTER DDL (require `distributed_ddl_entry_format_version` to be set to 4). [#41484](https://github.com/ClickHouse/ClickHouse/pull/41484) ([Frank Chen](https://github.com/FrankChen021)). +* Added system table `asynchronous_insert_log`. It contains information about asynchronous inserts (including results of queries in fire-and-forget mode (with `wait_for_async_insert=0`)) for better introspection. [#42040](https://github.com/ClickHouse/ClickHouse/pull/42040) ([Anton Popov](https://github.com/CurtizJ)). +* Add support for methods `lz4`, `bz2`, `snappy` in HTTP's `Accept-Encoding` which is a non-standard extension to HTTP protocol. [#42071](https://github.com/ClickHouse/ClickHouse/pull/42071) ([Nikolay Degterinsky](https://github.com/evillique)). +* Adds Morton Coding (ZCurve) encode/decode functions. [#41753](https://github.com/ClickHouse/ClickHouse/pull/41753) ([Constantine Peresypkin](https://github.com/pkit)). +* Add support for `SET setting_name = DEFAULT`. [#42187](https://github.com/ClickHouse/ClickHouse/pull/42187) ([Filatenkov Artur](https://github.com/FArthur-cmd)). + +#### Experimental Feature +* Added new infrastructure for query analysis and planning under the `allow_experimental_analyzer` setting. [#31796](https://github.com/ClickHouse/ClickHouse/pull/31796) ([Maksim Kita](https://github.com/kitaisreal)). +* Initial implementation of Kusto Query Language. Please don't use it. [#37961](https://github.com/ClickHouse/ClickHouse/pull/37961) ([Yong Wang](https://github.com/kashwy)). + +#### Performance Improvement +* Relax the "Too many parts" threshold. This closes [#6551](https://github.com/ClickHouse/ClickHouse/issues/6551). Now ClickHouse will allow more parts in a partition if the average part size is large enough (at least 10 GiB). This allows to have up to petabytes of data in a single partition of a single table on a single server, which is possible using disk shelves or object storage. [#42002](https://github.com/ClickHouse/ClickHouse/pull/42002) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Implement operator precedence element parser to make the required stack size smaller. [#34892](https://github.com/ClickHouse/ClickHouse/pull/34892) ([Nikolay Degterinsky](https://github.com/evillique)). +* DISTINCT in order optimization leverage sorting properties of data streams. This improvement will enable reading in order for DISTINCT if applicable (before it was necessary to provide ORDER BY for columns in DISTINCT). [#41014](https://github.com/ClickHouse/ClickHouse/pull/41014) ([Igor Nikonov](https://github.com/devcrafter)). +* ColumnVector: optimize UInt8 index with AVX512VBMI. [#41247](https://github.com/ClickHouse/ClickHouse/pull/41247) ([Guo Wangyang](https://github.com/guowangy)). +* Optimize the lock contentions for `ThreadGroupStatus::mutex`. The performance experiments of **SSB** (Star Schema Benchmark) on the ICX device (Intel Xeon Platinum 8380 CPU, 80 cores, 160 threads) shows that this change could bring a **2.95x** improvement of the geomean of all subcases' QPS. [#41675](https://github.com/ClickHouse/ClickHouse/pull/41675) ([Zhiguo Zhou](https://github.com/ZhiguoZh)). +* Add `ldapr` capabilities to AArch64 builds. This is supported from Graviton 2+, Azure and GCP instances. Only appeared in clang-15 [not so long ago](https://github.com/llvm/llvm-project/commit/9609b5daffe9fd28d83d83da895abc5113f76c24). 
[#41778](https://github.com/ClickHouse/ClickHouse/pull/41778) ([Daniel Kutenin](https://github.com/danlark1)). +* Improve performance when comparing strings and one argument is an empty constant string. [#41870](https://github.com/ClickHouse/ClickHouse/pull/41870) ([Jiebin Sun](https://github.com/jiebinn)). +* Optimize `insertFrom` of ColumnAggregateFunction to share Aggregate State in some cases. [#41960](https://github.com/ClickHouse/ClickHouse/pull/41960) ([flynn](https://github.com/ucasfl)). +* Make writing to `azure_blob_storage` disks faster (respect `max_single_part_upload_size` instead of writing a block per each buffer size). Inefficiency mentioned in [#41754](https://github.com/ClickHouse/ClickHouse/issues/41754). [#42041](https://github.com/ClickHouse/ClickHouse/pull/42041) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Make thread ids in the process list and query_log unique to avoid waste. [#42180](https://github.com/ClickHouse/ClickHouse/pull/42180) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Support skipping cache completely (both download to cache and reading cached data) in case the requested read range exceeds the threshold defined by cache setting `bypass_cache_threashold`, requires to be enabled with `enable_bypass_cache_with_threshold`). [#42418](https://github.com/ClickHouse/ClickHouse/pull/42418) ([Han Shukai](https://github.com/KinderRiven)). This helps on slow local disks. + +#### Improvement +* Add setting `allow_implicit_no_password`: in combination with `allow_no_password` it forbids creating a user with no password unless `IDENTIFIED WITH no_password` is explicitly specified. [#41341](https://github.com/ClickHouse/ClickHouse/pull/41341) ([Nikolay Degterinsky](https://github.com/evillique)). +* Embedded Keeper will always start in the background allowing ClickHouse to start without achieving quorum. [#40991](https://github.com/ClickHouse/ClickHouse/pull/40991) ([Antonio Andelic](https://github.com/antonio2368)). +* Made reestablishing a new connection to ZooKeeper more reactive in case of expiration of the previous one. Previously there was a task which spawns every minute by default and thus a table could be in readonly state for about this time. [#41092](https://github.com/ClickHouse/ClickHouse/pull/41092) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Now projections can be used with zero copy replication (zero-copy replication is a non-production feature). [#41147](https://github.com/ClickHouse/ClickHouse/pull/41147) ([alesapin](https://github.com/alesapin)). +* Support expression `(EXPLAIN SELECT ...)` in a subquery. Queries like `SELECT * FROM (EXPLAIN PIPELINE SELECT col FROM TABLE ORDER BY col)` became valid. [#40630](https://github.com/ClickHouse/ClickHouse/pull/40630) ([Vladimir C](https://github.com/vdimir)). +* Allow changing `async_insert_max_data_size` or `async_insert_busy_timeout_ms` in scope of query. E.g. user wants to insert data rarely and she doesn't have access to the server config to tune default settings. [#40668](https://github.com/ClickHouse/ClickHouse/pull/40668) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Improvements for reading from remote filesystems, made threadpool size for reads/writes configurable. Closes [#41070](https://github.com/ClickHouse/ClickHouse/issues/41070). [#41011](https://github.com/ClickHouse/ClickHouse/pull/41011) ([Kseniia Sumarokova](https://github.com/kssenii)). 
+* Support all combinators combination in WindowTransform/arratReduce*/initializeAggregation/aggregate functions versioning. Previously combinators like `ForEach/Resample/Map` didn't work in these places, using them led to exception like`State function ... inserts results into non-state column`. [#41107](https://github.com/ClickHouse/ClickHouse/pull/41107) ([Kruglov Pavel](https://github.com/Avogar)). +* Add function `tryDecrypt` that returns NULL when decrypt fails (e.g. decrypt with incorrect key) instead of throwing an exception. [#41206](https://github.com/ClickHouse/ClickHouse/pull/41206) ([Duc Canh Le](https://github.com/canhld94)). +* Add the `unreserved_space` column to the `system.disks` table to check how much space is not taken by reservations per disk. [#41254](https://github.com/ClickHouse/ClickHouse/pull/41254) ([filimonov](https://github.com/filimonov)). +* Support s3 authorization headers in table function arguments. [#41261](https://github.com/ClickHouse/ClickHouse/pull/41261) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Add support for MultiRead in Keeper and internal ZooKeeper client (this is an extension to ZooKeeper protocol, only available in ClickHouse Keeper). [#41410](https://github.com/ClickHouse/ClickHouse/pull/41410) ([Antonio Andelic](https://github.com/antonio2368)). +* Add support for decimal type comparing with floating point literal in IN operator. [#41544](https://github.com/ClickHouse/ClickHouse/pull/41544) ([liang.huang](https://github.com/lhuang09287750)). +* Allow readable size values (like `1TB`) in cache config. [#41688](https://github.com/ClickHouse/ClickHouse/pull/41688) ([Kseniia Sumarokova](https://github.com/kssenii)). +* ClickHouse could cache stale DNS entries for some period of time (15 seconds by default) until the cache won't be updated asynchronously. During these periods ClickHouse can nevertheless try to establish a connection and produce errors. This behavior is fixed. [#41707](https://github.com/ClickHouse/ClickHouse/pull/41707) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Add interactive history search with fzf-like utility (fzf/sk) for `clickhouse-client`/`clickhouse-local` (note you can use `FZF_DEFAULT_OPTS`/`SKIM_DEFAULT_OPTIONS` to additionally configure the behavior). [#41730](https://github.com/ClickHouse/ClickHouse/pull/41730) ([Azat Khuzhin](https://github.com/azat)). +* Only allow clients connecting to a secure server with an invalid certificate only to proceed with the '--accept-certificate' flag. [#41743](https://github.com/ClickHouse/ClickHouse/pull/41743) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Add function `tryBase58Decode`, similar to the existing function `tryBase64Decode`. [#41824](https://github.com/ClickHouse/ClickHouse/pull/41824) ([Robert Schulze](https://github.com/rschu1ze)). +* Improve feedback when replacing partition with different primary key. Fixes [#34798](https://github.com/ClickHouse/ClickHouse/issues/34798). [#41838](https://github.com/ClickHouse/ClickHouse/pull/41838) ([Salvatore](https://github.com/tbsal)). +* Fix parallel parsing: segmentator now checks `max_block_size`. This fixed memory overallocation in case of parallel parsing and small LIMIT. [#41852](https://github.com/ClickHouse/ClickHouse/pull/41852) ([Vitaly Baranov](https://github.com/vitlibar)). +* Don't add "TABLE_IS_DROPPED" exception to `system.errors` if it's happened during SELECT from a system table and was ignored. 
[#41908](https://github.com/ClickHouse/ClickHouse/pull/41908) ([AlfVII](https://github.com/AlfVII)). +* Improve option `enable_extended_results_for_datetime_functions` to return results of type DateTime64 for functions `toStartOfDay`, `toStartOfHour`, `toStartOfFifteenMinutes`, `toStartOfTenMinutes`, `toStartOfFiveMinutes`, `toStartOfMinute` and `timeSlot`. [#41910](https://github.com/ClickHouse/ClickHouse/pull/41910) ([Roman Vasin](https://github.com/rvasin)). +* Improve `DateTime` type inference for text formats. Now it respects setting `date_time_input_format` and doesn't try to infer datetimes from numbers as timestamps. Closes [#41389](https://github.com/ClickHouse/ClickHouse/issues/41389) Closes [#42206](https://github.com/ClickHouse/ClickHouse/issues/42206). [#41912](https://github.com/ClickHouse/ClickHouse/pull/41912) ([Kruglov Pavel](https://github.com/Avogar)). +* Remove confusing warning when inserting with `perform_ttl_move_on_insert` = false. [#41980](https://github.com/ClickHouse/ClickHouse/pull/41980) ([Vitaly Baranov](https://github.com/vitlibar)). +* Allow user to write `countState(*)` similar to `count(*)`. This closes [#9338](https://github.com/ClickHouse/ClickHouse/issues/9338). [#41983](https://github.com/ClickHouse/ClickHouse/pull/41983) ([Amos Bird](https://github.com/amosbird)). +* Fix `rankCorr` size overflow. [#42020](https://github.com/ClickHouse/ClickHouse/pull/42020) ([Duc Canh Le](https://github.com/canhld94)). +* Added an option to specify an arbitrary string as an environment name in the Sentry's config for more handy reports. [#42037](https://github.com/ClickHouse/ClickHouse/pull/42037) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Fix parsing out-of-range Date from CSV. [#42044](https://github.com/ClickHouse/ClickHouse/pull/42044) ([Andrey Zvonov](https://github.com/zvonand)). +* `parseDataTimeBestEffort` now supports comma between date and time. Closes [#42038](https://github.com/ClickHouse/ClickHouse/issues/42038). [#42049](https://github.com/ClickHouse/ClickHouse/pull/42049) ([flynn](https://github.com/ucasfl)). +* Improved stale replica recovery process for `ReplicatedMergeTree`. If a lost replica has some parts which are absent from a healthy replica, but these parts should appear in the future according to the replication queue of the healthy replica, then the lost replica will keep such parts instead of detaching them. [#42134](https://github.com/ClickHouse/ClickHouse/pull/42134) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Add a possibility to use `Date32` arguments for date_diff function. Fix issue in date_diff function when using DateTime64 arguments with a start date before Unix epoch and end date after Unix epoch. [#42308](https://github.com/ClickHouse/ClickHouse/pull/42308) ([Roman Vasin](https://github.com/rvasin)). +* When uploading big parts to Minio, 'Complete Multipart Upload' can take a long time. Minio sends heartbeats every 10 seconds (see https://github.com/minio/minio/pull/7198). But clickhouse times out earlier, because the default send/receive timeout is [set](https://github.com/ClickHouse/ClickHouse/blob/cc24fcd6d5dfb67f5f66f5483e986bd1010ad9cf/src/IO/S3/PocoHTTPClient.cpp#L123) to 5 seconds. [#42321](https://github.com/ClickHouse/ClickHouse/pull/42321) ([filimonov](https://github.com/filimonov)). +* Fix rarely invalid cast of aggregate state types with complex types such as Decimal. This fixes [#42408](https://github.com/ClickHouse/ClickHouse/issues/42408). 
[#42417](https://github.com/ClickHouse/ClickHouse/pull/42417) ([Amos Bird](https://github.com/amosbird)). +* Allow to use `Date32` arguments for `dateName` function. [#42554](https://github.com/ClickHouse/ClickHouse/pull/42554) ([Roman Vasin](https://github.com/rvasin)). +* Now filters with NULL literals will be used during index analysis. [#34063](https://github.com/ClickHouse/ClickHouse/issues/34063). [#41842](https://github.com/ClickHouse/ClickHouse/pull/41842) ([Amos Bird](https://github.com/amosbird)). +* Merge parts if every part in the range is older than a certain threshold. The threshold can be set by using `min_age_to_force_merge_seconds`. This closes [#35836](https://github.com/ClickHouse/ClickHouse/issues/35836). [#42423](https://github.com/ClickHouse/ClickHouse/pull/42423) ([Antonio Andelic](https://github.com/antonio2368)). This is continuation of [#39550i](https://github.com/ClickHouse/ClickHouse/pull/39550) by [@fastio](https://github.com/fastio) who implemented most of the logic. +* Improve the time to recover lost keeper connections. [#42541](https://github.com/ClickHouse/ClickHouse/pull/42541) ([Raúl Marín](https://github.com/Algunenano)). + +#### Build/Testing/Packaging Improvement +* Add fuzzer for table definitions [#40096](https://github.com/ClickHouse/ClickHouse/pull/40096) ([Anton Popov](https://github.com/CurtizJ)). This represents the biggest advancement for ClickHouse testing in this year so far. +* Beta version of the ClickHouse Cloud service is released: [https://clickhouse.cloud/](https://clickhouse.cloud/). It provides the easiest way to use ClickHouse (even slightly easier than the single-command installation). +* Added support of WHERE clause generation to AST Fuzzer and possibility to add or remove ORDER BY and WHERE clause. [#38519](https://github.com/ClickHouse/ClickHouse/pull/38519) ([Ilya Yatsishin](https://github.com/qoega)). +* Aarch64 binaries now require at least ARMv8.2, released in 2016. Most notably, this enables use of ARM LSE, i.e. native atomic operations. Also, CMake build option "NO_ARMV81_OR_HIGHER" has been added to allow compilation of binaries for older ARMv8.0 hardware, e.g. Raspberry Pi 4. [#41610](https://github.com/ClickHouse/ClickHouse/pull/41610) ([Robert Schulze](https://github.com/rschu1ze)). +* Allow building ClickHouse with Musl (small changes after it was already supported but broken). [#41987](https://github.com/ClickHouse/ClickHouse/pull/41987) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add the `$CLICKHOUSE_CRONFILE` file checking to avoid running the `sed` command to get the file not found error on install. [#42081](https://github.com/ClickHouse/ClickHouse/pull/42081) ([Chun-Sheng, Li](https://github.com/peter279k)). +* Update cctz to `2022e` to support the new timezone changes. Palestine transitions are now Saturdays at 02:00. Simplify three Ukraine zones into one. Jordan and Syria switch from +02/+03 with DST to year-round +03. (https://data.iana.org/time-zones/tzdb/NEWS). This closes [#42252](https://github.com/ClickHouse/ClickHouse/issues/42252). [#42327](https://github.com/ClickHouse/ClickHouse/pull/42327) ([Alexey Milovidov](https://github.com/alexey-milovidov)). [#42273](https://github.com/ClickHouse/ClickHouse/pull/42273) ([Dom Del Nano](https://github.com/ddelnano)). +* Add Rust code support into ClickHouse with BLAKE3 hash-function library as an example. [#33435](https://github.com/ClickHouse/ClickHouse/pull/33435) ([BoloniniD](https://github.com/BoloniniD)). 
+ +#### Bug Fix (user-visible misbehavior in official stable or prestable release) + +* Choose correct aggregation method for `LowCardinality` with big integer types. [#42342](https://github.com/ClickHouse/ClickHouse/pull/42342) ([Duc Canh Le](https://github.com/canhld94)). +* Several fixes for `web` disk. [#41652](https://github.com/ClickHouse/ClickHouse/pull/41652) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fixes an issue that causes docker run to fail if `https_port` is not present in config. [#41693](https://github.com/ClickHouse/ClickHouse/pull/41693) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Mutations were not cancelled properly on server shutdown or `SYSTEM STOP MERGES` query and cancellation might take long time, it's fixed. [#41699](https://github.com/ClickHouse/ClickHouse/pull/41699) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix wrong result of queries with `ORDER BY` or `GROUP BY` by columns from prefix of sorting key, wrapped into monotonic functions, with enable "read in order" optimization (settings `optimize_read_in_order` and `optimize_aggregation_in_order`). [#41701](https://github.com/ClickHouse/ClickHouse/pull/41701) ([Anton Popov](https://github.com/CurtizJ)). +* Fix possible crash in `SELECT` from `Merge` table with enabled `optimize_monotonous_functions_in_order_by` setting. Fixes [#41269](https://github.com/ClickHouse/ClickHouse/issues/41269). [#41740](https://github.com/ClickHouse/ClickHouse/pull/41740) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fixed "Part ... intersects part ..." error that might happen in extremely rare cases if replica was restarted just after detaching some part as broken. [#41741](https://github.com/ClickHouse/ClickHouse/pull/41741) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Don't allow to create or alter merge tree tables with column name `_row_exists`, which is reserved for lightweight delete. Fixed [#41716](https://github.com/ClickHouse/ClickHouse/issues/41716). [#41763](https://github.com/ClickHouse/ClickHouse/pull/41763) ([Jianmei Zhang](https://github.com/zhangjmruc)). +* Fix a bug that CORS headers are missing in some HTTP responses. [#41792](https://github.com/ClickHouse/ClickHouse/pull/41792) ([Frank Chen](https://github.com/FrankChen021)). +* 22.9 might fail to startup `ReplicatedMergeTree` table if that table was created by 20.3 or older version and was never altered, it's fixed. Fixes [#41742](https://github.com/ClickHouse/ClickHouse/issues/41742). [#41796](https://github.com/ClickHouse/ClickHouse/pull/41796) ([Alexander Tokmakov](https://github.com/tavplubix)). +* When the batch sending fails for some reason, it cannot be automatically recovered, and if it is not processed in time, it will lead to accumulation, and the printed error message will become longer and longer, which will cause the http thread to block. [#41813](https://github.com/ClickHouse/ClickHouse/pull/41813) ([zhongyuankai](https://github.com/zhongyuankai)). +* Fix compact parts with compressed marks setting. Fixes [#41783](https://github.com/ClickHouse/ClickHouse/issues/41783) and [#41746](https://github.com/ClickHouse/ClickHouse/issues/41746). [#41823](https://github.com/ClickHouse/ClickHouse/pull/41823) ([alesapin](https://github.com/alesapin)). +* Old versions of Replicated database don't have a special marker in [Zoo]Keeper. We need to check only whether the node contains come obscure data instead of special mark. 
[#41875](https://github.com/ClickHouse/ClickHouse/pull/41875) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Fix possible exception in fs cache. [#41884](https://github.com/ClickHouse/ClickHouse/pull/41884) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix `use_environment_credentials` for s3 table function. [#41970](https://github.com/ClickHouse/ClickHouse/pull/41970) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fixed "Directory already exists and is not empty" error on detaching broken part that might prevent `ReplicatedMergeTree` table from starting replication. Fixes [#40957](https://github.com/ClickHouse/ClickHouse/issues/40957). [#41981](https://github.com/ClickHouse/ClickHouse/pull/41981) ([Alexander Tokmakov](https://github.com/tavplubix)). +* `toDateTime64` now returns the same output with negative integer and float arguments. [#42025](https://github.com/ClickHouse/ClickHouse/pull/42025) ([Robert Schulze](https://github.com/rschu1ze)). +* Fix write into `azure_blob_storage`. Partially closes [#41754](https://github.com/ClickHouse/ClickHouse/issues/41754). [#42034](https://github.com/ClickHouse/ClickHouse/pull/42034) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix the `bzip2` decoding issue for specific `bzip2` files. [#42046](https://github.com/ClickHouse/ClickHouse/pull/42046) ([Nikolay Degterinsky](https://github.com/evillique)). +* Fix SQL function `toLastDayOfMonth` with setting "enable_extended_results_for_datetime_functions = 1" at the beginning of the extended range (January 1900). - Fix SQL function "toRelativeWeekNum()" with setting "enable_extended_results_for_datetime_functions = 1" at the end of extended range (December 2299). - Improve the performance of for SQL functions "toISOYear()", "toFirstDayNumOfISOYearIndex()" and "toYearWeekOfNewyearMode()" by avoiding unnecessary index arithmetics. [#42084](https://github.com/ClickHouse/ClickHouse/pull/42084) ([Roman Vasin](https://github.com/rvasin)). +* The maximum size of fetches for each table accidentally was set to 8 while the pool size could be bigger. Now the maximum size of fetches for table is equal to the pool size. [#42090](https://github.com/ClickHouse/ClickHouse/pull/42090) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* A table might be shut down and a dictionary might be detached before checking if can be dropped without breaking dependencies between table, it's fixed. Fixes [#41982](https://github.com/ClickHouse/ClickHouse/issues/41982). [#42106](https://github.com/ClickHouse/ClickHouse/pull/42106) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix bad inefficiency of `remote_filesystem_read_method=read` with filesystem cache. Closes [#42125](https://github.com/ClickHouse/ClickHouse/issues/42125). [#42129](https://github.com/ClickHouse/ClickHouse/pull/42129) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix possible timeout exception for distributed queries with use_hedged_requests = 0. [#42130](https://github.com/ClickHouse/ClickHouse/pull/42130) ([Azat Khuzhin](https://github.com/azat)). +* Fixed a minor bug inside function `runningDifference` in case of using it with `Date32` type. Previously `Date` was used and it may cause some logical errors like `Bad cast from type DB::ColumnVector to DB::ColumnVector'`. [#42143](https://github.com/ClickHouse/ClickHouse/pull/42143) ([Alfred Xu](https://github.com/sperlingxx)). +* Fix reusing of files > 4GB from base backup. 
[#42146](https://github.com/ClickHouse/ClickHouse/pull/42146) ([Azat Khuzhin](https://github.com/azat)). +* DISTINCT in order fails with LOGICAL_ERROR if first column in sorting key contains function. [#42186](https://github.com/ClickHouse/ClickHouse/pull/42186) ([Igor Nikonov](https://github.com/devcrafter)). +* Fix a bug with projections and the `aggregate_functions_null_for_empty` setting. This bug is very rare and appears only if you enable the `aggregate_functions_null_for_empty` setting in the server's config. This closes [#41647](https://github.com/ClickHouse/ClickHouse/issues/41647). [#42198](https://github.com/ClickHouse/ClickHouse/pull/42198) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix read from `Buffer` tables with read in order desc. [#42236](https://github.com/ClickHouse/ClickHouse/pull/42236) ([Duc Canh Le](https://github.com/canhld94)). +* Fix a bug which prevents ClickHouse to start when `background_pool_size setting` is set on default profile but `background_merges_mutations_concurrency_ratio` is not. [#42315](https://github.com/ClickHouse/ClickHouse/pull/42315) ([nvartolomei](https://github.com/nvartolomei)). +* `ALTER UPDATE` of attached part (with columns different from table schema) could create an invalid `columns.txt` metadata on disk. Reading from such part could fail with errors or return invalid data. Fixes [#42161](https://github.com/ClickHouse/ClickHouse/issues/42161). [#42319](https://github.com/ClickHouse/ClickHouse/pull/42319) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Setting `additional_table_filters` were not applied to `Distributed` storage. Fixes [#41692](https://github.com/ClickHouse/ClickHouse/issues/41692). [#42322](https://github.com/ClickHouse/ClickHouse/pull/42322) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix a data race in query finish/cancel. This closes [#42346](https://github.com/ClickHouse/ClickHouse/issues/42346). [#42362](https://github.com/ClickHouse/ClickHouse/pull/42362) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* This reverts [#40217](https://github.com/ClickHouse/ClickHouse/issues/40217) which introduced a regression in date/time functions. [#42367](https://github.com/ClickHouse/ClickHouse/pull/42367) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix assert cast in join on falsy condition, Close [#42380](https://github.com/ClickHouse/ClickHouse/issues/42380). [#42407](https://github.com/ClickHouse/ClickHouse/pull/42407) ([Vladimir C](https://github.com/vdimir)). +* Fix buffer overflow in the processing of Decimal data types. This closes [#42451](https://github.com/ClickHouse/ClickHouse/issues/42451). [#42465](https://github.com/ClickHouse/ClickHouse/pull/42465) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* `AggregateFunctionQuantile` now correctly works with UInt128 columns. Previously, the quantile state interpreted `UInt128` columns as `Int128` which could have led to incorrect results. [#42473](https://github.com/ClickHouse/ClickHouse/pull/42473) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix bad_cast assert during INSERT into `Annoy` indexes over non-Float32 columns. `Annoy` indices is an experimental feature. [#42485](https://github.com/ClickHouse/ClickHouse/pull/42485) ([Robert Schulze](https://github.com/rschu1ze)). +* Arithmetic operator with Date or DateTime and 128 or 256-bit integer was referencing uninitialized memory. [#42453](https://github.com/ClickHouse/ClickHouse/issues/42453). 
[#42573](https://github.com/ClickHouse/ClickHouse/pull/42573) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix unexpected table loading error when partition key contains alias function names during server upgrade. [#36379](https://github.com/ClickHouse/ClickHouse/pull/36379) ([Amos Bird](https://github.com/amosbird)). + ### ClickHouse release 22.9, 2022-09-22 #### Backward Incompatible Change + * Upgrade from 20.3 and older to 22.9 and newer should be done through an intermediate version if there are any `ReplicatedMergeTree` tables, otherwise server with the new version will not start. [#40641](https://github.com/ClickHouse/ClickHouse/pull/40641) ([Alexander Tokmakov](https://github.com/tavplubix)). * Remove the functions `accurate_Cast` and `accurate_CastOrNull` (they are different to `accurateCast` and `accurateCastOrNull` by underscore in the name and they are not affected by the value of `cast_keep_nullable` setting). These functions were undocumented, untested, unused, and unneeded. They appeared to be alive due to code generalization. [#40682](https://github.com/ClickHouse/ClickHouse/pull/40682) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Add a test to ensure that every new table function will be documented. See [#40649](https://github.com/ClickHouse/ClickHouse/issues/40649). Rename table function `MeiliSearch` to `meilisearch`. [#40709](https://github.com/ClickHouse/ClickHouse/pull/40709) ([Alexey Milovidov](https://github.com/alexey-milovidov)). @@ -21,6 +155,7 @@ * Make interpretation of YAML configs to be more conventional. [#41044](https://github.com/ClickHouse/ClickHouse/pull/41044) ([Vitaly Baranov](https://github.com/vitlibar)). #### New Feature + * Support `insert_quorum = 'auto'` to use majority number. [#39970](https://github.com/ClickHouse/ClickHouse/pull/39970) ([Sachin](https://github.com/SachinSetiya)). * Add embedded dashboards to ClickHouse server. This is a demo project about how to achieve 90% results with 1% effort using ClickHouse features. [#40461](https://github.com/ClickHouse/ClickHouse/pull/40461) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Added new settings constraint writability kind `changeable_in_readonly`. [#40631](https://github.com/ClickHouse/ClickHouse/pull/40631) ([Sergei Trifonov](https://github.com/serxa)). @@ -38,6 +173,7 @@ * Improvement for in-memory data parts: remove completely processed WAL files. [#40592](https://github.com/ClickHouse/ClickHouse/pull/40592) ([Azat Khuzhin](https://github.com/azat)). #### Performance Improvement + * Implement compression of marks and primary key. Close [#34437](https://github.com/ClickHouse/ClickHouse/issues/34437). [#37693](https://github.com/ClickHouse/ClickHouse/pull/37693) ([zhongyuankai](https://github.com/zhongyuankai)). * Allow to load marks with threadpool in advance. Regulated by setting `load_marks_asynchronously` (default: 0). [#40821](https://github.com/ClickHouse/ClickHouse/pull/40821) ([Kseniia Sumarokova](https://github.com/kssenii)). * Virtual filesystem over s3 will use random object names split into multiple path prefixes for better performance on AWS. [#40968](https://github.com/ClickHouse/ClickHouse/pull/40968) ([Alexey Milovidov](https://github.com/alexey-milovidov)). @@ -58,6 +194,7 @@ * Parallel hash JOIN for Float data types might be suboptimal. Make it better. [#41183](https://github.com/ClickHouse/ClickHouse/pull/41183) ([Alexey Milovidov](https://github.com/alexey-milovidov)). 
#### Improvement + * During startup and ATTACH call, `ReplicatedMergeTree` tables will be readonly until the ZooKeeper connection is made and the setup is finished. [#40148](https://github.com/ClickHouse/ClickHouse/pull/40148) ([Antonio Andelic](https://github.com/antonio2368)). * Add `enable_extended_results_for_datetime_functions` option to return results of type Date32 for functions toStartOfYear, toStartOfISOYear, toStartOfQuarter, toStartOfMonth, toStartOfWeek, toMonday and toLastDayOfMonth when argument is Date32 or DateTime64, otherwise results of Date type are returned. For compatibility reasons default value is ‘0’. [#41214](https://github.com/ClickHouse/ClickHouse/pull/41214) ([Roman Vasin](https://github.com/rvasin)). * For security and stability reasons, CatBoost models are no longer evaluated within the ClickHouse server. Instead, the evaluation is now done in the clickhouse-library-bridge, a separate process that loads the catboost library and communicates with the server process via HTTP. [#40897](https://github.com/ClickHouse/ClickHouse/pull/40897) ([Robert Schulze](https://github.com/rschu1ze)). [#39629](https://github.com/ClickHouse/ClickHouse/pull/39629) ([Robert Schulze](https://github.com/rschu1ze)). @@ -108,6 +245,7 @@ * Add `has_lightweight_delete` to system.parts. [#41564](https://github.com/ClickHouse/ClickHouse/pull/41564) ([Kseniia Sumarokova](https://github.com/kssenii)). #### Build/Testing/Packaging Improvement + * Enforce documentation for every setting. [#40644](https://github.com/ClickHouse/ClickHouse/pull/40644) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Enforce documentation for every current metric. [#40645](https://github.com/ClickHouse/ClickHouse/pull/40645) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Enforce documentation for every profile event counter. Write the documentation where it was missing. [#40646](https://github.com/ClickHouse/ClickHouse/pull/40646) ([Alexey Milovidov](https://github.com/alexey-milovidov)). @@ -217,15 +355,16 @@ * Fix read bytes/rows in X-ClickHouse-Summary with materialized views. [#41586](https://github.com/ClickHouse/ClickHouse/pull/41586) ([Raúl Marín](https://github.com/Algunenano)). * Fix possible `pipeline stuck` exception for queries with `OFFSET`. The error was found with `enable_optimize_predicate_expression = 0` and always false condition in `WHERE`. Fixes [#41383](https://github.com/ClickHouse/ClickHouse/issues/41383). [#41588](https://github.com/ClickHouse/ClickHouse/pull/41588) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). - -### ClickHouse release 22.8, 2022-08-18 +### ClickHouse release 22.8-lts, 2022-08-18 #### Backward Incompatible Change + * Extended range of `Date32` and `DateTime64` to support dates from the year 1900 to 2299. In previous versions, the supported interval was only from the year 1925 to 2283. The implementation is using the proleptic Gregorian calendar (which is conformant with [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601):2004 (clause 3.2.1 The Gregorian calendar)) instead of accounting for historical transitions from the Julian to the Gregorian calendar. This change affects implementation-specific behavior for out-of-range arguments. E.g. if in previous versions the value of `1899-01-01` was clamped to `1925-01-01`, in the new version it will be clamped to `1900-01-01`. 
It changes the behavior of rounding with `toStartOfInterval` if you pass `INTERVAL 3 QUARTER` up to one quarter because the intervals are counted from an implementation-specific point of time. Closes [#28216](https://github.com/ClickHouse/ClickHouse/issues/28216), improves [#38393](https://github.com/ClickHouse/ClickHouse/issues/38393). [#39425](https://github.com/ClickHouse/ClickHouse/pull/39425) ([Roman Vasin](https://github.com/rvasin)). * Now, all relevant dictionary sources respect `remote_url_allow_hosts` setting. It was already done for HTTP, Cassandra, Redis. Added ClickHouse, MongoDB, MySQL, PostgreSQL. Host is checked only for dictionaries created from DDL. [#39184](https://github.com/ClickHouse/ClickHouse/pull/39184) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). * Make the remote filesystem cache composable, allow not to evict certain files (regarding idx, mrk, ..), delete old cache version. Now it is possible to configure cache over Azure blob storage disk, over Local disk, over StaticWeb disk, etc. This PR is marked backward incompatible because cache configuration changes and in order for cache to work need to update the config file. Old cache will still be used with new configuration. The server will startup fine with the old cache configuration. Closes https://github.com/ClickHouse/ClickHouse/issues/36140. Closes https://github.com/ClickHouse/ClickHouse/issues/37889. ([Kseniia Sumarokova](https://github.com/kssenii)). [#36171](https://github.com/ClickHouse/ClickHouse/pull/36171)) #### New Feature + * Query parameters can be set in interactive mode as `SET param_abc = 'def'` and transferred via the native protocol as settings. [#39906](https://github.com/ClickHouse/ClickHouse/pull/39906) ([Nikita Taranov](https://github.com/nickitat)). * Quota key can be set in the native protocol ([Yakov Olkhovsky](https://github.com/ClickHouse/ClickHouse/pull/39874)). * Added a setting `exact_rows_before_limit` (0/1). When enabled, ClickHouse will provide exact value for `rows_before_limit_at_least` statistic, but with the cost that the data before limit will have to be read completely. This closes [#6613](https://github.com/ClickHouse/ClickHouse/issues/6613). [#25333](https://github.com/ClickHouse/ClickHouse/pull/25333) ([kevin wan](https://github.com/MaxWk)). @@ -240,12 +379,14 @@ * Add new setting schema_inference_hints that allows to specify structure hints in schema inference for specific columns. Closes [#39569](https://github.com/ClickHouse/ClickHouse/issues/39569). [#40068](https://github.com/ClickHouse/ClickHouse/pull/40068) ([Kruglov Pavel](https://github.com/Avogar)). #### Experimental Feature + * Support SQL standard DELETE FROM syntax on merge tree tables and lightweight delete implementation for merge tree families. [#37893](https://github.com/ClickHouse/ClickHouse/pull/37893) ([Jianmei Zhang](https://github.com/zhangjmruc)) ([Alexander Gololobov](https://github.com/davenger)). Note: this new feature does not make ClickHouse an HTAP DBMS. #### Performance Improvement + * Improved memory usage during memory efficient merging of aggregation results. [#39429](https://github.com/ClickHouse/ClickHouse/pull/39429) ([Nikita Taranov](https://github.com/nickitat)). * Added concurrency control logic to limit total number of concurrent threads created by queries. [#37558](https://github.com/ClickHouse/ClickHouse/pull/37558) ([Sergei Trifonov](https://github.com/serxa)). 
Add `concurrent_threads_soft_limit parameter` to increase performance in case of high QPS by means of limiting total number of threads for all queries. [#37285](https://github.com/ClickHouse/ClickHouse/pull/37285) ([Roman Vasin](https://github.com/rvasin)). -* Add `SLRU` cache policy for uncompressed cache and marks cache. ([Kseniia Sumarokova](https://github.com/kssenii)). [#34651](https://github.com/ClickHouse/ClickHouse/pull/34651) ([alexX512](https://github.com/alexX512)). Decoupling local cache function and cache algorithm [#38048](https://github.com/ClickHouse/ClickHouse/pull/38048) ([Han Shukai](https://github.com/KinderRiven)). +* Add `SLRU` cache policy for uncompressed cache and marks cache. ([Kseniia Sumarokova](https://github.com/kssenii)). [#34651](https://github.com/ClickHouse/ClickHouse/pull/34651) ([alexX512](https://github.com/alexX512)). Decoupling local cache function and cache algorithm [#38048](https://github.com/ClickHouse/ClickHouse/pull/38048) ([Han Shukai](https://github.com/KinderRiven)). * Intel® In-Memory Analytics Accelerator (Intel® IAA) is a hardware accelerator available in the upcoming generation of Intel® Xeon® Scalable processors ("Sapphire Rapids"). Its goal is to speed up common operations in analytics like data (de)compression and filtering. ClickHouse gained the new "DeflateQpl" compression codec which utilizes the Intel® IAA offloading technology to provide a high-performance DEFLATE implementation. The codec uses the [Intel® Query Processing Library (QPL)](https://github.com/intel/qpl) which abstracts access to the hardware accelerator, respectively to a software fallback in case the hardware accelerator is not available. DEFLATE provides in general higher compression rates than ClickHouse's LZ4 default codec, and as a result, offers less disk I/O and lower main memory consumption. [#36654](https://github.com/ClickHouse/ClickHouse/pull/36654) ([jasperzhu](https://github.com/jinjunzh)). [#39494](https://github.com/ClickHouse/ClickHouse/pull/39494) ([Robert Schulze](https://github.com/rschu1ze)). * `DISTINCT` in order with `ORDER BY`: Deduce way to sort based on input stream sort description. Skip sorting if input stream is already sorted. [#38719](https://github.com/ClickHouse/ClickHouse/pull/38719) ([Igor Nikonov](https://github.com/devcrafter)). Improve memory usage (significantly) and query execution time + use `DistinctSortedChunkTransform` for final distinct when `DISTINCT` columns match `ORDER BY` columns, but rename to `DistinctSortedStreamTransform` in `EXPLAIN PIPELINE` → this improves memory usage significantly + remove unnecessary allocations in hot loop in `DistinctSortedChunkTransform`. [#39432](https://github.com/ClickHouse/ClickHouse/pull/39432) ([Igor Nikonov](https://github.com/devcrafter)). Use `DistinctSortedTransform` only when sort description is applicable to DISTINCT columns, otherwise fall back to ordinary DISTINCT implementation + it allows making less checks during `DistinctSortedTransform` execution. [#39528](https://github.com/ClickHouse/ClickHouse/pull/39528) ([Igor Nikonov](https://github.com/devcrafter)). Fix: `DistinctSortedTransform` didn't take advantage of sorting. It never cleared HashSet since clearing_columns were detected incorrectly (always empty). So, it basically worked as ordinary `DISTINCT` (`DistinctTransform`). The fix reduces memory usage significantly. [#39538](https://github.com/ClickHouse/ClickHouse/pull/39538) ([Igor Nikonov](https://github.com/devcrafter)). 
* Use local node as first priority to get structure of remote table when executing `cluster` and similar table functions. [#39440](https://github.com/ClickHouse/ClickHouse/pull/39440) ([Mingliang Pan](https://github.com/liangliangpan)). @@ -256,6 +397,7 @@ * Improve bytes to bits mask transform for SSE/AVX/AVX512. [#39586](https://github.com/ClickHouse/ClickHouse/pull/39586) ([Guo Wangyang](https://github.com/guowangy)). #### Improvement + * Normalize `AggregateFunction` types and state representations because optimizations like [#35788](https://github.com/ClickHouse/ClickHouse/pull/35788) will treat `count(not null columns)` as `count()`, which might confuses distributed interpreters with the following error : `Conversion from AggregateFunction(count) to AggregateFunction(count, Int64) is not supported`. [#39420](https://github.com/ClickHouse/ClickHouse/pull/39420) ([Amos Bird](https://github.com/amosbird)). The functions with identical states can be used in materialized views interchangeably. * Rework and simplify the `system.backups` table, remove the `internal` column, allow user to set the ID of operation, add columns `num_files`, `uncompressed_size`, `compressed_size`, `start_time`, `end_time`. [#39503](https://github.com/ClickHouse/ClickHouse/pull/39503) ([Vitaly Baranov](https://github.com/vitlibar)). * Improved structure of DDL query result table for `Replicated` database (separate columns with shard and replica name, more clear status) - `CREATE TABLE ... ON CLUSTER` queries can be normalized on initiator first if `distributed_ddl_entry_format_version` is set to 3 (default value). It means that `ON CLUSTER` queries may not work if initiator does not belong to the cluster that specified in query. Fixes [#37318](https://github.com/ClickHouse/ClickHouse/issues/37318), [#39500](https://github.com/ClickHouse/ClickHouse/issues/39500) - Ignore `ON CLUSTER` clause if database is `Replicated` and cluster name equals to database name. Related to [#35570](https://github.com/ClickHouse/ClickHouse/issues/35570) - Miscellaneous minor fixes for `Replicated` database engine - Check metadata consistency when starting up `Replicated` database, start replica recovery in case of mismatch of local metadata and metadata in Keeper. Resolves [#24880](https://github.com/ClickHouse/ClickHouse/issues/24880). [#37198](https://github.com/ClickHouse/ClickHouse/pull/37198) ([Alexander Tokmakov](https://github.com/tavplubix)). @@ -294,6 +436,7 @@ * Add support for LARGE_BINARY/LARGE_STRING with Arrow (Closes [#32401](https://github.com/ClickHouse/ClickHouse/issues/32401)). [#40293](https://github.com/ClickHouse/ClickHouse/pull/40293) ([Josh Taylor](https://github.com/joshuataylor)). #### Build/Testing/Packaging Improvement + * [ClickFiddle](https://fiddle.clickhouse.com/): A new tool for testing ClickHouse versions in read/write mode (**Igor Baliuk**). * ClickHouse binary is made self-extracting [#35775](https://github.com/ClickHouse/ClickHouse/pull/35775) ([Yakov Olkhovskiy, Arthur Filatenkov](https://github.com/yakov-olkhovskiy)). * Update tzdata to 2022b to support the new timezone changes. See https://github.com/google/cctz/pull/226. Chile's 2022 DST start is delayed from September 4 to September 11. Iran plans to stop observing DST permanently, after it falls back on 2022-09-21. There are corrections of the historical time zone of Asia/Tehran in the year 1977: Iran adopted standard time in 1935, not 1946. 
In 1977 it observed DST from 03-21 23:00 to 10-20 24:00; its 1978 transitions were on 03-24 and 08-05, not 03-20 and 10-20; and its spring 1979 transition was on 05-27, not 03-21 (https://data.iana.org/time-zones/tzdb/NEWS). ([Alexey Milovidov](https://github.com/alexey-milovidov)). @@ -308,6 +451,7 @@ * Docker: Now entrypoint.sh in docker image creates and executes chown for all folders it found in config for multidisk setup [#17717](https://github.com/ClickHouse/ClickHouse/issues/17717). [#39121](https://github.com/ClickHouse/ClickHouse/pull/39121) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). #### Bug Fix + * Fix possible segfault in `CapnProto` input format. This bug was found and send through ClickHouse bug-bounty [program](https://github.com/ClickHouse/ClickHouse/issues/38986) by *kiojj*. [#40241](https://github.com/ClickHouse/ClickHouse/pull/40241) ([Kruglov Pavel](https://github.com/Avogar)). * Fix a very rare case of incorrect behavior of array subscript operator. This closes [#28720](https://github.com/ClickHouse/ClickHouse/issues/28720). [#40185](https://github.com/ClickHouse/ClickHouse/pull/40185) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Fix insufficient argument check for encryption functions (found by query fuzzer). This closes [#39987](https://github.com/ClickHouse/ClickHouse/issues/39987). [#40194](https://github.com/ClickHouse/ClickHouse/pull/40194) ([Alexey Milovidov](https://github.com/alexey-milovidov)). @@ -358,16 +502,17 @@ * A fix for reverse DNS resolution. [#40134](https://github.com/ClickHouse/ClickHouse/pull/40134) ([Arthur Passos](https://github.com/arthurpassos)). * Fix unexpected result `arrayDifference` of `Array(UInt32). [#40211](https://github.com/ClickHouse/ClickHouse/pull/40211) ([Duc Canh Le](https://github.com/canhld94)). - ### ClickHouse release 22.7, 2022-07-21 #### Upgrade Notes + * Enable setting `enable_positional_arguments` by default. It allows queries like `SELECT ... ORDER BY 1, 2` where 1, 2 are the references to the select clause. If you need to return the old behavior, disable this setting. [#38204](https://github.com/ClickHouse/ClickHouse/pull/38204) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Disable `format_csv_allow_single_quotes` by default. See [#37096](https://github.com/ClickHouse/ClickHouse/issues/37096). ([Kruglov Pavel](https://github.com/Avogar)). * `Ordinary` database engine and old storage definition syntax for `*MergeTree` tables are deprecated. By default it's not possible to create new databases with `Ordinary` engine. If `system` database has `Ordinary` engine it will be automatically converted to `Atomic` on server startup. There are settings to keep old behavior (`allow_deprecated_database_ordinary` and `allow_deprecated_syntax_for_merge_tree`), but these settings may be removed in future releases. [#38335](https://github.com/ClickHouse/ClickHouse/pull/38335) ([Alexander Tokmakov](https://github.com/tavplubix)). * Force rewriting comma join to inner by default (set default value `cross_to_inner_join_rewrite = 2`). To have old behavior set `cross_to_inner_join_rewrite = 1`. [#39326](https://github.com/ClickHouse/ClickHouse/pull/39326) ([Vladimir C](https://github.com/vdimir)). If you will face any incompatibilities, you can turn this setting back. #### New Feature + * Support expressions with window functions. Closes [#19857](https://github.com/ClickHouse/ClickHouse/issues/19857). 
[#37848](https://github.com/ClickHouse/ClickHouse/pull/37848) ([Dmitry Novik](https://github.com/novikd)). * Add new `direct` join algorithm for `EmbeddedRocksDB` tables, see [#33582](https://github.com/ClickHouse/ClickHouse/issues/33582). [#35363](https://github.com/ClickHouse/ClickHouse/pull/35363) ([Vladimir C](https://github.com/vdimir)). * Added full sorting merge join algorithm. [#35796](https://github.com/ClickHouse/ClickHouse/pull/35796) ([Vladimir C](https://github.com/vdimir)). @@ -395,9 +540,11 @@ * Add `clickhouse-diagnostics` binary to the packages. [#38647](https://github.com/ClickHouse/ClickHouse/pull/38647) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). #### Experimental Feature + * Adds new setting `implicit_transaction` to run standalone queries inside a transaction. It handles both creation and closing (via COMMIT if the query succeeded or ROLLBACK if it didn't) of the transaction automatically. [#38344](https://github.com/ClickHouse/ClickHouse/pull/38344) ([Raúl Marín](https://github.com/Algunenano)). #### Performance Improvement + * Distinct optimization for sorted columns. Use specialized distinct transformation in case input stream is sorted by column(s) in distinct. Optimization can be applied to pre-distinct, final distinct, or both. Initial implementation by @dimarub2000. [#37803](https://github.com/ClickHouse/ClickHouse/pull/37803) ([Igor Nikonov](https://github.com/devcrafter)). * Improve performance of `ORDER BY`, `MergeTree` merges, window functions using batch version of `BinaryHeap`. [#38022](https://github.com/ClickHouse/ClickHouse/pull/38022) ([Maksim Kita](https://github.com/kitaisreal)). * More parallel execution for queries with `FINAL` [#36396](https://github.com/ClickHouse/ClickHouse/pull/36396) ([Nikita Taranov](https://github.com/nickitat)). @@ -407,7 +554,7 @@ * Improve performance of insertion to columns of type `JSON`. [#38320](https://github.com/ClickHouse/ClickHouse/pull/38320) ([Anton Popov](https://github.com/CurtizJ)). * Optimized insertion and lookups in the HashTable. [#38413](https://github.com/ClickHouse/ClickHouse/pull/38413) ([Nikita Taranov](https://github.com/nickitat)). * Fix performance degradation from [#32493](https://github.com/ClickHouse/ClickHouse/issues/32493). [#38417](https://github.com/ClickHouse/ClickHouse/pull/38417) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Improve performance of joining with numeric columns using SIMD instructions. [#37235](https://github.com/ClickHouse/ClickHouse/pull/37235) ([zzachimed](https://github.com/zzachimed)). [#38565](https://github.com/ClickHouse/ClickHouse/pull/38565) ([Maksim Kita](https://github.com/kitaisreal)). +* Improve performance of joining with numeric columns using SIMD instructions. [#37235](https://github.com/ClickHouse/ClickHouse/pull/37235) ([zzachimed](https://github.com/zzachimed)). [#38565](https://github.com/ClickHouse/ClickHouse/pull/38565) ([Maksim Kita](https://github.com/kitaisreal)). * Norm and Distance functions for arrays speed up 1.2-2 times. [#38740](https://github.com/ClickHouse/ClickHouse/pull/38740) ([Alexander Gololobov](https://github.com/davenger)). * Add AVX-512 VBMI optimized `copyOverlap32Shuffle` for LZ4 decompression. In other words, LZ4 decompression performance is improved. [#37891](https://github.com/ClickHouse/ClickHouse/pull/37891) ([Guo Wangyang](https://github.com/guowangy)). * `ORDER BY (a, b)` will use all the same benefits as `ORDER BY a, b`. 
[#38873](https://github.com/ClickHouse/ClickHouse/pull/38873) ([Igor Nikonov](https://github.com/devcrafter)). @@ -419,6 +566,7 @@ * The table `system.asynchronous_metric_log` is further optimized for storage space. This closes [#38134](https://github.com/ClickHouse/ClickHouse/issues/38134). See the [YouTube video](https://www.youtube.com/watch?v=0fSp9SF8N8A). [#38428](https://github.com/ClickHouse/ClickHouse/pull/38428) ([Alexey Milovidov](https://github.com/alexey-milovidov)). #### Improvement + * Support SQL standard CREATE INDEX and DROP INDEX syntax. [#35166](https://github.com/ClickHouse/ClickHouse/pull/35166) ([Jianmei Zhang](https://github.com/zhangjmruc)). * Send profile events for INSERT queries (previously only SELECT was supported). [#37391](https://github.com/ClickHouse/ClickHouse/pull/37391) ([Azat Khuzhin](https://github.com/azat)). * Implement in order aggregation (`optimize_aggregation_in_order`) for fully materialized projections. [#37469](https://github.com/ClickHouse/ClickHouse/pull/37469) ([Azat Khuzhin](https://github.com/azat)). @@ -464,6 +612,7 @@ * Allow to declare `RabbitMQ` queue without default arguments `x-max-length` and `x-overflow`. [#39259](https://github.com/ClickHouse/ClickHouse/pull/39259) ([rnbondarenko](https://github.com/rnbondarenko)). #### Build/Testing/Packaging Improvement + * Apply Clang Thread Safety Analysis (TSA) annotations to ClickHouse. [#38068](https://github.com/ClickHouse/ClickHouse/pull/38068) ([Robert Schulze](https://github.com/rschu1ze)). * Adapt universal installation script for FreeBSD. [#39302](https://github.com/ClickHouse/ClickHouse/pull/39302) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Preparation for building on `s390x` platform. [#39193](https://github.com/ClickHouse/ClickHouse/pull/39193) ([Harry Lee](https://github.com/HarryLeeIBM)). @@ -473,6 +622,7 @@ * Change `all|noarch` packages to architecture-dependent - Fix some documentation for it - Push aarch64|arm64 packages to artifactory and release assets - Fixes [#36443](https://github.com/ClickHouse/ClickHouse/issues/36443). [#38580](https://github.com/ClickHouse/ClickHouse/pull/38580) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). #### Bug Fix (user-visible misbehavior in official stable or prestable release) + * Fix rounding for `Decimal128/Decimal256` with more than 19-digits long scale. [#38027](https://github.com/ClickHouse/ClickHouse/pull/38027) ([Igor Nikonov](https://github.com/devcrafter)). * Fixed crash caused by data race in storage `Hive` (integration table engine). [#38887](https://github.com/ClickHouse/ClickHouse/pull/38887) ([lgbo](https://github.com/lgbo-ustc)). * Fix crash when executing GRANT ALL ON *.* with ON CLUSTER. It was broken in https://github.com/ClickHouse/ClickHouse/pull/35767. This closes [#38618](https://github.com/ClickHouse/ClickHouse/issues/38618). [#38674](https://github.com/ClickHouse/ClickHouse/pull/38674) ([Vitaly Baranov](https://github.com/vitlibar)). @@ -529,6 +679,7 @@ ### ClickHouse release 22.6, 2022-06-16 #### Backward Incompatible Change + * Remove support for octal number literals in SQL. In previous versions they were parsed as Float64. [#37765](https://github.com/ClickHouse/ClickHouse/pull/37765) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). * Changes how settings using `seconds` as type are parsed to support floating point values (for example: `max_execution_time=0.5`). Infinity or NaN values will throw an exception. 
[#37187](https://github.com/ClickHouse/ClickHouse/pull/37187) ([Raúl Marín](https://github.com/Algunenano)). * Changed format of binary serialization of columns of experimental type `Object`. New format is more convenient to implement by third-party clients. [#37482](https://github.com/ClickHouse/ClickHouse/pull/37482) ([Anton Popov](https://github.com/CurtizJ)). @@ -537,6 +688,7 @@ * If you run different ClickHouse versions on a cluster with AArch64 CPU or mix AArch64 and amd64 on a cluster, and use distributed queries with GROUP BY multiple keys of fixed-size type that fit in 256 bits but don't fit in 64 bits, and the size of the result is huge, the data will not be fully aggregated in the result of these queries during upgrade. Workaround: upgrade with downtime instead of a rolling upgrade. #### New Feature + * Add `GROUPING` function. It allows to disambiguate the records in the queries with `ROLLUP`, `CUBE` or `GROUPING SETS`. Closes [#19426](https://github.com/ClickHouse/ClickHouse/issues/19426). [#37163](https://github.com/ClickHouse/ClickHouse/pull/37163) ([Dmitry Novik](https://github.com/novikd)). * A new codec [FPC](https://userweb.cs.txstate.edu/~burtscher/papers/dcc07a.pdf) algorithm for floating point data compression. [#37553](https://github.com/ClickHouse/ClickHouse/pull/37553) ([Mikhail Guzov](https://github.com/koloshmet)). * Add new columnar JSON formats: `JSONColumns`, `JSONCompactColumns`, `JSONColumnsWithMetadata`. Closes [#36338](https://github.com/ClickHouse/ClickHouse/issues/36338) Closes [#34509](https://github.com/ClickHouse/ClickHouse/issues/34509). [#36975](https://github.com/ClickHouse/ClickHouse/pull/36975) ([Kruglov Pavel](https://github.com/Avogar)). @@ -557,11 +709,13 @@ * Added `SYSTEM UNFREEZE` query that deletes the whole backup regardless if the corresponding table is deleted or not. [#36424](https://github.com/ClickHouse/ClickHouse/pull/36424) ([Vadim Volodin](https://github.com/PolyProgrammist)). #### Experimental Feature + * Enables `POPULATE` for `WINDOW VIEW`. [#36945](https://github.com/ClickHouse/ClickHouse/pull/36945) ([vxider](https://github.com/Vxider)). * `ALTER TABLE ... MODIFY QUERY` support for `WINDOW VIEW`. [#37188](https://github.com/ClickHouse/ClickHouse/pull/37188) ([vxider](https://github.com/Vxider)). * This PR changes the behavior of the `ENGINE` syntax in `WINDOW VIEW`, to make it like in `MATERIALIZED VIEW`. [#37214](https://github.com/ClickHouse/ClickHouse/pull/37214) ([vxider](https://github.com/Vxider)). #### Performance Improvement + * Added numerous optimizations for ARM NEON [#38093](https://github.com/ClickHouse/ClickHouse/pull/38093)([Daniel Kutenin](https://github.com/danlark1)), ([Alexandra Pilipyuk](https://github.com/chalice19)) Note: if you run different ClickHouse versions on a cluster with ARM CPU and use distributed queries with GROUP BY multiple keys of fixed-size type that fit in 256 bits but don't fit in 64 bits, the result of the aggregation query will be wrong during upgrade. Workaround: upgrade with downtime instead of a rolling upgrade. * Improve performance and memory usage for select of subset of columns for formats Native, Protobuf, CapnProto, JSONEachRow, TSKV, all formats with suffixes WithNames/WithNamesAndTypes. Previously while selecting only subset of columns from files in these formats all columns were read and stored in memory. Now only required columns are read. 
This PR enables setting `input_format_skip_unknown_fields` by default, because otherwise in case of select of subset of columns exception will be thrown. [#37192](https://github.com/ClickHouse/ClickHouse/pull/37192) ([Kruglov Pavel](https://github.com/Avogar)). * Now more filters can be pushed down for join. [#37472](https://github.com/ClickHouse/ClickHouse/pull/37472) ([Amos Bird](https://github.com/amosbird)). @@ -592,6 +746,7 @@ * In function: CompressedWriteBuffer::nextImpl(), there is an unnecessary write-copy step that would happen frequently during inserting data. Below shows the differentiation with this patch: - Before: 1. Compress "working_buffer" into "compressed_buffer" 2. write-copy into "out" - After: Directly Compress "working_buffer" into "out". [#37242](https://github.com/ClickHouse/ClickHouse/pull/37242) ([jasperzhu](https://github.com/jinjunzh)). #### Improvement + * Support types with non-standard defaults in ROLLUP, CUBE, GROUPING SETS. Closes [#37360](https://github.com/ClickHouse/ClickHouse/issues/37360). [#37667](https://github.com/ClickHouse/ClickHouse/pull/37667) ([Dmitry Novik](https://github.com/novikd)). * Fix stack traces collection on ARM. Closes [#37044](https://github.com/ClickHouse/ClickHouse/issues/37044). Closes [#15638](https://github.com/ClickHouse/ClickHouse/issues/15638). [#37797](https://github.com/ClickHouse/ClickHouse/pull/37797) ([Maksim Kita](https://github.com/kitaisreal)). * Client will try every IP address returned by DNS resolution until successful connection. [#37273](https://github.com/ClickHouse/ClickHouse/pull/37273) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). @@ -633,6 +788,7 @@ * Add implicit grants with grant option too. For example `GRANT CREATE TABLE ON test.* TO A WITH GRANT OPTION` now allows `A` to execute `GRANT CREATE VIEW ON test.* TO B`. [#38017](https://github.com/ClickHouse/ClickHouse/pull/38017) ([Vitaly Baranov](https://github.com/vitlibar)). #### Build/Testing/Packaging Improvement + * Use `clang-14` and LLVM infrastructure version 14 for builds. This closes [#34681](https://github.com/ClickHouse/ClickHouse/issues/34681). [#34754](https://github.com/ClickHouse/ClickHouse/pull/34754) ([Alexey Milovidov](https://github.com/alexey-milovidov)). Note: `clang-14` has [a bug](https://github.com/google/sanitizers/issues/1540) in ThreadSanitizer that makes our CI work worse. * Allow to drop privileges at startup. This simplifies Docker images. Closes [#36293](https://github.com/ClickHouse/ClickHouse/issues/36293). [#36341](https://github.com/ClickHouse/ClickHouse/pull/36341) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Add docs spellcheck to CI. [#37790](https://github.com/ClickHouse/ClickHouse/pull/37790) ([Vladimir C](https://github.com/vdimir)). @@ -690,7 +846,6 @@ * Fix possible heap-use-after-free error when reading system.projection_parts and system.projection_parts_columns . This fixes [#37184](https://github.com/ClickHouse/ClickHouse/issues/37184). [#37185](https://github.com/ClickHouse/ClickHouse/pull/37185) ([Amos Bird](https://github.com/amosbird)). * Fixed `DateTime64` fractional seconds behavior prior to Unix epoch. [#37697](https://github.com/ClickHouse/ClickHouse/pull/37697) ([Andrey Zvonov](https://github.com/zvonand)). [#37039](https://github.com/ClickHouse/ClickHouse/pull/37039) ([李扬](https://github.com/taiyang-li)). - ### ClickHouse release 22.5, 2022-05-19 #### Upgrade Notes @@ -743,7 +898,7 @@ * Implement partial GROUP BY key for optimize_aggregation_in_order. 
[#35111](https://github.com/ClickHouse/ClickHouse/pull/35111) ([Azat Khuzhin](https://github.com/azat)). #### Improvement - + * Show names of erroneous files in case of parsing errors while executing table functions `file`, `s3` and `url`. [#36314](https://github.com/ClickHouse/ClickHouse/pull/36314) ([Anton Popov](https://github.com/CurtizJ)). * Allowed to increase the number of threads for executing background operations (merges, mutations, moves and fetches) at runtime if they are specified at top level config. [#36425](https://github.com/ClickHouse/ClickHouse/pull/36425) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). * Now date time conversion functions that generates time before 1970-01-01 00:00:00 with partial hours/minutes timezones will be saturated to zero instead of overflow. This is the continuation of https://github.com/ClickHouse/ClickHouse/pull/29953 which addresses https://github.com/ClickHouse/ClickHouse/pull/29953#discussion_r800550280 . Mark as improvement because it's implementation defined behavior (and very rare case) and we are allowed to break it. [#36656](https://github.com/ClickHouse/ClickHouse/pull/36656) ([Amos Bird](https://github.com/amosbird)). @@ -852,7 +1007,6 @@ * Fix ALTER DROP COLUMN of nested column with compact parts (i.e. `ALTER TABLE x DROP COLUMN n`, when there is column `n.d`). [#35797](https://github.com/ClickHouse/ClickHouse/pull/35797) ([Azat Khuzhin](https://github.com/azat)). * Fix substring function range error length when `offset` and `length` is negative constant and `s` is not constant. [#33861](https://github.com/ClickHouse/ClickHouse/pull/33861) ([RogerYK](https://github.com/RogerYK)). - ### ClickHouse release 22.4, 2022-04-19 #### Backward Incompatible Change @@ -1004,8 +1158,7 @@ * Fix mutations in tables with enabled sparse columns. [#35284](https://github.com/ClickHouse/ClickHouse/pull/35284) ([Anton Popov](https://github.com/CurtizJ)). * Do not delay final part writing by default (fixes possible `Memory limit exceeded` during `INSERT` by adding `max_insert_delayed_streams_for_parallel_write` with default to 1000 for writes to s3 and disabled as before otherwise). [#34780](https://github.com/ClickHouse/ClickHouse/pull/34780) ([Azat Khuzhin](https://github.com/azat)). - -## ClickHouse release v22.3-lts, 2022-03-17 +### ClickHouse release v22.3-lts, 2022-03-17 #### Backward Incompatible Change @@ -1132,7 +1285,6 @@ * Fix incorrect result of trivial count query when part movement feature is used [#34089](https://github.com/ClickHouse/ClickHouse/issues/34089). [#34385](https://github.com/ClickHouse/ClickHouse/pull/34385) ([nvartolomei](https://github.com/nvartolomei)). * Fix inconsistency of `max_query_size` limitation in distributed subqueries. [#34078](https://github.com/ClickHouse/ClickHouse/pull/34078) ([Chao Ma](https://github.com/godliness)). - ### ClickHouse release v22.2, 2022-02-17 #### Upgrade Notes @@ -1308,7 +1460,6 @@ * Fix issue [#18206](https://github.com/ClickHouse/ClickHouse/issues/18206). [#33977](https://github.com/ClickHouse/ClickHouse/pull/33977) ([Vitaly Baranov](https://github.com/vitlibar)). * This PR allows using multiple LDAP storages in the same list of user directories. It worked earlier but was broken because LDAP tests are disabled (they are part of the testflows tests). [#33574](https://github.com/ClickHouse/ClickHouse/pull/33574) ([Vitaly Baranov](https://github.com/vitlibar)). 
- ### ClickHouse release v22.1, 2022-01-18 #### Upgrade Notes @@ -1335,7 +1486,6 @@ * Add function `decodeURLFormComponent` slightly different to `decodeURLComponent`. Close [#10298](https://github.com/ClickHouse/ClickHouse/issues/10298). [#33451](https://github.com/ClickHouse/ClickHouse/pull/33451) ([SuperDJY](https://github.com/cmsxbc)). * Allow to split `GraphiteMergeTree` rollup rules for plain/tagged metrics (optional rule_type field). [#33494](https://github.com/ClickHouse/ClickHouse/pull/33494) ([Michail Safronov](https://github.com/msaf1980)). - #### Performance Improvement * Support moving conditions to `PREWHERE` (setting `optimize_move_to_prewhere`) for tables of `Merge` engine if its all underlying tables supports `PREWHERE`. [#33300](https://github.com/ClickHouse/ClickHouse/pull/33300) ([Anton Popov](https://github.com/CurtizJ)). @@ -1351,7 +1501,6 @@ * Optimize selecting of MergeTree parts that can be moved between volumes. [#33225](https://github.com/ClickHouse/ClickHouse/pull/33225) ([OnePiece](https://github.com/zhongyuankai)). * Fix `sparse_hashed` dict performance with sequential keys (wrong hash function). [#32536](https://github.com/ClickHouse/ClickHouse/pull/32536) ([Azat Khuzhin](https://github.com/azat)). - #### Experimental Feature * Parallel reading from multiple replicas within a shard during distributed query without using sample key. To enable this, set `allow_experimental_parallel_reading_from_replicas = 1` and `max_parallel_replicas` to any number. This closes [#26748](https://github.com/ClickHouse/ClickHouse/issues/26748). [#29279](https://github.com/ClickHouse/ClickHouse/pull/29279) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). @@ -1364,7 +1513,6 @@ * Fix ACL with explicit digit hash in `clickhouse-keeper`: now the behavior consistent with ZooKeeper and generated digest is always accepted. [#33249](https://github.com/ClickHouse/ClickHouse/pull/33249) ([小路](https://github.com/nicelulu)). [#33246](https://github.com/ClickHouse/ClickHouse/pull/33246). * Fix unexpected projection removal when detaching parts. [#32067](https://github.com/ClickHouse/ClickHouse/pull/32067) ([Amos Bird](https://github.com/amosbird)). - #### Improvement * Now date time conversion functions that generates time before `1970-01-01 00:00:00` will be saturated to zero instead of overflow. [#29953](https://github.com/ClickHouse/ClickHouse/pull/29953) ([Amos Bird](https://github.com/amosbird)). It also fixes a bug in index analysis if date truncation function would yield result before the Unix epoch. @@ -1411,7 +1559,6 @@ * Updating `modification_time` for data part in `system.parts` after part movement [#32964](https://github.com/ClickHouse/ClickHouse/issues/32964). [#32965](https://github.com/ClickHouse/ClickHouse/pull/32965) ([save-my-heart](https://github.com/save-my-heart)). * Potential issue, cannot be exploited: integer overflow may happen in array resize. [#33024](https://github.com/ClickHouse/ClickHouse/pull/33024) ([varadarajkumar](https://github.com/varadarajkumar)). - #### Build/Testing/Packaging Improvement * Add packages, functional tests and Docker builds for AArch64 (ARM) version of ClickHouse. [#32911](https://github.com/ClickHouse/ClickHouse/pull/32911) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). [#32415](https://github.com/ClickHouse/ClickHouse/pull/32415) @@ -1426,7 +1573,6 @@ * Inject git information into clickhouse binary file. So we can get source code revision easily from clickhouse binary file. 
[#33124](https://github.com/ClickHouse/ClickHouse/pull/33124) ([taiyang-li](https://github.com/taiyang-li)). * Remove obsolete code from ConfigProcessor. Yandex specific code is not used anymore. The code contained one minor defect. This defect was reported by [Mallik Hassan](https://github.com/SadiHassan) in [#33032](https://github.com/ClickHouse/ClickHouse/issues/33032). This closes [#33032](https://github.com/ClickHouse/ClickHouse/issues/33032). [#33026](https://github.com/ClickHouse/ClickHouse/pull/33026) ([alexey-milovidov](https://github.com/alexey-milovidov)). - #### Bug Fix (user-visible misbehavior in official stable or prestable release) * Several fixes for format parsing. This is relevant if `clickhouse-server` is open for write access to adversary. Specifically crafted input data for `Native` format may lead to reading uninitialized memory or crash. This is relevant if `clickhouse-server` is open for write access to adversary. [#33050](https://github.com/ClickHouse/ClickHouse/pull/33050) ([Heena Bansal](https://github.com/HeenaBansal2009)). Fixed Apache Avro Union type index out of boundary issue in Apache Avro binary format. [#33022](https://github.com/ClickHouse/ClickHouse/pull/33022) ([Harry Lee](https://github.com/HarryLeeIBM)). Fix null pointer dereference in `LowCardinality` data when deserializing `LowCardinality` data in the Native format. [#33021](https://github.com/ClickHouse/ClickHouse/pull/33021) ([Harry Lee](https://github.com/HarryLeeIBM)). @@ -1485,5 +1631,4 @@ * Fix possible crash (or incorrect result) in case of `LowCardinality` arguments of window function. Fixes [#31114](https://github.com/ClickHouse/ClickHouse/issues/31114). [#31888](https://github.com/ClickHouse/ClickHouse/pull/31888) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). * Fix hang up with command `DROP TABLE system.query_log sync`. [#33293](https://github.com/ClickHouse/ClickHouse/pull/33293) ([zhanghuajie](https://github.com/zhanghuajieHIT)). - ## [Changelog for 2021](https://clickhouse.com/docs/en/whats-new/changelog/2021) diff --git a/README.md b/README.md index 9f4a39a2c97..f90df9686c2 100644 --- a/README.md +++ b/README.md @@ -5,16 +5,17 @@ ClickHouse® is an open-source column-oriented database management system that a ## Useful Links * [Official website](https://clickhouse.com/) has a quick high-level overview of ClickHouse on the main page. -* [ClickHouse Cloud](https://clickhouse.com/cloud) ClickHouse as a service, built by the creators and maintainers. +* [ClickHouse Cloud](https://clickhouse.cloud) ClickHouse as a service, built by the creators and maintainers. * [Tutorial](https://clickhouse.com/docs/en/getting_started/tutorial/) shows how to set up and query a small ClickHouse cluster. * [Documentation](https://clickhouse.com/docs/en/) provides more in-depth information. * [YouTube channel](https://www.youtube.com/c/ClickHouseDB) has a lot of content about ClickHouse in video format. * [Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-rxm3rdrk-lIUmhLC3V8WTaL0TGxsOmg) and [Telegram](https://telegram.me/clickhouse_en) allow chatting with ClickHouse users in real-time. -* [Blog](https://clickhouse.com/blog/en/) contains various ClickHouse-related articles, as well as announcements and reports about events. +* [Blog](https://clickhouse.com/blog/) contains various ClickHouse-related articles, as well as announcements and reports about events. * [Code Browser (Woboq)](https://clickhouse.com/codebrowser/ClickHouse/index.html) with syntax highlight and navigation. 
* [Code Browser (github.dev)](https://github.dev/ClickHouse/ClickHouse) with syntax highlight, powered by github.dev. * [Contacts](https://clickhouse.com/company/contact) can help to get your questions answered if there are any. ## Upcoming events -* [**v22.10 Release Webinar**](https://clickhouse.com/company/events/v22-10-release-webinar) Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release, provide live demos, and share vision into what is coming in the roadmap. -* [**Introducing ClickHouse Cloud**](https://clickhouse.com/company/events/cloud-beta) Introducing ClickHouse as a service, built by creators and maintainers of the fastest OLAP database on earth. Join Tanya Bragin for a detailed walkthrough of ClickHouse Cloud capabilities, as well as a peek behind the curtain to understand the unique architecture that makes our service tick. +* [**v22.11 Release Webinar**](https://clickhouse.com/company/events/v22-11-release-webinar) Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release, provide live demos, and share vision into what is coming in the roadmap. +* [**ClickHouse Meetup at the Deutsche Bank office in Berlin**](https://www.meetup.com/clickhouse-berlin-user-group/events/289311596/) Hear from Deutsche Bank on why they chose ClickHouse for big sensitive data in a regulated environment. The ClickHouse team will then present how ClickHouse is used for real time financial data analytics, including tick data, trade analytics and risk management. +* [**AWS re:Invent**](https://clickhouse.com/company/events/aws-reinvent) Core members of the ClickHouse team -- including 2 of our founders -- will be at re:Invent from November 29 to December 3. We are available on the show floor, but are also determining interest in holding an event during the time there. diff --git a/SECURITY.md b/SECURITY.md index fb6caa92cb8..0fb333c8ea3 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -10,9 +10,11 @@ The following versions of ClickHouse server are currently being supported with s | Version | Supported | |:-|:-| +| 22.10 | ✔️ | +| 22.9 | ✔️ | | 22.8 | ✔️ | -| 22.7 | ✔️ | -| 22.6 | ✔️ | +| 22.7 | ❌ | +| 22.6 | ❌ | | 22.5 | ❌ | | 22.4 | ❌ | | 22.3 | ✔️ | diff --git a/base/base/ReplxxLineReader.cpp b/base/base/ReplxxLineReader.cpp index a014fa4b8f2..e0dc81af5b0 100644 --- a/base/base/ReplxxLineReader.cpp +++ b/base/base/ReplxxLineReader.cpp @@ -151,7 +151,7 @@ public: { size_t dot_pos = path.rfind('.'); if (dot_pos != std::string::npos) - fd = ::mkstemps(path.data(), path.size() - dot_pos); + fd = ::mkstemps(path.data(), static_cast(path.size() - dot_pos)); else fd = ::mkstemp(path.data()); @@ -408,7 +408,7 @@ ReplxxLineReader::ReplxxLineReader( // In a simplest case use simple comment. 
commented_line = fmt::format("-- {}", state.text()); } - rx.set_state(replxx::Replxx::State(commented_line.c_str(), commented_line.size())); + rx.set_state(replxx::Replxx::State(commented_line.c_str(), static_cast(commented_line.size()))); return rx.invoke(Replxx::ACTION::COMMIT_LINE, code); }; @@ -480,7 +480,7 @@ void ReplxxLineReader::openEditor() if (executeCommand(argv) == 0) { const std::string & new_query = readFile(editor_file.getPath()); - rx.set_state(replxx::Replxx::State(new_query.c_str(), new_query.size())); + rx.set_state(replxx::Replxx::State(new_query.c_str(), static_cast(new_query.size()))); } } catch (const std::runtime_error & e) @@ -526,7 +526,7 @@ void ReplxxLineReader::openInteractiveHistorySearch() { std::string new_query = readFile(output_file.getPath()); rightTrim(new_query); - rx.set_state(replxx::Replxx::State(new_query.c_str(), new_query.size())); + rx.set_state(replxx::Replxx::State(new_query.c_str(), static_cast(new_query.size()))); } } catch (const std::runtime_error & e) diff --git a/base/base/StringRef.h b/base/base/StringRef.h index 5ee197021ca..a3e32ff5058 100644 --- a/base/base/StringRef.h +++ b/base/base/StringRef.h @@ -265,7 +265,7 @@ inline size_t hashLessThan16(const char * data, size_t size) struct CRC32Hash { - size_t operator() (StringRef x) const + unsigned operator() (StringRef x) const { const char * pos = x.data; size_t size = x.size; @@ -275,22 +275,22 @@ struct CRC32Hash if (size < 8) { - return hashLessThan8(x.data, x.size); + return static_cast(hashLessThan8(x.data, x.size)); } const char * end = pos + size; - size_t res = -1ULL; + unsigned res = -1U; do { UInt64 word = unalignedLoad(pos); - res = CRC_INT(res, word); + res = static_cast(CRC_INT(res, word)); pos += 8; } while (pos + 8 < end); UInt64 word = unalignedLoad(end - 8); /// I'm not sure if this is normal. 
- res = CRC_INT(res, word); + res = static_cast(CRC_INT(res, word)); return res; } @@ -302,7 +302,7 @@ struct StringRefHash : CRC32Hash {}; struct CRC32Hash { - size_t operator() (StringRef /* x */) const + unsigned operator() (StringRef /* x */) const { throw std::logic_error{"Not implemented CRC32Hash without SSE"}; } diff --git a/base/base/itoa.h b/base/base/itoa.h index 5e0b18d50c0..dd3e3cc96fe 100644 --- a/base/base/itoa.h +++ b/base/base/itoa.h @@ -122,7 +122,7 @@ QuotientAndRemainder static inline split(UnsignedOfSize value) constexpr DivisionBy10PowN division; UnsignedOfSize quotient = (division.multiplier * (UnsignedOfSize<2 * N>(value) + division.add)) >> division.shift; - UnsignedOfSize remainder = value - quotient * pow10>(N); + UnsignedOfSize remainder = static_cast>(value - quotient * pow10>(N)); return {quotient, remainder}; } diff --git a/base/base/safeExit.cpp b/base/base/safeExit.cpp index e4f9e80759e..ddb93dac65b 100644 --- a/base/base/safeExit.cpp +++ b/base/base/safeExit.cpp @@ -1,10 +1,8 @@ #if defined(OS_LINUX) # include #endif -#include #include #include -#include [[noreturn]] void safeExit(int code) { diff --git a/base/base/wide_integer_impl.h b/base/base/wide_integer_impl.h index eb2edcb98ff..1b5f502722c 100644 --- a/base/base/wide_integer_impl.h +++ b/base/base/wide_integer_impl.h @@ -227,6 +227,8 @@ struct integer::_impl template __attribute__((no_sanitize("undefined"))) constexpr static auto to_Integral(T f) noexcept { + /// NOTE: this can be called with DB::Decimal, and in this case, result + /// will be wrong if constexpr (std::is_signed_v) return static_cast(f); else diff --git a/cmake/autogenerated_versions.txt b/cmake/autogenerated_versions.txt index 9728451f38a..11b37f5a7c8 100644 --- a/cmake/autogenerated_versions.txt +++ b/cmake/autogenerated_versions.txt @@ -2,11 +2,11 @@ # NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION, # only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes. -SET(VERSION_REVISION 54467) +SET(VERSION_REVISION 54468) SET(VERSION_MAJOR 22) -SET(VERSION_MINOR 10) +SET(VERSION_MINOR 11) SET(VERSION_PATCH 1) -SET(VERSION_GITHASH 3030d4c7ff09ec44ab07d0a8069ea923227288a1) -SET(VERSION_DESCRIBE v22.10.1.1-testing) -SET(VERSION_STRING 22.10.1.1) +SET(VERSION_GITHASH 98ab5a3c189232ea2a3dddb9d2be7196ae8b3434) +SET(VERSION_DESCRIBE v22.11.1.1-testing) +SET(VERSION_STRING 22.11.1.1) # end of autochange diff --git a/cmake/clang_tidy.cmake b/cmake/clang_tidy.cmake index 200282234ca..57295682487 100644 --- a/cmake/clang_tidy.cmake +++ b/cmake/clang_tidy.cmake @@ -3,10 +3,20 @@ option (ENABLE_CLANG_TIDY "Use clang-tidy static analyzer" OFF) if (ENABLE_CLANG_TIDY) - find_program (CLANG_TIDY_PATH NAMES "clang-tidy" "clang-tidy-15" "clang-tidy-14" "clang-tidy-13" "clang-tidy-12") + find_program (CLANG_TIDY_CACHE_PATH NAMES "clang-tidy-cache") + if (CLANG_TIDY_CACHE_PATH) + find_program (_CLANG_TIDY_PATH NAMES "clang-tidy" "clang-tidy-15" "clang-tidy-14" "clang-tidy-13" "clang-tidy-12") + + # Why do we use ';' here? + # It's a cmake black magic: https://cmake.org/cmake/help/latest/prop_tgt/LANG_CLANG_TIDY.html#prop_tgt:%3CLANG%3E_CLANG_TIDY + # The CLANG_TIDY_PATH is passed to CMAKE_CXX_CLANG_TIDY, which follows CXX_CLANG_TIDY syntax. 
+ set (CLANG_TIDY_PATH "${CLANG_TIDY_CACHE_PATH};${_CLANG_TIDY_PATH}" CACHE STRING "A combined command to run clang-tidy with caching wrapper") + else () + find_program (CLANG_TIDY_PATH NAMES "clang-tidy" "clang-tidy-15" "clang-tidy-14" "clang-tidy-13" "clang-tidy-12") + endif () if (CLANG_TIDY_PATH) - message(STATUS + message (STATUS "Using clang-tidy: ${CLANG_TIDY_PATH}. The checks will be run during build process. See the .clang-tidy file at the root directory to configure the checks.") @@ -15,11 +25,15 @@ if (ENABLE_CLANG_TIDY) # clang-tidy requires assertions to guide the analysis # Note that NDEBUG is set implicitly by CMake for non-debug builds - set(COMPILER_FLAGS "${COMPILER_FLAGS} -UNDEBUG") + set (COMPILER_FLAGS "${COMPILER_FLAGS} -UNDEBUG") - # The variable CMAKE_CXX_CLANG_TIDY will be set inside src and base directories with non third-party code. + # The variable CMAKE_CXX_CLANG_TIDY will be set inside the following directories with non third-party code. + # - base + # - programs + # - src + # - utils # set (CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_PATH}") else () - message(${RECONFIGURE_MESSAGE_LEVEL} "clang-tidy is not found") + message (${RECONFIGURE_MESSAGE_LEVEL} "clang-tidy is not found") endif () endif () diff --git a/cmake/cpu_features.cmake b/cmake/cpu_features.cmake index 6707d703372..a554992caf3 100644 --- a/cmake/cpu_features.cmake +++ b/cmake/cpu_features.cmake @@ -61,8 +61,14 @@ elseif (ARCH_AARCH64) endif () elseif (ARCH_PPC64LE) + # By Default, build for power8 and up, allow building for power9 and up # Note that gcc and clang have support for x86 SSE2 intrinsics when building for PowerPC - set (COMPILER_FLAGS "${COMPILER_FLAGS} -maltivec -mcpu=power8 -D__SSE2__=1 -DNO_WARN_X86_INTRINSICS") + option (POWER9 "Build for Power 9 CPU and above" 0) + if(POWER9) + set (COMPILER_FLAGS "${COMPILER_FLAGS} -maltivec -mcpu=power9 -D__SSE2__=1 -DNO_WARN_X86_INTRINSICS") + else () + set (COMPILER_FLAGS "${COMPILER_FLAGS} -maltivec -mcpu=power8 -D__SSE2__=1 -DNO_WARN_X86_INTRINSICS") + endif () elseif (ARCH_AMD64) option (ENABLE_SSSE3 "Use SSSE3 instructions on x86_64" 1) @@ -75,6 +81,7 @@ elseif (ARCH_AMD64) option (ENABLE_AVX512 "Use AVX512 instructions on x86_64" 0) option (ENABLE_AVX512_VBMI "Use AVX512_VBMI instruction on x86_64 (depends on ENABLE_AVX512)" 0) option (ENABLE_BMI "Use BMI instructions on x86_64" 0) + option (ENABLE_BMI2 "Use BMI2 instructions on x86_64 (depends on ENABLE_AVX2)" 0) option (ENABLE_AVX2_FOR_SPEC_OP "Use avx2 instructions for specific operations on x86_64" 0) option (ENABLE_AVX512_FOR_SPEC_OP "Use avx512 instructions for specific operations on x86_64" 0) @@ -90,6 +97,7 @@ elseif (ARCH_AMD64) SET(ENABLE_AVX512 0) SET(ENABLE_AVX512_VBMI 0) SET(ENABLE_BMI 0) + SET(ENABLE_BMI2 0) SET(ENABLE_AVX2_FOR_SPEC_OP 0) SET(ENABLE_AVX512_FOR_SPEC_OP 0) endif() @@ -237,6 +245,20 @@ elseif (ARCH_AMD64) set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}") endif () + set (TEST_FLAG "-mbmi2") + set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0") + check_cxx_source_compiles(" + #include + int main() { + auto a = _pdep_u64(0, 0); + (void)a; + return 0; + } + " HAVE_BMI2) + if (HAVE_BMI2 AND HAVE_AVX2 AND ENABLE_AVX2 AND ENABLE_BMI2) + set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}") + endif () + # Limit avx2/avx512 flag for specific source build set (X86_INTRINSICS_FLAGS "") if (ENABLE_AVX2_FOR_SPEC_OP) diff --git a/cmake/ld.lld.in b/cmake/ld.lld.in index 9736dab1bc3..78a264a0089 100755 --- a/cmake/ld.lld.in +++ b/cmake/ld.lld.in @@ -3,15 +3,15 @@ # This is a 
workaround for bug in llvm/clang, # that does not produce .debug_aranges with LTO # -# NOTE: this is a temporary solution, that should be removed once [1] will be -# resolved. +# NOTE: this is a temporary solution, that should be removed after upgrading to +# clang-16/llvm-16. # -# [1]: https://discourse.llvm.org/t/clang-does-not-produce-full-debug-aranges-section-with-thinlto/64898/8 +# Refs: https://reviews.llvm.org/D133092 # NOTE: only -flto=thin is supported. # NOTE: it is not possible to check was there -gdwarf-aranges initially or not. if [[ "$*" =~ -plugin-opt=thinlto ]]; then - exec "@LLD_PATH@" -mllvm -generate-arange-section "$@" + exec "@LLD_PATH@" -plugin-opt=-generate-arange-section "$@" else exec "@LLD_PATH@" "$@" fi diff --git a/cmake/sanitize.cmake b/cmake/sanitize.cmake index 73610545009..f0cef54b0b8 100644 --- a/cmake/sanitize.cmake +++ b/cmake/sanitize.cmake @@ -85,7 +85,7 @@ if (SANITIZE) # and they have a bunch of flags not halt the program if UIO happend and even to silence that warnings. # But for unknown reason that flags don't work with ClickHouse or we don't understand how to properly use them, # that's why we often receive reports about UIO. The simplest way to avoid this is just set this flag here. - set(UBSAN_FLAGS "${SAN_FLAGS} -fno-sanitize=unsigned-integer-overflow") + set(UBSAN_FLAGS "${UBSAN_FLAGS} -fno-sanitize=unsigned-integer-overflow") endif() if (COMPILER_CLANG) set (UBSAN_FLAGS "${UBSAN_FLAGS} -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/ubsan_suppressions.txt") diff --git a/cmake/tools.cmake b/cmake/tools.cmake index 57d39899a40..8a17d97cf13 100644 --- a/cmake/tools.cmake +++ b/cmake/tools.cmake @@ -117,7 +117,7 @@ endif() # Archiver if (COMPILER_GCC) - find_program (LLVM_AR_PATH NAMES "llvm-ar" "llvm-ar-14" "llvm-ar-13" "llvm-ar-12") + find_program (LLVM_AR_PATH NAMES "llvm-ar" "llvm-ar-15" "llvm-ar-14" "llvm-ar-13" "llvm-ar-12") else () find_program (LLVM_AR_PATH NAMES "llvm-ar-${COMPILER_VERSION_MAJOR}" "llvm-ar") endif () @@ -131,7 +131,7 @@ message(STATUS "Using archiver: ${CMAKE_AR}") # Ranlib if (COMPILER_GCC) - find_program (LLVM_RANLIB_PATH NAMES "llvm-ranlib" "llvm-ranlib-14" "llvm-ranlib-13" "llvm-ranlib-12") + find_program (LLVM_RANLIB_PATH NAMES "llvm-ranlib" "llvm-ranlib-15" "llvm-ranlib-14" "llvm-ranlib-13" "llvm-ranlib-12") else () find_program (LLVM_RANLIB_PATH NAMES "llvm-ranlib-${COMPILER_VERSION_MAJOR}" "llvm-ranlib") endif () @@ -145,7 +145,7 @@ message(STATUS "Using ranlib: ${CMAKE_RANLIB}") # Install Name Tool if (COMPILER_GCC) - find_program (LLVM_INSTALL_NAME_TOOL_PATH NAMES "llvm-install-name-tool" "llvm-install-name-tool-14" "llvm-install-name-tool-13" "llvm-install-name-tool-12") + find_program (LLVM_INSTALL_NAME_TOOL_PATH NAMES "llvm-install-name-tool" "llvm-install-name-tool-15" "llvm-install-name-tool-14" "llvm-install-name-tool-13" "llvm-install-name-tool-12") else () find_program (LLVM_INSTALL_NAME_TOOL_PATH NAMES "llvm-install-name-tool-${COMPILER_VERSION_MAJOR}" "llvm-install-name-tool") endif () @@ -159,7 +159,7 @@ message(STATUS "Using install-name-tool: ${CMAKE_INSTALL_NAME_TOOL}") # Objcopy if (COMPILER_GCC) - find_program (OBJCOPY_PATH NAMES "llvm-objcopy" "llvm-objcopy-14" "llvm-objcopy-13" "llvm-objcopy-12" "objcopy") + find_program (OBJCOPY_PATH NAMES "llvm-objcopy" "llvm-objcopy-15" "llvm-objcopy-14" "llvm-objcopy-13" "llvm-objcopy-12" "objcopy") else () find_program (OBJCOPY_PATH NAMES "llvm-objcopy-${COMPILER_VERSION_MAJOR}" "llvm-objcopy" "objcopy") endif () @@ -173,7 +173,7 @@ endif () # Strip if 
(COMPILER_GCC) - find_program (STRIP_PATH NAMES "llvm-strip" "llvm-strip-14" "llvm-strip-13" "llvm-strip-12" "strip") + find_program (STRIP_PATH NAMES "llvm-strip" "llvm-strip-15" "llvm-strip-14" "llvm-strip-13" "llvm-strip-12" "strip") else () find_program (STRIP_PATH NAMES "llvm-strip-${COMPILER_VERSION_MAJOR}" "llvm-strip" "strip") endif () diff --git a/cmake/warnings.cmake b/cmake/warnings.cmake index 89f3a62ba2e..8364b0c2c08 100644 --- a/cmake/warnings.cmake +++ b/cmake/warnings.cmake @@ -27,7 +27,6 @@ if (COMPILER_CLANG) no_warning(sign-conversion) no_warning(implicit-int-conversion) no_warning(implicit-int-float-conversion) - no_warning(shorten-64-to-32) no_warning(ctad-maybe-unsupported) # clang 9+, linux-only no_warning(disabled-macro-expansion) no_warning(documentation-unknown-command) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index f914c0d2d3f..8ebd4ab55d3 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -114,6 +114,7 @@ if (ENABLE_TESTS) endif() add_contrib (llvm-project-cmake llvm-project) +add_contrib (libfuzzer-cmake llvm-project) add_contrib (libxml2-cmake libxml2) add_contrib (aws-s3-cmake aws @@ -164,6 +165,7 @@ add_contrib (sqlite-cmake sqlite-amalgamation) add_contrib (s2geometry-cmake s2geometry) add_contrib (c-ares-cmake c-ares) add_contrib (qpl-cmake qpl) +add_contrib (morton-nd-cmake morton-nd) add_contrib(annoy-cmake annoy) diff --git a/contrib/cctz b/contrib/cctz index 7a454c25c7d..5c8528fb35e 160000 --- a/contrib/cctz +++ b/contrib/cctz @@ -1 +1 @@ -Subproject commit 7a454c25c7d16053bcd327cdd16329212a08fa4a +Subproject commit 5c8528fb35e89ee0b3a7157490423fba0d4dd7b5 diff --git a/contrib/libcxx b/contrib/libcxx index 172b2ae074f..4db7f838afd 160000 --- a/contrib/libcxx +++ b/contrib/libcxx @@ -1 +1 @@ -Subproject commit 172b2ae074f6755145b91c53a95c8540c1468239 +Subproject commit 4db7f838afd3139eb3761694b04d31275df45d2d diff --git a/contrib/libcxx-cmake/CMakeLists.txt b/contrib/libcxx-cmake/CMakeLists.txt index 6f42a479588..53c6ff58f83 100644 --- a/contrib/libcxx-cmake/CMakeLists.txt +++ b/contrib/libcxx-cmake/CMakeLists.txt @@ -25,6 +25,7 @@ set(SRCS "${LIBCXX_SOURCE_DIR}/src/ios.cpp" "${LIBCXX_SOURCE_DIR}/src/ios.instantiations.cpp" "${LIBCXX_SOURCE_DIR}/src/iostream.cpp" +"${LIBCXX_SOURCE_DIR}/src/legacy_debug_handler.cpp" "${LIBCXX_SOURCE_DIR}/src/legacy_pointer_safety.cpp" "${LIBCXX_SOURCE_DIR}/src/locale.cpp" "${LIBCXX_SOURCE_DIR}/src/memory.cpp" @@ -49,6 +50,7 @@ set(SRCS "${LIBCXX_SOURCE_DIR}/src/valarray.cpp" "${LIBCXX_SOURCE_DIR}/src/variant.cpp" "${LIBCXX_SOURCE_DIR}/src/vector.cpp" +"${LIBCXX_SOURCE_DIR}/src/verbose_abort.cpp" ) add_library(cxx ${SRCS}) diff --git a/contrib/libcxxabi b/contrib/libcxxabi index 6eb7cc7a7bd..a736a6b3c6a 160000 --- a/contrib/libcxxabi +++ b/contrib/libcxxabi @@ -1 +1 @@ -Subproject commit 6eb7cc7a7bdd779e6734d1b9fb451df2274462d7 +Subproject commit a736a6b3c6a7b8aae2ebad629ca21b2c55b4820e diff --git a/contrib/libcxxabi-cmake/CMakeLists.txt b/contrib/libcxxabi-cmake/CMakeLists.txt index bf1ede8a60e..a59452eee9a 100644 --- a/contrib/libcxxabi-cmake/CMakeLists.txt +++ b/contrib/libcxxabi-cmake/CMakeLists.txt @@ -9,6 +9,7 @@ set(SRCS "${LIBCXXABI_SOURCE_DIR}/src/cxa_exception_storage.cpp" "${LIBCXXABI_SOURCE_DIR}/src/cxa_guard.cpp" "${LIBCXXABI_SOURCE_DIR}/src/cxa_handlers.cpp" +# "${LIBCXXABI_SOURCE_DIR}/src/cxa_noexception.cpp" "${LIBCXXABI_SOURCE_DIR}/src/cxa_personality.cpp" "${LIBCXXABI_SOURCE_DIR}/src/cxa_thread_atexit.cpp" "${LIBCXXABI_SOURCE_DIR}/src/cxa_vector.cpp" diff --git 
a/contrib/libfuzzer-cmake/CMakeLists.txt b/contrib/libfuzzer-cmake/CMakeLists.txt new file mode 100644 index 00000000000..ff3a91d828e --- /dev/null +++ b/contrib/libfuzzer-cmake/CMakeLists.txt @@ -0,0 +1,35 @@ +set(COMPILER_RT_FUZZER_SRC_DIR "${ClickHouse_SOURCE_DIR}/contrib/llvm-project/compiler-rt/lib/fuzzer") + +set(FUZZER_SRCS + "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerCrossOver.cpp" + "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerDataFlowTrace.cpp" + "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerDriver.cpp" + "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerExtFunctionsDlsym.cpp" + "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerExtFunctionsWeak.cpp" + "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerExtFunctionsWindows.cpp" + "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerExtraCounters.cpp" + "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerExtraCountersDarwin.cpp" + "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerExtraCountersWindows.cpp" + "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerFork.cpp" + "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerIO.cpp" + "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerIOPosix.cpp" + "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerIOWindows.cpp" + "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerLoop.cpp" + "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerMerge.cpp" + "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerMutate.cpp" + "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerSHA1.cpp" + "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerTracePC.cpp" + "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerUtil.cpp" + "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerUtilDarwin.cpp" + "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerUtilFuchsia.cpp" + "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerUtilLinux.cpp" + "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerUtilPosix.cpp" + "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerUtilWindows.cpp" +) + +add_library(_fuzzer_no_main STATIC ${FUZZER_SRCS}) +add_library(ch_contrib::fuzzer_no_main ALIAS _fuzzer_no_main) + +add_library(_fuzzer STATIC ${FUZZER_SRCS} "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerMain.cpp") +add_library(ch_contrib::fuzzer ALIAS _fuzzer) + diff --git a/contrib/llvm-project-cmake/CMakeLists.txt b/contrib/llvm-project-cmake/CMakeLists.txt index 6a73ae0f0c6..7af4a23bc9d 100644 --- a/contrib/llvm-project-cmake/CMakeLists.txt +++ b/contrib/llvm-project-cmake/CMakeLists.txt @@ -21,6 +21,9 @@ set (LLVM_INCLUDE_DIRS "${ClickHouse_BINARY_DIR}/contrib/llvm-project/llvm/include" ) set (LLVM_LIBRARY_DIRS "${ClickHouse_BINARY_DIR}/contrib/llvm-project/llvm") +# NOTE: You should not remove this line since otherwise it will use default 20, +# and llvm cannot be compiled with bundled libcxx and 20 standard. +set (CMAKE_CXX_STANDARD 14) # This list was generated by listing all LLVM libraries, compiling the binary and removing all libraries while it still compiles. 
set (REQUIRED_LLVM_LIBRARIES diff --git a/contrib/morton-nd b/contrib/morton-nd new file mode 160000 index 00000000000..3795491a4aa --- /dev/null +++ b/contrib/morton-nd @@ -0,0 +1 @@ +Subproject commit 3795491a4aa3cdc916c8583094683f0d68df5bc0 diff --git a/contrib/morton-nd-cmake/CMakeLists.txt b/contrib/morton-nd-cmake/CMakeLists.txt new file mode 100644 index 00000000000..4842781503f --- /dev/null +++ b/contrib/morton-nd-cmake/CMakeLists.txt @@ -0,0 +1,3 @@ +add_library(_morton_nd INTERFACE) +target_include_directories(_morton_nd SYSTEM BEFORE INTERFACE "${ClickHouse_SOURCE_DIR}/contrib/morton-nd/include/") +add_library(ch_contrib::morton_nd ALIAS _morton_nd) diff --git a/contrib/rocksdb b/contrib/rocksdb index e7c2b2f7bcf..2c8998e26c6 160000 --- a/contrib/rocksdb +++ b/contrib/rocksdb @@ -1 +1 @@ -Subproject commit e7c2b2f7bcf3b4b33892a1a6d25c32a93edfbdb9 +Subproject commit 2c8998e26c6d46b27c710d7829c3a15e34959f70 diff --git a/contrib/rocksdb-cmake/CMakeLists.txt b/contrib/rocksdb-cmake/CMakeLists.txt index b9dd2558348..466adf6aff0 100644 --- a/contrib/rocksdb-cmake/CMakeLists.txt +++ b/contrib/rocksdb-cmake/CMakeLists.txt @@ -78,23 +78,13 @@ endif() include(CheckCCompilerFlag) if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64") - CHECK_C_COMPILER_FLAG("-mcpu=power9" HAS_POWER9) - if(HAS_POWER9) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mcpu=power9 -mtune=power9") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mcpu=power9 -mtune=power9") + if(POWER9) + set(HAS_POWER9 1) + set(HAS_ALTIVEC 1) else() - CHECK_C_COMPILER_FLAG("-mcpu=power8" HAS_POWER8) - if(HAS_POWER8) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mcpu=power8 -mtune=power8") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mcpu=power8 -mtune=power8") - endif(HAS_POWER8) - endif(HAS_POWER9) - CHECK_C_COMPILER_FLAG("-maltivec" HAS_ALTIVEC) - if(HAS_ALTIVEC) - message(STATUS " HAS_ALTIVEC yes") - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -maltivec") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -maltivec") - endif(HAS_ALTIVEC) + set(HAS_POWER8 1) + set(HAS_ALTIVEC 1) + endif(POWER9) endif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64") if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64|arm64|ARM64") diff --git a/contrib/zlib-ng b/contrib/zlib-ng index bffad6f6fe7..50f0eae1a41 160000 --- a/contrib/zlib-ng +++ b/contrib/zlib-ng @@ -1 +1 @@ -Subproject commit bffad6f6fe74d6a2f92e2668390664a926c68733 +Subproject commit 50f0eae1a411764cd6d1e85b3ce471438acd3c1c diff --git a/docker/packager/binary/Dockerfile b/docker/packager/binary/Dockerfile index 2954cd574d0..06c3c0d80f0 100644 --- a/docker/packager/binary/Dockerfile +++ b/docker/packager/binary/Dockerfile @@ -73,7 +73,7 @@ RUN apt-get install binutils-riscv64-linux-gnu # Architecture of the image when BuildKit/buildx is used ARG TARGETARCH -ARG NFPM_VERSION=2.18.1 +ARG NFPM_VERSION=2.20.0 RUN arch=${TARGETARCH:-amd64} \ && curl -Lo /tmp/nfpm.deb "https://github.com/goreleaser/nfpm/releases/download/v${NFPM_VERSION}/nfpm_${arch}.deb" \ @@ -91,6 +91,9 @@ ENV PATH="$PATH:/usr/local/go/bin" ENV GOPATH=/workdir/go ENV GOCACHE=/workdir/ +RUN curl https://raw.githubusercontent.com/matus-chochlik/ctcache/7fd516e91c17779cbc6fc18bd119313d9532dd90/clang-tidy-cache -Lo /usr/bin/clang-tidy-cache \ + && chmod +x /usr/bin/clang-tidy-cache + RUN mkdir /workdir && chmod 777 /workdir WORKDIR /workdir diff --git a/docker/packager/packager b/docker/packager/packager index b4aa4ebdd91..7f6bd8818fb 100755 --- a/docker/packager/packager +++ b/docker/packager/packager @@ -208,6 +208,7 @@ def parse_env_variables( cxx = 
cc.replace("gcc", "g++").replace("clang", "clang++") if package_type == "deb": + # NOTE: This are the env for packages/build script result.append("MAKE_DEB=true") cmake_flags.append("-DENABLE_TESTS=0") cmake_flags.append("-DENABLE_UTILS=0") @@ -257,6 +258,10 @@ def parse_env_variables( if clang_tidy: # 15G is not enough for tidy build cache_maxsize = "25G" + + # `CTCACHE_DIR` has the same purpose as the `CCACHE_DIR` above. + # It's there to have the clang-tidy cache embedded into our standard `CCACHE_DIR` + result.append("CTCACHE_DIR=/ccache/clang-tidy-cache") result.append(f"CCACHE_MAXSIZE={cache_maxsize}") if distcc_hosts: @@ -268,6 +273,7 @@ def parse_env_variables( result.append('DISTCC_HOSTS="localhost/`nproc`"') if additional_pkgs: + # NOTE: This are the env for packages/build script result.append("MAKE_APK=true") result.append("MAKE_RPM=true") result.append("MAKE_TGZ=true") @@ -280,9 +286,7 @@ def parse_env_variables( cmake_flags.append("-DENABLE_TESTS=1") if shared_libraries: - cmake_flags.append( - "-DUSE_STATIC_LIBRARIES=0 -DSPLIT_SHARED_LIBRARIES=1" - ) + cmake_flags.append("-DUSE_STATIC_LIBRARIES=0 -DSPLIT_SHARED_LIBRARIES=1") # We can't always build utils because it requires too much space, but # we have to build them at least in some way in CI. The shared library # build is probably the least heavy disk-wise. diff --git a/docker/server/Dockerfile.alpine b/docker/server/Dockerfile.alpine index 5b597f927a2..8f1cf6ee98b 100644 --- a/docker/server/Dockerfile.alpine +++ b/docker/server/Dockerfile.alpine @@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="22.9.3.18" +ARG VERSION="22.10.2.11" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" # user/group precreated explicitly with fixed uid/gid on purpose. diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu index c6254b898ed..d5fc5d8e0d3 100644 --- a/docker/server/Dockerfile.ubuntu +++ b/docker/server/Dockerfile.ubuntu @@ -21,7 +21,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list ARG REPO_CHANNEL="stable" ARG REPOSITORY="deb https://packages.clickhouse.com/deb ${REPO_CHANNEL} main" -ARG VERSION="22.9.3.18" +ARG VERSION="22.10.2.11" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" # set non-empty deb_location_url url to create a docker image diff --git a/docker/test/codebrowser/Dockerfile b/docker/test/codebrowser/Dockerfile index ceed93c3ac7..b76b8234c81 100644 --- a/docker/test/codebrowser/Dockerfile +++ b/docker/test/codebrowser/Dockerfile @@ -36,10 +36,7 @@ RUN arch=${TARGETARCH:-amd64} \ # repo versions doesn't work correctly with C++17 # also we push reports to s3, so we add index.html to subfolder urls # https://github.com/ClickHouse-Extras/woboq_codebrowser/commit/37e15eaf377b920acb0b48dbe82471be9203f76b -# TODO: remove branch in a few weeks after merge, e.g. in May or June 2022 -# -# FIXME: update location of a repo -RUN git clone https://github.com/azat/woboq_codebrowser --branch llvm-15 \ +RUN git clone https://github.com/ClickHouse/woboq_codebrowser \ && cd woboq_codebrowser \ && cmake . 
-G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_COMPILER=clang\+\+-${LLVM_VERSION} -DCMAKE_C_COMPILER=clang-${LLVM_VERSION} \ && ninja \ diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index 9d6cf22c817..de9125d565b 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -136,6 +136,7 @@ function clone_submodules contrib/wyhash contrib/hashidsxx contrib/c-ares + contrib/morton-nd ) git submodule sync diff --git a/docker/test/integration/base/Dockerfile b/docker/test/integration/base/Dockerfile index 9b6318a5426..a2d86187a23 100644 --- a/docker/test/integration/base/Dockerfile +++ b/docker/test/integration/base/Dockerfile @@ -27,9 +27,14 @@ RUN apt-get update \ tar \ tzdata \ unixodbc \ + python3-pip \ + libcurl4-openssl-dev \ + libssl-dev \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* +RUN pip3 install pycurl + # Architecture of the image when BuildKit/buildx is used ARG TARGETARCH diff --git a/docker/test/stress/run.sh b/docker/test/stress/run.sh old mode 100755 new mode 100644 index 6b9954c2431..7058853b43e --- a/docker/test/stress/run.sh +++ b/docker/test/stress/run.sh @@ -47,7 +47,6 @@ function install_packages() function configure() { - export ZOOKEEPER_FAULT_INJECTION=1 # install test configs export USE_DATABASE_ORDINARY=1 export EXPORT_S3_STORAGE_POLICIES=1 @@ -203,6 +202,7 @@ quit install_packages package_folder +export ZOOKEEPER_FAULT_INJECTION=1 configure azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log & @@ -243,6 +243,7 @@ stop # Let's enable S3 storage by default export USE_S3_STORAGE_FOR_MERGE_TREE=1 +export ZOOKEEPER_FAULT_INJECTION=1 configure # But we still need default disk because some tables loaded only into it @@ -270,10 +271,6 @@ clickhouse-client --query "SELECT 'Server successfully started', 'OK'" >> /test_ || (echo -e 'Server failed to start (see application_errors.txt and clickhouse-server.clean.log)\tFAIL' >> /test_output/test_results.tsv \ && grep -a ".*Application" /var/log/clickhouse-server/clickhouse-server.log > /test_output/application_errors.txt) -echo "Get previous release tag" -previous_release_tag=$(clickhouse-client --query="SELECT version()" | get_previous_release_tag) -echo $previous_release_tag - stop [ -f /var/log/clickhouse-server/clickhouse-server.log ] || echo -e "Server log does not exist\tFAIL" @@ -331,6 +328,10 @@ zgrep -Fa " received signal " /test_output/gdb.log > /dev/null \ echo -e "Backward compatibility check\n" +echo "Get previous release tag" +previous_release_tag=$(clickhouse-client --version | grep -o "[0-9]*\.[0-9]*\.[0-9]*\.[0-9]*" | get_previous_release_tag) +echo $previous_release_tag + echo "Clone previous release repository" git clone https://github.com/ClickHouse/ClickHouse.git --no-tags --progress --branch=$previous_release_tag --no-recurse-submodules --depth=1 previous_release_repository @@ -375,6 +376,8 @@ else install_packages previous_release_package_folder # Start server from previous release + # Previous version may not be ready for fault injections + export ZOOKEEPER_FAULT_INJECTION=0 configure # Avoid "Setting s3_check_objects_after_upload is neither a builtin setting..." 
@@ -389,12 +392,23 @@ else clickhouse-client --query="SELECT 'Server version: ', version()" - # Install new package before running stress test because we should use new clickhouse-client and new clickhouse-test - # But we should leave old binary in /usr/bin/ for gdb (so it will print sane stacktarces) + # Install new package before running stress test because we should use new + # clickhouse-client and new clickhouse-test. + # + # But we should leave old binary in /usr/bin/ and debug symbols in + # /usr/lib/debug/usr/bin (if any) for gdb and internal DWARF parser, so it + # will print sane stacktraces and also to avoid possible crashes. + # + # FIXME: those files can be extracted directly from debian package, but + # actually better solution will be to use different PATH instead of playing + # games with files from packages. mv /usr/bin/clickhouse previous_release_package_folder/ + mv /usr/lib/debug/usr/bin/clickhouse.debug previous_release_package_folder/ install_packages package_folder mv /usr/bin/clickhouse package_folder/ + mv /usr/lib/debug/usr/bin/clickhouse.debug package_folder/ mv previous_release_package_folder/clickhouse /usr/bin/ + mv previous_release_package_folder/clickhouse.debug /usr/lib/debug/usr/bin/clickhouse.debug mkdir tmp_stress_output @@ -410,6 +424,8 @@ else # Start new server mv package_folder/clickhouse /usr/bin/ + mv package_folder/clickhouse.debug /usr/lib/debug/usr/bin/clickhouse.debug + export ZOOKEEPER_FAULT_INJECTION=1 configure start 500 clickhouse-client --query "SELECT 'Backward compatibility check: Server successfully started', 'OK'" >> /test_output/test_results.tsv \ @@ -464,6 +480,7 @@ else -e "[Queue = DB::MergeMutateRuntimeQueue]: Code: 235. DB::Exception: Part" \ -e "The set of parts restored in place of" \ -e "(ReplicatedMergeTreeAttachThread): Initialization failed. Error" \ + -e "Code: 269. DB::Exception: Destination table is myself" \ /var/log/clickhouse-server/clickhouse-server.backward.clean.log | zgrep -Fa "" > /test_output/bc_check_error_messages.txt \ && echo -e 'Backward compatibility check: Error message in clickhouse-server.log (see bc_check_error_messages.txt)\tFAIL' >> /test_output/test_results.tsv \ || echo -e 'Backward compatibility check: No Error messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv diff --git a/docker/test/stress/stress b/docker/test/stress/stress index 7f3f38bd8f5..a0ec86f7fbe 100755 --- a/docker/test/stress/stress +++ b/docker/test/stress/stress @@ -286,9 +286,7 @@ if __name__ == "__main__": # But right now it should work, since neither hung check, nor 00001_select_1 has GROUP BY. 
"--client-option", "max_untracked_memory=1Gi", - "--client-option", "max_memory_usage_for_user=0", - "--client-option", "memory_profiler_step=1Gi", # Use system database to avoid CREATE/DROP DATABASE queries "--database=system", diff --git a/docker/test/util/Dockerfile b/docker/test/util/Dockerfile index 57880bfc1d6..57544bdc090 100644 --- a/docker/test/util/Dockerfile +++ b/docker/test/util/Dockerfile @@ -5,6 +5,7 @@ FROM ubuntu:20.04 ARG apt_archive="http://archive.ubuntu.com" RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list +# 15.0.2 ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=15 RUN apt-get update \ @@ -58,6 +59,9 @@ RUN apt-get update \ RUN ln -s /usr/bin/lld-${LLVM_VERSION} /usr/bin/ld.lld # for external_symbolizer_path RUN ln -s /usr/bin/llvm-symbolizer-${LLVM_VERSION} /usr/bin/llvm-symbolizer +# FIXME: workaround for "The imported target "merge-fdata" references the file" error +# https://salsa.debian.org/pkg-llvm-team/llvm-toolchain/-/commit/992e52c0b156a5ba9c6a8a54f8c4857ddd3d371d +RUN sed -i '/_IMPORT_CHECK_FILES_FOR_\(mlir-\|llvm-bolt\|merge-fdata\|MLIR\)/ {s|^|#|}' /usr/lib/llvm-${LLVM_VERSION}/lib/cmake/llvm/LLVMExports-*.cmake ARG CCACHE_VERSION=4.6.1 RUN mkdir /tmp/ccache \ diff --git a/docs/README.md b/docs/README.md index fa8b6bed85c..3ca87dc03c3 100644 --- a/docs/README.md +++ b/docs/README.md @@ -212,4 +212,4 @@ Templates: ## How to Build Documentation -You can build your documentation manually by following the instructions in [docs/tools/README.md](../docs/tools/README.md). Also, our CI runs the documentation build after the `documentation` label is added to PR. You can see the results of a build in the GitHub interface. If you have no permissions to add labels, a reviewer of your PR will add it. +You can build your documentation manually by following the instructions in the docs repo [contrib-writing-guide](https://github.com/ClickHouse/clickhouse-docs/blob/main/contrib-writing-guide.md). Also, our CI runs the documentation build after the `documentation` label is added to PR. You can see the results of a build in the GitHub interface. If you have no permissions to add labels, a reviewer of your PR will add it. diff --git a/docs/changelogs/v22.10.1.1877-stable.md b/docs/changelogs/v22.10.1.1877-stable.md new file mode 100644 index 00000000000..77e540ce928 --- /dev/null +++ b/docs/changelogs/v22.10.1.1877-stable.md @@ -0,0 +1,352 @@ +--- +sidebar_position: 1 +sidebar_label: 2022 +--- + +# 2022 Changelog + +### ClickHouse release v22.10.1.1877-stable (98ab5a3c189) FIXME as compared to v22.9.1.2603-stable (3030d4c7ff0) + +#### Backward Incompatible Change +* Rename cache commands: `show caches` -> `show filesystem caches`, `describe cache` -> `describe filesystem cache`. [#41508](https://github.com/ClickHouse/ClickHouse/pull/41508) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Remove support for the `WITH TIMEOUT` section for `LIVE VIEW`. This closes [#40557](https://github.com/ClickHouse/ClickHouse/issues/40557). [#42173](https://github.com/ClickHouse/ClickHouse/pull/42173) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### New Feature +* Add Rust code support into ClickHouse with BLAKE3 hash-function library as an example. [#33435](https://github.com/ClickHouse/ClickHouse/pull/33435) ([BoloniniD](https://github.com/BoloniniD)). +* This is the initial implement of Kusto Query Language. (MVP). [#37961](https://github.com/ClickHouse/ClickHouse/pull/37961) ([Yong Wang](https://github.com/kashwy)). 
+* Support limiting of temporary data stored on disk using settings `max_temporary_data_on_disk_size_for_user`/`max_temporary_data_on_disk_size_for_query`. [#40893](https://github.com/ClickHouse/ClickHouse/pull/40893) ([Vladimir C](https://github.com/vdimir)).
+* Support Java integers hashing in `javaHash`. [#41131](https://github.com/ClickHouse/ClickHouse/pull/41131) ([JackyWoo](https://github.com/JackyWoo)).
+* Support an OpenSSL in-house build, similar to the BoringSSL submodule. The build flag ENABLE_CH_BUNDLE_BORINGSSL is used to choose between BoringSSL and OpenSSL; by default, the BoringSSL in-house build is used. [#41142](https://github.com/ClickHouse/ClickHouse/pull/41142) ([MeenaRenganathan22](https://github.com/MeenaRenganathan22)).
+* Composable protocol configuration is added. [#41198](https://github.com/ClickHouse/ClickHouse/pull/41198) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
+* Add OpenTelemetry support to ON CLUSTER DDL (requires `distributed_ddl_entry_format_version` to be set to 4). [#41484](https://github.com/ClickHouse/ClickHouse/pull/41484) ([Frank Chen](https://github.com/FrankChen021)).
+* Add setting `format_json_object_each_row_column_for_object_name` to write/parse object name as column value in JSONObjectEachRow format. [#41703](https://github.com/ClickHouse/ClickHouse/pull/41703) ([Kruglov Pavel](https://github.com/Avogar)).
+* Add Morton coding (Z-curve) encode/decode functions. [#41753](https://github.com/ClickHouse/ClickHouse/pull/41753) ([Constantine Peresypkin](https://github.com/pkit)).
+* Implement support for different UUID binary formats with support for the two most prevalent ones: the default big-endian and Microsoft's mixed-endian as specified in [RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122#section-4.1.1). [#42108](https://github.com/ClickHouse/ClickHouse/pull/42108) ([ltrk2](https://github.com/ltrk2)).
+* Added an aggregate function `analysisOfVariance` (`anova`) to perform a statistical test over several groups of normally distributed observations to find out whether all groups have the same mean or not. Original PR [#37872](https://github.com/ClickHouse/ClickHouse/issues/37872). [#42131](https://github.com/ClickHouse/ClickHouse/pull/42131) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Add support for `SET setting_name = DEFAULT`. [#42187](https://github.com/ClickHouse/ClickHouse/pull/42187) ([Filatenkov Artur](https://github.com/FArthur-cmd)).
+* Add `URL` functions which conform to the RFC. Functions include: `cutToFirstSignificantSubdomainCustomRFC`, `cutToFirstSignificantSubdomainCustomWithWWWRFC`, `cutToFirstSignificantSubdomainRFC`, `cutToFirstSignificantSubdomainWithWWWRFC`, `domainRFC`, `domainWithoutWWWRFC`, `firstSignificantSubdomainCustomRFC`, `firstSignificantSubdomainRFC`, `portRFC`, `topLevelDomainRFC`. [#42274](https://github.com/ClickHouse/ClickHouse/pull/42274) ([Quanfa Fu](https://github.com/dentiscalprum)).
+* Added functions (`randUniform`, `randNormal`, `randLogNormal`, `randExponential`, `randChiSquared`, `randStudentT`, `randFisherF`, `randBernoulli`, `randBinomial`, `randNegativeBinomial`, `randPoisson`) to generate random values according to the specified distributions. This closes [#21834](https://github.com/ClickHouse/ClickHouse/issues/21834). [#42411](https://github.com/ClickHouse/ClickHouse/pull/42411) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
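The random-distribution functions listed just above are easiest to grasp with a concrete call. The snippet below is a minimal, hypothetical sketch and not part of this PR: it assumes a local ClickHouse server with the default HTTP interface on `localhost:8123` and a passwordless default user, and simply sends a `SELECT` over HTTP from Python.

```python
# Hypothetical usage sketch for the new random-distribution functions
# (randUniform, randNormal, randExponential). Assumes a local ClickHouse
# server listening on the default HTTP port 8123 with a passwordless user.
import urllib.request


def run_query(sql: str, host: str = "http://localhost:8123") -> str:
    """Send one statement to ClickHouse over HTTP (query in the POST body) and return the raw response."""
    with urllib.request.urlopen(host + "/", data=sql.encode("utf-8")) as response:
        return response.read().decode("utf-8").strip()


if __name__ == "__main__":
    # Draw one sample from a few of the distributions added in #42411.
    print(run_query("SELECT randUniform(0, 1), randNormal(0, 1), randExponential(0.1)"))
```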
+
+#### Performance Improvement
+* Implement operator precedence element parser to resolve stack overflow issues and make the required stack size smaller. [#34892](https://github.com/ClickHouse/ClickHouse/pull/34892) ([Nikolay Degterinsky](https://github.com/evillique)).
+* The DISTINCT in order optimization leverages sorting properties of data streams. This improvement will enable reading in order for DISTINCT if applicable (before it was necessary to provide ORDER BY for columns in DISTINCT). [#41014](https://github.com/ClickHouse/ClickHouse/pull/41014) ([Igor Nikonov](https://github.com/devcrafter)).
+* ColumnVector: optimize UInt8 index with AVX512VBMI. [#41247](https://github.com/ClickHouse/ClickHouse/pull/41247) ([Guo Wangyang](https://github.com/guowangy)).
+* The performance experiments of **SSB** (Star Schema Benchmark) on the ICX device (Intel Xeon Platinum 8380 CPU, 80 cores, 160 threads) show that this change could bring a **2.95x** improvement of the geomean of all subcases' QPS. [#41675](https://github.com/ClickHouse/ClickHouse/pull/41675) ([Zhiguo Zhou](https://github.com/ZhiguoZh)).
+* Fixed slowness in JSONExtract with LowCardinality(String) tuples. [#41726](https://github.com/ClickHouse/ClickHouse/pull/41726) ([AlfVII](https://github.com/AlfVII)).
+* Add ldapr capabilities to AArch64 builds. This is supported from Graviton 2+, Azure and GCP instances. Only appeared in clang-15 [not so long ago](https://github.com/llvm/llvm-project/commit/9609b5daffe9fd28d83d83da895abc5113f76c24). [#41778](https://github.com/ClickHouse/ClickHouse/pull/41778) ([Daniel Kutenin](https://github.com/danlark1)).
+* Improve performance when comparing strings and one argument is an empty constant string. [#41870](https://github.com/ClickHouse/ClickHouse/pull/41870) ([Jiebin Sun](https://github.com/jiebinn)).
+* Optimize insertFrom of ColumnAggregateFunction to share Aggregate State in some cases. [#41960](https://github.com/ClickHouse/ClickHouse/pull/41960) ([flynn](https://github.com/ucasfl)).
+* Relax the "Too many parts" threshold. This closes [#6551](https://github.com/ClickHouse/ClickHouse/issues/6551). Now ClickHouse will allow more parts in a partition if the average part size is large enough (at least 10 GiB). This allows having up to petabytes of data in a single partition of a single table on a single server, which is possible using disk shelves or object storage. [#42002](https://github.com/ClickHouse/ClickHouse/pull/42002) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Make writing to AzureBlobStorage more efficient (respect `max_single_part_upload_size` instead of writing a block per each buffer size). Inefficiency mentioned in [#41754](https://github.com/ClickHouse/ClickHouse/issues/41754). [#42041](https://github.com/ClickHouse/ClickHouse/pull/42041) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Make thread ids in the process list and query_log unique to avoid waste. [#42180](https://github.com/ClickHouse/ClickHouse/pull/42180) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+
+#### Improvement
+* Added new infrastructure for query analysis and planning under the `allow_experimental_analyzer` setting. [#31796](https://github.com/ClickHouse/ClickHouse/pull/31796) ([Maksim Kita](https://github.com/kitaisreal)).
+* Support expression `(EXPLAIN SELECT ...)` in a subquery. Queries like `SELECT * FROM (EXPLAIN PIPELINE SELECT col FROM TABLE ORDER BY col)` became valid. [#40630](https://github.com/ClickHouse/ClickHouse/pull/40630) ([Vladimir C](https://github.com/vdimir)).
+* Currently, changing `async_insert_max_data_size` or `async_insert_busy_timeout_ms` in the scope of a query makes no sense and this leads to a bad user experience, e.g. a user wants to insert data rarely and doesn't have access to the server config to tune default settings. [#40668](https://github.com/ClickHouse/ClickHouse/pull/40668) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Embedded Keeper will always start in the background, allowing ClickHouse to start without achieving quorum. [#40991](https://github.com/ClickHouse/ClickHouse/pull/40991) ([Antonio Andelic](https://github.com/antonio2368)).
+* Improvements for reading from remote filesystems, made threadpool size for reads/writes configurable. Closes [#41070](https://github.com/ClickHouse/ClickHouse/issues/41070). [#41011](https://github.com/ClickHouse/ClickHouse/pull/41011) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Made reestablishing a new connection more reactive in case of expiration of the previous one. Previously there was a task which spawns every minute by default and thus a table could be in readonly state for about this time. [#41092](https://github.com/ClickHouse/ClickHouse/pull/41092) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Support all combinator combinations in WindowTransform/arrayReduce*/initializeAggregation/aggregate functions versioning. Previously combinators like `ForEach/Resample/Map` didn't work in these places, using them led to an exception like `State function ... inserts results into non-state column`. [#41107](https://github.com/ClickHouse/ClickHouse/pull/41107) ([Kruglov Pavel](https://github.com/Avogar)).
+* Now projections can be used with zero copy replication. [#41147](https://github.com/ClickHouse/ClickHouse/pull/41147) ([alesapin](https://github.com/alesapin)).
+* Add function tryDecrypt that returns NULL when decryption fails (e.g. decrypting with an incorrect key) instead of throwing an exception. [#41206](https://github.com/ClickHouse/ClickHouse/pull/41206) ([Duc Canh Le](https://github.com/canhld94)).
+* Add the `unreserved_space` column to the `system.disks` table to check how much space is not taken by reservations per disk. [#41254](https://github.com/ClickHouse/ClickHouse/pull/41254) ([filimonov](https://github.com/filimonov)).
+* Support S3 authorization headers from AST arguments. [#41261](https://github.com/ClickHouse/ClickHouse/pull/41261) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Add setting 'allow_implicit_no_password' that forbids creating a user with no password unless 'IDENTIFIED WITH no_password' is explicitly specified. [#41341](https://github.com/ClickHouse/ClickHouse/pull/41341) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Keeper improvement: add support for uploading snapshots to S3. S3 information can be defined inside `keeper_server.s3_snapshot`. [#41342](https://github.com/ClickHouse/ClickHouse/pull/41342) ([Antonio Andelic](https://github.com/antonio2368)).
+* Add support for MultiRead in Keeper and internal ZooKeeper client. [#41410](https://github.com/ClickHouse/ClickHouse/pull/41410) ([Antonio Andelic](https://github.com/antonio2368)).
+* Add support for comparing a Decimal type with a floating-point literal in the IN operator. [#41544](https://github.com/ClickHouse/ClickHouse/pull/41544) ([liang.huang](https://github.com/lhuang09287750)).
+* Allow readable size values in cache config. [#41688](https://github.com/ClickHouse/ClickHouse/pull/41688) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Check file path for path traversal attacks in errors logger for input formats. [#41694](https://github.com/ClickHouse/ClickHouse/pull/41694) ([Kruglov Pavel](https://github.com/Avogar)).
+* ClickHouse could cache stale DNS entries for some period of time (15 seconds by default) until the cache is updated asynchronously. During this period ClickHouse could nevertheless try to establish a connection and produce errors. This behaviour is fixed. [#41707](https://github.com/ClickHouse/ClickHouse/pull/41707) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Add interactive history search with fzf-like utility (fzf/sk) for `clickhouse-client`/`clickhouse-local` (note you can use `FZF_DEFAULT_OPTS`/`SKIM_DEFAULT_OPTIONS` to additionally configure the behavior). [#41730](https://github.com/ClickHouse/ClickHouse/pull/41730) ([Azat Khuzhin](https://github.com/azat)).
+* For the client, when connecting to a secure server with an invalid certificate, only allow proceeding with the '--accept-certificate' flag. [#41743](https://github.com/ClickHouse/ClickHouse/pull/41743) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
+* Add function "tryBase58Decode()", similar to the existing function "tryBase64Decode()". [#41824](https://github.com/ClickHouse/ClickHouse/pull/41824) ([Robert Schulze](https://github.com/rschu1ze)).
+* Improve feedback when replacing a partition with a different primary key. Fixes [#34798](https://github.com/ClickHouse/ClickHouse/issues/34798). [#41838](https://github.com/ClickHouse/ClickHouse/pull/41838) ([Salvatore](https://github.com/tbsal)).
+* Replace back `clickhouse su` command with `sudo -u` in start in order to respect limits in `/etc/security/limits.conf`. [#41847](https://github.com/ClickHouse/ClickHouse/pull/41847) ([Eugene Konkov](https://github.com/ekonkov)).
+* Fix parallel parsing: the segmentator now checks max_block_size. [#41852](https://github.com/ClickHouse/ClickHouse/pull/41852) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Don't report a TABLE_IS_DROPPED exception in order to skip a table in case it was just dropped. [#41908](https://github.com/ClickHouse/ClickHouse/pull/41908) ([AlfVII](https://github.com/AlfVII)).
+* Improve option enable_extended_results_for_datetime_functions to return results of type DateTime64 for functions toStartOfDay, toStartOfHour, toStartOfFifteenMinutes, toStartOfTenMinutes, toStartOfFiveMinutes, toStartOfMinute and timeSlot. [#41910](https://github.com/ClickHouse/ClickHouse/pull/41910) ([Roman Vasin](https://github.com/rvasin)).
+* Improve DateTime type inference for text formats. Now it respects the setting `date_time_input_format` and doesn't try to infer datetimes from numbers as timestamps. Closes [#41389](https://github.com/ClickHouse/ClickHouse/issues/41389) Closes [#42206](https://github.com/ClickHouse/ClickHouse/issues/42206). [#41912](https://github.com/ClickHouse/ClickHouse/pull/41912) ([Kruglov Pavel](https://github.com/Avogar)).
+* Remove confusing warning when inserting with `perform_ttl_move_on_insert`=false. [#41980](https://github.com/ClickHouse/ClickHouse/pull/41980) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Allow users to write `countState(*)` similar to `count(*)`. This closes [#9338](https://github.com/ClickHouse/ClickHouse/issues/9338). [#41983](https://github.com/ClickHouse/ClickHouse/pull/41983) ([Amos Bird](https://github.com/amosbird)).
+* Fix rankCorr size overflow. [#42020](https://github.com/ClickHouse/ClickHouse/pull/42020) ([Duc Canh Le](https://github.com/canhld94)).
+* Added an option to specify an arbitrary string as an environment name in the Sentry's config for more handy reports. [#42037](https://github.com/ClickHouse/ClickHouse/pull/42037) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Added system table `asynchronous_insert_log`. It contains information about asynchronous inserts (including results of queries in fire-and-forget mode (with `wait_for_async_insert=0`)) for better introspection. [#42040](https://github.com/ClickHouse/ClickHouse/pull/42040) ([Anton Popov](https://github.com/CurtizJ)).
+* Fix parsing out-of-range Date from CSV. [#42044](https://github.com/ClickHouse/ClickHouse/pull/42044) ([Andrey Zvonov](https://github.com/zvonand)).
+* parseDateTimeBestEffort supports a comma between date and time. Closes [#42038](https://github.com/ClickHouse/ClickHouse/issues/42038). [#42049](https://github.com/ClickHouse/ClickHouse/pull/42049) ([flynn](https://github.com/ucasfl)).
+* Add support for methods lz4, bz2, snappy in 'Accept-Encoding'. [#42071](https://github.com/ClickHouse/ClickHouse/pull/42071) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Various minor fixes for BLAKE3 function. [#42073](https://github.com/ClickHouse/ClickHouse/pull/42073) ([BoloniniD](https://github.com/BoloniniD)).
+* Improved stale replica recovery process for `ReplicatedMergeTree`. If a lost replica has some parts which are absent from a healthy replica, but these parts should appear in the future according to the replication queue of the healthy replica, then the lost replica will keep such parts instead of detaching them. [#42134](https://github.com/ClickHouse/ClickHouse/pull/42134) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* Support BACKUP to S3 with an as-is path/data structure. [#42232](https://github.com/ClickHouse/ClickHouse/pull/42232) ([Azat Khuzhin](https://github.com/azat)).
+* Add the possibility to use Date32 arguments for the date_diff function. Fix an issue in the date_diff function when using DateTime64 arguments with a start date before the Unix epoch and an end date after the Unix epoch. [#42308](https://github.com/ClickHouse/ClickHouse/pull/42308) ([Roman Vasin](https://github.com/rvasin)).
+* When uploading big parts to MinIO, 'Complete Multipart Upload' can take a long time. MinIO sends heartbeats every 10 seconds (see https://github.com/minio/minio/pull/7198). But ClickHouse times out earlier, because the default send/receive timeout is [set](https://github.com/ClickHouse/ClickHouse/blob/cc24fcd6d5dfb67f5f66f5483e986bd1010ad9cf/src/IO/S3/PocoHTTPClient.cpp#L123) to 5 seconds. [#42321](https://github.com/ClickHouse/ClickHouse/pull/42321) ([filimonov](https://github.com/filimonov)).
+* Add `S3` as a new type of the destination of backups. [#42333](https://github.com/ClickHouse/ClickHouse/pull/42333) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Fix rarely invalid cast of aggregate state types with complex types such as Decimal. This fixes [#42408](https://github.com/ClickHouse/ClickHouse/issues/42408). [#42417](https://github.com/ClickHouse/ClickHouse/pull/42417) ([Amos Bird](https://github.com/amosbird)).
+* Support skipping the cache completely (both download to cache and reading cached data) in case the requested read range exceeds the threshold defined by the cache setting `bypass_cache_threashold` (requires to be enabled with `enable_bypass_cache_with_threshold`). [#42418](https://github.com/ClickHouse/ClickHouse/pull/42418) ([Han Shukai](https://github.com/KinderRiven)).
+* Merge parts if every part in the range is older than a certain threshold. The threshold can be set by using `min_age_to_force_merge_seconds`. This closes [#35836](https://github.com/ClickHouse/ClickHouse/issues/35836). [#42423](https://github.com/ClickHouse/ClickHouse/pull/42423) ([Antonio Andelic](https://github.com/antonio2368)).
+* Enabled CompiledExpressionCache in clickhouse-local. [#42477](https://github.com/ClickHouse/ClickHouse/pull/42477) ([AlfVII](https://github.com/AlfVII)).
+* Remove support for the `{database}` macro from the client's prompt. It was displayed incorrectly if the database was unspecified and it was not updated on `USE` statements. This closes [#25891](https://github.com/ClickHouse/ClickHouse/issues/25891). [#42508](https://github.com/ClickHouse/ClickHouse/pull/42508) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Improve the time to recover lost Keeper connections. [#42541](https://github.com/ClickHouse/ClickHouse/pull/42541) ([Raúl Marín](https://github.com/Algunenano)).
+* Allow using Date32 arguments for the dateName function. [#42554](https://github.com/ClickHouse/ClickHouse/pull/42554) ([Roman Vasin](https://github.com/rvasin)).
+
+#### Bug Fix
+* Now filters with NULL literals will be used during index analysis. This closes https://github.com/ClickHouse/ClickHouse/pull/41814 [#34063](https://github.com/ClickHouse/ClickHouse/issues/34063). [#41842](https://github.com/ClickHouse/ClickHouse/pull/41842) ([Amos Bird](https://github.com/amosbird)).
+* Choose the correct aggregation method for LowCardinality with BigInt. [#42342](https://github.com/ClickHouse/ClickHouse/pull/42342) ([Duc Canh Le](https://github.com/canhld94)).
+* Fix using subqueries in row policy filters. This PR fixes [#32463](https://github.com/ClickHouse/ClickHouse/issues/32463). [#42562](https://github.com/ClickHouse/ClickHouse/pull/42562) ([Vitaly Baranov](https://github.com/vitlibar)).
+
+#### Build/Testing/Packaging Improvement
+* Added support for WHERE clause generation to the AST Fuzzer and the possibility to add or remove ORDER BY and WHERE clauses. [#38519](https://github.com/ClickHouse/ClickHouse/pull/38519) ([Ilya Yatsishin](https://github.com/qoega)).
+* Aarch64 binaries now require at least ARMv8.2, released in 2016. Most notably, this enables use of ARM LSE, i.e. native atomic operations. Also, CMake build option "NO_ARMV81_OR_HIGHER" has been added to allow compilation of binaries for older ARMv8.0 hardware, e.g. Raspberry Pi 4. [#41610](https://github.com/ClickHouse/ClickHouse/pull/41610) ([Robert Schulze](https://github.com/rschu1ze)).
+* After updating runners to 22.04, cgroups stopped working in privileged mode, here's the issue https://github.com/moby/moby/issues/42275#issuecomment-1115055846. [#41857](https://github.com/ClickHouse/ClickHouse/pull/41857) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Allow building ClickHouse with Musl (small changes after it was already supported but broken). [#41987](https://github.com/ClickHouse/ClickHouse/pull/41987) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Add a check for the `$CLICKHOUSE_CRONFILE` file to avoid running the `sed` command on a missing file and getting a "file not found" error. [#42081](https://github.com/ClickHouse/ClickHouse/pull/42081) ([Chun-Sheng, Li](https://github.com/peter279k)).
+* Update cctz to the latest master, update tzdb to 2020e.
[#42273](https://github.com/ClickHouse/ClickHouse/pull/42273) ([Dom Del Nano](https://github.com/ddelnano)). +* Update tzdata to 2022e to support the new timezone changes. Palestine transitions are now Saturdays at 02:00. Simplify three Ukraine zones into one. Jordan and Syria switch from +02/+03 with DST to year-round +03. (https://data.iana.org/time-zones/tzdb/NEWS). This closes [#42252](https://github.com/ClickHouse/ClickHouse/issues/42252). [#42327](https://github.com/ClickHouse/ClickHouse/pull/42327) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix power8 support. [#42462](https://github.com/ClickHouse/ClickHouse/pull/42462) ([Boris Kuschel](https://github.com/bkuschel)). + +#### Bug Fix (user-visible misbehavior in official stable or prestable release) + +* Several fixes for DiskWeb. [#41652](https://github.com/ClickHouse/ClickHouse/pull/41652) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fixes issue when docker run will fail if "https_port" is not present in config. [#41693](https://github.com/ClickHouse/ClickHouse/pull/41693) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Mutations were not cancelled properly on server shutdown or `SYSTEM STOP MERGES` query and cancellation might take long time, it's fixed. [#41699](https://github.com/ClickHouse/ClickHouse/pull/41699) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix wrong result of queries with `ORDER BY` or `GROUP BY` by columns from prefix of sorting key, wrapped into monotonic functions, with enable "read in order" optimization (settings `optimize_read_in_order` and `optimize_aggregation_in_order`). [#41701](https://github.com/ClickHouse/ClickHouse/pull/41701) ([Anton Popov](https://github.com/CurtizJ)). +* Fix possible crash in `SELECT` from `Merge` table with enabled `optimize_monotonous_functions_in_order_by` setting. Fixes [#41269](https://github.com/ClickHouse/ClickHouse/issues/41269). [#41740](https://github.com/ClickHouse/ClickHouse/pull/41740) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fixed "Part ... intersects part ..." error that might happen in extremely rare cases if replica was restarted just after detaching some part as broken. [#41741](https://github.com/ClickHouse/ClickHouse/pull/41741) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Don't allow to create or alter merge tree tables with virtual column name _row_exists, which is reserved for lightweight delete. Fixed [#41716](https://github.com/ClickHouse/ClickHouse/issues/41716). [#41763](https://github.com/ClickHouse/ClickHouse/pull/41763) ([Jianmei Zhang](https://github.com/zhangjmruc)). +* Fix a bug that CORS headers are missing in some HTTP responses. [#41792](https://github.com/ClickHouse/ClickHouse/pull/41792) ([Frank Chen](https://github.com/FrankChen021)). +* 22.9 might fail to startup `ReplicatedMergeTree` table if that table was created by 20.3 or older version and was never altered, it's fixed. Fixes [#41742](https://github.com/ClickHouse/ClickHouse/issues/41742). [#41796](https://github.com/ClickHouse/ClickHouse/pull/41796) ([Alexander Tokmakov](https://github.com/tavplubix)). +* When the batch sending fails for some reason, it cannot be automatically recovered, and if it is not processed in time, it will lead to accumulation, and the printed error message will become longer and longer, which will cause the http thread to block. [#41813](https://github.com/ClickHouse/ClickHouse/pull/41813) ([zhongyuankai](https://github.com/zhongyuankai)). 
+* Fix compact parts with compressed marks setting. Fixes [#41783](https://github.com/ClickHouse/ClickHouse/issues/41783) and [#41746](https://github.com/ClickHouse/ClickHouse/issues/41746). [#41823](https://github.com/ClickHouse/ClickHouse/pull/41823) ([alesapin](https://github.com/alesapin)).
+* Old versions of the Replicated database don't have a special marker in [Zoo]Keeper. We need to check only whether the node contains some obscure data instead of a special mark. [#41875](https://github.com/ClickHouse/ClickHouse/pull/41875) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Fix possible exception in fs cache. [#41884](https://github.com/ClickHouse/ClickHouse/pull/41884) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Fix use_environment_credentials for the s3 table function. [#41970](https://github.com/ClickHouse/ClickHouse/pull/41970) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Fixed "Directory already exists and is not empty" error on detaching a broken part that might prevent a `ReplicatedMergeTree` table from starting replication. Fixes [#40957](https://github.com/ClickHouse/ClickHouse/issues/40957). [#41981](https://github.com/ClickHouse/ClickHouse/pull/41981) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* toDateTime64() now returns the same output with negative integer and float arguments. [#42025](https://github.com/ClickHouse/ClickHouse/pull/42025) ([Robert Schulze](https://github.com/rschu1ze)).
+* Fix write into AzureBlobStorage. Partially closes [#41754](https://github.com/ClickHouse/ClickHouse/issues/41754). [#42034](https://github.com/ClickHouse/ClickHouse/pull/42034) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Fix the bzip2 decoding issue for specific bzip2 files. [#42046](https://github.com/ClickHouse/ClickHouse/pull/42046) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Fix SQL function "toLastDayOfMonth()" with setting "enable_extended_results_for_datetime_functions = 1" at the beginning of the extended range (January 1900). Fix SQL function "toRelativeWeekNum()" with setting "enable_extended_results_for_datetime_functions = 1" at the end of the extended range (December 2299). Improve the performance of SQL functions "toISOYear()", "toFirstDayNumOfISOYearIndex()" and "toYearWeekOfNewyearMode()" by avoiding unnecessary index arithmetics. [#42084](https://github.com/ClickHouse/ClickHouse/pull/42084) ([Roman Vasin](https://github.com/rvasin)).
+* The maximum size of fetches for each table was accidentally set to 8 while the pool size could be bigger. Now the maximum size of fetches for each table is equal to the pool size. [#42090](https://github.com/ClickHouse/ClickHouse/pull/42090) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* A table might be shut down and a dictionary might be detached before checking if it can be dropped without breaking dependencies between tables; it's fixed. Fixes [#41982](https://github.com/ClickHouse/ClickHouse/issues/41982). [#42106](https://github.com/ClickHouse/ClickHouse/pull/42106) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* Fix bad inefficiency of `remote_filesystem_read_method=read` with filesystem cache. Closes [#42125](https://github.com/ClickHouse/ClickHouse/issues/42125). [#42129](https://github.com/ClickHouse/ClickHouse/pull/42129) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Fix possible timeout exception for distributed queries with use_hedged_requests=0. [#42130](https://github.com/ClickHouse/ClickHouse/pull/42130) ([Azat Khuzhin](https://github.com/azat)).
+* Fixed a minor bug inside function `runningDifference` in case of using it with the `Date32` type. Previously `Date` was used and it could cause some logical errors like `Bad cast from type DB::ColumnVector to DB::ColumnVector'`. [#42143](https://github.com/ClickHouse/ClickHouse/pull/42143) ([Alfred Xu](https://github.com/sperlingxx)).
+* Fix reusing of files > 4GB from base backup. [#42146](https://github.com/ClickHouse/ClickHouse/pull/42146) ([Azat Khuzhin](https://github.com/azat)).
+* Fix DISTINCT in order failing with LOGICAL_ERROR if the first column in the sorting key contains a function. [#42186](https://github.com/ClickHouse/ClickHouse/pull/42186) ([Igor Nikonov](https://github.com/devcrafter)).
+* Fix a bug with projections and the `aggregate_functions_null_for_empty` setting. This bug is very rare and appears only if you enable the `aggregate_functions_null_for_empty` setting in the server's config. This closes [#41647](https://github.com/ClickHouse/ClickHouse/issues/41647). [#42198](https://github.com/ClickHouse/ClickHouse/pull/42198) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Fix read from buffer with read in order desc. [#42236](https://github.com/ClickHouse/ClickHouse/pull/42236) ([Duc Canh Le](https://github.com/canhld94)).
+* Fix a bug which prevents ClickHouse from starting when the background_pool_size setting is set in the default profile but background_merges_mutations_concurrency_ratio is not. [#42315](https://github.com/ClickHouse/ClickHouse/pull/42315) ([nvartolomei](https://github.com/nvartolomei)).
+* `ALTER UPDATE` of an attached part (with columns different from the table schema) could create invalid `columns.txt` metadata on disk. Reading from such a part could fail with errors or return invalid data. Fixes [#42161](https://github.com/ClickHouse/ClickHouse/issues/42161). [#42319](https://github.com/ClickHouse/ClickHouse/pull/42319) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* The setting `additional_table_filters` was not applied to `Distributed` storage. Fixes [#41692](https://github.com/ClickHouse/ClickHouse/issues/41692). [#42322](https://github.com/ClickHouse/ClickHouse/pull/42322) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fix a data race in query finish/cancel. This closes [#42346](https://github.com/ClickHouse/ClickHouse/issues/42346). [#42362](https://github.com/ClickHouse/ClickHouse/pull/42362) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* This reverts [#40217](https://github.com/ClickHouse/ClickHouse/issues/40217) which introduced a regression in date/time functions. [#42367](https://github.com/ClickHouse/ClickHouse/pull/42367) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Fix assert cast in join on falsy condition, close [#42380](https://github.com/ClickHouse/ClickHouse/issues/42380). [#42407](https://github.com/ClickHouse/ClickHouse/pull/42407) ([Vladimir C](https://github.com/vdimir)).
+* Fix buffer overflow in the processing of Decimal data types. This closes [#42451](https://github.com/ClickHouse/ClickHouse/issues/42451). [#42465](https://github.com/ClickHouse/ClickHouse/pull/42465) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* `AggregateFunctionQuantile` now correctly works with UInt128 columns. Previously, the quantile state interpreted `UInt128` columns as `Int128`, which could have led to incorrect results.
[#42473](https://github.com/ClickHouse/ClickHouse/pull/42473) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix bad_assert during INSERT into Annoy indexes over non-Float32 columns. [#42485](https://github.com/ClickHouse/ClickHouse/pull/42485) ([Robert Schulze](https://github.com/rschu1ze)). +* This closes [#42453](https://github.com/ClickHouse/ClickHouse/issues/42453). [#42573](https://github.com/ClickHouse/ClickHouse/pull/42573) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix function `arrayElement` with type `Map` with `Nullable` values and `Nullable` index. [#42623](https://github.com/ClickHouse/ClickHouse/pull/42623) ([Anton Popov](https://github.com/CurtizJ)). + +#### Bug Fix (user-visible misbehaviour in official stable or prestable release) + +* Fix unexpected table loading error when partition key contains alias function names during server upgrade. [#36379](https://github.com/ClickHouse/ClickHouse/pull/36379) ([Amos Bird](https://github.com/amosbird)). + +#### Build Improvement + +* Fixed SipHash Endian issue for s390x platform. [#41372](https://github.com/ClickHouse/ClickHouse/pull/41372) ([Harry Lee](https://github.com/HarryLeeIBM)). +* Enable lib base64 for ppc64le platform. [#41974](https://github.com/ClickHouse/ClickHouse/pull/41974) ([Suzy Wang](https://github.com/SuzyWangIBMer)). +* Fixed Endian issue in T64 compression codec on s390x. [#42314](https://github.com/ClickHouse/ClickHouse/pull/42314) ([Harry Lee](https://github.com/HarryLeeIBM)). + +#### NO CL ENTRY + +* NO CL ENTRY: 'Revert "Disable parallel s3 multipart upload for part moves."'. [#41681](https://github.com/ClickHouse/ClickHouse/pull/41681) ([Alexander Tokmakov](https://github.com/tavplubix)). +* NO CL ENTRY: 'Revert "Attempt to fix abort from parallel parsing"'. [#42545](https://github.com/ClickHouse/ClickHouse/pull/42545) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* NO CL ENTRY: 'Revert "Low cardinality cases moved to the function for its corresponding type"'. [#42633](https://github.com/ClickHouse/ClickHouse/pull/42633) ([Anton Popov](https://github.com/CurtizJ)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Test for ignore function in PARTITION KEY [#39875](https://github.com/ClickHouse/ClickHouse/pull/39875) ([UnamedRus](https://github.com/UnamedRus)). +* Add fuzzer for table definitions [#40096](https://github.com/ClickHouse/ClickHouse/pull/40096) ([Anton Popov](https://github.com/CurtizJ)). +* Add missing tests for legacy geobase [#40684](https://github.com/ClickHouse/ClickHouse/pull/40684) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Remove obsolete comment from the config.xml [#41518](https://github.com/ClickHouse/ClickHouse/pull/41518) ([filimonov](https://github.com/filimonov)). +* Resurrect parallel distributed insert select with s3Cluster [#41535](https://github.com/ClickHouse/ClickHouse/pull/41535) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Update runners to a recent version to install on 22.04 [#41556](https://github.com/ClickHouse/ClickHouse/pull/41556) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Refactor wiping sensitive information from logs. [#41562](https://github.com/ClickHouse/ClickHouse/pull/41562) ([Vitaly Baranov](https://github.com/vitlibar)). +* Better S3 logs [#41587](https://github.com/ClickHouse/ClickHouse/pull/41587) ([Nikolay Degterinsky](https://github.com/evillique)). 
+* Fix typos in JSON formats after [#40910](https://github.com/ClickHouse/ClickHouse/issues/40910) [#41614](https://github.com/ClickHouse/ClickHouse/pull/41614) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix drop for KeeperMap [#41616](https://github.com/ClickHouse/ClickHouse/pull/41616) ([Antonio Andelic](https://github.com/antonio2368)). +* increase default max_suspicious_broken_parts to 100 [#41619](https://github.com/ClickHouse/ClickHouse/pull/41619) ([Denny Crane](https://github.com/den-crane)). +* Release AWS SDK log level + replace one exception [#41649](https://github.com/ClickHouse/ClickHouse/pull/41649) ([alesapin](https://github.com/alesapin)). +* Fix a destruction order for views ThreadStatus [#41650](https://github.com/ClickHouse/ClickHouse/pull/41650) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Add very explicit logging on disk choice for fetch [#41653](https://github.com/ClickHouse/ClickHouse/pull/41653) ([alesapin](https://github.com/alesapin)). +* Fix race between ~BackgroundSchedulePool and ~DNSCacheUpdater [#41654](https://github.com/ClickHouse/ClickHouse/pull/41654) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Add changelog for 22.9 [#41668](https://github.com/ClickHouse/ClickHouse/pull/41668) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Update version after release [#41670](https://github.com/ClickHouse/ClickHouse/pull/41670) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix error message [#41680](https://github.com/ClickHouse/ClickHouse/pull/41680) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Add test for setting output_format_json_validate_utf8 [#41691](https://github.com/ClickHouse/ClickHouse/pull/41691) ([Kruglov Pavel](https://github.com/Avogar)). +* Resolve findings from clang-tidy [#41702](https://github.com/ClickHouse/ClickHouse/pull/41702) ([ltrk2](https://github.com/ltrk2)). +* Ignore Keeper errors from ReplicatedMergeTreeAttachThread in stress tests [#41717](https://github.com/ClickHouse/ClickHouse/pull/41717) ([Antonio Andelic](https://github.com/antonio2368)). +* Collect logs in Stress test using clickhouse-local [#41721](https://github.com/ClickHouse/ClickHouse/pull/41721) ([Antonio Andelic](https://github.com/antonio2368)). +* Disable flaky `test_merge_tree_azure_blob_storage` [#41722](https://github.com/ClickHouse/ClickHouse/pull/41722) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Update version_date.tsv and changelogs after v22.9.2.7-stable [#41724](https://github.com/ClickHouse/ClickHouse/pull/41724) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Fix part removal retries [#41728](https://github.com/ClickHouse/ClickHouse/pull/41728) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Try fix azure tests [#41731](https://github.com/ClickHouse/ClickHouse/pull/41731) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix test build [#41732](https://github.com/ClickHouse/ClickHouse/pull/41732) ([Robert Schulze](https://github.com/rschu1ze)). +* Change logging levels in cache [#41733](https://github.com/ClickHouse/ClickHouse/pull/41733) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Revert of "Revert the revert of "ColumnVector: optimize filter with AVX512 VBMI2 compress store" [#40033](https://github.com/ClickHouse/ClickHouse/issues/40033)" [#41752](https://github.com/ClickHouse/ClickHouse/pull/41752) ([Alexey Milovidov](https://github.com/alexey-milovidov)). 
+* Fix SET query parameters formatting [#41755](https://github.com/ClickHouse/ClickHouse/pull/41755) ([Nikolay Degterinsky](https://github.com/evillique)). +* Support to run testcases on macOS [#41760](https://github.com/ClickHouse/ClickHouse/pull/41760) ([Frank Chen](https://github.com/FrankChen021)). +* Bump LLVM from 12 to 13 [#41762](https://github.com/ClickHouse/ClickHouse/pull/41762) ([Robert Schulze](https://github.com/rschu1ze)). +* ColumnVector: re-enable AVX512_VBMI/AVX512_VBMI2 optimized filter and index [#41765](https://github.com/ClickHouse/ClickHouse/pull/41765) ([Guo Wangyang](https://github.com/guowangy)). +* Update 02354_annoy.sql [#41767](https://github.com/ClickHouse/ClickHouse/pull/41767) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix the typo preventing building latest images [#41769](https://github.com/ClickHouse/ClickHouse/pull/41769) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Make automatic download script choose between ARMv8.0 or ARMv8.2 builds [#41775](https://github.com/ClickHouse/ClickHouse/pull/41775) ([Robert Schulze](https://github.com/rschu1ze)). +* Fix tests for docker-ci [#41777](https://github.com/ClickHouse/ClickHouse/pull/41777) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Possible fix for KeeperMap drop [#41784](https://github.com/ClickHouse/ClickHouse/pull/41784) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix drop of completely dropped table [#41789](https://github.com/ClickHouse/ClickHouse/pull/41789) ([alesapin](https://github.com/alesapin)). +* Log git hash during startup [#41790](https://github.com/ClickHouse/ClickHouse/pull/41790) ([Robert Schulze](https://github.com/rschu1ze)). +* Revert "ColumnVector: optimize UInt8 index with AVX512VBMI ([#41247](https://github.com/ClickHouse/ClickHouse/issues/41247))" [#41797](https://github.com/ClickHouse/ClickHouse/pull/41797) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Small fix in dashboard [#41798](https://github.com/ClickHouse/ClickHouse/pull/41798) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Keep the most important log in stress tests [#41821](https://github.com/ClickHouse/ClickHouse/pull/41821) ([alesapin](https://github.com/alesapin)). +* Use copy for some operations instead of hardlinks [#41832](https://github.com/ClickHouse/ClickHouse/pull/41832) ([alesapin](https://github.com/alesapin)). +* Remove unused variable in registerStorageMergeTree.cpp [#41839](https://github.com/ClickHouse/ClickHouse/pull/41839) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix Jepsen [#41845](https://github.com/ClickHouse/ClickHouse/pull/41845) ([Antonio Andelic](https://github.com/antonio2368)). +* Increase `request_timeout_ms` for s3 tests in CI [#41853](https://github.com/ClickHouse/ClickHouse/pull/41853) ([Kseniia Sumarokova](https://github.com/kssenii)). +* tests: fix debug symbols (and possible crashes) for backward compatiblity check [#41854](https://github.com/ClickHouse/ClickHouse/pull/41854) ([Azat Khuzhin](https://github.com/azat)). +* Remove two redundant lines [#41856](https://github.com/ClickHouse/ClickHouse/pull/41856) ([alesapin](https://github.com/alesapin)). +* Infer Object type only when allow_experimental_object_type is enabled [#41858](https://github.com/ClickHouse/ClickHouse/pull/41858) ([Kruglov Pavel](https://github.com/Avogar)). 
+* Add default UNION/EXCEPT/INTERSECT to the echo query text [#41862](https://github.com/ClickHouse/ClickHouse/pull/41862) ([Nikolay Degterinsky](https://github.com/evillique)). +* Consolidate CMake-generated config headers [#41873](https://github.com/ClickHouse/ClickHouse/pull/41873) ([Robert Schulze](https://github.com/rschu1ze)). +* Fix 02267_file_globs_schema_inference.sql flakiness [#41877](https://github.com/ClickHouse/ClickHouse/pull/41877) ([Kruglov Pavel](https://github.com/Avogar)). +* Docs: Remove obsolete modelEvaluate() mention [#41878](https://github.com/ClickHouse/ClickHouse/pull/41878) ([Robert Schulze](https://github.com/rschu1ze)). +* Better exception message for duplicate column names in schema inference [#41885](https://github.com/ClickHouse/ClickHouse/pull/41885) ([Kruglov Pavel](https://github.com/Avogar)). +* Docs: Reference external papers as DOIs [#41886](https://github.com/ClickHouse/ClickHouse/pull/41886) ([Robert Schulze](https://github.com/rschu1ze)). +* Make LDAPR a prerequisite for downloading the ARMv8.2 build [#41897](https://github.com/ClickHouse/ClickHouse/pull/41897) ([Robert Schulze](https://github.com/rschu1ze)). +* Another sync replicas in test_recovery_replica [#41898](https://github.com/ClickHouse/ClickHouse/pull/41898) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* remove unused code [#41921](https://github.com/ClickHouse/ClickHouse/pull/41921) ([flynn](https://github.com/ucasfl)). +* Move all queries for MV creation to the end of queue during recovering [#41932](https://github.com/ClickHouse/ClickHouse/pull/41932) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Fix broken test_disks_app_func [#41933](https://github.com/ClickHouse/ClickHouse/pull/41933) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Temporarily disable ThreadFuzzer with TSan [#41943](https://github.com/ClickHouse/ClickHouse/pull/41943) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Enable some disabled S3 tests [#41945](https://github.com/ClickHouse/ClickHouse/pull/41945) ([alesapin](https://github.com/alesapin)). +* QOL log improvements [#41947](https://github.com/ClickHouse/ClickHouse/pull/41947) ([Raúl Marín](https://github.com/Algunenano)). +* Fix non-deterministic test results [#41948](https://github.com/ClickHouse/ClickHouse/pull/41948) ([Robert Schulze](https://github.com/rschu1ze)). +* Earlier throw exception in PullingAsyncPipelineExecutor. [#41949](https://github.com/ClickHouse/ClickHouse/pull/41949) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix linker error [#41950](https://github.com/ClickHouse/ClickHouse/pull/41950) ([ltrk2](https://github.com/ltrk2)). +* Bump LLVM from 13 to 14 [#41951](https://github.com/ClickHouse/ClickHouse/pull/41951) ([Robert Schulze](https://github.com/rschu1ze)). +* Update version_date.tsv and changelogs after v22.3.13.80-lts [#41953](https://github.com/ClickHouse/ClickHouse/pull/41953) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Update version_date.tsv and changelogs after v22.7.6.74-stable [#41954](https://github.com/ClickHouse/ClickHouse/pull/41954) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Update version_date.tsv and changelogs after v22.8.6.71-lts [#41955](https://github.com/ClickHouse/ClickHouse/pull/41955) ([robot-clickhouse](https://github.com/robot-clickhouse)). 
+* Update version_date.tsv and changelogs after v22.9.3.18-stable [#41956](https://github.com/ClickHouse/ClickHouse/pull/41956) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Add a warning message to release.py script, require release type [#41975](https://github.com/ClickHouse/ClickHouse/pull/41975) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Rename max_temp_data_on_disk -> max_temporary_data_on_disk [#41984](https://github.com/ClickHouse/ClickHouse/pull/41984) ([Vladimir C](https://github.com/vdimir)). +* Add more checkStackSize calls [#41991](https://github.com/ClickHouse/ClickHouse/pull/41991) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix test 02403_big_http_chunk_size [#41996](https://github.com/ClickHouse/ClickHouse/pull/41996) ([Vitaly Baranov](https://github.com/vitlibar)). +* More sane behavior of part number thresholds override in query level settings [#42001](https://github.com/ClickHouse/ClickHouse/pull/42001) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Remove useless code [#42004](https://github.com/ClickHouse/ClickHouse/pull/42004) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Refactoring: Uninline some error handling methods [#42010](https://github.com/ClickHouse/ClickHouse/pull/42010) ([Robert Schulze](https://github.com/rschu1ze)). +* Fix warning that ENABLE_REPLXX is unused [#42013](https://github.com/ClickHouse/ClickHouse/pull/42013) ([Robert Schulze](https://github.com/rschu1ze)). +* Drop leftovers of libexecinfo [#42014](https://github.com/ClickHouse/ClickHouse/pull/42014) ([Robert Schulze](https://github.com/rschu1ze)). +* More detailed exception message [#42022](https://github.com/ClickHouse/ClickHouse/pull/42022) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Build against an LLVM version which has clang[-extra-tools], lldb and lld removed [#42023](https://github.com/ClickHouse/ClickHouse/pull/42023) ([Robert Schulze](https://github.com/rschu1ze)). +* Add log message and lower the retry timeout in MergeTreeRestartingThread [#42026](https://github.com/ClickHouse/ClickHouse/pull/42026) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Update amqp-cpp [#42031](https://github.com/ClickHouse/ClickHouse/pull/42031) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix No such key during table drop [#42036](https://github.com/ClickHouse/ClickHouse/pull/42036) ([alesapin](https://github.com/alesapin)). +* Temporarily disable too aggressive tests [#42050](https://github.com/ClickHouse/ClickHouse/pull/42050) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix style check [#42055](https://github.com/ClickHouse/ClickHouse/pull/42055) ([Anton Popov](https://github.com/CurtizJ)). +* Function name normalization fix functions header [#42063](https://github.com/ClickHouse/ClickHouse/pull/42063) ([Maksim Kita](https://github.com/kitaisreal)). +* remove unused virtual keyword [#42065](https://github.com/ClickHouse/ClickHouse/pull/42065) ([flynn](https://github.com/ucasfl)). +* Fix crash in `SummingMergeTree` with `LowCardinality` [#42066](https://github.com/ClickHouse/ClickHouse/pull/42066) ([Anton Popov](https://github.com/CurtizJ)). +* Fix drop of completely dropped table [#42067](https://github.com/ClickHouse/ClickHouse/pull/42067) ([alesapin](https://github.com/alesapin)). +* Fix assertion in bloom filter index [#42072](https://github.com/ClickHouse/ClickHouse/pull/42072) ([Anton Popov](https://github.com/CurtizJ)). 
+* Ignore core.autocrlf for tests references [#42076](https://github.com/ClickHouse/ClickHouse/pull/42076) ([Azat Khuzhin](https://github.com/azat)). +* Fix progress for INSERT SELECT [#42078](https://github.com/ClickHouse/ClickHouse/pull/42078) ([Azat Khuzhin](https://github.com/azat)). +* Avoid adding extra new line after using fuzzy history search [#42080](https://github.com/ClickHouse/ClickHouse/pull/42080) ([Azat Khuzhin](https://github.com/azat)). +* Add `at` to runner AMI, bump gh runner version [#42082](https://github.com/ClickHouse/ClickHouse/pull/42082) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Use send_metadata instead of send_object_metadata [#42085](https://github.com/ClickHouse/ClickHouse/pull/42085) ([Elena Torró](https://github.com/elenatorro)). +* Docs: Preparations to remove misc statements page [#42086](https://github.com/ClickHouse/ClickHouse/pull/42086) ([Robert Schulze](https://github.com/rschu1ze)). +* Followup for TemporaryDataOnDisk [#42103](https://github.com/ClickHouse/ClickHouse/pull/42103) ([Vladimir C](https://github.com/vdimir)). +* Disable 02122_join_group_by_timeout for debug [#42104](https://github.com/ClickHouse/ClickHouse/pull/42104) ([Vladimir C](https://github.com/vdimir)). +* Update version_date.tsv and changelogs after v22.6.9.11-stable [#42114](https://github.com/ClickHouse/ClickHouse/pull/42114) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* JIT compilation migration to LLVM 15 [#42123](https://github.com/ClickHouse/ClickHouse/pull/42123) ([Maksim Kita](https://github.com/kitaisreal)). +* Fix build without TSA [#42128](https://github.com/ClickHouse/ClickHouse/pull/42128) ([Raúl Marín](https://github.com/Algunenano)). +* Update codespell-ignore-words.list [#42132](https://github.com/ClickHouse/ClickHouse/pull/42132) ([Dan Roscigno](https://github.com/DanRoscigno)). +* Add null pointer checks [#42135](https://github.com/ClickHouse/ClickHouse/pull/42135) ([ltrk2](https://github.com/ltrk2)). +* Revert [#27787](https://github.com/ClickHouse/ClickHouse/issues/27787) [#42136](https://github.com/ClickHouse/ClickHouse/pull/42136) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Follow up for [#42129](https://github.com/ClickHouse/ClickHouse/issues/42129) [#42144](https://github.com/ClickHouse/ClickHouse/pull/42144) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix checking parent for old-format parts [#42147](https://github.com/ClickHouse/ClickHouse/pull/42147) ([alesapin](https://github.com/alesapin)). +* Revert "Resurrect parallel distributed insert select with s3Cluster [#42150](https://github.com/ClickHouse/ClickHouse/pull/42150) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Docs: Add "TABLE" to CHECK/DESCRIBE statements in sidebar [#42152](https://github.com/ClickHouse/ClickHouse/pull/42152) ([Robert Schulze](https://github.com/rschu1ze)). +* Add logging during merge tree startup [#42163](https://github.com/ClickHouse/ClickHouse/pull/42163) ([alesapin](https://github.com/alesapin)). +* Abort instead of `__builtin_unreachable` in debug builds [#42168](https://github.com/ClickHouse/ClickHouse/pull/42168) ([Alexander Tokmakov](https://github.com/tavplubix)). +* [RFC] Enable -Wshorten-64-to-32 [#42190](https://github.com/ClickHouse/ClickHouse/pull/42190) ([Azat Khuzhin](https://github.com/azat)). +* Fix dialect setting description [#42196](https://github.com/ClickHouse/ClickHouse/pull/42196) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). 
+* Add a test for #658 [#42197](https://github.com/ClickHouse/ClickHouse/pull/42197) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* use alias for MergeMutateSelectedEntry share ptr [#42211](https://github.com/ClickHouse/ClickHouse/pull/42211) ([Tian Xinhui](https://github.com/xinhuitian)). +* Fix LLVM build [#42216](https://github.com/ClickHouse/ClickHouse/pull/42216) ([Raúl Marín](https://github.com/Algunenano)). +* Exclude comments from style-check defined extern [#42217](https://github.com/ClickHouse/ClickHouse/pull/42217) ([Vladimir C](https://github.com/vdimir)). +* Update safeExit.cpp [#42220](https://github.com/ClickHouse/ClickHouse/pull/42220) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Disable concurrent parts removal [#42222](https://github.com/ClickHouse/ClickHouse/pull/42222) ([alesapin](https://github.com/alesapin)). +* Fail fast on empty URL in HDFS [#42223](https://github.com/ClickHouse/ClickHouse/pull/42223) ([Ilya Yatsishin](https://github.com/qoega)). +* Add a test for [#2389](https://github.com/ClickHouse/ClickHouse/issues/2389) [#42235](https://github.com/ClickHouse/ClickHouse/pull/42235) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Use MultiRead where possible [#42243](https://github.com/ClickHouse/ClickHouse/pull/42243) ([Antonio Andelic](https://github.com/antonio2368)). +* Minor cleanups of LLVM integration [#42249](https://github.com/ClickHouse/ClickHouse/pull/42249) ([Robert Schulze](https://github.com/rschu1ze)). +* remove useless code [#42253](https://github.com/ClickHouse/ClickHouse/pull/42253) ([flynn](https://github.com/ucasfl)). +* Early return of corner cases in selectPartsToMutate function [#42254](https://github.com/ClickHouse/ClickHouse/pull/42254) ([Tian Xinhui](https://github.com/xinhuitian)). +* Refactor the implementation of user-defined functions [#42263](https://github.com/ClickHouse/ClickHouse/pull/42263) ([Vitaly Baranov](https://github.com/vitlibar)). +* assert unused value in test_replicated_merge_tree_compatibility [#42266](https://github.com/ClickHouse/ClickHouse/pull/42266) ([nvartolomei](https://github.com/nvartolomei)). +* Fix Date Interval add/minus over DataTypeDate32 [#42279](https://github.com/ClickHouse/ClickHouse/pull/42279) ([Alfred Xu](https://github.com/sperlingxx)). +* Fix log-level in `clickhouse-disks` [#42302](https://github.com/ClickHouse/ClickHouse/pull/42302) ([Nikolay Degterinsky](https://github.com/evillique)). +* Remove forgotten debug logging [#42313](https://github.com/ClickHouse/ClickHouse/pull/42313) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix another trash in zero-copy replication [#42317](https://github.com/ClickHouse/ClickHouse/pull/42317) ([alesapin](https://github.com/alesapin)). +* go update for diagnostics tool [#42325](https://github.com/ClickHouse/ClickHouse/pull/42325) ([Dale McDiarmid](https://github.com/gingerwizard)). +* Better logging for asynchronous inserts [#42345](https://github.com/ClickHouse/ClickHouse/pull/42345) ([Anton Popov](https://github.com/CurtizJ)). +* Use nfpm packager for archlinux packages [#42349](https://github.com/ClickHouse/ClickHouse/pull/42349) ([Azat Khuzhin](https://github.com/azat)). +* Bump llvm/clang to 15.0.2 [#42351](https://github.com/ClickHouse/ClickHouse/pull/42351) ([Azat Khuzhin](https://github.com/azat)). +* Make getResource() independent from the order of the sections [#42353](https://github.com/ClickHouse/ClickHouse/pull/42353) ([Azat Khuzhin](https://github.com/azat)). 
+* Smaller threshold for multipart upload part size increase [#42392](https://github.com/ClickHouse/ClickHouse/pull/42392) ([alesapin](https://github.com/alesapin)). +* Better error message for unsupported delimiters in custom formats [#42406](https://github.com/ClickHouse/ClickHouse/pull/42406) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix formatting of `ALTER FREEZE` [#42409](https://github.com/ClickHouse/ClickHouse/pull/42409) ([Anton Popov](https://github.com/CurtizJ)). +* Replace table name in ast fuzzer more often [#42413](https://github.com/ClickHouse/ClickHouse/pull/42413) ([Anton Popov](https://github.com/CurtizJ)). +* Add *-15 tools to cmake.tools for GCC build [#42430](https://github.com/ClickHouse/ClickHouse/pull/42430) ([Ilya Yatsishin](https://github.com/qoega)). +* Deactivate tasks in ReplicatedMergeTree until startup [#42441](https://github.com/ClickHouse/ClickHouse/pull/42441) ([alesapin](https://github.com/alesapin)). +* Revert "Revert [#27787](https://github.com/ClickHouse/ClickHouse/issues/27787)" [#42442](https://github.com/ClickHouse/ClickHouse/pull/42442) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Update woboq_codebrowser location [#42448](https://github.com/ClickHouse/ClickHouse/pull/42448) ([Azat Khuzhin](https://github.com/azat)). +* add mdx and jsx to list of doc files [#42454](https://github.com/ClickHouse/ClickHouse/pull/42454) ([Dan Roscigno](https://github.com/DanRoscigno)). +* Remove code browser docs [#42455](https://github.com/ClickHouse/ClickHouse/pull/42455) ([Dan Roscigno](https://github.com/DanRoscigno)). +* Better workaround for emitting .debug_aranges section [#42457](https://github.com/ClickHouse/ClickHouse/pull/42457) ([Azat Khuzhin](https://github.com/azat)). +* Fix flaky test [#42459](https://github.com/ClickHouse/ClickHouse/pull/42459) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix UBSan report in Julian Day functions [#42464](https://github.com/ClickHouse/ClickHouse/pull/42464) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* rename filesystem_query_cache [#42472](https://github.com/ClickHouse/ClickHouse/pull/42472) ([Han Shukai](https://github.com/KinderRiven)). +* Add convenience typedefs for Date/Date32/DateTime/DateTime64 columns [#42476](https://github.com/ClickHouse/ClickHouse/pull/42476) ([Robert Schulze](https://github.com/rschu1ze)). +* Add error "Destination table is myself" to exception list in BC check [#42479](https://github.com/ClickHouse/ClickHouse/pull/42479) ([Kruglov Pavel](https://github.com/Avogar)). +* Get current clickhouse version without sending query in BC check [#42483](https://github.com/ClickHouse/ClickHouse/pull/42483) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix logical error from welchTTest [#42487](https://github.com/ClickHouse/ClickHouse/pull/42487) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Attempt to fix abort from parallel parsing [#42496](https://github.com/ClickHouse/ClickHouse/pull/42496) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Increase threshold for using physical cores for `max_threads` [#42503](https://github.com/ClickHouse/ClickHouse/pull/42503) ([Nikita Taranov](https://github.com/nickitat)). +* Add a test for [#16827](https://github.com/ClickHouse/ClickHouse/issues/16827) [#42511](https://github.com/ClickHouse/ClickHouse/pull/42511) ([Alexey Milovidov](https://github.com/alexey-milovidov)). 
+* Add a test for [#13653](https://github.com/ClickHouse/ClickHouse/issues/13653) [#42512](https://github.com/ClickHouse/ClickHouse/pull/42512) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix aliases [#42514](https://github.com/ClickHouse/ClickHouse/pull/42514) ([Nikolay Degterinsky](https://github.com/evillique)). +* tests: fix 00705_drop_create_merge_tree flakiness [#42522](https://github.com/ClickHouse/ClickHouse/pull/42522) ([Azat Khuzhin](https://github.com/azat)). +* Fix sanitizer reports in integration tests [#42529](https://github.com/ClickHouse/ClickHouse/pull/42529) ([Azat Khuzhin](https://github.com/azat)). +* Fix `KeeperTCPHandler` data race [#42532](https://github.com/ClickHouse/ClickHouse/pull/42532) ([Antonio Andelic](https://github.com/antonio2368)). +* Disable `test_storage_nats`, because it's permanently broken [#42535](https://github.com/ClickHouse/ClickHouse/pull/42535) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Better logs in clickhouse-disks [#42549](https://github.com/ClickHouse/ClickHouse/pull/42549) ([Nikolay Degterinsky](https://github.com/evillique)). +* add lib_fuzzer and lib_fuzzer_no_main to llvm-project build [#42550](https://github.com/ClickHouse/ClickHouse/pull/42550) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Some polishing: replicated merge tree [#42560](https://github.com/ClickHouse/ClickHouse/pull/42560) ([Igor Nikonov](https://github.com/devcrafter)). +* Temporarily disable flaky `test_replicated_merge_tree_hdfs_zero_copy` [#42563](https://github.com/ClickHouse/ClickHouse/pull/42563) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Adapt internal data structures to 512-bit era [#42564](https://github.com/ClickHouse/ClickHouse/pull/42564) ([Nikita Taranov](https://github.com/nickitat)). +* Fix strange code in date monotonicity [#42574](https://github.com/ClickHouse/ClickHouse/pull/42574) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Clear thread::id when ThreadFromGlobalPool exits. [#42577](https://github.com/ClickHouse/ClickHouse/pull/42577) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* ci/stress: fix memory limits overrides for hung check [#42585](https://github.com/ClickHouse/ClickHouse/pull/42585) ([Azat Khuzhin](https://github.com/azat)). +* tests: avoid model overlap for obfuscator [#42586](https://github.com/ClickHouse/ClickHouse/pull/42586) ([Azat Khuzhin](https://github.com/azat)). +* Fix possible segfault in expression parser [#42598](https://github.com/ClickHouse/ClickHouse/pull/42598) ([Nikolay Degterinsky](https://github.com/evillique)). +* Fix incorrect trace log line on dict reload [#42609](https://github.com/ClickHouse/ClickHouse/pull/42609) ([filimonov](https://github.com/filimonov)). +* Fix flaky 02458_datediff_date32 test [#42611](https://github.com/ClickHouse/ClickHouse/pull/42611) ([Roman Vasin](https://github.com/rvasin)). +* Revert revert 41268 disable s3 parallel write for part moves to disk s3 [#42617](https://github.com/ClickHouse/ClickHouse/pull/42617) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Try to fix data race on zookeeper vs DDLWorker at server shutdown. [#42620](https://github.com/ClickHouse/ClickHouse/pull/42620) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Add a template for installation issues [#42626](https://github.com/ClickHouse/ClickHouse/pull/42626) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). 
+* Fix typo in cmake code related to fuzzing [#42627](https://github.com/ClickHouse/ClickHouse/pull/42627) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Fix build [#42635](https://github.com/ClickHouse/ClickHouse/pull/42635) ([Anton Popov](https://github.com/CurtizJ)). +* Add .rgignore for test data [#42639](https://github.com/ClickHouse/ClickHouse/pull/42639) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix flaky 02457_datediff_via_unix_epoch test [#42655](https://github.com/ClickHouse/ClickHouse/pull/42655) ([Roman Vasin](https://github.com/rvasin)). + diff --git a/docs/changelogs/v22.10.2.11-stable.md b/docs/changelogs/v22.10.2.11-stable.md new file mode 100644 index 00000000000..e4507f4e745 --- /dev/null +++ b/docs/changelogs/v22.10.2.11-stable.md @@ -0,0 +1,18 @@ +--- +sidebar_position: 1 +sidebar_label: 2022 +--- + +# 2022 Changelog + +### ClickHouse release v22.10.2.11-stable (d2bfcaba002) FIXME as compared to v22.10.1.1877-stable (98ab5a3c189) + +#### Bug Fix (user-visible misbehavior in official stable or prestable release) + +* Backported in [#42750](https://github.com/ClickHouse/ClickHouse/issues/42750): A segmentation fault related to DNS & c-ares has been reported. The below error ocurred in multiple threads: ``` 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008088 [ 356 ] {} BaseDaemon: ######################################## 2022-09-28 15:41:19.008,"2022.09.28 15:41:19.008147 [ 356 ] {} BaseDaemon: (version 22.8.5.29 (official build), build id: 92504ACA0B8E2267) (from thread 353) (no query) Received signal Segmentation fault (11)" 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008196 [ 356 ] {} BaseDaemon: Address: 0xf Access: write. Address not mapped to object. 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008216 [ 356 ] {} BaseDaemon: Stack trace: 0x188f8212 0x1626851b 0x1626a69e 0x16269b3f 0x16267eab 0x13cf8284 0x13d24afc 0x13c5217e 0x14ec2495 0x15ba440f 0x15b9d13b 0x15bb2699 0x1891ccb3 0x1891e00d 0x18ae0769 0x18ade022 0x7f76aa985609 0x7f76aa8aa133 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008274 [ 356 ] {} BaseDaemon: 2. Poco::Net::IPAddress::family() const @ 0x188f8212 in /usr/bin/clickhouse 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008297 [ 356 ] {} BaseDaemon: 3. ? @ 0x1626851b in /usr/bin/clickhouse 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008309 [ 356 ] {} BaseDaemon: 4. ? @ 0x1626a69e in /usr/bin/clickhouse ```. [#42234](https://github.com/ClickHouse/ClickHouse/pull/42234) ([Arthur Passos](https://github.com/arthurpassos)). +* Backported in [#42793](https://github.com/ClickHouse/ClickHouse/issues/42793): Fix a bug in ParserFunction that could have led to a segmentation fault. [#42724](https://github.com/ClickHouse/ClickHouse/pull/42724) ([Nikolay Degterinsky](https://github.com/evillique)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Always run `BuilderReport` and `BuilderSpecialReport` in all CI types [#42684](https://github.com/ClickHouse/ClickHouse/pull/42684) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). 
+ diff --git a/docs/changelogs/v22.3.14.18-lts.md b/docs/changelogs/v22.3.14.18-lts.md new file mode 100644 index 00000000000..d0c67a2b241 --- /dev/null +++ b/docs/changelogs/v22.3.14.18-lts.md @@ -0,0 +1,26 @@ +--- +sidebar_position: 1 +sidebar_label: 2022 +--- + +# 2022 Changelog + +### ClickHouse release v22.3.14.18-lts (642946f61b2) FIXME as compared to v22.3.13.80-lts (e2708b01fba) + +#### Bug Fix +* Backported in [#42432](https://github.com/ClickHouse/ClickHouse/issues/42432): - Choose correct aggregation method for LowCardinality with BigInt. [#42342](https://github.com/ClickHouse/ClickHouse/pull/42342) ([Duc Canh Le](https://github.com/canhld94)). + +#### Build/Testing/Packaging Improvement +* Backported in [#42328](https://github.com/ClickHouse/ClickHouse/issues/42328): Update cctz to the latest master, update tzdb to 2020e. [#42273](https://github.com/ClickHouse/ClickHouse/pull/42273) ([Dom Del Nano](https://github.com/ddelnano)). +* Backported in [#42358](https://github.com/ClickHouse/ClickHouse/issues/42358): Update tzdata to 2022e to support the new timezone changes. Palestine transitions are now Saturdays at 02:00. Simplify three Ukraine zones into one. Jordan and Syria switch from +02/+03 with DST to year-round +03. (https://data.iana.org/time-zones/tzdb/NEWS). This closes [#42252](https://github.com/ClickHouse/ClickHouse/issues/42252). [#42327](https://github.com/ClickHouse/ClickHouse/pull/42327) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### Bug Fix (user-visible misbehavior in official stable or prestable release) + +* Backported in [#42298](https://github.com/ClickHouse/ClickHouse/issues/42298): Fix a bug with projections and the `aggregate_functions_null_for_empty` setting. This bug is very rare and appears only if you enable the `aggregate_functions_null_for_empty` setting in the server's config. This closes [#41647](https://github.com/ClickHouse/ClickHouse/issues/41647). [#42198](https://github.com/ClickHouse/ClickHouse/pull/42198) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Backported in [#42592](https://github.com/ClickHouse/ClickHouse/issues/42592): This closes [#42453](https://github.com/ClickHouse/ClickHouse/issues/42453). [#42573](https://github.com/ClickHouse/ClickHouse/pull/42573) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Add a warning message to release.py script, require release type [#41975](https://github.com/ClickHouse/ClickHouse/pull/41975) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Revert [#27787](https://github.com/ClickHouse/ClickHouse/issues/27787) [#42136](https://github.com/ClickHouse/ClickHouse/pull/42136) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). + diff --git a/docs/changelogs/v22.3.14.23-lts.md b/docs/changelogs/v22.3.14.23-lts.md new file mode 100644 index 00000000000..663d8b43f6f --- /dev/null +++ b/docs/changelogs/v22.3.14.23-lts.md @@ -0,0 +1,29 @@ +--- +sidebar_position: 1 +sidebar_label: 2022 +--- + +# 2022 Changelog + +### ClickHouse release v22.3.14.23-lts (74956bfee4d) FIXME as compared to v22.3.13.80-lts (e2708b01fba) + +#### Improvement +* Backported in [#42527](https://github.com/ClickHouse/ClickHouse/issues/42527): Fix issue with passing MySQL timeouts for MySQL database engine and MySQL table function. Closes [#34168](https://github.com/ClickHouse/ClickHouse/issues/34168)?notification_referrer_id=NT_kwDOAzsV57MzMDMxNjAzNTY5OjU0MjAzODc5. 
[#40751](https://github.com/ClickHouse/ClickHouse/pull/40751) ([Kseniia Sumarokova](https://github.com/kssenii)). + +#### Bug Fix +* Backported in [#42432](https://github.com/ClickHouse/ClickHouse/issues/42432): - Choose correct aggregation method for LowCardinality with BigInt. [#42342](https://github.com/ClickHouse/ClickHouse/pull/42342) ([Duc Canh Le](https://github.com/canhld94)). + +#### Build/Testing/Packaging Improvement +* Backported in [#42328](https://github.com/ClickHouse/ClickHouse/issues/42328): Update cctz to the latest master, update tzdb to 2020e. [#42273](https://github.com/ClickHouse/ClickHouse/pull/42273) ([Dom Del Nano](https://github.com/ddelnano)). +* Backported in [#42358](https://github.com/ClickHouse/ClickHouse/issues/42358): Update tzdata to 2022e to support the new timezone changes. Palestine transitions are now Saturdays at 02:00. Simplify three Ukraine zones into one. Jordan and Syria switch from +02/+03 with DST to year-round +03. (https://data.iana.org/time-zones/tzdb/NEWS). This closes [#42252](https://github.com/ClickHouse/ClickHouse/issues/42252). [#42327](https://github.com/ClickHouse/ClickHouse/pull/42327) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### Bug Fix (user-visible misbehavior in official stable or prestable release) + +* Backported in [#42298](https://github.com/ClickHouse/ClickHouse/issues/42298): Fix a bug with projections and the `aggregate_functions_null_for_empty` setting. This bug is very rare and appears only if you enable the `aggregate_functions_null_for_empty` setting in the server's config. This closes [#41647](https://github.com/ClickHouse/ClickHouse/issues/41647). [#42198](https://github.com/ClickHouse/ClickHouse/pull/42198) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Backported in [#42592](https://github.com/ClickHouse/ClickHouse/issues/42592): This closes [#42453](https://github.com/ClickHouse/ClickHouse/issues/42453). [#42573](https://github.com/ClickHouse/ClickHouse/pull/42573) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Add a warning message to release.py script, require release type [#41975](https://github.com/ClickHouse/ClickHouse/pull/41975) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Revert [#27787](https://github.com/ClickHouse/ClickHouse/issues/27787) [#42136](https://github.com/ClickHouse/ClickHouse/pull/42136) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). + diff --git a/docs/changelogs/v22.7.7.24-stable.md b/docs/changelogs/v22.7.7.24-stable.md new file mode 100644 index 00000000000..d7b83775502 --- /dev/null +++ b/docs/changelogs/v22.7.7.24-stable.md @@ -0,0 +1,29 @@ +--- +sidebar_position: 1 +sidebar_label: 2022 +--- + +# 2022 Changelog + +### ClickHouse release v22.7.7.24-stable (02ad1f979a8) FIXME as compared to v22.7.6.74-stable (c00ffb3c11a) + +#### Bug Fix +* Backported in [#42433](https://github.com/ClickHouse/ClickHouse/issues/42433): - Choose correct aggregation method for LowCardinality with BigInt. [#42342](https://github.com/ClickHouse/ClickHouse/pull/42342) ([Duc Canh Le](https://github.com/canhld94)). + +#### Build/Testing/Packaging Improvement +* Backported in [#42329](https://github.com/ClickHouse/ClickHouse/issues/42329): Update cctz to the latest master, update tzdb to 2020e. [#42273](https://github.com/ClickHouse/ClickHouse/pull/42273) ([Dom Del Nano](https://github.com/ddelnano)). 
+* Backported in [#42359](https://github.com/ClickHouse/ClickHouse/issues/42359): Update tzdata to 2022e to support the new timezone changes. Palestine transitions are now Saturdays at 02:00. Simplify three Ukraine zones into one. Jordan and Syria switch from +02/+03 with DST to year-round +03. (https://data.iana.org/time-zones/tzdb/NEWS). This closes [#42252](https://github.com/ClickHouse/ClickHouse/issues/42252). [#42327](https://github.com/ClickHouse/ClickHouse/pull/42327) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### Bug Fix (user-visible misbehavior in official stable or prestable release) + +* Backported in [#42268](https://github.com/ClickHouse/ClickHouse/issues/42268): Fix reusing of files > 4GB from base backup. [#42146](https://github.com/ClickHouse/ClickHouse/pull/42146) ([Azat Khuzhin](https://github.com/azat)). +* Backported in [#42299](https://github.com/ClickHouse/ClickHouse/issues/42299): Fix a bug with projections and the `aggregate_functions_null_for_empty` setting. This bug is very rare and appears only if you enable the `aggregate_functions_null_for_empty` setting in the server's config. This closes [#41647](https://github.com/ClickHouse/ClickHouse/issues/41647). [#42198](https://github.com/ClickHouse/ClickHouse/pull/42198) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Backported in [#42386](https://github.com/ClickHouse/ClickHouse/issues/42386): `ALTER UPDATE` of attached part (with columns different from table schema) could create an invalid `columns.txt` metadata on disk. Reading from such part could fail with errors or return invalid data. Fixes [#42161](https://github.com/ClickHouse/ClickHouse/issues/42161). [#42319](https://github.com/ClickHouse/ClickHouse/pull/42319) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#42498](https://github.com/ClickHouse/ClickHouse/issues/42498): Setting `additional_table_filters` were not applied to `Distributed` storage. Fixes [#41692](https://github.com/ClickHouse/ClickHouse/issues/41692). [#42322](https://github.com/ClickHouse/ClickHouse/pull/42322) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#42593](https://github.com/ClickHouse/ClickHouse/issues/42593): This closes [#42453](https://github.com/ClickHouse/ClickHouse/issues/42453). [#42573](https://github.com/ClickHouse/ClickHouse/pull/42573) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Add a warning message to release.py script, require release type [#41975](https://github.com/ClickHouse/ClickHouse/pull/41975) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Revert [#27787](https://github.com/ClickHouse/ClickHouse/issues/27787) [#42136](https://github.com/ClickHouse/ClickHouse/pull/42136) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). + diff --git a/docs/changelogs/v22.8.7.34-lts.md b/docs/changelogs/v22.8.7.34-lts.md new file mode 100644 index 00000000000..0dc899f4717 --- /dev/null +++ b/docs/changelogs/v22.8.7.34-lts.md @@ -0,0 +1,37 @@ +--- +sidebar_position: 1 +sidebar_label: 2022 +--- + +# 2022 Changelog + +### ClickHouse release v22.8.7.34-lts (3c38e5e8ab9) FIXME as compared to v22.8.6.71-lts (7bf38a43e30) + +#### Improvement +* Backported in [#42096](https://github.com/ClickHouse/ClickHouse/issues/42096): Replace back `clickhouse su` command with `sudo -u` in start in order to respect limits in `/etc/security/limits.conf`. 
[#41847](https://github.com/ClickHouse/ClickHouse/pull/41847) ([Eugene Konkov](https://github.com/ekonkov)). + +#### Bug Fix +* Backported in [#42434](https://github.com/ClickHouse/ClickHouse/issues/42434): - Choose correct aggregation method for LowCardinality with BigInt. [#42342](https://github.com/ClickHouse/ClickHouse/pull/42342) ([Duc Canh Le](https://github.com/canhld94)). + +#### Build/Testing/Packaging Improvement +* Backported in [#42296](https://github.com/ClickHouse/ClickHouse/issues/42296): Update cctz to the latest master, update tzdb to 2020e. [#42273](https://github.com/ClickHouse/ClickHouse/pull/42273) ([Dom Del Nano](https://github.com/ddelnano)). +* Backported in [#42360](https://github.com/ClickHouse/ClickHouse/issues/42360): Update tzdata to 2022e to support the new timezone changes. Palestine transitions are now Saturdays at 02:00. Simplify three Ukraine zones into one. Jordan and Syria switch from +02/+03 with DST to year-round +03. (https://data.iana.org/time-zones/tzdb/NEWS). This closes [#42252](https://github.com/ClickHouse/ClickHouse/issues/42252). [#42327](https://github.com/ClickHouse/ClickHouse/pull/42327) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### Bug Fix (user-visible misbehavior in official stable or prestable release) + +* Backported in [#42489](https://github.com/ClickHouse/ClickHouse/issues/42489): Removed skipping of mutations in unaffected partitions of `MergeTree` tables, because this feature never worked correctly and might cause resurrection of finished mutations. [#40589](https://github.com/ClickHouse/ClickHouse/pull/40589) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Backported in [#42121](https://github.com/ClickHouse/ClickHouse/issues/42121): Fixed "Part ... intersects part ..." error that might happen in extremely rare cases if replica was restarted just after detaching some part as broken. [#41741](https://github.com/ClickHouse/ClickHouse/pull/41741) ([Alexander Tokmakov](https://github.com/tavplubix)). +* - Prevent crash when passing wrong aggregation states to groupBitmap*. [#41972](https://github.com/ClickHouse/ClickHouse/pull/41972) ([Raúl Marín](https://github.com/Algunenano)). +* - Fix read bytes/rows in X-ClickHouse-Summary with materialized views. [#41973](https://github.com/ClickHouse/ClickHouse/pull/41973) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#42269](https://github.com/ClickHouse/ClickHouse/issues/42269): Fix reusing of files > 4GB from base backup. [#42146](https://github.com/ClickHouse/ClickHouse/pull/42146) ([Azat Khuzhin](https://github.com/azat)). +* Backported in [#42300](https://github.com/ClickHouse/ClickHouse/issues/42300): Fix a bug with projections and the `aggregate_functions_null_for_empty` setting. This bug is very rare and appears only if you enable the `aggregate_functions_null_for_empty` setting in the server's config. This closes [#41647](https://github.com/ClickHouse/ClickHouse/issues/41647). [#42198](https://github.com/ClickHouse/ClickHouse/pull/42198) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Backported in [#42387](https://github.com/ClickHouse/ClickHouse/issues/42387): `ALTER UPDATE` of attached part (with columns different from table schema) could create an invalid `columns.txt` metadata on disk. Reading from such part could fail with errors or return invalid data. Fixes [#42161](https://github.com/ClickHouse/ClickHouse/issues/42161). 
[#42319](https://github.com/ClickHouse/ClickHouse/pull/42319) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#42499](https://github.com/ClickHouse/ClickHouse/issues/42499): Setting `additional_table_filters` were not applied to `Distributed` storage. Fixes [#41692](https://github.com/ClickHouse/ClickHouse/issues/41692). [#42322](https://github.com/ClickHouse/ClickHouse/pull/42322) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#42571](https://github.com/ClickHouse/ClickHouse/issues/42571): Fix buffer overflow in the processing of Decimal data types. This closes [#42451](https://github.com/ClickHouse/ClickHouse/issues/42451). [#42465](https://github.com/ClickHouse/ClickHouse/pull/42465) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Backported in [#42594](https://github.com/ClickHouse/ClickHouse/issues/42594): This closes [#42453](https://github.com/ClickHouse/ClickHouse/issues/42453). [#42573](https://github.com/ClickHouse/ClickHouse/pull/42573) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Add a warning message to release.py script, require release type [#41975](https://github.com/ClickHouse/ClickHouse/pull/41975) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Revert [#27787](https://github.com/ClickHouse/ClickHouse/issues/27787) [#42136](https://github.com/ClickHouse/ClickHouse/pull/42136) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). + diff --git a/docs/changelogs/v22.8.8.3-lts.md b/docs/changelogs/v22.8.8.3-lts.md new file mode 100644 index 00000000000..deaab51fce9 --- /dev/null +++ b/docs/changelogs/v22.8.8.3-lts.md @@ -0,0 +1,13 @@ +--- +sidebar_position: 1 +sidebar_label: 2022 +--- + +# 2022 Changelog + +### ClickHouse release v22.8.8.3-lts (ac5a6cababc) FIXME as compared to v22.8.7.34-lts (3c38e5e8ab9) + +#### Bug Fix (user-visible misbehavior in official stable or prestable release) + +* Backported in [#42677](https://github.com/ClickHouse/ClickHouse/issues/42677): keeper-fix: fix race in accessing logs while snapshot is being installed. [#40627](https://github.com/ClickHouse/ClickHouse/pull/40627) ([Antonio Andelic](https://github.com/antonio2368)). + diff --git a/docs/changelogs/v22.9.4.32-stable.md b/docs/changelogs/v22.9.4.32-stable.md new file mode 100644 index 00000000000..d6c3f4ba498 --- /dev/null +++ b/docs/changelogs/v22.9.4.32-stable.md @@ -0,0 +1,33 @@ +--- +sidebar_position: 1 +sidebar_label: 2022 +--- + +# 2022 Changelog + +### ClickHouse release v22.9.4.32-stable (3db8bcf1a70) FIXME as compared to v22.9.3.18-stable (0cb4b15d2fa) + +#### Bug Fix +* Backported in [#42435](https://github.com/ClickHouse/ClickHouse/issues/42435): - Choose correct aggregation method for LowCardinality with BigInt. [#42342](https://github.com/ClickHouse/ClickHouse/pull/42342) ([Duc Canh Le](https://github.com/canhld94)). + +#### Build/Testing/Packaging Improvement +* Backported in [#42297](https://github.com/ClickHouse/ClickHouse/issues/42297): Update cctz to the latest master, update tzdb to 2020e. [#42273](https://github.com/ClickHouse/ClickHouse/pull/42273) ([Dom Del Nano](https://github.com/ddelnano)). +* Backported in [#42361](https://github.com/ClickHouse/ClickHouse/issues/42361): Update tzdata to 2022e to support the new timezone changes. Palestine transitions are now Saturdays at 02:00. Simplify three Ukraine zones into one. Jordan and Syria switch from +02/+03 with DST to year-round +03. 
(https://data.iana.org/time-zones/tzdb/NEWS). This closes [#42252](https://github.com/ClickHouse/ClickHouse/issues/42252). [#42327](https://github.com/ClickHouse/ClickHouse/pull/42327) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### Bug Fix (user-visible misbehavior in official stable or prestable release) + +* Backported in [#42122](https://github.com/ClickHouse/ClickHouse/issues/42122): Fixed "Part ... intersects part ..." error that might happen in extremely rare cases if replica was restarted just after detaching some part as broken. [#41741](https://github.com/ClickHouse/ClickHouse/pull/41741) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Backported in [#41938](https://github.com/ClickHouse/ClickHouse/issues/41938): Don't allow to create or alter merge tree tables with virtual column name _row_exists, which is reserved for lightweight delete. Fixed [#41716](https://github.com/ClickHouse/ClickHouse/issues/41716). [#41763](https://github.com/ClickHouse/ClickHouse/pull/41763) ([Jianmei Zhang](https://github.com/zhangjmruc)). +* Backported in [#42179](https://github.com/ClickHouse/ClickHouse/issues/42179): Fix reusing of files > 4GB from base backup. [#42146](https://github.com/ClickHouse/ClickHouse/pull/42146) ([Azat Khuzhin](https://github.com/azat)). +* Backported in [#42301](https://github.com/ClickHouse/ClickHouse/issues/42301): Fix a bug with projections and the `aggregate_functions_null_for_empty` setting. This bug is very rare and appears only if you enable the `aggregate_functions_null_for_empty` setting in the server's config. This closes [#41647](https://github.com/ClickHouse/ClickHouse/issues/41647). [#42198](https://github.com/ClickHouse/ClickHouse/pull/42198) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Backported in [#42388](https://github.com/ClickHouse/ClickHouse/issues/42388): `ALTER UPDATE` of attached part (with columns different from table schema) could create an invalid `columns.txt` metadata on disk. Reading from such part could fail with errors or return invalid data. Fixes [#42161](https://github.com/ClickHouse/ClickHouse/issues/42161). [#42319](https://github.com/ClickHouse/ClickHouse/pull/42319) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#42500](https://github.com/ClickHouse/ClickHouse/issues/42500): Setting `additional_table_filters` were not applied to `Distributed` storage. Fixes [#41692](https://github.com/ClickHouse/ClickHouse/issues/41692). [#42322](https://github.com/ClickHouse/ClickHouse/pull/42322) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#42581](https://github.com/ClickHouse/ClickHouse/issues/42581): This reverts [#40217](https://github.com/ClickHouse/ClickHouse/issues/40217) which introduced a regression in date/time functions. [#42367](https://github.com/ClickHouse/ClickHouse/pull/42367) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Backported in [#42572](https://github.com/ClickHouse/ClickHouse/issues/42572): Fix buffer overflow in the processing of Decimal data types. This closes [#42451](https://github.com/ClickHouse/ClickHouse/issues/42451). [#42465](https://github.com/ClickHouse/ClickHouse/pull/42465) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Backported in [#42595](https://github.com/ClickHouse/ClickHouse/issues/42595): This closes [#42453](https://github.com/ClickHouse/ClickHouse/issues/42453). 
[#42573](https://github.com/ClickHouse/ClickHouse/pull/42573) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Add a warning message to release.py script, require release type [#41975](https://github.com/ClickHouse/ClickHouse/pull/41975) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Revert [#27787](https://github.com/ClickHouse/ClickHouse/issues/27787) [#42136](https://github.com/ClickHouse/ClickHouse/pull/42136) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). + diff --git a/docs/en/development/architecture.md b/docs/en/development/architecture.md index c13b2519b84..fe644c43889 100644 --- a/docs/en/development/architecture.md +++ b/docs/en/development/architecture.md @@ -49,27 +49,13 @@ When we calculate some function over columns in a block, we add another column w Blocks are created for every processed chunk of data. Note that for the same type of calculation, the column names and types remain the same for different blocks, and only column data changes. It is better to split block data from the block header because small block sizes have a high overhead of temporary strings for copying shared_ptrs and column names. -## Block Streams {#block-streams} +## Processors -Block streams are for processing data. We use streams of blocks to read data from somewhere, perform data transformations, or write data to somewhere. `IBlockInputStream` has the `read` method to fetch the next block while available. `IBlockOutputStream` has the `write` method to push the block somewhere. - -Streams are responsible for: - -1. Reading or writing to a table. The table just returns a stream for reading or writing blocks. -2. Implementing data formats. For example, if you want to output data to a terminal in `Pretty` format, you create a block output stream where you push blocks, and it formats them. -3. Performing data transformations. Let’s say you have `IBlockInputStream` and want to create a filtered stream. You create `FilterBlockInputStream` and initialize it with your stream. Then when you pull a block from `FilterBlockInputStream`, it pulls a block from your stream, filters it, and returns the filtered block to you. Query execution pipelines are represented this way. - -There are more sophisticated transformations. For example, when you pull from `AggregatingBlockInputStream`, it reads all data from its source, aggregates it, and then returns a stream of aggregated data for you. Another example: `UnionBlockInputStream` accepts many input sources in the constructor and also a number of threads. It launches multiple threads and reads from multiple sources in parallel. - -> Block streams use the “pull” approach to control flow: when you pull a block from the first stream, it consequently pulls the required blocks from nested streams, and the entire execution pipeline will work. Neither “pull” nor “push” is the best solution, because control flow is implicit, and that limits the implementation of various features like simultaneous execution of multiple queries (merging many pipelines together). This limitation could be overcome with coroutines or just running extra threads that wait for each other. We may have more possibilities if we make control flow explicit: if we locate the logic for passing data from one calculation unit to another outside of those calculation units. Read this [article](http://journal.stuffwithstuff.com/2013/01/13/iteration-inside-and-out/) for more thoughts. 
- -We should note that the query execution pipeline creates temporary data at each step. We try to keep block size small enough so that temporary data fits in the CPU cache. With that assumption, writing and reading temporary data is almost free in comparison with other calculations. We could consider an alternative, which is to fuse many operations in the pipeline together. It could make the pipeline as short as possible and remove much of the temporary data, which could be an advantage, but it also has drawbacks. For example, a split pipeline makes it easy to implement caching intermediate data, stealing intermediate data from similar queries running at the same time, and merging pipelines for similar queries. +See the description at [https://github.com/ClickHouse/ClickHouse/blob/master/src/Processors/IProcessor.h](https://github.com/ClickHouse/ClickHouse/blob/master/src/Processors/IProcessor.h). ## Formats {#formats} -Data formats are implemented with block streams. There are “presentational” formats only suitable for the output of data to the client, such as `Pretty` format, which provides only `IBlockOutputStream`. And there are input/output formats, such as `TabSeparated` or `JSONEachRow`. - -There are also row streams: `IRowInputStream` and `IRowOutputStream`. They allow you to pull/push data by individual rows, not by blocks. And they are only needed to simplify the implementation of row-oriented formats. The wrappers `BlockInputStreamFromRowInputStream` and `BlockOutputStreamFromRowOutputStream` allow you to convert row-oriented streams to regular block-oriented streams. +Data formats are implemented with processors. ## I/O {#io} diff --git a/docs/en/development/browse-code.md b/docs/en/development/browse-code.md deleted file mode 100644 index 0d064cc9b0c..00000000000 --- a/docs/en/development/browse-code.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -slug: /en/development/browse-code -sidebar_label: Source Code Browser -sidebar_position: 72 -description: Various ways to browse and edit the source code ---- - -# Browse ClickHouse Source Code - -You can use the **Woboq** online code browser available [here](https://clickhouse.com/codebrowser/ClickHouse/src/index.html). It provides code navigation and semantic highlighting, search and indexing. The code snapshot is updated daily. - -Also, you can browse sources on [GitHub](https://github.com/ClickHouse/ClickHouse) as usual. - -If you’re interested what IDE to use, we recommend CLion, QT Creator, VS Code and KDevelop (with caveats). You can use any favorite IDE. Vim and Emacs also count. 
diff --git a/docs/en/development/build.md b/docs/en/development/build.md index f397dc0d037..8982a3bc0a4 100644 --- a/docs/en/development/build.md +++ b/docs/en/development/build.md @@ -105,7 +105,7 @@ ninja Example for Fedora Rawhide: ``` bash sudo yum update -yum --nogpg install git cmake make clang-c++ python3 +sudo yum --nogpg install git cmake make clang python3 ccache git clone --recursive https://github.com/ClickHouse/ClickHouse.git mkdir build && cd build cmake ../ClickHouse diff --git a/docs/en/engines/table-engines/integrations/s3.md b/docs/en/engines/table-engines/integrations/s3.md index 986a29b8307..db983ab9c68 100644 --- a/docs/en/engines/table-engines/integrations/s3.md +++ b/docs/en/engines/table-engines/integrations/s3.md @@ -139,7 +139,7 @@ The following settings can be specified in configuration file for given endpoint - `use_environment_credentials` — If set to `true`, S3 client will try to obtain credentials from environment variables and [Amazon EC2](https://en.wikipedia.org/wiki/Amazon_Elastic_Compute_Cloud) metadata for given endpoint. Optional, default value is `false`. - `region` — Specifies S3 region name. Optional. - `use_insecure_imds_request` — If set to `true`, S3 client will use insecure IMDS request while obtaining credentials from Amazon EC2 metadata. Optional, default value is `false`. -- `header` — Adds specified HTTP header to a request to given endpoint. Optional, can be speficied multiple times. +- `header` — Adds specified HTTP header to a request to given endpoint. Optional, can be specified multiple times. - `server_side_encryption_customer_key_base64` — If specified, required headers for accessing S3 objects with SSE-C encryption will be set. Optional. - `max_single_read_retries` — The maximum number of attempts during single read. Default value is `4`. Optional. diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md index 9dc7e300d45..486baac2310 100644 --- a/docs/en/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md @@ -419,6 +419,8 @@ Supported data types: `Int*`, `UInt*`, `Float*`, `Enum`, `Date`, `DateTime`, `St For `Map` data type client can specify if index should be created for keys or values using [mapKeys](../../../sql-reference/functions/tuple-map-functions.md#mapkeys) or [mapValues](../../../sql-reference/functions/tuple-map-functions.md#mapvalues) function. +There are also special-purpose and experimental indexes to support approximate nearest neighbor (ANN) queries. See [here](annindexes.md) for details. + The following functions can use the filter: [equals](../../../sql-reference/functions/comparison-functions.md), [notEquals](../../../sql-reference/functions/comparison-functions.md), [in](../../../sql-reference/functions/in-functions), [notIn](../../../sql-reference/functions/in-functions), [has](../../../sql-reference/functions/array-functions#hasarr-elem), [hasAny](../../../sql-reference/functions/array-functions#hasany), [hasAll](../../../sql-reference/functions/array-functions#hasall). 
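As a small editorial illustration of the data-skipping indexes discussed in the mergetree.md hunk above, here is a minimal sketch of a `bloom_filter` index that the functions listed in the previous sentence can use. The table and column names are invented for this example and are not part of the documentation change itself.

```sql
-- Illustrative only: a bloom_filter data-skipping index on an Array(String) column.
CREATE TABLE example_events
(
    `id` UInt64,
    `tags` Array(String),
    INDEX tags_bf tags TYPE bloom_filter(0.01) GRANULARITY 4
)
ENGINE = MergeTree
ORDER BY id;

-- Queries using functions such as has() can then be served with the help of the index.
SELECT count()
FROM example_events
WHERE has(tags, 'clickhouse');
```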
Example of index creation for `Map` data type diff --git a/docs/en/getting-started/example-datasets/nyc-taxi.md b/docs/en/getting-started/example-datasets/nyc-taxi.md index e24fb4b01a7..69098f63037 100644 --- a/docs/en/getting-started/example-datasets/nyc-taxi.md +++ b/docs/en/getting-started/example-datasets/nyc-taxi.md @@ -33,7 +33,7 @@ CREATE TABLE trips ( tip_amount Float32, tolls_amount Float32, total_amount Float32, - payment_type Enum('CSH' = 1, 'CRE' = 2, 'NOC' = 3, 'DIS' = 4), + payment_type Enum('CSH' = 1, 'CRE' = 2, 'NOC' = 3, 'DIS' = 4, 'UNK' = 5), pickup_ntaname LowCardinality(String), dropoff_ntaname LowCardinality(String) ) @@ -63,7 +63,7 @@ SELECT payment_type, pickup_ntaname, dropoff_ntaname -FROM url( +FROM s3( 'https://datasets-documentation.s3.eu-west-3.amazonaws.com/nyc-taxi/trips_{0..2}.gz', 'TabSeparatedWithNames' ) diff --git a/docs/en/getting-started/example-datasets/uk-price-paid.md b/docs/en/getting-started/example-datasets/uk-price-paid.md index ef20c03883f..2a89bfda2e7 100644 --- a/docs/en/getting-started/example-datasets/uk-price-paid.md +++ b/docs/en/getting-started/example-datasets/uk-price-paid.md @@ -101,7 +101,7 @@ SELECT count() FROM uk_price_paid ``` -At the time this query was executed, the dataset had 27,450,499 rows. Let's see what the storage size is of the table in ClickHouse: +At the time this query was run, the dataset had 27,450,499 rows. Let's see what the storage size is of the table in ClickHouse: ```sql SELECT formatReadableSize(total_bytes) @@ -342,7 +342,7 @@ The result looks like: ## Let's Speed Up Queries Using Projections {#speedup-with-projections} -[Projections](../../sql-reference/statements/alter/projection.md) allow you to improve query speeds by storing pre-aggregated data in whatever format you want. In this example, we create a projection that keeps track of the average price, total price, and count of properties grouped by the year, district and town. At execution time, ClickHouse will use your projection if it thinks the projection can improve the performance fo the query (you don't have to do anything special to use the projection - ClickHouse decides for you when the projection will be useful). +[Projections](../../sql-reference/statements/alter/projection.md) allow you to improve query speeds by storing pre-aggregated data in whatever format you want. In this example, we create a projection that keeps track of the average price, total price, and count of properties grouped by the year, district and town. At query time, ClickHouse will use your projection if it thinks the projection can improve the performance of the query (you don't have to do anything special to use the projection - ClickHouse decides for you when the projection will be useful). ### Build a Projection {#build-projection} diff --git a/docs/en/getting-started/install.md b/docs/en/getting-started/install.md index 61303eddab9..e88e9e06a68 100644 --- a/docs/en/getting-started/install.md +++ b/docs/en/getting-started/install.md @@ -128,6 +128,24 @@ clickhouse-client # or "clickhouse-client --password" if you set up a password. +
+Migration Method for installing the deb-packages + +```bash +sudo apt-key del E0C56BD4 +sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 8919F6BD2B48D754 +echo "deb https://packages.clickhouse.com/deb stable main" | sudo tee \ + /etc/apt/sources.list.d/clickhouse.list +sudo apt-get update + +sudo apt-get install -y clickhouse-server clickhouse-client + +sudo service clickhouse-server start +clickhouse-client # or "clickhouse-client --password" if you set up a password. +``` + +
+ You can replace `stable` with `lts` to use different [release kinds](/docs/en/faq/operations/production.md) based on your needs. You can also download and install packages manually from [here](https://packages.clickhouse.com/deb/pool/main/c/). diff --git a/docs/en/operations/backup.md b/docs/en/operations/_backup.md similarity index 67% rename from docs/en/operations/backup.md rename to docs/en/operations/_backup.md index a755e3ef9a6..d694c51cee6 100644 --- a/docs/en/operations/backup.md +++ b/docs/en/operations/_backup.md @@ -1,9 +1,12 @@ ---- -slug: /en/operations/backup -sidebar_position: 49 -sidebar_label: Data backup and restore -title: Data backup and restore ---- + +[//]: # (This file is included in Manage > Backups) + +- [Backup to a local disk](#backup-to-a-local-disk) +- [Configuring backup/restore to use an S3 endpoint](#configuring-backuprestore-to-use-an-s3-endpoint) +- [Backup/restore using an S3 disk](#backuprestore-using-an-s3-disk) +- [Alternatives](#alternatives) + +## Background While [replication](../engines/table-engines/mergetree-family/replication.md) provides protection from hardware failures, it does not protect against human errors: accidental deletion of data, deletion of the wrong table or a table on the wrong cluster, and software bugs that result in incorrect data processing or data corruption. In many cases mistakes like these will affect all replicas. ClickHouse has built-in safeguards to prevent some types of mistakes — for example, by default [you can’t just drop tables with a MergeTree-like engine containing more than 50 Gb of data](server-configuration-parameters/settings.md#max-table-size-to-drop). However, these safeguards do not cover all possible cases and can be circumvented. @@ -15,7 +18,9 @@ Each company has different resources available and business requirements, so the Keep in mind that if you backed something up and never tried to restore it, chances are that restore will not work properly when you actually need it (or at least it will take longer than business can tolerate). So whatever backup approach you choose, make sure to automate the restore process as well, and practice it on a spare ClickHouse cluster regularly. ::: -## Configure a backup destination +## Backup to a local disk + +### Configure a backup destination In the examples below you will see the backup destination specified like `Disk('backups', '1.zip')`. To prepare the destination add a file to `/etc/clickhouse-server/config.d/backup_disk.xml` specifying the backup destination. For example, this file defines disk named `backups` and then adds that disk to the **backups > allowed_disk** list: @@ -39,7 +44,7 @@ In the examples below you will see the backup destination specified like `Disk(' ``` -## Parameters +### Parameters Backups can be either full or incremental, and can include tables (including materialized views, projections, and dictionaries), and databases. Backups can be synchronous (default) or asynchronous. They can be compressed. Backups can be password protected. @@ -52,7 +57,7 @@ The BACKUP and RESTORE statements take a list of DATABASE and TABLE names, a des - `password` for the file on disk - `base_backup`: the destination of the previous backup of this source. 
For example, `Disk('backups', '1.zip')` -## Usage examples +### Usage examples Backup and then restore a table: ``` @@ -81,7 +86,7 @@ RESTORE TABLE test.table AS test.table2 FROM Disk('backups', '1.zip') BACKUP TABLE test.table3 AS test.table4 TO Disk('backups', '2.zip') ``` -## Incremental backups +### Incremental backups Incremental backups can be taken by specifying the `base_backup`. :::note @@ -100,7 +105,7 @@ RESTORE TABLE test.table AS test.table2 FROM Disk('backups', 'incremental-a.zip'); ``` -## Assign a password to the backup +### Assign a password to the backup Backups written to disk can have a password applied to the file: ``` @@ -116,7 +121,7 @@ RESTORE TABLE test.table SETTINGS password='qwerty' ``` -## Compression settings +### Compression settings If you would like to specify the compression method or level: ``` @@ -125,14 +130,14 @@ BACKUP TABLE test.table SETTINGS compression_method='lzma', compression_level=3 ``` -## Restore specific partitions +### Restore specific partitions If specific partitions associated with a table need to be restored these can be specified. To restore partitions 1 and 4 from backup: ``` RESTORE TABLE test.table PARTITIONS '2', '3' FROM Disk('backups', 'filename.zip') ``` -## Check the status of backups +### Check the status of backups The backup command returns an `id` and `status`, and that `id` can be used to get the status of the backup. This is very useful to check the progress of long ASYNC backups. The example below shows a failure that happened when trying to overwrite an existing backup file: ```sql @@ -171,13 +176,118 @@ end_time: 2022-08-30 09:21:46 1 row in set. Elapsed: 0.002 sec. ``` -## Backup to S3 +## Configuring BACKUP/RESTORE to use an S3 Endpoint -It is possible to `BACKUP`/`RESTORE` to S3, but this disk should be configured -in a proper way, since by default you will need to backup metadata from local -disk to make backup full. +To write backups to an S3 bucket you need three pieces of information: +- S3 endpoint, + for example `https://mars-doc-test.s3.amazonaws.com/backup-S3/` +- Access key ID, + for example `ABC123` +- Secret access key, + for example `Abc+123` -First of all, you need to configure S3 disk in a special way: +:::note +Creating an S3 bucket is covered in [Use S3 Object Storage as a ClickHouse disk](/docs/en/integrations/data-ingestion/s3/configuring-s3-for-clickhouse-use.md), just come back to this doc after saving the policy, there is no need to configure ClickHouse to use the S3 bucket. +::: + +The destination for a backup will be specified like this: +``` +S3('/', '', ') +``` + +```sql +CREATE TABLE data +( + `key` Int, + `value` String, + `array` Array(String) +) +ENGINE = MergeTree +ORDER BY tuple() +``` + +```sql +INSERT INTO data SELECT * +FROM generateRandom('key Int, value String, array Array(String)') +LIMIT 1000 +``` + +### Create a base (initial) backup + +Incremental backups require a _base_ backup to start from, this example will be used +later as the base backup. The first parameter of the S3 destination is the S3 endpoint followed by the directory within the bucket to use for this backup. In this example the directory is named `my_backup`. 
+ +```sql +BACKUP TABLE data TO S3('https://mars-doc-test.s3.amazonaws.com/backup-S3/my_backup', 'ABC123', 'Abc+123') +``` + +```response +┌─id───────────────────────────────────┬─status─────────┐ +│ de442b75-a66c-4a3c-a193-f76f278c70f3 │ BACKUP_CREATED │ +└──────────────────────────────────────┴────────────────┘ +``` + +### Add more data + +Incremental backups are populated with the difference between the base backup and the current content of the table being backed up. Add more data before taking the incremental backup: + +```sql +INSERT INTO data SELECT * +FROM generateRandom('key Int, value String, array Array(String)') +LIMIT 100 +``` +### Take an incremental backup + +This backup command is similar to the base backup, but adds `SETTINGS base_backup` and the location of the base backup. Note that the destination for the incremental backup is not the same directory as the base, it is the same endpoint with a different target directory within the bucket. The base backup is in `my_backup`, and the incremental will be written to `my_incremental`: +```sql +BACKUP TABLE data TO S3('https://mars-doc-test.s3.amazonaws.com/backup-S3/my_incremental', 'ABC123', 'Abc+123') SETTINGS base_backup = S3('https://mars-doc-test.s3.amazonaws.com/backup-S3/my_backup', 'ABC123', 'Abc+123') +``` + +```response +┌─id───────────────────────────────────┬─status─────────┐ +│ f6cd3900-850f-41c9-94f1-0c4df33ea528 │ BACKUP_CREATED │ +└──────────────────────────────────────┴────────────────┘ +``` +### Restore from the incremental backup + +This command restores the incremental backup into a new table, `data3`. Note that when an incremental backup is restored, the base backup is also included. Specify only the incremental backup when restoring: +```sql +RESTORE TABLE data AS data3 FROM S3('https://mars-doc-test.s3.amazonaws.com/backup-S3/my_incremental', 'ABC123', 'Abc+123') +``` + +```response +┌─id───────────────────────────────────┬─status───┐ +│ ff0c8c39-7dff-4324-a241-000796de11ca │ RESTORED │ +└──────────────────────────────────────┴──────────┘ +``` + +### Verify the count + +There were two inserts into the original table `data`, one with 1,000 rows and one with 100 rows, for a total of 1,100. Verify that the restored table has 1,100 rows: +```sql +SELECT count() +FROM data3 +``` +```response +┌─count()─┐ +│ 1100 │ +└─────────┘ +``` + +### Verify the content +This compares the content of the original table, `data` with the restored table `data3`: +```sql +SELECT throwIf(( + SELECT groupArray(tuple(*)) + FROM data + ) != ( + SELECT groupArray(tuple(*)) + FROM data3 + ), 'Data does not match after BACKUP/RESTORE') +``` +## BACKUP/RESTORE Using an S3 Disk + +It is also possible to `BACKUP`/`RESTORE` to S3 by configuring an S3 disk in the ClickHouse storage configuration. 
Configure the disk like this by adding a file to `/etc/clickhouse-server/config.d`: ```xml diff --git a/docs/en/operations/update.md b/docs/en/operations/_update.md similarity index 88% rename from docs/en/operations/update.md rename to docs/en/operations/_update.md index 24f7efecc7b..86981da2be6 100644 --- a/docs/en/operations/update.md +++ b/docs/en/operations/_update.md @@ -1,10 +1,7 @@ ---- -slug: /en/operations/update -sidebar_position: 47 -sidebar_label: ClickHouse Upgrade ---- -# ClickHouse Upgrade +[//]: # (This file is included in Manage > Updates) + +## Self-managed ClickHouse Upgrade If ClickHouse was installed from `deb` packages, execute the following commands on the server: diff --git a/docs/en/operations/clickhouse-keeper.md b/docs/en/operations/clickhouse-keeper.md index 82fa5c114ea..aad20da0010 100644 --- a/docs/en/operations/clickhouse-keeper.md +++ b/docs/en/operations/clickhouse-keeper.md @@ -309,7 +309,7 @@ Sessions with Ephemerals (1): /clickhouse/task_queue/ddl ``` -## [experimental] Migration from ZooKeeper {#migration-from-zookeeper} +## Migration from ZooKeeper {#migration-from-zookeeper} Seamlessly migration from ZooKeeper to ClickHouse Keeper is impossible you have to stop your ZooKeeper cluster, convert data and start ClickHouse Keeper. `clickhouse-keeper-converter` tool allows converting ZooKeeper logs and snapshots to ClickHouse Keeper snapshot. It works only with ZooKeeper > 3.4. Steps for migration: diff --git a/docs/en/operations/system-tables/information_schema.md b/docs/en/operations/system-tables/information_schema.md index a573491282a..a8e516f02a3 100644 --- a/docs/en/operations/system-tables/information_schema.md +++ b/docs/en/operations/system-tables/information_schema.md @@ -178,7 +178,7 @@ Columns: - `view_definition` ([String](../../sql-reference/data-types/string.md)) — `SELECT` query for view. - `check_option` ([String](../../sql-reference/data-types/string.md)) — `NONE`, no checking. - `is_updatable` ([Enum8](../../sql-reference/data-types/enum.md)) — `NO`, the view is not updated. -- `is_insertable_into` ([Enum8](../../sql-reference/data-types/enum.md)) — Shows whether the created view is [materialized](../../sql-reference/statements/create/view/#materialized). Possible values: +- `is_insertable_into` ([Enum8](../../sql-reference/data-types/enum.md)) — Shows whether the created view is [materialized](../../sql-reference/statements/create/view.md/#materialized-view). Possible values: - `NO` — The created view is not materialized. - `YES` — The created view is materialized. - `is_trigger_updatable` ([Enum8](../../sql-reference/data-types/enum.md)) — `NO`, the trigger is not updated. 
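As a small, hypothetical illustration of the `information_schema` view columns documented in the hunk above (the query shape and the use of `currentDatabase()` are assumptions for the example, not part of the change):

```sql
-- Illustrative only: list views in the current database and whether they are materialized.
SELECT
    table_schema,
    table_name,
    is_insertable_into
FROM information_schema.views
WHERE table_schema = currentDatabase();
```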
diff --git a/docs/en/operations/system-tables/replicated_fetches.md b/docs/en/operations/system-tables/replicated_fetches.md index 3536bbaff4d..74888fd2f13 100644 --- a/docs/en/operations/system-tables/replicated_fetches.md +++ b/docs/en/operations/system-tables/replicated_fetches.md @@ -68,6 +68,5 @@ thread_id: 54 **See Also** -- [Managing ReplicatedMergeTree Tables](../../sql-reference/statements/system/#query-language-system-replicated) +- [Managing ReplicatedMergeTree Tables](../../sql-reference/statements/system.md/#managing-replicatedmergetree-tables) -[Original article](https://clickhouse.com/docs/en/operations/system_tables/replicated_fetches)
diff --git a/docs/en/operations/troubleshooting.md b/docs/en/operations/troubleshooting.md index 93bd56087a2..6a1ca3176ad 100644 --- a/docs/en/operations/troubleshooting.md +++ b/docs/en/operations/troubleshooting.md @@ -17,6 +17,33 @@ title: Troubleshooting - Check firewall settings. - If you cannot access the repository for any reason, download packages as described in the [install guide](../getting-started/install.md) article and install them manually using the `sudo dpkg -i ` command. You will also need the `tzdata` package. +### You Cannot Update Deb Packages from ClickHouse Repository with Apt-get {#you-cannot-update-deb-packages-from-clickhouse-repository-with-apt-get} + +- The issue may happen when the GPG key is changed. + +Please use the following commands to resolve the issue: + +```bash +sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 8919F6BD2B48D754 +sudo apt-get update +``` + +### You Get the Unsupported Architecture Warning with Apt-get {#you-get-the-unsupported-architecture-warning-with-apt-get} + +- The complete warning message is as follows: + +``` +N: Skipping acquire of configured file 'main/binary-i386/Packages' as repository 'https://packages.clickhouse.com/deb stable InRelease' doesn't support architecture 'i386' +``` + +To resolve the above issue, please use the following commands: + +```bash +sudo rm /var/lib/apt/lists/packages.clickhouse.com_* /var/lib/dpkg/arch +sudo apt-get clean +sudo apt-get autoclean +``` + ## Connecting to the Server {#troubleshooting-accepts-no-connections} Possible issues:
diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md index 198ff12f1d6..02a4ad57a3b 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md @@ -303,17 +303,25 @@ or CREATE DICTIONARY somedict ( id UInt64, first Date, - last Date + last Date, + advertiser_id UInt64 ) PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'date_table')) +LIFETIME(MIN 1 MAX 1000) LAYOUT(RANGE_HASHED()) RANGE(MIN first MAX last) ``` -To work with these dictionaries, you need to pass an additional argument to the `dictGetT` function, for which a range is selected: +To work with these dictionaries, you need to pass an additional argument to the `dictGet` function, for which a range is selected: ``` sql -dictGetT('dict_name', 'attr_name', id, date) +dictGet('dict_name', 'attr_name', id, date) +``` +Query example: + +``` sql +SELECT dictGet('somedict', 'advertiser_id', 1, '2022-10-20 23:20:10.000'::DateTime64::UInt64); ``` This function returns the value for the specified `id`s and the date range that includes the passed date.
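For context on the `SOURCE(CLICKHOUSE(TABLE 'date_table'))` clause introduced in the dictionary example above, a minimal sketch of what such a source table could look like; the layout mirrors the dictionary attributes, and the sample row is invented for illustration:

```sql
-- Hypothetical source table backing the range-hashed dictionary example above.
CREATE TABLE date_table
(
    `id` UInt64,
    `first` Date,
    `last` Date,
    `advertiser_id` UInt64
)
ENGINE = MergeTree
ORDER BY id;

INSERT INTO date_table VALUES (1, '2022-10-01', '2022-10-31', 42);
```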
diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon.md index 912af5b5bce..e5ee48c9166 100644 --- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon.md +++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon.md @@ -14,8 +14,10 @@ Example of a polygon dictionary configuration: - key - Array(Array(Array(Array(Float64)))) + + key + Array(Array(Array(Array(Float64)))) + diff --git a/docs/en/sql-reference/functions/date-time-functions.md b/docs/en/sql-reference/functions/date-time-functions.md index 76f66db924f..f7ea2690b21 100644 --- a/docs/en/sql-reference/functions/date-time-functions.md +++ b/docs/en/sql-reference/functions/date-time-functions.md @@ -271,11 +271,7 @@ Result: The return type of `toStartOf*`, `toLastDayOfMonth`, `toMonday`, `timeSlot` functions described below is determined by the configuration parameter [enable_extended_results_for_datetime_functions](../../operations/settings/settings#enable-extended-results-for-datetime-functions) which is `0` by default. Behavior for -* `enable_extended_results_for_datetime_functions = 0`: Functions `toStartOfYear`, `toStartOfISOYear`, `toStartOfQuarter`, `toStartOfMonth`, `toStartOfWeek`, `toLastDayOfMonth`, `toMonday` return `Date` or `DateTime`. Functions `toStartOfDay`, `toStartOfHour`, `toStartOfFifteenMinutes`, `toStartOfTenMinutes`, `toStartOfFiveMinutes`, `toStartOfMinute`, `timeSlot` return `DateTime`. Though these functions can take values of the extended types `Date32` and `DateTime64` as an argument, passing them a time outside the normal range (year 1970 to 2149 for `Date` / 2106 for `DateTime`) will produce wrong results. In case argument is out of normal range: - * If the argument is smaller than 1970, the result will be calculated from the argument `1970-01-01 (00:00:00)` instead. - * If the return type is `DateTime` and the argument is larger than `2106-02-07 08:28:15`, the result will be calculated from the argument `2106-02-07 08:28:15` instead. - * If the return type is `Date` and the argument is larger than `2149-06-06`, the result will be calculated from the argument `2149-06-06` instead. - * If `toLastDayOfMonth` is called with an argument greater then `2149-05-31`, the result will be calculated from the argument `2149-05-31` instead. +* `enable_extended_results_for_datetime_functions = 0`: Functions `toStartOfYear`, `toStartOfISOYear`, `toStartOfQuarter`, `toStartOfMonth`, `toStartOfWeek`, `toLastDayOfMonth`, `toMonday` return `Date` or `DateTime`. Functions `toStartOfDay`, `toStartOfHour`, `toStartOfFifteenMinutes`, `toStartOfTenMinutes`, `toStartOfFiveMinutes`, `toStartOfMinute`, `timeSlot` return `DateTime`. Though these functions can take values of the extended types `Date32` and `DateTime64` as an argument, passing them a time outside the normal range (year 1970 to 2149 for `Date` / 2106 for `DateTime`) will produce wrong results. * `enable_extended_results_for_datetime_functions = 1`: * Functions `toStartOfYear`, `toStartOfISOYear`, `toStartOfQuarter`, `toStartOfMonth`, `toStartOfWeek`, `toLastDayOfMonth`, `toMonday` return `Date` or `DateTime` if their argument is a `Date` or `DateTime`, and they return `Date32` or `DateTime64` if their argument is a `Date32` or `DateTime64`. 
* Functions `toStartOfDay`, `toStartOfHour`, `toStartOfFifteenMinutes`, `toStartOfTenMinutes`, `toStartOfFiveMinutes`, `toStartOfMinute`, `timeSlot` return `DateTime` if their argument is a `Date` or `DateTime`, and they return `DateTime64` if their argument is a `Date32` or `DateTime64`. @@ -302,25 +298,22 @@ Returns the date. Rounds down a date or date with time to the first day of the month. Returns the date. -## toLastDayOfMonth - -Rounds up a date or date with time to the last day of the month. -Returns the date. +:::note +The behavior of parsing incorrect dates is implementation specific. ClickHouse may return zero date, throw an exception or do “natural” overflow. +::: If `toLastDayOfMonth` is called with an argument of type `Date` greater then 2149-05-31, the result will be calculated from the argument 2149-05-31 instead. ## toMonday Rounds down a date or date with time to the nearest Monday. -As a special case, date arguments `1970-01-01`, `1970-01-02`, `1970-01-03` and `1970-01-04` return date `1970-01-01`. Returns the date. ## toStartOfWeek(t\[,mode\]) Rounds down a date or date with time to the nearest Sunday or Monday by mode. Returns the date. -As a special case, date arguments `1970-01-01`, `1970-01-02`, `1970-01-03` and `1970-01-04` (and `1970-01-05` if `mode` is `1`) return date `1970-01-01`. -The `mode` argument works exactly like the mode argument to toWeek(). For the single-argument syntax, a mode value of 0 is used. +The mode argument works exactly like the mode argument to toWeek(). For the single-argument syntax, a mode value of 0 is used. ## toStartOfDay @@ -671,9 +664,9 @@ Aliases: `dateDiff`, `DATE_DIFF`. - `quarter` - `year` -- `startdate` — The first time value to subtract (the subtrahend). [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md). +- `startdate` — The first time value to subtract (the subtrahend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md). -- `enddate` — The second time value to subtract from (the minuend). [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md). +- `enddate` — The second time value to subtract from (the minuend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md). - `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional). If specified, it is applied to both `startdate` and `enddate`. If not specified, timezones of `startdate` and `enddate` are used. If they are not the same, the result is unspecified. [String](../../sql-reference/data-types/string.md). @@ -1163,7 +1156,7 @@ dateName(date_part, date) **Arguments** - `date_part` — Date part. Possible values: 'year', 'quarter', 'month', 'week', 'dayofyear', 'day', 'weekday', 'hour', 'minute', 'second'. [String](../../sql-reference/data-types/string.md). -- `date` — Date. [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md). +- `date` — Date. 
[Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md). - `timezone` — Timezone. Optional. [String](../../sql-reference/data-types/string.md). **Returned value** @@ -1251,7 +1244,7 @@ Result: └──────────────────────────┘ ``` -When there are two arguments: first is an [Integer](../../sql-reference/data-types/int-uint.md) or [DateTime](../../sql-reference/data-types/datetime.md), second is a constant format string — it acts in the same way as [formatDateTime](#formatdatetime) and return [String](../../sql-reference/data-types/string.md#string) type. +When there are two or three arguments, the first an [Integer](../../sql-reference/data-types/int-uint.md), [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md), the second a constant format string, and the third an optional constant time zone string — it acts in the same way as [formatDateTime](#formatdatetime) and returns [String](../../sql-reference/data-types/string.md#string) type. For example: diff --git a/docs/en/sql-reference/functions/encoding-functions.md b/docs/en/sql-reference/functions/encoding-functions.md index eb357df19db..4a6e46e1759 100644 --- a/docs/en/sql-reference/functions/encoding-functions.md +++ b/docs/en/sql-reference/functions/encoding-functions.md @@ -376,14 +376,6 @@ Result: └─────┘ ``` -## UUIDStringToNum(str) - -Accepts a string containing 36 characters in the format `123e4567-e89b-12d3-a456-426655440000`, and returns it as a set of bytes in a FixedString(16). - -## UUIDNumToString(str) - -Accepts a FixedString(16) value. Returns a string containing 36 characters in text format. - ## bitmaskToList(num) Accepts an integer. Returns a string containing the list of powers of two that total the source number when summed. They are comma-separated without spaces in text format, in ascending order. diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md index b80d75e3611..6490d4c2272 100644 --- a/docs/en/sql-reference/functions/other-functions.md +++ b/docs/en/sql-reference/functions/other-functions.md @@ -571,7 +571,7 @@ Example: ``` sql SELECT - transform(domain(Referer), ['yandex.ru', 'google.ru', 'vk.com'], ['www.yandex', 'example.com']) AS s, + transform(domain(Referer), ['yandex.ru', 'google.ru', 'vkontakte.ru'], ['www.yandex', 'example.com', 'vk.com']) AS s, count() AS c FROM test.hits GROUP BY domain(Referer) diff --git a/docs/en/sql-reference/functions/uuid-functions.md b/docs/en/sql-reference/functions/uuid-functions.md index b8f222c2e4e..43542367cd5 100644 --- a/docs/en/sql-reference/functions/uuid-functions.md +++ b/docs/en/sql-reference/functions/uuid-functions.md @@ -211,12 +211,19 @@ SELECT toUUIDOrZero('61f0c404-5cb3-11e7-907b-a6006ad3dba0T') AS uuid ## UUIDStringToNum -Accepts a string containing 36 characters in the format `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`, and returns it as a set of bytes in a [FixedString(16)](../../sql-reference/data-types/fixedstring.md).
+Accepts `string` containing 36 characters in the format `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`, and returns a [FixedString(16)](../../sql-reference/data-types/fixedstring.md) as its binary representation, with its format optionally specified by `variant` (`Big-endian` by default). + +**Syntax** ``` sql -UUIDStringToNum(String) +UUIDStringToNum(string[, variant = 1]) ``` +**Arguments** + +- `string` — String of 36 characters or FixedString(36). [String](../../sql-reference/syntax.md#syntax-string-literal). +- `variant` — Integer, representing a variant as specified by [RFC4122](https://datatracker.ietf.org/doc/html/rfc4122#section-4.1.1). 1 = `Big-endian` (default), 2 = `Microsoft`. + **Returned value** FixedString(16) @@ -235,14 +242,33 @@ SELECT └──────────────────────────────────────┴──────────────────┘ ``` +``` sql +SELECT + '612f3c40-5d3b-217e-707b-6a546a3d7b29' AS uuid, + UUIDStringToNum(uuid, 2) AS bytes +``` + +``` text +┌─uuid─────────────────────────────────┬─bytes────────────┐ +│ 612f3c40-5d3b-217e-707b-6a546a3d7b29 │ @ [GROUP BY] [ORDER BY] )` - Adds projection description to tables metadata. +## ADD PROJECTION -- `ALTER TABLE [db].name DROP PROJECTION name` - Removes projection description from tables metadata and deletes projection files from disk. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations). +`ALTER TABLE [db].name ADD PROJECTION name ( SELECT [GROUP BY] [ORDER BY] )` - Adds projection description to tables metadata. -- `ALTER TABLE [db.]table MATERIALIZE PROJECTION name IN PARTITION partition_name` - The query rebuilds the projection `name` in the partition `partition_name`. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations). +## DROP PROJECTION -- `ALTER TABLE [db.]table CLEAR PROJECTION name IN PARTITION partition_name` - Deletes projection files from disk without removing description. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations). +`ALTER TABLE [db].name DROP PROJECTION name` - Removes projection description from tables metadata and deletes projection files from disk. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations). + +## MATERIALIZE PROJECTION + +`ALTER TABLE [db.]table MATERIALIZE PROJECTION name IN PARTITION partition_name` - The query rebuilds the projection `name` in the partition `partition_name`. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations). + +## CLEAR PROJECTION + +`ALTER TABLE [db.]table CLEAR PROJECTION name IN PARTITION partition_name` - Deletes projection files from disk without removing description. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations). The commands `ADD`, `DROP` and `CLEAR` are lightweight in a sense that they only change metadata or remove files. -Also, they are replicated, syncing projections metadata via ZooKeeper. +Also, they are replicated, syncing projections metadata via ClickHouse Keeper or ZooKeeper. :::note Projection manipulation is supported only for tables with [`*MergeTree`](../../../engines/table-engines/mergetree-family/mergetree.md) engine (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) variants). 
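Since the reshaped projection section above only lists the command syntax, here is a minimal, self-contained sketch of the `ADD` and `MATERIALIZE` commands in use; the table, projection, and partition names are illustrative assumptions, not part of this patch.

```sql
-- Hypothetical MergeTree table partitioned by month (names are examples only).
CREATE TABLE visits
(
    EventDate Date,
    UserID UInt64,
    Duration UInt32
)
ENGINE = MergeTree
PARTITION BY toYYYYMM(EventDate)
ORDER BY (EventDate, UserID);

-- ADD PROJECTION is lightweight: it only changes metadata, existing parts are not rewritten.
ALTER TABLE visits ADD PROJECTION total_duration
(
    SELECT UserID, sum(Duration) GROUP BY UserID
);

-- MATERIALIZE PROJECTION rebuilds the projection for parts in the given partition (runs as a mutation).
ALTER TABLE visits MATERIALIZE PROJECTION total_duration IN PARTITION 202210;
```

Once the projection is materialized, a query such as `SELECT UserID, sum(Duration) FROM visits GROUP BY UserID` should be able to read from the projection instead of the full table.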
diff --git a/docs/en/sql-reference/statements/create/database.md b/docs/en/sql-reference/statements/create/database.md index 432f5975cc8..7954d1362f1 100644 --- a/docs/en/sql-reference/statements/create/database.md +++ b/docs/en/sql-reference/statements/create/database.md @@ -31,7 +31,7 @@ By default, ClickHouse uses its own [Atomic](../../../engines/database-engines/a ### COMMENT -You can add a comment to the database when you creating it. +You can add a comment to the database when you are creating it. The comment is supported for all database engines. diff --git a/docs/en/sql-reference/statements/create/function.md b/docs/en/sql-reference/statements/create/function.md index 63c006b1e3e..90be007bf43 100644 --- a/docs/en/sql-reference/statements/create/function.md +++ b/docs/en/sql-reference/statements/create/function.md @@ -4,7 +4,7 @@ sidebar_position: 38 sidebar_label: FUNCTION --- -# CREATE FUNCTION +# CREATE FUNCTION — user defined function (UDF) Creates a user defined function from a lambda expression. The expression must consist of function parameters, constants, operators, or other function calls. diff --git a/docs/en/sql-reference/statements/misc.md b/docs/en/sql-reference/statements/misc.md deleted file mode 100644 index d812dd2008a..00000000000 --- a/docs/en/sql-reference/statements/misc.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -slug: /en/sql-reference/statements/misc -toc_hidden: true -sidebar_position: 70 ---- - -# Miscellaneous Statements - -- [ATTACH](../../sql-reference/statements/attach.md) -- [CHECK TABLE](../../sql-reference/statements/check-table.md) -- [DESCRIBE TABLE](../../sql-reference/statements/describe-table.md) -- [DETACH](../../sql-reference/statements/detach.md) -- [DROP](../../sql-reference/statements/drop.md) -- [EXISTS](../../sql-reference/statements/exists.md) -- [KILL](../../sql-reference/statements/kill.md) -- [OPTIMIZE](../../sql-reference/statements/optimize.md) -- [RENAME](../../sql-reference/statements/rename.md) -- [SET](../../sql-reference/statements/set.md) -- [SET ROLE](../../sql-reference/statements/set-role.md) -- [TRUNCATE](../../sql-reference/statements/truncate.md) -- [USE](../../sql-reference/statements/use.md) diff --git a/docs/en/sql-reference/statements/optimize.md b/docs/en/sql-reference/statements/optimize.md index 680ff773992..036d3f0599a 100644 --- a/docs/en/sql-reference/statements/optimize.md +++ b/docs/en/sql-reference/statements/optimize.md @@ -22,7 +22,7 @@ The `OPTIMIZE` query is supported for [MergeTree](../../engines/table-engines/me When `OPTIMIZE` is used with the [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md) family of table engines, ClickHouse creates a task for merging and waits for execution on all replicas (if the [replication_alter_partitions_sync](../../operations/settings/settings.md#replication-alter-partitions-sync) setting is set to `2`) or on current replica (if the [replication_alter_partitions_sync](../../operations/settings/settings.md#replication-alter-partitions-sync) setting is set to `1`). - If `OPTIMIZE` does not perform a merge for any reason, it does not notify the client. To enable notifications, use the [optimize_throw_if_noop](../../operations/settings/settings.md#setting-optimize_throw_if_noop) setting. -- If you specify a `PARTITION`, only the specified partition is optimized. [How to set partition expression](../../sql-reference/statements/alter/index.md#alter-how-to-specify-part-expr). +- If you specify a `PARTITION`, only the specified partition is optimized. 
[How to set partition expression](alter/partition.md#how-to-set-partition-expression). - If you specify `FINAL`, optimization is performed even when all the data is already in one part. Also merge is forced even if concurrent merges are performed. - If you specify `DEDUPLICATE`, then completely identical rows (unless by-clause is specified) will be deduplicated (all columns are compared), it makes sense only for the MergeTree engine. diff --git a/docs/en/sql-reference/statements/select/intersect.md b/docs/en/sql-reference/statements/select/intersect.md index d3b2b51b6be..f1eb4738543 100644 --- a/docs/en/sql-reference/statements/select/intersect.md +++ b/docs/en/sql-reference/statements/select/intersect.md @@ -7,7 +7,7 @@ sidebar_label: INTERSECT The `INTERSECT` clause returns only those rows that result from both the first and the second queries. The queries must match the number of columns, order, and type. The result of `INTERSECT` can contain duplicate rows. -Multiple `INTERSECT` statements are executes left to right if parenthesis are not specified. The `INTERSECT` operator has a higher priority than the `UNION` and `EXCEPT` clause. +Multiple `INTERSECT` statements are executed left to right if parentheses are not specified. The `INTERSECT` operator has a higher priority than the `UNION` and `EXCEPT` clauses. ``` sql diff --git a/docs/en/sql-reference/table-functions/file.md b/docs/en/sql-reference/table-functions/file.md index f40107aaaca..fc81e7cf649 100644 --- a/docs/en/sql-reference/table-functions/file.md +++ b/docs/en/sql-reference/table-functions/file.md @@ -81,6 +81,7 @@ Multiple path components can have globs. For being processed file must exist and - `?` — Substitutes any single character. - `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`. - `{N..M}` — Substitutes any number in range from N to M including both borders. +- `**` - Fetches all files inside the folder recursively. Constructions with `{}` are similar to the [remote](remote.md) table function. @@ -119,6 +120,22 @@ Query the data from files named `file000`, `file001`, … , `file999`: SELECT count(*) FROM file('big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, value UInt32'); ``` +**Example** + +Query the data from all files inside `big_dir` directory recursively: + +``` sql +SELECT count(*) FROM file('big_dir/**', 'CSV', 'name String, value UInt32'); +``` + +**Example** + +Query the data from all `file002` files from any folder inside `big_dir` directory recursively: + +``` sql +SELECT count(*) FROM file('big_dir/**/file002', 'CSV', 'name String, value UInt32'); +``` + ## Virtual Columns - `_path` — Path to the file. diff --git a/docs/en/sql-reference/table-functions/s3.md b/docs/en/sql-reference/table-functions/s3.md index 545037665bb..545a89223bf 100644 --- a/docs/en/sql-reference/table-functions/s3.md +++ b/docs/en/sql-reference/table-functions/s3.md @@ -127,6 +127,18 @@ INSERT INTO FUNCTION s3('https://clickhouse-public-datasets.s3.amazonaws.com/my- SELECT name, value FROM existing_table; ``` +Glob ** can be used for recursive directory traversal. 
The example below fetches all files from the `my-test-bucket-768` directory recursively: + +``` sql +SELECT * FROM s3('https://clickhouse-public-datasets.s3.amazonaws.com/my-test-bucket-768/**', 'CSV', 'name String, value UInt32', 'gzip'); +``` + +The example below gets data from all `test-data.csv.gz` files in any folder inside the `my-test-bucket` directory recursively: + +``` sql +SELECT * FROM s3('https://clickhouse-public-datasets.s3.amazonaws.com/my-test-bucket-768/**/test-data.csv.gz', 'CSV', 'name String, value UInt32', 'gzip'); +``` + ## Partitioned Write If you specify `PARTITION BY` expression when inserting data into `S3` table, a separate file is created for each partition value. Splitting the data into separate files helps to improve reading operations efficiency. diff --git a/docs/ru/development/browse-code.md b/docs/ru/development/browse-code.md deleted file mode 100644 index 640b1ac3693..00000000000 --- a/docs/ru/development/browse-code.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -slug: /ru/development/browse-code -sidebar_position: 72 -sidebar_label: "Навигация по коду ClickHouse" ---- - - -# Навигация по коду ClickHouse {#navigatsiia-po-kodu-clickhouse} - -Для навигации по коду онлайн доступен **Woboq**, он расположен [здесь](https://clickhouse.com/codebrowser/ClickHouse/src/index.html). В нём реализовано удобное перемещение между исходными файлами, семантическая подсветка, подсказки, индексация и поиск. Слепок кода обновляется ежедневно. - -Также вы можете просматривать исходники на [GitHub](https://github.com/ClickHouse/ClickHouse). - -Если вы интересуетесь, какую среду разработки выбрать для работы с ClickHouse, мы рекомендуем CLion, QT Creator, VSCode или KDevelop (с некоторыми предостережениями). Вы можете использовать свою любимую среду разработки, Vim и Emacs тоже считаются. diff --git a/docs/ru/engines/table-engines/integrations/kafka.md b/docs/ru/engines/table-engines/integrations/kafka.md index 37fc902e777..a5f091e1b23 100644 --- a/docs/ru/engines/table-engines/integrations/kafka.md +++ b/docs/ru/engines/table-engines/integrations/kafka.md @@ -87,14 +87,15 @@ SETTINGS Устаревший способ создания таблицы - :::note "Attention" - Не используйте этот метод в новых проектах. По возможности переключите старые проекты на метод, описанный выше. +:::note "Attention" +Не используйте этот метод в новых проектах. По возможности переключите старые проекты на метод, описанный выше. +::: ``` sql Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format [, kafka_row_delimiter, kafka_schema, kafka_num_consumers, kafka_skip_broken_messages]) ``` - ::: + ## Описание {#opisanie} diff --git a/docs/ru/engines/table-engines/mergetree-family/aggregatingmergetree.md b/docs/ru/engines/table-engines/mergetree-family/aggregatingmergetree.md index aa16113192e..86a275767a0 100644 --- a/docs/ru/engines/table-engines/mergetree-family/aggregatingmergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/aggregatingmergetree.md @@ -39,9 +39,10 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] Устаревший способ создания таблицы - :::note "Attention" - Не используйте этот способ в новых проектах и по возможности переведите старые проекты на способ описанный выше. - ::: +:::note "Attention" +Не используйте этот способ в новых проектах и по возможности переведите старые проекты на способ описанный выше.
+::: + ``` sql CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] ( diff --git a/docs/ru/engines/table-engines/mergetree-family/collapsingmergetree.md b/docs/ru/engines/table-engines/mergetree-family/collapsingmergetree.md index ecaaa6b8417..72b4725c6ed 100644 --- a/docs/ru/engines/table-engines/mergetree-family/collapsingmergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/collapsingmergetree.md @@ -43,9 +43,10 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] Устаревший способ создания таблицы - :::note "Attention" - Не используйте этот способ в новых проектах и по возможности переведите старые проекты на способ описанный выше. - ::: +:::note "Attention" +Не используйте этот способ в новых проектах и по возможности переведите старые проекты на способ описанный выше. +::: + ``` sql CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] ( @@ -59,7 +60,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] - `sign` — Имя столбца с типом строки: `1` — строка состояния, `-1` — строка отмены состояния. - Тип данных столбца — `Int8`. + Тип данных столбца — `Int8`. diff --git a/docs/ru/engines/table-engines/mergetree-family/graphitemergetree.md b/docs/ru/engines/table-engines/mergetree-family/graphitemergetree.md index 818f85f7e37..324a3fd1633 100644 --- a/docs/ru/engines/table-engines/mergetree-family/graphitemergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/graphitemergetree.md @@ -55,9 +55,10 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] Устаревший способ создания таблицы - :::note "Attention" - Не используйте этот способ в новых проектах и по возможности переведите старые проекты на способ описанный выше. - ::: +:::note "Attention" +Не используйте этот способ в новых проектах и по возможности переведите старые проекты на способ описанный выше. +::: + ``` sql CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] ( diff --git a/docs/ru/engines/table-engines/mergetree-family/mergetree.md b/docs/ru/engines/table-engines/mergetree-family/mergetree.md index e01e0006b87..f024d5f1985 100644 --- a/docs/ru/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/mergetree.md @@ -115,9 +115,10 @@ ENGINE MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDa Устаревший способ создания таблицы - :::note "Attention" - Не используйте этот способ в новых проектах и по возможности переведите старые проекты на способ, описанный выше. - ::: +:::note "Attention" +Не используйте этот способ в новых проектах и по возможности переведите старые проекты на способ, описанный выше. +::: + ``` sql CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] ( diff --git a/docs/ru/engines/table-engines/mergetree-family/summingmergetree.md b/docs/ru/engines/table-engines/mergetree-family/summingmergetree.md index 0d9d268fa46..7b69927e161 100644 --- a/docs/ru/engines/table-engines/mergetree-family/summingmergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/summingmergetree.md @@ -42,9 +42,10 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] Устаревший способ создания таблицы - :::note "Attention" - Не используйте этот способ в новых проектах и по возможности переведите старые проекты на способ описанный выше. - ::: +:::note "Attention" +Не используйте этот способ в новых проектах и по возможности переведите старые проекты на способ описанный выше. 
+::: + ``` sql CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] ( diff --git a/docs/ru/sql-reference/data-types/date.md b/docs/ru/sql-reference/data-types/date.md index 7254b82f461..185fe28d567 100644 --- a/docs/ru/sql-reference/data-types/date.md +++ b/docs/ru/sql-reference/data-types/date.md @@ -6,7 +6,7 @@ sidebar_label: Date # Date {#data-type-date} -Дата. Хранится в двух байтах в виде (беззнакового) числа дней, прошедших от 1970-01-01. Позволяет хранить значения от чуть больше, чем начала unix-эпохи до верхнего порога, определяющегося константой на этапе компиляции (сейчас - до 2149 года, последний полностью поддерживаемый год - 2148). +Дата. Хранится в двух байтах в виде (беззнакового) числа дней, прошедших от 1970-01-01. Позволяет хранить значения от чуть больше, чем начала unix-эпохи до верхнего порога, определяющегося константой на этапе компиляции (сейчас - до 2106 года, последний полностью поддерживаемый год - 2105). Диапазон значений: \[1970-01-01, 2149-06-06\]. diff --git a/docs/ru/sql-reference/functions/date-time-functions.md b/docs/ru/sql-reference/functions/date-time-functions.md index a7d2ce49fae..f18c2ea258a 100644 --- a/docs/ru/sql-reference/functions/date-time-functions.md +++ b/docs/ru/sql-reference/functions/date-time-functions.md @@ -272,15 +272,9 @@ SELECT toUnixTimestamp('2017-11-05 08:07:47', 'Asia/Tokyo') AS unix_timestamp; Поведение для * `enable_extended_results_for_datetime_functions = 0`: Функции `toStartOf*`, `toLastDayOfMonth`, `toMonday` возвращают `Date` или `DateTime`. Функции `toStartOfDay`, `toStartOfHour`, `toStartOfFifteenMinutes`, `toStartOfTenMinutes`, `toStartOfFiveMinutes`, `toStartOfMinute`, `timeSlot` возвращают `DateTime`. Хотя эти функции могут принимать значения типа `Date32` или `DateTime64` в качестве аргумента, при обработке аргумента вне нормального диапазона значений (`1970` - `2148` для `Date` и `1970-01-01 00:00:00`-`2106-02-07 08:28:15` для `DateTime`) будет получен некорректный результат. -В случае если значение аргумента вне нормального диапазона: - * `1970-01-01 (00:00:00)` будет возвращён для моментов времени до 1970 года, - * `2106-02-07 08:28:15` будет взят в качестве аргумента, если полученный аргумент превосходит данное значение и возвращаемый тип - `DateTime`, - * `2149-06-06` будет взят в качестве аргумента, если полученный аргумент превосходит данное значение и возвращаемый тип - `Date`, - * `2149-05-31` будет результатом функции `toLastDayOfMonth` при обработке аргумента больше `2149-05-31`. * `enable_extended_results_for_datetime_functions = 1`: * Функции `toStartOfYear`, `toStartOfISOYear`, `toStartOfQuarter`, `toStartOfMonth`, `toStartOfWeek`, `toLastDayOfMonth`, `toMonday` возвращают `Date` или `DateTime` если их аргумент `Date` или `DateTime` и они возвращают `Date32` или `DateTime64` если их аргумент `Date32` или `DateTime64`. * Функции `toStartOfDay`, `toStartOfHour`, `toStartOfFifteenMinutes`, `toStartOfTenMinutes`, `toStartOfFiveMinutes`, `toStartOfMinute`, `timeSlot` возвращают `DateTime` если их аргумент `Date` или `DateTime` и они возвращают `DateTime64` если их аргумент `Date32` или `DateTime64`. - ::: ## toStartOfYear {#tostartofyear} @@ -321,20 +315,20 @@ SELECT toStartOfISOYear(toDate('2017-01-01')) AS ISOYear20170101; Округляет дату или дату-с-временем до последнего числа месяца. Возвращается дата. -Если `toLastDayOfMonth` вызывается с аргументом типа `Date` большим чем 2149-05-31, то результат будет вычислен от аргумента 2149-05-31. 
+:::note "Attention" +Возвращаемое значение для некорректных дат зависит от реализации. ClickHouse может вернуть нулевую дату, выбросить исключение, или выполнить «естественное» перетекание дат между месяцами. +::: ## toMonday {#tomonday} Округляет дату или дату-с-временем вниз до ближайшего понедельника. -Частный случай: для дат `1970-01-01`, `1970-01-02`, `1970-01-03` и `1970-01-04` результатом будет `1970-01-01`. Возвращается дата. ## toStartOfWeek(t[,mode]) {#tostartofweek} Округляет дату или дату со временем до ближайшего воскресенья или понедельника в соответствии с mode. Возвращается дата. -Частный случай: для дат `1970-01-01`, `1970-01-02`, `1970-01-03` и `1970-01-04` (и `1970-01-05`, если `mode` равен `1`) результатом будет `1970-01-01`. -Аргумент `mode` работает точно так же, как аргумент mode [toWeek()](#toweek). Если аргумент mode опущен, то используется режим 0. +Аргумент mode работает точно так же, как аргумент mode [toWeek()](#toweek). Если аргумент mode опущен, то используется режим 0. ## toStartOfDay {#tostartofday} @@ -721,9 +715,9 @@ date_diff('unit', startdate, enddate, [timezone]) - `quarter` - `year` -- `startdate` — первая дата или дата со временем, которая вычитается из `enddate`. [Date](../../sql-reference/data-types/date.md) или [DateTime](../../sql-reference/data-types/datetime.md). +- `startdate` — первая дата или дата со временем, которая вычитается из `enddate`. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md). -- `enddate` — вторая дата или дата со временем, из которой вычитается `startdate`. [Date](../../sql-reference/data-types/date.md) или [DateTime](../../sql-reference/data-types/datetime.md). +- `enddate` — вторая дата или дата со временем, из которой вычитается `startdate`. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md). - `timezone` — [часовой пояс](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (необязательно). Если этот аргумент указан, то он применяется как для `startdate`, так и для `enddate`. Если этот аргумент не указан, то используются часовые пояса аргументов `startdate` и `enddate`. Если часовые пояса аргументов `startdate` и `enddate` не совпадают, то результат не определен. [String](../../sql-reference/data-types/string.md). @@ -975,8 +969,7 @@ SELECT now('Europe/Moscow'); ## timeSlots(StartTime, Duration,\[, Size\]) {#timeslotsstarttime-duration-size} Для интервала, начинающегося в `StartTime` и длящегося `Duration` секунд, возвращает массив моментов времени, кратных `Size`. Параметр `Size` указывать необязательно, по умолчанию он равен 1800 секундам (30 минутам) - необязательный параметр. -Данная функция может использоваться, например, для анализа количества просмотров страницы за соответствующую сессию. -Аргумент `StartTime` может иметь тип `DateTime` или `DateTime64`. В случае, если используется `DateTime`, аргументы `Duration` и `Size` должны иметь тип `UInt32`; Для DateTime64 они должны быть типа `Decimal64`. + Возвращает массив DateTime/DateTime64 (тип будет совпадать с типом параметра ’StartTime’). 
Для DateTime64 масштаб(scale) возвращаемой величины может отличаться от масштаба фргумента ’StartTime’ --- результат будет иметь наибольший масштаб среди всех данных аргументов. Пример использования: @@ -1085,7 +1078,7 @@ dateName(date_part, date) **Аргументы** - `date_part` — часть даты. Возможные значения: 'year', 'quarter', 'month', 'week', 'dayofyear', 'day', 'weekday', 'hour', 'minute', 'second'. [String](../../sql-reference/data-types/string.md). -- `date` — дата. [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md). +- `date` — дата. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md). - `timezone` — часовой пояс. Необязательный аргумент. [String](../../sql-reference/data-types/string.md). **Возвращаемое значение** @@ -1133,8 +1126,7 @@ SELECT FROM_UNIXTIME(423543535); └──────────────────────────┘ ``` -В случае, когда есть два аргумента: первый типа [Integer](../../sql-reference/data-types/int-uint.md) или [DateTime](../../sql-reference/data-types/datetime.md), а второй является строкой постоянного формата — функция работает также, как [formatDateTime](#formatdatetime), и возвращает значение типа [String](../../sql-reference/data-types/string.md#string). - +В случае, когда есть два или три аргумента: первый типа [Integer](../../sql-reference/data-types/int-uint.md), [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md), а второй является строкой постоянного формата и третий является строкой постоянной временной зоны — функция работает также, как [formatDateTime](#formatdatetime), и возвращает значение типа [String](../../sql-reference/data-types/string.md#string). Запрос: diff --git a/docs/ru/sql-reference/functions/other-functions.md b/docs/ru/sql-reference/functions/other-functions.md index 5c8584cd2a0..af21ccd6bed 100644 --- a/docs/ru/sql-reference/functions/other-functions.md +++ b/docs/ru/sql-reference/functions/other-functions.md @@ -568,7 +568,7 @@ ORDER BY c DESC ``` sql SELECT - transform(domain(Referer), ['yandex.ru', 'google.ru', 'vk.com'], ['www.yandex', 'example.com']) AS s, + transform(domain(Referer), ['yandex.ru', 'google.ru', 'vkontakte.ru'], ['www.yandex', 'example.com', 'vk.com']) AS s, count() AS c FROM test.hits GROUP BY domain(Referer) diff --git a/docs/ru/sql-reference/operators/in.md b/docs/ru/sql-reference/operators/in.md index 2b3d87a877f..fa679b890a7 100644 --- a/docs/ru/sql-reference/operators/in.md +++ b/docs/ru/sql-reference/operators/in.md @@ -122,9 +122,9 @@ FROM t_null Существует два варианта IN-ов с подзапросами (аналогично для JOIN-ов): обычный `IN` / `JOIN` и `GLOBAL IN` / `GLOBAL JOIN`. Они отличаются способом выполнения при распределённой обработке запроса. - :::note "Attention" - Помните, что алгоритмы, описанные ниже, могут работать иначе в зависимости от [настройки](../../operations/settings/settings.md) `distributed_product_mode`. - ::: +:::note "Attention" +Помните, что алгоритмы, описанные ниже, могут работать иначе в зависимости от [настройки](../../operations/settings/settings.md) `distributed_product_mode`. 
+::: При использовании обычного IN-а, запрос отправляется на удалённые серверы, и на каждом из них выполняются подзапросы в секциях `IN` / `JOIN`. При использовании `GLOBAL IN` / `GLOBAL JOIN-а`, сначала выполняются все подзапросы для `GLOBAL IN` / `GLOBAL JOIN-ов`, и результаты складываются во временные таблицы. Затем эти временные таблицы передаются на каждый удалённый сервер, и на них выполняются запросы, с использованием этих переданных временных данных. diff --git a/docs/ru/sql-reference/statements/misc.md b/docs/ru/sql-reference/statements/misc.md deleted file mode 100644 index 437215f20ce..00000000000 --- a/docs/ru/sql-reference/statements/misc.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -slug: /ru/sql-reference/statements/misc -sidebar_position: 41 ---- - -# Прочие виды запросов {#prochie-vidy-zaprosov} - -- [ATTACH](../../sql-reference/statements/attach.md) -- [CHECK TABLE](../../sql-reference/statements/check-table.md) -- [DESCRIBE TABLE](../../sql-reference/statements/describe-table.md) -- [DETACH](../../sql-reference/statements/detach.md) -- [DROP](../../sql-reference/statements/drop.md) -- [EXISTS](../../sql-reference/statements/exists.md) -- [KILL](../../sql-reference/statements/kill.md) -- [OPTIMIZE](../../sql-reference/statements/optimize.md) -- [RENAME](../../sql-reference/statements/rename.md) -- [SET](../../sql-reference/statements/set.md) -- [SET ROLE](../../sql-reference/statements/set-role.md) -- [TRUNCATE](../../sql-reference/statements/truncate.md) -- [USE](../../sql-reference/statements/use.md) - diff --git a/docs/zh/development/browse-code.md b/docs/zh/development/browse-code.md deleted file mode 100644 index 16382a94ed5..00000000000 --- a/docs/zh/development/browse-code.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -slug: /zh/development/browse-code -sidebar_position: 63 -sidebar_label: "\u6D4F\u89C8\u6E90\u4EE3\u7801" ---- - -# 浏览ClickHouse源代码 {#browse-clickhouse-source-code} - -您可以使用 **Woboq** 在线代码浏览器 [点击这里](https://clickhouse.com/codebrowser/ClickHouse/src/index.html). 
它提供了代码导航和语义突出显示、搜索和索引。 代码快照每天更新。 - -此外,您还可以像往常一样浏览源代码 [GitHub](https://github.com/ClickHouse/ClickHouse) - -如果你希望了解哪种IDE较好,我们推荐使用CLion,QT Creator,VS Code和KDevelop(有注意事项)。 您可以使用任何您喜欢的IDE。 Vim和Emacs也可以。 diff --git a/docs/zh/getting-started/example-datasets/cell-towers.mdx b/docs/zh/getting-started/example-datasets/cell-towers.mdx index ece13445210..9738680519a 100644 --- a/docs/zh/getting-started/example-datasets/cell-towers.mdx +++ b/docs/zh/getting-started/example-datasets/cell-towers.mdx @@ -1,9 +1,232 @@ --- slug: /zh/getting-started/example-datasets/cell-towers -sidebar_label: Cell Towers -title: "Cell Towers" +sidebar_label: 蜂窝信号塔 +sidebar_position: 3 +title: "蜂窝信号塔" --- -import Content from '@site/docs/en/getting-started/example-datasets/cell-towers.md'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import ActionsMenu from '@site/docs/en/_snippets/_service_actions_menu.md'; +import SQLConsoleDetail from '@site/docs/en/_snippets/_launch_sql_console.md'; + +该数据集来自 [OpenCellid](https://www.opencellid.org/) - 世界上最大的蜂窝信号塔的开放数据库。 + +截至 2021 年,它拥有超过 4000 万条关于全球蜂窝信号塔(GSM、LTE、UMTS 等)的记录及其地理坐标和元数据(国家代码、网络等)。 + +OpenCelliD 项目在 `Creative Commons Attribution-ShareAlike 4.0 International License` 协议下许可使用,我们根据相同许可条款重新分发此数据集的快照。登录后即可下载最新版本的数据集。 + + +## 获取数据集 {#get-the-dataset} + + + + +在 ClickHouse Cloud 上可以通过一个按钮实现通过 S3 上传此数据集。登录你的 ClickHouse Cloud 组织,或通过 [ClickHouse.cloud](https://clickhouse.cloud) 创建免费试用版。 + +从 **Sample data** 选项卡中选择 **Cell Towers** 数据集,然后选择 **Load data**: + +![加载数据集](@site/docs/en/_snippets/images/cloud-load-data-sample.png) + +检查 cell_towers 的表结构: + +```sql +DESCRIBE TABLE cell_towers +``` + + + + + + +1. 下载 2021 年 2 月以来的数据集快照:[cell_towers.csv.xz](https://datasets.clickhouse.com/cell_towers.csv.xz) (729 MB)。 + +2. 验证完整性(可选步骤): + +```bash +md5sum cell_towers.csv.xz +``` + +```response +8cf986f4a0d9f12c6f384a0e9192c908 cell_towers.csv.xz +``` + +3. 使用以下命令解压: + +```bash +xz -d cell_towers.csv.xz +``` + +4. 创建表: + +```sql +CREATE TABLE cell_towers +( + radio Enum8('' = 0, 'CDMA' = 1, 'GSM' = 2, 'LTE' = 3, 'NR' = 4, 'UMTS' = 5), + mcc UInt16, + net UInt16, + area UInt16, + cell UInt64, + unit Int16, + lon Float64, + lat Float64, + range UInt32, + samples UInt32, + changeable UInt8, + created DateTime, + updated DateTime, + averageSignal UInt8 +) +ENGINE = MergeTree ORDER BY (radio, mcc, net, created); +``` + +5. 插入数据集: + +```bash +clickhouse-client --query "INSERT INTO cell_towers FORMAT CSVWithNames" < cell_towers.csv +``` + + + + +## 查询示例 {#examples} + +1. 按类型划分的基站数量: + +```sql +SELECT radio, count() AS c FROM cell_towers GROUP BY radio ORDER BY c DESC +``` +```response +┌─radio─┬────────c─┐ +│ UMTS │ 20686487 │ +│ LTE │ 12101148 │ +│ GSM │ 9931312 │ +│ CDMA │ 556344 │ +│ NR │ 867 │ +└───────┴──────────┘ + +5 rows in set. Elapsed: 0.011 sec. Processed 43.28 million rows, 43.28 MB (3.83 billion rows/s., 3.83 GB/s.) +``` + +2. 各个[移动国家代码(MCC)](https://en.wikipedia.org/wiki/Mobile_country_code)对应的蜂窝信号塔数量: + +```sql +SELECT mcc, count() FROM cell_towers GROUP BY mcc ORDER BY count() DESC LIMIT 10 +``` +```response +┌─mcc─┬─count()─┐ +│ 310 │ 5024650 │ +│ 262 │ 2622423 │ +│ 250 │ 1953176 │ +│ 208 │ 1891187 │ +│ 724 │ 1836150 │ +│ 404 │ 1729151 │ +│ 234 │ 1618924 │ +│ 510 │ 1353998 │ +│ 440 │ 1343355 │ +│ 311 │ 1332798 │ +└─────┴─────────┘ + +10 rows in set. Elapsed: 0.019 sec. Processed 43.28 million rows, 86.55 MB (2.33 billion rows/s., 4.65 GB/s.) 
+``` + +排名靠前的国家是:美国、德国和俄罗斯。 + +你可以通过在 ClickHouse 中创建一个 [External Dictionary](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) 来解码这些值。 + +## 用例:合并地理数据 {#use-case} + +使用 `pointInPolygon` 函数。 + +1. 创建一个用于存储多边形的表: + + + + +```sql +CREATE TABLE moscow (polygon Array(Tuple(Float64, Float64))) +ORDER BY polygon; +``` + + + + +```sql +CREATE TEMPORARY TABLE +moscow (polygon Array(Tuple(Float64, Float64))); +``` + + + + +2. 以下点大致上构造了莫斯科的地理围栏(除“新莫斯科”外): + +```sql +INSERT INTO moscow VALUES ([(37.84172564285271, 55.78000432402266), +(37.8381207618713, 55.775874525970494), (37.83979446823122, 55.775626746008065), (37.84243326983639, 55.77446586811748), (37.84262672750849, 55.771974101091104), (37.84153238623039, 55.77114545193181), (37.841124690460184, 55.76722010265554), +(37.84239076983644, 55.76654891107098), (37.842283558197025, 55.76258709833121), (37.8421759312134, 55.758073999993734), (37.84198330422974, 55.75381499999371), (37.8416827275085, 55.749277102484484), (37.84157576190186, 55.74794544108413), +(37.83897929098507, 55.74525257875241), (37.83739676451868, 55.74404373042019), (37.838732481460525, 55.74298009816793), (37.841183997352545, 55.743060321833575), (37.84097476190185, 55.73938799999373), (37.84048155819702, 55.73570799999372), +(37.840095812164286, 55.73228210777237), (37.83983814285274, 55.73080491981639), (37.83846476321406, 55.729799917464675), (37.83835745269769, 55.72919751082619), (37.838636380279524, 55.72859509486539), (37.8395161005249, 55.727705075632784), +(37.83897964285276, 55.722727886185154), (37.83862557539366, 55.72034817326636), (37.83559735744853, 55.71944437307499), (37.835370708803126, 55.71831419154461), (37.83738169402022, 55.71765218986692), (37.83823396494291, 55.71691750159089), +(37.838056931213345, 55.71547311301385), (37.836812846557606, 55.71221445615604), (37.83522525396725, 55.709331054395555), (37.83269301586908, 55.70953687463627), (37.829667367706236, 55.70903403789297), (37.83311126588435, 55.70552351822608), +(37.83058993121339, 55.70041317726053), (37.82983872750851, 55.69883771404813), (37.82934501586913, 55.69718947487017), (37.828926414016685, 55.69504441658371), (37.82876530422971, 55.69287499999378), (37.82894754100031, 55.690759754047335), +(37.827697554878185, 55.68951421135665), (37.82447346292115, 55.68965045405069), (37.83136543914793, 55.68322046195302), (37.833554015869154, 55.67814012759211), (37.83544184655761, 55.67295011628339), (37.837480388885474, 55.6672498719639), +(37.838960677246064, 55.66316274139358), (37.83926093121332, 55.66046999999383), (37.839025050262435, 55.65869897264431), (37.83670784390257, 55.65794084879904), (37.835656529083245, 55.65694309303843), (37.83704060449217, 55.65689306460552), +(37.83696819873806, 55.65550363526252), (37.83760389616388, 55.65487847246661), (37.83687972750851, 55.65356745541324), (37.83515216004943, 55.65155951234079), (37.83312418518067, 55.64979413590619), (37.82801726983639, 55.64640836412121), +(37.820614174591, 55.64164525405531), (37.818908190475426, 55.6421883258084), (37.81717543386075, 55.64112490388471), (37.81690987037274, 55.63916106913107), (37.815099354492155, 55.637925371757085), (37.808769150787356, 55.633798276884455), +(37.80100123544311, 55.62873670012244), (37.79598013491824, 55.62554336109055), (37.78634567724606, 55.62033499605651), (37.78334147619623, 55.618768681480326), (37.77746201055901, 55.619855533402706), (37.77527329626457, 55.61909966711279), +(37.77801986242668, 55.618770300976294), (37.778212973541216, 55.617257701952106), 
(37.77784818518065, 55.61574504433011), (37.77016867724609, 55.61148576294007), (37.760191219573976, 55.60599579539028), (37.75338926983641, 55.60227892751446), +(37.746329965606634, 55.59920577639331), (37.73939925396728, 55.59631430313617), (37.73273665739439, 55.5935318803559), (37.7299954450912, 55.59350760316188), (37.7268679946899, 55.59469840523759), (37.72626726983634, 55.59229549697373), +(37.7262673598022, 55.59081598950582), (37.71897193121335, 55.5877595845419), (37.70871550793456, 55.58393177431724), (37.700497489410374, 55.580917323756644), (37.69204305026244, 55.57778089778455), (37.68544477378839, 55.57815154690915), +(37.68391050793454, 55.57472945079756), (37.678803592590306, 55.57328235936491), (37.6743402539673, 55.57255251445782), (37.66813862698363, 55.57216388774464), (37.617927457672096, 55.57505691895805), (37.60443099999999, 55.5757737568051), +(37.599683515869145, 55.57749105910326), (37.59754177842709, 55.57796291823627), (37.59625834786988, 55.57906686095235), (37.59501783265684, 55.57746616444403), (37.593090671936025, 55.57671634534502), (37.587018007904, 55.577944600233785), +(37.578692203704804, 55.57982895000019), (37.57327546607398, 55.58116294118248), (37.57385012109279, 55.581550362779), (37.57399562266922, 55.5820107079112), (37.5735356072979, 55.58226289171689), (37.57290393054962, 55.582393529795155), +(37.57037722355653, 55.581919415056234), (37.5592298306885, 55.584471614867844), (37.54189249206543, 55.58867650795186), (37.5297256269836, 55.59158133551745), (37.517837865081766, 55.59443656218868), (37.51200186508174, 55.59635625174229), +(37.506808949737554, 55.59907823904434), (37.49820432275389, 55.6062944994944), (37.494406071441674, 55.60967103463367), (37.494760001358024, 55.61066689753365), (37.49397137107085, 55.61220931698269), (37.49016528606031, 55.613417718449064), +(37.48773249206542, 55.61530616333343), (37.47921386508177, 55.622640129112334), (37.470652153442394, 55.62993723476164), (37.46273446298218, 55.6368075123157), (37.46350692265317, 55.64068225239439), (37.46050283203121, 55.640794546982576), +(37.457627470916734, 55.64118904154646), (37.450718034393326, 55.64690488145138), (37.44239252645875, 55.65397824729769), (37.434587576721185, 55.66053543155961), (37.43582144975277, 55.661693766520735), (37.43576786245721, 55.662755031737014), +(37.430982915344174, 55.664610641628116), (37.428547447097685, 55.66778515273695), (37.42945134592044, 55.668633314343566), (37.42859571562949, 55.66948145750025), (37.4262836402282, 55.670813882451405), (37.418709037048295, 55.6811141674414), +(37.41922139651101, 55.68235377885389), (37.419218771842885, 55.68359335082235), (37.417196501327446, 55.684375235224735), (37.41607020370478, 55.68540557585352), (37.415640857147146, 55.68686637150793), (37.414632153442334, 55.68903015131686), +(37.413344899475064, 55.690896881757396), (37.41171432275391, 55.69264232162232), (37.40948282275393, 55.69455101638112), (37.40703674603271, 55.69638690385348), (37.39607169577025, 55.70451821283731), (37.38952706878662, 55.70942491932811), +(37.387778313491815, 55.71149057784176), (37.39049275399779, 55.71419814298992), (37.385557272491454, 55.7155489617061), (37.38388335714726, 55.71849856042102), (37.378368238098155, 55.7292763261685), (37.37763597123337, 55.730845879211614), +(37.37890062088197, 55.73167906388319), (37.37750451918789, 55.734703664681774), (37.375610832015965, 55.734851959522246), (37.3723813571472, 55.74105626086403), (37.37014935714723, 55.746115620904355), (37.36944173016362, 
55.750883999993725), +(37.36975304365541, 55.76335905525834), (37.37244070571134, 55.76432079697595), (37.3724259757175, 55.76636979670426), (37.369922155757884, 55.76735417953104), (37.369892695770275, 55.76823419316575), (37.370214730163575, 55.782312184391266), +(37.370493611114505, 55.78436801120489), (37.37120164550783, 55.78596427165359), (37.37284851456452, 55.7874378183096), (37.37608325135799, 55.7886695054807), (37.3764587460632, 55.78947647305964), (37.37530000265506, 55.79146512926804), +(37.38235915344241, 55.79899647809345), (37.384344043655396, 55.80113596939471), (37.38594269577028, 55.80322699999366), (37.38711208598329, 55.804919036911976), (37.3880239841309, 55.806610999993666), (37.38928977249147, 55.81001864976979), +(37.39038389947512, 55.81348641242801), (37.39235781481933, 55.81983538336746), (37.393709457672124, 55.82417822811877), (37.394685720901464, 55.82792275755836), (37.39557615344238, 55.830447148154136), (37.39844478226658, 55.83167107969975), +(37.40019761214057, 55.83151823557964), (37.400398790382326, 55.83264967594742), (37.39659544313046, 55.83322180909622), (37.39667059524539, 55.83402792148566), (37.39682089947515, 55.83638877400216), (37.39643489154053, 55.83861656112751), +(37.3955338994751, 55.84072348043264), (37.392680272491454, 55.84502158126453), (37.39241188227847, 55.84659117913199), (37.392529730163616, 55.84816071336481), (37.39486835714723, 55.85288092980303), (37.39873052645878, 55.859893456073635), +(37.40272161111449, 55.86441833633205), (37.40697072750854, 55.867579567544375), (37.410007082016016, 55.868369880337), (37.4120992989502, 55.86920843741314), (37.412668021163924, 55.87055369615854), (37.41482461111453, 55.87170587948249), +(37.41862266137694, 55.873183961039565), (37.42413732540892, 55.874879126654704), (37.4312182698669, 55.875614937236705), (37.43111093783558, 55.8762723478417), (37.43332105622856, 55.87706546369396), (37.43385747619623, 55.87790681284802), +(37.441303050262405, 55.88027084462084), (37.44747234260555, 55.87942070143253), (37.44716141796871, 55.88072960917233), (37.44769797085568, 55.88121221323979), (37.45204320500181, 55.882080694420715), (37.45673176190186, 55.882346110794586), +(37.463383999999984, 55.88252729504517), (37.46682797486874, 55.88294937719063), (37.470014457672086, 55.88361266759345), (37.47751410450743, 55.88546991372396), (37.47860317658232, 55.88534929207307), (37.48165826025772, 55.882563306475106), +(37.48316434442331, 55.8815803226785), (37.483831555817645, 55.882427612793315), (37.483182967125686, 55.88372791409729), (37.483092277908824, 55.88495581062434), (37.4855716508179, 55.8875561994203), (37.486440636245746, 55.887827444039566), +(37.49014203439328, 55.88897899871799), (37.493210285705544, 55.890208937135604), (37.497512451065035, 55.891342397444696), (37.49780744510645, 55.89174030252967), (37.49940333499519, 55.89239745507079), (37.50018383334346, 55.89339220941865), +(37.52421672750851, 55.903869074155224), (37.52977457672118, 55.90564076517974), (37.53503220370484, 55.90661661218259), (37.54042858064267, 55.90714113744566), (37.54320461007303, 55.905645048442985), (37.545686966066306, 55.906608607018505), +(37.54743976120755, 55.90788552162358), (37.55796999999999, 55.90901557907218), (37.572711542327866, 55.91059395704873), (37.57942799999998, 55.91073854155573), (37.58502865872187, 55.91009969268444), (37.58739968913264, 55.90794809960554), +(37.59131567193598, 55.908713267595054), (37.612687423278814, 55.902866854295375), (37.62348079629517, 55.90041967242986), 
(37.635797880950896, 55.898141151686396), (37.649487626983664, 55.89639275532968), (37.65619302513125, 55.89572360207488), +(37.66294133862307, 55.895295577183965), (37.66874564418033, 55.89505457604897), (37.67375601586915, 55.89254677027454), (37.67744661901856, 55.8947775867987), (37.688347, 55.89450045676125), (37.69480554232789, 55.89422926332761), +(37.70107096560668, 55.89322256101114), (37.705962965606716, 55.891763491662616), (37.711885134918205, 55.889110234998974), (37.71682005026245, 55.886577568759876), (37.7199315476074, 55.88458159806678), (37.72234560316464, 55.882281005794134), +(37.72364385977171, 55.8809452036196), (37.725371142837474, 55.8809722706006), (37.727870902099546, 55.88037213862385), (37.73394330422971, 55.877941504088696), (37.745339592590376, 55.87208120378722), (37.75525267724611, 55.86703807949492), +(37.76919976190188, 55.859821640197474), (37.827835219574, 55.82962968399116), (37.83341438888553, 55.82575289922351), (37.83652584655761, 55.82188784027888), (37.83809213491821, 55.81612575504693), (37.83605359521481, 55.81460347077685), +(37.83632178569025, 55.81276696067908), (37.838623105812026, 55.811486181656385), (37.83912198147584, 55.807329380532785), (37.839079078033414, 55.80510270463816), (37.83965844708251, 55.79940712529036), (37.840581150787344, 55.79131399999368), +(37.84172564285271, 55.78000432402266)]); +``` + +3. 检查莫斯科有多少个蜂窝信号塔: + +```sql +SELECT count() FROM cell_towers +WHERE pointInPolygon((lon, lat), (SELECT * FROM moscow)) +``` +```response +┌─count()─┐ +│ 310463 │ +└─────────┘ + +1 rows in set. Elapsed: 0.067 sec. Processed 43.28 million rows, 692.42 MB (645.83 million rows/s., 10.33 GB/s.) +``` + +虽然不能创建临时表,但此数据集仍可在 [Playground](https://play.clickhouse.com/play?user=play) 中进行交互式的请求, [example](https://play.clickhouse.com/play?user=play#U0VMRUNUIG1jYywgY291bnQoKSBGUk9NIGNlbGxfdG93ZXJzIEdST1VQIEJZIG1jYyBPUkRFUiBCWSBjb3VudCgpIERFU0M=). - diff --git a/docs/zh/getting-started/example-datasets/menus.mdx b/docs/zh/getting-started/example-datasets/menus.mdx index 250b8a4cd37..10e9f2bd318 100644 --- a/docs/zh/getting-started/example-datasets/menus.mdx +++ b/docs/zh/getting-started/example-datasets/menus.mdx @@ -1,9 +1,352 @@ ---- -slug: /zh/getting-started/example-datasets/menus -sidebar_label: New York Public Library "What's on the Menu?" 
Dataset -title: "New York Public Library \"What's on the Menu?\" Dataset" +--- +slug: /zh/getting-started/example-datasets/menus +sidebar_label: '纽约公共图书馆“菜单上有什么?”数据集' +title: '纽约公共图书馆“菜单上有什么?”数据集' --- -import Content from '@site/docs/en/getting-started/example-datasets/menus.md'; +该数据集由纽约公共图书馆创建。其中含有有关酒店、餐馆和咖啡馆的菜单上的菜肴及其价格的历史数据。 - +来源:http://menus.nypl.org/data +数据为开放数据。 + +数据来自于图书馆中的档案,因此可能不完整,以至于难以进行统计分析。尽管如此,该数据集也是非常有意思的。数据集中只有 130 万条关于菜单中的菜肴的记录 - 这对于 ClickHouse 来说是一个非常小的数据量,但这仍是一个很好的例子。 + +## 下载数据集 {#download-dataset} + +运行命令: + +```bash +wget https://s3.amazonaws.com/menusdata.nypl.org/gzips/2021_08_01_07_01_17_data.tgz +``` + +如果有需要可以使用 http://menus.nypl.org/data 中的最新链接。下载的大小约为 35 MB。 + +## 解压数据集 {#unpack-dataset} + +```bash +tar xvf 2021_08_01_07_01_17_data.tgz +``` + +解压后的的大小约为 150 MB。 + +数据集由四个表组成: + +- `Menu` - 有关菜单的信息,其中包含:餐厅名称,看到菜单的日期等 +- `Dish` - 有关菜肴的信息,其中包含:菜肴名称以及一些特征。 +- `MenuPage` - 有关菜单中页面的信息,每个页面都属于某个 `Menu`。 +- `MenuItem` - 菜单项。某个菜单页面上的菜肴及其价格:指向 `Dish` 和 `MenuPage`的链接。 + +## 创建表 {#create-tables} + +使用 [Decimal](/docs/zh/sql-reference/data-types/decimal.md) 数据类型来存储价格。 + +```sql +CREATE TABLE dish +( + id UInt32, + name String, + description String, + menus_appeared UInt32, + times_appeared Int32, + first_appeared UInt16, + last_appeared UInt16, + lowest_price Decimal64(3), + highest_price Decimal64(3) +) ENGINE = MergeTree ORDER BY id; + +CREATE TABLE menu +( + id UInt32, + name String, + sponsor String, + event String, + venue String, + place String, + physical_description String, + occasion String, + notes String, + call_number String, + keywords String, + language String, + date String, + location String, + location_type String, + currency String, + currency_symbol String, + status String, + page_count UInt16, + dish_count UInt16 +) ENGINE = MergeTree ORDER BY id; + +CREATE TABLE menu_page +( + id UInt32, + menu_id UInt32, + page_number UInt16, + image_id String, + full_height UInt16, + full_width UInt16, + uuid UUID +) ENGINE = MergeTree ORDER BY id; + +CREATE TABLE menu_item +( + id UInt32, + menu_page_id UInt32, + price Decimal64(3), + high_price Decimal64(3), + dish_id UInt32, + created_at DateTime, + updated_at DateTime, + xpos Float64, + ypos Float64 +) ENGINE = MergeTree ORDER BY id; +``` + +## 导入数据 {#import-data} + +执行以下命令将数据导入 ClickHouse: + +```bash +clickhouse-client --format_csv_allow_single_quotes 0 --input_format_null_as_default 0 --query "INSERT INTO dish FORMAT CSVWithNames" < Dish.csv +clickhouse-client --format_csv_allow_single_quotes 0 --input_format_null_as_default 0 --query "INSERT INTO menu FORMAT CSVWithNames" < Menu.csv +clickhouse-client --format_csv_allow_single_quotes 0 --input_format_null_as_default 0 --query "INSERT INTO menu_page FORMAT CSVWithNames" < MenuPage.csv +clickhouse-client --format_csv_allow_single_quotes 0 --input_format_null_as_default 0 --date_time_input_format best_effort --query "INSERT INTO menu_item FORMAT CSVWithNames" < MenuItem.csv +``` + +因为数据由带有标题的 CSV 表示,所以使用 [CSVWithNames](/docs/zh/interfaces/formats.md#csvwithnames) 格式。 + +因为只有双引号用于数据字段,单引号可以在值内,所以禁用了 `format_csv_allow_single_quotes` 以避免混淆 CSV 解析器。 + +因为数据中没有 [NULL](/docs/zh/sql-reference/syntax.md#null-literal) 值,所以禁用 [input_format_null_as_default](/docs/zh/operations/settings/settings.md#settings-input-format-null-as-default)。不然 ClickHouse 将会尝试解析 `\N` 序列,并可能与数据中的 `\` 混淆。 + +设置 [date_time_input_format best_effort](/docs/zh/operations/settings/settings.md#settings-date_time_input_format) 以便解析各种格式的 
[DateTime](/docs/zh/sql-reference/data-types/datetime.md)字段。例如,识别像“2000-01-01 01:02”这样没有秒数的 ISO-8601 时间字符串。如果没有此设置,则仅允许使用固定的 DateTime 格式。 + +## 非规范化数据 {#denormalize-data} + +数据以 [规范化形式] (https://en.wikipedia.org/wiki/Database_normalization#Normal_forms) 在多个表格中呈现。这意味着如果你想进行如查询菜单项中的菜名这类的查询,则必须执行 [JOIN](/docs/zh/sql-reference/statements/select/join.md#select-join)。在典型的分析任务中,预先处理联接的数据以避免每次都执行“联接”会更有效率。这中操作被称为“非规范化”数据。 + +我们将创建一个表“menu_item_denorm”,其中将包含所有联接在一起的数据: + +```sql +CREATE TABLE menu_item_denorm +ENGINE = MergeTree ORDER BY (dish_name, created_at) +AS SELECT + price, + high_price, + created_at, + updated_at, + xpos, + ypos, + dish.id AS dish_id, + dish.name AS dish_name, + dish.description AS dish_description, + dish.menus_appeared AS dish_menus_appeared, + dish.times_appeared AS dish_times_appeared, + dish.first_appeared AS dish_first_appeared, + dish.last_appeared AS dish_last_appeared, + dish.lowest_price AS dish_lowest_price, + dish.highest_price AS dish_highest_price, + menu.id AS menu_id, + menu.name AS menu_name, + menu.sponsor AS menu_sponsor, + menu.event AS menu_event, + menu.venue AS menu_venue, + menu.place AS menu_place, + menu.physical_description AS menu_physical_description, + menu.occasion AS menu_occasion, + menu.notes AS menu_notes, + menu.call_number AS menu_call_number, + menu.keywords AS menu_keywords, + menu.language AS menu_language, + menu.date AS menu_date, + menu.location AS menu_location, + menu.location_type AS menu_location_type, + menu.currency AS menu_currency, + menu.currency_symbol AS menu_currency_symbol, + menu.status AS menu_status, + menu.page_count AS menu_page_count, + menu.dish_count AS menu_dish_count +FROM menu_item + JOIN dish ON menu_item.dish_id = dish.id + JOIN menu_page ON menu_item.menu_page_id = menu_page.id + JOIN menu ON menu_page.menu_id = menu.id; +``` + +## 验证数据 {#validate-data} + +请求: + +```sql +SELECT count() FROM menu_item_denorm; +``` + +结果: + +```text +┌─count()─┐ +│ 1329175 │ +└─────────┘ +``` + +## 运行一些查询 {#run-queries} + +### 菜品的平均历史价格 {#query-averaged-historical-prices} + +请求: + +```sql +SELECT + round(toUInt32OrZero(extract(menu_date, '^\\d{4}')), -1) AS d, + count(), + round(avg(price), 2), + bar(avg(price), 0, 100, 100) +FROM menu_item_denorm +WHERE (menu_currency = 'Dollars') AND (d > 0) AND (d < 2022) +GROUP BY d +ORDER BY d ASC; +``` + +结果: + +```text +┌────d─┬─count()─┬─round(avg(price), 2)─┬─bar(avg(price), 0, 100, 100)─┐ +│ 1850 │ 618 │ 1.5 │ █▍ │ +│ 1860 │ 1634 │ 1.29 │ █▎ │ +│ 1870 │ 2215 │ 1.36 │ █▎ │ +│ 1880 │ 3909 │ 1.01 │ █ │ +│ 1890 │ 8837 │ 1.4 │ █▍ │ +│ 1900 │ 176292 │ 0.68 │ ▋ │ +│ 1910 │ 212196 │ 0.88 │ ▊ │ +│ 1920 │ 179590 │ 0.74 │ ▋ │ +│ 1930 │ 73707 │ 0.6 │ ▌ │ +│ 1940 │ 58795 │ 0.57 │ ▌ │ +│ 1950 │ 41407 │ 0.95 │ ▊ │ +│ 1960 │ 51179 │ 1.32 │ █▎ │ +│ 1970 │ 12914 │ 1.86 │ █▋ │ +│ 1980 │ 7268 │ 4.35 │ ████▎ │ +│ 1990 │ 11055 │ 6.03 │ ██████ │ +│ 2000 │ 2467 │ 11.85 │ ███████████▋ │ +│ 2010 │ 597 │ 25.66 │ █████████████████████████▋ │ +└──────┴─────────┴──────────────────────┴──────────────────────────────┘ +``` + +带上一粒盐。 + +### 汉堡价格 {#query-burger-prices} + +请求: + +```sql +SELECT + round(toUInt32OrZero(extract(menu_date, '^\\d{4}')), -1) AS d, + count(), + round(avg(price), 2), + bar(avg(price), 0, 50, 100) +FROM menu_item_denorm +WHERE (menu_currency = 'Dollars') AND (d > 0) AND (d < 2022) AND (dish_name ILIKE '%burger%') +GROUP BY d +ORDER BY d ASC; +``` + +结果: + +```text +┌────d─┬─count()─┬─round(avg(price), 2)─┬─bar(avg(price), 0, 50, 100)───────────┐ +│ 1880 │ 2 │ 0.42 │ ▋ │ +│ 1890 │ 7 │ 0.85 │ 
█▋ │ +│ 1900 │ 399 │ 0.49 │ ▊ │ +│ 1910 │ 589 │ 0.68 │ █▎ │ +│ 1920 │ 280 │ 0.56 │ █ │ +│ 1930 │ 74 │ 0.42 │ ▋ │ +│ 1940 │ 119 │ 0.59 │ █▏ │ +│ 1950 │ 134 │ 1.09 │ ██▏ │ +│ 1960 │ 272 │ 0.92 │ █▋ │ +│ 1970 │ 108 │ 1.18 │ ██▎ │ +│ 1980 │ 88 │ 2.82 │ █████▋ │ +│ 1990 │ 184 │ 3.68 │ ███████▎ │ +│ 2000 │ 21 │ 7.14 │ ██████████████▎ │ +│ 2010 │ 6 │ 18.42 │ ████████████████████████████████████▋ │ +└──────┴─────────┴──────────────────────┴───────────────────────────────────────┘ +``` + +###伏特加{#query-vodka} + +请求: + +```sql +SELECT + round(toUInt32OrZero(extract(menu_date, '^\\d{4}')), -1) AS d, + count(), + round(avg(price), 2), + bar(avg(price), 0, 50, 100) +FROM menu_item_denorm +WHERE (menu_currency IN ('Dollars', '')) AND (d > 0) AND (d < 2022) AND (dish_name ILIKE '%vodka%') +GROUP BY d +ORDER BY d ASC; +``` + +结果: + +```text +┌────d─┬─count()─┬─round(avg(price), 2)─┬─bar(avg(price), 0, 50, 100)─┐ +│ 1910 │ 2 │ 0 │ │ +│ 1920 │ 1 │ 0.3 │ ▌ │ +│ 1940 │ 21 │ 0.42 │ ▋ │ +│ 1950 │ 14 │ 0.59 │ █▏ │ +│ 1960 │ 113 │ 2.17 │ ████▎ │ +│ 1970 │ 37 │ 0.68 │ █▎ │ +│ 1980 │ 19 │ 2.55 │ █████ │ +│ 1990 │ 86 │ 3.6 │ ███████▏ │ +│ 2000 │ 2 │ 3.98 │ ███████▊ │ +└──────┴─────────┴──────────────────────┴─────────────────────────────┘ +``` + +要查询 `Vodka`,必须声明通过 `ILIKE '%vodka%'` 进行查询。 + +### 鱼子酱 {#query-caviar} + +列出鱼子酱的价格。另外,列出任何带有鱼子酱的菜肴的名称。 + +请求: + +```sql +SELECT + round(toUInt32OrZero(extract(menu_date, '^\\d{4}')), -1) AS d, + count(), + round(avg(price), 2), + bar(avg(price), 0, 50, 100), + any(dish_name) +FROM menu_item_denorm +WHERE (menu_currency IN ('Dollars', '')) AND (d > 0) AND (d < 2022) AND (dish_name ILIKE '%caviar%') +GROUP BY d +ORDER BY d ASC; +``` + +结果: + +```text +┌────d─┬─count()─┬─round(avg(price), 2)─┬─bar(avg(price), 0, 50, 100)──────┬─any(dish_name)──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ +│ 1090 │ 1 │ 0 │ │ Caviar │ +│ 1880 │ 3 │ 0 │ │ Caviar │ +│ 1890 │ 39 │ 0.59 │ █▏ │ Butter and caviar │ +│ 1900 │ 1014 │ 0.34 │ ▋ │ Anchovy Caviar on Toast │ +│ 1910 │ 1588 │ 1.35 │ ██▋ │ 1/1 Brötchen Caviar │ +│ 1920 │ 927 │ 1.37 │ ██▋ │ ASTRAKAN CAVIAR │ +│ 1930 │ 289 │ 1.91 │ ███▋ │ Astrachan caviar │ +│ 1940 │ 201 │ 0.83 │ █▋ │ (SPECIAL) Domestic Caviar Sandwich │ +│ 1950 │ 81 │ 2.27 │ ████▌ │ Beluga Caviar │ +│ 1960 │ 126 │ 2.21 │ ████▍ │ Beluga Caviar │ +│ 1970 │ 105 │ 0.95 │ █▊ │ BELUGA MALOSSOL CAVIAR AMERICAN DRESSING │ +│ 1980 │ 12 │ 7.22 │ ██████████████▍ │ Authentic Iranian Beluga Caviar the world's finest black caviar presented in ice garni and a sampling of chilled 100° Russian vodka │ +│ 1990 │ 74 │ 14.42 │ ████████████████████████████▋ │ Avocado Salad, Fresh cut avocado with caviare │ +│ 2000 │ 3 │ 7.82 │ ███████████████▋ │ Aufgeschlagenes Kartoffelsueppchen mit Forellencaviar │ +│ 2010 │ 6 │ 15.58 │ ███████████████████████████████▏ │ "OYSTERS AND PEARLS" "Sabayon" of Pearl Tapioca with Island Creek Oysters and Russian Sevruga Caviar │ +└──────┴─────────┴──────────────────────┴──────────────────────────────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +至少他们有伏特加配鱼子酱。真棒。 + +## 在线 Playground{#playground} + +此数据集已经上传到了 ClickHouse Playground 
中,[example](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICByb3VuZCh0b1VJbnQzMk9yWmVybyhleHRyYWN0KG1lbnVfZGF0ZSwgJ15cXGR7NH0nKSksIC0xKSBBUyBkLAogICAgY291bnQoKSwKICAgIHJvdW5kKGF2ZyhwcmljZSksIDIpLAogICAgYmFyKGF2ZyhwcmljZSksIDAsIDUwLCAxMDApLAogICAgYW55KGRpc2hfbmFtZSkKRlJPTSBtZW51X2l0ZW1fZGVub3JtCldIRVJFIChtZW51X2N1cnJlbmN5IElOICgnRG9sbGFycycsICcnKSkgQU5EIChkID4gMCkgQU5EIChkIDwgMjAyMikgQU5EIChkaXNoX25hbWUgSUxJS0UgJyVjYXZpYXIlJykKR1JPVVAgQlkgZApPUkRFUiBCWSBkIEFTQw==)。 diff --git a/docs/zh/getting-started/example-datasets/opensky.mdx b/docs/zh/getting-started/example-datasets/opensky.mdx index e8d5367e970..92cd104e06e 100644 --- a/docs/zh/getting-started/example-datasets/opensky.mdx +++ b/docs/zh/getting-started/example-datasets/opensky.mdx @@ -1,9 +1,416 @@ ---- +--- slug: /zh/getting-started/example-datasets/opensky -sidebar_label: Air Traffic Data -title: "Crowdsourced air traffic data from The OpenSky Network 2020" +sidebar_label: 空中交通数据 +description: 该数据集中的数据是从完整的 OpenSky 数据集中衍生而来的,对其中的数据进行了必要的清理,用以展示在 COVID-19 期间空中交通的发展。 +title: "来自 The OpenSky Network 2020 的众包空中交通数据" --- -import Content from '@site/docs/en/getting-started/example-datasets/opensky.md'; +该数据集中的数据是从完整的 OpenSky 数据集中派生和清理的,以说明 COVID-19 大流行期间空中交通的发展。它涵盖了自 2019 年 1 月 1 日以来该网络中 2500 多名成员观测到的所有航班。直到 COVID-19 大流行结束,更多数据将定期的更新到数据集中。 - +来源:https://zenodo.org/record/5092942#.YRBCyTpRXYd + +Martin Strohmeier、Xavier Olive、Jannis Lübbe、Matthias Schäfer 和 Vincent Lenders “来自 OpenSky 网络 2019-2020 的众包空中交通数据”地球系统科学数据 13(2),2021 https://doi.org/10.5194/essd- 13-357-2021 + +## 下载数据集 {#download-dataset} + +运行命令: + +```bash +wget -O- https://zenodo.org/record/5092942 | grep -oP 'https://zenodo.org/record/5092942/files/flightlist_\d+_\d+\.csv\.gz' | xargs wget +``` + +Download will take about 2 minutes with good internet connection. There are 30 files with total size of 4.3 GB. 
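+
+下载完成后,可以用类似下面的命令粗略核对文件数量和总大小是否与上述说明一致(这只是一个可选的检查示例,假设下载得到的 `flightlist_*.csv.gz` 文件保存在当前目录):
+
+```bash
+# 统计下载到的文件数量,预期为 30 个
+ls -1 flightlist_*.csv.gz | wc -l
+
+# 查看这些文件的总大小,预期约为 4.3 GB
+du -ch flightlist_*.csv.gz | tail -n 1
+```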
+ +## 创建表 {#create-table} + +```sql +CREATE TABLE opensky +( + callsign String, + number String, + icao24 String, + registration String, + typecode String, + origin String, + destination String, + firstseen DateTime, + lastseen DateTime, + day DateTime, + latitude_1 Float64, + longitude_1 Float64, + altitude_1 Float64, + latitude_2 Float64, + longitude_2 Float64, + altitude_2 Float64 +) ENGINE = MergeTree ORDER BY (origin, destination, callsign); +``` + +## 导入数据 {#import-data} + +将数据并行导入到 ClickHouse: + +```bash +ls -1 flightlist_*.csv.gz | xargs -P100 -I{} bash -c 'gzip -c -d "{}" | clickhouse-client --date_time_input_format best_effort --query "INSERT INTO opensky FORMAT CSVWithNames"' +``` + +- 这里我们将文件列表(`ls -1 flightlist_*.csv.gz`)传递给`xargs`以进行并行处理。 `xargs -P100` 指定最多使用 100 个并行工作程序,但由于我们只有 30 个文件,工作程序的数量将只有 30 个。 +- 对于每个文件,`xargs` 将通过 `bash -c` 为每个文件运行一个脚本文件。该脚本通过使用 `{}` 表示文件名占位符,然后 `xargs` 由命令进行填充(使用 `-I{}`)。 +- 该脚本会将文件 (`gzip -c -d "{}"`) 解压缩到标准输出(`-c` 参数),并将输出重定向到 `clickhouse-client`。 +- 我们还要求使用扩展解析器解析 [DateTime](../../sql-reference/data-types/datetime.md) 字段 ([--date_time_input_format best_effort](../../operations/settings/ settings.md#settings-date_time_input_format)) 以识别具有时区偏移的 ISO-8601 格式。 + +最后,`clickhouse-client` 会以 [CSVWithNames](../../interfaces/formats.md#csvwithnames) 格式读取输入数据然后执行插入。 + +并行导入需要 24 秒。 + +如果您不想使用并行导入,以下是顺序导入的方式: + +```bash +for file in flightlist_*.csv.gz; do gzip -c -d "$file" | clickhouse-client --date_time_input_format best_effort --query "INSERT INTO opensky FORMAT CSVWithNames"; done +``` + +## 验证数据 {#validate-data} + +请求: + +```sql +SELECT count() FROM opensky; +``` + +结果: + +```text +┌──count()─┐ +│ 66010819 │ +└──────────┘ +``` + +ClickHouse 中的数据集大小只有 2.66 GiB,检查一下。 + +请求: + +```sql +SELECT formatReadableSize(total_bytes) FROM system.tables WHERE name = 'opensky'; +``` + +结果: + +```text +┌─formatReadableSize(total_bytes)─┐ +│ 2.66 GiB │ +└─────────────────────────────────┘ +``` + +## 运行一些查询 {#run-queries} + +总行驶距离为 680 亿公里。 + +请求: + +```sql +SELECT formatReadableQuantity(sum(geoDistance(longitude_1, latitude_1, longitude_2, latitude_2)) / 1000) FROM opensky; +``` + +结果: + +```text +┌─formatReadableQuantity(divide(sum(geoDistance(longitude_1, latitude_1, longitude_2, latitude_2)), 1000))─┐ +│ 68.72 billion │ +└──────────────────────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +平均飞行距离约为 1000 公里。 + +请求: + +```sql +SELECT avg(geoDistance(longitude_1, latitude_1, longitude_2, latitude_2)) FROM opensky; +``` + +结果: + +```text +┌─avg(geoDistance(longitude_1, latitude_1, longitude_2, latitude_2))─┐ +│ 1041090.6465708319 │ +└────────────────────────────────────────────────────────────────────┘ +``` + +### 最繁忙的始发机场和观测到的平均距离{#busy-airports-average-distance} + +请求: + +```sql +SELECT + origin, + count(), + round(avg(geoDistance(longitude_1, latitude_1, longitude_2, latitude_2))) AS distance, + bar(distance, 0, 10000000, 100) AS bar +FROM opensky +WHERE origin != '' +GROUP BY origin +ORDER BY count() DESC +LIMIT 100; +``` + +结果: + +```text + ┌─origin─┬─count()─┬─distance─┬─bar────────────────────────────────────┐ + 1. │ KORD │ 745007 │ 1546108 │ ███████████████▍ │ + 2. │ KDFW │ 696702 │ 1358721 │ █████████████▌ │ + 3. │ KATL │ 667286 │ 1169661 │ ███████████▋ │ + 4. │ KDEN │ 582709 │ 1287742 │ ████████████▊ │ + 5. │ KLAX │ 581952 │ 2628393 │ ██████████████████████████▎ │ + 6. │ KLAS │ 447789 │ 1336967 │ █████████████▎ │ + 7. │ KPHX │ 428558 │ 1345635 │ █████████████▍ │ + 8. 
│ KSEA │ 412592 │ 1757317 │ █████████████████▌ │ + 9. │ KCLT │ 404612 │ 880355 │ ████████▋ │ + 10. │ VIDP │ 363074 │ 1445052 │ ██████████████▍ │ + 11. │ EDDF │ 362643 │ 2263960 │ ██████████████████████▋ │ + 12. │ KSFO │ 361869 │ 2445732 │ ████████████████████████▍ │ + 13. │ KJFK │ 349232 │ 2996550 │ █████████████████████████████▊ │ + 14. │ KMSP │ 346010 │ 1287328 │ ████████████▋ │ + 15. │ LFPG │ 344748 │ 2206203 │ ██████████████████████ │ + 16. │ EGLL │ 341370 │ 3216593 │ ████████████████████████████████▏ │ + 17. │ EHAM │ 340272 │ 2116425 │ █████████████████████▏ │ + 18. │ KEWR │ 337696 │ 1826545 │ ██████████████████▎ │ + 19. │ KPHL │ 320762 │ 1291761 │ ████████████▊ │ + 20. │ OMDB │ 308855 │ 2855706 │ ████████████████████████████▌ │ + 21. │ UUEE │ 307098 │ 1555122 │ ███████████████▌ │ + 22. │ KBOS │ 304416 │ 1621675 │ ████████████████▏ │ + 23. │ LEMD │ 291787 │ 1695097 │ ████████████████▊ │ + 24. │ YSSY │ 272979 │ 1875298 │ ██████████████████▋ │ + 25. │ KMIA │ 265121 │ 1923542 │ ███████████████████▏ │ + 26. │ ZGSZ │ 263497 │ 745086 │ ███████▍ │ + 27. │ EDDM │ 256691 │ 1361453 │ █████████████▌ │ + 28. │ WMKK │ 254264 │ 1626688 │ ████████████████▎ │ + 29. │ CYYZ │ 251192 │ 2175026 │ █████████████████████▋ │ + 30. │ KLGA │ 248699 │ 1106935 │ ███████████ │ + 31. │ VHHH │ 248473 │ 3457658 │ ██████████████████████████████████▌ │ + 32. │ RJTT │ 243477 │ 1272744 │ ████████████▋ │ + 33. │ KBWI │ 241440 │ 1187060 │ ███████████▋ │ + 34. │ KIAD │ 239558 │ 1683485 │ ████████████████▋ │ + 35. │ KIAH │ 234202 │ 1538335 │ ███████████████▍ │ + 36. │ KFLL │ 223447 │ 1464410 │ ██████████████▋ │ + 37. │ KDAL │ 212055 │ 1082339 │ ██████████▋ │ + 38. │ KDCA │ 207883 │ 1013359 │ ██████████▏ │ + 39. │ LIRF │ 207047 │ 1427965 │ ██████████████▎ │ + 40. │ PANC │ 206007 │ 2525359 │ █████████████████████████▎ │ + 41. │ LTFJ │ 205415 │ 860470 │ ████████▌ │ + 42. │ KDTW │ 204020 │ 1106716 │ ███████████ │ + 43. │ VABB │ 201679 │ 1300865 │ █████████████ │ + 44. │ OTHH │ 200797 │ 3759544 │ █████████████████████████████████████▌ │ + 45. │ KMDW │ 200796 │ 1232551 │ ████████████▎ │ + 46. │ KSAN │ 198003 │ 1495195 │ ██████████████▊ │ + 47. │ KPDX │ 197760 │ 1269230 │ ████████████▋ │ + 48. │ SBGR │ 197624 │ 2041697 │ ████████████████████▍ │ + 49. │ VOBL │ 189011 │ 1040180 │ ██████████▍ │ + 50. │ LEBL │ 188956 │ 1283190 │ ████████████▋ │ + 51. │ YBBN │ 188011 │ 1253405 │ ████████████▌ │ + 52. │ LSZH │ 187934 │ 1572029 │ ███████████████▋ │ + 53. │ YMML │ 187643 │ 1870076 │ ██████████████████▋ │ + 54. │ RCTP │ 184466 │ 2773976 │ ███████████████████████████▋ │ + 55. │ KSNA │ 180045 │ 778484 │ ███████▋ │ + 56. │ EGKK │ 176420 │ 1694770 │ ████████████████▊ │ + 57. │ LOWW │ 176191 │ 1274833 │ ████████████▋ │ + 58. │ UUDD │ 176099 │ 1368226 │ █████████████▋ │ + 59. │ RKSI │ 173466 │ 3079026 │ ██████████████████████████████▋ │ + 60. │ EKCH │ 172128 │ 1229895 │ ████████████▎ │ + 61. │ KOAK │ 171119 │ 1114447 │ ███████████▏ │ + 62. │ RPLL │ 170122 │ 1440735 │ ██████████████▍ │ + 63. │ KRDU │ 167001 │ 830521 │ ████████▎ │ + 64. │ KAUS │ 164524 │ 1256198 │ ████████████▌ │ + 65. │ KBNA │ 163242 │ 1022726 │ ██████████▏ │ + 66. │ KSDF │ 162655 │ 1380867 │ █████████████▋ │ + 67. │ ENGM │ 160732 │ 910108 │ █████████ │ + 68. │ LIMC │ 160696 │ 1564620 │ ███████████████▋ │ + 69. │ KSJC │ 159278 │ 1081125 │ ██████████▋ │ + 70. │ KSTL │ 157984 │ 1026699 │ ██████████▎ │ + 71. │ UUWW │ 156811 │ 1261155 │ ████████████▌ │ + 72. │ KIND │ 153929 │ 987944 │ █████████▊ │ + 73. │ ESSA │ 153390 │ 1203439 │ ████████████ │ + 74. 
│ KMCO │ 153351 │ 1508657 │ ███████████████ │ + 75. │ KDVT │ 152895 │ 74048 │ ▋ │ + 76. │ VTBS │ 152645 │ 2255591 │ ██████████████████████▌ │ + 77. │ CYVR │ 149574 │ 2027413 │ ████████████████████▎ │ + 78. │ EIDW │ 148723 │ 1503985 │ ███████████████ │ + 79. │ LFPO │ 143277 │ 1152964 │ ███████████▌ │ + 80. │ EGSS │ 140830 │ 1348183 │ █████████████▍ │ + 81. │ KAPA │ 140776 │ 420441 │ ████▏ │ + 82. │ KHOU │ 138985 │ 1068806 │ ██████████▋ │ + 83. │ KTPA │ 138033 │ 1338223 │ █████████████▍ │ + 84. │ KFFZ │ 137333 │ 55397 │ ▌ │ + 85. │ NZAA │ 136092 │ 1581264 │ ███████████████▋ │ + 86. │ YPPH │ 133916 │ 1271550 │ ████████████▋ │ + 87. │ RJBB │ 133522 │ 1805623 │ ██████████████████ │ + 88. │ EDDL │ 133018 │ 1265919 │ ████████████▋ │ + 89. │ ULLI │ 130501 │ 1197108 │ ███████████▊ │ + 90. │ KIWA │ 127195 │ 250876 │ ██▌ │ + 91. │ KTEB │ 126969 │ 1189414 │ ███████████▊ │ + 92. │ VOMM │ 125616 │ 1127757 │ ███████████▎ │ + 93. │ LSGG │ 123998 │ 1049101 │ ██████████▍ │ + 94. │ LPPT │ 122733 │ 1779187 │ █████████████████▋ │ + 95. │ WSSS │ 120493 │ 3264122 │ ████████████████████████████████▋ │ + 96. │ EBBR │ 118539 │ 1579939 │ ███████████████▋ │ + 97. │ VTBD │ 118107 │ 661627 │ ██████▌ │ + 98. │ KVNY │ 116326 │ 692960 │ ██████▊ │ + 99. │ EDDT │ 115122 │ 941740 │ █████████▍ │ +100. │ EFHK │ 114860 │ 1629143 │ ████████████████▎ │ + └────────┴─────────┴──────────┴────────────────────────────────────────┘ +``` + +### 每周来自莫斯科三个主要机场的航班数量 {#flights-from-moscow} + +请求: + +```sql +SELECT + toMonday(day) AS k, + count() AS c, + bar(c, 0, 10000, 100) AS bar +FROM opensky +WHERE origin IN ('UUEE', 'UUDD', 'UUWW') +GROUP BY k +ORDER BY k ASC; +``` + +结果: + +```text + ┌──────────k─┬────c─┬─bar──────────────────────────────────────────────────────────────────────────┐ + 1. │ 2018-12-31 │ 5248 │ ████████████████████████████████████████████████████▍ │ + 2. │ 2019-01-07 │ 6302 │ ███████████████████████████████████████████████████████████████ │ + 3. │ 2019-01-14 │ 5701 │ █████████████████████████████████████████████████████████ │ + 4. │ 2019-01-21 │ 5638 │ ████████████████████████████████████████████████████████▍ │ + 5. │ 2019-01-28 │ 5731 │ █████████████████████████████████████████████████████████▎ │ + 6. │ 2019-02-04 │ 5683 │ ████████████████████████████████████████████████████████▋ │ + 7. │ 2019-02-11 │ 5759 │ █████████████████████████████████████████████████████████▌ │ + 8. │ 2019-02-18 │ 5736 │ █████████████████████████████████████████████████████████▎ │ + 9. │ 2019-02-25 │ 5873 │ ██████████████████████████████████████████████████████████▋ │ + 10. │ 2019-03-04 │ 5965 │ ███████████████████████████████████████████████████████████▋ │ + 11. │ 2019-03-11 │ 5900 │ ███████████████████████████████████████████████████████████ │ + 12. │ 2019-03-18 │ 5823 │ ██████████████████████████████████████████████████████████▏ │ + 13. │ 2019-03-25 │ 5899 │ ██████████████████████████████████████████████████████████▊ │ + 14. │ 2019-04-01 │ 6043 │ ████████████████████████████████████████████████████████████▍ │ + 15. │ 2019-04-08 │ 6098 │ ████████████████████████████████████████████████████████████▊ │ + 16. │ 2019-04-15 │ 6196 │ █████████████████████████████████████████████████████████████▊ │ + 17. │ 2019-04-22 │ 6486 │ ████████████████████████████████████████████████████████████████▋ │ + 18. │ 2019-04-29 │ 6682 │ ██████████████████████████████████████████████████████████████████▋ │ + 19. │ 2019-05-06 │ 6739 │ ███████████████████████████████████████████████████████████████████▍ │ + 20. 
│ 2019-05-13 │ 6600 │ ██████████████████████████████████████████████████████████████████ │ + 21. │ 2019-05-20 │ 6575 │ █████████████████████████████████████████████████████████████████▋ │ + 22. │ 2019-05-27 │ 6786 │ ███████████████████████████████████████████████████████████████████▋ │ + 23. │ 2019-06-03 │ 6872 │ ████████████████████████████████████████████████████████████████████▋ │ + 24. │ 2019-06-10 │ 7045 │ ██████████████████████████████████████████████████████████████████████▍ │ + 25. │ 2019-06-17 │ 7045 │ ██████████████████████████████████████████████████████████████████████▍ │ + 26. │ 2019-06-24 │ 6852 │ ████████████████████████████████████████████████████████████████████▌ │ + 27. │ 2019-07-01 │ 7248 │ ████████████████████████████████████████████████████████████████████████▍ │ + 28. │ 2019-07-08 │ 7284 │ ████████████████████████████████████████████████████████████████████████▋ │ + 29. │ 2019-07-15 │ 7142 │ ███████████████████████████████████████████████████████████████████████▍ │ + 30. │ 2019-07-22 │ 7108 │ ███████████████████████████████████████████████████████████████████████ │ + 31. │ 2019-07-29 │ 7251 │ ████████████████████████████████████████████████████████████████████████▌ │ + 32. │ 2019-08-05 │ 7403 │ ██████████████████████████████████████████████████████████████████████████ │ + 33. │ 2019-08-12 │ 7457 │ ██████████████████████████████████████████████████████████████████████████▌ │ + 34. │ 2019-08-19 │ 7502 │ ███████████████████████████████████████████████████████████████████████████ │ + 35. │ 2019-08-26 │ 7540 │ ███████████████████████████████████████████████████████████████████████████▍ │ + 36. │ 2019-09-02 │ 7237 │ ████████████████████████████████████████████████████████████████████████▎ │ + 37. │ 2019-09-09 │ 7328 │ █████████████████████████████████████████████████████████████████████████▎ │ + 38. │ 2019-09-16 │ 5566 │ ███████████████████████████████████████████████████████▋ │ + 39. │ 2019-09-23 │ 7049 │ ██████████████████████████████████████████████████████████████████████▍ │ + 40. │ 2019-09-30 │ 6880 │ ████████████████████████████████████████████████████████████████████▋ │ + 41. │ 2019-10-07 │ 6518 │ █████████████████████████████████████████████████████████████████▏ │ + 42. │ 2019-10-14 │ 6688 │ ██████████████████████████████████████████████████████████████████▊ │ + 43. │ 2019-10-21 │ 6667 │ ██████████████████████████████████████████████████████████████████▋ │ + 44. │ 2019-10-28 │ 6303 │ ███████████████████████████████████████████████████████████████ │ + 45. │ 2019-11-04 │ 6298 │ ██████████████████████████████████████████████████████████████▊ │ + 46. │ 2019-11-11 │ 6137 │ █████████████████████████████████████████████████████████████▎ │ + 47. │ 2019-11-18 │ 6051 │ ████████████████████████████████████████████████████████████▌ │ + 48. │ 2019-11-25 │ 5820 │ ██████████████████████████████████████████████████████████▏ │ + 49. │ 2019-12-02 │ 5942 │ ███████████████████████████████████████████████████████████▍ │ + 50. │ 2019-12-09 │ 4891 │ ████████████████████████████████████████████████▊ │ + 51. │ 2019-12-16 │ 5682 │ ████████████████████████████████████████████████████████▋ │ + 52. │ 2019-12-23 │ 6111 │ █████████████████████████████████████████████████████████████ │ + 53. │ 2019-12-30 │ 5870 │ ██████████████████████████████████████████████████████████▋ │ + 54. │ 2020-01-06 │ 5953 │ ███████████████████████████████████████████████████████████▌ │ + 55. │ 2020-01-13 │ 5698 │ ████████████████████████████████████████████████████████▊ │ + 56. 
│ 2020-01-20 │ 5339 │ █████████████████████████████████████████████████████▍ │ + 57. │ 2020-01-27 │ 5566 │ ███████████████████████████████████████████████████████▋ │ + 58. │ 2020-02-03 │ 5801 │ ██████████████████████████████████████████████████████████ │ + 59. │ 2020-02-10 │ 5692 │ ████████████████████████████████████████████████████████▊ │ + 60. │ 2020-02-17 │ 5912 │ ███████████████████████████████████████████████████████████ │ + 61. │ 2020-02-24 │ 6031 │ ████████████████████████████████████████████████████████████▎ │ + 62. │ 2020-03-02 │ 6105 │ █████████████████████████████████████████████████████████████ │ + 63. │ 2020-03-09 │ 5823 │ ██████████████████████████████████████████████████████████▏ │ + 64. │ 2020-03-16 │ 4659 │ ██████████████████████████████████████████████▌ │ + 65. │ 2020-03-23 │ 3720 │ █████████████████████████████████████▏ │ + 66. │ 2020-03-30 │ 1720 │ █████████████████▏ │ + 67. │ 2020-04-06 │ 849 │ ████████▍ │ + 68. │ 2020-04-13 │ 710 │ ███████ │ + 69. │ 2020-04-20 │ 725 │ ███████▏ │ + 70. │ 2020-04-27 │ 920 │ █████████▏ │ + 71. │ 2020-05-04 │ 859 │ ████████▌ │ + 72. │ 2020-05-11 │ 1047 │ ██████████▍ │ + 73. │ 2020-05-18 │ 1135 │ ███████████▎ │ + 74. │ 2020-05-25 │ 1266 │ ████████████▋ │ + 75. │ 2020-06-01 │ 1793 │ █████████████████▊ │ + 76. │ 2020-06-08 │ 1979 │ ███████████████████▋ │ + 77. │ 2020-06-15 │ 2297 │ ██████████████████████▊ │ + 78. │ 2020-06-22 │ 2788 │ ███████████████████████████▊ │ + 79. │ 2020-06-29 │ 3389 │ █████████████████████████████████▊ │ + 80. │ 2020-07-06 │ 3545 │ ███████████████████████████████████▍ │ + 81. │ 2020-07-13 │ 3569 │ ███████████████████████████████████▋ │ + 82. │ 2020-07-20 │ 3784 │ █████████████████████████████████████▋ │ + 83. │ 2020-07-27 │ 3960 │ ███████████████████████████████████████▌ │ + 84. │ 2020-08-03 │ 4323 │ ███████████████████████████████████████████▏ │ + 85. │ 2020-08-10 │ 4581 │ █████████████████████████████████████████████▋ │ + 86. │ 2020-08-17 │ 4791 │ ███████████████████████████████████████████████▊ │ + 87. │ 2020-08-24 │ 4928 │ █████████████████████████████████████████████████▎ │ + 88. │ 2020-08-31 │ 4687 │ ██████████████████████████████████████████████▋ │ + 89. │ 2020-09-07 │ 4643 │ ██████████████████████████████████████████████▍ │ + 90. │ 2020-09-14 │ 4594 │ █████████████████████████████████████████████▊ │ + 91. │ 2020-09-21 │ 4478 │ ████████████████████████████████████████████▋ │ + 92. │ 2020-09-28 │ 4382 │ ███████████████████████████████████████████▋ │ + 93. │ 2020-10-05 │ 4261 │ ██████████████████████████████████████████▌ │ + 94. │ 2020-10-12 │ 4243 │ ██████████████████████████████████████████▍ │ + 95. │ 2020-10-19 │ 3941 │ ███████████████████████████████████████▍ │ + 96. │ 2020-10-26 │ 3616 │ ████████████████████████████████████▏ │ + 97. │ 2020-11-02 │ 3586 │ ███████████████████████████████████▋ │ + 98. │ 2020-11-09 │ 3403 │ ██████████████████████████████████ │ + 99. │ 2020-11-16 │ 3336 │ █████████████████████████████████▎ │ +100. │ 2020-11-23 │ 3230 │ ████████████████████████████████▎ │ +101. │ 2020-11-30 │ 3183 │ ███████████████████████████████▋ │ +102. │ 2020-12-07 │ 3285 │ ████████████████████████████████▋ │ +103. │ 2020-12-14 │ 3367 │ █████████████████████████████████▋ │ +104. │ 2020-12-21 │ 3748 │ █████████████████████████████████████▍ │ +105. │ 2020-12-28 │ 3986 │ ███████████████████████████████████████▋ │ +106. │ 2021-01-04 │ 3906 │ ███████████████████████████████████████ │ +107. │ 2021-01-11 │ 3425 │ ██████████████████████████████████▎ │ +108. 
│ 2021-01-18 │ 3144 │ ███████████████████████████████▍ │ +109. │ 2021-01-25 │ 3115 │ ███████████████████████████████▏ │ +110. │ 2021-02-01 │ 3285 │ ████████████████████████████████▋ │ +111. │ 2021-02-08 │ 3321 │ █████████████████████████████████▏ │ +112. │ 2021-02-15 │ 3475 │ ██████████████████████████████████▋ │ +113. │ 2021-02-22 │ 3549 │ ███████████████████████████████████▍ │ +114. │ 2021-03-01 │ 3755 │ █████████████████████████████████████▌ │ +115. │ 2021-03-08 │ 3080 │ ██████████████████████████████▋ │ +116. │ 2021-03-15 │ 3789 │ █████████████████████████████████████▊ │ +117. │ 2021-03-22 │ 3804 │ ██████████████████████████████████████ │ +118. │ 2021-03-29 │ 4238 │ ██████████████████████████████████████████▍ │ +119. │ 2021-04-05 │ 4307 │ ███████████████████████████████████████████ │ +120. │ 2021-04-12 │ 4225 │ ██████████████████████████████████████████▎ │ +121. │ 2021-04-19 │ 4391 │ ███████████████████████████████████████████▊ │ +122. │ 2021-04-26 │ 4868 │ ████████████████████████████████████████████████▋ │ +123. │ 2021-05-03 │ 4977 │ █████████████████████████████████████████████████▋ │ +124. │ 2021-05-10 │ 5164 │ ███████████████████████████████████████████████████▋ │ +125. │ 2021-05-17 │ 4986 │ █████████████████████████████████████████████████▋ │ +126. │ 2021-05-24 │ 5024 │ ██████████████████████████████████████████████████▏ │ +127. │ 2021-05-31 │ 4824 │ ████████████████████████████████████████████████▏ │ +128. │ 2021-06-07 │ 5652 │ ████████████████████████████████████████████████████████▌ │ +129. │ 2021-06-14 │ 5613 │ ████████████████████████████████████████████████████████▏ │ +130. │ 2021-06-21 │ 6061 │ ████████████████████████████████████████████████████████████▌ │ +131. │ 2021-06-28 │ 2554 │ █████████████████████████▌ │ + └────────────┴──────┴──────────────────────────────────────────────────────────────────────────────┘ +``` + +### 在线 Playground {#playground} + +你可以使用交互式资源 [Online Playground](https://play.clickhouse.com/play?user=play) 来尝试对此数据集的其他查询。 例如, [执行这个查询](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICBvcmlnaW4sCiAgICBjb3VudCgpLAogICAgcm91bmQoYXZnKGdlb0Rpc3RhbmNlKGxvbmdpdHVkZV8xLCBsYXRpdHVkZV8xLCBsb25naXR1ZGVfMiwgbGF0aXR1ZGVfMikpKSBBUyBkaXN0YW5jZSwKICAgIGJhcihkaXN0YW5jZSwgMCwgMTAwMDAwMDAsIDEwMCkgQVMgYmFyCkZST00gb3BlbnNreQpXSEVSRSBvcmlnaW4gIT0gJycKR1JPVVAgQlkgb3JpZ2luCk9SREVSIEJZIGNvdW50KCkgREVTQwpMSU1JVCAxMDA=). 但是,请注意无法在 Playground 中创建临时表。 diff --git a/docs/zh/getting-started/example-datasets/recipes.mdx b/docs/zh/getting-started/example-datasets/recipes.mdx index da3a2ac541b..b7ed92962c5 100644 --- a/docs/zh/getting-started/example-datasets/recipes.mdx +++ b/docs/zh/getting-started/example-datasets/recipes.mdx @@ -1,9 +1,339 @@ ---- -slug: /zh/getting-started/example-datasets/recipes -sidebar_label: Recipes Dataset -title: "Recipes Dataset" +--- +slug: /zh/getting-started/example-datasets/recipes +sidebar_label: 食谱数据集 +title: "食谱数据集" --- -import Content from '@site/docs/en/getting-started/example-datasets/recipes.md'; +RecipeNLG 数据集可在 [此处](https://recipenlg.cs.put.poznan.pl/dataset) 下载。其中包含 220 万份食谱。大小略小于 1 GB。 - +## 下载并解压数据集 + +1. 进入下载页面[https://recipenlg.cs.put.poznan.pl/dataset](https://recipenlg.cs.put.poznan.pl/dataset)。 +2. 接受条款和条件并下载 zip 文件。 +3. 
使用 `unzip` 解压 zip 文件,得到 `full_dataset.csv` 文件。 + +## 创建表 + +运行 clickhouse-client 并执行以下 CREATE 请求: + +``` sql +CREATE TABLE recipes +( + title String, + ingredients Array(String), + directions Array(String), + link String, + source LowCardinality(String), + NER Array(String) +) ENGINE = MergeTree ORDER BY title; +``` + +## 插入数据 + +运行以下命令: + +``` bash +clickhouse-client --query " + INSERT INTO recipes + SELECT + title, + JSONExtract(ingredients, 'Array(String)'), + JSONExtract(directions, 'Array(String)'), + link, + source, + JSONExtract(NER, 'Array(String)') + FROM input('num UInt32, title String, ingredients String, directions String, link String, source LowCardinality(String), NER String') + FORMAT CSVWithNames +" --input_format_with_names_use_header 0 --format_csv_allow_single_quote 0 --input_format_allow_errors_num 10 < full_dataset.csv +``` + +这是一个展示如何解析自定义 CSV,这其中涉及了许多调整。 + +说明: +- 数据集为 CSV 格式,但在插入时需要一些预处理;使用表函数 [input](../../sql-reference/table-functions/input.md) 进行预处理; +- CSV 文件的结构在表函数 `input` 的参数中指定; +- 字段 `num`(行号)是不需要的 - 可以忽略并从文件中进行解析; +- 使用 `FORMAT CSVWithNames`,因为标题不包含第一个字段的名称,因此 CSV 中的标题将被忽略(通过命令行参数 `--input_format_with_names_use_header 0`); +- 文件仅使用双引号将 CSV 字符串括起来;一些字符串没有用双引号括起来,单引号也不能被解析为括起来的字符串 - 所以添加`--format_csv_allow_single_quote 0`参数接受文件中的单引号; +- 由于某些 CSV 的字符串的开头包含 `\M/` 因此无法被解析; CSV 中唯一可能以反斜杠开头的值是 `\N`,这个值被解析为 SQL NULL。通过添加`--input_format_allow_errors_num 10`参数,允许在导入过程中跳过 10 个格式错误; +- 在数据集中的 Ingredients、directions 和 NER 字段为数组;但这些数组并没有以一般形式表示:这些字段作为 JSON 序列化为字符串,然后放入 CSV 中 - 在导入是将它们解析为字符串,然后使用 [JSONExtract](../../sql-reference/functions/json-functions.md ) 函数将其转换为数组。 + +## 验证插入的数据 + +通过检查行数: + +请求: + +``` sql +SELECT count() FROM recipes; +``` + +结果: + +``` text +┌─count()─┐ +│ 2231141 │ +└─────────┘ +``` + +## 示例查询 + +### 按配方数量排列的顶级组件: + +在此示例中,我们学习如何使用 [arrayJoin](../../sql-reference/functions/array-join/) 函数将数组扩展为行的集合。 + +请求: + +``` sql +SELECT + arrayJoin(NER) AS k, + count() AS c +FROM recipes +GROUP BY k +ORDER BY c DESC +LIMIT 50 +``` + +结果: + +``` text +┌─k────────────────────┬──────c─┐ +│ salt │ 890741 │ +│ sugar │ 620027 │ +│ butter │ 493823 │ +│ flour │ 466110 │ +│ eggs │ 401276 │ +│ onion │ 372469 │ +│ garlic │ 358364 │ +│ milk │ 346769 │ +│ water │ 326092 │ +│ vanilla │ 270381 │ +│ olive oil │ 197877 │ +│ pepper │ 179305 │ +│ brown sugar │ 174447 │ +│ tomatoes │ 163933 │ +│ egg │ 160507 │ +│ baking powder │ 148277 │ +│ lemon juice │ 146414 │ +│ Salt │ 122557 │ +│ cinnamon │ 117927 │ +│ sour cream │ 116682 │ +│ cream cheese │ 114423 │ +│ margarine │ 112742 │ +│ celery │ 112676 │ +│ baking soda │ 110690 │ +│ parsley │ 102151 │ +│ chicken │ 101505 │ +│ onions │ 98903 │ +│ vegetable oil │ 91395 │ +│ oil │ 85600 │ +│ mayonnaise │ 84822 │ +│ pecans │ 79741 │ +│ nuts │ 78471 │ +│ potatoes │ 75820 │ +│ carrots │ 75458 │ +│ pineapple │ 74345 │ +│ soy sauce │ 70355 │ +│ black pepper │ 69064 │ +│ thyme │ 68429 │ +│ mustard │ 65948 │ +│ chicken broth │ 65112 │ +│ bacon │ 64956 │ +│ honey │ 64626 │ +│ oregano │ 64077 │ +│ ground beef │ 64068 │ +│ unsalted butter │ 63848 │ +│ mushrooms │ 61465 │ +│ Worcestershire sauce │ 59328 │ +│ cornstarch │ 58476 │ +│ green pepper │ 58388 │ +│ Cheddar cheese │ 58354 │ +└──────────────────────┴────────┘ + +50 rows in set. Elapsed: 0.112 sec. Processed 2.23 million rows, 361.57 MB (19.99 million rows/s., 3.24 GB/s.) 
+``` + +### 最复杂的草莓食谱 + +``` sql +SELECT + title, + length(NER), + length(directions) +FROM recipes +WHERE has(NER, 'strawberry') +ORDER BY length(directions) DESC +LIMIT 10 +``` + +结果: + +``` text +┌─title────────────────────────────────────────────────────────────┬─length(NER)─┬─length(directions)─┐ +│ Chocolate-Strawberry-Orange Wedding Cake │ 24 │ 126 │ +│ Strawberry Cream Cheese Crumble Tart │ 19 │ 47 │ +│ Charlotte-Style Ice Cream │ 11 │ 45 │ +│ Sinfully Good a Million Layers Chocolate Layer Cake, With Strawb │ 31 │ 45 │ +│ Sweetened Berries With Elderflower Sherbet │ 24 │ 44 │ +│ Chocolate-Strawberry Mousse Cake │ 15 │ 42 │ +│ Rhubarb Charlotte with Strawberries and Rum │ 20 │ 42 │ +│ Chef Joey's Strawberry Vanilla Tart │ 7 │ 37 │ +│ Old-Fashioned Ice Cream Sundae Cake │ 17 │ 37 │ +│ Watermelon Cake │ 16 │ 36 │ +└──────────────────────────────────────────────────────────────────┴─────────────┴────────────────────┘ + +10 rows in set. Elapsed: 0.215 sec. Processed 2.23 million rows, 1.48 GB (10.35 million rows/s., 6.86 GB/s.) +``` + +在此示例中,我们使用 [has](../../sql-reference/functions/array-functions/#hasarr-elem) 函数来按过滤数组类型元素并按 directions 的数量进行排序。 + +有一个婚礼蛋糕需要整个126个步骤来制作!显示 directions: + +请求: + +``` sql +SELECT arrayJoin(directions) +FROM recipes +WHERE title = 'Chocolate-Strawberry-Orange Wedding Cake' +``` + +结果: + +``` text +┌─arrayJoin(directions)───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ +│ Position 1 rack in center and 1 rack in bottom third of oven and preheat to 350F. │ +│ Butter one 5-inch-diameter cake pan with 2-inch-high sides, one 8-inch-diameter cake pan with 2-inch-high sides and one 12-inch-diameter cake pan with 2-inch-high sides. │ +│ Dust pans with flour; line bottoms with parchment. │ +│ Combine 1/3 cup orange juice and 2 ounces unsweetened chocolate in heavy small saucepan. │ +│ Stir mixture over medium-low heat until chocolate melts. │ +│ Remove from heat. │ +│ Gradually mix in 1 2/3 cups orange juice. │ +│ Sift 3 cups flour, 2/3 cup cocoa, 2 teaspoons baking soda, 1 teaspoon salt and 1/2 teaspoon baking powder into medium bowl. │ +│ using electric mixer, beat 1 cup (2 sticks) butter and 3 cups sugar in large bowl until blended (mixture will look grainy). │ +│ Add 4 eggs, 1 at a time, beating to blend after each. │ +│ Beat in 1 tablespoon orange peel and 1 tablespoon vanilla extract. │ +│ Add dry ingredients alternately with orange juice mixture in 3 additions each, beating well after each addition. │ +│ Mix in 1 cup chocolate chips. │ +│ Transfer 1 cup plus 2 tablespoons batter to prepared 5-inch pan, 3 cups batter to prepared 8-inch pan and remaining batter (about 6 cups) to 12-inch pan. │ +│ Place 5-inch and 8-inch pans on center rack of oven. │ +│ Place 12-inch pan on lower rack of oven. │ +│ Bake cakes until tester inserted into center comes out clean, about 35 minutes. │ +│ Transfer cakes in pans to racks and cool completely. │ +│ Mark 4-inch diameter circle on one 6-inch-diameter cardboard cake round. │ +│ Cut out marked circle. │ +│ Mark 7-inch-diameter circle on one 8-inch-diameter cardboard cake round. │ +│ Cut out marked circle. │ +│ Mark 11-inch-diameter circle on one 12-inch-diameter cardboard cake round. │ +│ Cut out marked circle. │ +│ Cut around sides of 5-inch-cake to loosen. │ +│ Place 4-inch cardboard over pan. │ +│ Hold cardboard and pan together; turn cake out onto cardboard. │ +│ Peel off parchment.Wrap cakes on its cardboard in foil. 
│ +│ Repeat turning out, peeling off parchment and wrapping cakes in foil, using 7-inch cardboard for 8-inch cake and 11-inch cardboard for 12-inch cake. │ +│ Using remaining ingredients, make 1 more batch of cake batter and bake 3 more cake layers as described above. │ +│ Cool cakes in pans. │ +│ Cover cakes in pans tightly with foil. │ +│ (Can be prepared ahead. │ +│ Let stand at room temperature up to 1 day or double-wrap all cake layers and freeze up to 1 week. │ +│ Bring cake layers to room temperature before using.) │ +│ Place first 12-inch cake on its cardboard on work surface. │ +│ Spread 2 3/4 cups ganache over top of cake and all the way to edge. │ +│ Spread 2/3 cup jam over ganache, leaving 1/2-inch chocolate border at edge. │ +│ Drop 1 3/4 cups white chocolate frosting by spoonfuls over jam. │ +│ Gently spread frosting over jam, leaving 1/2-inch chocolate border at edge. │ +│ Rub some cocoa powder over second 12-inch cardboard. │ +│ Cut around sides of second 12-inch cake to loosen. │ +│ Place cardboard, cocoa side down, over pan. │ +│ Turn cake out onto cardboard. │ +│ Peel off parchment. │ +│ Carefully slide cake off cardboard and onto filling on first 12-inch cake. │ +│ Refrigerate. │ +│ Place first 8-inch cake on its cardboard on work surface. │ +│ Spread 1 cup ganache over top all the way to edge. │ +│ Spread 1/4 cup jam over, leaving 1/2-inch chocolate border at edge. │ +│ Drop 1 cup white chocolate frosting by spoonfuls over jam. │ +│ Gently spread frosting over jam, leaving 1/2-inch chocolate border at edge. │ +│ Rub some cocoa over second 8-inch cardboard. │ +│ Cut around sides of second 8-inch cake to loosen. │ +│ Place cardboard, cocoa side down, over pan. │ +│ Turn cake out onto cardboard. │ +│ Peel off parchment. │ +│ Slide cake off cardboard and onto filling on first 8-inch cake. │ +│ Refrigerate. │ +│ Place first 5-inch cake on its cardboard on work surface. │ +│ Spread 1/2 cup ganache over top of cake and all the way to edge. │ +│ Spread 2 tablespoons jam over, leaving 1/2-inch chocolate border at edge. │ +│ Drop 1/3 cup white chocolate frosting by spoonfuls over jam. │ +│ Gently spread frosting over jam, leaving 1/2-inch chocolate border at edge. │ +│ Rub cocoa over second 6-inch cardboard. │ +│ Cut around sides of second 5-inch cake to loosen. │ +│ Place cardboard, cocoa side down, over pan. │ +│ Turn cake out onto cardboard. │ +│ Peel off parchment. │ +│ Slide cake off cardboard and onto filling on first 5-inch cake. │ +│ Chill all cakes 1 hour to set filling. │ +│ Place 12-inch tiered cake on its cardboard on revolving cake stand. │ +│ Spread 2 2/3 cups frosting over top and sides of cake as a first coat. │ +│ Refrigerate cake. │ +│ Place 8-inch tiered cake on its cardboard on cake stand. │ +│ Spread 1 1/4 cups frosting over top and sides of cake as a first coat. │ +│ Refrigerate cake. │ +│ Place 5-inch tiered cake on its cardboard on cake stand. │ +│ Spread 3/4 cup frosting over top and sides of cake as a first coat. │ +│ Refrigerate all cakes until first coats of frosting set, about 1 hour. │ +│ (Cakes can be made to this point up to 1 day ahead; cover and keep refrigerate.) │ +│ Prepare second batch of frosting, using remaining frosting ingredients and following directions for first batch. │ +│ Spoon 2 cups frosting into pastry bag fitted with small star tip. │ +│ Place 12-inch cake on its cardboard on large flat platter. │ +│ Place platter on cake stand. │ +│ Using icing spatula, spread 2 1/2 cups frosting over top and sides of cake; smooth top. 
│ +│ Using filled pastry bag, pipe decorative border around top edge of cake. │ +│ Refrigerate cake on platter. │ +│ Place 8-inch cake on its cardboard on cake stand. │ +│ Using icing spatula, spread 1 1/2 cups frosting over top and sides of cake; smooth top. │ +│ Using pastry bag, pipe decorative border around top edge of cake. │ +│ Refrigerate cake on its cardboard. │ +│ Place 5-inch cake on its cardboard on cake stand. │ +│ Using icing spatula, spread 3/4 cup frosting over top and sides of cake; smooth top. │ +│ Using pastry bag, pipe decorative border around top edge of cake, spooning more frosting into bag if necessary. │ +│ Refrigerate cake on its cardboard. │ +│ Keep all cakes refrigerated until frosting sets, about 2 hours. │ +│ (Can be prepared 2 days ahead. │ +│ Cover loosely; keep refrigerated.) │ +│ Place 12-inch cake on platter on work surface. │ +│ Press 1 wooden dowel straight down into and completely through center of cake. │ +│ Mark dowel 1/4 inch above top of frosting. │ +│ Remove dowel and cut with serrated knife at marked point. │ +│ Cut 4 more dowels to same length. │ +│ Press 1 cut dowel back into center of cake. │ +│ Press remaining 4 cut dowels into cake, positioning 3 1/2 inches inward from cake edges and spacing evenly. │ +│ Place 8-inch cake on its cardboard on work surface. │ +│ Press 1 dowel straight down into and completely through center of cake. │ +│ Mark dowel 1/4 inch above top of frosting. │ +│ Remove dowel and cut with serrated knife at marked point. │ +│ Cut 3 more dowels to same length. │ +│ Press 1 cut dowel back into center of cake. │ +│ Press remaining 3 cut dowels into cake, positioning 2 1/2 inches inward from edges and spacing evenly. │ +│ Using large metal spatula as aid, place 8-inch cake on its cardboard atop dowels in 12-inch cake, centering carefully. │ +│ Gently place 5-inch cake on its cardboard atop dowels in 8-inch cake, centering carefully. │ +│ Using citrus stripper, cut long strips of orange peel from oranges. │ +│ Cut strips into long segments. │ +│ To make orange peel coils, wrap peel segment around handle of wooden spoon; gently slide peel off handle so that peel keeps coiled shape. │ +│ Garnish cake with orange peel coils, ivy or mint sprigs, and some berries. │ +│ (Assembled cake can be made up to 8 hours ahead. │ +│ Let stand at cool room temperature.) │ +│ Remove top and middle cake tiers. │ +│ Remove dowels from cakes. │ +│ Cut top and middle cakes into slices. │ +│ To cut 12-inch cake: Starting 3 inches inward from edge and inserting knife straight down, cut through from top to bottom to make 6-inch-diameter circle in center of cake. │ +│ Cut outer portion of cake into slices; cut inner portion into slices and serve with strawberries. │ +└─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ + +126 rows in set. Elapsed: 0.011 sec. Processed 8.19 thousand rows, 5.34 MB (737.75 thousand rows/s., 480.59 MB/s.) 
+``` + +### 在线 Playground + +此数据集也可在 [在线 Playground](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICBhcnJheUpvaW4oTkVSKSBBUyBrLAogICAgY291bnQoKSBBUyBjCkZST00gcmVjaXBlcwpHUk9VUCBCWSBrCk9SREVSIEJZIGMgREVTQwpMSU1JVCA1MA==) 中体验。 + +[原文链接](https://clickhouse.com/docs/en/getting-started/example-datasets/recipes/) diff --git a/docs/zh/getting-started/example-datasets/uk-price-paid.mdx b/docs/zh/getting-started/example-datasets/uk-price-paid.mdx index 3a14a3ce55d..058f0ae421a 100644 --- a/docs/zh/getting-started/example-datasets/uk-price-paid.mdx +++ b/docs/zh/getting-started/example-datasets/uk-price-paid.mdx @@ -42,9 +42,9 @@ ORDER BY (postcode1, postcode2, addr1, addr2); - 将`postcode` 拆分为两个不同的列 - `postcode1` 和 `postcode2`,因为这更适合存储和查询 - 将`time` 字段转换为日期为它只包含 00:00 时间 -- 忽略 [UUid](../../sql-reference/data-types/uuid.md) 字段,因为我们不需要它进行分析 -- 使用 [transform](../../sql-reference/functions/other-functions.md#transform) 函数将 `Enum` 字段 `type` 和 `duration` 转换为更易读的 `Enum` 字段 -- 将 `is_new` 字段从单字符串(` Y`/`N`) 到 [UInt8](../../sql-reference/data-types/int-uint.md#uint8-uint16-uint32-uint64-uint256-int8-int16-int32-int64 -int128-int256) 字段为 0 或 1 +- 忽略 [UUid](/docs/zh/sql-reference/data-types/uuid.md) 字段,因为我们不需要它进行分析 +- 使用 [transform](/docs/zh/sql-reference/functions/other-functions.md#transform) 函数将 `Enum` 字段 `type` 和 `duration` 转换为更易读的 `Enum` 字段 +- 将 `is_new` 字段从单字符串(` Y`/`N`) 到 [UInt8](/docs/zh/sql-reference/data-types/int-uint.md#uint8-uint16-uint32-uint64-uint256-int8-int16-int32-int64 -int128-int256) 字段为 0 或 1 - 删除最后两列,因为它们都具有相同的值(即 0) `url` 函数将来自网络服务器的数据流式传输到 ClickHouse 表中。以下命令将 500 万行插入到 `uk_price_paid` 表中: @@ -342,7 +342,7 @@ LIMIT 100 ## 使用 Projection 加速查询 {#speedup-with-projections} -[Projections](../../sql-reference/statements/alter/projection.md) 允许我们通过存储任意格式的预先聚合的数据来提高查询速度。在此示例中,我们创建了一个按年份、地区和城镇分组的房产的平均价格、总价格和数量的 Projection。在执行时,如果 ClickHouse 认为 Projection 可以提高查询的性能,它将使用 Projection(何时使用由 ClickHouse 决定)。 +[Projections](/docs/zh/sql-reference/statements/alter/projection.mdx) 允许我们通过存储任意格式的预先聚合的数据来提高查询速度。在此示例中,我们创建了一个按年份、地区和城镇分组的房产的平均价格、总价格和数量的 Projection。在执行时,如果 ClickHouse 认为 Projection 可以提高查询的性能,它将使用 Projection(何时使用由 ClickHouse 决定)。 ### 构建投影{#build-projection} diff --git a/docs/zh/sql-reference/data-types/date.md b/docs/zh/sql-reference/data-types/date.md index 9b1acdbe939..a8874151e75 100644 --- a/docs/zh/sql-reference/data-types/date.md +++ b/docs/zh/sql-reference/data-types/date.md @@ -3,7 +3,7 @@ slug: /zh/sql-reference/data-types/date --- # 日期 {#date} -日期类型,用两个字节存储,表示从 1970-01-01 (无符号) 到当前的日期值。允许存储从 Unix 纪元开始到编译阶段定义的上限阈值常量(目前上限是2149年,但最终完全支持的年份为2148)。最小值输出为1970-01-01。 +日期类型,用两个字节存储,表示从 1970-01-01 (无符号) 到当前的日期值。允许存储从 Unix 纪元开始到编译阶段定义的上限阈值常量(目前上限是2106年,但最终完全支持的年份为2105)。最小值输出为1970-01-01。 值的范围: \[1970-01-01, 2149-06-06\]。 diff --git a/docs/zh/sql-reference/functions/other-functions.md b/docs/zh/sql-reference/functions/other-functions.md index a475420ba64..62d2a377ff1 100644 --- a/docs/zh/sql-reference/functions/other-functions.md +++ b/docs/zh/sql-reference/functions/other-functions.md @@ -237,7 +237,7 @@ ORDER BY c DESC ``` sql SELECT - transform(domain(Referer), ['yandex.ru', 'google.ru', 'vk.com'], ['www.yandex', 'example.com']) AS s, + transform(domain(Referer), ['yandex.ru', 'google.ru', 'vkontakte.ru'], ['www.yandex', 'example.com', 'vk.com']) AS s, count() AS c FROM test.hits GROUP BY domain(Referer) diff --git a/packages/build b/packages/build index c5ebf8641a3..531e068338d 100755 --- a/packages/build +++ b/packages/build @@ -26,8 +26,10 @@ SOURCE=${SOURCE:-$PKG_ROOT} HELP="${0} 
[--test] [--rpm] [-h|--help] --test - adds '+test' prefix to version --apk - build APK packages + --archlinux - build archlinux packages --rpm - build RPM packages --tgz - build tarball package + --deb - build deb package --help - show this help and exit Used envs: @@ -47,16 +49,21 @@ fi export CLICKHOUSE_VERSION_STRING - while [[ $1 == --* ]] do case "$1" in --test ) VERSION_POSTFIX+='+test' shift ;; + --deb ) + MAKE_DEB=1 + shift ;; --apk ) MAKE_APK=1 shift ;; + --archlinux ) + MAKE_ARCHLINUX=1 + shift ;; --rpm ) MAKE_RPM=1 shift ;; @@ -131,18 +138,24 @@ CLICKHOUSE_VERSION_STRING+=$VERSION_POSTFIX echo -e "\nCurrent version is $CLICKHOUSE_VERSION_STRING" for config in clickhouse*.yaml; do - echo "Building deb package for $config" + if [ -n "$MAKE_DEB" ] || [ -n "$MAKE_TGZ" ]; then + echo "Building deb package for $config" - # Preserve package path - exec 9>&1 - PKG_PATH=$(nfpm package --target "$OUTPUT_DIR" --config "$config" --packager deb | tee /dev/fd/9) - PKG_PATH=${PKG_PATH##*created package: } - exec 9>&- + # Preserve package path + exec 9>&1 + PKG_PATH=$(nfpm package --target "$OUTPUT_DIR" --config "$config" --packager deb | tee /dev/fd/9) + PKG_PATH=${PKG_PATH##*created package: } + exec 9>&- + fi if [ -n "$MAKE_APK" ]; then echo "Building apk package for $config" nfpm package --target "$OUTPUT_DIR" --config "$config" --packager apk fi + if [ -n "$MAKE_ARCHLINUX" ]; then + echo "Building archlinux package for $config" + nfpm package --target "$OUTPUT_DIR" --config "$config" --packager archlinux + fi if [ -n "$MAKE_RPM" ]; then echo "Building rpm package for $config" nfpm package --target "$OUTPUT_DIR" --config "$config" --packager rpm diff --git a/packages/clickhouse-keeper.yaml b/packages/clickhouse-keeper.yaml index 7803729c469..8f319c97b65 100644 --- a/packages/clickhouse-keeper.yaml +++ b/packages/clickhouse-keeper.yaml @@ -27,8 +27,8 @@ deb: Source: clickhouse contents: -- src: root/etc/clickhouse-keeper - dst: /etc/clickhouse-keeper +- src: root/etc/clickhouse-keeper/keeper_config.xml + dst: /etc/clickhouse-keeper/keeper_config.xml type: config - src: root/usr/bin/clickhouse-keeper dst: /usr/bin/clickhouse-keeper diff --git a/packages/clickhouse-server.yaml b/packages/clickhouse-server.yaml index a94ad1e9169..b0778e6bf72 100644 --- a/packages/clickhouse-server.yaml +++ b/packages/clickhouse-server.yaml @@ -42,8 +42,11 @@ deb: Source: clickhouse contents: -- src: root/etc/clickhouse-server - dst: /etc/clickhouse-server +- src: root/etc/clickhouse-server/config.xml + dst: /etc/clickhouse-server/config.xml + type: config +- src: root/etc/clickhouse-server/users.xml + dst: /etc/clickhouse-server/users.xml type: config - src: clickhouse-server.init dst: /etc/init.d/clickhouse-server diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index e616cb8cf72..58569a32619 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -1108,15 +1108,21 @@ void Client::processConfig() else format = config().getString("format", is_interactive ? 
"PrettyCompact" : "TabSeparated"); - format_max_block_size = config().getInt("format_max_block_size", global_context->getSettingsRef().max_block_size); + format_max_block_size = config().getUInt64("format_max_block_size", + global_context->getSettingsRef().max_block_size); insert_format = "Values"; /// Setting value from cmd arg overrides one from config if (global_context->getSettingsRef().max_insert_block_size.changed) + { insert_format_max_block_size = global_context->getSettingsRef().max_insert_block_size; + } else - insert_format_max_block_size = config().getInt("insert_format_max_block_size", global_context->getSettingsRef().max_insert_block_size); + { + insert_format_max_block_size = config().getUInt64("insert_format_max_block_size", + global_context->getSettingsRef().max_insert_block_size); + } ClientInfo & client_info = global_context->getClientInfo(); client_info.setInitialQuery(); diff --git a/programs/client/clickhouse-client.xml b/programs/client/clickhouse-client.xml index 66e7afd8f8c..00f5b26eddf 100644 --- a/programs/client/clickhouse-client.xml +++ b/programs/client/clickhouse-client.xml @@ -19,7 +19,6 @@ {host} {port} {user} - {database} {display_name} Terminal colors: https://misc.flogisoft.com/bash/tip_colors_and_formatting See also: https://wiki.hackzine.org/development/misc/readline-color-prompt.html diff --git a/programs/copier/Aliases.h b/programs/copier/Aliases.h index c4d9c40d9f1..02be3441acd 100644 --- a/programs/copier/Aliases.h +++ b/programs/copier/Aliases.h @@ -1,6 +1,10 @@ #pragma once -#include +#include + +#include + +#include namespace DB { @@ -8,21 +12,4 @@ namespace DB using DatabaseAndTableName = std::pair; using ListOfDatabasesAndTableNames = std::vector; - - /// Hierarchical description of the tasks - struct ShardPartitionPiece; - struct ShardPartition; - struct TaskShard; - struct TaskTable; - struct TaskCluster; - struct ClusterPartition; - - using PartitionPieces = std::vector; - using TasksPartition = std::map>; - using ShardInfo = Cluster::ShardInfo; - using TaskShardPtr = std::shared_ptr; - using TasksShard = std::vector; - using TasksTable = std::list; - using ClusterPartitions = std::map>; } - diff --git a/programs/copier/CMakeLists.txt b/programs/copier/CMakeLists.txt index 57e0996ed78..2c17e70bc5e 100644 --- a/programs/copier/CMakeLists.txt +++ b/programs/copier/CMakeLists.txt @@ -1,7 +1,13 @@ set(CLICKHOUSE_COPIER_SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/ClusterCopierApp.cpp" "${CMAKE_CURRENT_SOURCE_DIR}/ClusterCopier.cpp" - "${CMAKE_CURRENT_SOURCE_DIR}/Internals.cpp") + "${CMAKE_CURRENT_SOURCE_DIR}/Internals.cpp" + "${CMAKE_CURRENT_SOURCE_DIR}/ShardPartition.cpp" + "${CMAKE_CURRENT_SOURCE_DIR}/ShardPartitionPiece.cpp" + "${CMAKE_CURRENT_SOURCE_DIR}/StatusAccumulator.cpp" + "${CMAKE_CURRENT_SOURCE_DIR}/TaskCluster.cpp" + "${CMAKE_CURRENT_SOURCE_DIR}/TaskShard.cpp" + "${CMAKE_CURRENT_SOURCE_DIR}/TaskTable.cpp") set (CLICKHOUSE_COPIER_LINK PRIVATE diff --git a/programs/copier/ClusterCopier.h b/programs/copier/ClusterCopier.h index b354fc59eee..063b13e9078 100644 --- a/programs/copier/ClusterCopier.h +++ b/programs/copier/ClusterCopier.h @@ -3,7 +3,8 @@ #include "Aliases.h" #include "Internals.h" #include "TaskCluster.h" -#include "TaskTableAndShard.h" +#include "TaskShard.h" +#include "TaskTable.h" #include "ShardPartition.h" #include "ShardPartitionPiece.h" #include "ZooKeeperStaff.h" diff --git a/programs/copier/ClusterPartition.h b/programs/copier/ClusterPartition.h index ed69bfa8c26..22063989e22 100644 --- 
a/programs/copier/ClusterPartition.h +++ b/programs/copier/ClusterPartition.h @@ -1,17 +1,22 @@ #pragma once -#include "Aliases.h" +#include +#include namespace DB { - /// Contains info about all shards that contain a partition - struct ClusterPartition - { - double elapsed_time_seconds = 0; - UInt64 bytes_copied = 0; - UInt64 rows_copied = 0; - UInt64 blocks_copied = 0; - UInt64 total_tries = 0; - }; +/// Contains info about all shards that contain a partition +struct ClusterPartition +{ + double elapsed_time_seconds = 0; + UInt64 bytes_copied = 0; + UInt64 rows_copied = 0; + UInt64 blocks_copied = 0; + + UInt64 total_tries = 0; +}; + +using ClusterPartitions = std::map>; + } diff --git a/programs/copier/ShardPartition.cpp b/programs/copier/ShardPartition.cpp new file mode 100644 index 00000000000..4c962fc807d --- /dev/null +++ b/programs/copier/ShardPartition.cpp @@ -0,0 +1,70 @@ +#include "ShardPartition.h" + +#include "TaskShard.h" +#include "TaskTable.h" + +namespace DB +{ + +ShardPartition::ShardPartition(TaskShard & parent, String name_quoted_, size_t number_of_splits) + : task_shard(parent) + , name(std::move(name_quoted_)) +{ + pieces.reserve(number_of_splits); +} + +String ShardPartition::getPartitionCleanStartPath() const +{ + return getPartitionPath() + "/clean_start"; +} + +String ShardPartition::getPartitionPieceCleanStartPath(size_t current_piece_number) const +{ + assert(current_piece_number < task_shard.task_table.number_of_splits); + return getPartitionPiecePath(current_piece_number) + "/clean_start"; +} + +String ShardPartition::getPartitionPath() const +{ + return task_shard.task_table.getPartitionPath(name); +} + +String ShardPartition::getPartitionPiecePath(size_t current_piece_number) const +{ + assert(current_piece_number < task_shard.task_table.number_of_splits); + return task_shard.task_table.getPartitionPiecePath(name, current_piece_number); +} + +String ShardPartition::getShardStatusPath() const +{ + // schema: //tables///shards/ + // e.g. /root/table_test.hits/201701/shards/1 + return getPartitionShardsPath() + "/" + toString(task_shard.numberInCluster()); +} + +String ShardPartition::getPartitionShardsPath() const +{ + return getPartitionPath() + "/shards"; +} + +String ShardPartition::getPartitionActiveWorkersPath() const +{ + return getPartitionPath() + "/partition_active_workers"; +} + +String ShardPartition::getActiveWorkerPath() const +{ + return getPartitionActiveWorkersPath() + "/" + toString(task_shard.numberInCluster()); +} + +String ShardPartition::getCommonPartitionIsDirtyPath() const +{ + return getPartitionPath() + "/is_dirty"; +} + +String ShardPartition::getCommonPartitionIsCleanedPath() const +{ + return getCommonPartitionIsDirtyPath() + "/cleaned"; +} + +} diff --git a/programs/copier/ShardPartition.h b/programs/copier/ShardPartition.h index 7de381977f9..2457213733c 100644 --- a/programs/copier/ShardPartition.h +++ b/programs/copier/ShardPartition.h @@ -1,19 +1,23 @@ #pragma once -#include "Aliases.h" -#include "TaskTableAndShard.h" +#include "ShardPartitionPiece.h" + +#include + +#include namespace DB { +struct TaskShard; + /// Just destination partition of a shard /// I don't know what this comment means. /// In short, when we discovered what shards contain currently processing partition, /// This class describes a partition (name) that is stored on the shard (parent). 
struct ShardPartition { - ShardPartition(TaskShard &parent, String name_quoted_, size_t number_of_splits = 10) - : task_shard(parent), name(std::move(name_quoted_)) { pieces.reserve(number_of_splits); } + ShardPartition(TaskShard &parent, String name_quoted_, size_t number_of_splits = 10); String getPartitionPath() const; @@ -45,58 +49,6 @@ struct ShardPartition String name; }; -inline String ShardPartition::getPartitionCleanStartPath() const -{ - return getPartitionPath() + "/clean_start"; -} - -inline String ShardPartition::getPartitionPieceCleanStartPath(size_t current_piece_number) const -{ - assert(current_piece_number < task_shard.task_table.number_of_splits); - return getPartitionPiecePath(current_piece_number) + "/clean_start"; -} - -inline String ShardPartition::getPartitionPath() const -{ - return task_shard.task_table.getPartitionPath(name); -} - -inline String ShardPartition::getPartitionPiecePath(size_t current_piece_number) const -{ - assert(current_piece_number < task_shard.task_table.number_of_splits); - return task_shard.task_table.getPartitionPiecePath(name, current_piece_number); -} - -inline String ShardPartition::getShardStatusPath() const -{ - // schema: //tables/
//shards/ - // e.g. /root/table_test.hits/201701/shards/1 - return getPartitionShardsPath() + "/" + toString(task_shard.numberInCluster()); -} - -inline String ShardPartition::getPartitionShardsPath() const -{ - return getPartitionPath() + "/shards"; -} - -inline String ShardPartition::getPartitionActiveWorkersPath() const -{ - return getPartitionPath() + "/partition_active_workers"; -} - -inline String ShardPartition::getActiveWorkerPath() const -{ - return getPartitionActiveWorkersPath() + "/" + toString(task_shard.numberInCluster()); -} - -inline String ShardPartition::getCommonPartitionIsDirtyPath() const -{ - return getPartitionPath() + "/is_dirty"; -} - -inline String ShardPartition::getCommonPartitionIsCleanedPath() const -{ - return getCommonPartitionIsDirtyPath() + "/cleaned"; -} +using TasksPartition = std::map>; } diff --git a/programs/copier/ShardPartitionPiece.cpp b/programs/copier/ShardPartitionPiece.cpp new file mode 100644 index 00000000000..36d1621e012 --- /dev/null +++ b/programs/copier/ShardPartitionPiece.cpp @@ -0,0 +1,64 @@ +#include "ShardPartitionPiece.h" + +#include "ShardPartition.h" +#include "TaskShard.h" + +#include + +namespace DB +{ + +ShardPartitionPiece::ShardPartitionPiece(ShardPartition & parent, size_t current_piece_number_, bool is_present_piece_) + : is_absent_piece(!is_present_piece_) + , current_piece_number(current_piece_number_) + , shard_partition(parent) +{ +} + +String ShardPartitionPiece::getPartitionPiecePath() const +{ + return shard_partition.getPartitionPath() + "/piece_" + toString(current_piece_number); +} + +String ShardPartitionPiece::getPartitionPieceCleanStartPath() const +{ + return getPartitionPiecePath() + "/clean_start"; +} + +String ShardPartitionPiece::getPartitionPieceIsDirtyPath() const +{ + return getPartitionPiecePath() + "/is_dirty"; +} + +String ShardPartitionPiece::getPartitionPieceIsCleanedPath() const +{ + return getPartitionPieceIsDirtyPath() + "/cleaned"; +} + +String ShardPartitionPiece::getPartitionPieceActiveWorkersPath() const +{ + return getPartitionPiecePath() + "/partition_piece_active_workers"; +} + +String ShardPartitionPiece::getActiveWorkerPath() const +{ + return getPartitionPieceActiveWorkersPath() + "/" + toString(shard_partition.task_shard.numberInCluster()); +} + +/// On what shards do we have current partition. 
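+/// (i.e. the "<partition piece path>/shards" ZooKeeper node that lists the shards holding this piece)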
+String ShardPartitionPiece::getPartitionPieceShardsPath() const +{ + return getPartitionPiecePath() + "/shards"; +} + +String ShardPartitionPiece::getShardStatusPath() const +{ + return getPartitionPieceShardsPath() + "/" + toString(shard_partition.task_shard.numberInCluster()); +} + +String ShardPartitionPiece::getPartitionPieceCleanerPath() const +{ + return getPartitionPieceIsDirtyPath() + "/cleaner"; +} + +} diff --git a/programs/copier/ShardPartitionPiece.h b/programs/copier/ShardPartitionPiece.h index a21fd531da4..aba378d466d 100644 --- a/programs/copier/ShardPartitionPiece.h +++ b/programs/copier/ShardPartitionPiece.h @@ -1,16 +1,15 @@ #pragma once -#include "Internals.h" +#include namespace DB { +struct ShardPartition; + struct ShardPartitionPiece { - - ShardPartitionPiece(ShardPartition &parent, size_t current_piece_number_, bool is_present_piece_) - : is_absent_piece(!is_present_piece_), current_piece_number(current_piece_number_), - shard_partition(parent) {} + ShardPartitionPiece(ShardPartition & parent, size_t current_piece_number_, bool is_present_piece_); String getPartitionPiecePath() const; @@ -37,52 +36,6 @@ struct ShardPartitionPiece ShardPartition & shard_partition; }; - -inline String ShardPartitionPiece::getPartitionPiecePath() const -{ - return shard_partition.getPartitionPath() + "/piece_" + toString(current_piece_number); -} - -inline String ShardPartitionPiece::getPartitionPieceCleanStartPath() const -{ - return getPartitionPiecePath() + "/clean_start"; -} - -inline String ShardPartitionPiece::getPartitionPieceIsDirtyPath() const -{ - return getPartitionPiecePath() + "/is_dirty"; -} - -inline String ShardPartitionPiece::getPartitionPieceIsCleanedPath() const -{ - return getPartitionPieceIsDirtyPath() + "/cleaned"; -} - -inline String ShardPartitionPiece::getPartitionPieceActiveWorkersPath() const -{ - return getPartitionPiecePath() + "/partition_piece_active_workers"; -} - -inline String ShardPartitionPiece::getActiveWorkerPath() const -{ - return getPartitionPieceActiveWorkersPath() + "/" + toString(shard_partition.task_shard.numberInCluster()); -} - -/// On what shards do we have current partition. 
-inline String ShardPartitionPiece::getPartitionPieceShardsPath() const -{ - return getPartitionPiecePath() + "/shards"; -} - -inline String ShardPartitionPiece::getShardStatusPath() const -{ - return getPartitionPieceShardsPath() + "/" + toString(shard_partition.task_shard.numberInCluster()); -} - -inline String ShardPartitionPiece::getPartitionPieceCleanerPath() const -{ - return getPartitionPieceIsDirtyPath() + "/cleaner"; -} - +using PartitionPieces = std::vector; } diff --git a/programs/copier/StatusAccumulator.cpp b/programs/copier/StatusAccumulator.cpp new file mode 100644 index 00000000000..77adeac708c --- /dev/null +++ b/programs/copier/StatusAccumulator.cpp @@ -0,0 +1,48 @@ +#include "StatusAccumulator.h" + +#include +#include +#include +#include + +#include + +namespace DB +{ + +StatusAccumulator::MapPtr StatusAccumulator::fromJSON(String state_json) +{ + Poco::JSON::Parser parser; + auto state = parser.parse(state_json).extract(); + MapPtr result_ptr = std::make_shared(); + for (const auto & table_name : state->getNames()) + { + auto table_status_json = state->getValue(table_name); + auto table_status = parser.parse(table_status_json).extract(); + /// Map entry will be created if it is absent + auto & map_table_status = (*result_ptr)[table_name]; + map_table_status.all_partitions_count += table_status->getValue("all_partitions_count"); + map_table_status.processed_partitions_count += table_status->getValue("processed_partitions_count"); + } + return result_ptr; +} + +String StatusAccumulator::serializeToJSON(MapPtr statuses) +{ + Poco::JSON::Object result_json; + for (const auto & [table_name, table_status] : *statuses) + { + Poco::JSON::Object status_json; + status_json.set("all_partitions_count", table_status.all_partitions_count); + status_json.set("processed_partitions_count", table_status.processed_partitions_count); + + result_json.set(table_name, status_json); + } + std::ostringstream oss; // STYLE_CHECK_ALLOW_STD_STRING_STREAM + oss.exceptions(std::ios::failbit); + Poco::JSON::Stringifier::stringify(result_json, oss); + auto result = oss.str(); + return result; +} + +} diff --git a/programs/copier/StatusAccumulator.h b/programs/copier/StatusAccumulator.h index 6e20e3dc95d..d420b611602 100644 --- a/programs/copier/StatusAccumulator.h +++ b/programs/copier/StatusAccumulator.h @@ -1,65 +1,27 @@ #pragma once +#include -#include -#include -#include -#include - -#include #include -#include -#include +#include namespace DB { class StatusAccumulator { - public: - struct TableStatus - { - size_t all_partitions_count; - size_t processed_partitions_count; - }; +public: + struct TableStatus + { + size_t all_partitions_count; + size_t processed_partitions_count; + }; - using Map = std::unordered_map; - using MapPtr = std::shared_ptr; + using Map = std::unordered_map; + using MapPtr = std::shared_ptr; - static MapPtr fromJSON(std::string state_json) - { - Poco::JSON::Parser parser; - auto state = parser.parse(state_json).extract(); - MapPtr result_ptr = std::make_shared(); - for (const auto & table_name : state->getNames()) - { - auto table_status_json = state->getValue(table_name); - auto table_status = parser.parse(table_status_json).extract(); - /// Map entry will be created if it is absent - auto & map_table_status = (*result_ptr)[table_name]; - map_table_status.all_partitions_count += table_status->getValue("all_partitions_count"); - map_table_status.processed_partitions_count += table_status->getValue("processed_partitions_count"); - } - return result_ptr; - } - - static 
std::string serializeToJSON(MapPtr statuses) - { - Poco::JSON::Object result_json; - for (const auto & [table_name, table_status] : *statuses) - { - Poco::JSON::Object status_json; - status_json.set("all_partitions_count", table_status.all_partitions_count); - status_json.set("processed_partitions_count", table_status.processed_partitions_count); - - result_json.set(table_name, status_json); - } - std::ostringstream oss; // STYLE_CHECK_ALLOW_STD_STRING_STREAM - oss.exceptions(std::ios::failbit); - Poco::JSON::Stringifier::stringify(result_json, oss); - auto result = oss.str(); - return result; - } + static MapPtr fromJSON(String state_json); + static String serializeToJSON(MapPtr statuses); }; } diff --git a/programs/copier/TaskCluster.cpp b/programs/copier/TaskCluster.cpp new file mode 100644 index 00000000000..957c7d2120d --- /dev/null +++ b/programs/copier/TaskCluster.cpp @@ -0,0 +1,74 @@ +#include "TaskCluster.h" + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int BAD_ARGUMENTS; +} + +TaskCluster::TaskCluster(const String & task_zookeeper_path_, const String & default_local_database_) + : task_zookeeper_path(task_zookeeper_path_) + , default_local_database(default_local_database_) +{} + +void DB::TaskCluster::loadTasks(const Poco::Util::AbstractConfiguration & config, const String & base_key) +{ + String prefix = base_key.empty() ? "" : base_key + "."; + + clusters_prefix = prefix + "remote_servers"; + if (!config.has(clusters_prefix)) + throw Exception("You should specify list of clusters in " + clusters_prefix, ErrorCodes::BAD_ARGUMENTS); + + Poco::Util::AbstractConfiguration::Keys tables_keys; + config.keys(prefix + "tables", tables_keys); + + for (const auto & table_key : tables_keys) + { + table_tasks.emplace_back(*this, config, prefix + "tables", table_key); + } +} + +void DB::TaskCluster::reloadSettings(const Poco::Util::AbstractConfiguration & config, const String & base_key) +{ + String prefix = base_key.empty() ? "" : base_key + "."; + + max_workers = config.getUInt64(prefix + "max_workers"); + + settings_common = Settings(); + if (config.has(prefix + "settings")) + settings_common.loadSettingsFromConfig(prefix + "settings", config); + + settings_common.prefer_localhost_replica = false; + + settings_pull = settings_common; + if (config.has(prefix + "settings_pull")) + settings_pull.loadSettingsFromConfig(prefix + "settings_pull", config); + + settings_push = settings_common; + if (config.has(prefix + "settings_push")) + settings_push.loadSettingsFromConfig(prefix + "settings_push", config); + + auto set_default_value = [] (auto && setting, auto && default_value) + { + setting = setting.changed ? 
setting.value : default_value; + }; + + /// Override important settings + settings_pull.readonly = 1; + settings_pull.prefer_localhost_replica = false; + settings_push.insert_distributed_sync = true; + settings_push.prefer_localhost_replica = false; + + set_default_value(settings_pull.load_balancing, LoadBalancing::NEAREST_HOSTNAME); + set_default_value(settings_pull.max_threads, 1); + set_default_value(settings_pull.max_block_size, 8192UL); + set_default_value(settings_pull.preferred_block_size_bytes, 0); + + set_default_value(settings_push.insert_distributed_timeout, 0); + set_default_value(settings_push.replication_alter_partitions_sync, 2); +} + +} + diff --git a/programs/copier/TaskCluster.h b/programs/copier/TaskCluster.h index 7d8f01ba15f..fc1c8a663ec 100644 --- a/programs/copier/TaskCluster.h +++ b/programs/copier/TaskCluster.h @@ -1,21 +1,20 @@ #pragma once -#include "Aliases.h" +#include "TaskTable.h" + +#include +#include + #include +#include + namespace DB { -namespace ErrorCodes -{ - extern const int BAD_ARGUMENTS; -} struct TaskCluster { - TaskCluster(const String & task_zookeeper_path_, const String & default_local_database_) - : task_zookeeper_path(task_zookeeper_path_) - , default_local_database(default_local_database_) - {} + TaskCluster(const String & task_zookeeper_path_, const String & default_local_database_); void loadTasks(const Poco::Util::AbstractConfiguration & config, const String & base_key = ""); @@ -50,61 +49,4 @@ struct TaskCluster pcg64 random_engine; }; -inline void DB::TaskCluster::loadTasks(const Poco::Util::AbstractConfiguration & config, const String & base_key) -{ - String prefix = base_key.empty() ? "" : base_key + "."; - - clusters_prefix = prefix + "remote_servers"; - if (!config.has(clusters_prefix)) - throw Exception("You should specify list of clusters in " + clusters_prefix, ErrorCodes::BAD_ARGUMENTS); - - Poco::Util::AbstractConfiguration::Keys tables_keys; - config.keys(prefix + "tables", tables_keys); - - for (const auto & table_key : tables_keys) - { - table_tasks.emplace_back(*this, config, prefix + "tables", table_key); - } -} - -inline void DB::TaskCluster::reloadSettings(const Poco::Util::AbstractConfiguration & config, const String & base_key) -{ - String prefix = base_key.empty() ? "" : base_key + "."; - - max_workers = config.getUInt64(prefix + "max_workers"); - - settings_common = Settings(); - if (config.has(prefix + "settings")) - settings_common.loadSettingsFromConfig(prefix + "settings", config); - - settings_common.prefer_localhost_replica = 0; - - settings_pull = settings_common; - if (config.has(prefix + "settings_pull")) - settings_pull.loadSettingsFromConfig(prefix + "settings_pull", config); - - settings_push = settings_common; - if (config.has(prefix + "settings_push")) - settings_push.loadSettingsFromConfig(prefix + "settings_push", config); - - auto set_default_value = [] (auto && setting, auto && default_value) - { - setting = setting.changed ? 
setting.value : default_value; - }; - - /// Override important settings - settings_pull.readonly = 1; - settings_pull.prefer_localhost_replica = false; - settings_push.insert_distributed_sync = true; - settings_push.prefer_localhost_replica = false; - - set_default_value(settings_pull.load_balancing, LoadBalancing::NEAREST_HOSTNAME); - set_default_value(settings_pull.max_threads, 1); - set_default_value(settings_pull.max_block_size, 8192UL); - set_default_value(settings_pull.preferred_block_size_bytes, 0); - - set_default_value(settings_push.insert_distributed_timeout, 0); - set_default_value(settings_push.replication_alter_partitions_sync, 2); -} - } diff --git a/programs/copier/TaskShard.cpp b/programs/copier/TaskShard.cpp new file mode 100644 index 00000000000..d156f451a84 --- /dev/null +++ b/programs/copier/TaskShard.cpp @@ -0,0 +1,37 @@ +#include "TaskShard.h" + +#include "TaskTable.h" + +namespace DB +{ + +TaskShard::TaskShard(TaskTable & parent, const Cluster::ShardInfo & info_) + : task_table(parent) + , info(info_) +{ + list_of_split_tables_on_shard.assign(task_table.number_of_splits, DatabaseAndTableName()); +} + +UInt32 TaskShard::numberInCluster() const +{ + return info.shard_num; +} + +UInt32 TaskShard::indexInCluster() const +{ + return info.shard_num - 1; +} + +String DB::TaskShard::getDescription() const +{ + return fmt::format("N{} (having a replica {}, pull table {} of cluster {}", + numberInCluster(), getHostNameExample(), getQuotedTable(task_table.table_pull), task_table.cluster_pull_name); +} + +String DB::TaskShard::getHostNameExample() const +{ + const auto & replicas = task_table.cluster_pull->getShardsAddresses().at(indexInCluster()); + return replicas.at(0).readableString(); +} + +} diff --git a/programs/copier/TaskShard.h b/programs/copier/TaskShard.h new file mode 100644 index 00000000000..05d652077ea --- /dev/null +++ b/programs/copier/TaskShard.h @@ -0,0 +1,56 @@ +#pragma once + +#include "Aliases.h" +#include "Internals.h" +#include "ClusterPartition.h" +#include "ShardPartition.h" + + +namespace DB +{ + +struct TaskTable; + +struct TaskShard +{ + TaskShard(TaskTable & parent, const Cluster::ShardInfo & info_); + + TaskTable & task_table; + + Cluster::ShardInfo info; + + UInt32 numberInCluster() const; + + UInt32 indexInCluster() const; + + String getDescription() const; + + String getHostNameExample() const; + + /// Used to sort clusters by their proximity + ShardPriority priority; + + /// Column with unique destination partitions (computed from engine_push_partition_key expr.) 
in the shard + ColumnWithTypeAndName partition_key_column; + + /// There is a task for each destination partition + TasksPartition partition_tasks; + + /// Which partitions have been checked for existence + /// If some partition from this lists is exists, it is in partition_tasks + std::set checked_partitions; + + /// Last CREATE TABLE query of the table of the shard + ASTPtr current_pull_table_create_query; + ASTPtr current_push_table_create_query; + + /// Internal distributed tables + DatabaseAndTableName table_read_shard; + DatabaseAndTableName main_table_split_shard; + ListOfDatabasesAndTableNames list_of_split_tables_on_shard; +}; + +using TaskShardPtr = std::shared_ptr; +using TasksShard = std::vector; + +} diff --git a/programs/copier/TaskTable.cpp b/programs/copier/TaskTable.cpp new file mode 100644 index 00000000000..5b09a9c99a7 --- /dev/null +++ b/programs/copier/TaskTable.cpp @@ -0,0 +1,221 @@ +#include "TaskTable.h" + +#include "ClusterPartition.h" +#include "TaskCluster.h" + +#include + +#include + +namespace DB +{ +namespace ErrorCodes +{ + extern const int UNKNOWN_ELEMENT_IN_CONFIG; + extern const int LOGICAL_ERROR; +} + +TaskTable::TaskTable(TaskCluster & parent, const Poco::Util::AbstractConfiguration & config, + const String & prefix_, const String & table_key) + : task_cluster(parent) +{ + String table_prefix = prefix_ + "." + table_key + "."; + + name_in_config = table_key; + + number_of_splits = config.getUInt64(table_prefix + "number_of_splits", 3); + + allow_to_copy_alias_and_materialized_columns = config.getBool(table_prefix + "allow_to_copy_alias_and_materialized_columns", false); + allow_to_drop_target_partitions = config.getBool(table_prefix + "allow_to_drop_target_partitions", false); + + cluster_pull_name = config.getString(table_prefix + "cluster_pull"); + cluster_push_name = config.getString(table_prefix + "cluster_push"); + + table_pull.first = config.getString(table_prefix + "database_pull"); + table_pull.second = config.getString(table_prefix + "table_pull"); + + table_push.first = config.getString(table_prefix + "database_push"); + table_push.second = config.getString(table_prefix + "table_push"); + + /// Used as node name in ZooKeeper + table_id = escapeForFileName(cluster_push_name) + + "." + escapeForFileName(table_push.first) + + "." 
+ escapeForFileName(table_push.second); + + engine_push_str = config.getString(table_prefix + "engine", "rand()"); + + { + ParserStorage parser_storage; + engine_push_ast = parseQuery(parser_storage, engine_push_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + engine_push_partition_key_ast = extractPartitionKey(engine_push_ast); + primary_key_comma_separated = boost::algorithm::join(extractPrimaryKeyColumnNames(engine_push_ast), ", "); + is_replicated_table = isReplicatedTableEngine(engine_push_ast); + } + + sharding_key_str = config.getString(table_prefix + "sharding_key"); + + auxiliary_engine_split_asts.reserve(number_of_splits); + { + ParserExpressionWithOptionalAlias parser_expression(false); + sharding_key_ast = parseQuery(parser_expression, sharding_key_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + main_engine_split_ast = createASTStorageDistributed(cluster_push_name, table_push.first, table_push.second, + sharding_key_ast); + + for (const auto piece_number : collections::range(0, number_of_splits)) + { + auxiliary_engine_split_asts.emplace_back + ( + createASTStorageDistributed(cluster_push_name, table_push.first, + table_push.second + "_piece_" + toString(piece_number), sharding_key_ast) + ); + } + } + + where_condition_str = config.getString(table_prefix + "where_condition", ""); + if (!where_condition_str.empty()) + { + ParserExpressionWithOptionalAlias parser_expression(false); + where_condition_ast = parseQuery(parser_expression, where_condition_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); + + // Will use canonical expression form + where_condition_str = queryToString(where_condition_ast); + } + + String enabled_partitions_prefix = table_prefix + "enabled_partitions"; + has_enabled_partitions = config.has(enabled_partitions_prefix); + + if (has_enabled_partitions) + { + Strings keys; + config.keys(enabled_partitions_prefix, keys); + + if (keys.empty()) + { + /// Parse list of partition from space-separated string + String partitions_str = config.getString(table_prefix + "enabled_partitions"); + boost::trim_if(partitions_str, isWhitespaceASCII); + boost::split(enabled_partitions, partitions_str, isWhitespaceASCII, boost::token_compress_on); + } + else + { + /// Parse sequence of ... + for (const String &key : keys) + { + if (!startsWith(key, "partition")) + throw Exception("Unknown key " + key + " in " + enabled_partitions_prefix, ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG); + + enabled_partitions.emplace_back(config.getString(enabled_partitions_prefix + "." 
+ key)); + } + } + + std::copy(enabled_partitions.begin(), enabled_partitions.end(), std::inserter(enabled_partitions_set, enabled_partitions_set.begin())); + } +} + + +String TaskTable::getPartitionPath(const String & partition_name) const +{ + return task_cluster.task_zookeeper_path // root + + "/tables/" + table_id // tables/dst_cluster.merge.hits + + "/" + escapeForFileName(partition_name); // 201701 +} + +String TaskTable::getPartitionAttachIsActivePath(const String & partition_name) const +{ + return getPartitionPath(partition_name) + "/attach_active"; +} + +String TaskTable::getPartitionAttachIsDonePath(const String & partition_name) const +{ + return getPartitionPath(partition_name) + "/attach_is_done"; +} + +String TaskTable::getPartitionPiecePath(const String & partition_name, size_t piece_number) const +{ + assert(piece_number < number_of_splits); + return getPartitionPath(partition_name) + "/piece_" + toString(piece_number); // 1...number_of_splits +} + +String TaskTable::getCertainPartitionIsDirtyPath(const String &partition_name) const +{ + return getPartitionPath(partition_name) + "/is_dirty"; +} + +String TaskTable::getCertainPartitionPieceIsDirtyPath(const String & partition_name, const size_t piece_number) const +{ + return getPartitionPiecePath(partition_name, piece_number) + "/is_dirty"; +} + +String TaskTable::getCertainPartitionIsCleanedPath(const String & partition_name) const +{ + return getCertainPartitionIsDirtyPath(partition_name) + "/cleaned"; +} + +String TaskTable::getCertainPartitionPieceIsCleanedPath(const String & partition_name, const size_t piece_number) const +{ + return getCertainPartitionPieceIsDirtyPath(partition_name, piece_number) + "/cleaned"; +} + +String TaskTable::getCertainPartitionTaskStatusPath(const String & partition_name) const +{ + return getPartitionPath(partition_name) + "/shards"; +} + +String TaskTable::getCertainPartitionPieceTaskStatusPath(const String & partition_name, const size_t piece_number) const +{ + return getPartitionPiecePath(partition_name, piece_number) + "/shards"; +} + +bool TaskTable::isReplicatedTable() const +{ + return is_replicated_table; +} + +String TaskTable::getStatusAllPartitionCount() const +{ + return task_cluster.task_zookeeper_path + "/status/all_partitions_count"; +} + +String TaskTable::getStatusProcessedPartitionsCount() const +{ + return task_cluster.task_zookeeper_path + "/status/processed_partitions_count"; +} + +ASTPtr TaskTable::rewriteReplicatedCreateQueryToPlain() const +{ + ASTPtr prev_engine_push_ast = engine_push_ast->clone(); + + auto & new_storage_ast = prev_engine_push_ast->as(); + auto & new_engine_ast = new_storage_ast.engine->as(); + + /// Remove "Replicated" from name + new_engine_ast.name = new_engine_ast.name.substr(10); + + if (new_engine_ast.arguments) + { + auto & replicated_table_arguments = new_engine_ast.arguments->children; + + + /// In some cases of Atomic database engine usage ReplicatedMergeTree tables + /// could be created without arguments. + if (!replicated_table_arguments.empty()) + { + /// Delete first two arguments of Replicated...MergeTree() table. 
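+ /// (these are the ZooKeeper path and the replica name)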
+ replicated_table_arguments.erase(replicated_table_arguments.begin()); + replicated_table_arguments.erase(replicated_table_arguments.begin()); + } + } + + return new_storage_ast.clone(); +} + +ClusterPartition & TaskTable::getClusterPartition(const String & partition_name) +{ + auto it = cluster_partitions.find(partition_name); + if (it == cluster_partitions.end()) + throw Exception("There are no cluster partition " + partition_name + " in " + table_id, + ErrorCodes::LOGICAL_ERROR); + return it->second; +} + +} diff --git a/programs/copier/TaskTable.h b/programs/copier/TaskTable.h new file mode 100644 index 00000000000..2bb7f078bc6 --- /dev/null +++ b/programs/copier/TaskTable.h @@ -0,0 +1,173 @@ +#pragma once + +#include "Aliases.h" +#include "TaskShard.h" + + +namespace DB +{ + +struct ClusterPartition; +struct TaskCluster; + +struct TaskTable +{ + TaskTable(TaskCluster & parent, const Poco::Util::AbstractConfiguration & config, const String & prefix, const String & table_key); + + TaskCluster & task_cluster; + + /// These functions used in checkPartitionIsDone() or checkPartitionPieceIsDone() + /// They are implemented here not to call task_table.tasks_shard[partition_name].second.pieces[current_piece_number] etc. + + String getPartitionPath(const String & partition_name) const; + + String getPartitionAttachIsActivePath(const String & partition_name) const; + + String getPartitionAttachIsDonePath(const String & partition_name) const; + + String getPartitionPiecePath(const String & partition_name, size_t piece_number) const; + + String getCertainPartitionIsDirtyPath(const String & partition_name) const; + + String getCertainPartitionPieceIsDirtyPath(const String & partition_name, size_t piece_number) const; + + String getCertainPartitionIsCleanedPath(const String & partition_name) const; + + String getCertainPartitionPieceIsCleanedPath(const String & partition_name, size_t piece_number) const; + + String getCertainPartitionTaskStatusPath(const String & partition_name) const; + + String getCertainPartitionPieceTaskStatusPath(const String & partition_name, size_t piece_number) const; + + bool isReplicatedTable() const; + + /// These nodes are used for check-status option + String getStatusAllPartitionCount() const; + String getStatusProcessedPartitionsCount() const; + + /// Partitions will be split into number-of-splits pieces. + /// Each piece will be copied independently. 
(10 by default) + size_t number_of_splits; + + bool allow_to_copy_alias_and_materialized_columns{false}; + bool allow_to_drop_target_partitions{false}; + + String name_in_config; + + /// Used as task ID + String table_id; + + /// Column names in primary key + String primary_key_comma_separated; + + /// Source cluster and table + String cluster_pull_name; + DatabaseAndTableName table_pull; + + /// Destination cluster and table + String cluster_push_name; + DatabaseAndTableName table_push; + + /// Storage of destination table + /// (tables that are stored on each shard of target cluster) + String engine_push_str; + ASTPtr engine_push_ast; + ASTPtr engine_push_partition_key_ast; + + /// First argument of Replicated...MergeTree() + String engine_push_zk_path; + bool is_replicated_table; + + ASTPtr rewriteReplicatedCreateQueryToPlain() const; + + /* + * A Distributed table definition used to split data + * Distributed table will be created on each shard of default + * cluster to perform data copying and resharding + * */ + String sharding_key_str; + ASTPtr sharding_key_ast; + ASTPtr main_engine_split_ast; + + /* + * To copy partition piece form one cluster to another we have to use Distributed table. + * In case of usage separate table (engine_push) for each partition piece, + * we have to use many Distributed tables. + * */ + ASTs auxiliary_engine_split_asts; + + /// Additional WHERE expression to filter input data + String where_condition_str; + ASTPtr where_condition_ast; + + /// Resolved clusters + ClusterPtr cluster_pull; + ClusterPtr cluster_push; + + /// Filter partitions that should be copied + bool has_enabled_partitions = false; + Strings enabled_partitions; + NameSet enabled_partitions_set; + + /** + * Prioritized list of shards + * all_shards contains information about all shards in the table. + * So we have to check whether particular shard have current partition or not while processing. + */ + TasksShard all_shards; + TasksShard local_shards; + + /// All partitions of the current table. 
+ ClusterPartitions cluster_partitions; + NameSet finished_cluster_partitions; + + /// Partition names to process in user-specified order + Strings ordered_partition_names; + + ClusterPartition & getClusterPartition(const String & partition_name); + + Stopwatch watch; + UInt64 bytes_copied = 0; + UInt64 rows_copied = 0; + + template + void initShards(RandomEngine &&random_engine); +}; + +using TasksTable = std::list; + + +template +inline void TaskTable::initShards(RandomEngine && random_engine) +{ + const String & fqdn_name = getFQDNOrHostName(); + std::uniform_int_distribution get_urand(0, std::numeric_limits::max()); + + // Compute the priority + for (const auto & shard_info : cluster_pull->getShardsInfo()) + { + TaskShardPtr task_shard = std::make_shared(*this, shard_info); + const auto & replicas = cluster_pull->getShardsAddresses().at(task_shard->indexInCluster()); + task_shard->priority = getReplicasPriority(replicas, fqdn_name, get_urand(random_engine)); + + all_shards.emplace_back(task_shard); + } + + // Sort by priority + std::sort(all_shards.begin(), all_shards.end(), + [](const TaskShardPtr & lhs, const TaskShardPtr & rhs) + { + return ShardPriority::greaterPriority(lhs->priority, rhs->priority); + }); + + // Cut local shards + auto it_first_remote = std::lower_bound(all_shards.begin(), all_shards.end(), 1, + [](const TaskShardPtr & lhs, UInt8 is_remote) + { + return lhs->priority.is_remote < is_remote; + }); + + local_shards.assign(all_shards.begin(), it_first_remote); +} + +} diff --git a/programs/copier/TaskTableAndShard.h b/programs/copier/TaskTableAndShard.h deleted file mode 100644 index cef9b669971..00000000000 --- a/programs/copier/TaskTableAndShard.h +++ /dev/null @@ -1,434 +0,0 @@ -#pragma once - -#include "Aliases.h" -#include "Internals.h" -#include "ClusterPartition.h" - -#include -#include - -#include -#include - - -namespace DB -{ -namespace ErrorCodes -{ - extern const int UNKNOWN_ELEMENT_IN_CONFIG; - extern const int LOGICAL_ERROR; -} - -struct TaskShard; - -struct TaskTable -{ - TaskTable(TaskCluster & parent, const Poco::Util::AbstractConfiguration & config, const String & prefix, - const String & table_key); - - TaskCluster & task_cluster; - - /// These functions used in checkPartitionIsDone() or checkPartitionPieceIsDone() - /// They are implemented here not to call task_table.tasks_shard[partition_name].second.pieces[current_piece_number] etc. 
- - String getPartitionPath(const String & partition_name) const; - - String getPartitionAttachIsActivePath(const String & partition_name) const; - - String getPartitionAttachIsDonePath(const String & partition_name) const; - - String getPartitionPiecePath(const String & partition_name, size_t piece_number) const; - - String getCertainPartitionIsDirtyPath(const String & partition_name) const; - - String getCertainPartitionPieceIsDirtyPath(const String & partition_name, size_t piece_number) const; - - String getCertainPartitionIsCleanedPath(const String & partition_name) const; - - String getCertainPartitionPieceIsCleanedPath(const String & partition_name, size_t piece_number) const; - - String getCertainPartitionTaskStatusPath(const String & partition_name) const; - - String getCertainPartitionPieceTaskStatusPath(const String & partition_name, size_t piece_number) const; - - bool isReplicatedTable() const { return is_replicated_table; } - - /// These nodes are used for check-status option - String getStatusAllPartitionCount() const; - String getStatusProcessedPartitionsCount() const; - - /// Partitions will be split into number-of-splits pieces. - /// Each piece will be copied independently. (10 by default) - size_t number_of_splits; - - bool allow_to_copy_alias_and_materialized_columns{false}; - bool allow_to_drop_target_partitions{false}; - - String name_in_config; - - /// Used as task ID - String table_id; - - /// Column names in primary key - String primary_key_comma_separated; - - /// Source cluster and table - String cluster_pull_name; - DatabaseAndTableName table_pull; - - /// Destination cluster and table - String cluster_push_name; - DatabaseAndTableName table_push; - - /// Storage of destination table - /// (tables that are stored on each shard of target cluster) - String engine_push_str; - ASTPtr engine_push_ast; - ASTPtr engine_push_partition_key_ast; - - /// First argument of Replicated...MergeTree() - String engine_push_zk_path; - bool is_replicated_table; - - ASTPtr rewriteReplicatedCreateQueryToPlain() const; - - /* - * A Distributed table definition used to split data - * Distributed table will be created on each shard of default - * cluster to perform data copying and resharding - * */ - String sharding_key_str; - ASTPtr sharding_key_ast; - ASTPtr main_engine_split_ast; - - /* - * To copy partition piece form one cluster to another we have to use Distributed table. - * In case of usage separate table (engine_push) for each partition piece, - * we have to use many Distributed tables. - * */ - ASTs auxiliary_engine_split_asts; - - /// Additional WHERE expression to filter input data - String where_condition_str; - ASTPtr where_condition_ast; - - /// Resolved clusters - ClusterPtr cluster_pull; - ClusterPtr cluster_push; - - /// Filter partitions that should be copied - bool has_enabled_partitions = false; - Strings enabled_partitions; - NameSet enabled_partitions_set; - - /** - * Prioritized list of shards - * all_shards contains information about all shards in the table. - * So we have to check whether particular shard have current partition or not while processing. - */ - TasksShard all_shards; - TasksShard local_shards; - - /// All partitions of the current table. 
- ClusterPartitions cluster_partitions; - NameSet finished_cluster_partitions; - - /// Partition names to process in user-specified order - Strings ordered_partition_names; - - ClusterPartition & getClusterPartition(const String & partition_name) - { - auto it = cluster_partitions.find(partition_name); - if (it == cluster_partitions.end()) - throw Exception("There are no cluster partition " + partition_name + " in " + table_id, - ErrorCodes::LOGICAL_ERROR); - return it->second; - } - - Stopwatch watch; - UInt64 bytes_copied = 0; - UInt64 rows_copied = 0; - - template - void initShards(RandomEngine &&random_engine); -}; - - -struct TaskShard -{ - TaskShard(TaskTable & parent, const ShardInfo & info_) : task_table(parent), info(info_) - { - list_of_split_tables_on_shard.assign(task_table.number_of_splits, DatabaseAndTableName()); - } - - TaskTable & task_table; - - ShardInfo info; - - UInt32 numberInCluster() const { return info.shard_num; } - - UInt32 indexInCluster() const { return info.shard_num - 1; } - - String getDescription() const; - - String getHostNameExample() const; - - /// Used to sort clusters by their proximity - ShardPriority priority; - - /// Column with unique destination partitions (computed from engine_push_partition_key expr.) in the shard - ColumnWithTypeAndName partition_key_column; - - /// There is a task for each destination partition - TasksPartition partition_tasks; - - /// Which partitions have been checked for existence - /// If some partition from this lists is exists, it is in partition_tasks - std::set checked_partitions; - - /// Last CREATE TABLE query of the table of the shard - ASTPtr current_pull_table_create_query; - ASTPtr current_push_table_create_query; - - /// Internal distributed tables - DatabaseAndTableName table_read_shard; - DatabaseAndTableName main_table_split_shard; - ListOfDatabasesAndTableNames list_of_split_tables_on_shard; -}; - - -inline String TaskTable::getPartitionPath(const String & partition_name) const -{ - return task_cluster.task_zookeeper_path // root - + "/tables/" + table_id // tables/dst_cluster.merge.hits - + "/" + escapeForFileName(partition_name); // 201701 -} - -inline String TaskTable::getPartitionAttachIsActivePath(const String & partition_name) const -{ - return getPartitionPath(partition_name) + "/attach_active"; -} - -inline String TaskTable::getPartitionAttachIsDonePath(const String & partition_name) const -{ - return getPartitionPath(partition_name) + "/attach_is_done"; -} - -inline String TaskTable::getPartitionPiecePath(const String & partition_name, size_t piece_number) const -{ - assert(piece_number < number_of_splits); - return getPartitionPath(partition_name) + "/piece_" + toString(piece_number); // 1...number_of_splits -} - -inline String TaskTable::getCertainPartitionIsDirtyPath(const String &partition_name) const -{ - return getPartitionPath(partition_name) + "/is_dirty"; -} - -inline String TaskTable::getCertainPartitionPieceIsDirtyPath(const String & partition_name, const size_t piece_number) const -{ - return getPartitionPiecePath(partition_name, piece_number) + "/is_dirty"; -} - -inline String TaskTable::getCertainPartitionIsCleanedPath(const String & partition_name) const -{ - return getCertainPartitionIsDirtyPath(partition_name) + "/cleaned"; -} - -inline String TaskTable::getCertainPartitionPieceIsCleanedPath(const String & partition_name, const size_t piece_number) const -{ - return getCertainPartitionPieceIsDirtyPath(partition_name, piece_number) + "/cleaned"; -} - -inline String 
TaskTable::getCertainPartitionTaskStatusPath(const String & partition_name) const -{ - return getPartitionPath(partition_name) + "/shards"; -} - -inline String TaskTable::getCertainPartitionPieceTaskStatusPath(const String & partition_name, const size_t piece_number) const -{ - return getPartitionPiecePath(partition_name, piece_number) + "/shards"; -} - -inline String TaskTable::getStatusAllPartitionCount() const -{ - return task_cluster.task_zookeeper_path + "/status/all_partitions_count"; -} - -inline String TaskTable::getStatusProcessedPartitionsCount() const -{ - return task_cluster.task_zookeeper_path + "/status/processed_partitions_count"; -} - -inline TaskTable::TaskTable(TaskCluster & parent, const Poco::Util::AbstractConfiguration & config, - const String & prefix_, const String & table_key) - : task_cluster(parent) -{ - String table_prefix = prefix_ + "." + table_key + "."; - - name_in_config = table_key; - - number_of_splits = config.getUInt64(table_prefix + "number_of_splits", 3); - - allow_to_copy_alias_and_materialized_columns = config.getBool(table_prefix + "allow_to_copy_alias_and_materialized_columns", false); - allow_to_drop_target_partitions = config.getBool(table_prefix + "allow_to_drop_target_partitions", false); - - cluster_pull_name = config.getString(table_prefix + "cluster_pull"); - cluster_push_name = config.getString(table_prefix + "cluster_push"); - - table_pull.first = config.getString(table_prefix + "database_pull"); - table_pull.second = config.getString(table_prefix + "table_pull"); - - table_push.first = config.getString(table_prefix + "database_push"); - table_push.second = config.getString(table_prefix + "table_push"); - - /// Used as node name in ZooKeeper - table_id = escapeForFileName(cluster_push_name) - + "." + escapeForFileName(table_push.first) - + "." 
+ escapeForFileName(table_push.second); - - engine_push_str = config.getString(table_prefix + "engine", "rand()"); - - { - ParserStorage parser_storage; - engine_push_ast = parseQuery(parser_storage, engine_push_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); - engine_push_partition_key_ast = extractPartitionKey(engine_push_ast); - primary_key_comma_separated = boost::algorithm::join(extractPrimaryKeyColumnNames(engine_push_ast), ", "); - is_replicated_table = isReplicatedTableEngine(engine_push_ast); - } - - sharding_key_str = config.getString(table_prefix + "sharding_key"); - - auxiliary_engine_split_asts.reserve(number_of_splits); - { - ParserExpressionWithOptionalAlias parser_expression(false); - sharding_key_ast = parseQuery(parser_expression, sharding_key_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); - main_engine_split_ast = createASTStorageDistributed(cluster_push_name, table_push.first, table_push.second, - sharding_key_ast); - - for (const auto piece_number : collections::range(0, number_of_splits)) - { - auxiliary_engine_split_asts.emplace_back - ( - createASTStorageDistributed(cluster_push_name, table_push.first, - table_push.second + "_piece_" + toString(piece_number), sharding_key_ast) - ); - } - } - - where_condition_str = config.getString(table_prefix + "where_condition", ""); - if (!where_condition_str.empty()) - { - ParserExpressionWithOptionalAlias parser_expression(false); - where_condition_ast = parseQuery(parser_expression, where_condition_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); - - // Will use canonical expression form - where_condition_str = queryToString(where_condition_ast); - } - - String enabled_partitions_prefix = table_prefix + "enabled_partitions"; - has_enabled_partitions = config.has(enabled_partitions_prefix); - - if (has_enabled_partitions) - { - Strings keys; - config.keys(enabled_partitions_prefix, keys); - - if (keys.empty()) - { - /// Parse list of partition from space-separated string - String partitions_str = config.getString(table_prefix + "enabled_partitions"); - boost::trim_if(partitions_str, isWhitespaceASCII); - boost::split(enabled_partitions, partitions_str, isWhitespaceASCII, boost::token_compress_on); - } - else - { - /// Parse sequence of ... - for (const String &key : keys) - { - if (!startsWith(key, "partition")) - throw Exception("Unknown key " + key + " in " + enabled_partitions_prefix, ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG); - - enabled_partitions.emplace_back(config.getString(enabled_partitions_prefix + "." 
+ key)); - } - } - - std::copy(enabled_partitions.begin(), enabled_partitions.end(), std::inserter(enabled_partitions_set, enabled_partitions_set.begin())); - } -} - -template -inline void TaskTable::initShards(RandomEngine && random_engine) -{ - const String & fqdn_name = getFQDNOrHostName(); - std::uniform_int_distribution get_urand(0, std::numeric_limits::max()); - - // Compute the priority - for (const auto & shard_info : cluster_pull->getShardsInfo()) - { - TaskShardPtr task_shard = std::make_shared(*this, shard_info); - const auto & replicas = cluster_pull->getShardsAddresses().at(task_shard->indexInCluster()); - task_shard->priority = getReplicasPriority(replicas, fqdn_name, get_urand(random_engine)); - - all_shards.emplace_back(task_shard); - } - - // Sort by priority - std::sort(all_shards.begin(), all_shards.end(), - [](const TaskShardPtr & lhs, const TaskShardPtr & rhs) - { - return ShardPriority::greaterPriority(lhs->priority, rhs->priority); - }); - - // Cut local shards - auto it_first_remote = std::lower_bound(all_shards.begin(), all_shards.end(), 1, - [](const TaskShardPtr & lhs, UInt8 is_remote) - { - return lhs->priority.is_remote < is_remote; - }); - - local_shards.assign(all_shards.begin(), it_first_remote); -} - -inline ASTPtr TaskTable::rewriteReplicatedCreateQueryToPlain() const -{ - ASTPtr prev_engine_push_ast = engine_push_ast->clone(); - - auto & new_storage_ast = prev_engine_push_ast->as(); - auto & new_engine_ast = new_storage_ast.engine->as(); - - /// Remove "Replicated" from name - new_engine_ast.name = new_engine_ast.name.substr(10); - - if (new_engine_ast.arguments) - { - auto & replicated_table_arguments = new_engine_ast.arguments->children; - - - /// In some cases of Atomic database engine usage ReplicatedMergeTree tables - /// could be created without arguments. - if (!replicated_table_arguments.empty()) - { - /// Delete first two arguments of Replicated...MergeTree() table. 
- replicated_table_arguments.erase(replicated_table_arguments.begin()); - replicated_table_arguments.erase(replicated_table_arguments.begin()); - } - } - - return new_storage_ast.clone(); -} - - -inline String DB::TaskShard::getDescription() const -{ - return fmt::format("N{} (having a replica {}, pull table {} of cluster {}", - numberInCluster(), getHostNameExample(), getQuotedTable(task_table.table_pull), task_table.cluster_pull_name); -} - -inline String DB::TaskShard::getHostNameExample() const -{ - const auto & replicas = task_table.cluster_pull->getShardsAddresses().at(indexInCluster()); - return replicas.at(0).readableString(); -} - -} diff --git a/programs/copier/ZooKeeperStaff.h b/programs/copier/ZooKeeperStaff.h index a9e04578607..3d4a11186e3 100644 --- a/programs/copier/ZooKeeperStaff.h +++ b/programs/copier/ZooKeeperStaff.h @@ -47,8 +47,8 @@ public: WrappingUInt32 epoch; WrappingUInt32 counter; explicit Zxid(UInt64 _zxid) - : epoch(_zxid >> 32) - , counter(_zxid) + : epoch(static_cast(_zxid >> 32)) + , counter(static_cast(_zxid)) {} bool operator<=(const Zxid & other) const diff --git a/programs/disks/DisksApp.cpp b/programs/disks/DisksApp.cpp index 749ccb3e503..91472a8df33 100644 --- a/programs/disks/DisksApp.cpp +++ b/programs/disks/DisksApp.cpp @@ -57,7 +57,7 @@ void DisksApp::addOptions( ("config-file,C", po::value(), "Set config file") ("disk", po::value(), "Set disk name") ("command_name", po::value(), "Name for command to do") - ("send-logs", "Send logs") + ("save-logs", "Save logs to a file") ("log-level", po::value(), "Logging level") ; @@ -82,10 +82,10 @@ void DisksApp::processOptions() config().setString("config-file", options["config-file"].as()); if (options.count("disk")) config().setString("disk", options["disk"].as()); - if (options.count("send-logs")) - config().setBool("send-logs", true); + if (options.count("save-logs")) + config().setBool("save-logs", true); if (options.count("log-level")) - Poco::Logger::root().setLevel(options["log-level"].as()); + config().setString("log-level", options["log-level"].as()); } void DisksApp::init(std::vector & common_arguments) @@ -149,15 +149,6 @@ void DisksApp::parseAndCheckOptions( int DisksApp::main(const std::vector & /*args*/) { - if (config().has("send-logs")) - { - auto log_level = config().getString("log-level", "trace"); - Poco::Logger::root().setLevel(Poco::Logger::parseLevel(log_level)); - - auto log_path = config().getString("logger.clickhouse-disks", "/var/log/clickhouse-server/clickhouse-disks.log"); - Poco::Logger::root().setChannel(Poco::AutoPtr(new Poco::FileChannel(log_path))); - } - if (config().has("config-file") || fs::exists(getDefaultConfigFileName())) { String config_path = config().getString("config-file", getDefaultConfigFileName()); @@ -171,6 +162,20 @@ int DisksApp::main(const std::vector & /*args*/) throw Exception(ErrorCodes::BAD_ARGUMENTS, "No config-file specifiged"); } + if (config().has("save-logs")) + { + auto log_level = config().getString("log-level", "trace"); + Poco::Logger::root().setLevel(Poco::Logger::parseLevel(log_level)); + + auto log_path = config().getString("logger.clickhouse-disks", "/var/log/clickhouse-server/clickhouse-disks.log"); + Poco::Logger::root().setChannel(Poco::AutoPtr(new Poco::FileChannel(log_path))); + } + else + { + auto log_level = config().getString("log-level", "none"); + Poco::Logger::root().setLevel(Poco::Logger::parseLevel(log_level)); + } + registerDisks(); registerFormats(); diff --git a/programs/install/Install.cpp b/programs/install/Install.cpp 
index 00c86571265..8028ccde72d 100644 --- a/programs/install/Install.cpp +++ b/programs/install/Install.cpp @@ -893,7 +893,7 @@ namespace if (fs::exists(pid_file)) { ReadBufferFromFile in(pid_file.string()); - UInt64 pid; + Int32 pid; if (tryReadIntText(pid, in)) { fmt::print("{} file exists and contains pid = {}.\n", pid_file.string(), pid); @@ -982,9 +982,9 @@ namespace return 0; } - UInt64 isRunning(const fs::path & pid_file) + int isRunning(const fs::path & pid_file) { - UInt64 pid = 0; + int pid = 0; if (fs::exists(pid_file)) { @@ -1057,7 +1057,7 @@ namespace if (force && do_not_kill) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Specified flags are incompatible"); - UInt64 pid = isRunning(pid_file); + int pid = isRunning(pid_file); if (!pid) return 0; diff --git a/programs/keeper/CMakeLists.txt b/programs/keeper/CMakeLists.txt index ce176ccade5..9266a4ca419 100644 --- a/programs/keeper/CMakeLists.txt +++ b/programs/keeper/CMakeLists.txt @@ -45,6 +45,7 @@ if (BUILD_STANDALONE_KEEPER) ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/KeeperLogStore.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/KeeperServer.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/KeeperSnapshotManager.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/KeeperSnapshotManagerS3.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/KeeperStateMachine.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/KeeperStateManager.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/KeeperStorage.cpp diff --git a/programs/keeper/Keeper.cpp b/programs/keeper/Keeper.cpp index 5077f59b7dd..e1d03b40b66 100644 --- a/programs/keeper/Keeper.cpp +++ b/programs/keeper/Keeper.cpp @@ -68,12 +68,12 @@ namespace ErrorCodes namespace { -int waitServersToFinish(std::vector & servers, size_t seconds_to_wait) +size_t waitServersToFinish(std::vector & servers, size_t seconds_to_wait) { - const int sleep_max_ms = 1000 * seconds_to_wait; - const int sleep_one_ms = 100; - int sleep_current_ms = 0; - int current_connections = 0; + const size_t sleep_max_ms = 1000 * seconds_to_wait; + const size_t sleep_one_ms = 100; + size_t sleep_current_ms = 0; + size_t current_connections = 0; for (;;) { current_connections = 0; @@ -441,7 +441,7 @@ int Keeper::main(const std::vector & /*args*/) main_config_reloader.reset(); LOG_DEBUG(log, "Waiting for current connections to Keeper to finish."); - int current_connections = 0; + size_t current_connections = 0; for (auto & server : *servers) { server.stop(); diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index e7bc019f597..e5fd4d6bf8d 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -8,9 +8,10 @@ #include #include #include +#include +#include #include #include -#include #include #include #include @@ -546,9 +547,14 @@ void LocalServer::processConfig() /// Setting value from cmd arg overrides one from config if (global_context->getSettingsRef().max_insert_block_size.changed) + { insert_format_max_block_size = global_context->getSettingsRef().max_insert_block_size; + } else - insert_format_max_block_size = config().getInt("insert_format_max_block_size", global_context->getSettingsRef().max_insert_block_size); + { + insert_format_max_block_size = config().getUInt64("insert_format_max_block_size", + global_context->getSettingsRef().max_insert_block_size); + } /// Sets external authenticators config (LDAP, Kerberos). 
global_context->setExternalAuthenticatorsConfig(config()); @@ -586,6 +592,18 @@ void LocalServer::processConfig() if (mmap_cache_size) global_context->setMMappedFileCache(mmap_cache_size); +#if USE_EMBEDDED_COMPILER + /// 128 MB + constexpr size_t compiled_expression_cache_size_default = 1024 * 1024 * 128; + size_t compiled_expression_cache_size = config().getUInt64("compiled_expression_cache_size", compiled_expression_cache_size_default); + + constexpr size_t compiled_expression_cache_elements_size_default = 10000; + size_t compiled_expression_cache_elements_size + = config().getUInt64("compiled_expression_cache_elements_size", compiled_expression_cache_elements_size_default); + + CompiledExpressionCacheFactory::instance().init(compiled_expression_cache_size, compiled_expression_cache_elements_size); +#endif + /// Load global settings from default_profile and system_profile. global_context->setDefaultProfiles(config()); diff --git a/programs/obfuscator/Obfuscator.cpp b/programs/obfuscator/Obfuscator.cpp index bdf26c9e730..7fdc5a54d8a 100644 --- a/programs/obfuscator/Obfuscator.cpp +++ b/programs/obfuscator/Obfuscator.cpp @@ -279,7 +279,7 @@ Float transformFloatMantissa(Float x, UInt64 seed) constexpr size_t mantissa_num_bits = std::is_same_v ? 23 : 52; UInt x_uint = bit_cast(x); - x_uint = feistelNetwork(x_uint, mantissa_num_bits, seed); + x_uint = static_cast(feistelNetwork(x_uint, mantissa_num_bits, seed)); return bit_cast(x_uint); } @@ -511,13 +511,13 @@ public: for (size_t i = 0; i < size; ++i) { UInt32 src_datetime = src_data[i]; - UInt32 src_date = date_lut.toDate(src_datetime); + UInt32 src_date = static_cast(date_lut.toDate(src_datetime)); Int32 src_diff = src_datetime - src_prev_value; - Int32 res_diff = transformSigned(src_diff, seed); + Int32 res_diff = static_cast(transformSigned(src_diff, seed)); UInt32 new_datetime = res_prev_value + res_diff; - UInt32 new_time = new_datetime - date_lut.toDate(new_datetime); + UInt32 new_time = new_datetime - static_cast(date_lut.toDate(new_datetime)); res_data[i] = src_date + new_time; src_prev_value = src_datetime; diff --git a/programs/odbc-bridge/ColumnInfoHandler.cpp b/programs/odbc-bridge/ColumnInfoHandler.cpp index 0ea2495af78..bf11947d436 100644 --- a/programs/odbc-bridge/ColumnInfoHandler.cpp +++ b/programs/odbc-bridge/ColumnInfoHandler.cpp @@ -183,7 +183,10 @@ void ODBCColumnsInfoHandler::handleRequest(HTTPServerRequest & request, HTTPServ if (columns.empty()) throw Exception("Columns definition was not returned", ErrorCodes::LOGICAL_ERROR); - WriteBufferFromHTTPServerResponse out(response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD, keep_alive_timeout); + WriteBufferFromHTTPServerResponse out( + response, + request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD, + keep_alive_timeout); try { writeStringBinary(columns.toString(), out); diff --git a/programs/odbc-bridge/ODBCBlockInputStream.cpp b/programs/odbc-bridge/ODBCBlockInputStream.cpp index dec4c249b4b..5bbc39dc559 100644 --- a/programs/odbc-bridge/ODBCBlockInputStream.cpp +++ b/programs/odbc-bridge/ODBCBlockInputStream.cpp @@ -139,7 +139,7 @@ void ODBCSource::insertValue( readDateTimeText(time, in, assert_cast(data_type.get())->getTimeZone()); if (time < 0) time = 0; - assert_cast(column).insertValue(time); + assert_cast(column).insertValue(static_cast(time)); break; } case ValueType::vtDateTime64: diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 7c3193ceac6..b412b579539 100644 --- a/programs/server/Server.cpp +++ 
b/programs/server/Server.cpp @@ -228,12 +228,12 @@ catch (...) path)); } -int waitServersToFinish(std::vector & servers, size_t seconds_to_wait) +size_t waitServersToFinish(std::vector & servers, size_t seconds_to_wait) { - const int sleep_max_ms = 1000 * seconds_to_wait; - const int sleep_one_ms = 100; - int sleep_current_ms = 0; - int current_connections = 0; + const size_t sleep_max_ms = 1000 * seconds_to_wait; + const size_t sleep_one_ms = 100; + size_t sleep_current_ms = 0; + size_t current_connections = 0; for (;;) { current_connections = 0; @@ -933,7 +933,7 @@ int Server::main(const std::vector & /*args*/) else { rlim_t old = rlim.rlim_cur; - rlim.rlim_cur = config().getUInt("max_open_files", rlim.rlim_max); + rlim.rlim_cur = config().getUInt("max_open_files", static_cast(rlim.rlim_max)); int rc = setrlimit(RLIMIT_NOFILE, &rlim); if (rc != 0) LOG_WARNING(log, "Cannot set max number of file descriptors to {}. Try to specify max_open_files according to your system limits. error: {}", rlim.rlim_cur, errnoToString()); @@ -1507,7 +1507,7 @@ int Server::main(const std::vector & /*args*/) if (!servers_to_start_before_tables.empty()) { LOG_DEBUG(log, "Waiting for current connections to servers for tables to finish."); - int current_connections = 0; + size_t current_connections = 0; for (auto & server : servers_to_start_before_tables) { server.stop(); @@ -1793,7 +1793,7 @@ int Server::main(const std::vector & /*args*/) is_cancelled = true; - int current_connections = 0; + size_t current_connections = 0; { std::lock_guard lock(servers_lock); for (auto & server : servers) diff --git a/src/Access/Common/AllowedClientHosts.cpp b/src/Access/Common/AllowedClientHosts.cpp index 2f8151bf757..905f7ba08b5 100644 --- a/src/Access/Common/AllowedClientHosts.cpp +++ b/src/Access/Common/AllowedClientHosts.cpp @@ -236,7 +236,7 @@ void AllowedClientHosts::IPSubnet::set(const IPAddress & prefix_, const IPAddres void AllowedClientHosts::IPSubnet::set(const IPAddress & prefix_, size_t num_prefix_bits) { - set(prefix_, IPAddress(num_prefix_bits, prefix_.family())); + set(prefix_, IPAddress(static_cast(num_prefix_bits), prefix_.family())); } void AllowedClientHosts::IPSubnet::set(const IPAddress & address) diff --git a/src/Access/LDAPClient.cpp b/src/Access/LDAPClient.cpp index ff1ee6f3609..2affbc293ec 100644 --- a/src/Access/LDAPClient.cpp +++ b/src/Access/LDAPClient.cpp @@ -254,7 +254,7 @@ bool LDAPClient::openConnection() #endif { - const int search_timeout = params.search_timeout.count(); + const int search_timeout = static_cast(params.search_timeout.count()); diag(ldap_set_option(handle, LDAP_OPT_TIMELIMIT, &search_timeout)); } diff --git a/src/AggregateFunctions/AggregateFunctionArray.h b/src/AggregateFunctions/AggregateFunctionArray.h index abefe8e0de1..c6e29e77318 100644 --- a/src/AggregateFunctions/AggregateFunctionArray.h +++ b/src/AggregateFunctions/AggregateFunctionArray.h @@ -156,6 +156,11 @@ public: nested_func->insertResultInto(place, to, arena); } + void insertMergeResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const override + { + nested_func->insertMergeResultInto(place, to, arena); + } + bool allocatesMemoryInArena() const override { return nested_func->allocatesMemoryInArena(); diff --git a/src/AggregateFunctions/AggregateFunctionDistinct.h b/src/AggregateFunctions/AggregateFunctionDistinct.h index 482d21363fe..9884e92f425 100644 --- a/src/AggregateFunctions/AggregateFunctionDistinct.h +++ b/src/AggregateFunctions/AggregateFunctionDistinct.h @@ -196,7 +196,8 @@ 
public: this->data(place).deserialize(buf, arena); } - void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const override + template + void insertResultIntoImpl(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const { auto arguments = this->data(place).getArguments(this->argument_types); ColumnRawPtrs arguments_raw(arguments.size()); @@ -205,7 +206,20 @@ public: assert(!arguments.empty()); nested_func->addBatchSinglePlace(0, arguments[0]->size(), getNestedPlace(place), arguments_raw.data(), arena); - nested_func->insertResultInto(getNestedPlace(place), to, arena); + if constexpr (MergeResult) + nested_func->insertMergeResultInto(getNestedPlace(place), to, arena); + else + nested_func->insertResultInto(getNestedPlace(place), to, arena); + } + + void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const override + { + insertResultIntoImpl(place, to, arena); + } + + void insertMergeResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const override + { + insertResultIntoImpl(place, to, arena); } size_t sizeOfData() const override diff --git a/src/AggregateFunctions/AggregateFunctionForEach.h b/src/AggregateFunctions/AggregateFunctionForEach.h index 07713dcb304..62794ac0f53 100644 --- a/src/AggregateFunctions/AggregateFunctionForEach.h +++ b/src/AggregateFunctions/AggregateFunctionForEach.h @@ -257,7 +257,8 @@ public: } } - void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const override + template + void insertResultIntoImpl(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const { AggregateFunctionForEachData & state = data(place); @@ -268,13 +269,26 @@ public: char * nested_state = state.array_of_aggregate_datas; for (size_t i = 0; i < state.dynamic_array_size; ++i) { - nested_func->insertResultInto(nested_state, elems_to, arena); + if constexpr (merge) + nested_func->insertMergeResultInto(nested_state, elems_to, arena); + else + nested_func->insertResultInto(nested_state, elems_to, arena); nested_state += nested_size_of_data; } offsets_to.push_back(offsets_to.back() + state.dynamic_array_size); } + void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const override + { + insertResultIntoImpl(place, to, arena); + } + + void insertMergeResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const override + { + insertResultIntoImpl(place, to, arena); + } + bool allocatesMemoryInArena() const override { return true; diff --git a/src/AggregateFunctions/AggregateFunctionGroupBitmap.h b/src/AggregateFunctions/AggregateFunctionGroupBitmap.h index fd8357e3ba8..dacde67f3ca 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupBitmap.h +++ b/src/AggregateFunctions/AggregateFunctionGroupBitmap.h @@ -45,7 +45,8 @@ public: void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override { - assert_cast &>(to).getData().push_back(this->data(place).rbs.size()); + assert_cast &>(to).getData().push_back( + static_cast(this->data(place).rbs.size())); } }; @@ -142,7 +143,8 @@ public: void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override { - assert_cast &>(to).getData().push_back(this->data(place).rbs.size()); + assert_cast &>(to).getData().push_back( + static_cast(this->data(place).rbs.size())); } }; diff --git a/src/AggregateFunctions/AggregateFunctionGroupBitmapData.h b/src/AggregateFunctions/AggregateFunctionGroupBitmapData.h 
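Several combinators in this change replace a single insertResultInto override with a private insertResultIntoImpl templated on a bool, so the merge and non-merge variants share one body and the branch is resolved at compile time. A condensed, self-contained sketch of that pattern (placeholder names, not the real interfaces):

```cpp
#include <iostream>

struct Nested
{
    void insertResultInto() const      { std::cout << "plain result\n"; }
    void insertMergeResultInto() const { std::cout << "merge result\n"; }
};

class Combinator
{
public:
    void insertResultInto() const      { insertResultIntoImpl<false>(); }
    void insertMergeResultInto() const { insertResultIntoImpl<true>(); }

private:
    // One shared body; `if constexpr` removes the untaken branch at compile time,
    // so each instantiation calls exactly one of the nested methods.
    template <bool merge>
    void insertResultIntoImpl() const
    {
        if constexpr (merge)
            nested.insertMergeResultInto();
        else
            nested.insertResultInto();
    }

    Nested nested;
};

int main()
{
    Combinator c;
    c.insertResultInto();
    c.insertMergeResultInto();
}
```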
index 21ba7cd7301..801526432ae 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupBitmapData.h +++ b/src/AggregateFunctions/AggregateFunctionGroupBitmapData.h @@ -426,7 +426,7 @@ public: return 0; if (isSmall()) - return small.find(x) != small.end(); + return small.find(static_cast(x)) != small.end(); else return rb->contains(static_cast(x)); } diff --git a/src/AggregateFunctions/AggregateFunctionHistogram.h b/src/AggregateFunctions/AggregateFunctionHistogram.h index 9031eb73c09..18bfc085ba3 100644 --- a/src/AggregateFunctions/AggregateFunctionHistogram.h +++ b/src/AggregateFunctions/AggregateFunctionHistogram.h @@ -136,8 +136,8 @@ private: for (size_t i = 0; i <= size; ++i) { - previous[i] = i - 1; - next[i] = i + 1; + previous[i] = static_cast(i - 1); + next[i] = static_cast(i + 1); } next[size] = 0; @@ -157,7 +157,7 @@ private: auto quality = [&](UInt32 i) { return points[next[i]].mean - points[i].mean; }; for (size_t i = 0; i + 1 < size; ++i) - queue.push({quality(i), i}); + queue.push({quality(static_cast(i)), i}); while (new_size > max_bins && !queue.empty()) { @@ -217,7 +217,7 @@ private: points[left] = points[right]; } } - size = left + 1; + size = static_cast(left + 1); } public: diff --git a/src/AggregateFunctions/AggregateFunctionIf.h b/src/AggregateFunctions/AggregateFunctionIf.h index 79312dff2c7..ccc4809dd06 100644 --- a/src/AggregateFunctions/AggregateFunctionIf.h +++ b/src/AggregateFunctions/AggregateFunctionIf.h @@ -183,6 +183,11 @@ public: nested_func->insertResultInto(place, to, arena); } + void insertMergeResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const override + { + nested_func->insertMergeResultInto(place, to, arena); + } + bool allocatesMemoryInArena() const override { return nested_func->allocatesMemoryInArena(); diff --git a/src/AggregateFunctions/AggregateFunctionMap.h b/src/AggregateFunctions/AggregateFunctionMap.h index 4cb26fcc8d1..d349fc05944 100644 --- a/src/AggregateFunctions/AggregateFunctionMap.h +++ b/src/AggregateFunctions/AggregateFunctionMap.h @@ -264,7 +264,8 @@ public: } } - void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const override + template + void insertResultIntoImpl(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const { auto & map_column = assert_cast(to); auto & nested_column = map_column.getNestedColumn(); @@ -288,13 +289,26 @@ public: for (auto & key : keys) { key_column.insert(key); - nested_func->insertResultInto(merged_maps[key], val_column, arena); + if constexpr (merge) + nested_func->insertMergeResultInto(merged_maps[key], val_column, arena); + else + nested_func->insertResultInto(merged_maps[key], val_column, arena); } IColumn::Offsets & res_offsets = nested_column.getOffsets(); res_offsets.push_back(val_column.size()); } + void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const override + { + insertResultIntoImpl(place, to, arena); + } + + void insertMergeResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const override + { + insertResultIntoImpl(place, to, arena); + } + bool allocatesMemoryInArena() const override { return true; } AggregateFunctionPtr getNestedFunction() const override { return nested_func; } diff --git a/src/AggregateFunctions/AggregateFunctionMinMaxAny.h b/src/AggregateFunctions/AggregateFunctionMinMaxAny.h index 783fa0606b5..f8d252cf8e9 100644 --- a/src/AggregateFunctions/AggregateFunctionMinMaxAny.h +++ b/src/AggregateFunctions/AggregateFunctionMinMaxAny.h 
@@ -540,7 +540,7 @@ public: /// Assuming to.has() void changeImpl(StringRef value, Arena * arena) { - Int32 value_size = value.size; + Int32 value_size = static_cast(value.size); if (value_size <= MAX_SMALL_STRING_SIZE) { @@ -555,7 +555,7 @@ public: if (capacity < value_size) { /// Don't free large_data here. - capacity = roundUpToPowerOfTwoOrZero(value_size); + capacity = static_cast(roundUpToPowerOfTwoOrZero(value_size)); large_data = arena->alloc(capacity); } diff --git a/src/AggregateFunctions/AggregateFunctionNull.h b/src/AggregateFunctions/AggregateFunctionNull.h index f3a3f55972f..deed06b8bf2 100644 --- a/src/AggregateFunctions/AggregateFunctionNull.h +++ b/src/AggregateFunctions/AggregateFunctionNull.h @@ -163,14 +163,18 @@ public: } } - void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const override + template + void insertResultIntoImpl(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const { if constexpr (result_is_nullable) { ColumnNullable & to_concrete = assert_cast(to); if (getFlag(place)) { - nested_function->insertResultInto(nestedPlace(place), to_concrete.getNestedColumn(), arena); + if constexpr (merge) + nested_function->insertMergeResultInto(nestedPlace(place), to_concrete.getNestedColumn(), arena); + else + nested_function->insertResultInto(nestedPlace(place), to_concrete.getNestedColumn(), arena); to_concrete.getNullMapData().push_back(0); } else @@ -180,10 +184,23 @@ public: } else { - nested_function->insertResultInto(nestedPlace(place), to, arena); + if constexpr (merge) + nested_function->insertMergeResultInto(nestedPlace(place), to, arena); + else + nested_function->insertResultInto(nestedPlace(place), to, arena); } } + void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const override + { + insertResultIntoImpl(place, to, arena); + } + + void insertMergeResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const override + { + insertResultIntoImpl(place, to, arena); + } + bool allocatesMemoryInArena() const override { return nested_function->allocatesMemoryInArena(); diff --git a/src/AggregateFunctions/AggregateFunctionOrFill.h b/src/AggregateFunctions/AggregateFunctionOrFill.h index c5a0d60224a..eff4fb2bdc0 100644 --- a/src/AggregateFunctions/AggregateFunctionOrFill.h +++ b/src/AggregateFunctions/AggregateFunctionOrFill.h @@ -265,10 +265,11 @@ public: } } - void insertResultInto( + template + void insertResultIntoImpl( AggregateDataPtr __restrict place, IColumn & to, - Arena * arena) const override + Arena * arena) const { if (place[size_of_data]) { @@ -277,7 +278,12 @@ public: // -OrNull if (inner_nullable) - nested_function->insertResultInto(place, to, arena); + { + if constexpr (merge) + nested_function->insertMergeResultInto(place, to, arena); + else + nested_function->insertResultInto(place, to, arena); + } else { ColumnNullable & col = typeid_cast(to); @@ -289,14 +295,26 @@ public: else { // -OrDefault - - nested_function->insertResultInto(place, to, arena); + if constexpr (merge) + nested_function->insertMergeResultInto(place, to, arena); + else + nested_function->insertResultInto(place, to, arena); } } else to.insertDefault(); } + void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const override + { + insertResultIntoImpl(place, to, arena); + } + + void insertMergeResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const override + { + insertResultIntoImpl(place, to, arena); + } + 
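The changeImpl hunk above stores the string size in an Int32 and, when the value does not fit in the inline buffer, grows the external buffer to the next power of two, hence the two new explicit casts. A rough standalone sketch of that small-buffer/power-of-two scheme, simplified to plain `new[]` instead of an arena and with no destructor:

```cpp
#include <bit>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <string>
#include <string_view>

struct SmallString
{
    static constexpr int32_t max_small_size = 48;

    int32_t size = 0;
    int32_t capacity = 0;             // capacity of the heap buffer, 0 while inline
    char small_data[max_small_size];
    char * large_data = nullptr;

    void assign(std::string_view value)
    {
        const auto value_size = static_cast<int32_t>(value.size());
        if (value_size <= max_small_size)
        {
            size = value_size;
            std::memcpy(small_data, value.data(), value.size());
            return;
        }
        if (capacity < value_size)
        {
            delete[] large_data;      // the original keeps arena memory alive instead
            capacity = static_cast<int32_t>(std::bit_ceil(static_cast<uint32_t>(value_size)));
            large_data = new char[capacity];
        }
        size = value_size;
        std::memcpy(large_data, value.data(), value.size());
    }

    std::string_view view() const
    {
        return {size <= max_small_size ? small_data : large_data, static_cast<size_t>(size)};
    }
};

int main()
{
    SmallString s;
    s.assign("short");
    std::cout << s.view() << ' ' << s.capacity << '\n';
    s.assign(std::string(100, 'x'));
    std::cout << s.view().size() << ' ' << s.capacity << '\n';   // capacity rounded up to 128
}
```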
AggregateFunctionPtr getNestedFunction() const override { return nested_function; } }; diff --git a/src/AggregateFunctions/AggregateFunctionQuantile.cpp b/src/AggregateFunctions/AggregateFunctionQuantile.cpp index 38b3c91be69..60e759b45a3 100644 --- a/src/AggregateFunctions/AggregateFunctionQuantile.cpp +++ b/src/AggregateFunctions/AggregateFunctionQuantile.cpp @@ -46,7 +46,7 @@ AggregateFunctionPtr createAggregateFunctionQuantile( if (which.idx == TypeIndex::DateTime64) return std::make_shared>(argument_types, params); if (which.idx == TypeIndex::Int128) return std::make_shared>(argument_types, params); - if (which.idx == TypeIndex::UInt128) return std::make_shared>(argument_types, params); + if (which.idx == TypeIndex::UInt128) return std::make_shared>(argument_types, params); if (which.idx == TypeIndex::Int256) return std::make_shared>(argument_types, params); if (which.idx == TypeIndex::UInt256) return std::make_shared>(argument_types, params); diff --git a/src/AggregateFunctions/AggregateFunctionQuantileDeterministic.cpp b/src/AggregateFunctions/AggregateFunctionQuantileDeterministic.cpp index a9486da25fa..1605056e5d9 100644 --- a/src/AggregateFunctions/AggregateFunctionQuantileDeterministic.cpp +++ b/src/AggregateFunctions/AggregateFunctionQuantileDeterministic.cpp @@ -40,7 +40,7 @@ AggregateFunctionPtr createAggregateFunctionQuantile( if (which.idx == TypeIndex::DateTime) return std::make_shared>(argument_types, params); if (which.idx == TypeIndex::Int128) return std::make_shared>(argument_types, params); - if (which.idx == TypeIndex::UInt128) return std::make_shared>(argument_types, params); + if (which.idx == TypeIndex::UInt128) return std::make_shared>(argument_types, params); if (which.idx == TypeIndex::Int256) return std::make_shared>(argument_types, params); if (which.idx == TypeIndex::UInt256) return std::make_shared>(argument_types, params); diff --git a/src/AggregateFunctions/AggregateFunctionQuantileExact.cpp b/src/AggregateFunctions/AggregateFunctionQuantileExact.cpp index 39de9d0eeaf..e9a3edf1e05 100644 --- a/src/AggregateFunctions/AggregateFunctionQuantileExact.cpp +++ b/src/AggregateFunctions/AggregateFunctionQuantileExact.cpp @@ -47,7 +47,7 @@ AggregateFunctionPtr createAggregateFunctionQuantile( if (which.idx == TypeIndex::DateTime64) return std::make_shared>(argument_types, params); if (which.idx == TypeIndex::Int128) return std::make_shared>(argument_types, params); - if (which.idx == TypeIndex::UInt128) return std::make_shared>(argument_types, params); + if (which.idx == TypeIndex::UInt128) return std::make_shared>(argument_types, params); if (which.idx == TypeIndex::Int256) return std::make_shared>(argument_types, params); if (which.idx == TypeIndex::UInt256) return std::make_shared>(argument_types, params); diff --git a/src/AggregateFunctions/AggregateFunctionQuantileExactWeighted.cpp b/src/AggregateFunctions/AggregateFunctionQuantileExactWeighted.cpp index 63e4d3df24b..e9b6012dcdb 100644 --- a/src/AggregateFunctions/AggregateFunctionQuantileExactWeighted.cpp +++ b/src/AggregateFunctions/AggregateFunctionQuantileExactWeighted.cpp @@ -46,7 +46,7 @@ AggregateFunctionPtr createAggregateFunctionQuantile( if (which.idx == TypeIndex::DateTime64) return std::make_shared>(argument_types, params); if (which.idx == TypeIndex::Int128) return std::make_shared>(argument_types, params); - if (which.idx == TypeIndex::UInt128) return std::make_shared>(argument_types, params); + if (which.idx == TypeIndex::UInt128) return std::make_shared>(argument_types, params); if (which.idx 
== TypeIndex::Int256) return std::make_shared>(argument_types, params); if (which.idx == TypeIndex::UInt256) return std::make_shared>(argument_types, params); diff --git a/src/AggregateFunctions/AggregateFunctionResample.h b/src/AggregateFunctions/AggregateFunctionResample.h index 471a6820939..fe04ada1a77 100644 --- a/src/AggregateFunctions/AggregateFunctionResample.h +++ b/src/AggregateFunctions/AggregateFunctionResample.h @@ -195,17 +195,33 @@ public: return std::make_shared(nested_function->getReturnType()); } - void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const override + template + void insertResultIntoImpl(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const { auto & col = assert_cast(to); auto & col_offsets = assert_cast(col.getOffsetsColumn()); for (size_t i = 0; i < total; ++i) - nested_function->insertResultInto(place + i * size_of_data, col.getData(), arena); + { + if constexpr (merge) + nested_function->insertMergeResultInto(place + i * size_of_data, col.getData(), arena); + else + nested_function->insertResultInto(place + i * size_of_data, col.getData(), arena); + } col_offsets.getData().push_back(col.getData().size()); } + void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const override + { + insertResultIntoImpl(place, to, arena); + } + + void insertMergeResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const override + { + insertResultIntoImpl(place, to, arena); + } + AggregateFunctionPtr getNestedFunction() const override { return nested_function; } }; diff --git a/src/AggregateFunctions/AggregateFunctionRetention.h b/src/AggregateFunctions/AggregateFunctionRetention.h index a949b5e93f6..18d04fb1ea4 100644 --- a/src/AggregateFunctions/AggregateFunctionRetention.h +++ b/src/AggregateFunctions/AggregateFunctionRetention.h @@ -44,7 +44,7 @@ struct AggregateFunctionRetentionData void serialize(WriteBuffer & buf) const { - UInt32 event_value = events.to_ulong(); + UInt32 event_value = static_cast(events.to_ulong()); writeBinary(event_value, buf); } diff --git a/src/AggregateFunctions/AggregateFunctionSequenceMatch.h b/src/AggregateFunctions/AggregateFunctionSequenceMatch.h index 5c1ab803f19..bcea408d26b 100644 --- a/src/AggregateFunctions/AggregateFunctionSequenceMatch.h +++ b/src/AggregateFunctions/AggregateFunctionSequenceMatch.h @@ -272,7 +272,7 @@ private: actions.emplace_back(PatternActionType::SpecificEvent, event_number - 1); dfa_states.back().transition = DFATransition::SpecificEvent; - dfa_states.back().event = event_number - 1; + dfa_states.back().event = static_cast(event_number - 1); dfa_states.emplace_back(); conditions_in_pattern.set(event_number - 1); } diff --git a/src/AggregateFunctions/AggregateFunctionSequenceNextNode.h b/src/AggregateFunctions/AggregateFunctionSequenceNextNode.h index 423b53b03f3..c29055ae8db 100644 --- a/src/AggregateFunctions/AggregateFunctionSequenceNextNode.h +++ b/src/AggregateFunctions/AggregateFunctionSequenceNextNode.h @@ -226,7 +226,7 @@ public: for (UInt8 i = 0; i < events_size; ++i) if (assert_cast *>(columns[min_required_args + i])->getData()[row_num]) node->events_bitset.set(i); - node->event_time = timestamp; + node->event_time = static_cast(timestamp); node->can_be_base = assert_cast *>(columns[base_cond_column_idx])->getData()[row_num]; @@ -365,7 +365,7 @@ public: /// The first matched event is 0x00000001, the second one is 0x00000002, the third one is 0x00000004, and so on. 
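The -Resample combinator above keeps `total` nested states in one contiguous allocation and addresses the i-th one as `place + i * size_of_data`, now emitting each result through either the merge or the non-merge path. A stripped-down sketch of that flat state layout, using a trivial counter in place of a real aggregate state:

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <new>
#include <vector>

// Trivial stand-in for a nested aggregate state.
struct CountState { int64_t count = 0; };

int main()
{
    const size_t total = 4;                       // number of resample buckets
    const size_t size_of_data = sizeof(CountState);

    // One flat buffer holds all nested states back to back, like the combinator's `place`.
    std::vector<char> place(total * size_of_data);
    for (size_t i = 0; i < total; ++i)
        new (place.data() + i * size_of_data) CountState{};

    // Route each incoming value to its bucket's state by offsetting into the buffer.
    for (int value : {0, 1, 1, 3, 3, 3})
    {
        auto * state = reinterpret_cast<CountState *>(place.data() + static_cast<size_t>(value) * size_of_data);
        ++state->count;
    }

    // "insertResultInto": walk the buckets in order and emit one result per nested state.
    for (size_t i = 0; i < total; ++i)
    {
        const auto * state = reinterpret_cast<const CountState *>(place.data() + i * size_of_data);
        std::cout << "bucket " << i << ": " << state->count << '\n';
    }
}
```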
UInt32 getNextNodeIndex(Data & data) const { - const UInt32 unmatched_idx = data.value.size(); + const UInt32 unmatched_idx = static_cast(data.value.size()); if (data.value.size() <= events_size) return unmatched_idx; diff --git a/src/AggregateFunctions/AggregateFunctionSparkbar.h b/src/AggregateFunctions/AggregateFunctionSparkbar.h index cb017053fd7..f0fbdd2f2e4 100644 --- a/src/AggregateFunctions/AggregateFunctionSparkbar.h +++ b/src/AggregateFunctions/AggregateFunctionSparkbar.h @@ -165,7 +165,7 @@ private: { for (size_t i = 0; i <= diff_x; ++i) { - auto it = data.points.find(min_x_local + i); + auto it = data.points.find(static_cast(min_x_local + i)); bool found = it != data.points.end(); value += getBar(found ? std::round(((it->getMapped() - min_y) / diff_y) * 7) + 1 : 0.0); } @@ -173,7 +173,7 @@ private: else { for (size_t i = 0; i <= diff_x; ++i) - value += getBar(data.points.has(min_x_local + i) ? 1 : 0); + value += getBar(data.points.has(min_x_local + static_cast(i)) ? 1 : 0); } } else @@ -202,7 +202,7 @@ private: if (i == bound.first) // is bound { Float64 proportion = bound.second - bound.first; - auto it = data.points.find(min_x_local + i); + auto it = data.points.find(min_x_local + static_cast(i)); bool found = (it != data.points.end()); if (found && proportion > 0) new_y = new_y.value_or(0) + it->getMapped() * proportion; @@ -229,7 +229,7 @@ private: } else { - auto it = data.points.find(min_x_local + i); + auto it = data.points.find(min_x_local + static_cast(i)); if (it != data.points.end()) new_y = new_y.value_or(0) + it->getMapped(); } @@ -267,8 +267,8 @@ public: if (params.size() == 3) { specified_min_max_x = true; - min_x = params.at(1).safeGet(); - max_x = params.at(2).safeGet(); + min_x = static_cast(params.at(1).safeGet()); + max_x = static_cast(params.at(2).safeGet()); } else { diff --git a/src/AggregateFunctions/AggregateFunctionState.h b/src/AggregateFunctions/AggregateFunctionState.h index 6ab3dbab625..20ccb2e543c 100644 --- a/src/AggregateFunctions/AggregateFunctionState.h +++ b/src/AggregateFunctions/AggregateFunctionState.h @@ -111,6 +111,11 @@ public: assert_cast(to).getData().push_back(place); } + void insertMergeResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override + { + assert_cast(to).insertFrom(place); + } + /// Aggregate function or aggregate function state. 
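The AggregateFunctionState override above is the interesting case: a plain -State insert pushes the state pointer itself into the column, while insertMergeResultInto goes through insertFrom, so the column ends up owning its own state merged from `place`. A toy model of why the two differ (shared_ptr standing in for arena-allocated state, not the real ColumnAggregateFunction API):

```cpp
#include <cstdint>
#include <iostream>
#include <memory>
#include <vector>

// Toy model of an aggregate state and of a column that stores states.
struct State
{
    int64_t sum = 0;
    void merge(const State & rhs) { sum += rhs.sum; }
};

struct StateColumn
{
    std::vector<std::shared_ptr<State>> data;

    // What a plain -State insert does conceptually: the column ends up
    // referring to the very same state object the caller still owns.
    void insertPointer(std::shared_ptr<State> place) { data.push_back(std::move(place)); }

    // What insertMergeResultInto does conceptually: insert a fresh default
    // state and merge the caller's state into it, so the column owns a copy.
    void insertMergedCopy(const State & place)
    {
        auto fresh = std::make_shared<State>();
        fresh->merge(place);
        data.push_back(std::move(fresh));
    }
};

int main()
{
    auto place = std::make_shared<State>();
    place->sum = 10;

    StateColumn column;
    column.insertPointer(place);      // aliases the caller's state
    column.insertMergedCopy(*place);  // independent copy

    place->sum = 999;                 // caller keeps mutating its state
    std::cout << column.data[0]->sum << ' ' << column.data[1]->sum << '\n';   // 999 10
}
```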
bool isState() const override { return true; } diff --git a/src/AggregateFunctions/AggregateFunctionUniq.h b/src/AggregateFunctions/AggregateFunctionUniq.h index d44937b3f9d..fe2530800cc 100644 --- a/src/AggregateFunctions/AggregateFunctionUniq.h +++ b/src/AggregateFunctions/AggregateFunctionUniq.h @@ -175,8 +175,9 @@ struct OneAdder { if constexpr (!std::is_same_v) { + using ValueType = typename decltype(data.set)::value_type; const auto & value = assert_cast &>(column).getElement(row_num); - data.set.insert(AggregateFunctionUniqTraits::hash(value)); + data.set.insert(static_cast(AggregateFunctionUniqTraits::hash(value))); } else { diff --git a/src/AggregateFunctions/AggregateFunctionUniqCombined.h b/src/AggregateFunctions/AggregateFunctionUniqCombined.h index 51020abe826..47b3081225b 100644 --- a/src/AggregateFunctions/AggregateFunctionUniqCombined.h +++ b/src/AggregateFunctions/AggregateFunctionUniqCombined.h @@ -43,7 +43,7 @@ namespace detail { static Ret hash(UInt128 x) { - return sipHash64(x); + return static_cast(sipHash64(x)); } }; diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp index 74000296a2d..3a72e0e92bb 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp @@ -40,7 +40,15 @@ struct WelchTTestData : public TTestMoments Float64 denominator_x = sx2 * sx2 / (nx * nx * (nx - 1)); Float64 denominator_y = sy2 * sy2 / (ny * ny * (ny - 1)); - return numerator / (denominator_x + denominator_y); + auto result = numerator / (denominator_x + denominator_y); + + if (result <= 0 || std::isinf(result) || isNaN(result)) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Cannot calculate p_value, because the t-distribution \ + has inappropriate value of degrees of freedom (={}). 
It should be > 0", result); + + return result; } std::tuple getResult() const diff --git a/src/AggregateFunctions/IAggregateFunction.cpp b/src/AggregateFunctions/IAggregateFunction.cpp index 25d2a9a4530..7da341cc5b9 100644 --- a/src/AggregateFunctions/IAggregateFunction.cpp +++ b/src/AggregateFunctions/IAggregateFunction.cpp @@ -53,9 +53,12 @@ String IAggregateFunction::getDescription() const bool IAggregateFunction::haveEqualArgumentTypes(const IAggregateFunction & rhs) const { - return std::equal(argument_types.begin(), argument_types.end(), - rhs.argument_types.begin(), rhs.argument_types.end(), - [](const auto & t1, const auto & t2) { return t1->equals(*t2); }); + return std::equal( + argument_types.begin(), + argument_types.end(), + rhs.argument_types.begin(), + rhs.argument_types.end(), + [](const auto & t1, const auto & t2) { return t1->equals(*t2); }); } bool IAggregateFunction::haveSameStateRepresentation(const IAggregateFunction & rhs) const @@ -67,11 +70,7 @@ bool IAggregateFunction::haveSameStateRepresentation(const IAggregateFunction & bool IAggregateFunction::haveSameStateRepresentationImpl(const IAggregateFunction & rhs) const { - bool res = getName() == rhs.getName() - && parameters == rhs.parameters - && haveEqualArgumentTypes(rhs); - assert(res == (getStateType()->getName() == rhs.getStateType()->getName())); - return res; + return getStateType()->equals(*rhs.getStateType()); } } diff --git a/src/AggregateFunctions/IAggregateFunction.h b/src/AggregateFunctions/IAggregateFunction.h index d272eac9d10..7d2fe6ae8e3 100644 --- a/src/AggregateFunctions/IAggregateFunction.h +++ b/src/AggregateFunctions/IAggregateFunction.h @@ -164,6 +164,18 @@ public: /// window function. virtual void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const = 0; + /// Special method for aggregate functions with -State combinator, it behaves the same way as insertResultInto, + /// but if we need to insert AggregateData into ColumnAggregateFunction we use special method + /// insertInto that inserts default value and then performs merge with provided AggregateData + /// instead of just copying pointer to this AggregateData. Used in WindowTransform. + virtual void insertMergeResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const + { + if (isState()) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Function {} is marked as State but method insertMergeResultInto is not implemented"); + + insertResultInto(place, to, arena); + } + /// Used for machine learning methods. Predict result from trained model. /// Will insert result into `to` column for rows in range [offset, offset + limit). 
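The new guard in WelchTTestData rejects non-positive or non-finite degrees of freedom before the p-value is computed. For reference, the quantity being computed there corresponds to the standard Welch–Satterthwaite approximation; the two denominator terms visible in the hunk match the terms below, and the numerator (not shown in the diff) is the usual squared sum of the per-sample variance terms. Here $s_x^2$, $s_y^2$ are the sample variances and $n_x$, $n_y$ the sample sizes:

```latex
\nu \;\approx\; \frac{\left(\dfrac{s_x^2}{n_x} + \dfrac{s_y^2}{n_y}\right)^{2}}
{\dfrac{s_x^4}{n_x^2\,(n_x - 1)} + \dfrac{s_y^4}{n_y^2\,(n_y - 1)}}
```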
virtual void predictValues( diff --git a/src/AggregateFunctions/ReservoirSamplerDeterministic.h b/src/AggregateFunctions/ReservoirSamplerDeterministic.h index a64c02e823b..557fd93a3a9 100644 --- a/src/AggregateFunctions/ReservoirSamplerDeterministic.h +++ b/src/AggregateFunctions/ReservoirSamplerDeterministic.h @@ -84,7 +84,7 @@ public: if (isNaN(v)) return; - UInt32 hash = intHash64(determinator); + UInt32 hash = static_cast(intHash64(determinator)); insertImpl(v, hash); sorted = false; ++total_values; diff --git a/src/AggregateFunctions/UniquesHashSet.h b/src/AggregateFunctions/UniquesHashSet.h index 5c82ec10691..777ec0edc7e 100644 --- a/src/AggregateFunctions/UniquesHashSet.h +++ b/src/AggregateFunctions/UniquesHashSet.h @@ -118,7 +118,7 @@ private: HashValue hash(Value key) const { - return Hash()(key); + return static_cast(Hash()(key)); } /// Delete all values whose hashes do not divide by 2 ^ skip_degree diff --git a/src/Analyzer/AggregationUtils.cpp b/src/Analyzer/AggregationUtils.cpp new file mode 100644 index 00000000000..a73df87f9c2 --- /dev/null +++ b/src/Analyzer/AggregationUtils.cpp @@ -0,0 +1,114 @@ +#include + +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int ILLEGAL_AGGREGATION; +} + +namespace +{ + +class CollectAggregateFunctionNodesVisitor : public ConstInDepthQueryTreeVisitor +{ +public: + explicit CollectAggregateFunctionNodesVisitor(QueryTreeNodes * aggregate_function_nodes_) + : aggregate_function_nodes(aggregate_function_nodes_) + {} + + explicit CollectAggregateFunctionNodesVisitor(String assert_no_aggregates_place_message_) + : assert_no_aggregates_place_message(std::move(assert_no_aggregates_place_message_)) + {} + + void visitImpl(const QueryTreeNodePtr & node) + { + auto * function_node = node->as(); + if (!function_node || !function_node->isAggregateFunction()) + return; + + if (!assert_no_aggregates_place_message.empty()) + throw Exception(ErrorCodes::ILLEGAL_AGGREGATION, + "Aggregate function {} is found {} in query", + function_node->formatASTForErrorMessage(), + assert_no_aggregates_place_message); + + if (aggregate_function_nodes) + aggregate_function_nodes->push_back(node); + } + + static bool needChildVisit(const QueryTreeNodePtr &, const QueryTreeNodePtr & child_node) + { + return !(child_node->getNodeType() == QueryTreeNodeType::QUERY || child_node->getNodeType() == QueryTreeNodeType::UNION); + } + +private: + String assert_no_aggregates_place_message; + QueryTreeNodes * aggregate_function_nodes = nullptr; +}; + +} + +QueryTreeNodes collectAggregateFunctionNodes(const QueryTreeNodePtr & node) +{ + QueryTreeNodes result; + CollectAggregateFunctionNodesVisitor visitor(&result); + visitor.visit(node); + + return result; +} + +void collectAggregateFunctionNodes(const QueryTreeNodePtr & node, QueryTreeNodes & result) +{ + CollectAggregateFunctionNodesVisitor visitor(&result); + visitor.visit(node); +} + +void assertNoAggregateFunctionNodes(const QueryTreeNodePtr & node, const String & assert_no_aggregates_place_message) +{ + CollectAggregateFunctionNodesVisitor visitor(assert_no_aggregates_place_message); + visitor.visit(node); +} + +namespace +{ + +class ValidateGroupingFunctionNodesVisitor : public ConstInDepthQueryTreeVisitor +{ +public: + explicit ValidateGroupingFunctionNodesVisitor(String assert_no_grouping_function_place_message_) + : assert_no_grouping_function_place_message(std::move(assert_no_grouping_function_place_message_)) + {} + + void visitImpl(const QueryTreeNodePtr & node) + { + auto * function_node = 
node->as(); + if (function_node && function_node->getFunctionName() == "grouping") + throw Exception(ErrorCodes::ILLEGAL_AGGREGATION, + "GROUPING function {} is found {} in query", + function_node->formatASTForErrorMessage(), + assert_no_grouping_function_place_message); + } + + static bool needChildVisit(const QueryTreeNodePtr &, const QueryTreeNodePtr & child_node) + { + return !(child_node->getNodeType() == QueryTreeNodeType::QUERY || child_node->getNodeType() == QueryTreeNodeType::UNION); + } + +private: + String assert_no_grouping_function_place_message; +}; + +} + +void assertNoGroupingFunction(const QueryTreeNodePtr & node, const String & assert_no_grouping_function_place_message) +{ + ValidateGroupingFunctionNodesVisitor visitor(assert_no_grouping_function_place_message); + visitor.visit(node); +} + +} diff --git a/src/Analyzer/AggregationUtils.h b/src/Analyzer/AggregationUtils.h new file mode 100644 index 00000000000..c2e53e55c04 --- /dev/null +++ b/src/Analyzer/AggregationUtils.h @@ -0,0 +1,28 @@ +#pragma once + +#include + +namespace DB +{ + +/** Collect aggregate function nodes in node children. + * Do not visit subqueries. + */ +QueryTreeNodes collectAggregateFunctionNodes(const QueryTreeNodePtr & node); + +/** Collect aggregate function nodes in node children and add them into result. + * Do not visit subqueries. + */ +void collectAggregateFunctionNodes(const QueryTreeNodePtr & node, QueryTreeNodes & result); + +/** Assert that there are no aggregate function nodes in node children. + * Do not visit subqueries. + */ +void assertNoAggregateFunctionNodes(const QueryTreeNodePtr & node, const String & assert_no_aggregates_place_message); + +/** Assert that there are no GROUPING functions in node children. + * Do not visit subqueries. + */ +void assertNoGroupingFunction(const QueryTreeNodePtr & node, const String & assert_no_grouping_function_place_message); + +} diff --git a/src/Analyzer/ArrayJoinNode.cpp b/src/Analyzer/ArrayJoinNode.cpp new file mode 100644 index 00000000000..2157b5edf6f --- /dev/null +++ b/src/Analyzer/ArrayJoinNode.cpp @@ -0,0 +1,71 @@ +#include + +#include +#include +#include + +#include + +#include + +namespace DB +{ + +ArrayJoinNode::ArrayJoinNode(QueryTreeNodePtr table_expression_, QueryTreeNodePtr join_expressions_, bool is_left_) + : IQueryTreeNode(children_size) + , is_left(is_left_) +{ + children[table_expression_child_index] = std::move(table_expression_); + children[join_expressions_child_index] = std::move(join_expressions_); +} + +void ArrayJoinNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const +{ + buffer << std::string(indent, ' ') << "ARRAY_JOIN id: " << format_state.getNodeId(this); + buffer << ", is_left: " << is_left; + + buffer << '\n' << std::string(indent + 2, ' ') << "TABLE EXPRESSION\n"; + getTableExpression()->dumpTreeImpl(buffer, format_state, indent + 4); + + buffer << '\n' << std::string(indent + 2, ' ') << "JOIN EXPRESSIONS\n"; + getJoinExpressionsNode()->dumpTreeImpl(buffer, format_state, indent + 4); +} + +bool ArrayJoinNode::isEqualImpl(const IQueryTreeNode & rhs) const +{ + const auto & rhs_typed = assert_cast(rhs); + return is_left == rhs_typed.is_left; +} + +void ArrayJoinNode::updateTreeHashImpl(HashState & state) const +{ + state.update(is_left); +} + +QueryTreeNodePtr ArrayJoinNode::cloneImpl() const +{ + return std::make_shared(getTableExpression(), getJoinExpressionsNode(), is_left); +} + +ASTPtr ArrayJoinNode::toASTImpl() const +{ + auto array_join_ast = std::make_shared(); + 
array_join_ast->kind = is_left ? ASTArrayJoin::Kind::Left : ASTArrayJoin::Kind::Inner; + + const auto & join_expression_list_node = getJoinExpressionsNode(); + array_join_ast->children.push_back(join_expression_list_node->toAST()); + array_join_ast->expression_list = array_join_ast->children.back(); + + ASTPtr tables_in_select_query_ast = std::make_shared(); + addTableExpressionOrJoinIntoTablesInSelectQuery(tables_in_select_query_ast, children[table_expression_child_index]); + + auto array_join_query_element_ast = std::make_shared(); + array_join_query_element_ast->children.push_back(std::move(array_join_ast)); + array_join_query_element_ast->array_join = array_join_query_element_ast->children.back(); + + tables_in_select_query_ast->children.push_back(std::move(array_join_query_element_ast)); + + return tables_in_select_query_ast; +} + +} diff --git a/src/Analyzer/ArrayJoinNode.h b/src/Analyzer/ArrayJoinNode.h new file mode 100644 index 00000000000..50d53df465a --- /dev/null +++ b/src/Analyzer/ArrayJoinNode.h @@ -0,0 +1,113 @@ +#pragma once + +#include +#include +#include + +#include +#include + +#include +#include + +namespace DB +{ + +/** Array join node represents array join in query tree. + * + * In query tree array join expressions are represented by list query tree node. + * + * Example: SELECT id FROM test_table ARRAY JOIN [1, 2, 3] as a. + * + * Multiple expressions can be inside single array join. + * Example: SELECT id FROM test_table ARRAY JOIN [1, 2, 3] as a, [4, 5, 6] as b. + * Example: SELECT id FROM test_table ARRAY JOIN array_column_1 AS value_1, array_column_2 AS value_2. + * + * Multiple array joins can be inside JOIN TREE. + * Example: SELECT id FROM test_table ARRAY JOIN array_column_1 ARRAY JOIN array_column_2. + * + * Array join can be used inside JOIN TREE with ordinary JOINS. + * Example: SELECT t1.id FROM test_table_1 AS t1 INNER JOIN test_table_2 AS t2 ON t1.id = t2.id ARRAY JOIN [1,2,3]; + * Example: SELECT t1.id FROM test_table_1 AS t1 ARRAY JOIN [1,2,3] INNER JOIN test_table_2 AS t2 ON t1.id = t2.id; + */ +class ArrayJoinNode; +using ArrayJoinNodePtr = std::shared_ptr; + +class ArrayJoinNode final : public IQueryTreeNode +{ +public: + /** Construct array join node with table expression. + * Example: SELECT id FROM test_table ARRAY JOIN [1, 2, 3] as a. + * test_table - table expression. + * join_expression_list - list of array join expressions. 
+ */ + ArrayJoinNode(QueryTreeNodePtr table_expression_, QueryTreeNodePtr join_expressions_, bool is_left_); + + /// Get table expression + const QueryTreeNodePtr & getTableExpression() const + { + return children[table_expression_child_index]; + } + + /// Get table expression + QueryTreeNodePtr & getTableExpression() + { + return children[table_expression_child_index]; + } + + /// Get join expressions + const ListNode & getJoinExpressions() const + { + return children[join_expressions_child_index]->as(); + } + + /// Get join expressions + ListNode & getJoinExpressions() + { + return children[join_expressions_child_index]->as(); + } + + /// Get join expressions node + const QueryTreeNodePtr & getJoinExpressionsNode() const + { + return children[join_expressions_child_index]; + } + + /// Get join expressions node + QueryTreeNodePtr & getJoinExpressionsNode() + { + return children[join_expressions_child_index]; + } + + /// Returns true if array join is left, false otherwise + bool isLeft() const + { + return is_left; + } + + QueryTreeNodeType getNodeType() const override + { + return QueryTreeNodeType::ARRAY_JOIN; + } + + void dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const override; + +protected: + bool isEqualImpl(const IQueryTreeNode & rhs) const override; + + void updateTreeHashImpl(HashState & state) const override; + + QueryTreeNodePtr cloneImpl() const override; + + ASTPtr toASTImpl() const override; + +private: + bool is_left = false; + + static constexpr size_t table_expression_child_index = 0; + static constexpr size_t join_expressions_child_index = 1; + static constexpr size_t children_size = join_expressions_child_index + 1; +}; + +} + diff --git a/src/Analyzer/CMakeLists.txt b/src/Analyzer/CMakeLists.txt new file mode 100644 index 00000000000..766767b5c13 --- /dev/null +++ b/src/Analyzer/CMakeLists.txt @@ -0,0 +1,7 @@ +if (ENABLE_TESTS) + add_subdirectory(tests) +endif() + +if (ENABLE_EXAMPLES) + add_subdirectory(examples) +endif() diff --git a/src/Analyzer/ColumnNode.cpp b/src/Analyzer/ColumnNode.cpp new file mode 100644 index 00000000000..4d0d349dabb --- /dev/null +++ b/src/Analyzer/ColumnNode.cpp @@ -0,0 +1,97 @@ +#include + +#include + +#include +#include +#include + +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +ColumnNode::ColumnNode(NameAndTypePair column_, QueryTreeNodePtr expression_node_, QueryTreeNodeWeakPtr column_source_) + : IQueryTreeNode(children_size, weak_pointers_size) + , column(std::move(column_)) +{ + children[expression_child_index] = std::move(expression_node_); + getSourceWeakPointer() = std::move(column_source_); +} + +ColumnNode::ColumnNode(NameAndTypePair column_, QueryTreeNodeWeakPtr column_source_) + : ColumnNode(std::move(column_), nullptr /*expression_node*/, std::move(column_source_)) +{ +} + +QueryTreeNodePtr ColumnNode::getColumnSource() const +{ + auto lock = getSourceWeakPointer().lock(); + if (!lock) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Column {} {} query tree node does not have valid source node", + column.name, + column.type->getName()); + + return lock; +} + +QueryTreeNodePtr ColumnNode::getColumnSourceOrNull() const +{ + return getSourceWeakPointer().lock(); +} + +void ColumnNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & state, size_t indent) const +{ + buffer << std::string(indent, ' ') << "COLUMN id: " << state.getNodeId(this); + + if (hasAlias()) + buffer << ", alias: " << getAlias(); + + buffer << ", column_name: " << column.name 
<< ", result_type: " << column.type->getName(); + + auto column_source_ptr = getSourceWeakPointer().lock(); + if (column_source_ptr) + buffer << ", source_id: " << state.getNodeId(column_source_ptr.get()); + + const auto & expression = getExpression(); + + if (expression) + { + buffer << '\n' << std::string(indent + 2, ' ') << "EXPRESSION\n"; + expression->dumpTreeImpl(buffer, state, indent + 4); + } +} + +bool ColumnNode::isEqualImpl(const IQueryTreeNode & rhs) const +{ + const auto & rhs_typed = assert_cast(rhs); + return column == rhs_typed.column; +} + +void ColumnNode::updateTreeHashImpl(HashState & hash_state) const +{ + hash_state.update(column.name.size()); + hash_state.update(column.name); + + const auto & column_type_name = column.type->getName(); + hash_state.update(column_type_name.size()); + hash_state.update(column_type_name); +} + +QueryTreeNodePtr ColumnNode::cloneImpl() const +{ + return std::make_shared(column, getColumnSource()); +} + +ASTPtr ColumnNode::toASTImpl() const +{ + return std::make_shared(column.name); +} + +} diff --git a/src/Analyzer/ColumnNode.h b/src/Analyzer/ColumnNode.h new file mode 100644 index 00000000000..e378bc5f3d0 --- /dev/null +++ b/src/Analyzer/ColumnNode.h @@ -0,0 +1,151 @@ +#pragma once + +#include + +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +/** Column node represents column in query tree. + * Column node can have weak pointer to its column source. + * Column source can be table expression, lambda, subquery. + * + * For table ALIAS columns. Column node must contain expression. + * For ARRAY JOIN join expression column. Column node must contain expression. + * + * During query analysis pass identifier node is resolved into column. See IdentifierNode.h. + * + * Examples: + * SELECT id FROM test_table. id is identifier that must be resolved to column node during query analysis pass. + * SELECT lambda(x -> x + 1, [1,2,3]). x is identifier inside lambda that must be resolved to column node during query analysis pass. + * + * Column node is initialized with column name, type and column source weak pointer. + * In case of ALIAS column node is initialized with column name, type, alias expression and column source weak pointer. 
+ */ +class ColumnNode; +using ColumnNodePtr = std::shared_ptr; + +class ColumnNode final : public IQueryTreeNode +{ +public: + /// Construct column node with column name, type, column expression and column source weak pointer + ColumnNode(NameAndTypePair column_, QueryTreeNodePtr expression_node_, QueryTreeNodeWeakPtr column_source_); + + /// Construct column node with column name, type and column source weak pointer + ColumnNode(NameAndTypePair column_, QueryTreeNodeWeakPtr column_source_); + + /// Get column + const NameAndTypePair & getColumn() const + { + return column; + } + + /// Get column name + const String & getColumnName() const + { + return column.name; + } + + /// Get column type + const DataTypePtr & getColumnType() const + { + return column.type; + } + + /// Set column type + void setColumnType(DataTypePtr column_type) + { + column.type = std::move(column_type); + } + + /// Returns true if column node has expression, false otherwise + bool hasExpression() const + { + return children[expression_child_index] != nullptr; + } + + /// Get column node expression node + const QueryTreeNodePtr & getExpression() const + { + return children[expression_child_index]; + } + + /// Get column node expression node + QueryTreeNodePtr & getExpression() + { + return children[expression_child_index]; + } + + /// Get column node expression node, if there are no expression node exception is thrown + QueryTreeNodePtr & getExpressionOrThrow() + { + if (!children[expression_child_index]) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Column expression is not initialized"); + + return children[expression_child_index]; + } + + /// Set column node expression node + void setExpression(QueryTreeNodePtr expression_value) + { + children[expression_child_index] = std::move(expression_value); + } + + /** Get column source. + * If column source is not valid logical exception is thrown. + */ + QueryTreeNodePtr getColumnSource() const; + + /** Get column source. + * If column source is not valid null is returned. 
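ColumnNode keeps its source only as a weak pointer, which is why the new file provides both a throwing getColumnSource and a nullable getColumnSourceOrNull. The lock-or-throw idiom in isolation, with a plain std::runtime_error standing in for the LOGICAL_ERROR exception:

```cpp
#include <iostream>
#include <memory>
#include <stdexcept>
#include <string>

struct Source { std::string name; };

struct Column
{
    std::string name;
    std::weak_ptr<Source> source;   // non-owning: does not keep the source alive

    std::shared_ptr<Source> getSource() const
    {
        if (auto locked = source.lock())
            return locked;
        throw std::runtime_error("Column " + name + " does not have a valid source node");
    }

    std::shared_ptr<Source> getSourceOrNull() const { return source.lock(); }
};

int main()
{
    auto table = std::make_shared<Source>(Source{"test_table"});
    Column column{"id", table};

    std::cout << column.getSource()->name << '\n';

    table.reset();                                                  // the owner goes away...
    std::cout << (column.getSourceOrNull() == nullptr) << '\n';     // ...and the weak pointer expires
}
```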
+ */ + QueryTreeNodePtr getColumnSourceOrNull() const; + + QueryTreeNodeType getNodeType() const override + { + return QueryTreeNodeType::COLUMN; + } + + DataTypePtr getResultType() const override + { + return column.type; + } + + void dumpTreeImpl(WriteBuffer & buffer, FormatState & state, size_t indent) const override; + +protected: + bool isEqualImpl(const IQueryTreeNode & rhs) const override; + + void updateTreeHashImpl(HashState & hash_state) const override; + + QueryTreeNodePtr cloneImpl() const override; + + ASTPtr toASTImpl() const override; + +private: + const QueryTreeNodeWeakPtr & getSourceWeakPointer() const + { + return weak_pointers[source_weak_pointer_index]; + } + + QueryTreeNodeWeakPtr & getSourceWeakPointer() + { + return weak_pointers[source_weak_pointer_index]; + } + + NameAndTypePair column; + + static constexpr size_t expression_child_index = 0; + static constexpr size_t children_size = expression_child_index + 1; + + static constexpr size_t source_weak_pointer_index = 0; + static constexpr size_t weak_pointers_size = source_weak_pointer_index + 1; +}; + +} diff --git a/src/Analyzer/ColumnTransformers.cpp b/src/Analyzer/ColumnTransformers.cpp new file mode 100644 index 00000000000..27466ce5c27 --- /dev/null +++ b/src/Analyzer/ColumnTransformers.cpp @@ -0,0 +1,357 @@ +#include + +#include + +#include +#include +#include + +#include +#include +#include + +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int ILLEGAL_TYPE_OF_ARGUMENT; +} + +/// IColumnTransformerNode implementation + +const char * toString(ColumnTransfomerType type) +{ + switch (type) + { + case ColumnTransfomerType::APPLY: return "APPLY"; + case ColumnTransfomerType::EXCEPT: return "EXCEPT"; + case ColumnTransfomerType::REPLACE: return "REPLACE"; + } +} + +IColumnTransformerNode::IColumnTransformerNode(size_t children_size) + : IQueryTreeNode(children_size) +{} + +/// ApplyColumnTransformerNode implementation + +const char * toString(ApplyColumnTransformerType type) +{ + switch (type) + { + case ApplyColumnTransformerType::LAMBDA: return "LAMBDA"; + case ApplyColumnTransformerType::FUNCTION: return "FUNCTION"; + } +} + +ApplyColumnTransformerNode::ApplyColumnTransformerNode(QueryTreeNodePtr expression_node_) + : IColumnTransformerNode(children_size) +{ + if (expression_node_->getNodeType() == QueryTreeNodeType::LAMBDA) + apply_transformer_type = ApplyColumnTransformerType::LAMBDA; + else if (expression_node_->getNodeType() == QueryTreeNodeType::FUNCTION) + apply_transformer_type = ApplyColumnTransformerType::FUNCTION; + else + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Apply column transformer expression must be lambda or function. 
Actual {}", + expression_node_->getNodeTypeName()); + + children[expression_child_index] = std::move(expression_node_); +} + +void ApplyColumnTransformerNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const +{ + buffer << std::string(indent, ' ') << "APPLY COLUMN TRANSFORMER id: " << format_state.getNodeId(this); + buffer << ", apply_transformer_type: " << toString(apply_transformer_type); + + buffer << '\n' << std::string(indent + 2, ' ') << "EXPRESSION" << '\n'; + + const auto & expression_node = getExpressionNode(); + expression_node->dumpTreeImpl(buffer, format_state, indent + 4); +} + +bool ApplyColumnTransformerNode::isEqualImpl(const IQueryTreeNode & rhs) const +{ + const auto & rhs_typed = assert_cast(rhs); + return apply_transformer_type == rhs_typed.apply_transformer_type; +} + +void ApplyColumnTransformerNode::updateTreeHashImpl(IQueryTreeNode::HashState & hash_state) const +{ + hash_state.update(static_cast(getTransformerType())); + hash_state.update(static_cast(getApplyTransformerType())); +} + +QueryTreeNodePtr ApplyColumnTransformerNode::cloneImpl() const +{ + return std::make_shared(getExpressionNode()); +} + +ASTPtr ApplyColumnTransformerNode::toASTImpl() const +{ + auto ast_apply_transformer = std::make_shared(); + const auto & expression_node = getExpressionNode(); + + if (apply_transformer_type == ApplyColumnTransformerType::FUNCTION) + { + auto & function_expression = expression_node->as(); + ast_apply_transformer->func_name = function_expression.getFunctionName(); + ast_apply_transformer->parameters = function_expression.getParametersNode()->toAST(); + } + else + { + auto & lambda_expression = expression_node->as(); + if (!lambda_expression.getArgumentNames().empty()) + ast_apply_transformer->lambda_arg = lambda_expression.getArgumentNames()[0]; + ast_apply_transformer->lambda = lambda_expression.toAST(); + } + + return ast_apply_transformer; +} + +/// ExceptColumnTransformerNode implementation + +ExceptColumnTransformerNode::ExceptColumnTransformerNode(Names except_column_names_, bool is_strict_) + : IColumnTransformerNode(children_size) + , except_transformer_type(ExceptColumnTransformerType::COLUMN_LIST) + , except_column_names(std::move(except_column_names_)) + , is_strict(is_strict_) +{ +} + +ExceptColumnTransformerNode::ExceptColumnTransformerNode(std::shared_ptr column_matcher_) + : IColumnTransformerNode(children_size) + , except_transformer_type(ExceptColumnTransformerType::REGEXP) + , column_matcher(std::move(column_matcher_)) +{ +} + +bool ExceptColumnTransformerNode::isColumnMatching(const std::string & column_name) const +{ + if (column_matcher) + return RE2::PartialMatch(column_name, *column_matcher); + + for (const auto & name : except_column_names) + if (column_name == name) + return true; + + return false; +} + +const char * toString(ExceptColumnTransformerType type) +{ + switch (type) + { + case ExceptColumnTransformerType::REGEXP: + return "REGEXP"; + case ExceptColumnTransformerType::COLUMN_LIST: + return "COLUMN_LIST"; + } +} + +void ExceptColumnTransformerNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const +{ + buffer << std::string(indent, ' ') << "EXCEPT COLUMN TRANSFORMER id: " << format_state.getNodeId(this); + buffer << ", except_transformer_type: " << toString(except_transformer_type); + + if (column_matcher) + { + buffer << ", pattern: " << column_matcher->pattern(); + return; + } + else + { + buffer << ", identifiers: "; + + size_t except_column_names_size = 
except_column_names.size(); + for (size_t i = 0; i < except_column_names_size; ++i) + { + buffer << except_column_names[i]; + + if (i + 1 != except_column_names_size) + buffer << ", "; + } + } +} + +bool ExceptColumnTransformerNode::isEqualImpl(const IQueryTreeNode & rhs) const +{ + const auto & rhs_typed = assert_cast(rhs); + if (except_transformer_type != rhs_typed.except_transformer_type || + is_strict != rhs_typed.is_strict || + except_column_names != rhs_typed.except_column_names) + return false; + + const auto & rhs_column_matcher = rhs_typed.column_matcher; + + if (!column_matcher && !rhs_column_matcher) + return true; + else if (column_matcher && !rhs_column_matcher) + return false; + else if (!column_matcher && rhs_column_matcher) + return false; + + return column_matcher->pattern() == rhs_column_matcher->pattern(); +} + +void ExceptColumnTransformerNode::updateTreeHashImpl(IQueryTreeNode::HashState & hash_state) const +{ + hash_state.update(static_cast(getTransformerType())); + hash_state.update(static_cast(getExceptTransformerType())); + + hash_state.update(except_column_names.size()); + + for (const auto & column_name : except_column_names) + { + hash_state.update(column_name.size()); + hash_state.update(column_name); + } + + if (column_matcher) + { + const auto & pattern = column_matcher->pattern(); + hash_state.update(pattern.size()); + hash_state.update(pattern); + } +} + +QueryTreeNodePtr ExceptColumnTransformerNode::cloneImpl() const +{ + if (except_transformer_type == ExceptColumnTransformerType::REGEXP) + return std::make_shared(column_matcher); + + return std::make_shared(except_column_names, is_strict); +} + +ASTPtr ExceptColumnTransformerNode::toASTImpl() const +{ + auto ast_except_transformer = std::make_shared(); + + if (column_matcher) + { + ast_except_transformer->setPattern(column_matcher->pattern()); + return ast_except_transformer; + } + + ast_except_transformer->children.reserve(except_column_names.size()); + for (const auto & name : except_column_names) + ast_except_transformer->children.push_back(std::make_shared(name)); + + return ast_except_transformer; +} + +/// ReplaceColumnTransformerNode implementation + +ReplaceColumnTransformerNode::ReplaceColumnTransformerNode(const std::vector & replacements_, bool is_strict_) + : IColumnTransformerNode(children_size) + , is_strict(is_strict_) +{ + children[replacements_child_index] = std::make_shared(); + + auto & replacement_expressions_nodes = getReplacements().getNodes(); + + std::unordered_set replacement_names_set; + + for (const auto & replacement : replacements_) + { + auto [_, inserted] = replacement_names_set.emplace(replacement.column_name); + + if (!inserted) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Expressions in column transformer replace should not contain same replacement {} more than once", + replacement.column_name); + + replacements_names.push_back(replacement.column_name); + replacement_expressions_nodes.push_back(replacement.expression_node); + } +} + +QueryTreeNodePtr ReplaceColumnTransformerNode::findReplacementExpression(const std::string & expression_name) +{ + auto it = std::find(replacements_names.begin(), replacements_names.end(), expression_name); + if (it == replacements_names.end()) + return {}; + + size_t replacement_index = it - replacements_names.begin(); + auto & replacement_expressions_nodes = getReplacements().getNodes(); + return replacement_expressions_nodes[replacement_index]; +} + +void ReplaceColumnTransformerNode::dumpTreeImpl(WriteBuffer & buffer, 
FormatState & format_state, size_t indent) const +{ + buffer << std::string(indent, ' ') << "REPLACE COLUMN TRANSFORMER id: " << format_state.getNodeId(this); + + const auto & replacements_nodes = getReplacements().getNodes(); + size_t replacements_size = replacements_nodes.size(); + buffer << '\n' << std::string(indent + 2, ' ') << "REPLACEMENTS " << replacements_size << '\n'; + + for (size_t i = 0; i < replacements_size; ++i) + { + const auto & replacement_name = replacements_names[i]; + buffer << std::string(indent + 4, ' ') << "REPLACEMENT NAME " << replacement_name; + buffer << " EXPRESSION" << '\n'; + const auto & expression_node = replacements_nodes[i]; + expression_node->dumpTreeImpl(buffer, format_state, indent + 6); + + if (i + 1 != replacements_size) + buffer << '\n'; + } +} + +bool ReplaceColumnTransformerNode::isEqualImpl(const IQueryTreeNode & rhs) const +{ + const auto & rhs_typed = assert_cast(rhs); + return is_strict == rhs_typed.is_strict && replacements_names == rhs_typed.replacements_names; +} + +void ReplaceColumnTransformerNode::updateTreeHashImpl(IQueryTreeNode::HashState & hash_state) const +{ + hash_state.update(static_cast(getTransformerType())); + + const auto & replacement_expressions_nodes = getReplacements().getNodes(); + size_t replacements_size = replacement_expressions_nodes.size(); + hash_state.update(replacements_size); + + for (size_t i = 0; i < replacements_size; ++i) + { + const auto & replacement_name = replacements_names[i]; + hash_state.update(replacement_name.size()); + hash_state.update(replacement_name); + } +} + +QueryTreeNodePtr ReplaceColumnTransformerNode::cloneImpl() const +{ + auto result_replace_transformer = std::make_shared(std::vector{}, false); + + result_replace_transformer->is_strict = is_strict; + result_replace_transformer->replacements_names = replacements_names; + + return result_replace_transformer; +} + +ASTPtr ReplaceColumnTransformerNode::toASTImpl() const +{ + auto ast_replace_transformer = std::make_shared(); + + const auto & replacement_expressions_nodes = getReplacements().getNodes(); + size_t replacements_size = replacement_expressions_nodes.size(); + + ast_replace_transformer->children.reserve(replacements_size); + + for (size_t i = 0; i < replacements_size; ++i) + { + auto replacement_ast = std::make_shared(); + replacement_ast->name = replacements_names[i]; + replacement_ast->expr = replacement_expressions_nodes[i]->toAST(); + ast_replace_transformer->children.push_back(replacement_ast); + } + + return ast_replace_transformer; +} + +} diff --git a/src/Analyzer/ColumnTransformers.h b/src/Analyzer/ColumnTransformers.h new file mode 100644 index 00000000000..e96e606d923 --- /dev/null +++ b/src/Analyzer/ColumnTransformers.h @@ -0,0 +1,316 @@ +#pragma once + +#include + +#include +#include +#include + +namespace DB +{ + +/** Transformers are query tree nodes that handle additional logic that you can apply after MatcherQueryTreeNode is resolved. + * Check MatcherQueryTreeNode.h before reading this documentation. + * + * They main purpose is to apply some logic for expressions after matcher is resolved. + * There are 3 types of transformers: + * + * 1. APPLY transformer: + * APPLY transformer transform matched expression using lambda or function into another expression. + * It has 2 syntax variants: + * 1. lambda variant: SELECT matcher APPLY (x -> expr(x)). + * 2. function variant: SELECT matcher APPLY function_name(optional_parameters). + * + * 2. EXCEPT transformer: + * EXCEPT transformer discard some columns. 
+ * It has 2 syntax variants: + * 1. regexp variant: SELECT matcher EXCEPT ('regexp'). + * 2. column names list variant: SELECT matcher EXCEPT (column_name_1, ...). + * + * 3. REPLACE transformer: + * REPLACE transformer applies similar transformation as APPLY transformer, but only for expressions + * that match replacement expression name. + * + * Example: + * CREATE TABLE test_table (id UInt64) ENGINE=TinyLog; + * SELECT * REPLACE (id + 1 AS id) FROM test_table. + * This query is transformed into SELECT id + 1 FROM test_table. + * It is important that AS id is not alias, it is replacement name. id + 1 is replacement expression. + * + * REPLACE transformer cannot contain multiple replacements with same name. + * + * REPLACE transformer expression does not necessary include replacement column name. + * Example: + * SELECT * REPLACE (1 AS id) FROM test_table. + * + * REPLACE transformer expression does not throw exception if there are no columns to apply replacement. + * Example: + * SELECT * REPLACE (1 AS unknown_column) FROM test_table; + * + * REPLACE transform can contain multiple replacements. + * Example: + * SELECT * REPLACE (1 AS id, 2 AS value). + * + * Matchers can be combined together and chained. + * Example: + * SELECT * EXCEPT (id) APPLY (x -> toString(x)) APPLY (x -> length(x)) FROM test_table. + */ + +/// Column transformer type +enum class ColumnTransfomerType +{ + APPLY, + EXCEPT, + REPLACE +}; + +/// Get column transformer type name +const char * toString(ColumnTransfomerType type); + +class IColumnTransformerNode; +using ColumnTransformerNodePtr = std::shared_ptr; +using ColumnTransformersNodes = std::vector; + +/// IColumnTransformer base interface. +class IColumnTransformerNode : public IQueryTreeNode +{ +public: + /// Get transformer type + virtual ColumnTransfomerType getTransformerType() const = 0; + + /// Get transformer type name + const char * getTransformerTypeName() const + { + return toString(getTransformerType()); + } + + QueryTreeNodeType getNodeType() const final + { + return QueryTreeNodeType::TRANSFORMER; + } + +protected: + /// Construct column transformer node and resize children to children size + explicit IColumnTransformerNode(size_t children_size); +}; + +enum class ApplyColumnTransformerType +{ + LAMBDA, + FUNCTION +}; + +/// Get apply column transformer type name +const char * toString(ApplyColumnTransformerType type); + +class ApplyColumnTransformerNode; +using ApplyColumnTransformerNodePtr = std::shared_ptr; + +/// Apply column transformer +class ApplyColumnTransformerNode final : public IColumnTransformerNode +{ +public: + /** Initialize apply column transformer with expression node. + * Expression node must be lambda or function otherwise exception is thrown. 
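ExceptColumnTransformerNode matches either against an explicit name list or against a regular expression (RE2 in the real code). A small self-contained approximation of isColumnMatching, using std::regex in place of RE2::PartialMatch so the sketch has no external dependency:

```cpp
#include <iostream>
#include <optional>
#include <regex>
#include <string>
#include <vector>

struct ExceptMatcher
{
    std::vector<std::string> except_column_names;   // COLUMN_LIST variant
    std::optional<std::regex> column_matcher;       // REGEXP variant (RE2 in the diff)

    bool isColumnMatching(const std::string & column_name) const
    {
        if (column_matcher)
            return std::regex_search(column_name, *column_matcher);   // ~ RE2::PartialMatch

        for (const auto & name : except_column_names)
            if (column_name == name)
                return true;
        return false;
    }
};

int main()
{
    ExceptMatcher by_list{{"id", "value"}, std::nullopt};
    ExceptMatcher by_regexp{{}, std::regex("^tmp_")};

    std::cout << by_list.isColumnMatching("id") << ' '
              << by_regexp.isColumnMatching("tmp_counter") << ' '
              << by_regexp.isColumnMatching("counter") << '\n';   // 1 1 0
}
```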
+ */ + explicit ApplyColumnTransformerNode(QueryTreeNodePtr expression_node_); + + /// Get apply transformer type + ApplyColumnTransformerType getApplyTransformerType() const + { + return apply_transformer_type; + } + + /// Get apply transformer expression node + const QueryTreeNodePtr & getExpressionNode() const + { + return children[expression_child_index]; + } + + ColumnTransfomerType getTransformerType() const override + { + return ColumnTransfomerType::APPLY; + } + + void dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const override; + +protected: + bool isEqualImpl(const IQueryTreeNode & rhs) const override; + + void updateTreeHashImpl(IQueryTreeNode::HashState & hash_state) const override; + + QueryTreeNodePtr cloneImpl() const override; + + ASTPtr toASTImpl() const override; + +private: + ApplyColumnTransformerType apply_transformer_type = ApplyColumnTransformerType::LAMBDA; + + static constexpr size_t expression_child_index = 0; + static constexpr size_t children_size = expression_child_index + 1; +}; + +/// Except column transformer type +enum class ExceptColumnTransformerType +{ + REGEXP, + COLUMN_LIST, +}; + +const char * toString(ExceptColumnTransformerType type); + +class ExceptColumnTransformerNode; +using ExceptColumnTransformerNodePtr = std::shared_ptr; + +/** Except column transformer. + * Strict EXCEPT column transformer must use all column names during matched nodes transformation. + * + * Example: + * CREATE TABLE test_table (id UInt64, value String) ENGINE=TinyLog; + * SELECT * EXCEPT STRICT (id, value1) FROM test_table; + * Such query will throw exception because column with name `value1` was not matched by strict EXCEPT transformer. + * + * Strict is valid only for EXCEPT COLUMN_LIST transformer. + */ +class ExceptColumnTransformerNode final : public IColumnTransformerNode +{ +public: + /// Initialize except column transformer with column names + explicit ExceptColumnTransformerNode(Names except_column_names_, bool is_strict_); + + /// Initialize except column transformer with regexp column matcher + explicit ExceptColumnTransformerNode(std::shared_ptr column_matcher_); + + /// Get except transformer type + ExceptColumnTransformerType getExceptTransformerType() const + { + return except_transformer_type; + } + + /** Returns true if except column transformer is strict, false otherwise. + * Valid only for EXCEPT COLUMN_LIST transformer. + */ + bool isStrict() const + { + return is_strict; + } + + /// Returns true if except transformer match column name, false otherwise. + bool isColumnMatching(const std::string & column_name) const; + + /** Get except column names. + * Valid only for column list except transformer. 
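+     *
+     * Illustrative sketch of how matched columns are typically filtered (variable names are hypothetical):
+     *
+     *     for (const auto & column_name : matched_column_names)
+     *         if (!except_transformer->isColumnMatching(column_name))
+     *             result_column_names.push_back(column_name);
+     *
+     * Columns for which isColumnMatching returns true are discarded from the matcher result.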
+ */ + const Names & getExceptColumnNames() const + { + return except_column_names; + } + + ColumnTransfomerType getTransformerType() const override + { + return ColumnTransfomerType::EXCEPT; + } + + void dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const override; + +protected: + bool isEqualImpl(const IQueryTreeNode & rhs) const override; + + void updateTreeHashImpl(IQueryTreeNode::HashState & hash_state) const override; + + QueryTreeNodePtr cloneImpl() const override; + + ASTPtr toASTImpl() const override; + +private: + ExceptColumnTransformerType except_transformer_type; + Names except_column_names; + std::shared_ptr column_matcher; + bool is_strict = false; + + static constexpr size_t children_size = 0; +}; + +class ReplaceColumnTransformerNode; +using ReplaceColumnTransformerNodePtr = std::shared_ptr; + +/** Replace column transformer. + * Strict replace column transformer must use all replacements during matched nodes transformation. + * + * Example: + * CREATE TABLE test_table (id UInt64, value String) ENGINE=TinyLog; + * SELECT * REPLACE STRICT (1 AS id, 2 AS value_1) FROM test_table; + * Such query will throw exception because column with name `value1` was not matched by strict REPLACE transformer. + */ +class ReplaceColumnTransformerNode final : public IColumnTransformerNode +{ +public: + /// Replacement is column name and replace expression + struct Replacement + { + std::string column_name; + QueryTreeNodePtr expression_node; + }; + + /// Initialize replace column transformer with replacements + explicit ReplaceColumnTransformerNode(const std::vector & replacements_, bool is_strict); + + ColumnTransfomerType getTransformerType() const override + { + return ColumnTransfomerType::REPLACE; + } + + /// Get replacements + const ListNode & getReplacements() const + { + return children[replacements_child_index]->as(); + } + + /// Get replacements node + const QueryTreeNodePtr & getReplacementsNode() const + { + return children[replacements_child_index]; + } + + /// Get replacements names + const Names & getReplacementsNames() const + { + return replacements_names; + } + + /// Returns true if replace column transformer is strict, false otherwise + bool isStrict() const + { + return is_strict; + } + + /** Returns replacement expression if replacement is registered for expression name, null otherwise. + * Returned replacement expression must be cloned by caller. 
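+     *
+     * Illustrative caller sketch (variable names are hypothetical):
+     *
+     *     if (auto replacement_expression = replace_transformer->findReplacementExpression(column_name))
+     *         projection_node = replacement_expression->clone(); /// the caller must clone, as stated above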
+ */ + QueryTreeNodePtr findReplacementExpression(const std::string & expression_name); + + void dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const override; + +protected: + bool isEqualImpl(const IQueryTreeNode & rhs) const override; + + void updateTreeHashImpl(IQueryTreeNode::HashState & hash_state) const override; + + QueryTreeNodePtr cloneImpl() const override; + + ASTPtr toASTImpl() const override; + +private: + ListNode & getReplacements() + { + return children[replacements_child_index]->as(); + } + + Names replacements_names; + bool is_strict = false; + + static constexpr size_t replacements_child_index = 0; + static constexpr size_t children_size = replacements_child_index + 1; +}; + +} diff --git a/src/Analyzer/ConstantNode.cpp b/src/Analyzer/ConstantNode.cpp new file mode 100644 index 00000000000..b7de2acb5a4 --- /dev/null +++ b/src/Analyzer/ConstantNode.cpp @@ -0,0 +1,71 @@ +#include + +#include +#include + +#include +#include +#include + +#include + +#include + +#include + +namespace DB +{ + +ConstantNode::ConstantNode(ConstantValuePtr constant_value_) + : IQueryTreeNode(children_size) + , constant_value(std::move(constant_value_)) + , value_string(applyVisitor(FieldVisitorToString(), constant_value->getValue())) +{ +} + +ConstantNode::ConstantNode(Field value_, DataTypePtr value_data_type_) + : ConstantNode(std::make_shared(convertFieldToTypeOrThrow(value_, *value_data_type_), value_data_type_)) +{} + +ConstantNode::ConstantNode(Field value_) + : ConstantNode(value_, applyVisitor(FieldToDataType(), value_)) +{} + +void ConstantNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const +{ + buffer << std::string(indent, ' ') << "CONSTANT id: " << format_state.getNodeId(this); + + if (hasAlias()) + buffer << ", alias: " << getAlias(); + + buffer << ", constant_value: " << constant_value->getValue().dump(); + buffer << ", constant_value_type: " << constant_value->getType()->getName(); +} + +bool ConstantNode::isEqualImpl(const IQueryTreeNode & rhs) const +{ + const auto & rhs_typed = assert_cast(rhs); + return *constant_value == *rhs_typed.constant_value && value_string == rhs_typed.value_string; +} + +void ConstantNode::updateTreeHashImpl(HashState & hash_state) const +{ + auto type_name = constant_value->getType()->getName(); + hash_state.update(type_name.size()); + hash_state.update(type_name); + + hash_state.update(value_string.size()); + hash_state.update(value_string); +} + +QueryTreeNodePtr ConstantNode::cloneImpl() const +{ + return std::make_shared(constant_value); +} + +ASTPtr ConstantNode::toASTImpl() const +{ + return std::make_shared(constant_value->getValue()); +} + +} diff --git a/src/Analyzer/ConstantNode.h b/src/Analyzer/ConstantNode.h new file mode 100644 index 00000000000..0a6868b72d4 --- /dev/null +++ b/src/Analyzer/ConstantNode.h @@ -0,0 +1,77 @@ +#pragma once + +#include + +#include + +namespace DB +{ + +/** Constant node represents constant value in query tree. + * Constant value must be representable by Field. + * Examples: 1, 'constant_string', [1,2,3]. + */ +class ConstantNode; +using ConstantNodePtr = std::shared_ptr; + +class ConstantNode final : public IQueryTreeNode +{ +public: + /// Construct constant query tree node from constant value + explicit ConstantNode(ConstantValuePtr constant_value_); + + /** Construct constant query tree node from field and data type. + * + * Throws exception if value cannot be converted to value data type. 
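+     *
+     * Illustrative sketch; the concrete value and data type are arbitrary examples:
+     *
+     *     ConstantNode explicit_type_node(Field(UInt64(1)), std::make_shared<DataTypeUInt64>());
+     *     ConstantNode derived_type_node(Field(String("constant_string"))); /// data type derived via FieldToDataType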
+ */ + explicit ConstantNode(Field value_, DataTypePtr value_data_type_); + + /// Construct constant query tree node from field, data type will be derived from field value + explicit ConstantNode(Field value_); + + /// Get constant value + const Field & getValue() const + { + return constant_value->getValue(); + } + + /// Get constant value string representation + const String & getValueStringRepresentation() const + { + return value_string; + } + + ConstantValuePtr getConstantValueOrNull() const override + { + return constant_value; + } + + QueryTreeNodeType getNodeType() const override + { + return QueryTreeNodeType::CONSTANT; + } + + DataTypePtr getResultType() const override + { + return constant_value->getType(); + } + + void dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const override; + +protected: + bool isEqualImpl(const IQueryTreeNode & rhs) const override; + + void updateTreeHashImpl(HashState & hash_state) const override; + + QueryTreeNodePtr cloneImpl() const override; + + ASTPtr toASTImpl() const override; + +private: + ConstantValuePtr constant_value; + String value_string; + + static constexpr size_t children_size = 0; +}; + +} diff --git a/src/Analyzer/ConstantValue.h b/src/Analyzer/ConstantValue.h new file mode 100644 index 00000000000..a9e2ffd9e65 --- /dev/null +++ b/src/Analyzer/ConstantValue.h @@ -0,0 +1,47 @@ +#pragma once + +#include +#include + +namespace DB +{ + +/** Immutable constant value representation during analysis stage. + * Some query nodes can be represented by constant (scalar subqueries, functions with constant arguments). + */ +class ConstantValue; +using ConstantValuePtr = std::shared_ptr; + +class ConstantValue +{ +public: + ConstantValue(Field value_, DataTypePtr data_type_) + : value(std::move(value_)) + , data_type(std::move(data_type_)) + {} + + const Field & getValue() const + { + return value; + } + + const DataTypePtr & getType() const + { + return data_type; + } +private: + Field value; + DataTypePtr data_type; +}; + +inline bool operator==(const ConstantValue & lhs, const ConstantValue & rhs) +{ + return lhs.getValue() == rhs.getValue() && lhs.getType()->equals(*rhs.getType()); +} + +inline bool operator!=(const ConstantValue & lhs, const ConstantValue & rhs) +{ + return !(lhs == rhs); +} + +} diff --git a/src/Analyzer/FunctionNode.cpp b/src/Analyzer/FunctionNode.cpp new file mode 100644 index 00000000000..55e097cc5f4 --- /dev/null +++ b/src/Analyzer/FunctionNode.cpp @@ -0,0 +1,194 @@ +#include + +#include +#include + +#include +#include + +#include + +#include + +#include + +#include + +namespace DB +{ + +FunctionNode::FunctionNode(String function_name_) + : IQueryTreeNode(children_size) + , function_name(function_name_) +{ + children[parameters_child_index] = std::make_shared(); + children[arguments_child_index] = std::make_shared(); +} + +void FunctionNode::resolveAsFunction(FunctionOverloadResolverPtr function_value, DataTypePtr result_type_value) +{ + aggregate_function = nullptr; + function = std::move(function_value); + result_type = std::move(result_type_value); + function_name = function->getName(); +} + +void FunctionNode::resolveAsAggregateFunction(AggregateFunctionPtr aggregate_function_value, DataTypePtr result_type_value) +{ + function = nullptr; + aggregate_function = std::move(aggregate_function_value); + result_type = std::move(result_type_value); + function_name = aggregate_function->getName(); +} + +void FunctionNode::resolveAsWindowFunction(AggregateFunctionPtr window_function_value, 
DataTypePtr result_type_value) +{ + resolveAsAggregateFunction(window_function_value, result_type_value); +} + +void FunctionNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const +{ + buffer << std::string(indent, ' ') << "FUNCTION id: " << format_state.getNodeId(this); + + if (hasAlias()) + buffer << ", alias: " << getAlias(); + + buffer << ", function_name: " << function_name; + + std::string function_type = "ordinary"; + if (isAggregateFunction()) + function_type = "aggregate"; + else if (isWindowFunction()) + function_type = "window"; + + buffer << ", function_type: " << function_type; + + if (result_type) + buffer << ", result_type: " + result_type->getName(); + + if (constant_value) + { + buffer << ", constant_value: " << constant_value->getValue().dump(); + buffer << ", constant_value_type: " << constant_value->getType()->getName(); + } + + const auto & parameters = getParameters(); + if (!parameters.getNodes().empty()) + { + buffer << '\n' << std::string(indent + 2, ' ') << "PARAMETERS\n"; + parameters.dumpTreeImpl(buffer, format_state, indent + 4); + } + + const auto & arguments = getArguments(); + if (!arguments.getNodes().empty()) + { + buffer << '\n' << std::string(indent + 2, ' ') << "ARGUMENTS\n"; + arguments.dumpTreeImpl(buffer, format_state, indent + 4); + } + + if (hasWindow()) + { + buffer << '\n' << std::string(indent + 2, ' ') << "WINDOW\n"; + getWindowNode()->dumpTreeImpl(buffer, format_state, indent + 4); + } +} + +bool FunctionNode::isEqualImpl(const IQueryTreeNode & rhs) const +{ + const auto & rhs_typed = assert_cast(rhs); + if (function_name != rhs_typed.function_name || + isAggregateFunction() != rhs_typed.isAggregateFunction() || + isOrdinaryFunction() != rhs_typed.isOrdinaryFunction() || + isWindowFunction() != rhs_typed.isWindowFunction()) + return false; + + if (result_type && rhs_typed.result_type && !result_type->equals(*rhs_typed.getResultType())) + return false; + else if (result_type && !rhs_typed.result_type) + return false; + else if (!result_type && rhs_typed.result_type) + return false; + + if (constant_value && rhs_typed.constant_value && *constant_value != *rhs_typed.constant_value) + return false; + else if (constant_value && !rhs_typed.constant_value) + return false; + else if (!constant_value && rhs_typed.constant_value) + return false; + + return true; +} + +void FunctionNode::updateTreeHashImpl(HashState & hash_state) const +{ + hash_state.update(function_name.size()); + hash_state.update(function_name); + hash_state.update(isOrdinaryFunction()); + hash_state.update(isAggregateFunction()); + hash_state.update(isWindowFunction()); + + if (result_type) + { + auto result_type_name = result_type->getName(); + hash_state.update(result_type_name.size()); + hash_state.update(result_type_name); + } + + if (constant_value) + { + auto constant_dump = applyVisitor(FieldVisitorToString(), constant_value->getValue()); + hash_state.update(constant_dump.size()); + hash_state.update(constant_dump); + + auto constant_value_type_name = constant_value->getType()->getName(); + hash_state.update(constant_value_type_name.size()); + hash_state.update(constant_value_type_name); + } +} + +QueryTreeNodePtr FunctionNode::cloneImpl() const +{ + auto result_function = std::make_shared(function_name); + + /** This is valid for clone method to reuse same function pointers + * because ordinary functions or aggregate functions must be stateless. 
+ */ + result_function->function = function; + result_function->aggregate_function = aggregate_function; + result_function->result_type = result_type; + result_function->constant_value = constant_value; + + return result_function; +} + +ASTPtr FunctionNode::toASTImpl() const +{ + auto function_ast = std::make_shared(); + + function_ast->name = function_name; + function_ast->is_window_function = isWindowFunction(); + + const auto & parameters = getParameters(); + if (!parameters.getNodes().empty()) + { + function_ast->children.push_back(parameters.toAST()); + function_ast->parameters = function_ast->children.back(); + } + + const auto & arguments = getArguments(); + function_ast->children.push_back(arguments.toAST()); + function_ast->arguments = function_ast->children.back(); + + auto window_node = getWindowNode(); + if (window_node) + { + if (auto * identifier_node = window_node->as()) + function_ast->window_name = identifier_node->getIdentifier().getFullName(); + else + function_ast->window_definition = window_node->toAST(); + } + + return function_ast; +} + +} diff --git a/src/Analyzer/FunctionNode.h b/src/Analyzer/FunctionNode.h new file mode 100644 index 00000000000..cb1bf0d8cc5 --- /dev/null +++ b/src/Analyzer/FunctionNode.h @@ -0,0 +1,230 @@ +#pragma once + +#include +#include +#include + +namespace DB +{ + +class IFunctionOverloadResolver; +using FunctionOverloadResolverPtr = std::shared_ptr; + +class IAggregateFunction; +using AggregateFunctionPtr = std::shared_ptr; + +/** Function node represents function in query tree. + * Function syntax: function_name(parameter_1, ...)(argument_1, ...). + * If function does not have parameters its syntax is function_name(argument_1, ...). + * If function does not have arguments its syntax is function_name(). + * + * In query tree function parameters and arguments are represented by ListNode. + * + * Function can be: + * 1. Aggregate function. Example: quantile(0.5)(x), sum(x). + * 2. Non aggregate function. Example: plus(x, x). + * 3. Window function. Example: sum(x) OVER (PARTITION BY expr ORDER BY expr). + * + * Initially function node is initialized with function name. + * For window function client must initialize function window node. + * + * During query analysis pass function must be resolved using `resolveAsFunction`, `resolveAsAggregateFunction`, `resolveAsWindowFunction` methods. + * Resolved function is function that has result type and is initialized with concrete aggregate or non aggregate function. + */ +class FunctionNode; +using FunctionNodePtr = std::shared_ptr; + +class FunctionNode final : public IQueryTreeNode +{ +public: + /** Initialize function node with function name. + * Later during query analysis pass function must be resolved. 
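+     *
+     * Illustrative life-cycle sketch; the argument nodes, overload resolver and result type are hypothetical:
+     *
+     *     auto function_node = std::make_shared<FunctionNode>("plus");
+     *     function_node->getArguments().getNodes() = {lhs_argument_node, rhs_argument_node};
+     *     function_node->resolveAsFunction(plus_function_resolver, std::make_shared<DataTypeUInt64>());
+     *     /// function_node->isResolved() is now true and getResultType() returns the passed type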
+ */ + explicit FunctionNode(String function_name_); + + /// Get function name + const String & getFunctionName() const + { + return function_name; + } + + /// Get parameters + const ListNode & getParameters() const + { + return children[parameters_child_index]->as(); + } + + /// Get parameters + ListNode & getParameters() + { + return children[parameters_child_index]->as(); + } + + /// Get parameters node + const QueryTreeNodePtr & getParametersNode() const + { + return children[parameters_child_index]; + } + + /// Get parameters node + QueryTreeNodePtr & getParametersNode() + { + return children[parameters_child_index]; + } + + /// Get arguments + const ListNode & getArguments() const + { + return children[arguments_child_index]->as(); + } + + /// Get arguments + ListNode & getArguments() + { + return children[arguments_child_index]->as(); + } + + /// Get arguments node + const QueryTreeNodePtr & getArgumentsNode() const + { + return children[arguments_child_index]; + } + + /// Get arguments node + QueryTreeNodePtr & getArgumentsNode() + { + return children[arguments_child_index]; + } + + /// Returns true if function node has window, false otherwise + bool hasWindow() const + { + return children[window_child_index] != nullptr; + } + + /** Get window node. + * Valid only for window function node. + * Result window node can be identifier node or window node. + * 1. It can be identifier node if window function is defined as expr OVER window_name. + * 2. It can be window node if window function is defined as expr OVER (window_name ...). + */ + const QueryTreeNodePtr & getWindowNode() const + { + return children[window_child_index]; + } + + /** Get window node. + * Valid only for window function node. + */ + QueryTreeNodePtr & getWindowNode() + { + return children[window_child_index]; + } + + /** Get non aggregate function. + * If function is not resolved nullptr returned. + */ + const FunctionOverloadResolverPtr & getFunction() const + { + return function; + } + + /** Get aggregate function. + * If function is not resolved nullptr returned. + * If function is resolved as non aggregate function nullptr returned. + */ + const AggregateFunctionPtr & getAggregateFunction() const + { + return aggregate_function; + } + + /// Is function node resolved + bool isResolved() const + { + return result_type != nullptr && (function != nullptr || aggregate_function != nullptr); + } + + /// Is function node window function + bool isWindowFunction() const + { + return getWindowNode() != nullptr; + } + + /// Is function node aggregate function + bool isAggregateFunction() const + { + return aggregate_function != nullptr && !isWindowFunction(); + } + + /// Is function node ordinary function + bool isOrdinaryFunction() const + { + return function != nullptr; + } + + /** Resolve function node as non aggregate function. + * It is important that function name is updated with resolved function name. + * Main motivation for this is query tree optimizations. + * Assume we have `multiIf` function with single condition, it can be converted to `if` function. + * Function name must be updated accordingly. + */ + void resolveAsFunction(FunctionOverloadResolverPtr function_value, DataTypePtr result_type_value); + + /** Resolve function node as aggregate function. + * It is important that function name is updated with resolved function name. + * Main motivation for this is query tree optimizations. 
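+     *
+     * Illustrative sketch of the renaming effect (the resolved aggregate descriptor is hypothetical):
+     *
+     *     node->resolveAsAggregateFunction(resolved_aggregate_function, resolved_result_type);
+     *     node->getFunctionName(); /// now equals resolved_aggregate_function->getName()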
+ */ + void resolveAsAggregateFunction(AggregateFunctionPtr aggregate_function_value, DataTypePtr result_type_value); + + /** Resolve function node as window function. + * It is important that function name is updated with resolved function name. + * Main motivation for this is query tree optimizations. + */ + void resolveAsWindowFunction(AggregateFunctionPtr window_function_value, DataTypePtr result_type_value); + + /// Perform constant folding for function node + void performConstantFolding(ConstantValuePtr constant_folded_value) + { + constant_value = std::move(constant_folded_value); + } + + ConstantValuePtr getConstantValueOrNull() const override + { + return constant_value; + } + + QueryTreeNodeType getNodeType() const override + { + return QueryTreeNodeType::FUNCTION; + } + + DataTypePtr getResultType() const override + { + return result_type; + } + + void dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const override; + +protected: + bool isEqualImpl(const IQueryTreeNode & rhs) const override; + + void updateTreeHashImpl(HashState & hash_state) const override; + + QueryTreeNodePtr cloneImpl() const override; + + ASTPtr toASTImpl() const override; + +private: + String function_name; + FunctionOverloadResolverPtr function; + AggregateFunctionPtr aggregate_function; + DataTypePtr result_type; + ConstantValuePtr constant_value; + + static constexpr size_t parameters_child_index = 0; + static constexpr size_t arguments_child_index = 1; + static constexpr size_t window_child_index = 2; + static constexpr size_t children_size = window_child_index + 1; +}; + +} diff --git a/src/Analyzer/IQueryTreeNode.cpp b/src/Analyzer/IQueryTreeNode.cpp new file mode 100644 index 00000000000..ea2412eadb2 --- /dev/null +++ b/src/Analyzer/IQueryTreeNode.cpp @@ -0,0 +1,332 @@ +#include + +#include + +#include + +#include +#include +#include + +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int UNSUPPORTED_METHOD; +} + +const char * toString(QueryTreeNodeType type) +{ + switch (type) + { + case QueryTreeNodeType::IDENTIFIER: return "IDENTIFIER"; + case QueryTreeNodeType::MATCHER: return "MATCHER"; + case QueryTreeNodeType::TRANSFORMER: return "TRANSFORMER"; + case QueryTreeNodeType::LIST: return "LIST"; + case QueryTreeNodeType::CONSTANT: return "CONSTANT"; + case QueryTreeNodeType::FUNCTION: return "FUNCTION"; + case QueryTreeNodeType::COLUMN: return "COLUMN"; + case QueryTreeNodeType::LAMBDA: return "LAMBDA"; + case QueryTreeNodeType::SORT: return "SORT"; + case QueryTreeNodeType::INTERPOLATE: return "INTERPOLATE"; + case QueryTreeNodeType::WINDOW: return "WINDOW"; + case QueryTreeNodeType::TABLE: return "TABLE"; + case QueryTreeNodeType::TABLE_FUNCTION: return "TABLE_FUNCTION"; + case QueryTreeNodeType::QUERY: return "QUERY"; + case QueryTreeNodeType::ARRAY_JOIN: return "ARRAY_JOIN"; + case QueryTreeNodeType::JOIN: return "JOIN"; + case QueryTreeNodeType::UNION: return "UNION"; + } +} + +IQueryTreeNode::IQueryTreeNode(size_t children_size, size_t weak_pointers_size) +{ + children.resize(children_size); + weak_pointers.resize(weak_pointers_size); +} + +IQueryTreeNode::IQueryTreeNode(size_t children_size) +{ + children.resize(children_size); +} + +namespace +{ + +using NodePair = std::pair; + +struct NodePairHash +{ + size_t operator()(const NodePair & node_pair) const + { + auto hash = std::hash(); + + size_t result = 0; + boost::hash_combine(result, hash(node_pair.first)); + boost::hash_combine(result, hash(node_pair.second)); + + return result; + } +}; 
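+
+/** NodePairHash above is used by IQueryTreeNode::isEqual below: already-compared (lhs, rhs) pointer pairs
+  * are memoized, so shared subtrees and nodes reachable through weak pointers are compared only once.
+  *
+  * Simplified sketch of the memoization step from the loop below:
+  *
+  *     std::unordered_set<NodePair, NodePairHash> equals_pairs;
+  *     if (equals_pairs.contains(std::make_pair(lhs_node, rhs_node)))
+  *         continue; /// this pair was already compared, skip it
+  */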
+ +} + +bool IQueryTreeNode::isEqual(const IQueryTreeNode & rhs) const +{ + std::vector nodes_to_process; + std::unordered_set equals_pairs; + + nodes_to_process.emplace_back(this, &rhs); + + while (!nodes_to_process.empty()) + { + auto nodes_to_compare = nodes_to_process.back(); + nodes_to_process.pop_back(); + + const auto * lhs_node_to_compare = nodes_to_compare.first; + const auto * rhs_node_to_compare = nodes_to_compare.second; + + if (equals_pairs.contains(std::make_pair(lhs_node_to_compare, rhs_node_to_compare))) + continue; + + assert(lhs_node_to_compare); + assert(rhs_node_to_compare); + + if (lhs_node_to_compare->getNodeType() != rhs_node_to_compare->getNodeType() || + lhs_node_to_compare->alias != rhs_node_to_compare->alias || + !lhs_node_to_compare->isEqualImpl(*rhs_node_to_compare)) + { + return false; + } + + const auto & lhs_children = lhs_node_to_compare->children; + const auto & rhs_children = rhs_node_to_compare->children; + + size_t lhs_children_size = lhs_children.size(); + if (lhs_children_size != rhs_children.size()) + return false; + + for (size_t i = 0; i < lhs_children_size; ++i) + { + const auto & lhs_child = lhs_children[i]; + const auto & rhs_child = rhs_children[i]; + + if (!lhs_child && !rhs_child) + continue; + else if (lhs_child && !rhs_child) + return false; + else if (!lhs_child && rhs_child) + return false; + + nodes_to_process.emplace_back(lhs_child.get(), rhs_child.get()); + } + + const auto & lhs_weak_pointers = lhs_node_to_compare->weak_pointers; + const auto & rhs_weak_pointers = rhs_node_to_compare->weak_pointers; + + size_t lhs_weak_pointers_size = lhs_weak_pointers.size(); + + if (lhs_weak_pointers_size != rhs_weak_pointers.size()) + return false; + + for (size_t i = 0; i < lhs_weak_pointers_size; ++i) + { + auto lhs_strong_pointer = lhs_weak_pointers[i].lock(); + auto rhs_strong_pointer = rhs_weak_pointers[i].lock(); + + if (!lhs_strong_pointer && !rhs_strong_pointer) + continue; + else if (lhs_strong_pointer && !rhs_strong_pointer) + return false; + else if (!lhs_strong_pointer && rhs_strong_pointer) + return false; + + nodes_to_process.emplace_back(lhs_strong_pointer.get(), rhs_strong_pointer.get()); + } + + equals_pairs.emplace(lhs_node_to_compare, rhs_node_to_compare); + } + + return true; +} + +IQueryTreeNode::Hash IQueryTreeNode::getTreeHash() const +{ + HashState hash_state; + + std::unordered_map node_to_identifier; + + std::vector nodes_to_process; + nodes_to_process.push_back(this); + + while (!nodes_to_process.empty()) + { + const auto * node_to_process = nodes_to_process.back(); + nodes_to_process.pop_back(); + + auto node_identifier_it = node_to_identifier.find(node_to_process); + if (node_identifier_it != node_to_identifier.end()) + { + hash_state.update(node_identifier_it->second); + continue; + } + + node_to_identifier.emplace(node_to_process, node_to_identifier.size()); + + hash_state.update(static_cast(node_to_process->getNodeType())); + if (!node_to_process->alias.empty()) + { + hash_state.update(node_to_process->alias.size()); + hash_state.update(node_to_process->alias); + } + + node_to_process->updateTreeHashImpl(hash_state); + + hash_state.update(node_to_process->children.size()); + + for (const auto & node_to_process_child : node_to_process->children) + { + if (!node_to_process_child) + continue; + + nodes_to_process.push_back(node_to_process_child.get()); + } + + hash_state.update(node_to_process->weak_pointers.size()); + + for (const auto & weak_pointer : node_to_process->weak_pointers) + { + auto strong_pointer = 
weak_pointer.lock(); + if (!strong_pointer) + continue; + + nodes_to_process.push_back(strong_pointer.get()); + } + } + + Hash result; + hash_state.get128(result); + + return result; +} + +QueryTreeNodePtr IQueryTreeNode::clone() const +{ + /** Clone tree with this node as root. + * + * Algorithm + * For each node we clone state and also create mapping old pointer to new pointer. + * For each cloned node we update weak pointers array. + * + * After that we can update pointer in weak pointers array using old pointer to new pointer mapping. + */ + std::unordered_map old_pointer_to_new_pointer; + std::vector weak_pointers_to_update_after_clone; + + QueryTreeNodePtr result_cloned_node_place; + + std::vector> nodes_to_clone; + nodes_to_clone.emplace_back(this, &result_cloned_node_place); + + while (!nodes_to_clone.empty()) + { + const auto [node_to_clone, place_for_cloned_node] = nodes_to_clone.back(); + nodes_to_clone.pop_back(); + + auto node_clone = node_to_clone->cloneImpl(); + *place_for_cloned_node = node_clone; + + node_clone->setAlias(node_to_clone->alias); + node_clone->setOriginalAST(node_to_clone->original_ast); + node_clone->children = node_to_clone->children; + node_clone->weak_pointers = node_to_clone->weak_pointers; + + old_pointer_to_new_pointer.emplace(node_to_clone, node_clone); + + for (auto & child : node_clone->children) + { + if (!child) + continue; + + nodes_to_clone.emplace_back(child.get(), &child); + } + + for (auto & weak_pointer : node_clone->weak_pointers) + { + weak_pointers_to_update_after_clone.push_back(&weak_pointer); + } + } + + /** Update weak pointers to new pointers if they were changed during clone. + * To do this we check old pointer to new pointer map, if weak pointer + * strong pointer exists as old pointer in map, reinitialize weak pointer with new pointer. + */ + for (auto & weak_pointer_ptr : weak_pointers_to_update_after_clone) + { + assert(weak_pointer_ptr); + auto strong_pointer = weak_pointer_ptr->lock(); + auto it = old_pointer_to_new_pointer.find(strong_pointer.get()); + + /** If node had weak pointer to some other node and this node is not part of cloned subtree do not update weak pointer. + * It will continue to point to previous location and it is expected. + * + * Example: SELECT id FROM test_table; + * During analysis `id` is resolved as column node and `test_table` is column source. + * If we clone `id` column, result column node weak source pointer will point to the same `test_table` column source. 
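+     *
+     * Illustrative sketch of the observable behaviour (the column node is hypothetical):
+     *
+     *     auto cloned_column_node = column_node->clone();
+     *     /// cloned_column_node still points to the original `test_table` source, because the source
+     *     /// is not part of the cloned subtree and therefore has no entry in old_pointer_to_new_pointer.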
+ */ + if (it == old_pointer_to_new_pointer.end()) + continue; + + *weak_pointer_ptr = it->second; + } + + return result_cloned_node_place; +} + +ASTPtr IQueryTreeNode::toAST() const +{ + auto converted_node = toASTImpl(); + + if (auto * ast_with_alias = dynamic_cast(converted_node.get())) + converted_node->setAlias(alias); + + return converted_node; +} + +String IQueryTreeNode::formatOriginalASTForErrorMessage() const +{ + if (!original_ast) + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "Original AST was not set"); + + return original_ast->formatForErrorMessage(); +} + +String IQueryTreeNode::formatConvertedASTForErrorMessage() const +{ + return toAST()->formatForErrorMessage(); +} + +String IQueryTreeNode::dumpTree() const +{ + WriteBufferFromOwnString buffer; + dumpTree(buffer); + + return buffer.str(); +} + +size_t IQueryTreeNode::FormatState::getNodeId(const IQueryTreeNode * node) +{ + auto [it, _] = node_to_id.emplace(node, node_to_id.size()); + return it->second; +} + +void IQueryTreeNode::dumpTree(WriteBuffer & buffer) const +{ + FormatState state; + dumpTreeImpl(buffer, state, 0); +} + +} diff --git a/src/Analyzer/IQueryTreeNode.h b/src/Analyzer/IQueryTreeNode.h new file mode 100644 index 00000000000..c3b067ab005 --- /dev/null +++ b/src/Analyzer/IQueryTreeNode.h @@ -0,0 +1,274 @@ +#pragma once + +#include +#include +#include + +#include + +#include + +#include + +#include +#include + +class SipHash; + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int UNSUPPORTED_METHOD; + extern const int LOGICAL_ERROR; +} + +class WriteBuffer; + +/// Query tree node type +enum class QueryTreeNodeType +{ + IDENTIFIER, + MATCHER, + TRANSFORMER, + LIST, + CONSTANT, + FUNCTION, + COLUMN, + LAMBDA, + SORT, + INTERPOLATE, + WINDOW, + TABLE, + TABLE_FUNCTION, + QUERY, + ARRAY_JOIN, + JOIN, + UNION +}; + +/// Convert query tree node type to string +const char * toString(QueryTreeNodeType type); + +/** Query tree is semantical representation of query. + * Query tree node represent node in query tree. + * IQueryTreeNode is base class for all query tree nodes. + * + * Important property of query tree is that each query tree node can contain weak pointers to other + * query tree nodes. Keeping weak pointer to other query tree nodes can be useful for example for column + * to keep weak pointer to column source, column source can be table, lambda, subquery and preserving of + * such information can significantly simplify query planning. + * + * Another important property of query tree it must be convertible to AST without losing information. + */ +class IQueryTreeNode; +using QueryTreeNodePtr = std::shared_ptr; +using QueryTreeNodes = std::vector; +using QueryTreeNodeWeakPtr = std::weak_ptr; +using QueryTreeWeakNodes = std::vector; + +class IQueryTreeNode : public TypePromotion +{ +public: + virtual ~IQueryTreeNode() = default; + + /// Get query tree node type + virtual QueryTreeNodeType getNodeType() const = 0; + + /// Get query tree node type name + const char * getNodeTypeName() const + { + return toString(getNodeType()); + } + + /** Get result type of query tree node that can be used as part of expression. + * If node does not support this method exception is thrown. + * TODO: Maybe this can be a part of ExpressionQueryTreeNode. 
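+     *
+     * Illustrative sketch (the node variable is hypothetical):
+     *
+     *     if (node->getNodeType() == QueryTreeNodeType::CONSTANT)
+     *     {
+     *         DataTypePtr result_type = node->getResultType(); /// overridden by expression-like nodes such as ConstantNode
+     *     }
+     *
+     * Calling getResultType on a node type that does not override it throws an UNSUPPORTED_METHOD exception.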
+ */ + virtual DataTypePtr getResultType() const + { + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "Method getResultType is not supported for {} query node", getNodeTypeName()); + } + + /// Returns true if node has constant value + bool hasConstantValue() const + { + return getConstantValueOrNull() != nullptr; + } + + /** Returns constant value with type if node has constant value, and can be replaced with it. + * Examples: scalar subquery, function with constant arguments. + */ + virtual const ConstantValue & getConstantValue() const + { + auto constant_value = getConstantValueOrNull(); + if (!constant_value) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Node does not have constant value"); + + return *constant_value; + } + + /// Returns constant value with type if node has constant value or null otherwise + virtual ConstantValuePtr getConstantValueOrNull() const + { + return {}; + } + + /** Is tree equal to other tree with node root. + * + * Aliases of query tree nodes are compared during isEqual call. + * Original ASTs of query tree nodes are not compared during isEqual call. + */ + bool isEqual(const IQueryTreeNode & rhs) const; + + using Hash = std::pair; + using HashState = SipHash; + + /** Get tree hash identifying current tree + * + * Alias of query tree node is part of query tree hash. + * Original AST is not part of query tree hash. + */ + Hash getTreeHash() const; + + /// Get a deep copy of the query tree + QueryTreeNodePtr clone() const; + + /// Returns true if node has alias, false otherwise + bool hasAlias() const + { + return !alias.empty(); + } + + /// Get node alias + const String & getAlias() const + { + return alias; + } + + /// Set node alias + void setAlias(String alias_value) + { + alias = std::move(alias_value); + } + + /// Remove node alias + void removeAlias() + { + alias = {}; + } + + /// Returns true if query tree node has original AST, false otherwise + bool hasOriginalAST() const + { + return original_ast != nullptr; + } + + /// Get query tree node original AST + const ASTPtr & getOriginalAST() const + { + return original_ast; + } + + /** Set query tree node original AST. + * This AST will not be modified later. + */ + void setOriginalAST(ASTPtr original_ast_value) + { + original_ast = std::move(original_ast_value); + } + + /** If query tree has original AST format it for error message. + * Otherwise exception is thrown. + */ + String formatOriginalASTForErrorMessage() const; + + /// Convert query tree to AST + ASTPtr toAST() const; + + /// Convert query tree to AST and then format it for error message. + String formatConvertedASTForErrorMessage() const; + + /** Format AST for error message. + * If original AST exists use `formatOriginalASTForErrorMessage`. + * Otherwise use `formatConvertedASTForErrorMessage`. + */ + String formatASTForErrorMessage() const + { + if (original_ast) + return formatOriginalASTForErrorMessage(); + + return formatConvertedASTForErrorMessage(); + } + + /// Dump query tree to string + String dumpTree() const; + + /// Dump query tree to buffer + void dumpTree(WriteBuffer & buffer) const; + + class FormatState + { + public: + size_t getNodeId(const IQueryTreeNode * node); + + private: + std::unordered_map node_to_id; + }; + + /** Dump query tree to buffer starting with indent. + * + * Node must also dump its children. 
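+     *
+     * Illustrative sketch of a typical override (MY_NODE and getChild are hypothetical, compare ConstantNode::dumpTreeImpl):
+     *
+     *     buffer << std::string(indent, ' ') << "MY_NODE id: " << format_state.getNodeId(this);
+     *     buffer << '\n' << std::string(indent + 2, ' ') << "CHILD\n";
+     *     getChild()->dumpTreeImpl(buffer, format_state, indent + 4);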
+ */ + virtual void dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const = 0; + + /// Get query tree node children + QueryTreeNodes & getChildren() + { + return children; + } + + /// Get query tree node children + const QueryTreeNodes & getChildren() const + { + return children; + } + +protected: + /** Construct query tree node. + * Resize children to children size. + * Resize weak pointers to weak pointers size. + */ + explicit IQueryTreeNode(size_t children_size, size_t weak_pointers_size); + + /// Construct query tree node and resize children to children size + explicit IQueryTreeNode(size_t children_size); + + /** Subclass must compare its internal state with rhs node internal state and do not compare children or weak pointers to other + * query tree nodes. + */ + virtual bool isEqualImpl(const IQueryTreeNode & rhs) const = 0; + + /** Subclass must update tree hash with its internal state and do not update tree hash for children or weak pointers to other + * query tree nodes. + */ + virtual void updateTreeHashImpl(HashState & hash_state) const = 0; + + /** Subclass must clone its internal state and do not clone children or weak pointers to other + * query tree nodes. + */ + virtual QueryTreeNodePtr cloneImpl() const = 0; + + /// Subclass must convert its internal state and its children to AST + virtual ASTPtr toASTImpl() const = 0; + + QueryTreeNodes children; + QueryTreeWeakNodes weak_pointers; + +private: + String alias; + ASTPtr original_ast; +}; + +} diff --git a/src/Analyzer/IQueryTreePass.h b/src/Analyzer/IQueryTreePass.h new file mode 100644 index 00000000000..39b3d743ed3 --- /dev/null +++ b/src/Analyzer/IQueryTreePass.h @@ -0,0 +1,38 @@ +#pragma once + +#include + +#include + + +namespace DB +{ + +/** After query tree is build it can be later processed by query tree passes. + * This is abstract base class for all query tree passes. + * + * Query tree pass can make query tree modifications, after each pass query tree must be valid. + * Query tree pass must be isolated and perform only necessary query tree modifications for doing its job. + * Dependencies between passes must be avoided. + */ +class IQueryTreePass; +using QueryTreePassPtr = std::shared_ptr; +using QueryTreePasses = std::vector; + +class IQueryTreePass +{ +public: + virtual ~IQueryTreePass() = default; + + /// Get query tree pass name + virtual String getName() = 0; + + /// Get query tree pass description + virtual String getDescription() = 0; + + /// Run pass over query tree + virtual void run(QueryTreeNodePtr query_tree_node, ContextPtr context) = 0; + +}; + +} diff --git a/src/Analyzer/Identifier.h b/src/Analyzer/Identifier.h new file mode 100644 index 00000000000..2252ce2854f --- /dev/null +++ b/src/Analyzer/Identifier.h @@ -0,0 +1,412 @@ +#pragma once + +#include +#include + +#include +#include + +#include +#include + + +namespace DB +{ + +/** Identifier consists from identifier parts. + * Each identifier part is arbitrary long sequence of digits, underscores, lowercase and uppercase letters. + * Example: a, a.b, a.b.c. + */ +class Identifier +{ +public: + Identifier() = default; + + /// Create Identifier from parts + explicit Identifier(const std::vector & parts_) + : parts(parts_) + , full_name(boost::algorithm::join(parts, ".")) + { + } + + /// Create Identifier from parts + explicit Identifier(std::vector && parts_) + : parts(std::move(parts_)) + , full_name(boost::algorithm::join(parts, ".")) + { + } + + /// Create Identifier from full name, full name is split with '.' 
as separator. + explicit Identifier(const std::string & full_name_) + : full_name(full_name_) + { + boost::split(parts, full_name, [](char c) { return c == '.'; }); + } + + /// Create Identifier from full name, full name is split with '.' as separator. + explicit Identifier(std::string && full_name_) + : full_name(std::move(full_name_)) + { + boost::split(parts, full_name, [](char c) { return c == '.'; }); + } + + const std::string & getFullName() const + { + return full_name; + } + + const std::vector & getParts() const + { + return parts; + } + + size_t getPartsSize() const + { + return parts.size(); + } + + bool empty() const + { + return parts.empty(); + } + + bool isEmpty() const + { + return parts.empty(); + } + + bool isShort() const + { + return parts.size() == 1; + } + + bool isCompound() const + { + return parts.size() > 1; + } + + const std::string & at(size_t index) const + { + if (index >= parts.size()) + throw std::out_of_range("identifier access part is out of range"); + + return parts[index]; + } + + const std::string & operator[](size_t index) const + { + return parts[index]; + } + + const std::string & front() const + { + return parts.front(); + } + + const std::string & back() const + { + return parts.back(); + } + + /// Returns true, if identifier starts with part, false otherwise + bool startsWith(const std::string_view & part) + { + return !parts.empty() && parts[0] == part; + } + + /// Returns true, if identifier ends with part, false otherwise + bool endsWith(const std::string_view & part) + { + return !parts.empty() && parts.back() == part; + } + + using const_iterator = std::vector::const_iterator; + + const_iterator begin() const + { + return parts.begin(); + } + + const_iterator end() const + { + return parts.end(); + } + + void popFirst(size_t parts_to_remove_size) + { + assert(parts_to_remove_size <= parts.size()); + + size_t parts_size = parts.size(); + std::vector result_parts; + result_parts.reserve(parts_size - parts_to_remove_size); + + for (size_t i = parts_to_remove_size; i < parts_size; ++i) + result_parts.push_back(std::move(parts[i])); + + parts = std::move(result_parts); + full_name = boost::algorithm::join(parts, "."); + } + + void popFirst() + { + return popFirst(1); + } + + void popLast(size_t parts_to_remove_size) + { + assert(parts_to_remove_size <= parts.size()); + + for (size_t i = 0; i < parts_to_remove_size; ++i) + { + size_t last_part_size = parts.back().size(); + parts.pop_back(); + bool is_not_last = !parts.empty(); + full_name.resize(full_name.size() - (last_part_size + static_cast(is_not_last))); + } + } + + void popLast() + { + return popLast(1); + } + + void pop_back() /// NOLINT + { + popLast(); + } + + void push_back(std::string && part) /// NOLINT + { + parts.push_back(std::move(part)); + full_name += '.'; + full_name += parts.back(); + } + + void push_back(const std::string & part) /// NOLINT + { + parts.push_back(part); + full_name += '.'; + full_name += parts.back(); + } + + template + void emplace_back(Args&&... 
args) /// NOLINT + { + parts.emplace_back(std::forward(args)...); + full_name += '.'; + full_name += parts.back(); + } +private: + std::vector parts; + std::string full_name; +}; + +inline bool operator==(const Identifier & lhs, const Identifier & rhs) +{ + return lhs.getFullName() == rhs.getFullName(); +} + +inline bool operator!=(const Identifier & lhs, const Identifier & rhs) +{ + return !(lhs == rhs); +} + +inline std::ostream & operator<<(std::ostream & stream, const Identifier & identifier) +{ + stream << identifier.getFullName(); + return stream; +} + +using Identifiers = std::vector; + +/// View for Identifier +class IdentifierView +{ +public: + IdentifierView() = default; + + IdentifierView(const Identifier & identifier) /// NOLINT + : full_name_view(identifier.getFullName()) + , parts_start_it(identifier.begin()) + , parts_end_it(identifier.end()) + {} + + std::string_view getFullName() const + { + return full_name_view; + } + + size_t getPartsSize() const + { + return parts_end_it - parts_start_it; + } + + bool empty() const + { + return parts_start_it == parts_end_it; + } + + bool isEmpty() const + { + return parts_start_it == parts_end_it; + } + + bool isShort() const + { + return getPartsSize() == 1; + } + + bool isCompound() const + { + return getPartsSize() > 1; + } + + std::string_view at(size_t index) const + { + if (index >= getPartsSize()) + throw std::out_of_range("identifier access part is out of range"); + + return *(parts_start_it + index); + } + + std::string_view operator[](size_t index) const + { + return *(parts_start_it + index); + } + + std::string_view front() const + { + return *parts_start_it; + } + + std::string_view back() const + { + return *(parts_end_it - 1); + } + + bool startsWith(std::string_view part) const + { + return !isEmpty() && *parts_start_it == part; + } + + bool endsWith(std::string_view part) const + { + return !isEmpty() && *(parts_end_it - 1) == part; + } + + void popFirst(size_t parts_to_remove_size) + { + assert(parts_to_remove_size <= getPartsSize()); + + for (size_t i = 0; i < parts_to_remove_size; ++i) + { + size_t part_size = parts_start_it->size(); + ++parts_start_it; + bool is_not_last = parts_start_it != parts_end_it; + full_name_view.remove_prefix(part_size + is_not_last); + } + } + + void popFirst() + { + popFirst(1); + } + + void popLast(size_t parts_to_remove_size) + { + assert(parts_to_remove_size <= getPartsSize()); + + for (size_t i = 0; i < parts_to_remove_size; ++i) + { + size_t last_part_size = (parts_end_it - 1)->size(); + --parts_end_it; + bool is_not_last = parts_start_it != parts_end_it; + full_name_view.remove_suffix(last_part_size + is_not_last); + } + } + + void popLast() + { + popLast(1); + } + + using const_iterator = Identifier::const_iterator; + + const_iterator begin() const + { + return parts_start_it; + } + + const_iterator end() const + { + return parts_end_it; + } +private: + std::string_view full_name_view; + const_iterator parts_start_it; + const_iterator parts_end_it; +}; + +inline bool operator==(const IdentifierView & lhs, const IdentifierView & rhs) +{ + return lhs.getFullName() == rhs.getFullName(); +} + +inline bool operator!=(const IdentifierView & lhs, const IdentifierView & rhs) +{ + return !(lhs == rhs); +} + +inline std::ostream & operator<<(std::ostream & stream, const IdentifierView & identifier_view) +{ + stream << identifier_view.getFullName(); + return stream; +} + +} + +/// See https://fmt.dev/latest/api.html#formatting-user-defined-types + +template <> +struct fmt::formatter +{ + 
constexpr static auto parse(format_parse_context & ctx) + { + const auto * it = ctx.begin(); + const auto * end = ctx.end(); + + /// Only support {}. + if (it != end && *it != '}') + throw format_error("invalid format"); + + return it; + } + + template + auto format(const DB::Identifier & identifier, FormatContext & ctx) + { + return format_to(ctx.out(), "{}", identifier.getFullName()); + } +}; + +template <> +struct fmt::formatter +{ + constexpr static auto parse(format_parse_context & ctx) + { + const auto * it = ctx.begin(); + const auto * end = ctx.end(); + + /// Only support {}. + if (it != end && *it != '}') + throw format_error("invalid format"); + + return it; + } + + template + auto format(const DB::IdentifierView & identifier_view, FormatContext & ctx) + { + return format_to(ctx.out(), "{}", identifier_view.getFullName()); + } +}; diff --git a/src/Analyzer/IdentifierNode.cpp b/src/Analyzer/IdentifierNode.cpp new file mode 100644 index 00000000000..4efc7f515ea --- /dev/null +++ b/src/Analyzer/IdentifierNode.cpp @@ -0,0 +1,75 @@ +#include + +#include + +#include +#include + +#include + +namespace DB +{ + +IdentifierNode::IdentifierNode(Identifier identifier_) + : IQueryTreeNode(children_size) + , identifier(std::move(identifier_)) +{} + +IdentifierNode::IdentifierNode(Identifier identifier_, TableExpressionModifiers table_expression_modifiers_) + : IQueryTreeNode(children_size) + , identifier(std::move(identifier_)) + , table_expression_modifiers(std::move(table_expression_modifiers_)) +{} + +void IdentifierNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const +{ + buffer << std::string(indent, ' ') << "IDENTIFIER id: " << format_state.getNodeId(this); + + if (hasAlias()) + buffer << ", alias: " << getAlias(); + + buffer << ", identifier: " << identifier.getFullName(); + + if (table_expression_modifiers) + { + buffer << ", "; + table_expression_modifiers->dump(buffer); + } +} + +bool IdentifierNode::isEqualImpl(const IQueryTreeNode & rhs) const +{ + const auto & rhs_typed = assert_cast(rhs); + + if (table_expression_modifiers && rhs_typed.table_expression_modifiers && table_expression_modifiers != rhs_typed.table_expression_modifiers) + return false; + else if (table_expression_modifiers && !rhs_typed.table_expression_modifiers) + return false; + else if (!table_expression_modifiers && rhs_typed.table_expression_modifiers) + return false; + + return identifier == rhs_typed.identifier; +} + +void IdentifierNode::updateTreeHashImpl(HashState & state) const +{ + const auto & identifier_name = identifier.getFullName(); + state.update(identifier_name.size()); + state.update(identifier_name); + + if (table_expression_modifiers) + table_expression_modifiers->updateTreeHash(state); +} + +QueryTreeNodePtr IdentifierNode::cloneImpl() const +{ + return std::make_shared(identifier); +} + +ASTPtr IdentifierNode::toASTImpl() const +{ + auto identifier_parts = identifier.getParts(); + return std::make_shared(std::move(identifier_parts)); +} + +} diff --git a/src/Analyzer/IdentifierNode.h b/src/Analyzer/IdentifierNode.h new file mode 100644 index 00000000000..358511d1f90 --- /dev/null +++ b/src/Analyzer/IdentifierNode.h @@ -0,0 +1,71 @@ +#pragma once + +#include +#include +#include + +namespace DB +{ + +/** Identifier node represents identifier in query tree. + * Example: SELECT a FROM test_table. + * a - is identifier. + * test_table - is identifier. + * + * Identifier resolution must be done during query analysis pass. 
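+ *
+ * Illustrative sketch of working with the wrapped Identifier (the concrete parts are arbitrary examples):
+ *
+ *     Identifier identifier("database_name.table_name.column_name");
+ *     identifier.getPartsSize(); /// 3
+ *     identifier.front();        /// "database_name"
+ *     identifier.popFirst();     /// identifier is now "table_name.column_name"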
+ */ +class IdentifierNode final : public IQueryTreeNode +{ +public: + /// Construct identifier node with identifier + explicit IdentifierNode(Identifier identifier_); + + /** Construct identifier node with identifier and table expression modifiers + * when identifier node is part of JOIN TREE. + * + * Example: SELECT * FROM test_table SAMPLE 0.1 OFFSET 0.1 FINAL + */ + explicit IdentifierNode(Identifier identifier_, TableExpressionModifiers table_expression_modifiers_); + + /// Get identifier + const Identifier & getIdentifier() const + { + return identifier; + } + + /// Return true if identifier node has table expression modifiers, false otherwise + bool hasTableExpressionModifiers() const + { + return table_expression_modifiers.has_value(); + } + + /// Get table expression modifiers + const std::optional & getTableExpressionModifiers() const + { + return table_expression_modifiers; + } + + QueryTreeNodeType getNodeType() const override + { + return QueryTreeNodeType::IDENTIFIER; + } + + void dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const override; + +protected: + bool isEqualImpl(const IQueryTreeNode & rhs) const override; + + void updateTreeHashImpl(HashState & state) const override; + + QueryTreeNodePtr cloneImpl() const override; + + ASTPtr toASTImpl() const override; + +private: + Identifier identifier; + std::optional table_expression_modifiers; + + static constexpr size_t children_size = 0; +}; + +} diff --git a/src/Analyzer/InDepthQueryTreeVisitor.h b/src/Analyzer/InDepthQueryTreeVisitor.h new file mode 100644 index 00000000000..96972024d87 --- /dev/null +++ b/src/Analyzer/InDepthQueryTreeVisitor.h @@ -0,0 +1,87 @@ +#pragma once + +#include + +#include + + +namespace DB +{ + +/** Visitor that traverse query tree in depth. + * Derived class must implement `visitImpl` method. + * Additionally subclass can control if child need to be visited using `needChildVisit` method, by + * default all node children are visited. + * By default visitor traverse tree from top to bottom, if bottom to top traverse is required subclass + * can override `shouldTraverseTopToBottom` method. 
+ * + * Usage example: + * class FunctionsVisitor : public InDepthQueryTreeVisitor + * { + * void visitImpl(VisitQueryTreeNodeType & query_tree_node) + * { + * if (query_tree_node->getNodeType() == QueryTreeNodeType::FUNCTION) + * processFunctionNode(query_tree_node); + * } + * } + */ +template +class InDepthQueryTreeVisitor +{ +public: + using VisitQueryTreeNodeType = std::conditional_t; + + /// Return true if visitor should traverse tree top to bottom, false otherwise + bool shouldTraverseTopToBottom() const + { + return true; + } + + /// Return true if visitor should visit child, false otherwise + bool needChildVisit(VisitQueryTreeNodeType & parent [[maybe_unused]], VisitQueryTreeNodeType & child [[maybe_unused]]) + { + return true; + } + + void visit(VisitQueryTreeNodeType & query_tree_node) + { + bool traverse_top_to_bottom = getDerived().shouldTraverseTopToBottom(); + if (!traverse_top_to_bottom) + visitChildren(query_tree_node); + + getDerived().visitImpl(query_tree_node); + + if (traverse_top_to_bottom) + visitChildren(query_tree_node); + } + +private: + Derived & getDerived() + { + return *static_cast(this); + } + + const Derived & getDerived() const + { + return *static_cast(this); + } + + void visitChildren(VisitQueryTreeNodeType & expression) + { + for (auto & child : expression->getChildren()) + { + if (!child) + continue; + + bool need_visit_child = getDerived().needChildVisit(expression, child); + + if (need_visit_child) + visit(child); + } + } +}; + +template +using ConstInDepthQueryTreeVisitor = InDepthQueryTreeVisitor; + +} diff --git a/src/Analyzer/InterpolateNode.cpp b/src/Analyzer/InterpolateNode.cpp new file mode 100644 index 00000000000..c8c61b05853 --- /dev/null +++ b/src/Analyzer/InterpolateNode.cpp @@ -0,0 +1,57 @@ +#include + +#include + +#include +#include + +#include + +namespace DB +{ + +InterpolateNode::InterpolateNode(QueryTreeNodePtr expression_, QueryTreeNodePtr interpolate_expression_) + : IQueryTreeNode(children_size) +{ + children[expression_child_index] = std::move(expression_); + children[interpolate_expression_child_index] = std::move(interpolate_expression_); +} + +void InterpolateNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const +{ + buffer << std::string(indent, ' ') << "INTERPOLATE id: " << format_state.getNodeId(this); + + buffer << '\n' << std::string(indent + 2, ' ') << "EXPRESSION\n"; + getExpression()->dumpTreeImpl(buffer, format_state, indent + 4); + + buffer << '\n' << std::string(indent + 2, ' ') << "INTERPOLATE_EXPRESSION\n"; + getInterpolateExpression()->dumpTreeImpl(buffer, format_state, indent + 4); +} + +bool InterpolateNode::isEqualImpl(const IQueryTreeNode &) const +{ + /// No state in interpolate node + return true; +} + +void InterpolateNode::updateTreeHashImpl(HashState &) const +{ + /// No state in interpolate node +} + +QueryTreeNodePtr InterpolateNode::cloneImpl() const +{ + return std::make_shared(nullptr /*expression*/, nullptr /*interpolate_expression*/); +} + +ASTPtr InterpolateNode::toASTImpl() const +{ + auto result = std::make_shared(); + result->column = getExpression()->toAST()->getColumnName(); + result->children.push_back(getInterpolateExpression()->toAST()); + result->expr = result->children.back(); + + return result; +} + +} diff --git a/src/Analyzer/InterpolateNode.h b/src/Analyzer/InterpolateNode.h new file mode 100644 index 00000000000..5764ea561c0 --- /dev/null +++ b/src/Analyzer/InterpolateNode.h @@ -0,0 +1,70 @@ +#pragma once + +#include +#include + +namespace DB +{ + 
+/** Interpolate node represents expression interpolation in INTERPOLATE section that is part of ORDER BY section in query tree. + * + * Example: SELECT * FROM test_table ORDER BY id WITH FILL INTERPOLATE (value AS value + 1); + * value - expression to interpolate. + * value + 1 - interpolate expression. + */ +class InterpolateNode; +using InterpolateNodePtr = std::shared_ptr; + +class InterpolateNode final : public IQueryTreeNode +{ +public: + /// Initialize interpolate node with expression and interpolate expression + explicit InterpolateNode(QueryTreeNodePtr expression_, QueryTreeNodePtr interpolate_expression_); + + /// Get expression to interpolate + const QueryTreeNodePtr & getExpression() const + { + return children[expression_child_index]; + } + + /// Get expression to interpolate + QueryTreeNodePtr & getExpression() + { + return children[expression_child_index]; + } + + /// Get interpolate expression + const QueryTreeNodePtr & getInterpolateExpression() const + { + return children[interpolate_expression_child_index]; + } + + /// Get interpolate expression + QueryTreeNodePtr & getInterpolateExpression() + { + return children[interpolate_expression_child_index]; + } + + QueryTreeNodeType getNodeType() const override + { + return QueryTreeNodeType::INTERPOLATE; + } + + void dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const override; + +protected: + bool isEqualImpl(const IQueryTreeNode & rhs) const override; + + void updateTreeHashImpl(HashState & hash_state) const override; + + QueryTreeNodePtr cloneImpl() const override; + + ASTPtr toASTImpl() const override; + +private: + static constexpr size_t expression_child_index = 0; + static constexpr size_t interpolate_expression_child_index = 1; + static constexpr size_t children_size = interpolate_expression_child_index + 1; +}; + +} diff --git a/src/Analyzer/JoinNode.cpp b/src/Analyzer/JoinNode.cpp new file mode 100644 index 00000000000..28a0c4ad7e0 --- /dev/null +++ b/src/Analyzer/JoinNode.cpp @@ -0,0 +1,116 @@ +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + +#include + +namespace DB +{ + +JoinNode::JoinNode(QueryTreeNodePtr left_table_expression_, + QueryTreeNodePtr right_table_expression_, + QueryTreeNodePtr join_expression_, + JoinLocality locality_, + JoinStrictness strictness_, + JoinKind kind_) + : IQueryTreeNode(children_size) + , locality(locality_) + , strictness(strictness_) + , kind(kind_) +{ + children[left_table_expression_child_index] = std::move(left_table_expression_); + children[right_table_expression_child_index] = std::move(right_table_expression_); + children[join_expression_child_index] = std::move(join_expression_); +} + +ASTPtr JoinNode::toASTTableJoin() const +{ + auto join_ast = std::make_shared(); + join_ast->locality = locality; + join_ast->strictness = strictness; + join_ast->kind = kind; + + if (children[join_expression_child_index]) + { + auto join_expression_ast = children[join_expression_child_index]->toAST(); + + if (children[join_expression_child_index]->getNodeType() == QueryTreeNodeType::LIST) + join_ast->using_expression_list = std::move(join_expression_ast); + else + join_ast->on_expression = std::move(join_expression_ast); + } + + return join_ast; +} + +void JoinNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const +{ + buffer << std::string(indent, ' ') << "JOIN id: " << format_state.getNodeId(this); + + if (locality != JoinLocality::Unspecified) + buffer << ", locality: " << 
toString(locality); + + if (strictness != JoinStrictness::Unspecified) + buffer << ", strictness: " << toString(strictness); + + buffer << ", kind: " << toString(kind); + + buffer << '\n' << std::string(indent + 2, ' ') << "LEFT TABLE EXPRESSION\n"; + getLeftTableExpression()->dumpTreeImpl(buffer, format_state, indent + 4); + + buffer << '\n' << std::string(indent + 2, ' ') << "RIGHT TABLE EXPRESSION\n"; + getRightTableExpression()->dumpTreeImpl(buffer, format_state, indent + 4); + + if (getJoinExpression()) + { + buffer << '\n' << std::string(indent + 2, ' ') << "JOIN EXPRESSION\n"; + getJoinExpression()->dumpTreeImpl(buffer, format_state, indent + 4); + } +} + +bool JoinNode::isEqualImpl(const IQueryTreeNode & rhs) const +{ + const auto & rhs_typed = assert_cast(rhs); + return locality == rhs_typed.locality && strictness == rhs_typed.strictness && kind == rhs_typed.kind; +} + +void JoinNode::updateTreeHashImpl(HashState & state) const +{ + state.update(locality); + state.update(strictness); + state.update(kind); +} + +QueryTreeNodePtr JoinNode::cloneImpl() const +{ + return std::make_shared(getLeftTableExpression(), getRightTableExpression(), getJoinExpression(), locality, strictness, kind); +} + +ASTPtr JoinNode::toASTImpl() const +{ + ASTPtr tables_in_select_query_ast = std::make_shared(); + + addTableExpressionOrJoinIntoTablesInSelectQuery(tables_in_select_query_ast, children[left_table_expression_child_index]); + + size_t join_table_index = tables_in_select_query_ast->children.size(); + + auto join_ast = toASTTableJoin(); + + addTableExpressionOrJoinIntoTablesInSelectQuery(tables_in_select_query_ast, children[right_table_expression_child_index]); + + auto & table_element = tables_in_select_query_ast->children.at(join_table_index)->as(); + table_element.children.push_back(std::move(join_ast)); + table_element.table_join = table_element.children.back(); + + return tables_in_select_query_ast; +} + +} diff --git a/src/Analyzer/JoinNode.h b/src/Analyzer/JoinNode.h new file mode 100644 index 00000000000..15ba11a0122 --- /dev/null +++ b/src/Analyzer/JoinNode.h @@ -0,0 +1,152 @@ +#pragma once + +#include + +#include +#include +#include + +#include +#include + +#include + +namespace DB +{ + +/** Join node represents join in query tree. + * + * For JOIN without join expression, JOIN expression is null. + * Example: SELECT id FROM test_table_1 AS t1, test_table_2 AS t2; + * + * For JOIN with USING, JOIN expression contains list of identifier nodes. These nodes must be resolved + * during query analysis pass. + * Example: SELECT id FROM test_table_1 AS t1 INNER JOIN test_table_2 AS t2 USING (id); + * + * For JOIN with ON, JOIN expression contains single expression. + * Example: SELECT id FROM test_table_1 AS t1 INNER JOIN test_table_2 AS t2 ON t1.id = t2.id; + */ +class JoinNode; +using JoinNodePtr = std::shared_ptr; + +class JoinNode final : public IQueryTreeNode +{ +public: + /** Construct join node with left table expression, right table expression and join expression. + * Example: SELECT id FROM test_table_1 INNER JOIN test_table_2 ON expression. + * + * test_table_1 - left table expression. + * test_table_2 - right table expression. + * expression - join expression. 
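As toASTTableJoin above shows, the only structural difference between the USING and ON forms is whether the join expression child is a list of shared column names or a single predicate. A standalone sketch of rendering the two forms, in plain standard C++ rather than the real query tree classes (all names illustrative, a std::variant standing in for the child node):

#include <iostream>
#include <string>
#include <variant>
#include <vector>

/// USING (a, b, ...) is a list of column names shared by both sides;
/// ON <expr> is a single arbitrary predicate, kept here as raw text.
using UsingColumns = std::vector<std::string>;
using OnPredicate = std::string;
using JoinExpression = std::variant<std::monostate, UsingColumns, OnPredicate>;

std::string renderJoinExpression(const JoinExpression & expression)
{
    if (const auto * columns = std::get_if<UsingColumns>(&expression))
    {
        std::string result = "USING (";
        for (size_t i = 0; i < columns->size(); ++i)
        {
            if (i != 0)
                result += ", ";
            result += (*columns)[i];
        }
        return result + ")";
    }

    if (const auto * predicate = std::get_if<OnPredicate>(&expression))
        return "ON " + *predicate;

    return {}; /// cross join / comma join: no join expression at all
}

int main()
{
    std::cout << renderJoinExpression(UsingColumns{"id"}) << '\n';           /// USING (id)
    std::cout << renderJoinExpression(OnPredicate{"t1.id = t2.id"}) << '\n'; /// ON t1.id = t2.id
}

Keeping the distinction in the shape of the stored child (a list node versus an ordinary expression), as the real JoinNode does, avoids a separate flag that could drift out of sync with the stored expression.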
+ */ + JoinNode(QueryTreeNodePtr left_table_expression_, + QueryTreeNodePtr right_table_expression_, + QueryTreeNodePtr join_expression_, + JoinLocality locality_, + JoinStrictness strictness_, + JoinKind kind_); + + /// Get left table expression + const QueryTreeNodePtr & getLeftTableExpression() const + { + return children[left_table_expression_child_index]; + } + + /// Get left table expression + QueryTreeNodePtr & getLeftTableExpression() + { + return children[left_table_expression_child_index]; + } + + /// Get right table expression + const QueryTreeNodePtr & getRightTableExpression() const + { + return children[right_table_expression_child_index]; + } + + /// Get right table expression + QueryTreeNodePtr & getRightTableExpression() + { + return children[right_table_expression_child_index]; + } + + /// Returns true if join has join expression, false otherwise + bool hasJoinExpression() const + { + return children[join_expression_child_index] != nullptr; + } + + /// Get join expression + const QueryTreeNodePtr & getJoinExpression() const + { + return children[join_expression_child_index]; + } + + /// Get join expression + QueryTreeNodePtr & getJoinExpression() + { + return children[join_expression_child_index]; + } + + /// Returns true if join has USING join expression, false otherwise + bool isUsingJoinExpression() const + { + return hasJoinExpression() && getJoinExpression()->getNodeType() == QueryTreeNodeType::LIST; + } + + /// Returns true if join has ON join expression, false otherwise + bool isOnJoinExpression() const + { + return hasJoinExpression() && getJoinExpression()->getNodeType() != QueryTreeNodeType::LIST; + } + + /// Get join locality + JoinLocality getLocality() const + { + return locality; + } + + /// Get join strictness + JoinStrictness getStrictness() const + { + return strictness; + } + + /// Get join kind + JoinKind getKind() const + { + return kind; + } + + /// Convert join node to ASTTableJoin + ASTPtr toASTTableJoin() const; + + QueryTreeNodeType getNodeType() const override + { + return QueryTreeNodeType::JOIN; + } + + void dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const override; + +protected: + bool isEqualImpl(const IQueryTreeNode & rhs) const override; + + void updateTreeHashImpl(HashState & state) const override; + + QueryTreeNodePtr cloneImpl() const override; + + ASTPtr toASTImpl() const override; + +private: + JoinLocality locality = JoinLocality::Unspecified; + JoinStrictness strictness = JoinStrictness::Unspecified; + JoinKind kind = JoinKind::Inner; + + static constexpr size_t left_table_expression_child_index = 0; + static constexpr size_t right_table_expression_child_index = 1; + static constexpr size_t join_expression_child_index = 2; + static constexpr size_t children_size = join_expression_child_index + 1; +}; + +} + diff --git a/src/Analyzer/LambdaNode.cpp b/src/Analyzer/LambdaNode.cpp new file mode 100644 index 00000000000..809f73072d2 --- /dev/null +++ b/src/Analyzer/LambdaNode.cpp @@ -0,0 +1,88 @@ +#include + +#include + +#include +#include +#include + +namespace DB +{ + +LambdaNode::LambdaNode(Names argument_names_, QueryTreeNodePtr expression_) + : IQueryTreeNode(children_size) + , argument_names(std::move(argument_names_)) +{ + auto arguments_list_node = std::make_shared(); + auto & nodes = arguments_list_node->getNodes(); + + size_t argument_names_size = argument_names.size(); + nodes.reserve(argument_names_size); + + for (size_t i = 0; i < argument_names_size; ++i) + 
nodes.push_back(std::make_shared(Identifier{argument_names[i]})); + + children[arguments_child_index] = std::move(arguments_list_node); + children[expression_child_index] = std::move(expression_); +} + +void LambdaNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const +{ + buffer << std::string(indent, ' ') << "LAMBDA id: " << format_state.getNodeId(this); + + if (hasAlias()) + buffer << ", alias: " << getAlias(); + + const auto & arguments = getArguments(); + if (!arguments.getNodes().empty()) + { + buffer << '\n' << std::string(indent + 2, ' ') << "ARGUMENTS " << '\n'; + getArguments().dumpTreeImpl(buffer, format_state, indent + 4); + } + + buffer << '\n' << std::string(indent + 2, ' ') << "EXPRESSION " << '\n'; + getExpression()->dumpTreeImpl(buffer, format_state, indent + 4); +} + +bool LambdaNode::isEqualImpl(const IQueryTreeNode & rhs) const +{ + const auto & rhs_typed = assert_cast(rhs); + return argument_names == rhs_typed.argument_names; +} + +void LambdaNode::updateTreeHashImpl(HashState & state) const +{ + state.update(argument_names.size()); + for (const auto & argument_name : argument_names) + { + state.update(argument_name.size()); + state.update(argument_name); + } +} + +QueryTreeNodePtr LambdaNode::cloneImpl() const +{ + return std::make_shared(argument_names, getExpression()); +} + +ASTPtr LambdaNode::toASTImpl() const +{ + auto lambda_function_arguments_ast = std::make_shared(); + + auto tuple_function = std::make_shared(); + tuple_function->name = "tuple"; + tuple_function->children.push_back(children[arguments_child_index]->toAST()); + tuple_function->arguments = tuple_function->children.back(); + + lambda_function_arguments_ast->children.push_back(std::move(tuple_function)); + lambda_function_arguments_ast->children.push_back(children[expression_child_index]->toAST()); + + auto lambda_function_ast = std::make_shared(); + lambda_function_ast->name = "lambda"; + lambda_function_ast->children.push_back(std::move(lambda_function_arguments_ast)); + lambda_function_ast->arguments = lambda_function_ast->children.back(); + + return lambda_function_ast; +} + +} diff --git a/src/Analyzer/LambdaNode.h b/src/Analyzer/LambdaNode.h new file mode 100644 index 00000000000..ad80c645656 --- /dev/null +++ b/src/Analyzer/LambdaNode.h @@ -0,0 +1,116 @@ +#pragma once + +#include +#include +#include + +#include + +namespace DB +{ + +/** Lambda node represents lambda expression in query tree. + * + * Lambda consist of argument names and lambda expression body. + * Lambda expression body does not necessary use lambda arguments. Example: SELECT arrayMap(x -> 1, [1, 2, 3]) + * + * Initially lambda is initialized with argument names and lambda body expression. + * + * Lambda expression result type can depend on arguments types. + * Example: WITH (x -> x) as lambda SELECT lambda(1), lambda('string_value'). + * + * During query analysis pass lambdas must be resolved. + * Lambda resolve must set concrete lambda arguments and resolve lambda expression body. + * In query tree lambda arguments are represented by ListNode. + * If client modified lambda arguments array its size must be equal to initial lambda argument names array. 
+ * + * Examples: + * WITH (x -> x + 1) as lambda SELECT lambda(1); + * SELECT arrayMap(x -> x + 1, [1,2,3]); + */ +class LambdaNode; +using LambdaNodePtr = std::shared_ptr; + +class LambdaNode final : public IQueryTreeNode +{ +public: + /// Initialize lambda with argument names and lambda body expression + explicit LambdaNode(Names argument_names_, QueryTreeNodePtr expression_); + + /// Get argument names + const Names & getArgumentNames() const + { + return argument_names; + } + + /// Get arguments + const ListNode & getArguments() const + { + return children[arguments_child_index]->as(); + } + + /// Get arguments + ListNode & getArguments() + { + return children[arguments_child_index]->as(); + } + + /// Get arguments node + const QueryTreeNodePtr & getArgumentsNode() const + { + return children[arguments_child_index]; + } + + /// Get arguments node + QueryTreeNodePtr & getArgumentsNode() + { + return children[arguments_child_index]; + } + + /// Get expression + const QueryTreeNodePtr & getExpression() const + { + return children[expression_child_index]; + } + + /// Get expression + QueryTreeNodePtr & getExpression() + { + return children[expression_child_index]; + } + + QueryTreeNodeType getNodeType() const override + { + return QueryTreeNodeType::LAMBDA; + } + + DataTypePtr getResultType() const override + { + return getExpression()->getResultType(); + } + + ConstantValuePtr getConstantValueOrNull() const override + { + return getExpression()->getConstantValueOrNull(); + } + + void dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const override; + +protected: + bool isEqualImpl(const IQueryTreeNode & rhs) const override; + + void updateTreeHashImpl(HashState & state) const override; + + QueryTreeNodePtr cloneImpl() const override; + + ASTPtr toASTImpl() const override; + +private: + Names argument_names; + + static constexpr size_t arguments_child_index = 0; + static constexpr size_t expression_child_index = 1; + static constexpr size_t children_size = expression_child_index + 1; +}; + +} diff --git a/src/Analyzer/ListNode.cpp b/src/Analyzer/ListNode.cpp new file mode 100644 index 00000000000..7bbb884fa7f --- /dev/null +++ b/src/Analyzer/ListNode.cpp @@ -0,0 +1,70 @@ +#include + +#include + +#include +#include +#include + +#include + +namespace DB +{ + +ListNode::ListNode() + : IQueryTreeNode(0 /*children_size*/) +{} + +ListNode::ListNode(QueryTreeNodes nodes) + : IQueryTreeNode(0 /*children_size*/) +{ + children = std::move(nodes); +} + +void ListNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const +{ + buffer << std::string(indent, ' ') << "LIST id: " << format_state.getNodeId(this); + + size_t children_size = children.size(); + buffer << ", nodes: " << children_size << '\n'; + + for (size_t i = 0; i < children_size; ++i) + { + const auto & node = children[i]; + node->dumpTreeImpl(buffer, format_state, indent + 2); + + if (i + 1 != children_size) + buffer << '\n'; + } +} + +bool ListNode::isEqualImpl(const IQueryTreeNode &) const +{ + /// No state + return true; +} + +void ListNode::updateTreeHashImpl(HashState &) const +{ + /// No state +} + +QueryTreeNodePtr ListNode::cloneImpl() const +{ + return std::make_shared(); +} + +ASTPtr ListNode::toASTImpl() const +{ + auto expression_list_ast = std::make_shared(); + + size_t children_size = children.size(); + expression_list_ast->children.resize(children_size); + + for (size_t i = 0; i < children_size; ++i) + expression_list_ast->children[i] = children[i]->toAST(); + + 
return expression_list_ast; +} + +} diff --git a/src/Analyzer/ListNode.h b/src/Analyzer/ListNode.h new file mode 100644 index 00000000000..5b328d0f870 --- /dev/null +++ b/src/Analyzer/ListNode.h @@ -0,0 +1,54 @@ +#pragma once + +#include + +namespace DB +{ + +/** List node represents list of query tree nodes in query tree. + * + * Example: SELECT column_1, 1, 'constant_value' FROM table. + * column_1, 1, 'constant_value' is list query tree node. + */ +class ListNode; +using ListNodePtr = std::shared_ptr; + +class ListNode final : public IQueryTreeNode +{ +public: + /// Initialize list node with empty nodes + ListNode(); + + /// Initialize list node with nodes + explicit ListNode(QueryTreeNodes nodes); + + /// Get list nodes + const QueryTreeNodes & getNodes() const + { + return children; + } + + /// Get list nodes + QueryTreeNodes & getNodes() + { + return children; + } + + QueryTreeNodeType getNodeType() const override + { + return QueryTreeNodeType::LIST; + } + + void dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const override; + +protected: + bool isEqualImpl(const IQueryTreeNode & rhs) const override; + + void updateTreeHashImpl(HashState &) const override; + + QueryTreeNodePtr cloneImpl() const override; + + ASTPtr toASTImpl() const override; +}; + +} diff --git a/src/Analyzer/MatcherNode.cpp b/src/Analyzer/MatcherNode.cpp new file mode 100644 index 00000000000..9d822771087 --- /dev/null +++ b/src/Analyzer/MatcherNode.cpp @@ -0,0 +1,280 @@ +#include + +#include + +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace DB +{ + +const char * toString(MatcherNodeType matcher_node_type) +{ + switch (matcher_node_type) + { + case MatcherNodeType::ASTERISK: + return "ASTERISK"; + case MatcherNodeType::COLUMNS_LIST: + return "COLUMNS_LIST"; + case MatcherNodeType::COLUMNS_REGEXP: + return "COLUMNS_REGEXP"; + } +} + +MatcherNode::MatcherNode(ColumnTransformersNodes column_transformers_) + : MatcherNode(MatcherNodeType::ASTERISK, + {} /*qualified_identifier*/, + {} /*columns_identifiers*/, + {} /*columns_matcher*/, + std::move(column_transformers_) /*column_transformers*/) +{ +} + +MatcherNode::MatcherNode(Identifier qualified_identifier_, ColumnTransformersNodes column_transformers_) + : MatcherNode(MatcherNodeType::ASTERISK, + std::move(qualified_identifier_), + {} /*columns_identifiers*/, + {} /*columns_matcher*/, + std::move(column_transformers_)) +{ +} + +MatcherNode::MatcherNode(std::shared_ptr columns_matcher_, ColumnTransformersNodes column_transformers_) + : MatcherNode(MatcherNodeType::COLUMNS_REGEXP, + {} /*qualified_identifier*/, + {} /*columns_identifiers*/, + std::move(columns_matcher_), + std::move(column_transformers_)) +{ +} + +MatcherNode::MatcherNode(Identifier qualified_identifier_, std::shared_ptr columns_matcher_, ColumnTransformersNodes column_transformers_) + : MatcherNode(MatcherNodeType::COLUMNS_REGEXP, + std::move(qualified_identifier_), + {} /*columns_identifiers*/, + std::move(columns_matcher_), + std::move(column_transformers_)) +{ +} + +MatcherNode::MatcherNode(Identifiers columns_identifiers_, ColumnTransformersNodes column_transformers_) + : MatcherNode(MatcherNodeType::COLUMNS_LIST, + {} /*qualified_identifier*/, + std::move(columns_identifiers_), + {} /*columns_matcher*/, + std::move(column_transformers_)) +{ +} + +MatcherNode::MatcherNode(Identifier qualified_identifier_, Identifiers columns_identifiers_, ColumnTransformersNodes column_transformers_) + : 
MatcherNode(MatcherNodeType::COLUMNS_LIST, + std::move(qualified_identifier_), + std::move(columns_identifiers_), + {} /*columns_matcher*/, + std::move(column_transformers_)) +{ +} + +MatcherNode::MatcherNode(MatcherNodeType matcher_type_, + Identifier qualified_identifier_, + Identifiers columns_identifiers_, + std::shared_ptr columns_matcher_, + ColumnTransformersNodes column_transformers_) + : IQueryTreeNode(children_size) + , matcher_type(matcher_type_) + , qualified_identifier(qualified_identifier_) + , columns_identifiers(columns_identifiers_) + , columns_matcher(columns_matcher_) +{ + auto column_transformers_list_node = std::make_shared(); + + auto & column_transformers_nodes = column_transformers_list_node->getNodes(); + column_transformers_nodes.reserve(column_transformers_.size()); + + for (auto && column_transformer : column_transformers_) + column_transformers_nodes.emplace_back(std::move(column_transformer)); + + children[column_transformers_child_index] = std::move(column_transformers_list_node); + + columns_identifiers_set.reserve(columns_identifiers.size()); + + for (auto & column_identifier : columns_identifiers) + columns_identifiers_set.insert(column_identifier.getFullName()); +} + +bool MatcherNode::isMatchingColumn(const std::string & column_name) +{ + if (matcher_type == MatcherNodeType::ASTERISK) + return true; + + if (columns_matcher) + return RE2::PartialMatch(column_name, *columns_matcher); + + return columns_identifiers_set.contains(column_name); +} + +void MatcherNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const +{ + buffer << std::string(indent, ' ') << "MATCHER id: " << format_state.getNodeId(this); + + buffer << ", matcher_type: " << toString(matcher_type); + + if (!qualified_identifier.empty()) + buffer << ", qualified_identifier: " << qualified_identifier.getFullName(); + + if (columns_matcher) + { + buffer << ", columns_pattern: " << columns_matcher->pattern(); + } + else if (matcher_type == MatcherNodeType::COLUMNS_LIST) + { + buffer << ", " << fmt::format("column_identifiers: {}", fmt::join(columns_identifiers, ", ")); + } + + const auto & column_transformers_list = getColumnTransformers(); + if (!column_transformers_list.getNodes().empty()) + { + buffer << '\n'; + column_transformers_list.dumpTreeImpl(buffer, format_state, indent + 2); + } +} + +bool MatcherNode::isEqualImpl(const IQueryTreeNode & rhs) const +{ + const auto & rhs_typed = assert_cast(rhs); + if (matcher_type != rhs_typed.matcher_type || + qualified_identifier != rhs_typed.qualified_identifier || + columns_identifiers != rhs_typed.columns_identifiers || + columns_identifiers_set != rhs_typed.columns_identifiers_set) + return false; + + const auto & rhs_columns_matcher = rhs_typed.columns_matcher; + + if (!columns_matcher && !rhs_columns_matcher) + return true; + else if (columns_matcher && !rhs_columns_matcher) + return false; + else if (!columns_matcher && rhs_columns_matcher) + return false; + + return columns_matcher->pattern() == rhs_columns_matcher->pattern(); +} + +void MatcherNode::updateTreeHashImpl(HashState & hash_state) const +{ + hash_state.update(static_cast(matcher_type)); + + const auto & qualified_identifier_full_name = qualified_identifier.getFullName(); + hash_state.update(qualified_identifier_full_name.size()); + hash_state.update(qualified_identifier_full_name); + + for (const auto & identifier : columns_identifiers) + { + const auto & identifier_full_name = identifier.getFullName(); + 
hash_state.update(identifier_full_name.size()); + hash_state.update(identifier_full_name); + } + + if (columns_matcher) + { + const auto & columns_matcher_pattern = columns_matcher->pattern(); + hash_state.update(columns_matcher_pattern.size()); + hash_state.update(columns_matcher_pattern); + } +} + +QueryTreeNodePtr MatcherNode::cloneImpl() const +{ + MatcherNodePtr matcher_node = std::make_shared(); + + matcher_node->matcher_type = matcher_type; + matcher_node->qualified_identifier = qualified_identifier; + matcher_node->columns_identifiers = columns_identifiers; + matcher_node->columns_matcher = columns_matcher; + matcher_node->columns_identifiers_set = columns_identifiers_set; + + return matcher_node; +} + +ASTPtr MatcherNode::toASTImpl() const +{ + ASTPtr result; + + if (matcher_type == MatcherNodeType::ASTERISK) + { + if (qualified_identifier.empty()) + { + result = std::make_shared(); + } + else + { + auto qualified_asterisk = std::make_shared(); + + auto identifier_parts = qualified_identifier.getParts(); + qualified_asterisk->children.push_back(std::make_shared(std::move(identifier_parts))); + + result = qualified_asterisk; + } + } + else if (columns_matcher) + { + if (qualified_identifier.empty()) + { + auto regexp_matcher = std::make_shared(); + regexp_matcher->setPattern(columns_matcher->pattern()); + result = regexp_matcher; + } + else + { + auto regexp_matcher = std::make_shared(); + regexp_matcher->setPattern(columns_matcher->pattern()); + + auto identifier_parts = qualified_identifier.getParts(); + regexp_matcher->children.push_back(std::make_shared(std::move(identifier_parts))); + + result = regexp_matcher; + } + } + else + { + auto column_list = std::make_shared(); + column_list->children.reserve(columns_identifiers.size()); + + for (const auto & identifier : columns_identifiers) + { + auto identifier_parts = identifier.getParts(); + column_list->children.push_back(std::make_shared(std::move(identifier_parts))); + } + + if (qualified_identifier.empty()) + { + auto columns_list_matcher = std::make_shared(); + columns_list_matcher->column_list = std::move(column_list); + result = columns_list_matcher; + } + else + { + auto columns_list_matcher = std::make_shared(); + columns_list_matcher->column_list = std::move(column_list); + + auto identifier_parts = qualified_identifier.getParts(); + columns_list_matcher->children.push_back(std::make_shared(std::move(identifier_parts))); + + result = columns_list_matcher; + } + } + + for (const auto & child : children) + result->children.push_back(child->toAST()); + + return result; +} + +} diff --git a/src/Analyzer/MatcherNode.h b/src/Analyzer/MatcherNode.h new file mode 100644 index 00000000000..e79c1cb4bf2 --- /dev/null +++ b/src/Analyzer/MatcherNode.h @@ -0,0 +1,170 @@ +#pragma once + +#include + +#include +#include +#include +#include + + +namespace DB +{ + +/** Matcher query tree node. + * Matcher can be unqualified with identifier and qualified with identifier. + * It can be asterisk or COLUMNS('regexp') or COLUMNS(column_name_1, ...). + * In result we have 6 possible options: + * Unqualified + * 1. * + * 2. COLUMNS('regexp') + * 3. COLUMNS(column_name_1, ...) + * + * Qualified: + * 1. identifier.* + * 2. identifier.COLUMNS('regexp') + * 3. identifier.COLUMNS(column_name_1, ...) + * + * Matcher must be resolved during query analysis pass. + * + * Matchers can be applied to compound expressions. + * Example: SELECT compound_column AS a, a.* FROM test_table. + * Example: SELECT compound_column.* FROM test_table. 
+ * + * Example: SELECT * FROM test_table; + * Example: SELECT test_table.* FROM test_table. + * Example: SELECT a.* FROM test_table AS a. + * + * Additionally each matcher can contain transformers, check ColumnTransformers.h. + * In query tree matchers column transformers are represended as ListNode. + */ +enum class MatcherNodeType +{ + ASTERISK, + COLUMNS_REGEXP, + COLUMNS_LIST +}; + +const char * toString(MatcherNodeType matcher_node_type); + +class MatcherNode; +using MatcherNodePtr = std::shared_ptr; + +class MatcherNode final : public IQueryTreeNode +{ +public: + /// Variant unqualified asterisk + explicit MatcherNode(ColumnTransformersNodes column_transformers_ = {}); + + /// Variant qualified asterisk + explicit MatcherNode(Identifier qualified_identifier_, ColumnTransformersNodes column_transformers_ = {}); + + /// Variant unqualified COLUMNS('regexp') + explicit MatcherNode(std::shared_ptr columns_matcher_, ColumnTransformersNodes column_transformers_ = {}); + + /// Variant qualified COLUMNS('regexp') + explicit MatcherNode(Identifier qualified_identifier_, std::shared_ptr columns_matcher_, ColumnTransformersNodes column_transformers_ = {}); + + /// Variant unqualified COLUMNS(column_name_1, ...) + explicit MatcherNode(Identifiers columns_identifiers_, ColumnTransformersNodes column_transformers_ = {}); + + /// Variant qualified COLUMNS(column_name_1, ...) + explicit MatcherNode(Identifier qualified_identifier_, Identifiers columns_identifiers_, ColumnTransformersNodes column_transformers_ = {}); + + /// Get matcher type + MatcherNodeType getMatcherType() const + { + return matcher_type; + } + + /// Returns true if matcher is asterisk matcher, false otherwise + bool isAsteriskMatcher() const + { + return matcher_type == MatcherNodeType::ASTERISK; + } + + /// Returns true if matcher is columns regexp or columns list matcher, false otherwise + bool isColumnsMatcher() const + { + return matcher_type == MatcherNodeType::COLUMNS_REGEXP || matcher_type == MatcherNodeType::COLUMNS_LIST; + } + + /// Returns true if matcher is qualified, false otherwise + bool isQualified() const + { + return !qualified_identifier.empty(); + } + + /// Returns true if matcher is not qualified, false otherwise + bool isUnqualified() const + { + return qualified_identifier.empty(); + } + + /// Get qualified identifier + const Identifier & getQualifiedIdentifier() const + { + return qualified_identifier; + } + + /// Get columns matcher. Valid only if this matcher has type COLUMNS_REGEXP. + const std::shared_ptr & getColumnsMatcher() const + { + return columns_matcher; + } + + /// Get columns identifiers. Valid only if this matcher has type COLUMNS_LIST. 
+ const Identifiers & getColumnsIdentifiers() const + { + return columns_identifiers; + } + + /// Get column transformers + const ListNode & getColumnTransformers() const + { + return children[column_transformers_child_index]->as(); + } + + /// Get column transformers + const QueryTreeNodePtr & getColumnTransformersNode() const + { + return children[column_transformers_child_index]; + } + + /// Returns true if matcher match column name, false otherwise + bool isMatchingColumn(const std::string & column_name); + + QueryTreeNodeType getNodeType() const override + { + return QueryTreeNodeType::MATCHER; + } + + void dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const override; + +protected: + bool isEqualImpl(const IQueryTreeNode & rhs) const override; + + void updateTreeHashImpl(HashState & hash_state) const override; + + QueryTreeNodePtr cloneImpl() const override; + + ASTPtr toASTImpl() const override; + +private: + explicit MatcherNode(MatcherNodeType matcher_type_, + Identifier qualified_identifier_, + Identifiers columns_identifiers_, + std::shared_ptr columns_matcher_, + ColumnTransformersNodes column_transformers_); + + MatcherNodeType matcher_type; + Identifier qualified_identifier; + Identifiers columns_identifiers; + std::shared_ptr columns_matcher; + std::unordered_set columns_identifiers_set; + + static constexpr size_t column_transformers_child_index = 0; + static constexpr size_t children_size = column_transformers_child_index + 1; +}; + +} diff --git a/src/Analyzer/Passes/AggregateFunctionsArithmericOperationsPass.cpp b/src/Analyzer/Passes/AggregateFunctionsArithmericOperationsPass.cpp new file mode 100644 index 00000000000..dcf386b2988 --- /dev/null +++ b/src/Analyzer/Passes/AggregateFunctionsArithmericOperationsPass.cpp @@ -0,0 +1,170 @@ +#include + +#include +#include + +#include + +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int BAD_TYPE_OF_FIELD; +} + +namespace +{ + +Field zeroField(const Field & value) +{ + switch (value.getType()) + { + case Field::Types::UInt64: return static_cast(0); + case Field::Types::Int64: return static_cast(0); + case Field::Types::Float64: return static_cast(0); + case Field::Types::UInt128: return static_cast(0); + case Field::Types::Int128: return static_cast(0); + case Field::Types::UInt256: return static_cast(0); + case Field::Types::Int256: return static_cast(0); + default: + break; + } + + throw Exception(ErrorCodes::BAD_TYPE_OF_FIELD, "Unexpected literal type in function"); +} + +/** Rewrites: sum([multiply|divide]) -> [multiply|divide](sum) + * [min|max|avg]([multiply|divide|plus|minus]) -> [multiply|divide|plus|minus]([min|max|avg]) + * + * TODO: Support `groupBitAnd`, `groupBitOr`, `groupBitXor` functions. + * TODO: Support rewrite `f((2 * n) * n)` into '2 * f(n * n)'. 
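The subtle part of this rewrite is the sign of the extracted constant, which is why zeroField above exists: pulling a negative multiplier out of min or max flips which extreme wins, so the aggregate name has to be swapped (SELECT min(-1 * id) becomes SELECT -1 * max(id)), while a positive constant leaves the name alone. A small numeric illustration of that rule as a standalone sketch, not the pass itself:

#include <algorithm>
#include <cassert>
#include <vector>

int main()
{
    std::vector<int> ids = {1, 2, 3};
    int constant = -1;

    /// Original form: min(constant * id) over all rows.
    int min_of_products = constant * ids[0];
    for (int id : ids)
        min_of_products = std::min(min_of_products, constant * id);

    /// Rewritten form: because the constant is negative, min becomes max
    /// after extraction: constant * max(id).
    int rewritten = constant * *std::max_element(ids.begin(), ids.end());

    assert(min_of_products == rewritten); /// both are -3
    return 0;
}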
+ */ +class AggregateFunctionsArithmericOperationsVisitor : public InDepthQueryTreeVisitor +{ +public: + /// Traverse tree bottom to top + static bool shouldTraverseTopToBottom() + { + return false; + } + + static void visitImpl(QueryTreeNodePtr & node) + { + auto * aggregate_function_node = node->as(); + if (!aggregate_function_node || !aggregate_function_node->isAggregateFunction()) + return; + + static std::unordered_map> supported_functions + = {{"sum", {"multiply", "divide"}}, + {"min", {"multiply", "divide", "plus", "minus"}}, + {"max", {"multiply", "divide", "plus", "minus"}}, + {"avg", {"multiply", "divide", "plus", "minus"}}}; + + auto & aggregate_function_arguments_nodes = aggregate_function_node->getArguments().getNodes(); + if (aggregate_function_arguments_nodes.size() != 1) + return; + + auto * inner_function_node = aggregate_function_arguments_nodes[0]->as(); + if (!inner_function_node) + return; + + auto & inner_function_arguments_nodes = inner_function_node->getArguments().getNodes(); + if (inner_function_arguments_nodes.size() != 2) + return; + + /// Aggregate functions[sum|min|max|avg] is case-insensitive, so we use lower cases name + auto lower_function_name = Poco::toLower(aggregate_function_node->getFunctionName()); + + auto supported_function_it = supported_functions.find(lower_function_name); + if (supported_function_it == supported_functions.end()) + return; + + const auto & inner_function_name = inner_function_node->getFunctionName(); + + if (!supported_function_it->second.contains(inner_function_name)) + return; + + auto left_argument_constant_value = inner_function_arguments_nodes[0]->getConstantValueOrNull(); + auto right_argument_constant_value = inner_function_arguments_nodes[1]->getConstantValueOrNull(); + + /** If we extract negative constant, aggregate function name must be updated. 
+ * + * Example: SELECT min(-1 * id); + * Result: SELECT -1 * max(id); + */ + std::string function_name_if_constant_is_negative; + if (inner_function_name == "multiply" || inner_function_name == "divide") + { + if (lower_function_name == "min") + function_name_if_constant_is_negative = "max"; + else if (lower_function_name == "max") + function_name_if_constant_is_negative = "min"; + } + + if (left_argument_constant_value && !right_argument_constant_value) + { + /// Do not rewrite `sum(1/n)` with `sum(1) * div(1/n)` because of lose accuracy + if (inner_function_name == "divide") + return; + + /// Rewrite `aggregate_function(inner_function(constant, argument))` into `inner_function(constant, aggregate_function(argument))` + const auto & left_argument_constant_value_literal = left_argument_constant_value->getValue(); + if (!function_name_if_constant_is_negative.empty() && + left_argument_constant_value_literal < zeroField(left_argument_constant_value_literal)) + { + resolveAggregateFunctionNode(*aggregate_function_node, function_name_if_constant_is_negative); + } + + auto inner_function = aggregate_function_arguments_nodes[0]; + auto inner_function_right_argument = std::move(inner_function_arguments_nodes[1]); + aggregate_function_arguments_nodes = {inner_function_right_argument}; + inner_function_arguments_nodes[1] = node; + node = std::move(inner_function); + } + else if (right_argument_constant_value) + { + /// Rewrite `aggregate_function(inner_function(argument, constant))` into `inner_function(aggregate_function(argument), constant)` + const auto & right_argument_constant_value_literal = right_argument_constant_value->getValue(); + if (!function_name_if_constant_is_negative.empty() && + right_argument_constant_value_literal < zeroField(right_argument_constant_value_literal)) + { + resolveAggregateFunctionNode(*aggregate_function_node, function_name_if_constant_is_negative); + } + + auto inner_function = aggregate_function_arguments_nodes[0]; + auto inner_function_left_argument = std::move(inner_function_arguments_nodes[0]); + aggregate_function_arguments_nodes = {inner_function_left_argument}; + inner_function_arguments_nodes[0] = node; + node = std::move(inner_function); + } + } + +private: + static inline void resolveAggregateFunctionNode(FunctionNode & function_node, const String & aggregate_function_name) + { + auto function_result_type = function_node.getResultType(); + auto function_aggregate_function = function_node.getAggregateFunction(); + + AggregateFunctionProperties properties; + auto aggregate_function = AggregateFunctionFactory::instance().get(aggregate_function_name, + function_aggregate_function->getArgumentTypes(), + function_aggregate_function->getParameters(), + properties); + + function_node.resolveAsAggregateFunction(std::move(aggregate_function), std::move(function_result_type)); + } +}; + +} + +void AggregateFunctionsArithmericOperationsPass::run(QueryTreeNodePtr query_tree_node, ContextPtr) +{ + AggregateFunctionsArithmericOperationsVisitor visitor; + visitor.visit(query_tree_node); +} + +} diff --git a/src/Analyzer/Passes/AggregateFunctionsArithmericOperationsPass.h b/src/Analyzer/Passes/AggregateFunctionsArithmericOperationsPass.h new file mode 100644 index 00000000000..a89d2f87ad9 --- /dev/null +++ b/src/Analyzer/Passes/AggregateFunctionsArithmericOperationsPass.h @@ -0,0 +1,24 @@ +#pragma once + +#include + +namespace DB +{ + +/** Extract arithmeric operations from aggregate functions. 
+ * + * Example: SELECT sum(a * 2); + * Result: SELECT sum(a) * 2; + */ +class AggregateFunctionsArithmericOperationsPass final : public IQueryTreePass +{ +public: + String getName() override { return "AggregateFunctionsArithmericOperations"; } + + String getDescription() override { return "Extract arithmeric operations from aggregate functions."; } + + void run(QueryTreeNodePtr query_tree_node, ContextPtr context) override; + +}; + +} diff --git a/src/Analyzer/Passes/CountDistinctPass.cpp b/src/Analyzer/Passes/CountDistinctPass.cpp new file mode 100644 index 00000000000..2b55efa3552 --- /dev/null +++ b/src/Analyzer/Passes/CountDistinctPass.cpp @@ -0,0 +1,85 @@ +#include + +#include +#include + +#include +#include +#include +#include + +namespace DB +{ + +namespace +{ + +class CountDistinctVisitor : public InDepthQueryTreeVisitor +{ +public: + static void visitImpl(QueryTreeNodePtr & node) + { + auto * query_node = node->as(); + + /// Check that query has only SELECT clause + if (!query_node || (query_node->hasWith() || query_node->hasPrewhere() || query_node->hasWhere() || query_node->hasGroupBy() || + query_node->hasHaving() || query_node->hasWindow() || query_node->hasOrderBy() || query_node->hasLimitByLimit() || query_node->hasLimitByOffset() || + query_node->hasLimitBy() || query_node->hasLimit() || query_node->hasOffset())) + return; + + /// Check that query has only single table expression + auto join_tree_node_type = query_node->getJoinTree()->getNodeType(); + if (join_tree_node_type == QueryTreeNodeType::JOIN || join_tree_node_type == QueryTreeNodeType::ARRAY_JOIN) + return; + + /// Check that query has only single node in projection + auto & projection_nodes = query_node->getProjection().getNodes(); + if (projection_nodes.size() != 1) + return; + + /// Check that query single projection node is `countDistinct` function + auto & projection_node = projection_nodes[0]; + auto * function_node = projection_node->as(); + if (!function_node) + return; + + auto lower_function_name = Poco::toLower(function_node->getFunctionName()); + if (lower_function_name != "countdistinct" && lower_function_name != "uniqexact") + return; + + /// Check that `countDistinct` function has single COLUMN argument + auto & count_distinct_arguments_nodes = function_node->getArguments().getNodes(); + if (count_distinct_arguments_nodes.size() != 1 && count_distinct_arguments_nodes[0]->getNodeType() != QueryTreeNodeType::COLUMN) + return; + + auto & count_distinct_argument_column = count_distinct_arguments_nodes[0]; + auto & count_distinct_argument_column_typed = count_distinct_argument_column->as(); + + /// Build subquery SELECT count_distinct_argument_column FROM table_expression GROUP BY count_distinct_argument_column + auto subquery = std::make_shared(); + subquery->getJoinTree() = query_node->getJoinTree(); + subquery->getProjection().getNodes().push_back(count_distinct_argument_column); + subquery->getGroupBy().getNodes().push_back(count_distinct_argument_column); + subquery->resolveProjectionColumns({count_distinct_argument_column_typed.getColumn()}); + + /// Put subquery into JOIN TREE of initial query + query_node->getJoinTree() = std::move(subquery); + + /// Replace `countDistinct` of initial query into `count` + auto result_type = function_node->getResultType(); + AggregateFunctionProperties properties; + auto aggregate_function = AggregateFunctionFactory::instance().get("count", {}, {}, properties); + function_node->resolveAsAggregateFunction(std::move(aggregate_function), std::move(result_type)); + 
function_node->getArguments().getNodes().clear(); + } +}; + +} + +void CountDistinctPass::run(QueryTreeNodePtr query_tree_node, ContextPtr) +{ + CountDistinctVisitor visitor; + visitor.visit(query_tree_node); +} + +} diff --git a/src/Analyzer/Passes/CountDistinctPass.h b/src/Analyzer/Passes/CountDistinctPass.h new file mode 100644 index 00000000000..cac5033c98f --- /dev/null +++ b/src/Analyzer/Passes/CountDistinctPass.h @@ -0,0 +1,27 @@ +#pragma once + +#include + +namespace DB +{ + +/** Optimize single `countDistinct` into `count` over subquery. + * + * Example: SELECT countDistinct(column) FROM table; + * Result: SELECT count() FROM (SELECT column FROM table GROUP BY column); + */ +class CountDistinctPass final : public IQueryTreePass +{ +public: + String getName() override { return "CountDistinct"; } + + String getDescription() override + { + return "Optimize single countDistinct into count over subquery"; + } + + void run(QueryTreeNodePtr query_tree_node, ContextPtr context) override; + +}; + +} diff --git a/src/Analyzer/Passes/CustomizeFunctionsPass.cpp b/src/Analyzer/Passes/CustomizeFunctionsPass.cpp new file mode 100644 index 00000000000..629ab411a55 --- /dev/null +++ b/src/Analyzer/Passes/CustomizeFunctionsPass.cpp @@ -0,0 +1,175 @@ +#include + +#include +#include + +#include + +#include + +#include +#include + +namespace DB +{ + +namespace +{ + +class CustomizeFunctionsVisitor : public InDepthQueryTreeVisitor +{ +public: + explicit CustomizeFunctionsVisitor(ContextPtr & context_) + : context(context_) + {} + + void visitImpl(QueryTreeNodePtr & node) const + { + auto * function_node = node->as(); + if (!function_node) + return; + + const auto & settings = context->getSettingsRef(); + + /// After successful function replacement function name and function name lowercase must be recalculated + auto function_name = function_node->getFunctionName(); + auto function_name_lowercase = Poco::toLower(function_name); + + if (function_node->isAggregateFunction() || function_node->isWindowFunction()) + { + auto count_distinct_implementation_function_name = String(settings.count_distinct_implementation); + + /// Replace countDistinct with countDistinct implementation + if (function_name_lowercase == "countdistinct") + { + resolveAggregateOrWindowFunctionNode(*function_node, count_distinct_implementation_function_name); + function_name = function_node->getFunctionName(); + function_name_lowercase = Poco::toLower(function_name); + } + + /// Replace countIfDistinct with countDistinctIf implementation + if (function_name_lowercase == "countifdistinct") + { + resolveAggregateOrWindowFunctionNode(*function_node, count_distinct_implementation_function_name + "If"); + function_name = function_node->getFunctionName(); + function_name_lowercase = Poco::toLower(function_name); + } + + /// Replace aggregateFunctionIfDistinct into aggregateFunctionDistinctIf to make execution more optimal + if (function_name_lowercase.ends_with("ifdistinct")) + { + size_t prefix_length = function_name_lowercase.size() - strlen("ifdistinct"); + auto updated_function_name = function_name_lowercase.substr(0, prefix_length) + "DistinctIf"; + resolveAggregateOrWindowFunctionNode(*function_node, updated_function_name); + function_name = function_node->getFunctionName(); + function_name_lowercase = Poco::toLower(function_name); + } + + /// Rewrite all aggregate functions to add -OrNull suffix to them + if (settings.aggregate_functions_null_for_empty && !function_name.ends_with("OrNull")) + { + auto function_properies = 
AggregateFunctionFactory::instance().tryGetProperties(function_name); + if (function_properies && !function_properies->returns_default_when_only_null) + { + auto updated_function_name = function_name + "OrNull"; + resolveAggregateOrWindowFunctionNode(*function_node, updated_function_name); + function_name = function_node->getFunctionName(); + function_name_lowercase = Poco::toLower(function_name); + } + } + + /** Move -OrNull suffix ahead, this should execute after add -OrNull suffix. + * Used to rewrite aggregate functions with -OrNull suffix in some cases. + * Example: sumIfOrNull. + * Result: sumOrNullIf. + */ + if (function_name.ends_with("OrNull")) + { + auto function_properies = AggregateFunctionFactory::instance().tryGetProperties(function_name); + if (function_properies && !function_properies->returns_default_when_only_null) + { + size_t function_name_size = function_name.size(); + + static constexpr std::array suffixes_to_replace = {"MergeState", "Merge", "State", "If"}; + for (const auto & suffix : suffixes_to_replace) + { + auto suffix_string_value = String(suffix); + auto suffix_to_check = suffix_string_value + "OrNull"; + + if (!function_name.ends_with(suffix_to_check)) + continue; + + auto updated_function_name = function_name.substr(0, function_name_size - suffix_to_check.size()) + "OrNull" + suffix_string_value; + resolveAggregateOrWindowFunctionNode(*function_node, updated_function_name); + function_name = function_node->getFunctionName(); + function_name_lowercase = Poco::toLower(function_name); + break; + } + } + } + + return; + } + + if (settings.transform_null_in) + { + auto function_result_type = function_node->getResultType(); + + static constexpr std::array, 4> in_function_to_replace_null_in_function_map = + {{ + {"in", "nullIn"}, + {"notin", "notNullIn"}, + {"globalin", "globalNullIn"}, + {"globalnotin", "globalNotNullIn"}, + }}; + + for (const auto & [in_function_name, in_function_name_to_replace] : in_function_to_replace_null_in_function_map) + { + if (function_name_lowercase == in_function_name) + { + resolveOrdinaryFunctionNode(*function_node, String(in_function_name_to_replace)); + function_name = function_node->getFunctionName(); + function_name_lowercase = Poco::toLower(function_name); + break; + } + } + } + } + + static inline void resolveAggregateOrWindowFunctionNode(FunctionNode & function_node, const String & aggregate_function_name) + { + auto function_result_type = function_node.getResultType(); + auto function_aggregate_function = function_node.getAggregateFunction(); + + AggregateFunctionProperties properties; + auto aggregate_function = AggregateFunctionFactory::instance().get(aggregate_function_name, + function_aggregate_function->getArgumentTypes(), + function_aggregate_function->getParameters(), + properties); + + if (function_node.isAggregateFunction()) + function_node.resolveAsAggregateFunction(std::move(aggregate_function), std::move(function_result_type)); + else if (function_node.isWindowFunction()) + function_node.resolveAsWindowFunction(std::move(aggregate_function), std::move(function_result_type)); + } + + inline void resolveOrdinaryFunctionNode(FunctionNode & function_node, const String & function_name) const + { + auto function_result_type = function_node.getResultType(); + auto function = FunctionFactory::instance().get(function_name, context); + function_node.resolveAsFunction(function, std::move(function_result_type)); + } + +private: + ContextPtr & context; +}; + +} + +void CustomizeFunctionsPass::run(QueryTreeNodePtr 
query_tree_node, ContextPtr context) +{ + CustomizeFunctionsVisitor visitor(context); + visitor.visit(query_tree_node); +} + +} diff --git a/src/Analyzer/Passes/CustomizeFunctionsPass.h b/src/Analyzer/Passes/CustomizeFunctionsPass.h new file mode 100644 index 00000000000..7145099ca4c --- /dev/null +++ b/src/Analyzer/Passes/CustomizeFunctionsPass.h @@ -0,0 +1,25 @@ +#pragma once + +#include + +namespace DB +{ + +/** Customize aggregate functions and `in` functions implementations. + * + * Example: SELECT countDistinct(); + * Result: SELECT countDistinctImplementation(); + * Function countDistinctImplementation is taken from settings.count_distinct_implementation. + */ +class CustomizeFunctionsPass final : public IQueryTreePass +{ +public: + String getName() override { return "CustomizeFunctions"; } + + String getDescription() override { return "Customize implementation of aggregate functions, and in functions."; } + + void run(QueryTreeNodePtr query_tree_node, ContextPtr context) override; + +}; + +} diff --git a/src/Analyzer/Passes/FunctionToSubcolumnsPass.cpp b/src/Analyzer/Passes/FunctionToSubcolumnsPass.cpp new file mode 100644 index 00000000000..41cc7bf18b1 --- /dev/null +++ b/src/Analyzer/Passes/FunctionToSubcolumnsPass.cpp @@ -0,0 +1,211 @@ +#include + +#include +#include +#include + +#include + +#include + +#include + +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace +{ + +class FunctionToSubcolumnsVisitor : public InDepthQueryTreeVisitor +{ +public: + explicit FunctionToSubcolumnsVisitor(ContextPtr & context_) + : context(context_) + {} + + void visitImpl(QueryTreeNodePtr & node) const + { + auto * function_node = node->as(); + if (!function_node) + return; + + auto & function_arguments_nodes = function_node->getArguments().getNodes(); + size_t function_arguments_nodes_size = function_arguments_nodes.size(); + + if (function_arguments_nodes.empty() || function_arguments_nodes_size > 2) + return; + + auto * first_argument_column_node = function_arguments_nodes.front()->as(); + + if (!first_argument_column_node) + return; + + auto column_source = first_argument_column_node->getColumnSource(); + auto * table_node = column_source->as(); + + if (!table_node) + return; + + const auto & storage = table_node->getStorage(); + if (!storage->supportsSubcolumns()) + return; + + auto column = first_argument_column_node->getColumn(); + WhichDataType column_type(column.type); + + const auto & function_name = function_node->getFunctionName(); + + if (function_arguments_nodes_size == 1) + { + if (column_type.isArray()) + { + if (function_name == "length") + { + /// Replace `length(array_argument)` with `array_argument.size0` + column.name += ".size0"; + + node = std::make_shared(column, column_source); + } + else if (function_name == "empty") + { + /// Replace `empty(array_argument)` with `equals(array_argument.size0, 0)` + column.name += ".size0"; + column.type = std::make_shared(); + + resolveOrdinaryFunctionNode(*function_node, "equals"); + + function_arguments_nodes.clear(); + function_arguments_nodes.push_back(std::make_shared(column, column_source)); + function_arguments_nodes.push_back(std::make_shared(static_cast(0))); + } + else if (function_name == "notEmpty") + { + /// Replace `notEmpty(array_argument)` with `notEquals(array_argument.size0, 0)` + column.name += ".size0"; + column.type = std::make_shared(); + + resolveOrdinaryFunctionNode(*function_node, "notEquals"); + + function_arguments_nodes.clear(); + 
function_arguments_nodes.push_back(std::make_shared(column, column_source)); + function_arguments_nodes.push_back(std::make_shared(static_cast(0))); + } + } + else if (column_type.isNullable()) + { + if (function_name == "isNull") + { + /// Replace `isNull(nullable_argument)` with `nullable_argument.null` + column.name += ".null"; + + node = std::make_shared(column, column_source); + } + else if (function_name == "isNotNull") + { + /// Replace `isNotNull(nullable_argument)` with `not(nullable_argument.null)` + column.name += ".null"; + column.type = std::make_shared(); + + resolveOrdinaryFunctionNode(*function_node, "not"); + + function_arguments_nodes = {std::make_shared(column, column_source)}; + } + } + else if (column_type.isMap()) + { + if (function_name == "mapKeys") + { + /// Replace `mapKeys(map_argument)` with `map_argument.keys` + column.name += ".keys"; + column.type = function_node->getResultType(); + + node = std::make_shared(column, column_source); + } + else if (function_name == "mapValues") + { + /// Replace `mapValues(map_argument)` with `map_argument.values` + column.name += ".values"; + column.type = function_node->getResultType(); + + node = std::make_shared(column, column_source); + } + } + } + else + { + auto second_argument_constant_value = function_arguments_nodes[1]->getConstantValueOrNull(); + + if (function_name == "tupleElement" && column_type.isTuple() && second_argument_constant_value) + { + /** Replace `tupleElement(tuple_argument, string_literal)`, `tupleElement(tuple_argument, integer_literal)` + * with `tuple_argument.column_name`. + */ + const auto & tuple_element_constant_value = second_argument_constant_value->getValue(); + const auto & tuple_element_constant_value_type = tuple_element_constant_value.getType(); + + const auto & data_type_tuple = assert_cast(*column.type); + + String subcolumn_name; + + if (tuple_element_constant_value_type == Field::Types::String) + { + subcolumn_name = tuple_element_constant_value.get(); + } + else if (tuple_element_constant_value_type == Field::Types::UInt64) + { + auto tuple_column_index = tuple_element_constant_value.get(); + subcolumn_name = data_type_tuple.getNameByPosition(tuple_column_index); + } + else + { + return; + } + + column.name += '.'; + column.name += subcolumn_name; + column.type = function_node->getResultType(); + + node = std::make_shared(column, column_source); + } + else if (function_name == "mapContains" && column_type.isMap()) + { + const auto & data_type_map = assert_cast(*column.type); + + /// Replace `mapContains(map_argument, argument)` with `has(map_argument.keys, argument)` + column.name += ".keys"; + column.type = data_type_map.getKeyType(); + + auto has_function_argument = std::make_shared(column, column_source); + resolveOrdinaryFunctionNode(*function_node, "has"); + + function_arguments_nodes[0] = std::move(has_function_argument); + } + } + } + +private: + inline void resolveOrdinaryFunctionNode(FunctionNode & function_node, const String & function_name) const + { + auto function_result_type = function_node.getResultType(); + auto function = FunctionFactory::instance().get(function_name, context); + function_node.resolveAsFunction(function, std::move(function_result_type)); + } + + ContextPtr & context; +}; + +} + +void FunctionToSubcolumnsPass::run(QueryTreeNodePtr query_tree_node, ContextPtr context) +{ + FunctionToSubcolumnsVisitor visitor(context); + visitor.visit(query_tree_node); +} + +} diff --git a/src/Analyzer/Passes/FunctionToSubcolumnsPass.h 
b/src/Analyzer/Passes/FunctionToSubcolumnsPass.h new file mode 100644 index 00000000000..e31c39a8ff3 --- /dev/null +++ b/src/Analyzer/Passes/FunctionToSubcolumnsPass.h @@ -0,0 +1,31 @@ +#pragma once + +#include + +namespace DB +{ + +/** Transform functions to subcolumns. + * It can help to reduce amount of read data. + * + * Example: SELECT tupleElement(column, subcolumn) FROM test_table; + * Result: SELECT column.subcolumn FROM test_table; + * + * Example: SELECT length(array_column) FROM test_table; + * Result: SELECT array_column.size0 FROM test_table; + * + * Example: SELECT nullable_column IS NULL FROM test_table; + * Result: SELECT nullable_column.null FROM test_table; + */ +class FunctionToSubcolumnsPass final : public IQueryTreePass +{ +public: + String getName() override { return "FunctionToSubcolumns"; } + + String getDescription() override { return "Rewrite function to subcolumns, for example tupleElement(column, subcolumn) into column.subcolumn"; } + + void run(QueryTreeNodePtr query_tree_node, ContextPtr context) override; + +}; + +} diff --git a/src/Analyzer/Passes/IfChainToMultiIfPass.cpp b/src/Analyzer/Passes/IfChainToMultiIfPass.cpp new file mode 100644 index 00000000000..f400b11765e --- /dev/null +++ b/src/Analyzer/Passes/IfChainToMultiIfPass.cpp @@ -0,0 +1,75 @@ +#include + +#include + +#include +#include +#include + +namespace DB +{ + +namespace +{ + +class IfChainToMultiIfPassVisitor : public InDepthQueryTreeVisitor +{ +public: + explicit IfChainToMultiIfPassVisitor(FunctionOverloadResolverPtr multi_if_function_ptr_) + : multi_if_function_ptr(std::move(multi_if_function_ptr_)) + {} + + void visitImpl(QueryTreeNodePtr & node) + { + auto * function_node = node->as(); + if (!function_node || function_node->getFunctionName() != "if" || function_node->getArguments().getNodes().size() != 3) + return; + + std::vector multi_if_arguments; + + auto & function_node_arguments = function_node->getArguments().getNodes(); + multi_if_arguments.insert(multi_if_arguments.end(), function_node_arguments.begin(), function_node_arguments.end()); + + QueryTreeNodePtr if_chain_node = multi_if_arguments.back(); + + while (true) + { + /// Check if last `multiIf` argument is `if` function + auto * if_chain_function_node = if_chain_node->as(); + if (!if_chain_function_node || if_chain_function_node->getFunctionName() != "if" || if_chain_function_node->getArguments().getNodes().size() != 3) + break; + + /// Replace last `multiIf` argument with `if` function arguments + + multi_if_arguments.pop_back(); + + auto & if_chain_function_node_arguments = if_chain_function_node->getArguments().getNodes(); + multi_if_arguments.insert(multi_if_arguments.end(), if_chain_function_node_arguments.begin(), if_chain_function_node_arguments.end()); + + /// Use last `multiIf` argument for next check + if_chain_node = multi_if_arguments.back(); + } + + /// Do not replace `if` with 3 arguments to `multiIf` + if (multi_if_arguments.size() <= 3) + return; + + auto multi_if_function = std::make_shared("multiIf"); + multi_if_function->resolveAsFunction(multi_if_function_ptr, std::make_shared()); + multi_if_function->getArguments().getNodes() = std::move(multi_if_arguments); + node = std::move(multi_if_function); + } + +private: + FunctionOverloadResolverPtr multi_if_function_ptr; +}; + +} + +void IfChainToMultiIfPass::run(QueryTreeNodePtr query_tree_node, ContextPtr context) +{ + IfChainToMultiIfPassVisitor visitor(FunctionFactory::instance().get("multiIf", context)); + visitor.visit(query_tree_node); +} + +} diff --git 
a/src/Analyzer/Passes/IfChainToMultiIfPass.h b/src/Analyzer/Passes/IfChainToMultiIfPass.h new file mode 100644 index 00000000000..43f3fb8831d --- /dev/null +++ b/src/Analyzer/Passes/IfChainToMultiIfPass.h @@ -0,0 +1,25 @@ +#pragma once + +#include + +namespace DB +{ + +/** Convert `if` chain into single `multiIf`. + * Replace if(cond_1, then_1_value, if(cond_2, ...)) chains into multiIf(cond_1, then_1_value, cond_2, ...). + * + * Example: SELECT if(cond_1, then_1_value, if(cond_2, then_2_value, else_value)); + * Result: SELECT multiIf(cond_1, then_1_value, cond_2, then_2_value, else_value); + */ +class IfChainToMultiIfPass final : public IQueryTreePass +{ +public: + String getName() override { return "IfChainToMultiIf"; } + + String getDescription() override { return "Optimize if chain to multiIf"; } + + void run(QueryTreeNodePtr query_tree_node, ContextPtr context) override; + +}; + +} diff --git a/src/Analyzer/Passes/IfConstantConditionPass.cpp b/src/Analyzer/Passes/IfConstantConditionPass.cpp new file mode 100644 index 00000000000..1da1f5bd471 --- /dev/null +++ b/src/Analyzer/Passes/IfConstantConditionPass.cpp @@ -0,0 +1,56 @@ +#include + +#include +#include +#include + +namespace DB +{ + +namespace +{ + +class IfConstantConditionVisitor : public InDepthQueryTreeVisitor +{ +public: + static void visitImpl(QueryTreeNodePtr & node) + { + auto * function_node = node->as(); + if (!function_node || (function_node->getFunctionName() != "if" && function_node->getFunctionName() != "multiIf")) + return; + + if (function_node->getArguments().getNodes().size() != 3) + return; + + auto & first_argument = function_node->getArguments().getNodes()[0]; + auto first_argument_constant_value = first_argument->getConstantValueOrNull(); + if (!first_argument_constant_value) + return; + + const auto & condition_value = first_argument_constant_value->getValue(); + + bool condition_boolean_value = false; + + if (condition_value.getType() == Field::Types::Int64) + condition_boolean_value = static_cast(condition_value.safeGet()); + else if (condition_value.getType() == Field::Types::UInt64) + condition_boolean_value = static_cast(condition_value.safeGet()); + else + return; + + if (condition_boolean_value) + node = function_node->getArguments().getNodes()[1]; + else + node = function_node->getArguments().getNodes()[2]; + } +}; + +} + +void IfConstantConditionPass::run(QueryTreeNodePtr query_tree_node, ContextPtr) +{ + IfConstantConditionVisitor visitor; + visitor.visit(query_tree_node); +} + +} diff --git a/src/Analyzer/Passes/IfConstantConditionPass.h b/src/Analyzer/Passes/IfConstantConditionPass.h new file mode 100644 index 00000000000..7817e67aa5e --- /dev/null +++ b/src/Analyzer/Passes/IfConstantConditionPass.h @@ -0,0 +1,28 @@ +#pragma once + +#include + +namespace DB +{ + +/** Convert `if` with constant condition or `multiIf` with single constant condition into true condition argument value + * or false condition argument value. 
+ * + * Example: SELECT if(1, true_value, false_value); + * Result: SELECT true_value; + * + * Example: SELECT if(0, true_value, false_value); + * Result: SELECT false_value; + */ +class IfConstantConditionPass final : public IQueryTreePass +{ +public: + String getName() override { return "IfConstantCondition"; } + + String getDescription() override { return "Optimize if, multiIf for constant condition."; } + + void run(QueryTreeNodePtr query_tree_node, ContextPtr context) override; + +}; + +} diff --git a/src/Analyzer/Passes/MultiIfToIfPass.cpp b/src/Analyzer/Passes/MultiIfToIfPass.cpp new file mode 100644 index 00000000000..6d2ebac33e6 --- /dev/null +++ b/src/Analyzer/Passes/MultiIfToIfPass.cpp @@ -0,0 +1,45 @@ +#include + +#include +#include +#include + +namespace DB +{ + +namespace +{ + +class MultiIfToIfVisitor : public InDepthQueryTreeVisitor +{ +public: + explicit MultiIfToIfVisitor(FunctionOverloadResolverPtr if_function_ptr_) + : if_function_ptr(if_function_ptr_) + {} + + void visitImpl(QueryTreeNodePtr & node) + { + auto * function_node = node->as(); + if (!function_node || function_node->getFunctionName() != "multiIf") + return; + + if (function_node->getArguments().getNodes().size() != 3) + return; + + auto result_type = function_node->getResultType(); + function_node->resolveAsFunction(if_function_ptr, std::move(result_type)); + } + +private: + FunctionOverloadResolverPtr if_function_ptr; +}; + +} + +void MultiIfToIfPass::run(QueryTreeNodePtr query_tree_node, ContextPtr context) +{ + MultiIfToIfVisitor visitor(FunctionFactory::instance().get("if", context)); + visitor.visit(query_tree_node); +} + +} diff --git a/src/Analyzer/Passes/MultiIfToIfPass.h b/src/Analyzer/Passes/MultiIfToIfPass.h new file mode 100644 index 00000000000..2213f3713ed --- /dev/null +++ b/src/Analyzer/Passes/MultiIfToIfPass.h @@ -0,0 +1,24 @@ +#pragma once + +#include + +namespace DB +{ + +/** Convert `multiIf` with single condition into `if`. 
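+ * For illustration, following the argument count check in the visitor above, a `multiIf` with more than
+ * one condition (more than 3 arguments) is presumably left unchanged by this pass:
+ * Example: SELECT multiIf(cond_1, then_1, cond_2, then_2, else_value);
+ * Result: unchanged by this pass (5 arguments, not 3).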
+ * + * Example: SELECT multiIf(x, 1, 0); + * Result: SELECT if(x, 1, 0); + */ +class MultiIfToIfPass final : public IQueryTreePass +{ +public: + String getName() override { return "MultiIfToIf"; } + + String getDescription() override { return "Optimize multiIf with single condition to if."; } + + void run(QueryTreeNodePtr query_tree_node, ContextPtr context) override; + +}; + +} diff --git a/src/Analyzer/Passes/NormalizeCountVariantsPass.cpp b/src/Analyzer/Passes/NormalizeCountVariantsPass.cpp new file mode 100644 index 00000000000..8c92ecc3900 --- /dev/null +++ b/src/Analyzer/Passes/NormalizeCountVariantsPass.cpp @@ -0,0 +1,58 @@ +#include + +#include +#include + +#include +#include + +namespace DB +{ + +namespace +{ + +class NormalizeCountVariantsVisitor : public InDepthQueryTreeVisitor +{ +public: + static void visitImpl(QueryTreeNodePtr & node) + { + auto * function_node = node->as(); + if (!function_node || !function_node->isAggregateFunction() || (function_node->getFunctionName() != "count" && function_node->getFunctionName() != "sum")) + return; + + if (function_node->getArguments().getNodes().size() != 1) + return; + + auto & first_argument = function_node->getArguments().getNodes()[0]; + auto first_argument_constant_value = first_argument->getConstantValueOrNull(); + if (!first_argument_constant_value) + return; + + const auto & first_argument_constant_literal = first_argument_constant_value->getValue(); + + if (function_node->getFunctionName() == "count" && !first_argument_constant_literal.isNull()) + { + function_node->getArguments().getNodes().clear(); + } + else if (function_node->getFunctionName() == "sum" && first_argument_constant_literal.getType() == Field::Types::UInt64 && + first_argument_constant_literal.get() == 1) + { + auto result_type = function_node->getResultType(); + AggregateFunctionProperties properties; + auto aggregate_function = AggregateFunctionFactory::instance().get("count", {}, {}, properties); + function_node->resolveAsAggregateFunction(std::move(aggregate_function), std::move(result_type)); + function_node->getArguments().getNodes().clear(); + } + } +}; + +} + +void NormalizeCountVariantsPass::run(QueryTreeNodePtr query_tree_node, ContextPtr) +{ + NormalizeCountVariantsVisitor visitor; + visitor.visit(query_tree_node); +} + +} diff --git a/src/Analyzer/Passes/NormalizeCountVariantsPass.h b/src/Analyzer/Passes/NormalizeCountVariantsPass.h new file mode 100644 index 00000000000..78a114f4a85 --- /dev/null +++ b/src/Analyzer/Passes/NormalizeCountVariantsPass.h @@ -0,0 +1,27 @@ +#pragma once + +#include + +namespace DB +{ + +/** Remove single literal argument from `count`. Convert `sum` with single `1` literal argument into `count`. 
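+ * For illustration, based on the checks in the visitor above: `count(NULL)` is presumably left unchanged,
+ * because the literal argument must be non-NULL, and `sum` is only rewritten when the literal equals 1:
+ * Example: SELECT sum(2);
+ * Result: unchanged by this pass.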
+ * + * Example: SELECT count(1); + * Result: SELECT count(); + * + * Example: SELECT sum(1); + * Result: SELECT count(); + */ +class NormalizeCountVariantsPass final : public IQueryTreePass +{ +public: + String getName() override { return "NormalizeCountVariants"; } + + String getDescription() override { return "Optimize count(literal), sum(1) into count()."; } + + void run(QueryTreeNodePtr query_tree_node, ContextPtr context) override; + +}; + +} diff --git a/src/Analyzer/Passes/OrderByLimitByDuplicateEliminationPass.cpp b/src/Analyzer/Passes/OrderByLimitByDuplicateEliminationPass.cpp new file mode 100644 index 00000000000..e4d6633b6e6 --- /dev/null +++ b/src/Analyzer/Passes/OrderByLimitByDuplicateEliminationPass.cpp @@ -0,0 +1,108 @@ +#include + +#include +#include +#include + +namespace DB +{ + +namespace +{ + +struct QueryTreeNodeWithHash +{ + explicit QueryTreeNodeWithHash(const IQueryTreeNode * node_) + : node(node_) + , hash(node->getTreeHash().first) + {} + + const IQueryTreeNode * node = nullptr; + size_t hash = 0; +}; + +struct QueryTreeNodeWithHashHash +{ + size_t operator()(const QueryTreeNodeWithHash & node_with_hash) const + { + return node_with_hash.hash; + } +}; + +struct QueryTreeNodeWithHashEqualTo +{ + bool operator()(const QueryTreeNodeWithHash & lhs_node, const QueryTreeNodeWithHash & rhs_node) const + { + return lhs_node.hash == rhs_node.hash && lhs_node.node->isEqual(*rhs_node.node); + } +}; + +using QueryTreeNodeWithHashSet = std::unordered_set; + +class OrderByLimitByDuplicateEliminationVisitor : public InDepthQueryTreeVisitor +{ +public: + void visitImpl(QueryTreeNodePtr & node) + { + auto * query_node = node->as(); + if (!query_node) + return; + + if (query_node->hasOrderBy()) + { + QueryTreeNodes result_nodes; + + auto & query_order_by_nodes = query_node->getOrderBy().getNodes(); + + for (auto & sort_node : query_order_by_nodes) + { + auto & sort_node_typed = sort_node->as(); + + /// Skip elements with WITH FILL + if (sort_node_typed.withFill()) + { + result_nodes.push_back(sort_node); + continue; + } + + auto [_, inserted] = unique_expressions_nodes_set.emplace(sort_node_typed.getExpression().get()); + if (inserted) + result_nodes.push_back(sort_node); + } + + query_order_by_nodes = std::move(result_nodes); + } + + unique_expressions_nodes_set.clear(); + + if (query_node->hasLimitBy()) + { + QueryTreeNodes result_nodes; + + auto & query_limit_by_nodes = query_node->getLimitBy().getNodes(); + + for (auto & limit_by_node : query_limit_by_nodes) + { + auto [_, inserted] = unique_expressions_nodes_set.emplace(limit_by_node.get()); + if (inserted) + result_nodes.push_back(limit_by_node); + } + + query_limit_by_nodes = std::move(result_nodes); + } + } + +private: + QueryTreeNodeWithHashSet unique_expressions_nodes_set; +}; + +} + +void OrderByLimitByDuplicateEliminationPass::run(QueryTreeNodePtr query_tree_node, ContextPtr) +{ + OrderByLimitByDuplicateEliminationVisitor visitor; + visitor.visit(query_tree_node); +} + +} + diff --git a/src/Analyzer/Passes/OrderByLimitByDuplicateEliminationPass.h b/src/Analyzer/Passes/OrderByLimitByDuplicateEliminationPass.h new file mode 100644 index 00000000000..11a025af5b9 --- /dev/null +++ b/src/Analyzer/Passes/OrderByLimitByDuplicateEliminationPass.h @@ -0,0 +1,27 @@ +#pragma once + +#include + +namespace DB +{ + +/** Eliminate duplicate columns from ORDER BY and LIMIT BY. 
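+ * For illustration, based on the visitor above: duplicates are detected by expression tree hash and equality,
+ * and ORDER BY elements with WITH FILL are presumably kept even if they repeat:
+ * Example: SELECT * FROM test_table ORDER BY id WITH FILL, id WITH FILL;
+ * Result: unchanged by this pass.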
+ * + * Example: SELECT * FROM test_table ORDER BY id, id; + * Result: SELECT * FROM test_table ORDER BY id; + * + * Example: SELECT * FROM test_table LIMIT 5 BY id, id; + * Result: SELECT * FROM test_table LIMIT 5 BY id; + */ +class OrderByLimitByDuplicateEliminationPass final : public IQueryTreePass +{ +public: + String getName() override { return "OrderByLimitByDuplicateElimination"; } + + String getDescription() override { return "Remove duplicate columns from ORDER BY, LIMIT BY."; } + + void run(QueryTreeNodePtr query_tree_node, ContextPtr context) override; + +}; + +} diff --git a/src/Analyzer/Passes/OrderByTupleEliminationPass.cpp b/src/Analyzer/Passes/OrderByTupleEliminationPass.cpp new file mode 100644 index 00000000000..f70ec27ba5d --- /dev/null +++ b/src/Analyzer/Passes/OrderByTupleEliminationPass.cpp @@ -0,0 +1,59 @@ +#include + +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace +{ + +class OrderByTupleEliminationVisitor : public InDepthQueryTreeVisitor +{ +public: + static void visitImpl(QueryTreeNodePtr & node) + { + auto * query_node = node->as(); + if (!query_node || !query_node->hasOrderBy()) + return; + + QueryTreeNodes result_nodes; + + for (auto & sort_node : query_node->getOrderBy().getNodes()) + { + auto & sort_node_typed = sort_node->as(); + auto * function_expression = sort_node_typed.getExpression()->as(); + if (sort_node_typed.withFill() || !function_expression || function_expression->getFunctionName() != "tuple") + { + result_nodes.push_back(sort_node); + continue; + } + + auto & tuple_arguments_nodes = function_expression->getArguments().getNodes(); + for (auto & argument_node : tuple_arguments_nodes) + { + auto result_sort_node = std::make_shared(argument_node, + sort_node_typed.getSortDirection(), + sort_node_typed.getNullsSortDirection(), + sort_node_typed.getCollator()); + result_nodes.push_back(std::move(result_sort_node)); + } + } + + query_node->getOrderBy().getNodes() = std::move(result_nodes); + } +}; + +} + +void OrderByTupleEliminationPass::run(QueryTreeNodePtr query_tree_node, ContextPtr) +{ + OrderByTupleEliminationVisitor visitor; + visitor.visit(query_tree_node); +} + +} diff --git a/src/Analyzer/Passes/OrderByTupleEliminationPass.h b/src/Analyzer/Passes/OrderByTupleEliminationPass.h new file mode 100644 index 00000000000..5665561e227 --- /dev/null +++ b/src/Analyzer/Passes/OrderByTupleEliminationPass.h @@ -0,0 +1,24 @@ +#pragma once + +#include + +namespace DB +{ + +/** Eliminate tuples from ORDER BY. 
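+ * For illustration, based on the visitor above, the sort direction, NULLS direction and collation are
+ * presumably copied to every unwrapped tuple element, and elements with WITH FILL are not unwrapped:
+ * Example: SELECT * FROM test_table ORDER BY (a, b) DESC;
+ * Result: SELECT * FROM test_table ORDER BY a DESC, b DESC;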
+ * + * Example: SELECT * FROM test_table ORDER BY (a, b); + * Result: SELECT * FROM test_table ORDER BY a, b; + */ +class OrderByTupleEliminationPass final : public IQueryTreePass +{ +public: + String getName() override { return "OrderByTupleElimination"; } + + String getDescription() override { return "Remove tuple from ORDER BY."; } + + void run(QueryTreeNodePtr query_tree_node, ContextPtr context) override; + +}; + +} diff --git a/src/Analyzer/Passes/QueryAnalysisPass.cpp b/src/Analyzer/Passes/QueryAnalysisPass.cpp new file mode 100644 index 00000000000..9db2d66d99d --- /dev/null +++ b/src/Analyzer/Passes/QueryAnalysisPass.cpp @@ -0,0 +1,5699 @@ +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + +#include + +#include + +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int UNSUPPORTED_METHOD; + extern const int UNKNOWN_IDENTIFIER; + extern const int UNKNOWN_FUNCTION; + extern const int LOGICAL_ERROR; + extern const int CYCLIC_ALIASES; + extern const int INCORRECT_RESULT_OF_SCALAR_SUBQUERY; + extern const int BAD_ARGUMENTS; + extern const int MULTIPLE_EXPRESSIONS_FOR_ALIAS; + extern const int TYPE_MISMATCH; + extern const int AMBIGUOUS_IDENTIFIER; + extern const int INVALID_WITH_FILL_EXPRESSION; + extern const int INVALID_LIMIT_EXPRESSION; + extern const int EMPTY_LIST_OF_COLUMNS_QUERIED; + extern const int TOO_DEEP_SUBQUERIES; + extern const int UNKNOWN_AGGREGATE_FUNCTION; + extern const int NOT_AN_AGGREGATE; + extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION; + extern const int TOO_MANY_ARGUMENTS_FOR_FUNCTION; + extern const int ILLEGAL_FINAL; + extern const int SAMPLING_NOT_SUPPORTED; + extern const int NO_COMMON_TYPE; + extern const int NOT_IMPLEMENTED; + extern const int ALIAS_REQUIRED; + extern const int ILLEGAL_TYPE_OF_ARGUMENT; +} + +/** Query analyzer implementation overview. Please check documentation in QueryAnalysisPass.h before. + * And additional documentation for each method, where special cases are described in detail. + * + * Each node in query must be resolved. For each query tree node resolved state is specific. + * + * For constant node no resolve process exists, it is resolved during construction. + * + * For table node no resolve process exists, it is resolved during construction. + * + * For function node to be resolved parameters and arguments must be resolved, function node must be initialized with concrete aggregate or + * non aggregate function and with result type. + * + * For lambda node there can be 2 different cases. + * 1. Standalone: WITH (x -> x + 1) AS lambda SELECT lambda(1); Such lambdas are inlined in query tree during query analysis pass. + * 2. Function arguments: WITH (x -> x + 1) AS lambda SELECT arrayMap(lambda, [1, 2, 3]); For such lambda resolution must + * set concrete lambda arguments (initially they are identifier nodes) and resolve lambda expression body. + * + * For query node resolve process must resolve all its inner nodes. + * + * For matcher node resolve process must replace it with matched nodes. 
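+ * For example, for SELECT * FROM test_table the matcher `*` must be replaced with the column nodes of test_table.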
+ *
+ * For identifier node resolve process must replace it with concrete non identifier node. This part is most complex because
+ * for identifier resolution scopes and identifier lookup context play an important part.
+ *
+ * ClickHouse SQL supports lexical scoping for identifier resolution. Scope can be defined by query node or by expression node.
+ * Expression nodes that can define scope are lambdas and table ALIAS columns.
+ *
+ * Identifier lookup context can be expression, function, table.
+ *
+ * Examples: WITH (x -> x + 1) as func SELECT func() FROM func; During function `func` resolution identifier lookup is performed
+ * in function context.
+ *
+ * If there is no information about identifier context, the rules are the following:
+ * 1. Try to resolve identifier in expression context.
+ * 2. Try to resolve identifier in function context, if it is allowed. Example: SELECT func(arguments); Here func identifier cannot be resolved in function context
+ * because query projection does not support that.
+ * 3. Try to resolve identifier in table context, if it is allowed. Example: SELECT table; Here table identifier cannot be resolved in function context
+ * because query projection does not support that.
+ *
+ * TODO: This was not supported properly before, because matchers could not be resolved from aliases.
+ *
+ * Identifiers are resolved with the following rules:
+ * Resolution starts with current scope.
+ * 1. Try to resolve identifier from expression scope arguments. Lambda expression arguments have the greatest priority.
+ * 2. Try to resolve identifier from aliases.
+ * 3. Try to resolve identifier from join tree if scope is query, or if there are registered table columns in scope.
+ * Steps 2 and 3 can be changed using prefer_column_name_to_alias setting.
+ * 4. If it is table lookup, try to resolve identifier from CTE.
+ * If identifier could not be resolved in current scope, resolution must be continued in parent scopes.
+ * 5. Try to resolve identifier from parent scopes.
+ *
+ * Additional rules about aliases and scopes.
+ * 1. Parent scope cannot refer to an alias from child scope.
+ * 2. Child scope can refer to an alias in parent scope.
+ *
+ * Example: SELECT arrayMap(x -> x + 1 AS a, [1,2,3]), a; Identifier a is unknown in parent scope.
+ * Example: SELECT a FROM (SELECT 1 as a); Here we do not refer to alias a from child query scope. But we query its projection result, similar to tables.
+ * Example: WITH 1 as a SELECT (SELECT a) as b; Here in child scope identifier a is resolved using alias from parent scope.
+ *
+ * Additional rules about identifier binding.
+ * Binding an identifier to an entity means that the identifier's first part matches some node during analysis.
+ * If other parts of identifier cannot be resolved in that node, exception must be thrown.
+ *
+ * Example:
+ * CREATE TABLE test_table (id UInt64, compound_value Tuple(value UInt64)) ENGINE=TinyLog;
+ * SELECT compound_value.value, 1 AS compound_value FROM test_table;
+ * Identifier first part compound_value is bound to entity with alias compound_value, but nested identifier part cannot be resolved from entity,
+ * lookup should not be continued, and exception must be thrown because if lookup continues that way identifier can be resolved from join tree.
+ *
+ * TODO: This was not supported properly before analyzer because nested identifier could not be resolved from alias.
+ * + * More complex example: + * CREATE TABLE test_table (id UInt64, value UInt64) ENGINE=TinyLog; + * WITH cast(('Value'), 'Tuple (value UInt64') AS value SELECT (SELECT value FROM test_table); + * Identifier first part value bound to test_table column value, but nested identifier part cannot be resolved from it, + * lookup should not be continued, and exception must be thrown because if lookup continues identifier can be resolved from parent scope. + * + * TODO: Update exception messages + * TODO: JOIN TREE subquery constant columns + * TODO: Table identifiers with optional UUID. + * TODO: Lookup functions arrayReduce(sum, [1, 2, 3]); + * TODO: SELECT (compound_expression).*, (compound_expression).COLUMNS are not supported on parser level. + * TODO: SELECT a.b.c.*, a.b.c.COLUMNS. Qualified matcher where identifier size is greater than 2 are not supported on parser level. + * TODO: Support function identifier resolve from parent query scope, if lambda in parent scope does not capture any columns. + * TODO: Support group_by_use_nulls. + * TODO: Scalar subqueries cache. + */ + +namespace +{ + +/// Identifier lookup context +enum class IdentifierLookupContext : uint8_t +{ + EXPRESSION = 0, + FUNCTION, + TABLE_EXPRESSION, +}; + +const char * toString(IdentifierLookupContext identifier_lookup_context) +{ + switch (identifier_lookup_context) + { + case IdentifierLookupContext::EXPRESSION: return "EXPRESSION"; + case IdentifierLookupContext::FUNCTION: return "FUNCTION"; + case IdentifierLookupContext::TABLE_EXPRESSION: return "TABLE_EXPRESSION"; + } +} + +const char * toStringLowercase(IdentifierLookupContext identifier_lookup_context) +{ + switch (identifier_lookup_context) + { + case IdentifierLookupContext::EXPRESSION: return "expression"; + case IdentifierLookupContext::FUNCTION: return "function"; + case IdentifierLookupContext::TABLE_EXPRESSION: return "table expression"; + } +} + +/** Structure that represent identifier lookup during query analysis. + * Lookup can be in query expression, function, table context. 
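+ * For example, in SELECT id FROM test_table identifier `id` is looked up in expression context,
+ * and identifier `test_table` is looked up in table expression context.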
+ */ +struct IdentifierLookup +{ + Identifier identifier; + IdentifierLookupContext lookup_context; + + bool isExpressionLookup() const + { + return lookup_context == IdentifierLookupContext::EXPRESSION; + } + + bool isFunctionLookup() const + { + return lookup_context == IdentifierLookupContext::FUNCTION; + } + + bool isTableExpressionLookup() const + { + return lookup_context == IdentifierLookupContext::TABLE_EXPRESSION; + } + + String dump() const + { + return identifier.getFullName() + ' ' + toString(lookup_context); + } +}; + +inline bool operator==(const IdentifierLookup & lhs, const IdentifierLookup & rhs) +{ + return lhs.identifier.getFullName() == rhs.identifier.getFullName() && lhs.lookup_context == rhs.lookup_context; +} + +[[maybe_unused]] inline bool operator!=(const IdentifierLookup & lhs, const IdentifierLookup & rhs) +{ + return !(lhs == rhs); +} + +struct IdentifierLookupHash +{ + size_t operator()(const IdentifierLookup & identifier_lookup) const + { + return std::hash()(identifier_lookup.identifier.getFullName()) ^ static_cast(identifier_lookup.lookup_context); + } +}; + +enum class IdentifierResolvePlace : UInt8 +{ + NONE = 0, + EXPRESSION_ARGUMENTS, + ALIASES, + JOIN_TREE, + /// Valid only for table lookup + CTE, + /// Valid only for table lookup + DATABASE_CATALOG +}; + +const char * toString(IdentifierResolvePlace resolved_identifier_place) +{ + switch (resolved_identifier_place) + { + case IdentifierResolvePlace::NONE: return "NONE"; + case IdentifierResolvePlace::EXPRESSION_ARGUMENTS: return "EXPRESSION_ARGUMENTS"; + case IdentifierResolvePlace::ALIASES: return "ALIASES"; + case IdentifierResolvePlace::JOIN_TREE: return "JOIN_TREE"; + case IdentifierResolvePlace::CTE: return "CTE"; + case IdentifierResolvePlace::DATABASE_CATALOG: return "DATABASE_CATALOG"; + } +} + +struct IdentifierResolveResult +{ + IdentifierResolveResult() = default; + + QueryTreeNodePtr resolved_identifier; + IdentifierResolvePlace resolve_place = IdentifierResolvePlace::NONE; + bool resolved_from_parent_scopes = false; + + [[maybe_unused]] bool isResolved() const + { + return resolve_place != IdentifierResolvePlace::NONE; + } + + [[maybe_unused]] bool isResolvedFromParentScopes() const + { + return resolved_from_parent_scopes; + } + + [[maybe_unused]] bool isResolvedFromExpressionArguments() const + { + return resolve_place == IdentifierResolvePlace::EXPRESSION_ARGUMENTS; + } + + [[maybe_unused]] bool isResolvedFromAliases() const + { + return resolve_place == IdentifierResolvePlace::ALIASES; + } + + [[maybe_unused]] bool isResolvedFromJoinTree() const + { + return resolve_place == IdentifierResolvePlace::JOIN_TREE; + } + + [[maybe_unused]] bool isResolvedFromCTEs() const + { + return resolve_place == IdentifierResolvePlace::CTE; + } + + void dump(WriteBuffer & buffer) const + { + if (!resolved_identifier) + { + buffer << "unresolved"; + return; + } + + buffer << resolved_identifier->formatASTForErrorMessage() << " place " << toString(resolve_place) << " resolved from parent scopes " << resolved_from_parent_scopes; + } + + [[maybe_unused]] String dump() const + { + WriteBufferFromOwnString buffer; + dump(buffer); + + return buffer.str(); + } +}; + +struct IdentifierResolveSettings +{ + /// Allow to check parent scopes during identifier resolution + bool allow_to_check_parent_scopes = true; + + /// Allow to check join tree during identifier resolution + bool allow_to_check_join_tree = true; + + /// Allow to check CTEs during table identifier resolution + bool allow_to_check_cte = true; + + /// 
Allow to check database catalog during table identifier resolution + bool allow_to_check_database_catalog = true; + + /// Allow to resolve subquery during identifier resolution + bool allow_to_resolve_subquery_during_identifier_resolution = true; +}; + +struct StringTransparentHash +{ + using is_transparent = void; + using hash = std::hash; + + [[maybe_unused]] size_t operator()(const char * data) const + { + return hash()(data); + } + + size_t operator()(std::string_view data) const + { + return hash()(data); + } + + size_t operator()(const std::string & data) const + { + return hash()(data); + } +}; + +using ColumnNameToColumnNodeMap = std::unordered_map>; + +struct TableExpressionData +{ + std::string table_expression_name; + std::string table_expression_description; + std::string table_name; + std::string database_name; + ColumnNameToColumnNodeMap column_name_to_column_node; + std::unordered_set> column_identifier_first_parts; + + bool hasFullIdentifierName(IdentifierView identifier) const + { + return column_name_to_column_node.contains(std::string_view(identifier.getFullName())); + } + + bool canBindIdentifier(IdentifierView identifier) const + { + return column_identifier_first_parts.contains(std::string_view(identifier.at(0))); + } + + [[maybe_unused]] void dump(WriteBuffer & buffer) const + { + buffer << "Columns size " << column_name_to_column_node.size() << '\n'; + + for (const auto & [column_name, column_node] : column_name_to_column_node) + buffer << "Column name " << column_name << " column node " << column_node->dumpTree() << '\n'; + } + + [[maybe_unused]] String dump() const + { + WriteBufferFromOwnString buffer; + dump(buffer); + + return buffer.str(); + } +}; + +class ExpressionsStack +{ +public: + void pushNode(const QueryTreeNodePtr & node) + { + if (node->hasAlias()) + { + expressions.emplace_back(node.get(), node->getAlias()); + ++alias_name_to_expressions_size[expressions.back().second]; + return; + } + + expressions.emplace_back(node.get(), std::string()); + } + + void popNode() + { + const auto & [_, top_expression_alias] = expressions.back(); + if (!top_expression_alias.empty()) + { + auto it = alias_name_to_expressions_size.find(top_expression_alias); + --it->second; + + if (it->second == 0) + alias_name_to_expressions_size.erase(it); + } + + expressions.pop_back(); + } + + const IQueryTreeNode * getRoot() const + { + if (expressions.empty()) + return nullptr; + + return expressions.front().first; + } + + const IQueryTreeNode * getTop() const + { + if (expressions.empty()) + return nullptr; + + return expressions.back().first; + } + + bool hasExpressionWithAlias(const std::string & alias) const + { + return alias_name_to_expressions_size.find(alias) != alias_name_to_expressions_size.end(); + } + + [[maybe_unused]] size_t size() const + { + return expressions.size(); + } + + bool empty() const + { + return expressions.empty(); + } + + void dump(WriteBuffer & buffer) const + { + buffer << expressions.size() << '\n'; + + for (const auto & [expression, alias] : expressions) + { + buffer << "Expression "; + buffer << expression->formatASTForErrorMessage(); + + if (!alias.empty()) + buffer << " alias " << alias; + + buffer << '\n'; + } + } + + [[maybe_unused]] String dump() const + { + WriteBufferFromOwnString buffer; + dump(buffer); + + return buffer.str(); + } + +private: + std::vector> expressions; + std::unordered_map alias_name_to_expressions_size; +}; + +/** Projection names is name of query tree node that is used in projection part of query node. 
+ * Example: SELECT id FROM test_table;
+ * `id` is projection name of column node
+ *
+ * Example: SELECT id AS id_alias FROM test_table;
+ * `id_alias` is projection name of column node
+ *
+ * Calculation of projection names is done during expression nodes resolution. This is done this way
+ * because after identifier node is resolved we lose information about identifier name. We could
+ * potentially save this information in query tree node itself, but that would require to clone it in some cases.
+ * Example: SELECT big_scalar_subquery AS a, a AS b, b AS c;
+ * All 3 nodes in projection are the same big_scalar_subquery, but they have different projection names.
+ * If we want to save it in query tree node, we have to clone subquery node, which could lead to performance degradation.
+ *
+ * Possible solution is to separate query node metadata and query node content. So only node metadata could be cloned
+ * if we want to change projection name. This solution does not seem to be easy for client of query tree because projection
+ * name will be part of interface. If we could potentially hide projection names calculation in analyzer without introducing additional
+ * changes in query tree structure, that would be preferable.
+ *
+ * Currently each resolve method returns projection names array. Resolve method must compute projection names of node.
+ * If node is resolved as list node (this is the case for `untuple` function or `matcher`), result projection names array must contain projection names
+ * for result nodes.
+ * If node is not resolved as list node, projection names array contains single projection name for node.
+ *
+ * Rules for projection names:
+ * 1. If node has alias, it is node projection name.
+ * Except scenario where `untuple` function has alias. Example: SELECT untuple(expr) AS alias, alias.
+ *
+ * 2. For constant it is constant value string representation.
+ *
+ * 3. For identifier:
+ * If identifier is resolved from JOIN TREE, we want to remove additional identifier qualifications.
+ * Example: SELECT default.test_table.id FROM test_table.
+ * Result projection name is `id`.
+ *
+ * Example: SELECT t1.id FROM test_table_1 AS t1, test_table_2 AS t2
+ * In example both test_table_1, test_table_2 have `id` column.
+ * In such case projection name is `t1.id` because if additional qualification is removed then column projection name `id` will be ambiguous.
+ *
+ * Example: SELECT default.test_table_1.id FROM test_table_1 AS t1, test_table_2 AS t2
+ * In such case projection name is `test_table_1.id` because we remove unnecessary database qualification, but table name qualification cannot be removed
+ * because otherwise column projection name `id` will be ambiguous.
+ *
+ * If identifier is not resolved from JOIN TREE, identifier name is projection name.
+ * Except scenario where `untuple` function is resolved using identifier. Example: SELECT untuple(expr) AS alias, alias.
+ * Example: SELECT sum(1, 1) AS value, value.
+ * In such case both nodes have `value` projection names.
+ *
+ * Example: SELECT id AS value, value FROM test_table.
+ * In such case both nodes have `value` projection names.
+ *
+ * Special case is `untuple` function. If `untuple` function is specified with alias, then result nodes will have alias.tuple_column_name projection names.
+ * Example: SELECT cast(tuple(1), 'Tuple(id UInt64)') AS value, untuple(value) AS a;
+ * Result projection names are `value`, `a.id`.
+ * + * If `untuple` function does not have alias then result nodes will have `tupleElement(untuple_expression_projection_name, 'tuple_column_name') projection names. + * + * Example: SELECT cast(tuple(1), 'Tuple(id UInt64)') AS value, untuple(value); + * Result projection names are `value`, `tupleElement(value, 'id')`; + * + * 4. For function: + * Projection name consists from function_name(parameters_projection_names)(arguments_projection_names). + * Additionally if function is window function. Window node projection name is used with OVER clause. + * Example: function_name (parameters_names)(argument_projection_names) OVER window_name; + * Example: function_name (parameters_names)(argument_projection_names) OVER (PARTITION BY id ORDER BY id). + * Example: function_name (parameters_names)(argument_projection_names) OVER (window_name ORDER BY id). + * + * 5. For lambda: + * If it is standalone lambda that returns single expression, function projection name is used. + * Example: WITH (x -> x + 1) AS lambda SELECT lambda(1). + * Projection name is `lambda(1)`. + * + * If is it standalone lambda that returns list, projection names of list nodes are used. + * Example: WITH (x -> *) AS lambda SELECT lambda(1) FROM test_table; + * If test_table has two columns `id`, `value`. Then result projection names are `id`, `value`. + * + * If lambda is argument of function. + * Then projection name consists from lambda(tuple(lambda_arguments)(lambda_body_projection_name)); + * + * 6. For matcher: + * Matched nodes projection names are used as matcher projection names. + * + * Matched nodes must be qualified if needed. + * Example: SELECT * FROM test_table_1 AS t1, test_table_2 AS t2. + * In example table test_table_1 and test_table_2 both have `id`, `value` columns. + * Matched nodes after unqualified matcher resolve must be qualified to avoid ambiguous projection names. + * Result projection names must be `t1.id`, `t1.value`, `t2.id`, `t2.value`. + * + * There are special cases + * 1. For lambda inside APPLY matcher transformer: + * Example: SELECT * APPLY x -> toString(x) FROM test_table. + * In such case lambda argument projection name `x` will be replaced by matched node projection name. + * If table has two columns `id` and `value`. Then result projection names are `toString(id)`, `toString(value)`; + * + * 2. For unqualified matcher when JOIN tree contains JOIN with USING. + * Example: SELECT * FROM test_table_1 AS t1 INNER JOIN test_table_2 AS t2 USING(id); + * Result projection names must be `id`, `t1.value`, `t2.value`. + * + * 7. For subquery: + * For subquery projection name consists of `_subquery_` prefix and implementation specific unique number suffix. + * Example: SELECT (SELECT 1), (SELECT 1 UNION DISTINCT SELECT 1); + * Result projection name can be `_subquery_1`, `subquery_2`; + * + * 8. For table: + * Table node can be used in expression context only as right argument of IN function. In that case identifier is used + * as table node projection name. + * Example: SELECT id IN test_table FROM test_table; + * Result projection name is `in(id, test_table)`. 
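+ *
+ * For illustration, combining rules 1 and 4: for SELECT plus(1, id) AS x FROM test_table the projection name is `x`,
+ * and without the alias it would presumably be `plus(1, id)`.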
+ */ +using ProjectionName = String; +using ProjectionNames = std::vector; +constexpr auto PROJECTION_NAME_PLACEHOLDER = "__projection_name_placeholder"; + +struct IdentifierResolveScope +{ + /// Construct identifier resolve scope using scope node, and parent scope + IdentifierResolveScope(QueryTreeNodePtr scope_node_, IdentifierResolveScope * parent_scope_) + : scope_node(std::move(scope_node_)) + , parent_scope(parent_scope_) + { + if (parent_scope) + { + subquery_depth = parent_scope->subquery_depth; + context = parent_scope->context; + } + } + + QueryTreeNodePtr scope_node; + + IdentifierResolveScope * parent_scope = nullptr; + + ContextPtr context; + + /// Identifier lookup to result + std::unordered_map identifier_lookup_to_result; + + /// Lambda argument can be expression like constant, column, or it can be function + std::unordered_map expression_argument_name_to_node; + + /// Alias name to query expression node + std::unordered_map alias_name_to_expression_node; + + /// Alias name to lambda node + std::unordered_map alias_name_to_lambda_node; + + /// Alias name to table expression node + std::unordered_map alias_name_to_table_expression_node; + + /// Table column name to column node. Valid only during table ALIAS columns resolve. + ColumnNameToColumnNodeMap column_name_to_column_node; + + /// CTE name to query node + std::unordered_map cte_name_to_query_node; + + /// Window name to window node + std::unordered_map window_name_to_window_node; + + /// Nodes with duplicated aliases + std::unordered_set nodes_with_duplicated_aliases; + + /// Current scope expression in resolve process stack + ExpressionsStack expressions_in_resolve_process_stack; + + /// Table expressions in resolve process + std::unordered_set table_expressions_in_resolve_process; + + /// Current scope expression + std::unordered_set non_cached_identifier_lookups_during_expression_resolve; + + /// Table expression node to data + std::unordered_map table_expression_node_to_data; + + /// Use identifier lookup to result cache + bool use_identifier_lookup_to_result_cache = true; + + /// Subquery depth + size_t subquery_depth = 0; + + /** Scope join tree node for expression. + * Valid only during analysis construction for single expression. + */ + QueryTreeNodePtr expression_join_tree_node; + + [[maybe_unused]] const IdentifierResolveScope * getNearestQueryScope() const + { + const IdentifierResolveScope * scope_to_check = this; + while (scope_to_check != nullptr) + { + if (scope_to_check->scope_node->getNodeType() == QueryTreeNodeType::QUERY) + break; + + scope_to_check = scope_to_check->parent_scope; + } + + return scope_to_check; + } + + IdentifierResolveScope * getNearestQueryScope() + { + IdentifierResolveScope * scope_to_check = this; + while (scope_to_check != nullptr) + { + if (scope_to_check->scope_node->getNodeType() == QueryTreeNodeType::QUERY) + break; + + scope_to_check = scope_to_check->parent_scope; + } + + return scope_to_check; + } + + TableExpressionData & getTableExpressionDataOrThrow(QueryTreeNodePtr table_expression_node) + { + auto it = table_expression_node_to_data.find(table_expression_node); + if (it == table_expression_node_to_data.end()) + { + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Table expression {} data must be initialized. 
In scope {}", + table_expression_node->formatASTForErrorMessage(), + scope_node->formatASTForErrorMessage()); + } + + return it->second; + } + + /// Dump identifier resolve scope + [[maybe_unused]] void dump(WriteBuffer & buffer) const + { + buffer << "Scope node " << scope_node->formatASTForErrorMessage() << '\n'; + buffer << "Identifier lookup to result " << identifier_lookup_to_result.size() << '\n'; + for (const auto & [identifier, result] : identifier_lookup_to_result) + { + buffer << "Identifier " << identifier.dump() << " resolve result "; + result.dump(buffer); + buffer << '\n'; + } + + buffer << "Expression argument name to node " << expression_argument_name_to_node.size() << '\n'; + for (const auto & [alias_name, node] : expression_argument_name_to_node) + buffer << "Alias name " << alias_name << " node " << node->formatASTForErrorMessage() << '\n'; + + buffer << "Alias name to expression node table size " << alias_name_to_expression_node.size() << '\n'; + for (const auto & [alias_name, node] : alias_name_to_expression_node) + buffer << "Alias name " << alias_name << " expression node " << node->dumpTree() << '\n'; + + buffer << "Alias name to function node table size " << alias_name_to_lambda_node.size() << '\n'; + for (const auto & [alias_name, node] : alias_name_to_lambda_node) + buffer << "Alias name " << alias_name << " lambda node " << node->formatASTForErrorMessage() << '\n'; + + buffer << "Alias name to table expression node table size " << alias_name_to_table_expression_node.size() << '\n'; + for (const auto & [alias_name, node] : alias_name_to_table_expression_node) + buffer << "Alias name " << alias_name << " node " << node->formatASTForErrorMessage() << '\n'; + + buffer << "CTE name to query node table size " << cte_name_to_query_node.size() << '\n'; + for (const auto & [cte_name, node] : cte_name_to_query_node) + buffer << "CTE name " << cte_name << " node " << node->formatASTForErrorMessage() << '\n'; + + buffer << "WINDOW name to window node table size " << window_name_to_window_node.size() << '\n'; + for (const auto & [window_name, node] : window_name_to_window_node) + buffer << "CTE name " << window_name << " node " << node->formatASTForErrorMessage() << '\n'; + + buffer << "Nodes with duplicated aliases size " << nodes_with_duplicated_aliases.size() << '\n'; + for (const auto & node : nodes_with_duplicated_aliases) + buffer << "Alias name " << node->getAlias() << " node " << node->formatASTForErrorMessage() << '\n'; + + buffer << "Expression resolve process stack " << '\n'; + expressions_in_resolve_process_stack.dump(buffer); + + buffer << "Table expressions in resolve process size " << table_expressions_in_resolve_process.size() << '\n'; + for (const auto & node : table_expressions_in_resolve_process) + buffer << "Table expression " << node->formatASTForErrorMessage() << '\n'; + + buffer << "Non cached identifier lookups during expression resolve " << non_cached_identifier_lookups_during_expression_resolve.size() << '\n'; + for (const auto & identifier_lookup : non_cached_identifier_lookups_during_expression_resolve) + buffer << "Identifier lookup " << identifier_lookup.dump() << '\n'; + + buffer << "Table expression node to data " << table_expression_node_to_data.size() << '\n'; + for (const auto & [table_expression_node, table_expression_data] : table_expression_node_to_data) + buffer << "Table expression node " << table_expression_node->formatASTForErrorMessage() << " data " << table_expression_data.dump() << '\n'; + + buffer << "Use identifier lookup to 
result cache " << use_identifier_lookup_to_result_cache << '\n'; + buffer << "Subquery depth " << subquery_depth << '\n'; + } + + [[maybe_unused]] String dump() const + { + WriteBufferFromOwnString buffer; + dump(buffer); + + return buffer.str(); + } +}; + + +/** Visitor that extracts expression and function aliases from node and initialize scope tables with it. + * Does not go into child lambdas and queries. + * + * Important: + * Identifier nodes with aliases are added both in alias to expression and alias to function map. + * + * These is necessary because identifier with alias can give alias name to any query tree node. + * + * Example: + * WITH (x -> x + 1) AS id, id AS value SELECT value(1); + * In this example id as value is identifier node that has alias, during scope initialization we cannot derive + * that id is actually lambda or expression. + * + * There are no easy solution here, without trying to make full featured expression resolution at this stage. + * Example: + * WITH (x -> x + 1) AS id, id AS id_1, id_1 AS id_2 SELECT id_2(1); + * Example: SELECT a, b AS a, b AS c, 1 AS c; + * + * It is client responsibility after resolving identifier node with alias, make following actions: + * 1. If identifier node was resolved in function scope, remove alias from scope expression map. + * 2. If identifier node was resolved in expression scope, remove alias from scope function map. + * + * That way we separate alias map initialization and expressions resolution. + */ +class QueryExpressionsAliasVisitor : public InDepthQueryTreeVisitor +{ +public: + explicit QueryExpressionsAliasVisitor(IdentifierResolveScope & scope_) + : scope(scope_) + {} + + void visitImpl(QueryTreeNodePtr & node) + { + updateAliasesIfNeeded(node, false /*is_lambda_node*/); + } + + bool needChildVisit(const QueryTreeNodePtr &, const QueryTreeNodePtr & child) + { + if (auto * lambda_node = child->as()) + { + updateAliasesIfNeeded(child, true /*is_lambda_node*/); + return false; + } + else if (auto * query_tree_node = child->as()) + { + if (query_tree_node->isCTE()) + return false; + + updateAliasesIfNeeded(child, false /*is_lambda_node*/); + return false; + } + else if (auto * union_node = child->as()) + { + if (union_node->isCTE()) + return false; + + updateAliasesIfNeeded(child, false /*is_lambda_node*/); + return false; + } + + return true; + } +private: + void updateAliasesIfNeeded(const QueryTreeNodePtr & node, bool is_lambda_node) + { + if (!node->hasAlias()) + return; + + const auto & alias = node->getAlias(); + + if (is_lambda_node) + { + if (scope.alias_name_to_expression_node.contains(alias)) + scope.nodes_with_duplicated_aliases.insert(node); + + auto [_, inserted] = scope.alias_name_to_lambda_node.insert(std::make_pair(alias, node)); + if (!inserted) + scope.nodes_with_duplicated_aliases.insert(node); + + return; + } + + if (scope.alias_name_to_lambda_node.contains(alias)) + scope.nodes_with_duplicated_aliases.insert(node); + + auto [_, inserted] = scope.alias_name_to_expression_node.insert(std::make_pair(alias, node)); + if (!inserted) + scope.nodes_with_duplicated_aliases.insert(node); + + /// If node is identifier put it also in scope alias name to lambda node map + if (node->getNodeType() == QueryTreeNodeType::IDENTIFIER) + scope.alias_name_to_lambda_node.insert(std::make_pair(alias, node)); + } + + IdentifierResolveScope & scope; +}; + +class TableExpressionsAliasVisitor : public InDepthQueryTreeVisitor +{ +public: + explicit TableExpressionsAliasVisitor(IdentifierResolveScope & scope_) + : 
scope(scope_) + {} + + void visitImpl(QueryTreeNodePtr & node) + { + updateAliasesIfNeeded(node); + } + + static bool needChildVisit(const QueryTreeNodePtr & node, const QueryTreeNodePtr & child) + { + auto node_type = node->getNodeType(); + + switch (node_type) + { + case QueryTreeNodeType::ARRAY_JOIN: + { + const auto & array_join_node = node->as(); + return child.get() == array_join_node.getTableExpression().get(); + } + case QueryTreeNodeType::JOIN: + { + const auto & join_node = node->as(); + return child.get() == join_node.getLeftTableExpression().get() || child.get() == join_node.getRightTableExpression().get(); + } + default: + { + break; + } + } + + return false; + } + +private: + void updateAliasesIfNeeded(const QueryTreeNodePtr & node) + { + if (!node->hasAlias()) + return; + + const auto & node_alias = node->getAlias(); + auto [_, inserted] = scope.alias_name_to_table_expression_node.emplace(node_alias, node); + if (!inserted) + throw Exception(ErrorCodes::MULTIPLE_EXPRESSIONS_FOR_ALIAS, + "Multiple table expressions with same alias {}. In scope {}", + node_alias, + scope.scope_node->formatASTForErrorMessage()); + } + + IdentifierResolveScope & scope; +}; + +class QueryAnalyzer +{ +public: + void resolve(QueryTreeNodePtr node, const QueryTreeNodePtr & table_expression, ContextPtr context) + { + IdentifierResolveScope scope(node, nullptr /*parent_scope*/); + scope.context = context; + + auto node_type = node->getNodeType(); + + switch (node_type) + { + case QueryTreeNodeType::QUERY: + { + if (table_expression) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "For query analysis table expression must be empty"); + + resolveQuery(node, scope); + break; + } + case QueryTreeNodeType::UNION: + { + if (table_expression) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "For union analysis table expression must be empty"); + + resolveUnion(node, scope); + break; + } + case QueryTreeNodeType::IDENTIFIER: + [[fallthrough]]; + case QueryTreeNodeType::CONSTANT: + [[fallthrough]]; + case QueryTreeNodeType::COLUMN: + [[fallthrough]]; + case QueryTreeNodeType::FUNCTION: + [[fallthrough]]; + case QueryTreeNodeType::LIST: + { + if (!table_expression) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "For expression analysis table expression must not be empty"); + + scope.expression_join_tree_node = table_expression; + validateTableExpressionModifiers(scope.expression_join_tree_node, scope); + initializeTableExpressionColumns(scope.expression_join_tree_node, scope); + + if (node_type == QueryTreeNodeType::LIST) + resolveExpressionNodeList(node, scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/); + else + resolveExpressionNode(node, scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/); + + break; + } + default: + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Node {} with type {} is not supported by query analyzer. 
Supported nodes are query, union, identifier, constant, column, function, list.", + node->formatASTForErrorMessage(), + node->getNodeTypeName()); + } + } + } + +private: + /// Utility functions + + static bool isExpressionNodeType(QueryTreeNodeType node_type); + + static bool isFunctionExpressionNodeType(QueryTreeNodeType node_type); + + static bool isTableExpressionNodeType(QueryTreeNodeType node_type); + + static ProjectionName calculateFunctionProjectionName(const QueryTreeNodePtr & function_node, + const ProjectionNames & parameters_projection_names, + const ProjectionNames & arguments_projection_names); + + static ProjectionName calculateWindowProjectionName(const QueryTreeNodePtr & window_node, + const QueryTreeNodePtr & parent_window_node, + const String & parent_window_name, + const ProjectionNames & partition_by_projection_names, + const ProjectionNames & order_by_projection_names, + const ProjectionName & frame_begin_offset_projection_name, + const ProjectionName & frame_end_offset_projection_name); + + static ProjectionName calculateSortColumnProjectionName(const QueryTreeNodePtr & sort_column_node, + const ProjectionName & sort_expression_projection_name, + const ProjectionName & fill_from_expression_projection_name, + const ProjectionName & fill_to_expression_projection_name, + const ProjectionName & fill_step_expression_projection_name); + + static QueryTreeNodePtr wrapExpressionNodeInTupleElement(QueryTreeNodePtr expression_node, IdentifierView nested_path); + + static QueryTreeNodePtr tryGetLambdaFromSQLUserDefinedFunctions(const std::string & function_name, ContextPtr context); + + static void evaluateScalarSubqueryIfNeeded(QueryTreeNodePtr & query_tree_node, size_t subquery_depth, ContextPtr context); + + static void mergeWindowWithParentWindow(const QueryTreeNodePtr & window_node, const QueryTreeNodePtr & parent_window_node, IdentifierResolveScope & scope); + + static void replaceNodesWithPositionalArguments(QueryTreeNodePtr & node_list, const QueryTreeNodes & projection_nodes, IdentifierResolveScope & scope); + + static void validateLimitOffsetExpression(QueryTreeNodePtr & expression_node, const String & expression_description, IdentifierResolveScope & scope); + + static void validateTableExpressionModifiers(const QueryTreeNodePtr & table_expression_node, IdentifierResolveScope & scope); + + static void validateJoinTableExpressionWithoutAlias(const QueryTreeNodePtr & join_node, const QueryTreeNodePtr & table_expression_node, IdentifierResolveScope & scope); + + /// Resolve identifier functions + + static QueryTreeNodePtr tryResolveTableIdentifierFromDatabaseCatalog(const Identifier & table_identifier, ContextPtr context); + + QueryTreeNodePtr tryResolveIdentifierFromExpressionArguments(const IdentifierLookup & identifier_lookup, IdentifierResolveScope & scope); + + static bool tryBindIdentifierToAliases(const IdentifierLookup & identifier_lookup, IdentifierResolveScope & scope); + + QueryTreeNodePtr tryResolveIdentifierFromAliases(const IdentifierLookup & identifier_lookup, IdentifierResolveScope & scope, IdentifierResolveSettings identifier_resolve_settings = {}); + + QueryTreeNodePtr tryResolveIdentifierFromTableColumns(const IdentifierLookup & identifier_lookup, IdentifierResolveScope & scope); + + static bool tryBindIdentifierToTableExpression(const IdentifierLookup & identifier_lookup, const QueryTreeNodePtr & table_expression_node, IdentifierResolveScope & scope); + + QueryTreeNodePtr tryResolveIdentifierFromTableExpression(const IdentifierLookup & 
identifier_lookup, const QueryTreeNodePtr & table_expression_node, IdentifierResolveScope & scope); + + QueryTreeNodePtr tryResolveIdentifierFromJoin(const IdentifierLookup & identifier_lookup, const QueryTreeNodePtr & table_expression_node, IdentifierResolveScope & scope); + + QueryTreeNodePtr tryResolveIdentifierFromArrayJoin(const IdentifierLookup & identifier_lookup, const QueryTreeNodePtr & table_expression_node, IdentifierResolveScope & scope); + + QueryTreeNodePtr tryResolveIdentifierFromJoinTreeNode(const IdentifierLookup & identifier_lookup, const QueryTreeNodePtr & join_tree_node, IdentifierResolveScope & scope); + + QueryTreeNodePtr tryResolveIdentifierFromJoinTree(const IdentifierLookup & identifier_lookup, IdentifierResolveScope & scope); + + IdentifierResolveResult tryResolveIdentifierInParentScopes(const IdentifierLookup & identifier_lookup, IdentifierResolveScope & scope); + + IdentifierResolveResult tryResolveIdentifier(const IdentifierLookup & identifier_lookup, IdentifierResolveScope & scope, IdentifierResolveSettings identifier_resolve_settings = {}); + + /// Resolve query tree nodes functions + + using QueryTreeNodesWithNames = std::vector>; + + void qualifyMatchedColumnsProjectionNamesIfNeeded(QueryTreeNodesWithNames & matched_nodes_with_column_names, + const QueryTreeNodePtr & table_expression_node, + IdentifierResolveScope & scope); + + QueryTreeNodesWithNames resolveQualifiedMatcher(QueryTreeNodePtr & matcher_node, IdentifierResolveScope & scope); + + QueryTreeNodesWithNames resolveUnqualifiedMatcher(QueryTreeNodePtr & matcher_node, IdentifierResolveScope & scope); + + ProjectionNames resolveMatcher(QueryTreeNodePtr & matcher_node, IdentifierResolveScope & scope); + + ProjectionName resolveWindow(QueryTreeNodePtr & window_node, IdentifierResolveScope & scope); + + ProjectionNames resolveLambda(const QueryTreeNodePtr & lambda_node, + const QueryTreeNodePtr & lambda_node_to_resolve, + const QueryTreeNodes & lambda_arguments, + IdentifierResolveScope & scope); + + ProjectionNames resolveFunction(QueryTreeNodePtr & function_node, IdentifierResolveScope & scope); + + ProjectionNames resolveExpressionNode(QueryTreeNodePtr & node, IdentifierResolveScope & scope, bool allow_lambda_expression, bool allow_table_expression); + + ProjectionNames resolveExpressionNodeList(QueryTreeNodePtr & node_list, IdentifierResolveScope & scope, bool allow_lambda_expression, bool allow_table_expression); + + ProjectionNames resolveSortNodeList(QueryTreeNodePtr & sort_node_list, IdentifierResolveScope & scope); + + void resolveInterpolateColumnsNodeList(QueryTreeNodePtr & interpolate_node_list, IdentifierResolveScope & scope); + + void resolveWindowNodeList(QueryTreeNodePtr & window_node_list, IdentifierResolveScope & scope); + + NamesAndTypes resolveProjectionExpressionNodeList(QueryTreeNodePtr & projection_node_list, IdentifierResolveScope & scope); + + void initializeQueryJoinTreeNode(QueryTreeNodePtr & join_tree_node, IdentifierResolveScope & scope); + + void initializeTableExpressionColumns(const QueryTreeNodePtr & table_expression_node, IdentifierResolveScope & scope); + + void resolveQueryJoinTreeNode(QueryTreeNodePtr & join_tree_node, IdentifierResolveScope & scope, QueryExpressionsAliasVisitor & expressions_visitor); + + void resolveQuery(const QueryTreeNodePtr & query_node, IdentifierResolveScope & scope); + + void resolveUnion(const QueryTreeNodePtr & union_node, IdentifierResolveScope & scope); + + /// Lambdas that are currently in resolve process + std::unordered_set 
lambdas_in_resolve_process; + + /// Array join expressions counter + size_t array_join_expressions_counter = 0; + + /// Subquery counter + size_t subquery_counter = 0; + + /// Global expression node to projection name map + std::unordered_map node_to_projection_name; + + /// Global resolve expression node to projection names map + std::unordered_map resolved_expressions; + +}; + +/// Utility functions implementation + + +bool QueryAnalyzer::isExpressionNodeType(QueryTreeNodeType node_type) +{ + return node_type == QueryTreeNodeType::CONSTANT || node_type == QueryTreeNodeType::COLUMN || node_type == QueryTreeNodeType::FUNCTION + || node_type == QueryTreeNodeType::QUERY || node_type == QueryTreeNodeType::UNION; +} + +bool QueryAnalyzer::isFunctionExpressionNodeType(QueryTreeNodeType node_type) +{ + return node_type == QueryTreeNodeType::LAMBDA; +} + +bool QueryAnalyzer::isTableExpressionNodeType(QueryTreeNodeType node_type) +{ + return node_type == QueryTreeNodeType::TABLE || node_type == QueryTreeNodeType::TABLE_FUNCTION || + node_type == QueryTreeNodeType::QUERY || node_type == QueryTreeNodeType::UNION; +} + +ProjectionName QueryAnalyzer::calculateFunctionProjectionName(const QueryTreeNodePtr & function_node, const ProjectionNames & parameters_projection_names, + const ProjectionNames & arguments_projection_names) +{ + const auto & function_node_typed = function_node->as(); + + WriteBufferFromOwnString buffer; + buffer << function_node_typed.getFunctionName(); + + if (!parameters_projection_names.empty()) + { + buffer << '('; + + size_t function_parameters_projection_names_size = parameters_projection_names.size(); + for (size_t i = 0; i < function_parameters_projection_names_size; ++i) + { + buffer << parameters_projection_names[i]; + + if (i + 1 != function_parameters_projection_names_size) + buffer << ", "; + } + + buffer << ')'; + } + + buffer << '('; + + size_t function_arguments_projection_names_size = arguments_projection_names.size(); + for (size_t i = 0; i < function_arguments_projection_names_size; ++i) + { + buffer << arguments_projection_names[i]; + + if (i + 1 != function_arguments_projection_names_size) + buffer << ", "; + } + + buffer << ')'; + + return buffer.str(); +} + +ProjectionName QueryAnalyzer::calculateWindowProjectionName(const QueryTreeNodePtr & window_node, + const QueryTreeNodePtr & parent_window_node, + const String & parent_window_name, + const ProjectionNames & partition_by_projection_names, + const ProjectionNames & order_by_projection_names, + const ProjectionName & frame_begin_offset_projection_name, + const ProjectionName & frame_end_offset_projection_name) +{ + const auto & window_node_typed = window_node->as(); + const auto & window_frame = window_node_typed.getWindowFrame(); + + bool parent_window_node_has_partition_by = false; + bool parent_window_node_has_order_by = false; + + if (parent_window_node) + { + const auto & parent_window_node_typed = parent_window_node->as(); + parent_window_node_has_partition_by = parent_window_node_typed.hasPartitionBy(); + parent_window_node_has_order_by = parent_window_node_typed.hasOrderBy(); + } + + WriteBufferFromOwnString buffer; + + if (!parent_window_name.empty()) + buffer << parent_window_name; + + if (!partition_by_projection_names.empty() && !parent_window_node_has_partition_by) + { + if (!parent_window_name.empty()) + buffer << ' '; + + buffer << "PARTITION BY "; + + size_t partition_by_projection_names_size = partition_by_projection_names.size(); + for (size_t i = 0; i < partition_by_projection_names_size; 
++i) + { + buffer << partition_by_projection_names[i]; + if (i + 1 != partition_by_projection_names_size) + buffer << ", "; + } + } + + if (!order_by_projection_names.empty() && !parent_window_node_has_order_by) + { + if (!partition_by_projection_names.empty() || !parent_window_name.empty()) + buffer << ' '; + + buffer << "ORDER BY "; + + size_t order_by_projection_names_size = order_by_projection_names.size(); + for (size_t i = 0; i < order_by_projection_names_size; ++i) + { + buffer << order_by_projection_names[i]; + if (i + 1 != order_by_projection_names_size) + buffer << ", "; + } + } + + if (!window_frame.is_default) + { + if (!partition_by_projection_names.empty() || !order_by_projection_names.empty() || !parent_window_name.empty()) + buffer << ' '; + + buffer << window_frame.type << " BETWEEN "; + if (window_frame.begin_type == WindowFrame::BoundaryType::Current) + { + buffer << "CURRENT ROW"; + } + else if (window_frame.begin_type == WindowFrame::BoundaryType::Unbounded) + { + buffer << "UNBOUNDED"; + buffer << " " << (window_frame.begin_preceding ? "PRECEDING" : "FOLLOWING"); + } + else + { + buffer << frame_begin_offset_projection_name; + buffer << " " << (window_frame.begin_preceding ? "PRECEDING" : "FOLLOWING"); + } + + buffer << " AND "; + + if (window_frame.end_type == WindowFrame::BoundaryType::Current) + { + buffer << "CURRENT ROW"; + } + else if (window_frame.end_type == WindowFrame::BoundaryType::Unbounded) + { + buffer << "UNBOUNDED"; + buffer << " " << (window_frame.end_preceding ? "PRECEDING" : "FOLLOWING"); + } + else + { + buffer << frame_end_offset_projection_name; + buffer << " " << (window_frame.end_preceding ? "PRECEDING" : "FOLLOWING"); + } + } + + return buffer.str(); +} + +ProjectionName QueryAnalyzer::calculateSortColumnProjectionName(const QueryTreeNodePtr & sort_column_node, const ProjectionName & sort_expression_projection_name, + const ProjectionName & fill_from_expression_projection_name, const ProjectionName & fill_to_expression_projection_name, const ProjectionName & fill_step_expression_projection_name) +{ + auto & sort_node_typed = sort_column_node->as(); + + WriteBufferFromOwnString sort_column_projection_name_buffer; + sort_column_projection_name_buffer << sort_expression_projection_name; + + auto sort_direction = sort_node_typed.getSortDirection(); + sort_column_projection_name_buffer << (sort_direction == SortDirection::ASCENDING ? " ASC" : " DESC"); + + auto nulls_sort_direction = sort_node_typed.getNullsSortDirection(); + + if (nulls_sort_direction) + sort_column_projection_name_buffer << " NULLS " << (nulls_sort_direction == sort_direction ? "LAST" : "FIRST"); + + if (auto collator = sort_node_typed.getCollator()) + sort_column_projection_name_buffer << " COLLATE " << collator->getLocale(); + + if (sort_node_typed.withFill()) + { + sort_column_projection_name_buffer << " WITH FILL"; + + if (sort_node_typed.hasFillFrom()) + sort_column_projection_name_buffer << " FROM " << fill_from_expression_projection_name; + + if (sort_node_typed.hasFillTo()) + sort_column_projection_name_buffer << " TO " << fill_to_expression_projection_name; + + if (sort_node_typed.hasFillStep()) + sort_column_projection_name_buffer << " STEP " << fill_step_expression_projection_name; + } + + return sort_column_projection_name_buffer.str(); +} + +/** Wrap expression node in tuple element function calls for nested paths. + * Example: Expression node: compound_expression. Nested path: nested_path_1.nested_path_2. 
+ * Result: tupleElement(tupleElement(compound_expression, 'nested_path_1'), 'nested_path_2'). + */ +QueryTreeNodePtr QueryAnalyzer::wrapExpressionNodeInTupleElement(QueryTreeNodePtr expression_node, IdentifierView nested_path) +{ + size_t nested_path_parts_size = nested_path.getPartsSize(); + for (size_t i = 0; i < nested_path_parts_size; ++i) + { + const auto & nested_path_part = nested_path[i]; + auto tuple_element_function = std::make_shared("tupleElement"); + + auto & tuple_element_function_arguments_nodes = tuple_element_function->getArguments().getNodes(); + tuple_element_function_arguments_nodes.reserve(2); + tuple_element_function_arguments_nodes.push_back(expression_node); + tuple_element_function_arguments_nodes.push_back(std::make_shared(nested_path_part)); + + expression_node = std::move(tuple_element_function); + } + + return expression_node; +} + +/** Try to get lambda node from sql user defined functions if sql user defined function with function name exists. + * Returns lambda node if function exists, nullptr otherwise. + */ +QueryTreeNodePtr QueryAnalyzer::tryGetLambdaFromSQLUserDefinedFunctions(const std::string & function_name, ContextPtr context) +{ + auto user_defined_function = UserDefinedSQLFunctionFactory::instance().tryGet(function_name); + if (!user_defined_function) + return {}; + + const auto & create_function_query = user_defined_function->as(); + auto result_node = buildQueryTree(create_function_query->function_core, context); + if (result_node->getNodeType() != QueryTreeNodeType::LAMBDA) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "SQL user defined function {} must represent lambda expression. Actual {}", + function_name, + create_function_query->function_core->formatForErrorMessage()); + + return result_node; +} + +/// Evaluate scalar subquery and perform constant folding if scalar subquery does not have constant value +void QueryAnalyzer::evaluateScalarSubqueryIfNeeded(QueryTreeNodePtr & node, size_t subquery_depth, ContextPtr context) +{ + auto * query_node = node->as(); + auto * union_node = node->as(); + if (!query_node && !union_node) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Node must have query or union type. 
Actual {} {}", + node->getNodeTypeName(), + node->formatASTForErrorMessage()); + + if (node->hasConstantValue()) + return; + + auto subquery_context = Context::createCopy(context); + + Settings subquery_settings = context->getSettings(); + subquery_settings.max_result_rows = 1; + subquery_settings.extremes = false; + subquery_context->setSettings(subquery_settings); + + auto options = SelectQueryOptions(QueryProcessingStage::Complete, subquery_depth, true /*is_subquery*/); + auto interpreter = std::make_unique(node, options, subquery_context); + + auto io = interpreter->execute(); + + Block block; + PullingAsyncPipelineExecutor executor(io.pipeline); + io.pipeline.setProgressCallback(context->getProgressCallback()); + + while (block.rows() == 0 && executor.pull(block)) + { + } + + if (block.rows() == 0) + { + auto types = interpreter->getSampleBlock().getDataTypes(); + if (types.size() != 1) + types = {std::make_shared(types)}; + + auto & type = types[0]; + if (!type->isNullable()) + { + if (!type->canBeInsideNullable()) + throw Exception(ErrorCodes::INCORRECT_RESULT_OF_SCALAR_SUBQUERY, + "Scalar subquery returned empty result of type {} which cannot be Nullable.", + type->getName()); + + type = makeNullable(type); + } + + auto constant_value = std::make_shared(Null(), std::move(type)); + + if (query_node) + query_node->performConstantFolding(std::move(constant_value)); + else if (union_node) + union_node->performConstantFolding(std::move(constant_value)); + + return; + } + + if (block.rows() != 1) + throw Exception(ErrorCodes::INCORRECT_RESULT_OF_SCALAR_SUBQUERY, "Scalar subquery returned more than one row"); + + Block tmp_block; + while (tmp_block.rows() == 0 && executor.pull(tmp_block)) + { + } + + if (tmp_block.rows() != 0) + throw Exception(ErrorCodes::INCORRECT_RESULT_OF_SCALAR_SUBQUERY, "Scalar subquery returned more than one row"); + + block = materializeBlock(block); + size_t columns = block.columns(); + + // Block scalar; + Field scalar_value; + DataTypePtr scalar_type; + + if (columns == 1) + { + auto & column = block.getByPosition(0); + /// Here we wrap type to nullable if we can. + /// It is needed cause if subquery return no rows, it's result will be Null. + /// In case of many columns, do not check it cause tuple can't be nullable. + if (!column.type->isNullable() && column.type->canBeInsideNullable()) + { + column.type = makeNullable(column.type); + column.column = makeNullable(column.column); + } + + column.column->get(0, scalar_value); + scalar_type = column.type; + } + else + { + auto tuple_column = ColumnTuple::create(block.getColumns()); + tuple_column->get(0, scalar_value); + scalar_type = std::make_shared(block.getDataTypes(), block.getNames()); + } + + auto constant_value = std::make_shared(std::move(scalar_value), std::move(scalar_type)); + if (query_node) + query_node->performConstantFolding(std::move(constant_value)); + else if (union_node) + union_node->performConstantFolding(std::move(constant_value)); +} + +void QueryAnalyzer::mergeWindowWithParentWindow(const QueryTreeNodePtr & window_node, const QueryTreeNodePtr & parent_window_node, IdentifierResolveScope & scope) +{ + auto & window_node_typed = window_node->as(); + auto parent_window_name = window_node_typed.getParentWindowName(); + + auto & parent_window_node_typed = parent_window_node->as(); + + /** If an existing_window_name is specified it must refer to an earlier + * entry in the WINDOW list; the new window copies its partitioning clause + * from that entry, as well as its ordering clause if any. 
In this case + * the new window cannot specify its own PARTITION BY clause, and it can + * specify ORDER BY only if the copied window does not have one. The new + * window always uses its own frame clause; the copied window must not + * specify a frame clause. + * https://www.postgresql.org/docs/current/sql-select.html + */ + if (window_node_typed.hasPartitionBy()) + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Derived window definition '{}' is not allowed to override PARTITION BY. In scope {}", + window_node_typed.formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + } + + if (window_node_typed.hasOrderBy() && parent_window_node_typed.hasOrderBy()) + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Derived window definition '{}' is not allowed to override a non-empty ORDER BY. In scope {}", + window_node_typed.formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + } + + if (!parent_window_node_typed.getWindowFrame().is_default) + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Parent window '{}' is not allowed to define a frame: while processing derived window definition '{}'. In scope {}", + parent_window_name, + window_node_typed.formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + } + + window_node_typed.getPartitionByNode() = parent_window_node_typed.getPartitionBy().clone(); + + if (parent_window_node_typed.hasOrderBy()) + window_node_typed.getOrderByNode() = parent_window_node_typed.getOrderBy().clone(); +} + +/** Replace nodes in node list with positional arguments. + * + * Example: SELECT id, value FROM test_table GROUP BY 1, 2; + * Example: SELECT id, value FROM test_table ORDER BY 1, 2; + * Example: SELECT id, value FROM test_table LIMIT 5 BY 1, 2; + */ +void QueryAnalyzer::replaceNodesWithPositionalArguments(QueryTreeNodePtr & node_list, const QueryTreeNodes & projection_nodes, IdentifierResolveScope & scope) +{ + auto & node_list_typed = node_list->as(); + + for (auto & node : node_list_typed.getNodes()) + { + auto * constant_node = node->as(); + if (!constant_node) + continue; + + if (!isNativeNumber(removeNullable(constant_node->getResultType()))) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Positional argument must be constant with numeric type. Actual {}. In scope {}", + constant_node->formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + + Field converted = convertFieldToType(constant_node->getValue(), DataTypeUInt64()); + if (converted.isNull()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Positional argument numeric constant expression is not representable as UInt64. In scope {}", + scope.scope_node->formatASTForErrorMessage()); + + UInt64 positional_argument_number = converted.safeGet(); + if (positional_argument_number == 0 || positional_argument_number > projection_nodes.size()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Positional argument number {} is out of bounds. Expected in range [1, {}]. 
In scope {}", + positional_argument_number, + projection_nodes.size(), + scope.scope_node->formatASTForErrorMessage()); + + --positional_argument_number; + node = projection_nodes[positional_argument_number]; + } +} + +void QueryAnalyzer::validateLimitOffsetExpression(QueryTreeNodePtr & expression_node, const String & expression_description, IdentifierResolveScope & scope) +{ + const auto limit_offset_constant_value = expression_node->getConstantValueOrNull(); + if (!limit_offset_constant_value || !isNativeNumber(removeNullable(limit_offset_constant_value->getType()))) + throw Exception(ErrorCodes::INVALID_LIMIT_EXPRESSION, + "{} expression must be constant with numeric type. Actual {}. In scope {}", + expression_description, + expression_node->formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + + Field converted = convertFieldToType(limit_offset_constant_value->getValue(), DataTypeUInt64()); + if (converted.isNull()) + throw Exception(ErrorCodes::INVALID_LIMIT_EXPRESSION, + "{} numeric constant expression is not representable as UInt64", + expression_description); +} + +void QueryAnalyzer::validateTableExpressionModifiers(const QueryTreeNodePtr & table_expression_node, IdentifierResolveScope & scope) +{ + auto * table_node = table_expression_node->as(); + auto * table_function_node = table_expression_node->as(); + auto * query_node = table_expression_node->as(); + auto * union_node = table_expression_node->as(); + + if (!table_node && !table_function_node && !query_node && !union_node) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Unexpected table expression. Expected table, table function, query or union node. Actual {}", + table_expression_node->formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + + if (table_node || table_function_node) + { + auto table_expression_modifiers = table_node ? table_node->getTableExpressionModifiers() : table_function_node->getTableExpressionModifiers(); + + if (table_expression_modifiers.has_value()) + { + const auto & storage = table_node ? table_node->getStorage() : table_function_node->getStorage(); + if (table_expression_modifiers->hasFinal() && !storage->supportsFinal()) + throw Exception(ErrorCodes::ILLEGAL_FINAL, + "Storage {} doesn't support FINAL", + storage->getName()); + + if (table_expression_modifiers->hasSampleSizeRatio() && !storage->supportsSampling()) + throw Exception(ErrorCodes::SAMPLING_NOT_SUPPORTED, + "Storage {} doesn't support sampling", + storage->getStorageID().getFullNameNotQuoted()); + } + } +} + +void QueryAnalyzer::validateJoinTableExpressionWithoutAlias(const QueryTreeNodePtr & join_node, const QueryTreeNodePtr & table_expression_node, IdentifierResolveScope & scope) +{ + if (!scope.context->getSettingsRef().joined_subquery_requires_alias) + return; + + bool table_expression_has_alias = table_expression_node->hasAlias(); + if (table_expression_has_alias) + return; + + auto table_expression_node_type = table_expression_node->getNodeType(); + if (table_expression_node_type == QueryTreeNodeType::TABLE_FUNCTION || + table_expression_node_type == QueryTreeNodeType::QUERY || + table_expression_node_type == QueryTreeNodeType::UNION) + throw Exception(ErrorCodes::ALIAS_REQUIRED, + "JOIN {} no alias for subquery or table function {}. 
In scope {} (set joined_subquery_requires_alias = 0 to disable restriction)", + join_node->formatASTForErrorMessage(), + table_expression_node->formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); +} + + +/// Resolve identifier functions implementation + +/// Try resolve table identifier from database catalog +QueryTreeNodePtr QueryAnalyzer::tryResolveTableIdentifierFromDatabaseCatalog(const Identifier & table_identifier, ContextPtr context) +{ + size_t parts_size = table_identifier.getPartsSize(); + if (parts_size < 1 || parts_size > 2) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Expected table identifier to contain 1 or 2 parts. Actual '{}'", + table_identifier.getFullName()); + + std::string database_name; + std::string table_name; + + if (table_identifier.isCompound()) + { + database_name = table_identifier[0]; + table_name = table_identifier[1]; + } + else + { + table_name = table_identifier[0]; + } + + StorageID storage_id(database_name, table_name); + storage_id = context->resolveStorageID(storage_id); + auto storage = DatabaseCatalog::instance().getTable(storage_id, context); + auto storage_lock = storage->lockForShare(context->getInitialQueryId(), context->getSettingsRef().lock_acquire_timeout); + auto storage_snapshot = storage->getStorageSnapshot(storage->getInMemoryMetadataPtr(), context); + + return std::make_shared(std::move(storage), storage_lock, storage_snapshot); +} + +/** Resolve identifier from expression arguments. + * + * Expression arguments can be initialized during lambda analysis or they could be provided externally. + * Expression arguments must be already resolved nodes. This is client responsibility to resolve them. + * + * Example: SELECT arrayMap(x -> x + 1, [1,2,3]); + * For lambda x -> x + 1, `x` is lambda expression argument. + * + * Resolve strategy: + * 1. Try to bind identifier to scope argument name to node map. + * 2. If identifier is binded but expression context and node type are incompatible return nullptr. + * + * It is important to support edge cases, where we lookup for table or function node, but argument has same name. + * Example: WITH (x -> x + 1) AS func, (func -> func(1) + func) AS lambda SELECT lambda(1); + * + * 3. If identifier is compound and identifier lookup is in expression context, pop first part from identifier lookup and wrap node + * using nested parts of identifier using `wrapExpressionNodeInTupleElement` function. 
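+ *
+ * Illustrative example for step 3 (a sketch, assuming named-tuple element access; not taken from the original patch):
+ * SELECT arrayMap(x -> x.name, [CAST((1, 'a'), 'Tuple(id UInt64, name String)')]);
+ * Inside the lambda the first identifier part `x` binds to the lambda argument, and the remaining
+ * nested part is wrapped as tupleElement(x, 'name') by `wrapExpressionNodeInTupleElement`.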
+ */ +QueryTreeNodePtr QueryAnalyzer::tryResolveIdentifierFromExpressionArguments(const IdentifierLookup & identifier_lookup, IdentifierResolveScope & scope) +{ + auto it = scope.expression_argument_name_to_node.find(identifier_lookup.identifier.getFullName()); + bool resolve_full_identifier = it != scope.expression_argument_name_to_node.end(); + + if (!resolve_full_identifier) + { + const auto & identifier_bind_part = identifier_lookup.identifier.front(); + + it = scope.expression_argument_name_to_node.find(identifier_bind_part); + if (it == scope.expression_argument_name_to_node.end()) + return {}; + } + + auto node_type = it->second->getNodeType(); + if (identifier_lookup.isExpressionLookup() && !isExpressionNodeType(node_type)) + return {}; + else if (identifier_lookup.isTableExpressionLookup() && !isTableExpressionNodeType(node_type)) + return {}; + else if (identifier_lookup.isFunctionLookup() && !isFunctionExpressionNodeType(node_type)) + return {}; + + if (!resolve_full_identifier && identifier_lookup.identifier.isCompound() && identifier_lookup.isExpressionLookup()) + { + auto nested_path = IdentifierView(identifier_lookup.identifier); + nested_path.popFirst(); + + auto tuple_element_result = wrapExpressionNodeInTupleElement(it->second, nested_path); + resolveFunction(tuple_element_result, scope); + + return tuple_element_result; + } + + return it->second; +} + +bool QueryAnalyzer::tryBindIdentifierToAliases(const IdentifierLookup & identifier_lookup, IdentifierResolveScope & scope) +{ + const auto & identifier_bind_part = identifier_lookup.identifier.front(); + + auto get_alias_name_to_node_map = [&]() -> std::unordered_map & + { + if (identifier_lookup.isExpressionLookup()) + return scope.alias_name_to_expression_node; + else if (identifier_lookup.isFunctionLookup()) + return scope.alias_name_to_lambda_node; + + return scope.alias_name_to_table_expression_node; + }; + + auto & alias_name_to_node_map = get_alias_name_to_node_map(); + auto it = alias_name_to_node_map.find(identifier_bind_part); + + if (it == alias_name_to_node_map.end()) + return false; + + return true; +} + +/** Resolve identifier from scope aliases. + * + * Resolve strategy: + * 1. If alias is registered current expressions that are in resolve process and if last expression is not part of first expression subtree + * throw cyclic aliases exception. + * Otherwise prevent cache usage for identifier lookup and return nullptr. + * + * This is special scenario where identifier has name the same as alias name in one of its parent expressions including itself. + * In such case we cannot resolve identifier from aliases because of recursion. It is client responsibility to register and deregister alias + * names during expressions resolve. + * + * We must prevent cache usage for lookup because lookup outside of expression is supposed to return other value. + * Example: SELECT (id + 1) AS id, id + 2. Lookup for id inside (id + 1) as id should return id from table, but lookup (id + 2) should return + * (id + 1) AS id. + * + * Below cases should work: + * Example: + * SELECT id AS id FROM test_table; + * SELECT value.value1 AS value FROM test_table; + * SELECT (id + 1) AS id FROM test_table; + * SELECT (1 + (1 + id)) AS id FROM test_table; + * + * Below cases should throw cyclic aliases exception: + * SELECT (id + b) AS id, id as b FROM test_table; + * SELECT (1 + b + 1 + id) AS id, b as c, id as b FROM test_table; + * + * 2. Depending on IdentifierLookupContext get alias name to node map from IdentifierResolveScope. + * 3. 
Try to bind identifier to alias name in map. If there are no such binding return nullptr. + * 4. Add node into current expressions to resolve. TODO: Handle lambdas and tables properly. + * + * 5. If node in map is not resolved, resolve it. It is important because for result type of identifier lookup node can depend on it. + * Example: SELECT value.a, cast('(1)', 'Tuple(a UInt64)') AS value; + * + * Special case for IdentifierNode, if node is identifier depending on lookup context we need to erase entry from expression or lambda map. + * Check QueryExpressionsAliasVisitor documentation. + * + * Special case for QueryNode, if lookup context is expression, evaluate it as scalar subquery. + * + * 6. Pop node from current expressions to resolve. + * 7. If identifier is compound and identifier lookup is in expression context, pop first part from identifier lookup and wrap alias node + * using nested parts of identifier using `wrapExpressionNodeInTupleElement` function. + * + * Example: SELECT value AS alias, alias.nested_path. + * Result: SELECT value AS alias, tupleElement(value, 'nested_path') value.nested_path. + * + * 8. If identifier lookup is in expression context, clone result expression. + */ +QueryTreeNodePtr QueryAnalyzer::tryResolveIdentifierFromAliases(const IdentifierLookup & identifier_lookup, IdentifierResolveScope & scope, IdentifierResolveSettings identifier_resolve_settings) +{ + const auto & identifier_bind_part = identifier_lookup.identifier.front(); + + auto get_alias_name_to_node_map = [&]() -> std::unordered_map & + { + if (identifier_lookup.isExpressionLookup()) + return scope.alias_name_to_expression_node; + else if (identifier_lookup.isFunctionLookup()) + return scope.alias_name_to_lambda_node; + + return scope.alias_name_to_table_expression_node; + }; + + auto & alias_name_to_node_map = get_alias_name_to_node_map(); + auto it = alias_name_to_node_map.find(identifier_bind_part); + + if (it == alias_name_to_node_map.end()) + return {}; + + if (!it->second) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Node with alias {} is not valid. In scope {}", + identifier_bind_part, + scope.scope_node->formatASTForErrorMessage()); + + if (scope.expressions_in_resolve_process_stack.hasExpressionWithAlias(identifier_bind_part)) + { + const auto * root_expression = scope.expressions_in_resolve_process_stack.getRoot(); + const auto * top_expression = scope.expressions_in_resolve_process_stack.getTop(); + + if (!isNodePartOfTree(top_expression, root_expression)) + throw Exception(ErrorCodes::CYCLIC_ALIASES, + "Cyclic aliases for identifier '{}'. In scope {}", + identifier_lookup.identifier.getFullName(), + scope.scope_node->formatASTForErrorMessage()); + + scope.non_cached_identifier_lookups_during_expression_resolve.insert(identifier_lookup); + return {}; + } + + auto node_type = it->second->getNodeType(); + + /// Resolve expression if necessary + if (node_type == QueryTreeNodeType::IDENTIFIER) + { + scope.expressions_in_resolve_process_stack.pushNode(it->second); + + auto & alias_identifier_node = it->second->as(); + auto identifier = alias_identifier_node.getIdentifier(); + auto lookup_result = tryResolveIdentifier(IdentifierLookup{identifier, identifier_lookup.lookup_context}, scope, identifier_resolve_settings); + it->second = lookup_result.resolved_identifier; + + /** During collection of aliases if node is identifier and has alias, we cannot say if it is + * column or function node. Check QueryExpressionsAliasVisitor documentation for clarification. 
+ * + * If we resolved identifier node as expression, we must remove identifier node alias from + * function alias map. + * If we resolved identifier node as function, we must remove identifier node alias from + * expression alias map. + */ + if (identifier_lookup.isExpressionLookup() && it->second) + scope.alias_name_to_lambda_node.erase(identifier_bind_part); + else if (identifier_lookup.isFunctionLookup() && it->second) + scope.alias_name_to_expression_node.erase(identifier_bind_part); + + scope.expressions_in_resolve_process_stack.popNode(); + } + else if (node_type == QueryTreeNodeType::FUNCTION) + { + resolveExpressionNode(it->second, scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/); + } + else if (node_type == QueryTreeNodeType::QUERY || node_type == QueryTreeNodeType::UNION) + { + if (identifier_resolve_settings.allow_to_resolve_subquery_during_identifier_resolution) + resolveExpressionNode(it->second, scope, false /*allow_lambda_expression*/, identifier_lookup.isTableExpressionLookup() /*allow_table_expression*/); + } + + QueryTreeNodePtr result = it->second; + + /** If identifier is compound and it is expression identifier lookup, wrap compound expression into + * tuple elements functions. + * + * Example: SELECT compound_expression AS alias, alias.first.second; + * Result: SELECT compound_expression AS alias, tupleElement(tupleElement(compound_expression, 'first'), 'second'); + */ + if (identifier_lookup.identifier.isCompound() && result) + { + if (identifier_lookup.isExpressionLookup()) + { + auto nested_path = IdentifierView(identifier_lookup.identifier); + nested_path.popFirst(); + + auto tuple_element_result = wrapExpressionNodeInTupleElement(result, nested_path); + resolveFunction(tuple_element_result, scope); + + result = tuple_element_result; + } + else if (identifier_lookup.isFunctionLookup() || identifier_lookup.isTableExpressionLookup()) + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Compound identifier '{}' cannot be resolved as {}. In scope {}", + identifier_lookup.identifier.getFullName(), + identifier_lookup.isFunctionLookup() ? "function" : "table expression", + scope.scope_node->formatASTForErrorMessage()); + } + } + + return result; +} + +/** Resolve identifier from table columns. + * + * 1. If table column nodes are empty or identifier is not expression lookup return nullptr. + * 2. If identifier full name match table column use column. Save information that we resolve identifier using full name. + * 3. Else if identifier binds to table column, use column. + * 4. Try to resolve column ALIAS expression if it exists. + * 5. If identifier was compound and was not resolved using full name during step 1 pop first part from identifier lookup and wrap column node + * using nested parts of identifier using `wrapExpressionNodeInTupleElement` function. + * This can be the case with compound ALIAS columns. 
+ * Example: + * CREATE TABLE test_table (id UInt64, value Tuple(id UInt64, value String), alias_value ALIAS value.id) ENGINE=TinyLog; + */ +QueryTreeNodePtr QueryAnalyzer::tryResolveIdentifierFromTableColumns(const IdentifierLookup & identifier_lookup, IdentifierResolveScope & scope) +{ + if (scope.column_name_to_column_node.empty() || !identifier_lookup.isExpressionLookup()) + return {}; + + const auto & identifier = identifier_lookup.identifier; + auto it = scope.column_name_to_column_node.find(identifier.getFullName()); + bool full_column_name_match = it != scope.column_name_to_column_node.end(); + + if (!full_column_name_match) + { + it = scope.column_name_to_column_node.find(identifier_lookup.identifier[0]); + if (it == scope.column_name_to_column_node.end()) + return {}; + } + + if (it->second->hasExpression()) + resolveExpressionNode(it->second->getExpression(), scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/); + + QueryTreeNodePtr result = it->second; + + if (!full_column_name_match && identifier.isCompound()) + { + auto nested_path = IdentifierView(identifier_lookup.identifier); + nested_path.popFirst(); + + auto tuple_element_result = wrapExpressionNodeInTupleElement(it->second, nested_path); + resolveFunction(tuple_element_result, scope); + + result = tuple_element_result; + } + + return result; +} + +bool QueryAnalyzer::tryBindIdentifierToTableExpression(const IdentifierLookup & identifier_lookup, const QueryTreeNodePtr & table_expression_node, IdentifierResolveScope & scope) +{ + auto table_expression_node_type = table_expression_node->getNodeType(); + + if (table_expression_node_type != QueryTreeNodeType::TABLE && + table_expression_node_type != QueryTreeNodeType::TABLE_FUNCTION && + table_expression_node_type != QueryTreeNodeType::QUERY && + table_expression_node_type != QueryTreeNodeType::UNION) + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, + "Unexpected table expression. Expected table, table function, query or union node. Actual {}. In scope {}", + table_expression_node->formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + + const auto & identifier = identifier_lookup.identifier; + const auto & path_start = identifier.getParts().front(); + + auto & table_expression_data = scope.getTableExpressionDataOrThrow(table_expression_node); + + const auto & table_name = table_expression_data.table_name; + const auto & database_name = table_expression_data.database_name; + + if (identifier_lookup.isTableExpressionLookup()) + { + size_t parts_size = identifier_lookup.identifier.getPartsSize(); + if (parts_size != 1 && parts_size != 2) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Expected identifier '{}' to contain 1 or 2 parts to be resolved as table expression. 
In scope {}", + identifier_lookup.identifier.getFullName(), + table_expression_node->formatASTForErrorMessage()); + + if (parts_size == 1 && path_start == table_name) + return true; + else if (parts_size == 2 && path_start == database_name && identifier[1] == table_name) + return true; + else + return false; + } + + if (table_expression_data.hasFullIdentifierName(IdentifierView(identifier)) || table_expression_data.canBindIdentifier(IdentifierView(identifier))) + return true; + + if (identifier.getPartsSize() == 1) + return false; + + if ((!table_name.empty() && path_start == table_name) || (table_expression_node->hasAlias() && path_start == table_expression_node->getAlias())) + return true; + + if (identifier.getPartsSize() == 2) + return false; + + if (!database_name.empty() && path_start == database_name && identifier[1] == table_name) + return true; + + return false; +} + +QueryTreeNodePtr QueryAnalyzer::tryResolveIdentifierFromTableExpression(const IdentifierLookup & identifier_lookup, const QueryTreeNodePtr & table_expression_node, IdentifierResolveScope & scope) +{ + auto table_expression_node_type = table_expression_node->getNodeType(); + + if (table_expression_node_type != QueryTreeNodeType::TABLE && + table_expression_node_type != QueryTreeNodeType::TABLE_FUNCTION && + table_expression_node_type != QueryTreeNodeType::QUERY && + table_expression_node_type != QueryTreeNodeType::UNION) + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, + "Unexpected table expression. Expected table, table function, query or union node. Actual {}. In scope {}", + table_expression_node->formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + + const auto & identifier = identifier_lookup.identifier; + const auto & path_start = identifier.getParts().front(); + + auto & table_expression_data = scope.getTableExpressionDataOrThrow(table_expression_node); + + if (identifier_lookup.isTableExpressionLookup()) + { + size_t parts_size = identifier_lookup.identifier.getPartsSize(); + if (parts_size != 1 && parts_size != 2) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Expected identifier '{}' to contain 1 or 2 parts to be resolved as table expression. In scope {}", + identifier_lookup.identifier.getFullName(), + table_expression_node->formatASTForErrorMessage()); + + const auto & table_name = table_expression_data.table_name; + const auto & database_name = table_expression_data.database_name; + + if (parts_size == 1 && path_start == table_name) + return table_expression_node; + else if (parts_size == 2 && path_start == database_name && identifier[1] == table_name) + return table_expression_node; + else + return {}; + } + + auto resolve_identifier_from_storage_or_throw = [&](size_t identifier_column_qualifier_parts) -> QueryTreeNodePtr + { + auto identifier_view = IdentifierView(identifier); + identifier_view.popFirst(identifier_column_qualifier_parts); + + /** Compound identifier cannot be resolved directly from storage if storage is not table. + * + * Example: SELECT test_table.id.value1.value2 FROM test_table; + * In table storage column test_table.id.value1.value2 will exists. + * + * Example: SELECT test_subquery.compound_expression.value FROM (SELECT compound_expression AS value) AS test_subquery; + * Here there is no column with name test_subquery.compound_expression.value, and additional wrap in tuple element is required. 
+ */ + + ColumnNodePtr result_column; + bool compound_identifier = identifier_view.getPartsSize() > 1; + bool match_full_identifier = false; + + auto it = table_expression_data.column_name_to_column_node.find(std::string(identifier_view.getFullName())); + if (it != table_expression_data.column_name_to_column_node.end()) + { + match_full_identifier = true; + result_column = it->second; + } + else + { + it = table_expression_data.column_name_to_column_node.find(std::string(identifier_view.at(0))); + + if (it != table_expression_data.column_name_to_column_node.end()) + result_column = it->second; + } + + if (!result_column || (!match_full_identifier && !compound_identifier)) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Identifier '{}' cannot be resolved from {}{}. In scope {}", + identifier.getFullName(), + table_expression_data.table_expression_description, + table_expression_data.table_expression_name.empty() ? "" : " with name " + table_expression_data.table_expression_name, + scope.scope_node->formatASTForErrorMessage()); + + QueryTreeNodePtr result_expression = result_column; + bool clone_is_needed = true; + + if (!match_full_identifier && compound_identifier) + { + IdentifierView nested_path(identifier_view); + nested_path.popFirst(); + auto tuple_element_result = wrapExpressionNodeInTupleElement(result_expression, identifier_view); + resolveFunction(tuple_element_result, scope); + result_expression = std::move(tuple_element_result); + clone_is_needed = false; + } + + if (clone_is_needed) + result_expression = result_expression->clone(); + + auto qualified_identifier = identifier; + for (size_t i = 0; i < identifier_column_qualifier_parts; ++i) + { + auto qualified_identifier_with_removed_part = qualified_identifier; + qualified_identifier_with_removed_part.popFirst(); + + if (qualified_identifier_with_removed_part.empty()) + break; + + if (scope.context->getSettingsRef().prefer_column_name_to_alias + && scope.alias_name_to_expression_node.contains(qualified_identifier_with_removed_part[0])) + break; + + bool can_remove_qualificator = true; + + for (auto & table_expression_to_check_data : scope.table_expression_node_to_data) + { + const auto & table_expression_to_check = table_expression_to_check_data.first; + if (table_expression_to_check.get() == table_expression_node.get()) + continue; + + IdentifierLookup column_identifier_lookup{qualified_identifier_with_removed_part, IdentifierLookupContext::EXPRESSION}; + bool can_bind_identifier_to_table_expression = tryBindIdentifierToTableExpression(column_identifier_lookup, table_expression_to_check, scope); + + if (can_bind_identifier_to_table_expression) + { + can_remove_qualificator = false; + break; + } + } + + if (!can_remove_qualificator) + break; + + qualified_identifier = std::move(qualified_identifier_with_removed_part); + } + + auto qualified_identifier_full_name = qualified_identifier.getFullName(); + node_to_projection_name.emplace(result_expression, std::move(qualified_identifier_full_name)); + + return result_expression; + }; + + /** If identifier first part binds to some column start or table has full identifier name. Then we can try to find whole identifier in table. + * 1. Try to bind identifier first part to column in table, if true get full identifier from table or throw exception. + * 2. Try to bind identifier first part to table name or storage alias, if true remove first part and try to get full identifier from table or throw exception. + * Storage alias works for subquery, table function as well. + * 3. 
Try to bind identifier first parts to database name and table name, if true remove first two parts and try to get full identifier from table or throw exception. + */ + if (table_expression_data.hasFullIdentifierName(IdentifierView(identifier))) + return resolve_identifier_from_storage_or_throw(0 /*identifier_column_qualifier_parts*/); + + if (table_expression_data.canBindIdentifier(IdentifierView(identifier))) + return resolve_identifier_from_storage_or_throw(0 /*identifier_column_qualifier_parts*/); + + if (identifier.getPartsSize() == 1) + return {}; + + const auto & table_name = table_expression_data.table_name; + if ((!table_name.empty() && path_start == table_name) || (table_expression_node->hasAlias() && path_start == table_expression_node->getAlias())) + return resolve_identifier_from_storage_or_throw(1 /*identifier_column_qualifier_parts*/); + + if (identifier.getPartsSize() == 2) + return {}; + + const auto & database_name = table_expression_data.database_name; + if (!database_name.empty() && path_start == database_name && identifier[1] == table_name) + return resolve_identifier_from_storage_or_throw(2 /*identifier_column_qualifier_parts*/); + + return {}; +} + +QueryTreeNodePtr QueryAnalyzer::tryResolveIdentifierFromJoin(const IdentifierLookup & identifier_lookup, const QueryTreeNodePtr & table_expression_node, IdentifierResolveScope & scope) +{ + const auto & from_join_node = table_expression_node->as(); + auto left_resolved_identifier = tryResolveIdentifierFromJoinTreeNode(identifier_lookup, from_join_node.getLeftTableExpression(), scope); + auto right_resolved_identifier = tryResolveIdentifierFromJoinTreeNode(identifier_lookup, from_join_node.getRightTableExpression(), scope); + + if (!identifier_lookup.isExpressionLookup()) + { + if (left_resolved_identifier && right_resolved_identifier) + throw Exception(ErrorCodes::AMBIGUOUS_IDENTIFIER, + "JOIN {} ambiguous identifier {}. In scope {}", + table_expression_node->formatASTForErrorMessage(), + identifier_lookup.dump(), + scope.scope_node->formatASTForErrorMessage()); + + return left_resolved_identifier ? left_resolved_identifier : right_resolved_identifier; + } + + bool join_node_in_resolve_process = scope.table_expressions_in_resolve_process.contains(table_expression_node.get()); + + std::unordered_map join_using_column_name_to_column_node; + + if (!join_node_in_resolve_process && from_join_node.isUsingJoinExpression()) + { + auto & join_using_list = from_join_node.getJoinExpression()->as(); + + for (auto & join_using_node : join_using_list.getNodes()) + { + auto & column_node = join_using_node->as(); + join_using_column_name_to_column_node.emplace(column_node.getColumnName(), std::static_pointer_cast(join_using_node)); + } + } + + std::optional resolved_side; + QueryTreeNodePtr resolved_identifier; + + JoinKind join_kind = from_join_node.getKind(); + + if (left_resolved_identifier && right_resolved_identifier) + { + auto & left_resolved_column = left_resolved_identifier->as(); + auto & right_resolved_column = right_resolved_identifier->as(); + + auto using_column_node_it = join_using_column_name_to_column_node.find(left_resolved_column.getColumnName()); + if (using_column_node_it != join_using_column_name_to_column_node.end() + && left_resolved_column.getColumnName() == right_resolved_column.getColumnName()) + { + JoinTableSide using_column_inner_column_table_side = isRight(join_kind) ? 
JoinTableSide::Right : JoinTableSide::Left; + auto & using_column_node = using_column_node_it->second->as(); + auto & using_expression_list = using_column_node.getExpression()->as(); + + size_t inner_column_node_index = using_column_inner_column_table_side == JoinTableSide::Left ? 0 : 1; + const auto & inner_column_node = using_expression_list.getNodes().at(inner_column_node_index); + + auto result_column_node = inner_column_node->clone(); + auto & result_column = result_column_node->as(); + result_column.setColumnType(using_column_node.getColumnType()); + + resolved_identifier = std::move(result_column_node); + } + else + { + throw Exception(ErrorCodes::AMBIGUOUS_IDENTIFIER, + "JOIN {} ambiguous identifier '{}'. In scope {}", + table_expression_node->formatASTForErrorMessage(), + identifier_lookup.identifier.getFullName(), + scope.scope_node->formatASTForErrorMessage()); + } + } + else if (left_resolved_identifier) + { + resolved_side = JoinTableSide::Left; + auto & left_resolved_column = left_resolved_identifier->as(); + + resolved_identifier = left_resolved_identifier; + + auto using_column_node_it = join_using_column_name_to_column_node.find(left_resolved_column.getColumnName()); + if (using_column_node_it != join_using_column_name_to_column_node.end() && + !using_column_node_it->second->getColumnType()->equals(*left_resolved_column.getColumnType())) + { + auto left_resolved_column_clone = std::static_pointer_cast(left_resolved_column.clone()); + left_resolved_column_clone->setColumnType(using_column_node_it->second->getColumnType()); + resolved_identifier = std::move(left_resolved_column_clone); + } + else + { + resolved_identifier = left_resolved_identifier; + } + } + else if (right_resolved_identifier) + { + resolved_side = JoinTableSide::Right; + auto & right_resolved_column = right_resolved_identifier->as(); + + auto using_column_node_it = join_using_column_name_to_column_node.find(right_resolved_column.getColumnName()); + if (using_column_node_it != join_using_column_name_to_column_node.end() && + !using_column_node_it->second->getColumnType()->equals(*right_resolved_column.getColumnType())) + { + auto right_resolved_column_clone = std::static_pointer_cast(right_resolved_column.clone()); + right_resolved_column_clone->setColumnType(using_column_node_it->second->getColumnType()); + resolved_identifier = std::move(right_resolved_column_clone); + } + else + { + resolved_identifier = right_resolved_identifier; + } + } + + if (join_node_in_resolve_process || !resolved_identifier) + return resolved_identifier; + + bool join_use_nulls = scope.context->getSettingsRef().join_use_nulls; + + if (join_use_nulls + && (isFull(join_kind) || + (isLeft(join_kind) && resolved_side && *resolved_side == JoinTableSide::Right) || + (isRight(join_kind) && resolved_side && *resolved_side == JoinTableSide::Left))) + { + resolved_identifier = resolved_identifier->clone(); + auto & resolved_column = resolved_identifier->as(); + resolved_column.setColumnType(makeNullable(resolved_column.getColumnType())); + } + + return resolved_identifier; +} + +QueryTreeNodePtr QueryAnalyzer::tryResolveIdentifierFromArrayJoin(const IdentifierLookup & identifier_lookup, const QueryTreeNodePtr & table_expression_node, IdentifierResolveScope & scope) +{ + const auto & from_array_join_node = table_expression_node->as(); + auto resolved_identifier = tryResolveIdentifierFromJoinTreeNode(identifier_lookup, from_array_join_node.getTableExpression(), scope); + + /** Special case when qualified or unqualified identifier point to 
array join expression without alias. + * + * CREATE TABLE test_table (id UInt64, value String, value_array Array(UInt8)) ENGINE=TinyLog; + * SELECT id, value, value_array, test_table.value_array, default.test_table.value_array FROM test_table ARRAY JOIN value_array; + * + * value_array, test_table.value_array, default.test_table.value_array must be resolved into array join expression. + */ + if (!scope.table_expressions_in_resolve_process.contains(table_expression_node.get()) && resolved_identifier) + { + for (const auto & array_join_expression : from_array_join_node.getJoinExpressions().getNodes()) + { + auto & array_join_column_expression = array_join_expression->as(); + if (array_join_column_expression.hasAlias()) + continue; + + auto & array_join_column_inner_expression = array_join_column_expression.getExpressionOrThrow(); + if (array_join_column_inner_expression.get() == resolved_identifier.get() || + array_join_column_inner_expression->isEqual(*resolved_identifier)) + { + auto array_join_column = array_join_column_expression.getColumn(); + auto result = std::make_shared(array_join_column, table_expression_node); + + return result; + } + } + } + + return resolved_identifier; +} + +QueryTreeNodePtr QueryAnalyzer::tryResolveIdentifierFromJoinTreeNode(const IdentifierLookup & identifier_lookup, const QueryTreeNodePtr & join_tree_node, IdentifierResolveScope & scope) +{ + auto join_tree_node_type = join_tree_node->getNodeType(); + + switch (join_tree_node_type) + { + case QueryTreeNodeType::JOIN: + return tryResolveIdentifierFromJoin(identifier_lookup, join_tree_node, scope); + case QueryTreeNodeType::ARRAY_JOIN: + return tryResolveIdentifierFromArrayJoin(identifier_lookup, join_tree_node, scope); + case QueryTreeNodeType::QUERY: + [[fallthrough]]; + case QueryTreeNodeType::UNION: + [[fallthrough]]; + case QueryTreeNodeType::TABLE: + [[fallthrough]]; + case QueryTreeNodeType::TABLE_FUNCTION: + { + /** Edge case scenario when subquery in FROM node try to resolve identifier from parent scopes, when FROM is not resolved. + * SELECT subquery.b AS value FROM (SELECT value, 1 AS b) AS subquery; + * TODO: This can be supported + */ + if (scope.table_expressions_in_resolve_process.contains(join_tree_node.get())) + return {}; + + return tryResolveIdentifierFromTableExpression(identifier_lookup, join_tree_node, scope); + } + default: + { + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Scope FROM section expected table, table function, query, union, join or array join. Actual {}. In scope {}", + join_tree_node->formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + } + } +} + +/** Resolve identifier from scope join tree. + * + * 1. If identifier is in function lookup context return nullptr. + * 2. Try to resolve identifier from table columns. + * 3. If there is no FROM section return nullptr. + * 4. If identifier is in table lookup context, check if it has 1 or 2 parts, otherwise throw exception. + * If identifier has 2 parts try to match it with database_name and table_name. + * If identifier has 1 part try to match it with table_name, then try to match it with table alias. + * 5. If identifier is in expression lookup context, we first need to bind identifier to some table column using identifier first part. + * Start with identifier first part, if it match some column name in table try to get column with full identifier name. + * TODO: Need to check if it is okay to throw exception if compound identifier first part bind to column but column is not valid. 
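+ *
+ * Illustrative examples (a sketch assuming a table test_table(id UInt64, value Tuple(nested UInt64)); not taken from the original patch):
+ * SELECT test_table.id FROM test_table;   -- expression lookup: the first part matches the table name, `id` is then resolved from its columns
+ * SELECT value.nested FROM test_table;    -- expression lookup: the first part `value` binds to a table column, the nested part is resolved from it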
+ */ +QueryTreeNodePtr QueryAnalyzer::tryResolveIdentifierFromJoinTree(const IdentifierLookup & identifier_lookup, IdentifierResolveScope & scope) +{ + if (identifier_lookup.isFunctionLookup()) + return {}; + + /// Try to resolve identifier from table columns + if (auto resolved_identifier = tryResolveIdentifierFromTableColumns(identifier_lookup, scope)) + return resolved_identifier; + + if (scope.expression_join_tree_node) + return tryResolveIdentifierFromJoinTreeNode(identifier_lookup, scope.expression_join_tree_node, scope); + + auto * query_scope_node = scope.scope_node->as(); + if (!query_scope_node || !query_scope_node->getJoinTree()) + return {}; + + const auto & join_tree_node = query_scope_node->getJoinTree(); + return tryResolveIdentifierFromJoinTreeNode(identifier_lookup, join_tree_node, scope); +} + +/** Try resolve identifier in current scope parent scopes. + * + * TODO: If column is matched, throw exception that nested subqueries are not supported. + * + * If initial scope is expression. Then try to resolve identifier in parent scopes until query scope is hit. + * For query scope resolve strategy is same as if initial scope if query. + */ +IdentifierResolveResult QueryAnalyzer::tryResolveIdentifierInParentScopes(const IdentifierLookup & identifier_lookup, IdentifierResolveScope & scope) +{ + bool initial_scope_is_query = scope.scope_node->getNodeType() == QueryTreeNodeType::QUERY; + bool initial_scope_is_expression = !initial_scope_is_query; + + IdentifierResolveSettings identifier_resolve_settings; + identifier_resolve_settings.allow_to_check_parent_scopes = false; + identifier_resolve_settings.allow_to_check_database_catalog = false; + + IdentifierResolveScope * scope_to_check = scope.parent_scope; + + if (initial_scope_is_expression) + { + while (scope_to_check != nullptr) + { + auto resolve_result = tryResolveIdentifier(identifier_lookup, *scope_to_check, identifier_resolve_settings); + if (resolve_result.resolved_identifier) + return resolve_result; + + bool scope_was_query = scope_to_check->scope_node->getNodeType() == QueryTreeNodeType::QUERY; + scope_to_check = scope_to_check->parent_scope; + + if (scope_was_query) + break; + } + } + + while (scope_to_check != nullptr) + { + auto lookup_result = tryResolveIdentifier(identifier_lookup, *scope_to_check, identifier_resolve_settings); + const auto & resolved_identifier = lookup_result.resolved_identifier; + + scope_to_check = scope_to_check->parent_scope; + + if (resolved_identifier) + { + bool is_cte = resolved_identifier->as() && resolved_identifier->as()->isCTE(); + + /** From parent scopes we can resolve table identifiers only as CTE. + * Example: SELECT (SELECT 1 FROM a) FROM test_table AS a; + * + * During child scope table identifier resolve a, table node test_table with alias a from parent scope + * is invalid. + */ + if (identifier_lookup.isTableExpressionLookup() && !is_cte) + continue; + + if (is_cte) + { + return lookup_result; + } + else if (const auto constant_value = resolved_identifier->getConstantValueOrNull()) + { + lookup_result.resolved_identifier = std::make_shared(constant_value); + return lookup_result; + } + + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, + "Resolve identifier '{}' from parent scope only supported for constants and CTE. Actual {} node type {}. 
In scope {}", + identifier_lookup.identifier.getFullName(), + resolved_identifier->formatASTForErrorMessage(), + resolved_identifier->getNodeTypeName(), + scope.scope_node->formatASTForErrorMessage()); + } + } + + return {}; +} + +/** Resolve identifier in scope. + * + * If identifier was resolved resolve identified lookup status will be updated. + * + * Steps: + * 1. Register identifier lookup in scope identifier lookup to resolve status table. + * If entry is already registered and is not resolved, that means that we have cyclic aliases for identifier. + * Example: SELECT a AS b, b AS a; + * Try resolve identifier in current scope: + * 3. Try resolve identifier from expression arguments. + * + * If prefer_column_name_to_alias = true. + * 4. Try to resolve identifier from join tree. + * 5. Try to resolve identifier from aliases. + * Otherwise. + * 4. Try to resolve identifier from aliases. + * 5. Try to resolve identifier from join tree. + * + * 6. If it is table identifier lookup try to lookup identifier in current scope CTEs. + * + * 7. If identifier is not resolved in current scope, try to resolve it in parent scopes. + * 8. If identifier is not resolved from parent scopes and it is table identifier lookup try to lookup identifier + * in database catalog. + * + * Same is not done for functions because function resolution is more complex, and in case of aggregate functions requires not only name + * but also argument types, it is responsibility of resolve function method to handle resolution of function name. + * + * 9. If identifier was not resolved, or identifier caching was disabled remove it from identifier lookup to resolve status table. + * + * It is okay for identifier to be not resolved, in case we want first try to lookup identifier in one context, + * then if there is no identifier in this context, try to lookup in another context. + * Example: Try to lookup identifier as expression, if it is not found, lookup as function. + * Example: Try to lookup identifier as expression, if it is not found, lookup as table. + */ +IdentifierResolveResult QueryAnalyzer::tryResolveIdentifier(const IdentifierLookup & identifier_lookup, IdentifierResolveScope & scope, IdentifierResolveSettings identifier_resolve_settings) +{ + auto it = scope.identifier_lookup_to_result.find(identifier_lookup); + if (it != scope.identifier_lookup_to_result.end()) + { + if (!it->second.resolved_identifier) + throw Exception(ErrorCodes::CYCLIC_ALIASES, + "Cyclic aliases for identifier '{}'. 
In scope {}", + identifier_lookup.identifier.getFullName(), + scope.scope_node->formatASTForErrorMessage()); + + if (scope.use_identifier_lookup_to_result_cache && !scope.non_cached_identifier_lookups_during_expression_resolve.contains(identifier_lookup)) + return it->second; + } + + auto [insert_it, _] = scope.identifier_lookup_to_result.insert({identifier_lookup, IdentifierResolveResult()}); + it = insert_it; + + /// Resolve identifier from current scope + + IdentifierResolveResult resolve_result; + resolve_result.resolved_identifier = tryResolveIdentifierFromExpressionArguments(identifier_lookup, scope); + if (resolve_result.resolved_identifier) + resolve_result.resolve_place = IdentifierResolvePlace::EXPRESSION_ARGUMENTS; + + if (!resolve_result.resolved_identifier) + { + bool prefer_column_name_to_alias = scope.context->getSettingsRef().prefer_column_name_to_alias; + + if (unlikely(prefer_column_name_to_alias)) + { + if (identifier_resolve_settings.allow_to_check_join_tree) + { + resolve_result.resolved_identifier = tryResolveIdentifierFromJoinTree(identifier_lookup, scope); + + if (resolve_result.resolved_identifier) + resolve_result.resolve_place = IdentifierResolvePlace::JOIN_TREE; + } + + if (!resolve_result.resolved_identifier) + { + resolve_result.resolved_identifier = tryResolveIdentifierFromAliases(identifier_lookup, scope, identifier_resolve_settings); + + if (resolve_result.resolved_identifier) + resolve_result.resolve_place = IdentifierResolvePlace::ALIASES; + } + } + else + { + resolve_result.resolved_identifier = tryResolveIdentifierFromAliases(identifier_lookup, scope, identifier_resolve_settings); + + if (resolve_result.resolved_identifier) + { + resolve_result.resolve_place = IdentifierResolvePlace::ALIASES; + } + else if (identifier_resolve_settings.allow_to_check_join_tree) + { + resolve_result.resolved_identifier = tryResolveIdentifierFromJoinTree(identifier_lookup, scope); + + if (resolve_result.resolved_identifier) + resolve_result.resolve_place = IdentifierResolvePlace::JOIN_TREE; + } + } + } + + if (!resolve_result.resolved_identifier && identifier_lookup.isTableExpressionLookup()) + { + auto cte_query_node_it = scope.cte_name_to_query_node.find(identifier_lookup.identifier.getFullName()); + if (cte_query_node_it != scope.cte_name_to_query_node.end()) + { + resolve_result.resolved_identifier = cte_query_node_it->second; + resolve_result.resolve_place = IdentifierResolvePlace::CTE; + } + } + + /// Try to resolve identifier from parent scopes + + if (!resolve_result.resolved_identifier && identifier_resolve_settings.allow_to_check_parent_scopes) + { + resolve_result = tryResolveIdentifierInParentScopes(identifier_lookup, scope); + + if (resolve_result.resolved_identifier) + resolve_result.resolved_from_parent_scopes = true; + } + + /// Try to resolve table identifier from database catalog + + if (!resolve_result.resolved_identifier && identifier_resolve_settings.allow_to_check_database_catalog && identifier_lookup.isTableExpressionLookup()) + { + resolve_result.resolved_identifier = tryResolveTableIdentifierFromDatabaseCatalog(identifier_lookup.identifier, scope.context); + + if (resolve_result.resolved_identifier) + resolve_result.resolve_place = IdentifierResolvePlace::DATABASE_CATALOG; + } + + it->second = resolve_result; + + /** If identifier was not resolved, or during expression resolution identifier was explicitly added into non cached set, + * or identifier caching was disabled in resolve scope we remove identifier lookup result from identifier lookup to 
result table. + */ + if (!resolve_result.resolved_identifier || + scope.non_cached_identifier_lookups_during_expression_resolve.contains(identifier_lookup) || + !scope.use_identifier_lookup_to_result_cache) + scope.identifier_lookup_to_result.erase(it); + + return resolve_result; +} + +/// Resolve query tree nodes functions implementation + +/** Qualify matched columns projection names for unqualified matcher or qualified matcher resolved nodes + * + * Example: SELECT * FROM test_table AS t1, test_table AS t2; + */ +void QueryAnalyzer::qualifyMatchedColumnsProjectionNamesIfNeeded(QueryTreeNodesWithNames & matched_nodes_with_column_names, + const QueryTreeNodePtr & table_expression_node, IdentifierResolveScope & scope) +{ + /// Build additional column qualification parts array + std::vector additional_column_qualification_parts; + + if (table_expression_node->hasAlias()) + additional_column_qualification_parts = {table_expression_node->getAlias()}; + else if (auto * table_node = table_expression_node->as()) + additional_column_qualification_parts = {table_node->getStorageID().getDatabaseName(), table_node->getStorageID().getTableName()}; + + size_t additional_column_qualification_parts_size = additional_column_qualification_parts.size(); + + /** For each matched column node iterate over additional column qualifications and apply them if column needs to be qualified. + * To check if column needs to be qualified we check if column name can bind to any other table expression in scope or to scope aliases. + */ + std::vector column_qualified_identifier_parts; + + for (auto & [column_node, column_name] : matched_nodes_with_column_names) + { + column_qualified_identifier_parts = Identifier(column_name).getParts(); + + /// Iterate over additional column qualifications and apply them if needed + for (size_t i = 0; i < additional_column_qualification_parts_size; ++i) + { + bool need_to_qualify = false; + auto identifier_to_check = Identifier(column_qualified_identifier_parts); + IdentifierLookup lookup{identifier_to_check, IdentifierLookupContext::EXPRESSION}; + + for (auto & table_expression_data : scope.table_expression_node_to_data) + { + if (table_expression_data.first.get() == table_expression_node.get()) + continue; + + if (tryBindIdentifierToTableExpression(lookup, table_expression_data.first, scope)) + { + need_to_qualify = true; + break; + } + } + + if (tryBindIdentifierToAliases(lookup, scope)) + need_to_qualify = true; + + if (need_to_qualify) + { + /** Add last qualification part that was not used into column qualified identifier. + * If additional column qualification parts consists from [database_name, table_name]. + * On first iteration if column is needed to be qualified to qualify it with table_name. + * On second iteration if column is needed to be qualified to qualify it with database_name. + */ + size_t part_index_to_use_for_qualification = additional_column_qualification_parts_size - i - 1; + const auto & part_to_use = additional_column_qualification_parts[part_index_to_use_for_qualification]; + column_qualified_identifier_parts.insert(column_qualified_identifier_parts.begin(), part_to_use); + } + else + { + break; + } + } + + node_to_projection_name.emplace(column_node, Identifier(column_qualified_identifier_parts).getFullName()); + } +} + +/** Resolve qualified tree matcher. + * + * First try to match qualified identifier to expression. 
If qualified identifier matched expression node then + * if expression is compound match it column names using matcher `isMatchingColumn` method, if expression is not compound, throw exception. + * If qualified identifier did not match expression in query tree, try to lookup qualified identifier in table context. + */ +QueryAnalyzer::QueryTreeNodesWithNames QueryAnalyzer::resolveQualifiedMatcher(QueryTreeNodePtr & matcher_node, IdentifierResolveScope & scope) +{ + auto & matcher_node_typed = matcher_node->as(); + assert(matcher_node_typed.isQualified()); + + QueryTreeNodesWithNames matched_expression_nodes_with_column_names; + + auto expression_identifier_lookup = IdentifierLookup{matcher_node_typed.getQualifiedIdentifier(), IdentifierLookupContext::EXPRESSION}; + auto expression_identifier_resolve_result = tryResolveIdentifier(expression_identifier_lookup, scope); + auto expression_query_tree_node = expression_identifier_resolve_result.resolved_identifier; + + /// Try to resolve unqualified matcher for query expression + + if (expression_query_tree_node) + { + auto result_type = expression_query_tree_node->getResultType(); + + while (const auto * array_type = typeid_cast(result_type.get())) + result_type = array_type->getNestedType(); + + const auto * tuple_data_type = typeid_cast(result_type.get()); + if (!tuple_data_type) + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, + "Qualified matcher {} find non compound expression {} with type {}. Expected tuple or array of tuples. In scope {}", + matcher_node->formatASTForErrorMessage(), + expression_query_tree_node->formatASTForErrorMessage(), + expression_query_tree_node->getResultType()->getName(), + scope.scope_node->formatASTForErrorMessage()); + + const auto & element_names = tuple_data_type->getElementNames(); + + auto qualified_matcher_element_identifier = matcher_node_typed.getQualifiedIdentifier(); + for (const auto & element_name : element_names) + { + if (!matcher_node_typed.isMatchingColumn(element_name)) + continue; + + auto tuple_element_function = std::make_shared("tupleElement"); + tuple_element_function->getArguments().getNodes().push_back(expression_query_tree_node); + tuple_element_function->getArguments().getNodes().push_back(std::make_shared(element_name)); + + QueryTreeNodePtr function_query_node = tuple_element_function; + resolveFunction(function_query_node, scope); + + qualified_matcher_element_identifier.push_back(element_name); + node_to_projection_name.emplace(function_query_node, qualified_matcher_element_identifier.getFullName()); + qualified_matcher_element_identifier.pop_back(); + + matched_expression_nodes_with_column_names.emplace_back(std::move(function_query_node), element_name); + } + + return matched_expression_nodes_with_column_names; + } + + /// Try to resolve qualified matcher for table expression + + IdentifierResolveSettings identifier_resolve_settings; + identifier_resolve_settings.allow_to_check_cte = false; + identifier_resolve_settings.allow_to_check_database_catalog = false; + + auto table_identifier_lookup = IdentifierLookup{matcher_node_typed.getQualifiedIdentifier(), IdentifierLookupContext::TABLE_EXPRESSION}; + auto table_identifier_resolve_result = tryResolveIdentifier(table_identifier_lookup, scope, identifier_resolve_settings); + auto table_expression_node = table_identifier_resolve_result.resolved_identifier; + + if (!table_expression_node) + { + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, + "Qualified matcher {} does not find table. 
In scope {}", + matcher_node->formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + } + + NamesAndTypes initial_matcher_columns; + + auto * table_expression_query_node = table_expression_node->as(); + auto * table_expression_union_node = table_expression_node->as(); + auto * table_expression_table_node = table_expression_node->as(); + auto * table_expression_table_function_node = table_expression_node->as(); + + if (table_expression_query_node || table_expression_union_node) + { + initial_matcher_columns = table_expression_query_node ? table_expression_query_node->getProjectionColumns() + : table_expression_union_node->computeProjectionColumns(); + } + else if (table_expression_table_node || table_expression_table_function_node) + { + const auto & storage_snapshot = table_expression_table_node ? table_expression_table_node->getStorageSnapshot() + : table_expression_table_function_node->getStorageSnapshot(); + auto storage_columns_list = storage_snapshot->getColumns(GetColumnsOptions(GetColumnsOptions::All)); + initial_matcher_columns = NamesAndTypes(storage_columns_list.begin(), storage_columns_list.end()); + } + else + { + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Invalid table expression node {}. In scope {}", + table_expression_node->formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + } + + for (auto & column : initial_matcher_columns) + { + const auto & column_name = column.name; + if (matcher_node_typed.isMatchingColumn(column_name)) + matched_expression_nodes_with_column_names.emplace_back(std::make_shared(column, table_expression_node), column_name); + } + + qualifyMatchedColumnsProjectionNamesIfNeeded(matched_expression_nodes_with_column_names, table_expression_node, scope); + + return matched_expression_nodes_with_column_names; +} + + +/// Resolve non qualified matcher, using scope join tree node. +QueryAnalyzer::QueryTreeNodesWithNames QueryAnalyzer::resolveUnqualifiedMatcher(QueryTreeNodePtr & matcher_node, IdentifierResolveScope & scope) +{ + auto & matcher_node_typed = matcher_node->as(); + assert(matcher_node_typed.isUnqualified()); + + /** There can be edge case if matcher is inside lambda expression. + * Try to find parent query expression using parent scopes. + */ + auto * nearest_query_scope = scope.getNearestQueryScope(); + auto * nearest_query_scope_query_node = nearest_query_scope ? nearest_query_scope->scope_node->as() : nullptr; + + /// If there are no parent query scope or query scope does not have join tree + if (!nearest_query_scope_query_node || !nearest_query_scope_query_node->getJoinTree()) + { + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, + "Unqualified matcher {} cannot be resolved. There are no table sources. In scope {}", + matcher_node->formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + } + + /** For unqualifited matcher resolve we build table expressions stack from JOIN tree and then process it. + * For table, table function, query, union table expressions add matched columns into table expressions columns stack. + * For array join continue processing. + * For join node combine last left and right table expressions columns on stack together. It is important that if JOIN has USING + * we must add USING columns before combining left and right table expressions columns. Columns from left and right table + * expressions that have same names as columns in USING clause must be skipped. 
+ */ + + auto table_expressions_stack = buildTableExpressionsStack(nearest_query_scope_query_node->getJoinTree()); + std::vector table_expressions_column_nodes_with_names_stack; + + for (auto & table_expression : table_expressions_stack) + { + QueryTreeNodesWithNames matched_expression_nodes_with_column_names; + + if (auto * array_join_node = table_expression->as()) + continue; + + bool table_expression_in_resolve_process = scope.table_expressions_in_resolve_process.contains(table_expression.get()); + + auto * join_node = table_expression->as(); + + if (join_node) + { + size_t table_expressions_column_nodes_with_names_stack_size = table_expressions_column_nodes_with_names_stack.size(); + if (table_expressions_column_nodes_with_names_stack_size < 2) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Expected at least 2 table expressions on stack before JOIN processing. Actual {}", + table_expressions_column_nodes_with_names_stack_size); + + auto right_table_expression_columns = std::move(table_expressions_column_nodes_with_names_stack.back()); + table_expressions_column_nodes_with_names_stack.pop_back(); + + auto left_table_expression_columns = std::move(table_expressions_column_nodes_with_names_stack.back()); + table_expressions_column_nodes_with_names_stack.pop_back(); + + std::unordered_set column_names_to_skip; + + if (!table_expression_in_resolve_process && join_node->isUsingJoinExpression()) + { + auto & join_using_list = join_node->getJoinExpression()->as(); + + for (auto & join_using_node : join_using_list.getNodes()) + { + auto & column_node = join_using_node->as(); + const auto & column_name = column_node.getColumnName(); + + if (!matcher_node_typed.isMatchingColumn(column_name)) + continue; + + column_names_to_skip.insert(column_name); + + QueryTreeNodePtr column_source = getColumnSourceForJoinNodeWithUsing(table_expression); + auto matched_column_node = std::make_shared(column_node.getColumn(), column_source); + matched_expression_nodes_with_column_names.emplace_back(std::move(matched_column_node), column_name); + } + } + + for (auto && left_table_column : left_table_expression_columns) + { + if (column_names_to_skip.contains(left_table_column.second)) + continue; + + matched_expression_nodes_with_column_names.push_back(std::move(left_table_column)); + } + + for (auto && right_table_column : right_table_expression_columns) + { + if (column_names_to_skip.contains(right_table_column.second)) + continue; + + matched_expression_nodes_with_column_names.push_back(std::move(right_table_column)); + } + + table_expressions_column_nodes_with_names_stack.push_back(std::move(matched_expression_nodes_with_column_names)); + continue; + } + + auto * table_node = table_expression->as(); + auto * table_function_node = table_expression->as(); + auto * query_node = table_expression->as(); + auto * union_node = table_expression->as(); + + if (table_expression_in_resolve_process) + { + table_expressions_column_nodes_with_names_stack.emplace_back(); + continue; + } + + NamesAndTypes table_expression_columns; + + if (query_node || union_node) + { + table_expression_columns = query_node ? query_node->getProjectionColumns() : union_node->computeProjectionColumns(); + } + else if (table_node || table_function_node) + { + const auto & storage_snapshot + = table_node ? 
table_node->getStorageSnapshot() : table_function_node->getStorageSnapshot(); + + UInt8 get_column_options_kind = 0; + + if (matcher_node_typed.isAsteriskMatcher()) + { + get_column_options_kind = GetColumnsOptions::Ordinary; + const auto & settings = scope.context->getSettingsRef(); + + if (settings.asterisk_include_alias_columns) + get_column_options_kind |= GetColumnsOptions::Kind::Aliases; + + if (settings.asterisk_include_materialized_columns) + get_column_options_kind |= GetColumnsOptions::Kind::Materialized; + } + else + { + /// TODO: Check if COLUMNS select aliases column by default + get_column_options_kind = GetColumnsOptions::All; + } + + auto get_columns_options = GetColumnsOptions(static_cast(get_column_options_kind)); + auto storage_columns_list = storage_snapshot->getColumns(get_columns_options); + table_expression_columns = NamesAndTypes(storage_columns_list.begin(), storage_columns_list.end()); + } + else + { + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Unqualified matcher {} resolve unexpected table expression. In scope {}", + matcher_node_typed.formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + } + + for (auto & table_expression_column : table_expression_columns) + { + if (!matcher_node_typed.isMatchingColumn(table_expression_column.name)) + continue; + + auto matched_column_node = std::make_shared(table_expression_column, table_expression); + matched_expression_nodes_with_column_names.emplace_back(std::move(matched_column_node), table_expression_column.name); + } + + qualifyMatchedColumnsProjectionNamesIfNeeded(matched_expression_nodes_with_column_names, table_expression, scope); + + for (auto & [matched_node, column_name] : matched_expression_nodes_with_column_names) + { + auto node_projection_name_it = node_to_projection_name.find(matcher_node); + if (node_projection_name_it != node_to_projection_name.end()) + column_name = node_projection_name_it->second; + } + + table_expressions_column_nodes_with_names_stack.push_back(std::move(matched_expression_nodes_with_column_names)); + } + + QueryTreeNodesWithNames result; + + for (auto & table_expression_column_nodes_with_names : table_expressions_column_nodes_with_names_stack) + { + for (auto && table_expression_column_node_with_name : table_expression_column_nodes_with_names) + result.push_back(std::move(table_expression_column_node_with_name)); + } + + return result; +} + + +/** Resolve query tree matcher. Check MatcherNode.h for detailed matcher description. Check ColumnTransformers.h for detailed transformers description. + * + * 1. Populate matched expression nodes resolving qualified or unqualified matcher. + * 2. Apply column transformers to matched expression nodes. For strict column transformers save used column names. + * 3. Validate strict column transformers. 
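+  * Example: SELECT * EXCEPT (value) FROM test_table;
+  * Example: SELECT * REPLACE (id + 1 AS id) FROM test_table;
+  * Example: SELECT * APPLY (toString) FROM test_table;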
+ */ +ProjectionNames QueryAnalyzer::resolveMatcher(QueryTreeNodePtr & matcher_node, IdentifierResolveScope & scope) +{ + auto & matcher_node_typed = matcher_node->as(); + + QueryTreeNodesWithNames matched_expression_nodes_with_names; + + if (matcher_node_typed.isQualified()) + matched_expression_nodes_with_names = resolveQualifiedMatcher(matcher_node, scope); + else + matched_expression_nodes_with_names = resolveUnqualifiedMatcher(matcher_node, scope); + + std::unordered_map> strict_transformer_to_used_column_names; + auto add_strict_transformer_column_name = [&](const IColumnTransformerNode * transformer, const std::string & column_name) + { + auto it = strict_transformer_to_used_column_names.find(transformer); + if (it == strict_transformer_to_used_column_names.end()) + { + auto [inserted_it, _] = strict_transformer_to_used_column_names.emplace(transformer, std::unordered_set()); + it = inserted_it; + } + + it->second.insert(column_name); + }; + + ListNodePtr list = std::make_shared(); + ProjectionNames result_projection_names; + ProjectionNames node_projection_names; + + for (auto & [node, column_name] : matched_expression_nodes_with_names) + { + bool apply_transformer_was_used = false; + bool replace_transformer_was_used = false; + bool execute_apply_transformer = false; + bool execute_replace_transformer = false; + + auto projection_name_it = node_to_projection_name.find(node); + if (projection_name_it != node_to_projection_name.end()) + result_projection_names.push_back(projection_name_it->second); + else + result_projection_names.push_back(column_name); + + for (const auto & transformer : matcher_node_typed.getColumnTransformers().getNodes()) + { + if (auto * apply_transformer = transformer->as()) + { + const auto & expression_node = apply_transformer->getExpressionNode(); + apply_transformer_was_used = true; + + if (apply_transformer->getApplyTransformerType() == ApplyColumnTransformerType::LAMBDA) + { + auto lambda_expression_to_resolve = expression_node->clone(); + IdentifierResolveScope lambda_scope(expression_node, &scope /*parent_scope*/); + node_projection_names = resolveLambda(expression_node, lambda_expression_to_resolve, {node}, lambda_scope); + auto & lambda_expression_to_resolve_typed = lambda_expression_to_resolve->as(); + node = lambda_expression_to_resolve_typed.getExpression(); + } + else if (apply_transformer->getApplyTransformerType() == ApplyColumnTransformerType::FUNCTION) + { + auto function_to_resolve_untyped = expression_node->clone(); + auto & function_to_resolve_typed = function_to_resolve_untyped->as(); + function_to_resolve_typed.getArguments().getNodes().push_back(node); + node_projection_names = resolveFunction(function_to_resolve_untyped, scope); + node = function_to_resolve_untyped; + } + else + { + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, + "Unsupported apply matcher expression type. Expected lambda or function apply transformer. Actual {}. 
In scope {}", + transformer->formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + } + + execute_apply_transformer = true; + } + else if (auto * except_transformer = transformer->as()) + { + if (apply_transformer_was_used || replace_transformer_was_used) + break; + + if (except_transformer->isColumnMatching(column_name)) + { + if (except_transformer->isStrict()) + add_strict_transformer_column_name(except_transformer, column_name); + + node = {}; + break; + } + } + else if (auto * replace_transformer = transformer->as()) + { + if (apply_transformer_was_used || replace_transformer_was_used) + break; + + replace_transformer_was_used = true; + + auto replace_expression = replace_transformer->findReplacementExpression(column_name); + if (!replace_expression) + continue; + + if (replace_transformer->isStrict()) + add_strict_transformer_column_name(replace_transformer, column_name); + + node = replace_expression->clone(); + node_projection_names = resolveExpressionNode(node, scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/); + execute_replace_transformer = true; + } + + if (execute_apply_transformer || execute_replace_transformer) + { + if (auto * node_list = node->as()) + { + auto & node_list_nodes = node_list->getNodes(); + size_t node_list_nodes_size = node_list_nodes.size(); + + if (node_list_nodes_size != 1) + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, + "{} transformer {} resolved as list node with size {}. Expected 1. In scope {}", + execute_apply_transformer ? "APPLY" : "REPLACE", + transformer->formatASTForErrorMessage(), + node_list_nodes_size, + scope.scope_node->formatASTForErrorMessage()); + + node = node_list_nodes[0]; + } + + if (node_projection_names.size() != 1) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Matcher node expected 1 projection name. Actual {}", node_projection_names.size()); + + result_projection_names.back() = std::move(node_projection_names[0]); + node_to_projection_name.emplace(node, result_projection_names.back()); + node_projection_names.clear(); + } + } + + if (node) + list->getNodes().push_back(node); + else + result_projection_names.pop_back(); + } + + for (auto & [strict_transformer, used_column_names] : strict_transformer_to_used_column_names) + { + auto strict_transformer_type = strict_transformer->getTransformerType(); + const Names * strict_transformer_column_names = nullptr; + + switch (strict_transformer_type) + { + case ColumnTransfomerType::EXCEPT: + { + const auto * except_transformer = static_cast(strict_transformer); + const auto & except_names = except_transformer->getExceptColumnNames(); + + if (except_names.size() != used_column_names.size()) + strict_transformer_column_names = &except_transformer->getExceptColumnNames(); + + break; + } + case ColumnTransfomerType::REPLACE: + { + const auto * replace_transformer = static_cast(strict_transformer); + const auto & replacement_names = replace_transformer->getReplacementsNames(); + + if (replacement_names.size() != used_column_names.size()) + strict_transformer_column_names = &replace_transformer->getReplacementsNames(); + + break; + } + default: + { + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Expected strict EXCEPT or REPLACE column transformer. Actual type {}. 
In scope {}", + toString(strict_transformer_type), + scope.scope_node->formatASTForErrorMessage()); + } + } + + if (!strict_transformer_column_names) + continue; + + Names non_matched_column_names; + size_t strict_transformer_column_names_size = strict_transformer_column_names->size(); + for (size_t i = 0; i < strict_transformer_column_names_size; ++i) + { + const auto & column_name = (*strict_transformer_column_names)[i]; + if (used_column_names.find(column_name) == used_column_names.end()) + non_matched_column_names.push_back(column_name); + } + + WriteBufferFromOwnString non_matched_column_names_buffer; + size_t non_matched_column_names_size = non_matched_column_names.size(); + for (size_t i = 0; i < non_matched_column_names_size; ++i) + { + const auto & column_name = non_matched_column_names[i]; + + non_matched_column_names_buffer << column_name; + if (i + 1 != non_matched_column_names_size) + non_matched_column_names_buffer << ", "; + } + + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Strict {} column transformer {} expects following column(s) {}", + toString(strict_transformer_type), + strict_transformer->formatASTForErrorMessage(), + non_matched_column_names_buffer.str()); + } + + matcher_node = std::move(list); + + return result_projection_names; +} + +/** Resolve window function window node. + * + * Node can be identifier or window node. + * Example: SELECT count(*) OVER w FROM test_table WINDOW w AS (PARTITION BY id); + * Example: SELECT count(*) OVER (PARTITION BY id); + * + * If node has parent window name specified, then parent window definition is searched in nearest query scope WINDOW section. + * If node is identifier, than node is replaced with window definition. + * If node is window, that window node is merged with parent window node. + * + * Window node PARTITION BY and ORDER BY parts are resolved. + * If window node has frame begin OFFSET or frame end OFFSET specified, they are resolved, and window node frame constants are updated. + * Window node frame is validated. + */ +ProjectionName QueryAnalyzer::resolveWindow(QueryTreeNodePtr & node, IdentifierResolveScope & scope) +{ + std::string parent_window_name; + auto * identifier_node = node->as(); + + ProjectionName result_projection_name; + QueryTreeNodePtr parent_window_node; + + if (identifier_node) + parent_window_name = identifier_node->getIdentifier().getFullName(); + else if (auto * window_node = node->as()) + parent_window_name = window_node->getParentWindowName(); + + if (!parent_window_name.empty()) + { + auto * nearest_query_scope = scope.getNearestQueryScope(); + + if (!nearest_query_scope) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Window '{}' does not exists.", parent_window_name); + + auto & scope_window_name_to_window_node = nearest_query_scope->window_name_to_window_node; + + auto window_node_it = scope_window_name_to_window_node.find(parent_window_name); + if (window_node_it == scope_window_name_to_window_node.end()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Window '{}' does not exists. 
In scope {}", + parent_window_name, + nearest_query_scope->scope_node->formatASTForErrorMessage()); + + parent_window_node = window_node_it->second; + + if (identifier_node) + { + node = parent_window_node->clone(); + result_projection_name = parent_window_name; + } + else + { + mergeWindowWithParentWindow(node, parent_window_node, scope); + } + } + + auto & window_node = node->as(); + window_node.setParentWindowName({}); + + ProjectionNames partition_by_projection_names = resolveExpressionNodeList(window_node.getPartitionByNode(), + scope, + false /*allow_lambda_expression*/, + false /*allow_table_expression*/); + + ProjectionNames order_by_projection_names = resolveSortNodeList(window_node.getOrderByNode(), scope); + + ProjectionNames frame_begin_offset_projection_names; + ProjectionNames frame_end_offset_projection_names; + + if (window_node.hasFrameBeginOffset()) + { + frame_begin_offset_projection_names = resolveExpressionNode(window_node.getFrameBeginOffsetNode(), + scope, + false /*allow_lambda_expression*/, + false /*allow_table_expression*/); + + const auto window_frame_begin_constant_value = window_node.getFrameBeginOffsetNode()->getConstantValueOrNull(); + if (!window_frame_begin_constant_value || !isNativeNumber(removeNullable(window_frame_begin_constant_value->getType()))) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Window frame begin OFFSET expression must be constant with numeric type. Actual {}. In scope {}", + window_node.getFrameBeginOffsetNode()->formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + + window_node.getWindowFrame().begin_offset = window_frame_begin_constant_value->getValue(); + if (frame_begin_offset_projection_names.size() != 1) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Window FRAME begin offset expected 1 projection name. Actual {}", + frame_begin_offset_projection_names.size()); + } + + if (window_node.hasFrameEndOffset()) + { + frame_end_offset_projection_names = resolveExpressionNode(window_node.getFrameEndOffsetNode(), + scope, + false /*allow_lambda_expression*/, + false /*allow_table_expression*/); + + const auto window_frame_end_constant_value = window_node.getFrameEndOffsetNode()->getConstantValueOrNull(); + if (!window_frame_end_constant_value || !isNativeNumber(removeNullable(window_frame_end_constant_value->getType()))) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Window frame begin OFFSET expression must be constant with numeric type. Actual {}. In scope {}", + window_node.getFrameEndOffsetNode()->formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + + window_node.getWindowFrame().end_offset = window_frame_end_constant_value->getValue(); + if (frame_end_offset_projection_names.size() != 1) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Window FRAME begin offset expected 1 projection name. Actual {}", + frame_end_offset_projection_names.size()); + } + + window_node.getWindowFrame().checkValid(); + + if (result_projection_name.empty()) + { + result_projection_name = calculateWindowProjectionName(node, + parent_window_node, + parent_window_name, + partition_by_projection_names, + order_by_projection_names, + frame_begin_offset_projection_names.empty() ? "" : frame_begin_offset_projection_names.front(), + frame_end_offset_projection_names.empty() ? "" : frame_end_offset_projection_names.front()); + } + + return result_projection_name; +} + +/** Resolve lambda function. + * This function modified lambda_node during resolve. 
It is caller responsibility to clone lambda before resolve + * if it is needed for later use. + * + * Lambda body expression result projection names is used as lambda projection names. + * + * Lambda expression can be resolved into list node. It is caller responsibility to handle it properly. + * + * lambda_node - node that must have LambdaNode type. + * lambda_node_to_resolve - lambda node to resolve that must have LambdaNode type. + * arguments - lambda arguments. + * scope - lambda scope. It is client responsibility to create it. + * + * Resolve steps: + * 1. Validate arguments. + * 2. Register lambda node in lambdas in resolve process. This is necessary to prevent recursive lambda resolving. + * 3. Initialize scope with lambda aliases. + * 4. Validate lambda argument names, and scope expressions. + * 5. Resolve lambda body expression. + * 6. Deregister lambda node from lambdas in resolve process. + */ +ProjectionNames QueryAnalyzer::resolveLambda(const QueryTreeNodePtr & lambda_node, + const QueryTreeNodePtr & lambda_node_to_resolve, + const QueryTreeNodes & lambda_arguments, + IdentifierResolveScope & scope) +{ + auto & lambda_to_resolve = lambda_node_to_resolve->as(); + auto & lambda_arguments_nodes = lambda_to_resolve.getArguments().getNodes(); + size_t lambda_arguments_nodes_size = lambda_arguments_nodes.size(); + + /** Register lambda as being resolved, to prevent recursive lambdas resolution. + * Example: WITH (x -> x + lambda_2(x)) AS lambda_1, (x -> x + lambda_1(x)) AS lambda_2 SELECT 1; + */ + auto it = lambdas_in_resolve_process.find(lambda_node.get()); + if (it != lambdas_in_resolve_process.end()) + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, + "Recursive lambda {}. In scope {}", + lambda_node->formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + lambdas_in_resolve_process.emplace(lambda_node.get()); + + size_t arguments_size = lambda_arguments.size(); + if (lambda_arguments_nodes_size != arguments_size) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Lambda {} expect {} arguments. Actual {}. In scope {}", + lambda_to_resolve.formatASTForErrorMessage(), + arguments_size, + lambda_arguments_nodes_size, + scope.scope_node->formatASTForErrorMessage()); + + /// Initialize aliases in lambda scope + QueryExpressionsAliasVisitor visitor(scope); + visitor.visit(lambda_to_resolve.getExpression()); + + /** Replace lambda arguments with new arguments. + * Additionally validate that there are no aliases with same name as lambda arguments. + * Arguments are registered in current scope expression_argument_name_to_node map. + */ + QueryTreeNodes lambda_new_arguments_nodes; + lambda_new_arguments_nodes.reserve(lambda_arguments_nodes_size); + + for (size_t i = 0; i < lambda_arguments_nodes_size; ++i) + { + auto & lambda_argument_node = lambda_arguments_nodes[i]; + auto & lambda_argument_node_typed = lambda_argument_node->as(); + const auto & lambda_argument_name = lambda_argument_node_typed.getIdentifier().getFullName(); + + bool has_expression_node = scope.alias_name_to_expression_node.contains(lambda_argument_name); + bool has_alias_node = scope.alias_name_to_lambda_node.contains(lambda_argument_name); + + if (has_expression_node || has_alias_node) + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Alias name '{}' inside lambda {} cannot have same name as lambda argument. 
In scope {}", + lambda_argument_name, + lambda_argument_node_typed.formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + } + + scope.expression_argument_name_to_node.emplace(lambda_argument_name, lambda_arguments[i]); + lambda_new_arguments_nodes.push_back(lambda_arguments[i]); + } + + lambda_to_resolve.getArguments().getNodes() = std::move(lambda_new_arguments_nodes); + + /// Lambda body expression is resolved as standard query expression node. + auto result_projection_names = resolveExpressionNode(lambda_to_resolve.getExpression(), scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/); + + lambdas_in_resolve_process.erase(lambda_node.get()); + + return result_projection_names; +} + +/** Resolve function node in scope. + * During function node resolve, function node can be replaced with another expression (if it match lambda or sql user defined function), + * with constant (if it allow constant folding), or with expression list. It is caller responsibility to handle such cases appropriately. + * + * Steps: + * 1. Resolve function parameters. Validate that each function parameter must be constant node. + * 2. Try to lookup function as lambda in current scope. If it is lambda we can skip `in` and `count` special handling. + * 3. If function is count function, that take unqualified ASTERISK matcher, remove it from its arguments. Example: SELECT count(*) FROM test_table; + * 4. If function is `IN` function, then right part of `IN` function is replaced as subquery. + * 5. Resolve function arguments list, lambda expressions are allowed as function arguments. + * For `IN` function table expressions are allowed as function arguments. + * 6. Initialize argument_columns, argument_types, function_lambda_arguments_indexes arrays from function arguments. + * 7. If function name identifier was not resolved as function in current scope, try to lookup lambda from sql user defined functions factory. + * 8. If function was resolve as lambda from step 2 or 7, then resolve lambda using function arguments and replace function node with lambda result. + * After than function node is resolved. + * 9. If function was not resolved during step 6 as lambda, then try to resolve function as window function or executable user defined function + * or ordinary function or aggregate function. + * + * If function is resolved as window function or executable user defined function or aggregate function, function node is resolved + * no additional special handling is required. + * + * 8. If function was resolved as non aggregate function. Then if some of function arguments are lambda expressions, their result types need to be initialized and + * they must be resolved. + * 9. If function is suitable for constant folding, try to perform constant folding for function node. 
+ */ +ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, IdentifierResolveScope & scope) +{ + FunctionNodePtr function_node_ptr = std::static_pointer_cast(node); + auto function_name = function_node_ptr->getFunctionName(); + + /// Resolve function parameters + + auto parameters_projection_names = resolveExpressionNodeList(function_node_ptr->getParametersNode(), + scope, + false /*allow_lambda_expression*/, + false /*allow_table_expression*/); + + /// Convert function parameters into constant parameters array + + Array parameters; + + auto & parameters_nodes = function_node_ptr->getParameters().getNodes(); + parameters.reserve(parameters_nodes.size()); + + for (auto & parameter_node : parameters_nodes) + { + auto constant_value = parameter_node->getConstantValueOrNull(); + + if (!constant_value) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Parameter for function {} expected to have constant value. Actual {}. In scope {}", + function_name, + parameter_node->formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + + parameters.push_back(constant_value->getValue()); + } + + //// If function node is not window function try to lookup function node name as lambda identifier. + QueryTreeNodePtr lambda_expression_untyped; + if (!function_node_ptr->isWindowFunction()) + { + auto function_lookup_result = tryResolveIdentifier({Identifier{function_name}, IdentifierLookupContext::FUNCTION}, scope); + lambda_expression_untyped = function_lookup_result.resolved_identifier; + } + + bool is_special_function_in = false; + bool is_special_function_dict_get_or_join_get = false; + bool is_special_function_exists = false; + + if (!lambda_expression_untyped) + { + is_special_function_in = isNameOfInFunction(function_name); + is_special_function_dict_get_or_join_get = functionIsJoinGet(function_name) || functionIsDictGet(function_name); + is_special_function_exists = function_name == "exists"; + + /// Handle SELECT count(*) FROM test_table + if (function_name == "count" && function_node_ptr->getArguments().getNodes().size() == 1) + { + auto * matcher_node = function_node_ptr->getArguments().getNodes().front()->as(); + if (matcher_node && matcher_node->isUnqualified()) + function_node_ptr->getArguments().getNodes().clear(); + } + } + + /** Special functions dictGet and its variations and joinGet can be executed when first argument is identifier. + * Example: SELECT dictGet(identifier, 'value', toUInt64(0)); + * + * Try to resolve identifier as expression identifier and if it is resolved use it. + * Example: WITH 'dict_name' AS identifier SELECT dictGet(identifier, 'value', toUInt64(0)); + * + * Otherwise replace identifier with identifier full name constant. + * Validation that dictionary exists or table exists will be performed during function `getReturnType` method call. 
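+      * Example: SELECT dictGet(dictionary_name, 'value', toUInt64(0)); unresolved identifier dictionary_name is replaced with constant 'dictionary_name'.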
+ */ + if (is_special_function_dict_get_or_join_get && + !function_node_ptr->getArguments().getNodes().empty() && + function_node_ptr->getArguments().getNodes()[0]->getNodeType() == QueryTreeNodeType::IDENTIFIER) + { + auto & first_argument = function_node_ptr->getArguments().getNodes()[0]; + auto & identifier_node = first_argument->as(); + IdentifierLookup identifier_lookup{identifier_node.getIdentifier(), IdentifierLookupContext::EXPRESSION}; + auto resolve_result = tryResolveIdentifier(identifier_lookup, scope); + + if (resolve_result.isResolved()) + first_argument = std::move(resolve_result.resolved_identifier); + else + first_argument = std::make_shared(identifier_node.getIdentifier().getFullName()); + } + + /// Resolve function arguments + + bool allow_table_expressions = is_special_function_in || is_special_function_exists; + auto arguments_projection_names = resolveExpressionNodeList(function_node_ptr->getArgumentsNode(), + scope, + true /*allow_lambda_expression*/, + allow_table_expressions /*allow_table_expression*/); + + if (is_special_function_exists) + { + /// Rewrite EXISTS (subquery) into 1 IN (SELECT 1 FROM (subquery) LIMIT 1). + auto & exists_subquery_argument = function_node_ptr->getArguments().getNodes().at(0); + + auto constant_data_type = std::make_shared(); + + auto in_subquery = std::make_shared(); + in_subquery->getProjection().getNodes().push_back(std::make_shared(1UL, constant_data_type)); + in_subquery->getJoinTree() = exists_subquery_argument; + in_subquery->getLimit() = std::make_shared(1UL, constant_data_type); + in_subquery->resolveProjectionColumns({NameAndTypePair("1", constant_data_type)}); + + function_node_ptr = std::make_shared("in"); + function_node_ptr->getArguments().getNodes() = {std::make_shared(1UL, constant_data_type), in_subquery}; + node = function_node_ptr; + function_name = "in"; + + is_special_function_in = true; + } + + auto & function_node = *function_node_ptr; + + /// Replace right IN function argument if it is table or table function with subquery that read ordinary columns + if (is_special_function_in) + { + auto & function_in_arguments_nodes = function_node.getArguments().getNodes(); + if (function_in_arguments_nodes.size() != 2) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Function {} expects 2 arguments", function_name); + + auto & in_second_argument = function_in_arguments_nodes[1]; + auto * table_node = in_second_argument->as(); + auto * table_function_node = in_second_argument->as(); + auto * query_node = in_second_argument->as(); + auto * union_node = in_second_argument->as(); + + if (table_node && dynamic_cast(table_node->getStorage().get()) != nullptr) + { + /// If table is already prepared set, we do not replace it with subquery + } + else if (table_node || table_function_node) + { + const auto & storage_snapshot = table_node ? 
table_node->getStorageSnapshot() : table_function_node->getStorageSnapshot(); + auto columns_to_select = storage_snapshot->getColumns(GetColumnsOptions(GetColumnsOptions::Ordinary)); + + size_t columns_to_select_size = columns_to_select.size(); + + auto column_nodes_to_select = std::make_shared(); + column_nodes_to_select->getNodes().reserve(columns_to_select_size); + + NamesAndTypes projection_columns; + projection_columns.reserve(columns_to_select_size); + + for (auto & column : columns_to_select) + { + column_nodes_to_select->getNodes().emplace_back(std::make_shared(column, in_second_argument)); + projection_columns.emplace_back(column.name, column.type); + } + + auto in_second_argument_query_node = std::make_shared(); + in_second_argument_query_node->setIsSubquery(true); + in_second_argument_query_node->getProjectionNode() = std::move(column_nodes_to_select); + in_second_argument_query_node->getJoinTree() = std::move(in_second_argument); + in_second_argument_query_node->resolveProjectionColumns(std::move(projection_columns)); + + in_second_argument = std::move(in_second_argument_query_node); + } + else if (query_node || union_node) + { + IdentifierResolveScope subquery_scope(in_second_argument, &scope /*parent_scope*/); + subquery_scope.subquery_depth = scope.subquery_depth + 1; + + if (query_node) + resolveQuery(in_second_argument, subquery_scope); + else if (union_node) + resolveUnion(in_second_argument, subquery_scope); + } + } + + /// Initialize function argument columns + + ColumnsWithTypeAndName argument_columns; + DataTypes argument_types; + bool all_arguments_constants = true; + std::vector function_lambda_arguments_indexes; + + auto & function_arguments = function_node.getArguments().getNodes(); + size_t function_arguments_size = function_arguments.size(); + + for (size_t function_argument_index = 0; function_argument_index < function_arguments_size; ++function_argument_index) + { + auto & function_argument = function_arguments[function_argument_index]; + + ColumnWithTypeAndName argument_column; + bool argument_is_lambda = false; + + /** If function argument is lambda, save lambda argument index and initialize argument type as DataTypeFunction + * where function argument types are initialized with empty array of lambda arguments size. + */ + if (const auto * lambda_node = function_argument->as()) + { + argument_is_lambda = true; + size_t lambda_arguments_size = lambda_node->getArguments().getNodes().size(); + argument_column.type = std::make_shared(DataTypes(lambda_arguments_size, nullptr), nullptr); + function_lambda_arguments_indexes.push_back(function_argument_index); + } + else if (is_special_function_in && + (function_argument->getNodeType() == QueryTreeNodeType::TABLE || + function_argument->getNodeType() == QueryTreeNodeType::QUERY || + function_argument->getNodeType() == QueryTreeNodeType::UNION)) + { + argument_column.type = std::make_shared(); + } + else + { + argument_column.type = function_argument->getResultType(); + } + + if (!argument_column.type) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Function {} argument is not resolved. 
In scope {}", + function_node.getFunctionName(), + scope.scope_node->formatASTForErrorMessage()); + + const auto constant_value = function_argument->getConstantValueOrNull(); + if (!argument_is_lambda && constant_value) + { + argument_column.column = constant_value->getType()->createColumnConst(1, constant_value->getValue()); + argument_column.type = constant_value->getType(); + } + else + { + all_arguments_constants = false; + } + + argument_types.push_back(argument_column.type); + argument_columns.emplace_back(std::move(argument_column)); + } + + /// Calculate function projection name + ProjectionNames result_projection_names = {calculateFunctionProjectionName(node, parameters_projection_names, arguments_projection_names)}; + + /** Try to resolve function as + * 1. Lambda function in current scope. Example: WITH (x -> x + 1) AS lambda SELECT lambda(1); + * 2. Lambda function from sql user defined functions. + * 3. Special `untuple` function. + * 4. Special `grouping` function. + * 5. Window function. + * 6. Executable user defined function. + * 7. Ordinary function. + * 8. Aggregate function. + * + * TODO: Provide better error hints. + */ + if (!function_node.isWindowFunction()) + { + if (!lambda_expression_untyped) + lambda_expression_untyped = tryGetLambdaFromSQLUserDefinedFunctions(function_node.getFunctionName(), scope.context); + + /** If function is resolved as lambda. + * Clone lambda before resolve. + * Initialize lambda arguments as function arguments. + * Resolve lambda and then replace function node with resolved lambda expression body. + * Example: WITH (x -> x + 1) AS lambda SELECT lambda(value) FROM test_table; + * Result: SELECT value + 1 FROM test_table; + */ + if (lambda_expression_untyped) + { + auto * lambda_expression = lambda_expression_untyped->as(); + if (!lambda_expression) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Function identifier {} must be resolved as lambda. Actual {}. In scope {}", + function_node.getFunctionName(), + lambda_expression_untyped->formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + + auto lambda_expression_clone = lambda_expression_untyped->clone(); + + IdentifierResolveScope lambda_scope(lambda_expression_clone, &scope /*parent_scope*/); + ProjectionNames lambda_projection_names = resolveLambda(lambda_expression_untyped, lambda_expression_clone, function_arguments, lambda_scope); + + auto & resolved_lambda = lambda_expression_clone->as(); + node = resolved_lambda.getExpression(); + + if (node->getNodeType() == QueryTreeNodeType::LIST) + result_projection_names = std::move(lambda_projection_names); + + return result_projection_names; + } + + if (function_name == "untuple") + { + /// Special handling of `untuple` function + + if (function_arguments.size() != 1) + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, + "Function 'untuple' must have 1 argument. In scope {}", + scope.scope_node->formatASTForErrorMessage()); + + const auto & untuple_argument = function_arguments[0]; + auto result_type = untuple_argument->getResultType(); + const auto * tuple_data_type = typeid_cast(result_type.get()); + if (!tuple_data_type) + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, + "Function untuple argument must be have compound type. Actual type {}. 
In scope {}", + result_type->getName(), + scope.scope_node->formatASTForErrorMessage()); + + const auto & element_names = tuple_data_type->getElementNames(); + + auto result_list = std::make_shared(); + result_list->getNodes().reserve(element_names.size()); + + for (const auto & element_name : element_names) + { + auto tuple_element_function = std::make_shared("tupleElement"); + tuple_element_function->getArguments().getNodes().push_back(untuple_argument); + tuple_element_function->getArguments().getNodes().push_back(std::make_shared(element_name)); + + QueryTreeNodePtr function_query_node = tuple_element_function; + resolveFunction(function_query_node, scope); + + result_list->getNodes().push_back(std::move(function_query_node)); + } + + auto untuple_argument_projection_name = arguments_projection_names.at(0); + result_projection_names.clear(); + + for (const auto & element_name : element_names) + { + if (node->hasAlias()) + result_projection_names.push_back(node->getAlias() + '.' + element_name); + else + result_projection_names.push_back(fmt::format("tupleElement({}, '{}')", untuple_argument_projection_name, element_name)); + } + + node = std::move(result_list); + return result_projection_names; + } + else if (function_name == "grouping") + { + /// It is responsibility of planner to perform additional handling of grouping function + if (function_arguments_size == 0) + throw Exception(ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION, + "Function GROUPING expects at least one argument"); + else if (function_arguments_size > 64) + throw Exception(ErrorCodes::TOO_MANY_ARGUMENTS_FOR_FUNCTION, + "Function GROUPING can have up to 64 arguments, but {} provided", + function_arguments_size); + + bool force_grouping_standard_compatibility = scope.context->getSettingsRef().force_grouping_standard_compatibility; + auto grouping_function = std::make_shared(force_grouping_standard_compatibility); + auto grouping_function_adaptor = std::make_shared(std::move(grouping_function)); + function_node.resolveAsFunction(std::move(grouping_function_adaptor), std::make_shared()); + return result_projection_names; + } + } + + if (function_node.isWindowFunction()) + { + if (!AggregateFunctionFactory::instance().isAggregateFunctionName(function_name)) + throw Exception(ErrorCodes::UNKNOWN_AGGREGATE_FUNCTION, + "Aggregate function with name {} does not exists. 
In scope {}", + function_name, + scope.scope_node->formatASTForErrorMessage()); + + AggregateFunctionProperties properties; + auto aggregate_function = AggregateFunctionFactory::instance().get(function_name, argument_types, parameters, properties); + + function_node.resolveAsWindowFunction(aggregate_function, aggregate_function->getReturnType()); + + bool window_node_is_identifier = function_node.getWindowNode()->getNodeType() == QueryTreeNodeType::IDENTIFIER; + ProjectionName window_projection_name = resolveWindow(function_node.getWindowNode(), scope); + + if (window_node_is_identifier) + result_projection_names[0] += " OVER " + window_projection_name; + else + result_projection_names[0] += " OVER (" + window_projection_name + ')'; + + return result_projection_names; + } + + FunctionOverloadResolverPtr function = UserDefinedExecutableFunctionFactory::instance().tryGet(function_name, scope.context, parameters); + + if (!function) + function = FunctionFactory::instance().tryGet(function_name, scope.context); + + if (!function) + { + if (!AggregateFunctionFactory::instance().isAggregateFunctionName(function_name)) + throw Exception(ErrorCodes::UNKNOWN_FUNCTION, + "Function with name {} does not exists. In scope {}", + function_name, + scope.scope_node->formatASTForErrorMessage()); + + AggregateFunctionProperties properties; + auto aggregate_function = AggregateFunctionFactory::instance().get(function_name, argument_types, parameters, properties); + function_node.resolveAsAggregateFunction(aggregate_function, aggregate_function->getReturnType()); + return result_projection_names; + } + + /** For lambda arguments we need to initialize lambda argument types DataTypeFunction using `getLambdaArgumentTypes` function. + * Then each lambda arguments are initialized with columns, where column source is lambda. + * This information is important for later steps of query processing. + * Example: SELECT arrayMap(x -> x + 1, [1, 2, 3]). + * lambda node x -> x + 1 identifier x is resolved as column where source is lambda node. + */ + bool has_lambda_arguments = !function_lambda_arguments_indexes.empty(); + if (has_lambda_arguments) + { + function->getLambdaArgumentTypes(argument_types); + + ProjectionNames lambda_projection_names; + for (auto & function_lambda_argument_index : function_lambda_arguments_indexes) + { + auto & lambda_argument = function_arguments[function_lambda_argument_index]; + auto lambda_to_resolve = lambda_argument->clone(); + auto & lambda_to_resolve_typed = lambda_to_resolve->as(); + + const auto & lambda_argument_names = lambda_to_resolve_typed.getArgumentNames(); + size_t lambda_arguments_size = lambda_to_resolve_typed.getArguments().getNodes().size(); + + const auto * function_data_type = typeid_cast(argument_types[function_lambda_argument_index].get()); + if (!function_data_type) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Function {} expected function data type for lambda argument with index {}. Actual {}. In scope {}", + function_name, + function_lambda_argument_index, + argument_types[function_lambda_argument_index]->getName(), + scope.scope_node->formatASTForErrorMessage()); + + const auto & function_data_type_argument_types = function_data_type->getArgumentTypes(); + size_t function_data_type_arguments_size = function_data_type_argument_types.size(); + if (function_data_type_arguments_size != lambda_arguments_size) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Function {} function data type for lambda argument with index {} arguments size mismatch. Actual {}. 
Expected {}. In scope {}", + function_name, + function_data_type_arguments_size, + lambda_arguments_size, + argument_types[function_lambda_argument_index]->getName(), + scope.scope_node->formatASTForErrorMessage()); + + QueryTreeNodes lambda_arguments; + lambda_arguments.reserve(lambda_arguments_size); + + for (size_t i = 0; i < lambda_arguments_size; ++i) + { + const auto & argument_type = function_data_type_argument_types[i]; + auto column_name_and_type = NameAndTypePair{lambda_argument_names[i], argument_type}; + lambda_arguments.push_back(std::make_shared(std::move(column_name_and_type), lambda_to_resolve)); + } + + IdentifierResolveScope lambda_scope(lambda_to_resolve, &scope /*parent_scope*/); + lambda_projection_names = resolveLambda(lambda_argument, lambda_to_resolve, lambda_arguments, lambda_scope); + + if (auto * lambda_list_node_result = lambda_to_resolve_typed.getExpression()->as()) + { + size_t lambda_list_node_result_nodes_size = lambda_list_node_result->getNodes().size(); + + if (lambda_list_node_result_nodes_size != 1) + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, + "Lambda as function argument resolved as list node with size {}. Expected 1. In scope {}", + lambda_list_node_result_nodes_size, + lambda_to_resolve->formatASTForErrorMessage()); + + lambda_to_resolve_typed.getExpression() = lambda_list_node_result->getNodes().front(); + } + + if (arguments_projection_names.at(function_lambda_argument_index) == PROJECTION_NAME_PLACEHOLDER) + { + size_t lambda_projection_names_size =lambda_projection_names.size(); + if (lambda_projection_names_size != 1) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Lambda argument inside function expected to have 1 projection name. Actual {}", + lambda_projection_names_size); + + WriteBufferFromOwnString lambda_argument_projection_name_buffer; + lambda_argument_projection_name_buffer << "lambda("; + lambda_argument_projection_name_buffer << "tuple("; + + size_t lambda_argument_names_size = lambda_argument_names.size(); + + for (size_t i = 0; i < lambda_argument_names_size; ++i) + { + const auto & lambda_argument_name = lambda_argument_names[i]; + lambda_argument_projection_name_buffer << lambda_argument_name; + + if (i + 1 != lambda_argument_names_size) + lambda_argument_projection_name_buffer << ", "; + } + + lambda_argument_projection_name_buffer << "), "; + lambda_argument_projection_name_buffer << lambda_projection_names[0]; + lambda_argument_projection_name_buffer << ")"; + + lambda_projection_names.clear(); + + arguments_projection_names[function_lambda_argument_index] = lambda_argument_projection_name_buffer.str(); + } + + argument_types[function_lambda_argument_index] = std::make_shared(function_data_type_argument_types, lambda_to_resolve->getResultType()); + argument_columns[function_lambda_argument_index].type = argument_types[function_lambda_argument_index]; + function_arguments[function_lambda_argument_index] = std::move(lambda_to_resolve); + } + + /// Recalculate function projection name after lambda resolution + result_projection_names = {calculateFunctionProjectionName(node, parameters_projection_names, arguments_projection_names)}; + } + + /** Create SET column for special function IN to allow constant folding + * if left and right arguments are constants. 
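+      * For example, in SELECT 1 IN (1, 2, 3) both arguments are constants, so Set is created from the right argument and such IN function can be folded into constant.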
+ * + * Example: SELECT * FROM test_table LIMIT 1 IN 1; + */ + if (is_special_function_in && + function_arguments.at(0)->hasConstantValue() && + function_arguments.at(1)->hasConstantValue()) + { + const auto & first_argument_constant_value = function_arguments[0]->getConstantValue(); + const auto & second_argument_constant_value = function_arguments[1]->getConstantValue(); + + const auto & first_argument_constant_type = first_argument_constant_value.getType(); + const auto & second_argument_constant_literal = second_argument_constant_value.getValue(); + const auto & second_argument_constant_type = second_argument_constant_value.getType(); + + auto set = makeSetForConstantValue(first_argument_constant_type, second_argument_constant_literal, second_argument_constant_type, scope.context->getSettingsRef()); + + /// Create constant set column for constant folding + + auto column_set = ColumnSet::create(1, std::move(set)); + argument_columns[1].column = ColumnConst::create(std::move(column_set), 1); + } + + DataTypePtr result_type; + + try + { + auto function_base = function->build(argument_columns); + result_type = function_base->getResultType(); + + /** If function is suitable for constant folding try to convert it to constant. + * Example: SELECT plus(1, 1); + * Result: SELECT 2; + */ + if (function_base->isSuitableForConstantFolding()) + { + auto executable_function = function_base->prepare(argument_columns); + + ColumnPtr column; + + if (all_arguments_constants) + { + size_t num_rows = function_arguments.empty() ? 0 : argument_columns.front().column->size(); + column = executable_function->execute(argument_columns, result_type, num_rows, true); + } + else + { + column = function_base->getConstantResultForNonConstArguments(argument_columns, result_type); + } + + if (column && isColumnConst(*column)) + { + /// Replace function node with result constant node + Field constant_value; + column->get(0, constant_value); + + function_node.performConstantFolding(std::make_shared(std::move(constant_value), result_type)); + } + } + } + catch (Exception & e) + { + e.addMessage("In scope {}", scope.scope_node->formatASTForErrorMessage()); + throw; + } + + function_node.resolveAsFunction(std::move(function), std::move(result_type)); + + return result_projection_names; +} + +/** Resolve expression node. + * Argument node can be replaced with different node, or even with list node in case of matcher resolution. + * Example: SELECT * FROM test_table; + * * - is matcher node, and it can be resolved into ListNode. + * + * Steps: + * 1. If node has alias, replace node with its value in scope alias map. Register alias in expression_aliases_in_resolve_process, to prevent resolving identifier + * which can bind to expression alias name. Check tryResolveIdentifierFromAliases documentation for additional explanation. + * Example: + * SELECT id AS id FROM test_table; + * SELECT value.value1 AS value FROM test_table; + * + * 2. Call specific resolve method depending on node type. + * + * If allow_table_expression = true and node is query node, then it is not evaluated as scalar subquery. + * Although if node is identifier that is resolved into query node that query is evaluated as scalar subquery. + * SELECT id, (SELECT 1) AS c FROM test_table WHERE a IN c; + * SELECT id, FROM test_table WHERE a IN (SELECT 1); + * + * 3. Special case identifier node. + * Try resolve it as expression identifier. + * Then if allow_lambda_expression = true try to resolve it as function. 
+ * Then if allow_table_expression = true try to resolve it as table expression. + * + * 4. If node has alias, update its value in scope alias map. Deregister alias from expression_aliases_in_resolve_process. + */ +ProjectionNames QueryAnalyzer::resolveExpressionNode(QueryTreeNodePtr & node, IdentifierResolveScope & scope, bool allow_lambda_expression, bool allow_table_expression) +{ + auto resolved_expression_it = resolved_expressions.find(node); + if (resolved_expression_it != resolved_expressions.end()) + { + /** There can be edge case, when subquery for IN function is resolved multiple times in different context. + * SELECT id IN (subquery AS value), value FROM test_table; + * When we start to resolve `value` identifier, subquery is already resolved but constant folding is not performed. + */ + auto node_type = node->getNodeType(); + if (!allow_table_expression && (node_type == QueryTreeNodeType::QUERY || node_type == QueryTreeNodeType::UNION)) + { + IdentifierResolveScope subquery_scope(node, &scope /*parent_scope*/); + subquery_scope.subquery_depth = scope.subquery_depth + 1; + + evaluateScalarSubqueryIfNeeded(node, subquery_scope.subquery_depth, subquery_scope.context); + } + + return resolved_expression_it->second; + } + + String node_alias = node->getAlias(); + ProjectionNames result_projection_names; + + if (node_alias.empty()) + { + auto projection_name_it = node_to_projection_name.find(node); + if (projection_name_it != node_to_projection_name.end()) + result_projection_names.push_back(projection_name_it->second); + } + else + { + result_projection_names.push_back(node_alias); + } + + /** Do not use alias table if node has alias same as some other node. + * Example: WITH x -> x + 1 AS lambda SELECT 1 AS lambda; + * During 1 AS lambda resolve if we use alias table we replace node with x -> x + 1 AS lambda. + * + * Do not use alias table if allow_table_expression = true and we resolve query node directly. + * Example: SELECT a FROM test_table WHERE id IN (SELECT 1) AS a; + * To support both (SELECT 1) AS expression in projection and (SELECT 1) as subquery in IN, do not use + * alias table because in alias table subquery could be evaluated as scalar. + */ + bool use_alias_table = true; + if (scope.nodes_with_duplicated_aliases.contains(node) || (allow_table_expression && node->getNodeType() == QueryTreeNodeType::QUERY)) + use_alias_table = false; + + if (!node_alias.empty() && use_alias_table) + { + /** Node could be potentially resolved by resolving other nodes. + * SELECT b, a as b FROM test_table; + * + * To resolve b we need to resolve a. 
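Two of the mechanisms described above can be illustrated compactly: the resolved-expressions cache that short-circuits repeated resolution, and the alias table that may redirect resolution to a previously registered expression. A toy sketch under those assumptions follows; the types are stand-ins, not the analyzer's real interfaces.

```cpp
#include <map>
#include <memory>
#include <string>
#include <vector>

struct ToyExpr { std::string text; };
using ToyExprPtr = std::shared_ptr<ToyExpr>;

struct ToyScope
{
    std::map<ToyExprPtr, std::vector<std::string>> resolved_expressions;      /// node -> projection names
    std::map<std::string, ToyExprPtr> alias_name_to_expression_node;          /// alias -> registered expression
};

std::vector<std::string> resolveToyExpression(ToyExprPtr & node, const std::string & node_alias, ToyScope & scope)
{
    /// 1. Already resolved nodes are returned from the cache.
    if (auto it = scope.resolved_expressions.find(node); it != scope.resolved_expressions.end())
        return it->second;

    /// 2. If the alias is bound in the alias table, resolution continues from
    ///    the aliased expression (mirrors the use_alias_table branch above).
    if (auto it = scope.alias_name_to_expression_node.find(node_alias); it != scope.alias_name_to_expression_node.end())
        node = it->second;

    std::vector<std::string> projection_names{node_alias.empty() ? node->text : node_alias};
    scope.resolved_expressions.emplace(node, projection_names);
    return projection_names;
}
```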
+ */ + auto it = scope.alias_name_to_expression_node.find(node_alias); + if (it != scope.alias_name_to_expression_node.end()) + node = it->second; + + if (allow_lambda_expression) + { + it = scope.alias_name_to_lambda_node.find(node_alias); + if (it != scope.alias_name_to_lambda_node.end()) + node = it->second; + } + } + + scope.expressions_in_resolve_process_stack.pushNode(node); + + auto node_type = node->getNodeType(); + + switch (node_type) + { + case QueryTreeNodeType::IDENTIFIER: + { + auto & identifier_node = node->as(); + auto unresolved_identifier = identifier_node.getIdentifier(); + auto resolve_identifier_expression_result = tryResolveIdentifier({unresolved_identifier, IdentifierLookupContext::EXPRESSION}, scope); + node = resolve_identifier_expression_result.resolved_identifier; + + if (node && result_projection_names.empty() && + (resolve_identifier_expression_result.isResolvedFromJoinTree() || resolve_identifier_expression_result.isResolvedFromExpressionArguments())) + { + auto projection_name_it = node_to_projection_name.find(node); + if (projection_name_it != node_to_projection_name.end()) + result_projection_names.push_back(projection_name_it->second); + } + + if (node && !node_alias.empty()) + scope.alias_name_to_lambda_node.erase(node_alias); + + if (!node && allow_lambda_expression) + { + node = tryResolveIdentifier({unresolved_identifier, IdentifierLookupContext::FUNCTION}, scope).resolved_identifier; + + if (node && !node_alias.empty()) + scope.alias_name_to_expression_node.erase(node_alias); + } + + if (!node && allow_table_expression) + { + node = tryResolveIdentifier({unresolved_identifier, IdentifierLookupContext::TABLE_EXPRESSION}, scope).resolved_identifier; + + /// If table identifier is resolved as CTE clone it + bool resolved_as_cte = node && node->as() && node->as()->isCTE(); + + if (resolved_as_cte) + { + node = node->clone(); + node->as().setIsCTE(false); + } + } + + if (!node) + { + std::string message_clarification; + if (allow_lambda_expression) + message_clarification = std::string(" or ") + toStringLowercase(IdentifierLookupContext::FUNCTION); + + if (allow_table_expression) + message_clarification = std::string(" or ") + toStringLowercase(IdentifierLookupContext::TABLE_EXPRESSION); + + throw Exception(ErrorCodes::UNKNOWN_IDENTIFIER, + "Unknown {}{} identifier '{}' in scope {}", + toStringLowercase(IdentifierLookupContext::EXPRESSION), + message_clarification, + unresolved_identifier.getFullName(), + scope.scope_node->formatASTForErrorMessage()); + } + + if (node->getNodeType() == QueryTreeNodeType::LIST) + { + result_projection_names.clear(); + resolved_expression_it = resolved_expressions.find(node); + if (resolved_expression_it != resolved_expressions.end()) + return resolved_expression_it->second; + else + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Identifier '{}' resolve into list node and list node projection names are not initialized. In scope {}", + unresolved_identifier.getFullName(), + scope.scope_node->formatASTForErrorMessage()); + } + + if (result_projection_names.empty()) + result_projection_names.push_back(unresolved_identifier.getFullName()); + + break; + } + case QueryTreeNodeType::MATCHER: + { + result_projection_names = resolveMatcher(node, scope); + break; + } + case QueryTreeNodeType::LIST: + { + /** Edge case if list expression has alias. + * Matchers cannot have aliases, but `untuple` function can. 
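The identifier branch above tries several lookup contexts in a fixed order. A hypothetical sketch of that fallback cascade, with toy types standing in for query tree nodes:

```cpp
#include <functional>
#include <memory>

struct ToyNode { };
using ToyNodePtr = std::shared_ptr<ToyNode>;

enum class LookupContext { Expression, Function, TableExpression };

/// Sketch of the fallback order used for a bare identifier: expression lookup
/// first, then function lookup (if lambdas are allowed in this context), then
/// table expression lookup (if table expressions are allowed).
ToyNodePtr resolveIdentifierWithFallback(
    const std::function<ToyNodePtr(LookupContext)> & try_resolve,
    bool allow_lambda_expression,
    bool allow_table_expression)
{
    if (auto node = try_resolve(LookupContext::Expression))
        return node;

    if (allow_lambda_expression)
        if (auto node = try_resolve(LookupContext::Function))
            return node;

    if (allow_table_expression)
        if (auto node = try_resolve(LookupContext::TableExpression))
            return node;

    return nullptr;   /// caller reports an unknown identifier
}
```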
+ * Example: SELECT a, untuple(CAST(('hello', 1) AS Tuple(name String, count UInt32))) AS a; + * During resolveFunction `untuple` function is replaced by list of 2 constants 'hello', 1. + */ + result_projection_names = resolveExpressionNodeList(node, scope, allow_lambda_expression, allow_lambda_expression); + break; + } + case QueryTreeNodeType::CONSTANT: + { + if (result_projection_names.empty()) + { + const auto & constant_node = node->as(); + result_projection_names.push_back(constant_node.getValueStringRepresentation()); + } + + /// Already resolved + break; + } + case QueryTreeNodeType::COLUMN: + { + auto & column_node = node->as(); + if (column_node.hasExpression()) + resolveExpressionNode(column_node.getExpression(), scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/); + + if (result_projection_names.empty()) + result_projection_names.push_back(column_node.getColumnName()); + + break; + } + case QueryTreeNodeType::FUNCTION: + { + auto function_projection_names = resolveFunction(node, scope); + + if (result_projection_names.empty() || node->getNodeType() == QueryTreeNodeType::LIST) + result_projection_names = std::move(function_projection_names); + + break; + } + case QueryTreeNodeType::LAMBDA: + { + if (!allow_lambda_expression) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Lambda {} is not allowed in expression context. In scope {}", + node->formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + + if (result_projection_names.empty()) + result_projection_names.push_back(PROJECTION_NAME_PLACEHOLDER); + + /// Lambda must be resolved by caller + break; + } + case QueryTreeNodeType::QUERY: + [[fallthrough]]; + case QueryTreeNodeType::UNION: + { + IdentifierResolveScope subquery_scope(node, &scope /*parent_scope*/); + subquery_scope.subquery_depth = scope.subquery_depth + 1; + + if (node_type == QueryTreeNodeType::QUERY) + resolveQuery(node, subquery_scope); + else + resolveUnion(node, subquery_scope); + + if (!allow_table_expression) + evaluateScalarSubqueryIfNeeded(node, subquery_scope.subquery_depth, subquery_scope.context); + + ++subquery_counter; + if (result_projection_names.empty()) + result_projection_names.push_back("_subquery_" + std::to_string(subquery_counter)); + + break; + } + case QueryTreeNodeType::TABLE: + { + if (!allow_table_expression) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Table {} is not allowed in expression context. In scope {}", + node->formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + + auto & table_node = node->as(); + result_projection_names.push_back(table_node.getStorageID().getFullNameNotQuoted()); + + break; + } + case QueryTreeNodeType::TRANSFORMER: + [[fallthrough]]; + case QueryTreeNodeType::SORT: + [[fallthrough]]; + case QueryTreeNodeType::INTERPOLATE: + [[fallthrough]]; + case QueryTreeNodeType::WINDOW: + [[fallthrough]]; + case QueryTreeNodeType::TABLE_FUNCTION: + [[fallthrough]]; + case QueryTreeNodeType::ARRAY_JOIN: + [[fallthrough]]; + case QueryTreeNodeType::JOIN: + { + throw Exception(ErrorCodes::LOGICAL_ERROR, + "{} {} is not allowed in expression context. In scope {}", + node->getNodeType(), + node->formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + } + } + + /** Update aliases after expression node was resolved. + * Do not update node in alias table if we resolve it for duplicate alias. 
+ */ + if (!node_alias.empty() && use_alias_table) + { + auto it = scope.alias_name_to_expression_node.find(node_alias); + if (it != scope.alias_name_to_expression_node.end()) + it->second = node; + + if (allow_lambda_expression) + { + it = scope.alias_name_to_lambda_node.find(node_alias); + if (it != scope.alias_name_to_lambda_node.end()) + it->second = node; + } + } + + resolved_expressions.emplace(node, result_projection_names); + + scope.expressions_in_resolve_process_stack.popNode(); + bool expression_was_root = scope.expressions_in_resolve_process_stack.empty(); + if (expression_was_root) + scope.non_cached_identifier_lookups_during_expression_resolve.clear(); + + return result_projection_names; +} + +/** Resolve expression node list. + * If expression is CTE subquery node it is skipped. + * If expression is resolved in list, it is flattened into initial node list. + * + * Such examples must work: + * Example: CREATE TABLE test_table (id UInt64, value UInt64) ENGINE=TinyLog; SELECT plus(*) FROM test_table; + * Example: SELECT *** FROM system.one; + */ +ProjectionNames QueryAnalyzer::resolveExpressionNodeList(QueryTreeNodePtr & node_list, IdentifierResolveScope & scope, bool allow_lambda_expression, bool allow_table_expression) +{ + auto & node_list_typed = node_list->as(); + size_t node_list_size = node_list_typed.getNodes().size(); + + QueryTreeNodes result_nodes; + result_nodes.reserve(node_list_size); + + ProjectionNames result_projection_names; + + for (auto & node : node_list_typed.getNodes()) + { + auto node_to_resolve = node; + auto expression_node_projection_names = resolveExpressionNode(node_to_resolve, scope, allow_lambda_expression, allow_table_expression); + + size_t expected_projection_names_size = 1; + if (auto * expression_list = node_to_resolve->as()) + { + expected_projection_names_size = expression_list->getNodes().size(); + for (auto & expression_list_node : expression_list->getNodes()) + result_nodes.push_back(expression_list_node); + } + else + { + result_nodes.push_back(std::move(node_to_resolve)); + } + + if (expression_node_projection_names.size() != expected_projection_names_size) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Expression nodes list expected {} projection names. Actual {}", + expected_projection_names_size, + expression_node_projection_names.size()); + + result_projection_names.insert(result_projection_names.end(), expression_node_projection_names.begin(), expression_node_projection_names.end()); + expression_node_projection_names.clear(); + } + + node_list_typed.getNodes() = std::move(result_nodes); + + return result_projection_names; +} + +/** Resolve sort columns nodes list. 
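resolveExpressionNodeList above splices list results (produced by matchers or by untuple) into the output and checks that each input contributed one projection name per produced node. A simplified standalone sketch of that flattening, with strings standing in for nodes:

```cpp
#include <stdexcept>
#include <string>
#include <vector>

/// Sketch of the flattening step: when one input expression resolves into a
/// list (matcher, untuple), its elements are spliced into the result, and the
/// input must provide one projection name per produced node.
std::vector<std::string> flattenResolvedNodeList(const std::vector<std::vector<std::string>> & per_input_nodes,
                                                 const std::vector<size_t> & per_input_projection_name_counts)
{
    std::vector<std::string> flattened;

    for (size_t i = 0; i < per_input_nodes.size(); ++i)
    {
        if (per_input_nodes[i].size() != per_input_projection_name_counts[i])
            throw std::logic_error("Expression nodes list expected matching projection names count");

        flattened.insert(flattened.end(), per_input_nodes[i].begin(), per_input_nodes[i].end());
    }

    return flattened;
}
```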
+ */ +ProjectionNames QueryAnalyzer::resolveSortNodeList(QueryTreeNodePtr & sort_node_list, IdentifierResolveScope & scope) +{ + ProjectionNames result_projection_names; + ProjectionNames sort_expression_projection_names; + ProjectionNames fill_from_expression_projection_names; + ProjectionNames fill_to_expression_projection_names; + ProjectionNames fill_step_expression_projection_names; + + auto & sort_node_list_typed = sort_node_list->as(); + for (auto & node : sort_node_list_typed.getNodes()) + { + auto & sort_node = node->as(); + sort_expression_projection_names = resolveExpressionNode(sort_node.getExpression(), scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/); + + if (auto * sort_column_list_node = sort_node.getExpression()->as()) + { + size_t sort_column_list_node_size = sort_column_list_node->getNodes().size(); + if (sort_column_list_node_size != 1) + { + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, + "Sort column node expression resolved into list with size {}. Expected 1. In scope {}", + sort_column_list_node_size, + scope.scope_node->formatASTForErrorMessage()); + } + + sort_node.getExpression() = sort_column_list_node->getNodes().front(); + } + + size_t sort_expression_projection_names_size = sort_expression_projection_names.size(); + if (sort_expression_projection_names_size != 1) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Sort expression expected 1 projection name. Actual {}", + sort_expression_projection_names_size); + + if (sort_node.hasFillFrom()) + { + fill_from_expression_projection_names = resolveExpressionNode(sort_node.getFillFrom(), scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/); + + const auto constant_value = sort_node.getFillFrom()->getConstantValueOrNull(); + if (!constant_value || !isColumnedAsNumber(constant_value->getType())) + throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, + "Sort FILL FROM expression must be constant with numeric type. Actual {}. In scope {}", + sort_node.getFillFrom()->formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + + size_t fill_from_expression_projection_names_size = fill_from_expression_projection_names.size(); + if (fill_from_expression_projection_names_size != 1) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Sort node FILL FROM expression expected 1 projection name. Actual {}", + fill_from_expression_projection_names_size); + } + + if (sort_node.hasFillTo()) + { + fill_to_expression_projection_names = resolveExpressionNode(sort_node.getFillTo(), scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/); + + const auto constant_value = sort_node.getFillTo()->getConstantValueOrNull(); + if (!constant_value || !isColumnedAsNumber(constant_value->getType())) + throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, + "Sort FILL TO expression must be constant with numeric type. Actual {}. In scope {}", + sort_node.getFillFrom()->formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + + size_t fill_to_expression_projection_names_size = fill_to_expression_projection_names.size(); + if (fill_to_expression_projection_names_size != 1) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Sort node FILL TO expression expected 1 projection name. 
Actual {}", + fill_to_expression_projection_names_size); + } + + if (sort_node.hasFillStep()) + { + fill_step_expression_projection_names = resolveExpressionNode(sort_node.getFillStep(), scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/); + + const auto constant_value = sort_node.getFillStep()->getConstantValueOrNull(); + if (!constant_value) + throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, + "Sort FILL STEP expression must be constant with numeric or interval type. Actual {}. In scope {}", + sort_node.getFillStep()->formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + + bool is_number = isColumnedAsNumber(constant_value->getType()); + bool is_interval = WhichDataType(constant_value->getType()).isInterval(); + if (!is_number && !is_interval) + throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, + "Sort FILL STEP expression must be constant with numeric or interval type. Actual {}. In scope {}", + sort_node.getFillStep()->formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + + size_t fill_step_expression_projection_names_size = fill_step_expression_projection_names.size(); + if (fill_step_expression_projection_names_size != 1) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Sort FILL STEP expression expected 1 projection name. Actual {}", + fill_step_expression_projection_names_size); + } + + auto sort_column_projection_name = calculateSortColumnProjectionName(node, + sort_expression_projection_names[0], + fill_from_expression_projection_names.empty() ? "" : fill_from_expression_projection_names.front(), + fill_to_expression_projection_names.empty() ? "" : fill_to_expression_projection_names.front(), + fill_step_expression_projection_names.empty() ? "" : fill_step_expression_projection_names.front()); + + result_projection_names.push_back(std::move(sort_column_projection_name)); + + sort_expression_projection_names.clear(); + fill_from_expression_projection_names.clear(); + fill_to_expression_projection_names.clear(); + fill_step_expression_projection_names.clear(); + } + + return result_projection_names; +} + +/** Resolve interpolate columns nodes list. + */ +void QueryAnalyzer::resolveInterpolateColumnsNodeList(QueryTreeNodePtr & interpolate_node_list, IdentifierResolveScope & scope) +{ + auto & interpolate_node_list_typed = interpolate_node_list->as(); + + for (auto & interpolate_node : interpolate_node_list_typed.getNodes()) + { + auto & interpolate_node_typed = interpolate_node->as(); + + resolveExpressionNode(interpolate_node_typed.getExpression(), scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/); + resolveExpressionNode(interpolate_node_typed.getInterpolateExpression(), scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/); + } +} + +/** Resolve window nodes list. 
+ */ +void QueryAnalyzer::resolveWindowNodeList(QueryTreeNodePtr & window_node_list, IdentifierResolveScope & scope) +{ + auto & window_node_list_typed = window_node_list->as(); + for (auto & node : window_node_list_typed.getNodes()) + resolveWindow(node, scope); +} + +NamesAndTypes QueryAnalyzer::resolveProjectionExpressionNodeList(QueryTreeNodePtr & projection_node_list, IdentifierResolveScope & scope) +{ + ProjectionNames projection_names = resolveExpressionNodeList(projection_node_list, scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/); + + auto projection_nodes = projection_node_list->as().getNodes(); + size_t projection_nodes_size = projection_nodes.size(); + + NamesAndTypes projection_columns; + projection_columns.reserve(projection_nodes_size); + + for (size_t i = 0; i < projection_nodes_size; ++i) + { + auto projection_node = projection_nodes[i]; + + if (!isExpressionNodeType(projection_node->getNodeType())) + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, + "Projection node must be constant, function, column, query or union"); + + projection_columns.emplace_back(projection_names[i], projection_node->getResultType()); + } + + return projection_columns; +} + +/** Initialize query join tree node. + * + * 1. Resolve identifiers. + * 2. Register table, table function, query, union, join, array join nodes in scope table expressions in resolve process. + */ +void QueryAnalyzer::initializeQueryJoinTreeNode(QueryTreeNodePtr & join_tree_node, IdentifierResolveScope & scope) +{ + std::deque join_tree_node_ptrs_to_process_queue; + join_tree_node_ptrs_to_process_queue.push_back(&join_tree_node); + + while (!join_tree_node_ptrs_to_process_queue.empty()) + { + auto * current_join_tree_node_ptr = join_tree_node_ptrs_to_process_queue.front(); + join_tree_node_ptrs_to_process_queue.pop_front(); + + auto & current_join_tree_node = *current_join_tree_node_ptr; + auto current_join_tree_node_type = current_join_tree_node->getNodeType(); + + switch (current_join_tree_node_type) + { + case QueryTreeNodeType::IDENTIFIER: + { + auto & from_table_identifier = current_join_tree_node->as(); + auto table_identifier_lookup = IdentifierLookup{from_table_identifier.getIdentifier(), IdentifierLookupContext::TABLE_EXPRESSION}; + + IdentifierResolveSettings resolve_settings; + /// In join tree initialization ignore join tree as identifier lookup source + resolve_settings.allow_to_check_join_tree = false; + /** Disable resolve of subquery during identifier resolution. + * Example: SELECT * FROM (SELECT 1) AS t1, t1; + * During `t1` identifier resolution we resolve it into subquery SELECT 1, but we want to disable + * subquery resolution at this stage, because JOIN TREE of parent query is not resolved. 
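initializeQueryJoinTreeNode above walks the join tree with a queue of pointers so that identifier nodes can be replaced in place by the table expressions they resolve to. A simplified sketch of that traversal over a toy join tree (real nodes also distinguish ARRAY JOIN, subqueries, table functions, and so on):

```cpp
#include <deque>
#include <memory>
#include <vector>

/// Toy join tree node: a JOIN has two table expressions, an ARRAY JOIN has one,
/// leaves (tables, table functions, subqueries) have none.
struct ToyJoinTreeNode
{
    std::shared_ptr<ToyJoinTreeNode> left;
    std::shared_ptr<ToyJoinTreeNode> right;
};

/// Sketch of the queue-based walk: pointers to child table expressions are
/// pushed into the queue so they can later be replaced in place, and every
/// visited node is registered as "in resolve process".
std::vector<ToyJoinTreeNode *> collectTableExpressionsInResolveProcess(std::shared_ptr<ToyJoinTreeNode> & root)
{
    std::vector<ToyJoinTreeNode *> registered;
    std::deque<std::shared_ptr<ToyJoinTreeNode> *> nodes_to_process;
    nodes_to_process.push_back(&root);

    while (!nodes_to_process.empty())
    {
        auto * current = nodes_to_process.front();
        nodes_to_process.pop_front();

        registered.push_back(current->get());

        if ((*current)->left)
            nodes_to_process.push_back(&(*current)->left);
        if ((*current)->right)
            nodes_to_process.push_back(&(*current)->right);
    }

    return registered;
}
```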
+ */ + resolve_settings.allow_to_resolve_subquery_during_identifier_resolution = false; + + auto table_identifier_resolve_result = tryResolveIdentifier(table_identifier_lookup, scope, resolve_settings); + auto resolved_identifier = table_identifier_resolve_result.resolved_identifier; + + if (!resolved_identifier) + throw Exception(ErrorCodes::UNKNOWN_IDENTIFIER, + "Unknown table expression identifier '{}' in scope {}", + from_table_identifier.getIdentifier().getFullName(), + scope.scope_node->formatASTForErrorMessage()); + + resolved_identifier = resolved_identifier->clone(); + + auto table_expression_modifiers = from_table_identifier.getTableExpressionModifiers(); + + auto * resolved_identifier_query_node = resolved_identifier->as(); + auto * resolved_identifier_union_node = resolved_identifier->as(); + + if (resolved_identifier_query_node || resolved_identifier_union_node) + { + if (resolved_identifier_query_node) + resolved_identifier_query_node->setIsCTE(false); + else + resolved_identifier_union_node->setIsCTE(false); + + if (table_expression_modifiers.has_value()) + { + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, + "Table expression modifiers {} are not supported for subquery {}", + table_expression_modifiers->formatForErrorMessage(), + resolved_identifier->formatASTForErrorMessage()); + } + } + else if (auto * resolved_identifier_table_node = resolved_identifier->as()) + { + if (table_expression_modifiers.has_value()) + resolved_identifier_table_node->setTableExpressionModifiers(*table_expression_modifiers); + } + else if (auto * resolved_identifier_table_function_node = resolved_identifier->as()) + { + if (table_expression_modifiers.has_value()) + resolved_identifier_table_function_node->setTableExpressionModifiers(*table_expression_modifiers); + } + else + { + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Identifier in JOIN TREE '{}' resolved into unexpected table expression. 
In scope {}", + from_table_identifier.getIdentifier().getFullName(), + scope.scope_node->formatASTForErrorMessage()); + } + + auto current_join_tree_node_alias = current_join_tree_node->getAlias(); + resolved_identifier->setAlias(current_join_tree_node_alias); + current_join_tree_node = resolved_identifier; + + scope.table_expressions_in_resolve_process.insert(current_join_tree_node.get()); + break; + } + case QueryTreeNodeType::QUERY: + { + scope.table_expressions_in_resolve_process.insert(current_join_tree_node.get()); + break; + } + case QueryTreeNodeType::UNION: + { + scope.table_expressions_in_resolve_process.insert(current_join_tree_node.get()); + break; + } + case QueryTreeNodeType::TABLE_FUNCTION: + { + scope.table_expressions_in_resolve_process.insert(current_join_tree_node.get()); + break; + } + case QueryTreeNodeType::TABLE: + { + scope.table_expressions_in_resolve_process.insert(current_join_tree_node.get()); + break; + } + case QueryTreeNodeType::ARRAY_JOIN: + { + auto & array_join = current_join_tree_node->as(); + join_tree_node_ptrs_to_process_queue.push_back(&array_join.getTableExpression()); + scope.table_expressions_in_resolve_process.insert(current_join_tree_node.get()); + break; + } + case QueryTreeNodeType::JOIN: + { + auto & join = current_join_tree_node->as(); + join_tree_node_ptrs_to_process_queue.push_back(&join.getLeftTableExpression()); + join_tree_node_ptrs_to_process_queue.push_back(&join.getRightTableExpression()); + scope.table_expressions_in_resolve_process.insert(current_join_tree_node.get()); + break; + } + default: + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Query FROM section expected table, table function, query, UNION, ARRAY JOIN or JOIN. Actual {} {}. In scope {}", + current_join_tree_node->getNodeTypeName(), + current_join_tree_node->formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + } + } + } +} + +/// Initialize table expression columns for table expression node +void QueryAnalyzer::initializeTableExpressionColumns(const QueryTreeNodePtr & table_expression_node, IdentifierResolveScope & scope) +{ + auto * table_node = table_expression_node->as(); + auto * query_node = table_expression_node->as(); + auto * union_node = table_expression_node->as(); + auto * table_function_node = table_expression_node->as(); + + if (!table_node && !table_function_node && !query_node && !union_node) + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, + "Unexpected table expression. Expected table, table function, query or union node. Actual {}. In scope {}", + table_expression_node->formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + + auto table_expression_data_it = scope.table_expression_node_to_data.find(table_expression_node); + if (table_expression_data_it != scope.table_expression_node_to_data.end()) + return; + + TableExpressionData table_expression_data; + + if (table_node) + { + const auto & table_storage_id = table_node->getStorageID(); + table_expression_data.table_name = table_storage_id.table_name; + table_expression_data.database_name = table_storage_id.database_name; + table_expression_data.table_expression_name = table_storage_id.getFullNameNotQuoted(); + table_expression_data.table_expression_description = "table"; + } + else if (query_node || union_node) + { + table_expression_data.table_name = query_node ? 
query_node->getCTEName() : union_node->getCTEName(); + table_expression_data.table_expression_description = "subquery"; + + if (table_expression_node->hasAlias()) + table_expression_data.table_expression_name = table_expression_node->getAlias(); + } + else if (table_function_node) + { + table_expression_data.table_expression_description = "table_function"; + if (table_function_node->hasAlias()) + table_expression_data.table_expression_name = table_function_node->getAlias(); + } + + if (table_node || table_function_node) + { + const auto & storage_snapshot = table_node ? table_node->getStorageSnapshot() : table_function_node->getStorageSnapshot(); + + auto column_names_and_types = storage_snapshot->getColumns(GetColumnsOptions(GetColumnsOptions::All).withSubcolumns().withVirtuals()); + const auto & columns_description = storage_snapshot->metadata->getColumns(); + + std::vector> alias_columns_to_resolve; + ColumnNameToColumnNodeMap column_name_to_column_node; + column_name_to_column_node.reserve(column_names_and_types.size()); + + /** For ALIAS columns in table we must additionally analyze ALIAS expressions. + * Example: CREATE TABLE test_table (id UInt64, alias_value_1 ALIAS id + 5); + * + * To do that we collect alias columns and build table column name to column node map. + * For each alias column we build identifier resolve scope, initialize it with table column name to node map + * and resolve alias column. + */ + for (const auto & column_name_and_type : column_names_and_types) + { + const auto & column_default = columns_description.getDefault(column_name_and_type.name); + + if (column_default && column_default->kind == ColumnDefaultKind::Alias) + { + auto column_node = std::make_shared(column_name_and_type, buildQueryTree(column_default->expression, scope.context), table_expression_node); + column_name_to_column_node.emplace(column_name_and_type.name, column_node); + alias_columns_to_resolve.emplace_back(column_name_and_type.name, column_node); + } + else + { + auto column_node = std::make_shared(column_name_and_type, table_expression_node); + column_name_to_column_node.emplace(column_name_and_type.name, column_node); + } + } + + for (auto & [alias_column_to_resolve_name, alias_column_to_resolve] : alias_columns_to_resolve) + { + /** Alias column could be potentially resolved during resolve of other ALIAS column. + * Example: CREATE TABLE test_table (id UInt64, alias_value_1 ALIAS id + alias_value_2, alias_value_2 ALIAS id + 5) ENGINE=TinyLog; + * + * During resolve of alias_value_1, alias_value_2 column will be resolved. 
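The ALIAS-column handling above is a two-phase process: first every storage column is registered in the name-to-node map, then each ALIAS column is resolved in a scope seeded with that map, so one ALIAS column may reference another. A toy sketch of the same shape (the resolution step itself is only marked, not implemented):

```cpp
#include <map>
#include <string>
#include <vector>

/// Toy column description: ALIAS columns carry a default expression.
struct ToyColumn
{
    std::string alias_expression;   /// e.g. "id + 5"; empty for ordinary columns
    bool resolved = false;
};

/// Phase 1: collect the ALIAS columns while every column is already present in
/// the map. Phase 2: resolve each ALIAS column against that map, so one ALIAS
/// column may freely reference another.
void resolveAliasColumns(std::map<std::string, ToyColumn> & column_name_to_column)
{
    std::vector<std::string> alias_columns_to_resolve;
    for (const auto & [name, column] : column_name_to_column)
        if (!column.alias_expression.empty())
            alias_columns_to_resolve.push_back(name);

    for (const auto & name : alias_columns_to_resolve)
    {
        /// A real implementation would resolve the stored expression in a scope
        /// seeded with column_name_to_column; here we only mark the column.
        column_name_to_column[name].resolved = true;
    }
}
```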
+ */ + alias_column_to_resolve = column_name_to_column_node[alias_column_to_resolve_name]; + + IdentifierResolveScope alias_column_resolve_scope(alias_column_to_resolve, nullptr /*parent_scope*/); + alias_column_resolve_scope.column_name_to_column_node = std::move(column_name_to_column_node); + alias_column_resolve_scope.context = scope.context; + + /// Initialize aliases in alias column scope + QueryExpressionsAliasVisitor visitor(alias_column_resolve_scope); + visitor.visit(alias_column_to_resolve->getExpression()); + + resolveExpressionNode(alias_column_resolve_scope.scope_node, + alias_column_resolve_scope, + false /*allow_lambda_expression*/, + false /*allow_table_expression*/); + + column_name_to_column_node = std::move(alias_column_resolve_scope.column_name_to_column_node); + column_name_to_column_node[alias_column_to_resolve_name] = alias_column_to_resolve; + } + + table_expression_data.column_name_to_column_node = std::move(column_name_to_column_node); + } + else if (query_node || union_node) + { + auto column_names_and_types = query_node ? query_node->getProjectionColumns() : union_node->computeProjectionColumns(); + table_expression_data.column_name_to_column_node.reserve(column_names_and_types.size()); + + for (const auto & column_name_and_type : column_names_and_types) + { + auto column_node = std::make_shared(column_name_and_type, table_expression_node); + table_expression_data.column_name_to_column_node.emplace(column_name_and_type.name, column_node); + } + } + + table_expression_data.column_identifier_first_parts.reserve(table_expression_data.column_name_to_column_node.size()); + + for (auto & [column_name, _] : table_expression_data.column_name_to_column_node) + { + Identifier column_name_identifier(column_name); + table_expression_data.column_identifier_first_parts.insert(column_name_identifier.at(0)); + } + + scope.table_expression_node_to_data.emplace(table_expression_node, std::move(table_expression_data)); +} + +/** Resolve query join tree. + * + * Query join tree must be initialized before calling this function. + */ +void QueryAnalyzer::resolveQueryJoinTreeNode(QueryTreeNodePtr & join_tree_node, IdentifierResolveScope & scope, QueryExpressionsAliasVisitor & expressions_visitor) +{ + auto add_table_expression_alias_into_scope = [&](const QueryTreeNodePtr & table_expression_node) + { + const auto & alias_name = table_expression_node->getAlias(); + if (alias_name.empty()) + return; + + auto [it, inserted] = scope.alias_name_to_table_expression_node.emplace(alias_name, table_expression_node); + if (!inserted) + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, + "Duplicate aliases {} for table expressions in FROM section are not allowed. Try to register {}. 
Already registered {}.", + alias_name, + table_expression_node->formatASTForErrorMessage(), + it->second->formatASTForErrorMessage()); + }; + + auto from_node_type = join_tree_node->getNodeType(); + + switch (from_node_type) + { + case QueryTreeNodeType::QUERY: + [[fallthrough]]; + case QueryTreeNodeType::UNION: + { + IdentifierResolveScope subquery_scope(join_tree_node, &scope); + subquery_scope.subquery_depth = scope.subquery_depth + 1; + + if (from_node_type == QueryTreeNodeType::QUERY) + resolveQuery(join_tree_node, subquery_scope); + else if (from_node_type == QueryTreeNodeType::UNION) + resolveUnion(join_tree_node, subquery_scope); + + break; + } + case QueryTreeNodeType::TABLE_FUNCTION: + { + auto & table_function_node = join_tree_node->as(); + expressions_visitor.visit(table_function_node.getArgumentsNode()); + + const auto & table_function_factory = TableFunctionFactory::instance(); + const auto & table_function_name = table_function_node.getTableFunctionName(); + + auto & scope_context = scope.context; + + TableFunctionPtr table_function_ptr = table_function_factory.tryGet(table_function_name, scope_context); + if (!table_function_ptr) + { + auto hints = TableFunctionFactory::instance().getHints(table_function_name); + if (!hints.empty()) + throw Exception(ErrorCodes::UNKNOWN_FUNCTION, + "Unknown table function {}. Maybe you meant: {}", + table_function_name, + DB::toString(hints)); + else + throw Exception(ErrorCodes::UNKNOWN_FUNCTION, "Unknown table function {}", table_function_name); + } + + if (scope_context->getSettingsRef().use_structure_from_insertion_table_in_table_functions && table_function_ptr->needStructureHint()) + { + const auto & insertion_table = scope_context->getInsertionTable(); + if (!insertion_table.empty()) + { + const auto & structure_hint + = DatabaseCatalog::instance().getTable(insertion_table, scope_context)->getInMemoryMetadataPtr()->columns; + table_function_ptr->setStructureHint(structure_hint); + } + } + + /// TODO: Special functions that can take query + /// TODO: Support qualified matchers for table function + + for (auto & argument_node : table_function_node.getArguments().getNodes()) + { + if (argument_node->getNodeType() == QueryTreeNodeType::MATCHER) + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Matcher as table function argument is not supported {}. 
In scope {}", + join_tree_node->formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + } + + auto * function_node = argument_node->as(); + if (function_node && table_function_factory.hasNameOrAlias(function_node->getFunctionName())) + continue; + + resolveExpressionNode(argument_node, scope, false /*allow_lambda_expression*/, true /*allow_table_expression*/); + } + + auto table_function_ast = table_function_node.toAST(); + table_function_ptr->parseArguments(table_function_ast, scope_context); + + auto table_function_storage = table_function_ptr->execute(table_function_ast, scope_context, table_function_ptr->getName()); + table_function_node.resolve(std::move(table_function_ptr), std::move(table_function_storage), scope_context); + + break; + } + case QueryTreeNodeType::TABLE: + { + break; + } + case QueryTreeNodeType::ARRAY_JOIN: + { + auto & array_join_node = join_tree_node->as(); + resolveQueryJoinTreeNode(array_join_node.getTableExpression(), scope, expressions_visitor); + validateJoinTableExpressionWithoutAlias(join_tree_node, array_join_node.getTableExpression(), scope); + + /// Wrap array join expressions into column nodes, where array join expression is inner expression. + + for (auto & array_join_expression : array_join_node.getJoinExpressions().getNodes()) + { + auto array_join_expression_alias = array_join_expression->getAlias(); + if (!array_join_expression_alias.empty() && scope.alias_name_to_expression_node.contains(array_join_expression_alias)) + throw Exception(ErrorCodes::MULTIPLE_EXPRESSIONS_FOR_ALIAS, + "ARRAY JOIN expression {} with duplicate alias {}. In scope {}", + array_join_expression->formatASTForErrorMessage(), + array_join_expression_alias, + scope.scope_node->formatASTForErrorMessage()); + + /// Add array join expression into scope + expressions_visitor.visit(array_join_expression); + + resolveExpressionNode(array_join_expression, scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/); + + auto result_type = array_join_expression->getResultType(); + + if (!isArray(result_type)) + throw Exception(ErrorCodes::TYPE_MISMATCH, + "ARRAY JOIN {} requires expression with Array type. Actual {}. 
In scope {}", + array_join_node.formatASTForErrorMessage(), + result_type->getName(), + scope.scope_node->formatASTForErrorMessage()); + + result_type = assert_cast(*result_type).getNestedType(); + + auto array_join_expression_name = "__array_join_expression_" + std::to_string(array_join_expressions_counter); + ++array_join_expressions_counter; + + auto array_join_column = std::make_shared(NameAndTypePair{array_join_expression_name, result_type}, array_join_expression, join_tree_node); + array_join_expression = std::move(array_join_column); + array_join_expression->setAlias(array_join_expression_alias); + + auto it = scope.alias_name_to_expression_node.find(array_join_expression_alias); + if (it != scope.alias_name_to_expression_node.end()) + it->second = std::make_shared(NameAndTypePair{array_join_expression_name, result_type}, join_tree_node); + } + + break; + } + case QueryTreeNodeType::JOIN: + { + auto & join_node = join_tree_node->as(); + + resolveQueryJoinTreeNode(join_node.getLeftTableExpression(), scope, expressions_visitor); + validateJoinTableExpressionWithoutAlias(join_tree_node, join_node.getLeftTableExpression(), scope); + + resolveQueryJoinTreeNode(join_node.getRightTableExpression(), scope, expressions_visitor); + validateJoinTableExpressionWithoutAlias(join_tree_node, join_node.getRightTableExpression(), scope); + + if (join_node.isUsingJoinExpression()) + { + auto & join_using_list = join_node.getJoinExpression()->as(); + std::unordered_set join_using_identifiers; + + for (auto & join_using_node : join_using_list.getNodes()) + { + auto * identifier_node = join_using_node->as(); + if (!identifier_node) + continue; + + const auto & identifier_full_name = identifier_node->getIdentifier().getFullName(); + + if (join_using_identifiers.contains(identifier_full_name)) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "JOIN {} identifier '{}' appears more than once in USING clause", + join_node.formatASTForErrorMessage(), + identifier_full_name); + + join_using_identifiers.insert(identifier_full_name); + + IdentifierLookup identifier_lookup {identifier_node->getIdentifier(), IdentifierLookupContext::EXPRESSION}; + auto result_left_table_expression = tryResolveIdentifierFromJoinTreeNode(identifier_lookup, join_node.getLeftTableExpression(), scope); + if (!result_left_table_expression) + throw Exception(ErrorCodes::UNKNOWN_IDENTIFIER, "JOIN {} using identifier '{}' cannot be resolved from left table expression. In scope {}", + join_node.formatASTForErrorMessage(), + identifier_full_name, + scope.scope_node->formatASTForErrorMessage()); + + auto result_right_table_expression = tryResolveIdentifierFromJoinTreeNode(identifier_lookup, join_node.getRightTableExpression(), scope); + if (!result_right_table_expression) + throw Exception(ErrorCodes::UNKNOWN_IDENTIFIER, "JOIN {} using identifier '{}' cannot be resolved from right table expression. In scope {}", + join_node.formatASTForErrorMessage(), + identifier_full_name, + scope.scope_node->formatASTForErrorMessage()); + + DataTypePtr common_type = tryGetLeastSupertype(DataTypes{result_left_table_expression->getResultType(), result_right_table_expression->getResultType()}); + + if (!common_type) + throw Exception(ErrorCodes::NO_COMMON_TYPE, + "JOIN {} cannot infer common type in USING for identifier '{}'. 
In scope {}", + join_node.formatASTForErrorMessage(), + identifier_full_name, + scope.scope_node->formatASTForErrorMessage()); + + NameAndTypePair join_using_columns_common_name_and_type(identifier_full_name, common_type); + ListNodePtr join_using_expression = std::make_shared(QueryTreeNodes{result_left_table_expression, result_right_table_expression}); + auto join_using_column = std::make_shared(join_using_columns_common_name_and_type, std::move(join_using_expression), join_tree_node); + + join_using_node = std::move(join_using_column); + } + } + else if (join_node.getJoinExpression()) + { + expressions_visitor.visit(join_node.getJoinExpression()); + auto join_expression = join_node.getJoinExpression(); + resolveExpressionNode(join_expression, scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/); + join_node.getJoinExpression() = std::move(join_expression); + } + + break; + } + case QueryTreeNodeType::IDENTIFIER: + { + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Identifiers in FROM section must be already resolved. In scope {}", + join_tree_node->formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + } + default: + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Query FROM section expected table, table function, query, ARRAY JOIN or JOIN. Actual {}. In scope {}", + join_tree_node->formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + } + } + + auto join_tree_node_type = join_tree_node->getNodeType(); + if (isTableExpressionNodeType(join_tree_node_type)) + { + validateTableExpressionModifiers(join_tree_node, scope); + initializeTableExpressionColumns(join_tree_node, scope); + } + + add_table_expression_alias_into_scope(join_tree_node); + scope.table_expressions_in_resolve_process.erase(join_tree_node.get()); +} + +class ValidateGroupByColumnsVisitor : public ConstInDepthQueryTreeVisitor +{ +public: + ValidateGroupByColumnsVisitor(const QueryTreeNodes & group_by_keys_nodes_, const IdentifierResolveScope & scope_) + : group_by_keys_nodes(group_by_keys_nodes_) + , scope(scope_) + {} + + void visitImpl(const QueryTreeNodePtr & node) + { + auto query_tree_node_type = node->getNodeType(); + if (query_tree_node_type == QueryTreeNodeType::CONSTANT || + query_tree_node_type == QueryTreeNodeType::SORT || + query_tree_node_type == QueryTreeNodeType::INTERPOLATE) + return; + + auto * function_node = node->as(); + if (function_node && function_node->getFunctionName() == "grouping") + { + auto & grouping_function_arguments_nodes = function_node->getArguments().getNodes(); + for (auto & grouping_function_arguments_node : grouping_function_arguments_nodes) + { + bool found_argument_in_group_by_keys = false; + + for (const auto & group_by_key_node : group_by_keys_nodes) + { + if (grouping_function_arguments_node->isEqual(*group_by_key_node)) + { + found_argument_in_group_by_keys = true; + break; + } + } + + if (!found_argument_in_group_by_keys) + throw Exception(ErrorCodes::NOT_AN_AGGREGATE, + "GROUPING function argument {} is not in GROUP BY. 
In scope {}", + grouping_function_arguments_node->formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + } + + return; + } + + auto * column_node = node->as(); + if (!column_node) + return; + + auto column_node_source = column_node->getColumnSource(); + if (column_node_source->getNodeType() == QueryTreeNodeType::LAMBDA) + return; + + for (const auto & group_by_key_node : group_by_keys_nodes) + { + if (node->isEqual(*group_by_key_node)) + return; + } + + std::string column_name; + + if (column_node_source->hasAlias()) + column_name = column_node_source->getAlias(); + else if (auto * table_node = column_node_source->as()) + column_name = table_node->getStorageID().getFullTableName(); + + column_name += '.' + column_node->getColumnName(); + + throw Exception(ErrorCodes::NOT_AN_AGGREGATE, + "Column {} is not under aggregate function and not in GROUP BY. In scope {}", + column_name, + scope.scope_node->formatASTForErrorMessage()); + } + + bool needChildVisit(const QueryTreeNodePtr &, const QueryTreeNodePtr & child_node) + { + auto * child_function_node = child_node->as(); + if (child_function_node) + { + if (child_function_node->isAggregateFunction()) + return false; + + for (const auto & group_by_key_node : group_by_keys_nodes) + { + if (child_node->isEqual(*group_by_key_node)) + return false; + } + } + + return !(child_node->getNodeType() == QueryTreeNodeType::QUERY || child_node->getNodeType() == QueryTreeNodeType::UNION); + } + +private: + const QueryTreeNodes & group_by_keys_nodes; + const IdentifierResolveScope & scope; +}; + +/** Resolve query. + * This function modifies query node during resolve. It is caller responsibility to clone query node before resolve + * if it is needed for later use. + * + * query_node - query_tree_node that must have QueryNode type. + * scope - query scope. It is caller responsibility to create it. + * + * Resolve steps: + * 1. Validate subqueries depth, perform GROUP BY validation that does not depend on information about aggregate functions. + * 2. Initialize query scope with aliases. + * 3. Register CTE subqueries from WITH section in scope and remove them from WITH section. + * 4. Resolve JOIN TREE. + * 5. Resolve projection columns. + * 6. Resolve expressions in other query parts. + * 7. Validate nodes with duplicate aliases. + * 8. Validate aggregate functions, GROUPING function, window functions. + * 9. Remove WITH and WINDOW sections from query. + * 10. Remove aliases from expression and lambda nodes. + * 11. Resolve query tree node with projection columns. + */ +void QueryAnalyzer::resolveQuery(const QueryTreeNodePtr & query_node, IdentifierResolveScope & scope) +{ + size_t max_subquery_depth = scope.context->getSettingsRef().max_subquery_depth; + if (max_subquery_depth && scope.subquery_depth > max_subquery_depth) + throw Exception(ErrorCodes::TOO_DEEP_SUBQUERIES, + "Too deep subqueries. 
Maximum: {}", + max_subquery_depth); + + auto & query_node_typed = query_node->as(); + + if (query_node_typed.hasSettingsChanges()) + { + auto updated_scope_context = Context::createCopy(scope.context); + updated_scope_context->applySettingsChanges(query_node_typed.getSettingsChanges()); + scope.context = std::move(updated_scope_context); + } + + const auto & settings = scope.context->getSettingsRef(); + + if (settings.group_by_use_nulls) + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "GROUP BY use nulls is not supported"); + + bool is_rollup_or_cube = query_node_typed.isGroupByWithRollup() || query_node_typed.isGroupByWithCube(); + + if (query_node_typed.isGroupByWithGroupingSets() && query_node_typed.isGroupByWithTotals()) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "WITH TOTALS and GROUPING SETS are not supported together"); + + if (query_node_typed.isGroupByWithGroupingSets() && is_rollup_or_cube) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "GROUPING SETS are not supported together with ROLLUP and CUBE"); + + if (query_node_typed.isGroupByWithRollup() && (query_node_typed.isGroupByWithGroupingSets() || query_node_typed.isGroupByWithCube())) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "ROLLUP is not supported together with GROUPING SETS and CUBE"); + + if (query_node_typed.isGroupByWithCube() && (query_node_typed.isGroupByWithGroupingSets() || query_node_typed.isGroupByWithRollup())) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "CUBE is not supported together with GROUPING SETS and ROLLUP"); + + if (query_node_typed.hasHaving() && query_node_typed.isGroupByWithTotals() && is_rollup_or_cube) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "WITH TOTALS and WITH ROLLUP or CUBE are not supported together in presence of HAVING"); + + /// Initialize aliases in query node scope + QueryExpressionsAliasVisitor visitor(scope); + + if (query_node_typed.hasWith()) + visitor.visit(query_node_typed.getWithNode()); + + if (!query_node_typed.getProjection().getNodes().empty()) + visitor.visit(query_node_typed.getProjectionNode()); + + if (query_node_typed.getPrewhere()) + visitor.visit(query_node_typed.getPrewhere()); + + if (query_node_typed.getWhere()) + visitor.visit(query_node_typed.getWhere()); + + if (query_node_typed.hasGroupBy()) + visitor.visit(query_node_typed.getGroupByNode()); + + if (query_node_typed.hasHaving()) + visitor.visit(query_node_typed.getHaving()); + + if (query_node_typed.hasWindow()) + visitor.visit(query_node_typed.getWindowNode()); + + if (query_node_typed.hasOrderBy()) + visitor.visit(query_node_typed.getOrderByNode()); + + if (query_node_typed.hasInterpolate()) + visitor.visit(query_node_typed.getInterpolate()); + + if (query_node_typed.hasLimitByLimit()) + visitor.visit(query_node_typed.getLimitByLimit()); + + if (query_node_typed.hasLimitByOffset()) + visitor.visit(query_node_typed.getLimitByOffset()); + + if (query_node_typed.hasLimitBy()) + visitor.visit(query_node_typed.getLimitByNode()); + + if (query_node_typed.hasLimit()) + visitor.visit(query_node_typed.getLimit()); + + if (query_node_typed.hasOffset()) + visitor.visit(query_node_typed.getOffset()); + + /// Register CTE subqueries and remove them from WITH section + + auto & with_nodes = query_node_typed.getWith().getNodes(); + + for (auto & node : with_nodes) + { + auto * subquery_node = node->as(); + auto * union_node = node->as(); + + bool subquery_is_cte = (subquery_node && subquery_node->isCTE()) || (union_node && union_node->isCTE()); + + if (!subquery_is_cte) + continue; + + const auto & 
cte_name = subquery_node ? subquery_node->getCTEName() : union_node->getCTEName(); + + auto [_, inserted] = scope.cte_name_to_query_node.emplace(cte_name, node); + if (!inserted) + throw Exception(ErrorCodes::MULTIPLE_EXPRESSIONS_FOR_ALIAS, + "CTE with name {} already exists. In scope {}", + cte_name, + scope.scope_node->formatASTForErrorMessage()); + } + + std::erase_if(with_nodes, [](const QueryTreeNodePtr & node) + { + auto * subquery_node = node->as(); + auto * union_node = node->as(); + + return (subquery_node && subquery_node->isCTE()) || (union_node && union_node->isCTE()); + }); + + for (auto & window_node : query_node_typed.getWindow().getNodes()) + { + auto & window_node_typed = window_node->as(); + auto parent_window_name = window_node_typed.getParentWindowName(); + if (!parent_window_name.empty()) + { + auto window_node_it = scope.window_name_to_window_node.find(parent_window_name); + if (window_node_it == scope.window_name_to_window_node.end()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Window '{}' does not exists. In scope {}", + parent_window_name, + scope.scope_node->formatASTForErrorMessage()); + + mergeWindowWithParentWindow(window_node, window_node_it->second, scope); + window_node_typed.setParentWindowName({}); + } + + scope.window_name_to_window_node.emplace(window_node_typed.getAlias(), window_node); + } + + /** Disable identifier cache during JOIN TREE resolve. + * Depending on JOIN expression section, identifier with same name + * can be resolved in different columns. + * + * Example: SELECT id FROM test_table AS t1 INNER JOIN test_table AS t2 ON t1.id = t2.id INNER JOIN test_table AS t3 ON t1.id = t3.id + * In first join expression ON t1.id = t2.id t1.id is resolved into test_table.id column. + * In second join expression ON t1.id = t3.id t1.id must be resolved into test_table.id column after first JOIN. + */ + scope.use_identifier_lookup_to_result_cache = false; + + if (query_node_typed.getJoinTree()) + { + TableExpressionsAliasVisitor table_expressions_visitor(scope); + table_expressions_visitor.visit(query_node_typed.getJoinTree()); + + initializeQueryJoinTreeNode(query_node_typed.getJoinTree(), scope); + scope.alias_name_to_table_expression_node.clear(); + + resolveQueryJoinTreeNode(query_node_typed.getJoinTree(), scope, visitor); + } + + scope.use_identifier_lookup_to_result_cache = true; + + /// Resolve query node sections. + + auto projection_columns = resolveProjectionExpressionNodeList(query_node_typed.getProjectionNode(), scope); + if (query_node_typed.getProjection().getNodes().empty()) + throw Exception(ErrorCodes::EMPTY_LIST_OF_COLUMNS_QUERIED, + "Empty list of columns in projection. 
In scope {}", + scope.scope_node->formatASTForErrorMessage()); + + if (query_node_typed.hasWith()) + resolveExpressionNodeList(query_node_typed.getWithNode(), scope, true /*allow_lambda_expression*/, false /*allow_table_expression*/); + + if (query_node_typed.getPrewhere()) + resolveExpressionNode(query_node_typed.getPrewhere(), scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/); + + if (query_node_typed.getWhere()) + resolveExpressionNode(query_node_typed.getWhere(), scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/); + + if (query_node_typed.hasGroupBy()) + { + if (query_node_typed.isGroupByWithGroupingSets()) + { + for (auto & grouping_sets_keys_list_node : query_node_typed.getGroupBy().getNodes()) + { + if (settings.enable_positional_arguments) + replaceNodesWithPositionalArguments(grouping_sets_keys_list_node, query_node_typed.getProjection().getNodes(), scope); + + resolveExpressionNodeList(grouping_sets_keys_list_node, scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/); + } + } + else + { + if (settings.enable_positional_arguments) + replaceNodesWithPositionalArguments(query_node_typed.getGroupByNode(), query_node_typed.getProjection().getNodes(), scope); + + resolveExpressionNodeList(query_node_typed.getGroupByNode(), scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/); + } + } + + if (query_node_typed.hasHaving()) + resolveExpressionNode(query_node_typed.getHaving(), scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/); + + if (query_node_typed.hasWindow()) + resolveWindowNodeList(query_node_typed.getWindowNode(), scope); + + if (query_node_typed.hasOrderBy()) + { + if (settings.enable_positional_arguments) + replaceNodesWithPositionalArguments(query_node_typed.getOrderByNode(), query_node_typed.getProjection().getNodes(), scope); + + resolveSortNodeList(query_node_typed.getOrderByNode(), scope); + } + + if (query_node_typed.hasInterpolate()) + resolveInterpolateColumnsNodeList(query_node_typed.getInterpolate(), scope); + + if (query_node_typed.hasLimitByLimit()) + { + resolveExpressionNode(query_node_typed.getLimitByLimit(), scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/); + validateLimitOffsetExpression(query_node_typed.getLimitByLimit(), "LIMIT BY LIMIT", scope); + } + + if (query_node_typed.hasLimitByOffset()) + { + resolveExpressionNode(query_node_typed.getLimitByOffset(), scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/); + validateLimitOffsetExpression(query_node_typed.getLimitByOffset(), "LIMIT BY OFFSET", scope); + } + + if (query_node_typed.hasLimitBy()) + { + if (settings.enable_positional_arguments) + replaceNodesWithPositionalArguments(query_node_typed.getLimitByNode(), query_node_typed.getProjection().getNodes(), scope); + + resolveExpressionNodeList(query_node_typed.getLimitByNode(), scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/); + } + + if (query_node_typed.hasLimit()) + { + resolveExpressionNode(query_node_typed.getLimit(), scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/); + validateLimitOffsetExpression(query_node_typed.getLimit(), "LIMIT", scope); + } + + if (query_node_typed.hasOffset()) + { + resolveExpressionNode(query_node_typed.getOffset(), scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/); + validateLimitOffsetExpression(query_node_typed.getOffset(), "OFFSET", scope); + } + + /** Resolve nodes with 
duplicate aliases. + * Table expressions cannot have duplicate aliases. + * + * Such nodes during scope aliases collection are placed into duplicated array. + * After scope nodes are resolved, we can compare node with duplicate alias with + * node from scope alias table. + */ + for (const auto & node_with_duplicated_alias : scope.nodes_with_duplicated_aliases) + { + auto node = node_with_duplicated_alias; + auto node_alias = node->getAlias(); + resolveExpressionNode(node, scope, true /*allow_lambda_expression*/, false /*allow_table_expression*/); + + bool has_node_in_alias_table = false; + + auto it = scope.alias_name_to_expression_node.find(node_alias); + if (it != scope.alias_name_to_expression_node.end()) + { + has_node_in_alias_table = true; + + if (!it->second->isEqual(*node)) + throw Exception(ErrorCodes::MULTIPLE_EXPRESSIONS_FOR_ALIAS, + "Multiple expressions {} and {} for alias {}. In scope {}", + node->formatASTForErrorMessage(), + it->second->formatASTForErrorMessage(), + node_alias, + scope.scope_node->formatASTForErrorMessage()); + } + + it = scope.alias_name_to_lambda_node.find(node_alias); + if (it != scope.alias_name_to_lambda_node.end()) + { + has_node_in_alias_table = true; + + if (!it->second->isEqual(*node)) + throw Exception(ErrorCodes::MULTIPLE_EXPRESSIONS_FOR_ALIAS, + "Multiple expressions {} and {} for alias {}. In scope {}", + node->formatASTForErrorMessage(), + it->second->formatASTForErrorMessage(), + node_alias, + scope.scope_node->formatASTForErrorMessage()); + } + + if (!has_node_in_alias_table) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Node {} with duplicate alias {} does not exists in alias table. In scope {}", + node->formatASTForErrorMessage(), + node_alias, + scope.scope_node->formatASTForErrorMessage()); + + node->removeAlias(); + } + + /** Validate aggregates + * + * 1. Check that there are no aggregate functions and GROUPING function in JOIN TREE, WHERE, PREWHERE, in another aggregate functions. + * 2. Check that there are no window functions in JOIN TREE, WHERE, PREWHERE, HAVING, WINDOW, inside another aggregate function, + * inside window function arguments, inside window function window definition. + * 3. Check that there are no columns that are not specified in GROUP BY keys. + * 4. Validate GROUP BY modifiers. 
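Validation step 3 listed above is the core of ValidateGroupByColumnsVisitor: outside of aggregate functions, a referenced column must match one of the GROUP BY keys, otherwise the query is rejected with NOT_AN_AGGREGATE (for example, SELECT value FROM test_table GROUP BY id). A minimal sketch of that membership check, with formatted names standing in for key expressions:

```cpp
#include <algorithm>
#include <string>
#include <vector>

/// Outside of aggregate functions, every referenced column must be equal to one
/// of the GROUP BY key expressions (keys and columns are represented by their
/// formatted names here for simplicity).
bool isAllowedOutsideAggregateFunctions(const std::string & column_name,
                                        const std::vector<std::string> & group_by_key_names)
{
    return std::find(group_by_key_names.begin(), group_by_key_names.end(), column_name)
        != group_by_key_names.end();
}
```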
+ */ + assertNoAggregateFunctionNodes(query_node_typed.getJoinTree(), "in JOIN TREE"); + assertNoGroupingFunction(query_node_typed.getJoinTree(), "in JOIN TREE"); + assertNoWindowFunctionNodes(query_node_typed.getJoinTree(), "in JOIN TREE"); + + if (query_node_typed.hasWhere()) + { + assertNoAggregateFunctionNodes(query_node_typed.getWhere(), "in WHERE"); + assertNoGroupingFunction(query_node_typed.getWhere(), "in WHERE"); + assertNoWindowFunctionNodes(query_node_typed.getWhere(), "in WHERE"); + } + + if (query_node_typed.hasPrewhere()) + { + assertNoAggregateFunctionNodes(query_node_typed.getPrewhere(), "in PREWHERE"); + assertNoGroupingFunction(query_node_typed.getPrewhere(), "in PREWHERE"); + assertNoWindowFunctionNodes(query_node_typed.getPrewhere(), "in PREWHERE"); + } + + if (query_node_typed.hasHaving()) + assertNoWindowFunctionNodes(query_node_typed.getHaving(), "in HAVING"); + + if (query_node_typed.hasWindow()) + assertNoWindowFunctionNodes(query_node_typed.getWindowNode(), "in WINDOW"); + + QueryTreeNodes aggregate_function_nodes; + QueryTreeNodes window_function_nodes; + + collectAggregateFunctionNodes(query_node, aggregate_function_nodes); + collectWindowFunctionNodes(query_node, window_function_nodes); + + if (query_node_typed.hasGroupBy()) + assertNoAggregateFunctionNodes(query_node_typed.getGroupByNode(), "in GROUP BY"); + + for (auto & aggregate_function_node : aggregate_function_nodes) + { + auto & aggregate_function_node_typed = aggregate_function_node->as(); + + assertNoAggregateFunctionNodes(aggregate_function_node_typed.getArgumentsNode(), "inside another aggregate function"); + assertNoGroupingFunction(aggregate_function_node_typed.getArgumentsNode(), "inside another aggregate function"); + assertNoWindowFunctionNodes(aggregate_function_node_typed.getArgumentsNode(), "inside an aggregate function"); + } + + for (auto & window_function_node : window_function_nodes) + { + auto & window_function_node_typed = window_function_node->as(); + assertNoWindowFunctionNodes(window_function_node_typed.getArgumentsNode(), "inside another window function"); + + if (query_node_typed.hasWindow()) + assertNoWindowFunctionNodes(window_function_node_typed.getWindowNode(), "inside window definition"); + } + + QueryTreeNodes group_by_keys_nodes; + group_by_keys_nodes.reserve(query_node_typed.getGroupBy().getNodes().size()); + + for (auto & node : query_node_typed.getGroupBy().getNodes()) + { + if (query_node_typed.isGroupByWithGroupingSets()) + { + auto & grouping_set_keys = node->as(); + for (auto & grouping_set_key : grouping_set_keys.getNodes()) + { + if (grouping_set_key->hasConstantValue()) + continue; + + group_by_keys_nodes.push_back(grouping_set_key); + } + } + else + { + if (node->hasConstantValue()) + continue; + + group_by_keys_nodes.push_back(node); + } + } + + if (query_node_typed.getGroupBy().getNodes().empty()) + { + if (query_node_typed.hasHaving()) + assertNoGroupingFunction(query_node_typed.getHaving(), "in HAVING without GROUP BY"); + + if (query_node_typed.hasOrderBy()) + assertNoGroupingFunction(query_node_typed.getOrderByNode(), "in ORDER BY without GROUP BY"); + + assertNoGroupingFunction(query_node_typed.getProjectionNode(), "in SELECT without GROUP BY"); + } + + bool has_aggregation = !query_node_typed.getGroupBy().getNodes().empty() || !aggregate_function_nodes.empty(); + + if (has_aggregation) + { + ValidateGroupByColumnsVisitor validate_group_by_columns_visitor(group_by_keys_nodes, scope); + + if (query_node_typed.hasHaving()) + 
validate_group_by_columns_visitor.visit(query_node_typed.getHaving());
+
+        if (query_node_typed.hasOrderBy())
+            validate_group_by_columns_visitor.visit(query_node_typed.getOrderByNode());
+
+        validate_group_by_columns_visitor.visit(query_node_typed.getProjectionNode());
+    }
+
+    if (!has_aggregation && (query_node_typed.isGroupByWithGroupingSets() || is_rollup_or_cube))
+        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "WITH TOTALS, ROLLUP, CUBE or GROUPING SETS are not supported without aggregation");
+
+    /** The WITH section can be safely removed, because the WITH section can only provide aliases to query expressions
+      * and CTEs for other sections to use.
+      *
+      * Example: WITH 1 AS constant, (x -> x + 1) AS lambda, a AS (SELECT * FROM test_table);
+      */
+    query_node_typed.getWith().getNodes().clear();
+
+    /** The WINDOW section can be safely removed, because the WINDOW section can only provide window definitions to window functions.
+      *
+      * Example: SELECT count(*) OVER w FROM test_table WINDOW w AS (PARTITION BY id);
+      */
+    query_node_typed.getWindow().getNodes().clear();
+
+    /// Remove aliases from expression and lambda nodes
+
+    for (auto & [_, node] : scope.alias_name_to_expression_node)
+        node->removeAlias();
+
+    for (auto & [_, node] : scope.alias_name_to_lambda_node)
+        node->removeAlias();
+
+    query_node_typed.resolveProjectionColumns(std::move(projection_columns));
+}
+
+void QueryAnalyzer::resolveUnion(const QueryTreeNodePtr & union_node, IdentifierResolveScope & scope)
+{
+    auto & union_node_typed = union_node->as<UnionNode &>();
+    auto & queries_nodes = union_node_typed.getQueries().getNodes();
+
+    for (auto & query_node : queries_nodes)
+    {
+        IdentifierResolveScope subquery_scope(query_node, &scope /*parent_scope*/);
+        auto query_node_type = query_node->getNodeType();
+
+        if (query_node_type == QueryTreeNodeType::QUERY)
+        {
+            resolveQuery(query_node, subquery_scope);
+        }
+        else if (query_node_type == QueryTreeNodeType::UNION)
+        {
+            resolveUnion(query_node, subquery_scope);
+        }
+        else
+        {
+            throw Exception(ErrorCodes::UNSUPPORTED_METHOD,
+                "UNION unsupported node {}. In scope {}",
+                query_node->formatASTForErrorMessage(),
+                scope.scope_node->formatASTForErrorMessage());
+        }
+    }
+}
+
+}
+
+QueryAnalysisPass::QueryAnalysisPass(QueryTreeNodePtr table_expression_)
+    : table_expression(std::move(table_expression_))
+{}
+
+void QueryAnalysisPass::run(QueryTreeNodePtr query_tree_node, ContextPtr context)
+{
+    QueryAnalyzer analyzer;
+    analyzer.resolve(query_tree_node, table_expression, context);
+}
+
+}
diff --git a/src/Analyzer/Passes/QueryAnalysisPass.h b/src/Analyzer/Passes/QueryAnalysisPass.h
new file mode 100644
index 00000000000..677a13044f2
--- /dev/null
+++ b/src/Analyzer/Passes/QueryAnalysisPass.h
@@ -0,0 +1,96 @@
+#pragma once
+
+#include
+#include
+#include
+
+
+namespace DB
+{
+
+/** This pass makes the initial query analysis.
+  *
+  * 1. All identifiers are resolved. Next passes can expect that there will be no IdentifierNode in the query tree.
+  * 2. All matchers are resolved. Next passes can expect that there will be no MatcherNode in the query tree.
+  * 3. All functions are resolved. Next passes can expect that for each FunctionNode its result type will be set, and it will be resolved
+  * as an aggregate or non-aggregate function.
+  * 4. All lambda expressions that are function arguments are resolved. Next passes can expect that the LambdaNode expression is resolved, and the lambda has concrete arguments.
+  * 5. All standalone lambda expressions are resolved.
+  * Next passes can expect that there will be no standalone LambdaNode expressions in the query.
+  * 6. Constants are folded. Example: SELECT plus(1, 1).
+  * The motivation is that there are places in the query tree that must contain a constant:
+  * Function parameters. Example: SELECT quantile(0.5)(x).
+  * Functions whose result type depends on a constant expression argument. Example: cast(x, 'type_name').
+  * Expressions that are part of LIMIT BY LIMIT, LIMIT BY OFFSET, LIMIT, OFFSET. Example: SELECT * FROM test_table LIMIT expr.
+  * Window function window frame OFFSET begin and OFFSET end.
+  *
+  * 7. All scalar subqueries are evaluated.
+  * TODO: Scalar subqueries must be evaluated only if they are part of the query tree where we must have a constant. This is currently not done
+  * because the execution layer does not support scalar subquery execution.
+  *
+  * 8. For the query node:
+  *
+  * Projection columns are calculated. Later passes cannot change the type or display name of a projection column, and cannot add or remove
+  * columns in the projection section.
+  * WITH and WINDOW sections are removed.
+  *
+  * 9. The query is validated. Parts that are validated:
+  *
+  * Constness of function parameters.
+  * Constness of LIMIT and OFFSET.
+  * Window function frames. Constness of window function frame begin OFFSET and end OFFSET.
+  * After GROUP BY, the query uses only columns that are specified in the GROUP BY keys.
+  * GROUPING function arguments are specified in GROUP BY keys.
+  * No GROUPING function if there is no GROUP BY.
+  * No aggregate functions in the JOIN TREE, WHERE, PREWHERE, GROUP BY, or inside other aggregate functions.
+  * GROUP BY modifiers CUBE, ROLLUP, GROUPING SETS and WITH TOTALS.
+  * Table expression modifiers are validated for table and table function nodes in the JOIN TREE.
+  * Table expression modifiers are disabled for subqueries in the JOIN TREE.
+  * For JOIN and ARRAY JOIN, subqueries and table functions must have an alias (can be changed using the joined_subquery_requires_alias setting).
+  *
+  * 10. Special functions handling:
+  * Function `untuple` is handled properly.
+  * Function `arrayJoin` is handled properly.
+  * For functions `dictGet` and its variations and for function `joinGet`, an identifier as the first argument is handled properly.
+  * Function `exists` is converted into `in`.
+  *
+  * For function `grouping` the arguments are resolved, but it is the planner's responsibility to initialize it with a concrete grouping function
+  * based on the GROUP BY kind and the positions of the GROUP BY keys.
+  *
+  * For function `in` and its variations the arguments are resolved, but sets are not built.
+  * If the left and right arguments are constants, constant folding is performed.
+  * If the right argument is resolved as a table, and the table is not of type Set, it is replaced with a query that reads only ordinary columns from the underlying
+  * storage.
+  * Example: SELECT id FROM test_table WHERE id IN test_table_other;
+  * Result: SELECT id FROM test_table WHERE id IN (SELECT test_table_column FROM test_table_other);
+  */
+class QueryAnalysisPass final : public IQueryTreePass
+{
+public:
+    /** Construct query analysis pass for query or union analysis.
+      * Available columns are extracted from the query node join tree.
+      */
+    QueryAnalysisPass() = default;
+
+    /** Construct query analysis pass for expression or list of expressions analysis.
+      * Available expression columns are extracted from the table expression.
+      * Table expression node must have query, union, table or table function type.
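+      *
+      * A minimal usage sketch, assuming `query_tree_node`, `expression_node`, `table_expression_node` and `context` are already available:
+      *     QueryAnalysisPass pass;                                    // query or union analysis
+      *     pass.run(query_tree_node, context);
+      *     QueryAnalysisPass expression_pass(table_expression_node); // expression analysis against a table expression
+      *     expression_pass.run(expression_node, context);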
+ */ + explicit QueryAnalysisPass(QueryTreeNodePtr table_expression_); + + String getName() override + { + return "QueryAnalysis"; + } + + String getDescription() override + { + return "Resolve type for each query expression. Replace identifiers, matchers with query expressions. Perform constant folding. Evaluate scalar subqueries."; + } + + void run(QueryTreeNodePtr query_tree_node, ContextPtr context) override; + +private: + QueryTreeNodePtr table_expression; +}; + +} diff --git a/src/Analyzer/Passes/SumIfToCountIfPass.cpp b/src/Analyzer/Passes/SumIfToCountIfPass.cpp new file mode 100644 index 00000000000..f43c90e10eb --- /dev/null +++ b/src/Analyzer/Passes/SumIfToCountIfPass.cpp @@ -0,0 +1,157 @@ +#include + +#include +#include + +#include +#include + +#include + +#include + +#include +#include + +namespace DB +{ + +namespace +{ + +class SumIfToCountIfVisitor : public InDepthQueryTreeVisitor +{ +public: + explicit SumIfToCountIfVisitor(ContextPtr & context_) + : context(context_) + {} + + void visitImpl(QueryTreeNodePtr & node) + { + auto * function_node = node->as(); + if (!function_node || !function_node->isAggregateFunction()) + return; + + auto function_name = function_node->getFunctionName(); + auto lower_function_name = Poco::toLower(function_name); + + /// sumIf, SumIf or sUMIf are valid function names, but sumIF or sumiF are not + if (lower_function_name != "sum" && (lower_function_name != "sumif" || !function_name.ends_with("If"))) + return; + + auto & function_node_arguments_nodes = function_node->getArguments().getNodes(); + + /// Rewrite `sumIf(1, cond)` into `countIf(cond)` + if (lower_function_name == "sumif") + { + if (function_node_arguments_nodes.size() != 2) + return; + + auto constant_value = function_node_arguments_nodes[0]->getConstantValueOrNull(); + if (!constant_value) + return; + + const auto & constant_value_literal = constant_value->getValue(); + if (!isInt64OrUInt64FieldType(constant_value_literal.getType())) + return; + + if (constant_value_literal.get() != 1) + return; + + function_node_arguments_nodes[0] = std::move(function_node_arguments_nodes[1]); + function_node_arguments_nodes.resize(1); + + resolveAggregateFunctionNode(*function_node, "countIf"); + return; + } + + /** Rewrite `sum(if(cond, 1, 0))` into `countIf(cond)`. + * Rewrite `sum(if(cond, 0, 1))` into `countIf(not(cond))`. 
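+ * For example, with assumed table and column names:
+ * Example: SELECT sum(if(code = 200, 1, 0)) FROM requests;
+ * Result: SELECT countIf(code = 200) FROM requests;
+ * Example: SELECT sum(if(code = 200, 0, 1)) FROM requests;
+ * Result: SELECT countIf(not(code = 200)) FROM requests;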
+ */ + if (function_node_arguments_nodes.size() != 1) + return; + + auto & nested_argument = function_node_arguments_nodes[0]; + auto * nested_function = nested_argument->as(); + if (!nested_function || nested_function->getFunctionName() != "if") + return; + + auto & nested_if_function_arguments_nodes = nested_function->getArguments().getNodes(); + if (nested_if_function_arguments_nodes.size() != 3) + return; + + auto if_true_condition_constant_value = nested_if_function_arguments_nodes[1]->getConstantValueOrNull(); + auto if_false_condition_constant_value = nested_if_function_arguments_nodes[2]->getConstantValueOrNull(); + + if (!if_true_condition_constant_value || !if_false_condition_constant_value) + return; + + const auto & if_true_condition_constant_value_literal = if_true_condition_constant_value->getValue(); + const auto & if_false_condition_constant_value_literal = if_false_condition_constant_value->getValue(); + + if (!isInt64OrUInt64FieldType(if_true_condition_constant_value_literal.getType()) || + !isInt64OrUInt64FieldType(if_false_condition_constant_value_literal.getType())) + return; + + auto if_true_condition_value = if_true_condition_constant_value_literal.get(); + auto if_false_condition_value = if_false_condition_constant_value_literal.get(); + + /// Rewrite `sum(if(cond, 1, 0))` into `countIf(cond)`. + if (if_true_condition_value == 1 && if_false_condition_value == 0) + { + function_node_arguments_nodes[0] = std::move(nested_if_function_arguments_nodes[0]); + function_node_arguments_nodes.resize(1); + + resolveAggregateFunctionNode(*function_node, "countIf"); + return; + } + + /// Rewrite `sum(if(cond, 0, 1))` into `countIf(not(cond))`. + if (if_true_condition_value == 0 && if_false_condition_value == 1) + { + auto condition_result_type = nested_if_function_arguments_nodes[0]->getResultType(); + DataTypePtr not_function_result_type = std::make_shared(); + if (condition_result_type->isNullable()) + not_function_result_type = makeNullable(not_function_result_type); + + auto not_function = std::make_shared("not"); + not_function->resolveAsFunction(FunctionFactory::instance().get("not", context), std::move(not_function_result_type)); + + auto & not_function_arguments = not_function->getArguments().getNodes(); + not_function_arguments.push_back(std::move(nested_if_function_arguments_nodes[0])); + + function_node_arguments_nodes[0] = std::move(not_function); + function_node_arguments_nodes.resize(1); + + resolveAggregateFunctionNode(*function_node, "countIf"); + return; + } + } + +private: + static inline void resolveAggregateFunctionNode(FunctionNode & function_node, const String & aggregate_function_name) + { + auto function_result_type = function_node.getResultType(); + auto function_aggregate_function = function_node.getAggregateFunction(); + + AggregateFunctionProperties properties; + auto aggregate_function = AggregateFunctionFactory::instance().get(aggregate_function_name, + function_aggregate_function->getArgumentTypes(), + function_aggregate_function->getParameters(), + properties); + + function_node.resolveAsAggregateFunction(std::move(aggregate_function), std::move(function_result_type)); + } + + ContextPtr & context; +}; + +} + +void SumIfToCountIfPass::run(QueryTreeNodePtr query_tree_node, ContextPtr context) +{ + SumIfToCountIfVisitor visitor(context); + visitor.visit(query_tree_node); +} + +} diff --git a/src/Analyzer/Passes/SumIfToCountIfPass.h b/src/Analyzer/Passes/SumIfToCountIfPass.h new file mode 100644 index 00000000000..f3ba47f1c2c --- /dev/null +++ 
b/src/Analyzer/Passes/SumIfToCountIfPass.h @@ -0,0 +1,30 @@ +#pragma once + +#include + +namespace DB +{ + +/** Rewrite `sum(if(cond, value_1, value_2))` and `sumIf` functions to `countIf`. + * + * Example: SELECT sumIf(1, cond); + * Result: SELECT countIf(cond); + * + * Example: SELECT sum(if(cond, 1, 0)); + * Result: SELECT countIf(cond); + * + * Example: SELECT sum(if(cond, 0, 1)); + * Result: SELECT countIf(not(cond)); + */ +class SumIfToCountIfPass final : public IQueryTreePass +{ +public: + String getName() override { return "SumIfToCountIf"; } + + String getDescription() override { return "Rewrite sum(if) and sumIf into countIf"; } + + void run(QueryTreeNodePtr query_tree_node, ContextPtr context) override; + +}; + +} diff --git a/src/Analyzer/Passes/UniqInjectiveFunctionsEliminationPass.cpp b/src/Analyzer/Passes/UniqInjectiveFunctionsEliminationPass.cpp new file mode 100644 index 00000000000..6520cb0717d --- /dev/null +++ b/src/Analyzer/Passes/UniqInjectiveFunctionsEliminationPass.cpp @@ -0,0 +1,64 @@ +#include + +#include + +#include +#include + +namespace DB +{ + +namespace +{ + +bool isUniqFunction(const String & function_name) +{ + return function_name == "uniq" || + function_name == "uniqExact" || + function_name == "uniqHLL12" || + function_name == "uniqCombined" || + function_name == "uniqCombined64" || + function_name == "uniqTheta"; +} + +class UniqInjectiveFunctionsEliminationVisitor : public InDepthQueryTreeVisitor +{ +public: + static void visitImpl(QueryTreeNodePtr & node) + { + auto * function_node = node->as(); + if (!function_node || !function_node->isAggregateFunction() || !isUniqFunction(function_node->getFunctionName())) + return; + + auto & uniq_function_arguments_nodes = function_node->getArguments().getNodes(); + for (auto & uniq_function_argument_node : uniq_function_arguments_nodes) + { + auto * uniq_function_argument_node_typed = uniq_function_argument_node->as(); + if (!uniq_function_argument_node_typed || !uniq_function_argument_node_typed->isOrdinaryFunction()) + continue; + + auto & uniq_function_argument_node_argument_nodes = uniq_function_argument_node_typed->getArguments().getNodes(); + + /// Do not apply optimization if injective function contains multiple arguments + if (uniq_function_argument_node_argument_nodes.size() != 1) + continue; + + const auto & uniq_function_argument_node_function = uniq_function_argument_node_typed->getFunction(); + if (!uniq_function_argument_node_function->isInjective({})) + continue; + + /// Replace injective function with its single argument + uniq_function_argument_node = uniq_function_argument_node_argument_nodes[0]; + } + } +}; + +} + +void UniqInjectiveFunctionsEliminationPass::run(QueryTreeNodePtr query_tree_node, ContextPtr) +{ + UniqInjectiveFunctionsEliminationVisitor visitor; + visitor.visit(query_tree_node); +} + +} diff --git a/src/Analyzer/Passes/UniqInjectiveFunctionsEliminationPass.h b/src/Analyzer/Passes/UniqInjectiveFunctionsEliminationPass.h new file mode 100644 index 00000000000..a0f07dfb7b5 --- /dev/null +++ b/src/Analyzer/Passes/UniqInjectiveFunctionsEliminationPass.h @@ -0,0 +1,24 @@ +#pragma once + +#include + +namespace DB +{ + +/** Remove injective functions from `uniq*` functions arguments. 
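+ * Since an injective function returns distinct results for distinct arguments, it does not change the number of
+ * unique values its argument produces, so the wrapping call can be dropped without changing the `uniq*` result.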
+ * + * Example: SELECT uniq(injectiveFunction(argument)); + * Result: SELECT uniq(argument); + */ +class UniqInjectiveFunctionsEliminationPass final : public IQueryTreePass +{ +public: + String getName() override { return "UniqInjectiveFunctionsElimination"; } + + String getDescription() override { return "Remove injective functions from uniq functions arguments."; } + + void run(QueryTreeNodePtr query_tree_node, ContextPtr context) override; + +}; + +} diff --git a/src/Analyzer/QueryNode.cpp b/src/Analyzer/QueryNode.cpp new file mode 100644 index 00000000000..c5bbc193544 --- /dev/null +++ b/src/Analyzer/QueryNode.cpp @@ -0,0 +1,345 @@ +#include + +#include +#include + +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +namespace DB +{ + +QueryNode::QueryNode() + : IQueryTreeNode(children_size) +{ + children[with_child_index] = std::make_shared(); + children[projection_child_index] = std::make_shared(); + children[group_by_child_index] = std::make_shared(); + children[window_child_index] = std::make_shared(); + children[order_by_child_index] = std::make_shared(); + children[limit_by_child_index] = std::make_shared(); +} + +void QueryNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const +{ + buffer << std::string(indent, ' ') << "QUERY id: " << format_state.getNodeId(this); + + if (hasAlias()) + buffer << ", alias: " << getAlias(); + + if (is_subquery) + buffer << ", is_subquery: " << is_subquery; + + if (is_cte) + buffer << ", is_cte: " << is_cte; + + if (is_distinct) + buffer << ", is_distinct: " << is_distinct; + + if (is_limit_with_ties) + buffer << ", is_limit_with_ties: " << is_limit_with_ties; + + if (is_group_by_with_totals) + buffer << ", is_group_by_with_totals: " << is_group_by_with_totals; + + std::string group_by_type; + if (is_group_by_with_rollup) + group_by_type = "rollup"; + else if (is_group_by_with_cube) + group_by_type = "cube"; + else if (is_group_by_with_grouping_sets) + group_by_type = "grouping_sets"; + + if (!group_by_type.empty()) + buffer << ", group_by_type: " << group_by_type; + + if (!cte_name.empty()) + buffer << ", cte_name: " << cte_name; + + if (constant_value) + { + buffer << ", constant_value: " << constant_value->getValue().dump(); + buffer << ", constant_value_type: " << constant_value->getType()->getName(); + } + + if (hasWith()) + { + buffer << '\n' << std::string(indent + 2, ' ') << "WITH\n"; + getWith().dumpTreeImpl(buffer, format_state, indent + 4); + } + + if (!projection_columns.empty()) + { + buffer << '\n'; + buffer << std::string(indent + 2, ' ') << "PROJECTION COLUMNS\n"; + + size_t projection_columns_size = projection_columns.size(); + for (size_t i = 0; i < projection_columns_size; ++i) + { + const auto & projection_column = projection_columns[i]; + buffer << std::string(indent + 4, ' ') << projection_column.name << " " << projection_column.type->getName(); + if (i + 1 != projection_columns_size) + buffer << '\n'; + } + } + + buffer << '\n'; + buffer << std::string(indent + 2, ' ') << "PROJECTION\n"; + getProjection().dumpTreeImpl(buffer, format_state, indent + 4); + + if (getJoinTree()) + { + buffer << '\n' << std::string(indent + 2, ' ') << "JOIN TREE\n"; + getJoinTree()->dumpTreeImpl(buffer, format_state, indent + 4); + } + + if (getPrewhere()) + { + buffer << '\n' << std::string(indent + 2, ' ') << "PREWHERE\n"; + getPrewhere()->dumpTreeImpl(buffer, format_state, indent + 4); + } + + if (getWhere()) + { + buffer << '\n' << 
std::string(indent + 2, ' ') << "WHERE\n"; + getWhere()->dumpTreeImpl(buffer, format_state, indent + 4); + } + + if (hasGroupBy()) + { + buffer << '\n' << std::string(indent + 2, ' ') << "GROUP BY\n"; + getGroupBy().dumpTreeImpl(buffer, format_state, indent + 4); + } + + if (hasHaving()) + { + buffer << '\n' << std::string(indent + 2, ' ') << "HAVING\n"; + getHaving()->dumpTreeImpl(buffer, format_state, indent + 4); + } + + if (hasWindow()) + { + buffer << '\n' << std::string(indent + 2, ' ') << "WINDOW\n"; + getWindow().dumpTreeImpl(buffer, format_state, indent + 4); + } + + if (hasOrderBy()) + { + buffer << '\n' << std::string(indent + 2, ' ') << "ORDER BY\n"; + getOrderBy().dumpTreeImpl(buffer, format_state, indent + 4); + } + + if (hasInterpolate()) + { + buffer << '\n' << std::string(indent + 2, ' ') << "INTERPOLATE\n"; + getInterpolate()->dumpTreeImpl(buffer, format_state, indent + 4); + } + + if (hasLimitByLimit()) + { + buffer << '\n' << std::string(indent + 2, ' ') << "LIMIT BY LIMIT\n"; + getLimitByLimit()->dumpTreeImpl(buffer, format_state, indent + 4); + } + + if (hasLimitByOffset()) + { + buffer << '\n' << std::string(indent + 2, ' ') << "LIMIT BY OFFSET\n"; + getLimitByOffset()->dumpTreeImpl(buffer, format_state, indent + 4); + } + + if (hasLimitBy()) + { + buffer << '\n' << std::string(indent + 2, ' ') << "LIMIT BY\n"; + getLimitBy().dumpTreeImpl(buffer, format_state, indent + 4); + } + + if (hasLimit()) + { + buffer << '\n' << std::string(indent + 2, ' ') << "LIMIT\n"; + getLimit()->dumpTreeImpl(buffer, format_state, indent + 4); + } + + if (hasOffset()) + { + buffer << '\n' << std::string(indent + 2, ' ') << "OFFSET\n"; + getOffset()->dumpTreeImpl(buffer, format_state, indent + 4); + } +} + +bool QueryNode::isEqualImpl(const IQueryTreeNode & rhs) const +{ + const auto & rhs_typed = assert_cast(rhs); + + if (constant_value && rhs_typed.constant_value && *constant_value != *rhs_typed.constant_value) + return false; + else if (constant_value && !rhs_typed.constant_value) + return false; + else if (!constant_value && rhs_typed.constant_value) + return false; + + return is_subquery == rhs_typed.is_subquery && + is_cte == rhs_typed.is_cte && + cte_name == rhs_typed.cte_name && + projection_columns == rhs_typed.projection_columns && + is_distinct == rhs_typed.is_distinct && + is_limit_with_ties == rhs_typed.is_limit_with_ties && + is_group_by_with_totals == rhs_typed.is_group_by_with_totals && + is_group_by_with_rollup == rhs_typed.is_group_by_with_rollup && + is_group_by_with_cube == rhs_typed.is_group_by_with_cube && + is_group_by_with_grouping_sets == rhs_typed.is_group_by_with_grouping_sets; +} + +void QueryNode::updateTreeHashImpl(HashState & state) const +{ + state.update(is_subquery); + state.update(is_cte); + + state.update(cte_name.size()); + state.update(cte_name); + + state.update(projection_columns.size()); + for (const auto & projection_column : projection_columns) + { + state.update(projection_column.name.size()); + state.update(projection_column.name); + + auto projection_column_type_name = projection_column.type->getName(); + state.update(projection_column_type_name.size()); + state.update(projection_column_type_name); + } + + state.update(is_distinct); + state.update(is_limit_with_ties); + state.update(is_group_by_with_totals); + state.update(is_group_by_with_rollup); + state.update(is_group_by_with_cube); + state.update(is_group_by_with_grouping_sets); + + if (constant_value) + { + auto constant_dump = applyVisitor(FieldVisitorToString(), 
constant_value->getValue()); + state.update(constant_dump.size()); + state.update(constant_dump); + + auto constant_value_type_name = constant_value->getType()->getName(); + state.update(constant_value_type_name.size()); + state.update(constant_value_type_name); + } +} + +QueryTreeNodePtr QueryNode::cloneImpl() const +{ + auto result_query_node = std::make_shared(); + + result_query_node->is_subquery = is_subquery; + result_query_node->is_cte = is_cte; + result_query_node->is_distinct = is_distinct; + result_query_node->is_limit_with_ties = is_limit_with_ties; + result_query_node->is_group_by_with_totals = is_group_by_with_totals; + result_query_node->is_group_by_with_rollup = is_group_by_with_rollup; + result_query_node->is_group_by_with_cube = is_group_by_with_cube; + result_query_node->is_group_by_with_grouping_sets = is_group_by_with_grouping_sets; + result_query_node->cte_name = cte_name; + result_query_node->projection_columns = projection_columns; + result_query_node->constant_value = constant_value; + + return result_query_node; +} + +ASTPtr QueryNode::toASTImpl() const +{ + auto select_query = std::make_shared(); + select_query->distinct = is_distinct; + select_query->limit_with_ties = is_limit_with_ties; + select_query->group_by_with_totals = is_group_by_with_totals; + select_query->group_by_with_rollup = is_group_by_with_rollup; + select_query->group_by_with_cube = is_group_by_with_cube; + select_query->group_by_with_grouping_sets = is_group_by_with_grouping_sets; + + if (hasWith()) + select_query->setExpression(ASTSelectQuery::Expression::WITH, getWith().toAST()); + + select_query->setExpression(ASTSelectQuery::Expression::SELECT, getProjection().toAST()); + + ASTPtr tables_in_select_query_ast = std::make_shared(); + addTableExpressionOrJoinIntoTablesInSelectQuery(tables_in_select_query_ast, getJoinTree()); + select_query->setExpression(ASTSelectQuery::Expression::TABLES, std::move(tables_in_select_query_ast)); + + if (getPrewhere()) + select_query->setExpression(ASTSelectQuery::Expression::PREWHERE, getPrewhere()->toAST()); + + if (getWhere()) + select_query->setExpression(ASTSelectQuery::Expression::WHERE, getWhere()->toAST()); + + if (hasGroupBy()) + select_query->setExpression(ASTSelectQuery::Expression::GROUP_BY, getGroupBy().toAST()); + + if (hasHaving()) + select_query->setExpression(ASTSelectQuery::Expression::HAVING, getHaving()->toAST()); + + if (hasWindow()) + select_query->setExpression(ASTSelectQuery::Expression::WINDOW, getWindow().toAST()); + + if (hasOrderBy()) + select_query->setExpression(ASTSelectQuery::Expression::ORDER_BY, getOrderBy().toAST()); + + if (hasInterpolate()) + select_query->setExpression(ASTSelectQuery::Expression::INTERPOLATE, getInterpolate()->toAST()); + + if (hasLimitByLimit()) + select_query->setExpression(ASTSelectQuery::Expression::LIMIT_BY_LENGTH, getLimitByLimit()->toAST()); + + if (hasLimitByOffset()) + select_query->setExpression(ASTSelectQuery::Expression::LIMIT_BY_OFFSET, getLimitByOffset()->toAST()); + + if (hasLimitBy()) + select_query->setExpression(ASTSelectQuery::Expression::LIMIT_BY, getLimitBy().toAST()); + + if (hasLimit()) + select_query->setExpression(ASTSelectQuery::Expression::LIMIT_LENGTH, getLimit()->toAST()); + + if (hasOffset()) + select_query->setExpression(ASTSelectQuery::Expression::LIMIT_OFFSET, getOffset()->toAST()); + + if (hasSettingsChanges()) + { + auto settings_query = std::make_shared(); + settings_query->changes = settings_changes; + select_query->setExpression(ASTSelectQuery::Expression::SETTINGS, 
std::move(settings_query)); + } + + auto result_select_query = std::make_shared(); + result_select_query->union_mode = SelectUnionMode::UNION_DEFAULT; + + auto list_of_selects = std::make_shared(); + list_of_selects->children.push_back(std::move(select_query)); + + result_select_query->children.push_back(std::move(list_of_selects)); + result_select_query->list_of_selects = result_select_query->children.back(); + + if (is_subquery) + { + auto subquery = std::make_shared(); + + subquery->cte_name = cte_name; + subquery->children.push_back(std::move(result_select_query)); + + return subquery; + } + + return result_select_query; +} + +} diff --git a/src/Analyzer/QueryNode.h b/src/Analyzer/QueryNode.h new file mode 100644 index 00000000000..1bb381c95c9 --- /dev/null +++ b/src/Analyzer/QueryNode.h @@ -0,0 +1,607 @@ +#pragma once + +#include + +#include +#include + +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int UNSUPPORTED_METHOD; +} + +/** Query node represents query in query tree. + * + * Example: SELECT * FROM test_table WHERE id == 0; + * Example: SELECT * FROM test_table_1 AS t1 INNER JOIN test_table_2 AS t2 ON t1.id = t2.id; + * + * Query node consists of following sections. + * 1. WITH section. + * 2. PROJECTION section. + * 3. JOIN TREE section. + * Example: SELECT * FROM test_table_1 AS t1 INNER JOIN test_table_2 AS t2 ON t1.id = t2.id; + * test_table_1 AS t1 INNER JOIN test_table_2 AS t2 ON t1.id = t2.id - JOIN TREE section. + * 4. PREWHERE section. + * 5. WHERE section. + * 6. GROUP BY section. + * 7. HAVING section. + * 8. WINDOW section. + * Example: SELECT * FROM test_table WINDOW window AS (PARTITION BY id); + * 9. ORDER BY section. + * 10. INTERPOLATE section. + * Example: SELECT * FROM test_table ORDER BY id WITH FILL INTERPOLATE (value AS value + 1); + * value AS value + 1 - INTERPOLATE section. + * 11. LIMIT BY limit section. + * 12. LIMIT BY offset section. + * 13. LIMIT BY section. + * Example: SELECT * FROM test_table LIMIT 1 AS a OFFSET 5 AS b BY id, value; + * 1 AS a - LIMIT BY limit section. + * 5 AS b - LIMIT BY offset section. + * id, value - LIMIT BY section. + * 14. LIMIT section. + * 15. OFFSET section. + * + * Query node contains settings changes that must be applied before query analysis or execution. + * Example: SELECT * FROM test_table SETTINGS prefer_column_name_to_alias = 1, join_use_nulls = 1; + * + * Query node can be used as CTE. + * Example: WITH cte_subquery AS (SELECT 1) SELECT * FROM cte_subquery; + * + * Query node can be used as scalar subquery. + * Example: SELECT (SELECT 1) AS scalar_subquery. + * + * During query analysis pass query node must be resolved with projection columns. 
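+ * For example (an assumed illustration): for SELECT id, 1 AS constant FROM test_table the projection columns
+ * are `id` and `constant` together with their resolved types.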
+ */ +class QueryNode; +using QueryNodePtr = std::shared_ptr; + +class QueryNode final : public IQueryTreeNode +{ +public: + explicit QueryNode(); + + /// Returns true if query node is subquery, false otherwise + bool isSubquery() const + { + return is_subquery; + } + + /// Set query node is subquery value + void setIsSubquery(bool is_subquery_value) + { + is_subquery = is_subquery_value; + } + + /// Returns true if query node is CTE, false otherwise + bool isCTE() const + { + return is_cte; + } + + /// Set query node is CTE + void setIsCTE(bool is_cte_value) + { + is_cte = is_cte_value; + } + + /// Get query node CTE name + const std::string & getCTEName() const + { + return cte_name; + } + + /// Set query node CTE name + void setCTEName(std::string cte_name_value) + { + cte_name = std::move(cte_name_value); + } + + /// Returns true if query node has DISTINCT, false otherwise + bool isDistinct() const + { + return is_distinct; + } + + /// Set query node DISTINCT value + void setIsDistinct(bool is_distinct_value) + { + is_distinct = is_distinct_value; + } + + /// Returns true if query node has LIMIT WITH TIES, false otherwise + bool isLimitWithTies() const + { + return is_limit_with_ties; + } + + /// Set query node LIMIT WITH TIES value + void setIsLimitWithTies(bool is_limit_with_ties_value) + { + is_limit_with_ties = is_limit_with_ties_value; + } + + /// Returns true, if query node has GROUP BY WITH TOTALS, false otherwise + bool isGroupByWithTotals() const + { + return is_group_by_with_totals; + } + + /// Set query node GROUP BY WITH TOTALS value + void setIsGroupByWithTotals(bool is_group_by_with_totals_value) + { + is_group_by_with_totals = is_group_by_with_totals_value; + } + + /// Returns true, if query node has GROUP BY with ROLLUP modifier, false otherwise + bool isGroupByWithRollup() const + { + return is_group_by_with_rollup; + } + + /// Set query node GROUP BY with ROLLUP modifier value + void setIsGroupByWithRollup(bool is_group_by_with_rollup_value) + { + is_group_by_with_rollup = is_group_by_with_rollup_value; + } + + /// Returns true, if query node has GROUP BY with CUBE modifier, false otherwise + bool isGroupByWithCube() const + { + return is_group_by_with_cube; + } + + /// Set query node GROUP BY with CUBE modifier value + void setIsGroupByWithCube(bool is_group_by_with_cube_value) + { + is_group_by_with_cube = is_group_by_with_cube_value; + } + + /// Returns true, if query node has GROUP BY with GROUPING SETS modifier, false otherwise + bool isGroupByWithGroupingSets() const + { + return is_group_by_with_grouping_sets; + } + + /// Set query node GROUP BY with GROUPING SETS modifier value + void setIsGroupByWithGroupingSets(bool is_group_by_with_grouping_sets_value) + { + is_group_by_with_grouping_sets = is_group_by_with_grouping_sets_value; + } + + /// Returns true if query node WITH section is not empty, false otherwise + bool hasWith() const + { + return !getWith().getNodes().empty(); + } + + /// Get WITH section + const ListNode & getWith() const + { + return children[with_child_index]->as(); + } + + /// Get WITH section + ListNode & getWith() + { + return children[with_child_index]->as(); + } + + /// Get WITH section node + const QueryTreeNodePtr & getWithNode() const + { + return children[with_child_index]; + } + + /// Get WITH section node + QueryTreeNodePtr & getWithNode() + { + return children[with_child_index]; + } + + /// Get PROJECTION section + const ListNode & getProjection() const + { + return children[projection_child_index]->as(); + } + + /// Get 
PROJECTION section + ListNode & getProjection() + { + return children[projection_child_index]->as(); + } + + /// Get PROJECTION section node + const QueryTreeNodePtr & getProjectionNode() const + { + return children[projection_child_index]; + } + + /// Get PROJECTION section node + QueryTreeNodePtr & getProjectionNode() + { + return children[projection_child_index]; + } + + /// Get JOIN TREE section node + const QueryTreeNodePtr & getJoinTree() const + { + return children[join_tree_child_index]; + } + + /// Get JOIN TREE section node + QueryTreeNodePtr & getJoinTree() + { + return children[join_tree_child_index]; + } + + /// Returns true if query node PREWHERE section is not empty, false otherwise + bool hasPrewhere() const + { + return children[prewhere_child_index] != nullptr; + } + + /// Get PREWHERE section node + const QueryTreeNodePtr & getPrewhere() const + { + return children[prewhere_child_index]; + } + + /// Get PREWHERE section node + QueryTreeNodePtr & getPrewhere() + { + return children[prewhere_child_index]; + } + + /// Returns true if query node WHERE section is not empty, false otherwise + bool hasWhere() const + { + return children[where_child_index] != nullptr; + } + + /// Get WHERE section node + const QueryTreeNodePtr & getWhere() const + { + return children[where_child_index]; + } + + /// Get WHERE section node + QueryTreeNodePtr & getWhere() + { + return children[where_child_index]; + } + + /// Returns true if query node GROUP BY section is not empty, false otherwise + bool hasGroupBy() const + { + return !getGroupBy().getNodes().empty(); + } + + /// Get GROUP BY section + const ListNode & getGroupBy() const + { + return children[group_by_child_index]->as(); + } + + /// Get GROUP BY section + ListNode & getGroupBy() + { + return children[group_by_child_index]->as(); + } + + /// Get GROUP BY section node + const QueryTreeNodePtr & getGroupByNode() const + { + return children[group_by_child_index]; + } + + /// Get GROUP BY section node + QueryTreeNodePtr & getGroupByNode() + { + return children[group_by_child_index]; + } + + /// Returns true if query node HAVING section is not empty, false otherwise + bool hasHaving() const + { + return getHaving() != nullptr; + } + + /// Get HAVING section node + const QueryTreeNodePtr & getHaving() const + { + return children[having_child_index]; + } + + /// Get HAVING section node + QueryTreeNodePtr & getHaving() + { + return children[having_child_index]; + } + + /// Returns true if query node WINDOW section is not empty, false otherwise + bool hasWindow() const + { + return !getWindow().getNodes().empty(); + } + + /// Get WINDOW section + const ListNode & getWindow() const + { + return children[window_child_index]->as(); + } + + /// Get WINDOW section + ListNode & getWindow() + { + return children[window_child_index]->as(); + } + + /// Get WINDOW section node + const QueryTreeNodePtr & getWindowNode() const + { + return children[window_child_index]; + } + + /// Get WINDOW section node + QueryTreeNodePtr & getWindowNode() + { + return children[window_child_index]; + } + + /// Returns true if query node ORDER BY section is not empty, false otherwise + bool hasOrderBy() const + { + return !getOrderBy().getNodes().empty(); + } + + /// Get ORDER BY section + const ListNode & getOrderBy() const + { + return children[order_by_child_index]->as(); + } + + /// Get ORDER BY section + ListNode & getOrderBy() + { + return children[order_by_child_index]->as(); + } + + /// Get ORDER BY section node + const QueryTreeNodePtr & getOrderByNode() const 
+ { + return children[order_by_child_index]; + } + + /// Get ORDER BY section node + QueryTreeNodePtr & getOrderByNode() + { + return children[order_by_child_index]; + } + + /// Returns true if query node INTERPOLATE section is not empty, false otherwise + bool hasInterpolate() const + { + return getInterpolate() != nullptr; + } + + /// Get INTERPOLATE section node + const QueryTreeNodePtr & getInterpolate() const + { + return children[interpolate_child_index]; + } + + /// Get INTERPOLATE section node + QueryTreeNodePtr & getInterpolate() + { + return children[interpolate_child_index]; + } + + /// Returns true if query node LIMIT BY LIMIT section is not empty, false otherwise + bool hasLimitByLimit() const + { + return children[limit_by_limit_child_index] != nullptr; + } + + /// Get LIMIT BY LIMIT section node + const QueryTreeNodePtr & getLimitByLimit() const + { + return children[limit_by_limit_child_index]; + } + + /// Get LIMIT BY LIMIT section node + QueryTreeNodePtr & getLimitByLimit() + { + return children[limit_by_limit_child_index]; + } + + /// Returns true if query node LIMIT BY OFFSET section is not empty, false otherwise + bool hasLimitByOffset() const + { + return children[limit_by_offset_child_index] != nullptr; + } + + /// Get LIMIT BY OFFSET section node + const QueryTreeNodePtr & getLimitByOffset() const + { + return children[limit_by_offset_child_index]; + } + + /// Get LIMIT BY OFFSET section node + QueryTreeNodePtr & getLimitByOffset() + { + return children[limit_by_offset_child_index]; + } + + /// Returns true if query node LIMIT BY section is not empty, false otherwise + bool hasLimitBy() const + { + return !getLimitBy().getNodes().empty(); + } + + /// Get LIMIT BY section + const ListNode & getLimitBy() const + { + return children[limit_by_child_index]->as(); + } + + /// Get LIMIT BY section + ListNode & getLimitBy() + { + return children[limit_by_child_index]->as(); + } + + /// Get LIMIT BY section node + const QueryTreeNodePtr & getLimitByNode() const + { + return children[limit_by_child_index]; + } + + /// Get LIMIT BY section node + QueryTreeNodePtr & getLimitByNode() + { + return children[limit_by_child_index]; + } + + /// Returns true if query node LIMIT section is not empty, false otherwise + bool hasLimit() const + { + return children[limit_child_index] != nullptr; + } + + /// Get LIMIT section node + const QueryTreeNodePtr & getLimit() const + { + return children[limit_child_index]; + } + + /// Get LIMIT section node + QueryTreeNodePtr & getLimit() + { + return children[limit_child_index]; + } + + /// Returns true if query node OFFSET section is not empty, false otherwise + bool hasOffset() const + { + return children[offset_child_index] != nullptr; + } + + /// Get OFFSET section node + const QueryTreeNodePtr & getOffset() const + { + return children[offset_child_index]; + } + + /// Get OFFSET section node + QueryTreeNodePtr & getOffset() + { + return children[offset_child_index]; + } + + /// Returns true if query node has settings changes specified, false otherwise + bool hasSettingsChanges() const + { + return !settings_changes.empty(); + } + + /// Get query node settings changes + const SettingsChanges & getSettingsChanges() const + { + return settings_changes; + } + + /// Set query node settings changes value + void setSettingsChanges(SettingsChanges settings_changes_value) + { + settings_changes = std::move(settings_changes_value); + } + + /// Get query node projection columns + const NamesAndTypes & getProjectionColumns() const + { + return 
projection_columns; + } + + /// Resolve query node projection columns + void resolveProjectionColumns(NamesAndTypes projection_columns_value) + { + projection_columns = std::move(projection_columns_value); + } + + QueryTreeNodeType getNodeType() const override + { + return QueryTreeNodeType::QUERY; + } + + DataTypePtr getResultType() const override + { + if (constant_value) + return constant_value->getType(); + + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "Method getResultType is not supported for non scalar query node"); + } + + /// Perform constant folding for scalar subquery node + void performConstantFolding(ConstantValuePtr constant_folded_value) + { + constant_value = std::move(constant_folded_value); + } + + ConstantValuePtr getConstantValueOrNull() const override + { + return constant_value; + } + + void dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const override; + +protected: + bool isEqualImpl(const IQueryTreeNode & rhs) const override; + + void updateTreeHashImpl(HashState &) const override; + + QueryTreeNodePtr cloneImpl() const override; + + ASTPtr toASTImpl() const override; + +private: + bool is_subquery = false; + bool is_cte = false; + bool is_distinct = false; + bool is_limit_with_ties = false; + bool is_group_by_with_totals = false; + bool is_group_by_with_rollup = false; + bool is_group_by_with_cube = false; + bool is_group_by_with_grouping_sets = false; + + std::string cte_name; + NamesAndTypes projection_columns; + ConstantValuePtr constant_value; + SettingsChanges settings_changes; + + static constexpr size_t with_child_index = 0; + static constexpr size_t projection_child_index = 1; + static constexpr size_t join_tree_child_index = 2; + static constexpr size_t prewhere_child_index = 3; + static constexpr size_t where_child_index = 4; + static constexpr size_t group_by_child_index = 5; + static constexpr size_t having_child_index = 6; + static constexpr size_t window_child_index = 7; + static constexpr size_t order_by_child_index = 8; + static constexpr size_t interpolate_child_index = 9; + static constexpr size_t limit_by_limit_child_index = 10; + static constexpr size_t limit_by_offset_child_index = 11; + static constexpr size_t limit_by_child_index = 12; + static constexpr size_t limit_child_index = 13; + static constexpr size_t offset_child_index = 14; + static constexpr size_t children_size = offset_child_index + 1; +}; + +} diff --git a/src/Analyzer/QueryTreeBuilder.cpp b/src/Analyzer/QueryTreeBuilder.cpp new file mode 100644 index 00000000000..51745d820e7 --- /dev/null +++ b/src/Analyzer/QueryTreeBuilder.cpp @@ -0,0 +1,880 @@ +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int UNSUPPORTED_METHOD; + extern const int LOGICAL_ERROR; + extern const int EXPECTED_ALL_OR_ANY; + extern const int NOT_IMPLEMENTED; + extern const int BAD_ARGUMENTS; +} + +namespace +{ + +class QueryTreeBuilder +{ +public: + explicit QueryTreeBuilder(ASTPtr query_, ContextPtr context_); + + QueryTreeNodePtr getQueryTreeNode() + { + return query_tree_node; + } + +private: + 
QueryTreeNodePtr buildSelectOrUnionExpression(const ASTPtr & select_or_union_query, bool is_subquery, const std::string & cte_name) const; + + QueryTreeNodePtr buildSelectWithUnionExpression(const ASTPtr & select_with_union_query, bool is_subquery, const std::string & cte_name) const; + + QueryTreeNodePtr buildSelectIntersectExceptQuery(const ASTPtr & select_intersect_except_query, bool is_subquery, const std::string & cte_name) const; + + QueryTreeNodePtr buildSelectExpression(const ASTPtr & select_query, bool is_subquery, const std::string & cte_name) const; + + QueryTreeNodePtr buildSortList(const ASTPtr & order_by_expression_list) const; + + QueryTreeNodePtr buildInterpolateList(const ASTPtr & interpolate_expression_list) const; + + QueryTreeNodePtr buildWindowList(const ASTPtr & window_definition_list) const; + + QueryTreeNodePtr buildExpressionList(const ASTPtr & expression_list) const; + + QueryTreeNodePtr buildExpression(const ASTPtr & expression) const; + + QueryTreeNodePtr buildWindow(const ASTPtr & window_definition) const; + + QueryTreeNodePtr buildJoinTree(const ASTPtr & tables_in_select_query) const; + + ColumnTransformersNodes buildColumnTransformers(const ASTPtr & matcher_expression, size_t start_child_index) const; + + ASTPtr query; + ContextPtr context; + QueryTreeNodePtr query_tree_node; + +}; + +QueryTreeBuilder::QueryTreeBuilder(ASTPtr query_, ContextPtr context_) + : query(query_->clone()) + , context(std::move(context_)) +{ + if (query->as() || + query->as() || + query->as()) + query_tree_node = buildSelectOrUnionExpression(query, false /*is_subquery*/, {} /*cte_name*/); + else if (query->as()) + query_tree_node = buildExpressionList(query); + else + query_tree_node = buildExpression(query); +} + +QueryTreeNodePtr QueryTreeBuilder::buildSelectOrUnionExpression(const ASTPtr & select_or_union_query, bool is_subquery, const std::string & cte_name) const +{ + QueryTreeNodePtr query_node; + + if (select_or_union_query->as()) + query_node = buildSelectWithUnionExpression(select_or_union_query, is_subquery /*is_subquery*/, cte_name /*cte_name*/); + else if (select_or_union_query->as()) + query_node = buildSelectIntersectExceptQuery(select_or_union_query, is_subquery /*is_subquery*/, cte_name /*cte_name*/); + else if (select_or_union_query->as()) + query_node = buildSelectExpression(select_or_union_query, is_subquery /*is_subquery*/, cte_name /*cte_name*/); + else + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "SELECT or UNION query {} is not supported", select_or_union_query->formatForErrorMessage()); + + return query_node; +} + +QueryTreeNodePtr QueryTreeBuilder::buildSelectWithUnionExpression(const ASTPtr & select_with_union_query, bool is_subquery, const std::string & cte_name) const +{ + auto & select_with_union_query_typed = select_with_union_query->as(); + auto & select_lists = select_with_union_query_typed.list_of_selects->as(); + + if (select_lists.children.size() == 1) + return buildSelectOrUnionExpression(select_lists.children[0], is_subquery, cte_name); + + auto union_node = std::make_shared(select_with_union_query_typed.union_mode); + union_node->setIsSubquery(is_subquery); + union_node->setIsCTE(!cte_name.empty()); + union_node->setCTEName(cte_name); + union_node->setOriginalAST(select_with_union_query); + + size_t select_lists_children_size = select_lists.children.size(); + + for (size_t i = 0; i < select_lists_children_size; ++i) + { + auto & select_list_node = select_lists.children[i]; + QueryTreeNodePtr query_node = 
buildSelectOrUnionExpression(select_list_node, false /*is_subquery*/, {} /*cte_name*/); + union_node->getQueries().getNodes().push_back(std::move(query_node)); + } + + return union_node; +} + +QueryTreeNodePtr QueryTreeBuilder::buildSelectIntersectExceptQuery(const ASTPtr & select_intersect_except_query, bool is_subquery, const std::string & cte_name) const +{ + auto & select_intersect_except_query_typed = select_intersect_except_query->as(); + auto select_lists = select_intersect_except_query_typed.getListOfSelects(); + + if (select_lists.size() == 1) + return buildSelectExpression(select_lists[0], is_subquery, cte_name); + + SelectUnionMode union_mode; + if (select_intersect_except_query_typed.final_operator == ASTSelectIntersectExceptQuery::Operator::INTERSECT_ALL) + union_mode = SelectUnionMode::INTERSECT_ALL; + else if (select_intersect_except_query_typed.final_operator == ASTSelectIntersectExceptQuery::Operator::INTERSECT_DISTINCT) + union_mode = SelectUnionMode::INTERSECT_DISTINCT; + else if (select_intersect_except_query_typed.final_operator == ASTSelectIntersectExceptQuery::Operator::EXCEPT_ALL) + union_mode = SelectUnionMode::EXCEPT_ALL; + else if (select_intersect_except_query_typed.final_operator == ASTSelectIntersectExceptQuery::Operator::EXCEPT_DISTINCT) + union_mode = SelectUnionMode::EXCEPT_DISTINCT; + else + throw Exception(ErrorCodes::LOGICAL_ERROR, "UNION type is not initialized"); + + auto union_node = std::make_shared(union_mode); + union_node->setIsSubquery(is_subquery); + union_node->setIsCTE(!cte_name.empty()); + union_node->setCTEName(cte_name); + union_node->setOriginalAST(select_intersect_except_query); + + size_t select_lists_size = select_lists.size(); + + for (size_t i = 0; i < select_lists_size; ++i) + { + auto & select_list_node = select_lists[i]; + QueryTreeNodePtr query_node = buildSelectOrUnionExpression(select_list_node, false /*is_subquery*/, {} /*cte_name*/); + union_node->getQueries().getNodes().push_back(std::move(query_node)); + } + + return union_node; +} + +QueryTreeNodePtr QueryTreeBuilder::buildSelectExpression(const ASTPtr & select_query, bool is_subquery, const std::string & cte_name) const +{ + const auto & select_query_typed = select_query->as(); + auto current_query_tree = std::make_shared(); + + current_query_tree->setIsSubquery(is_subquery); + current_query_tree->setIsCTE(!cte_name.empty()); + current_query_tree->setCTEName(cte_name); + current_query_tree->setIsDistinct(select_query_typed.distinct); + current_query_tree->setIsLimitWithTies(select_query_typed.limit_with_ties); + current_query_tree->setIsGroupByWithTotals(select_query_typed.group_by_with_totals); + current_query_tree->setIsGroupByWithCube(select_query_typed.group_by_with_cube); + current_query_tree->setIsGroupByWithRollup(select_query_typed.group_by_with_rollup); + current_query_tree->setIsGroupByWithGroupingSets(select_query_typed.group_by_with_grouping_sets); + current_query_tree->setOriginalAST(select_query); + + auto select_settings = select_query_typed.settings(); + if (select_settings) + { + auto & set_query = select_settings->as(); + current_query_tree->setSettingsChanges(set_query.changes); + } + + current_query_tree->getJoinTree() = buildJoinTree(select_query_typed.tables()); + + auto select_with_list = select_query_typed.with(); + if (select_with_list) + current_query_tree->getWithNode() = buildExpressionList(select_with_list); + + auto select_expression_list = select_query_typed.select(); + if (select_expression_list) + current_query_tree->getProjectionNode() = 
buildExpressionList(select_expression_list); + + auto prewhere_expression = select_query_typed.prewhere(); + if (prewhere_expression) + current_query_tree->getPrewhere() = buildExpression(prewhere_expression); + + auto where_expression = select_query_typed.where(); + if (where_expression) + current_query_tree->getWhere() = buildExpression(where_expression); + + auto group_by_list = select_query_typed.groupBy(); + if (group_by_list) + { + auto & group_by_children = group_by_list->children; + + if (current_query_tree->isGroupByWithGroupingSets()) + { + auto grouping_sets_list_node = std::make_shared(); + + for (auto & grouping_sets_keys : group_by_children) + { + auto grouping_sets_keys_list_node = buildExpressionList(grouping_sets_keys); + current_query_tree->getGroupBy().getNodes().emplace_back(std::move(grouping_sets_keys_list_node)); + } + } + else + { + current_query_tree->getGroupByNode() = buildExpressionList(group_by_list); + } + } + + auto having_expression = select_query_typed.having(); + if (having_expression) + current_query_tree->getHaving() = buildExpression(having_expression); + + auto window_list = select_query_typed.window(); + if (window_list) + current_query_tree->getWindowNode() = buildWindowList(window_list); + + auto select_order_by_list = select_query_typed.orderBy(); + if (select_order_by_list) + current_query_tree->getOrderByNode() = buildSortList(select_order_by_list); + + auto interpolate_list = select_query_typed.interpolate(); + if (interpolate_list) + current_query_tree->getInterpolate() = buildInterpolateList(interpolate_list); + + auto select_limit_by_limit = select_query_typed.limitByLength(); + if (select_limit_by_limit) + current_query_tree->getLimitByLimit() = buildExpression(select_limit_by_limit); + + auto select_limit_by_offset = select_query_typed.limitOffset(); + if (select_limit_by_offset) + current_query_tree->getLimitByOffset() = buildExpression(select_limit_by_offset); + + auto select_limit_by = select_query_typed.limitBy(); + if (select_limit_by) + current_query_tree->getLimitByNode() = buildExpressionList(select_limit_by); + + auto select_limit = select_query_typed.limitLength(); + if (select_limit) + current_query_tree->getLimit() = buildExpression(select_limit); + + auto select_offset = select_query_typed.limitOffset(); + if (select_offset) + current_query_tree->getOffset() = buildExpression(select_offset); + + return current_query_tree; +} + +QueryTreeNodePtr QueryTreeBuilder::buildSortList(const ASTPtr & order_by_expression_list) const +{ + auto list_node = std::make_shared(); + + auto & expression_list_typed = order_by_expression_list->as(); + list_node->getNodes().reserve(expression_list_typed.children.size()); + + for (auto & expression : expression_list_typed.children) + { + const auto & order_by_element = expression->as(); + + auto sort_direction = order_by_element.direction == 1 ? SortDirection::ASCENDING : SortDirection::DESCENDING; + std::optional nulls_sort_direction; + if (order_by_element.nulls_direction_was_explicitly_specified) + nulls_sort_direction = order_by_element.nulls_direction == 1 ? 
SortDirection::ASCENDING : SortDirection::DESCENDING; + + std::shared_ptr collator; + if (order_by_element.collation) + collator = std::make_shared(order_by_element.collation->as().value.get()); + + const auto & sort_expression_ast = order_by_element.children.at(0); + auto sort_expression = buildExpression(sort_expression_ast); + auto sort_node = std::make_shared(std::move(sort_expression), + sort_direction, + nulls_sort_direction, + std::move(collator), + order_by_element.with_fill); + + if (order_by_element.fill_from) + sort_node->getFillFrom() = buildExpression(order_by_element.fill_from); + if (order_by_element.fill_to) + sort_node->getFillTo() = buildExpression(order_by_element.fill_to); + if (order_by_element.fill_step) + sort_node->getFillStep() = buildExpression(order_by_element.fill_step); + + list_node->getNodes().push_back(std::move(sort_node)); + } + + return list_node; +} + +QueryTreeNodePtr QueryTreeBuilder::buildInterpolateList(const ASTPtr & interpolate_expression_list) const +{ + auto list_node = std::make_shared(); + + auto & expression_list_typed = interpolate_expression_list->as(); + list_node->getNodes().reserve(expression_list_typed.children.size()); + + for (auto & expression : expression_list_typed.children) + { + const auto & interpolate_element = expression->as(); + auto expression_to_interpolate = std::make_shared(Identifier(interpolate_element.column)); + auto interpolate_expression = buildExpression(interpolate_element.expr); + auto interpolate_node = std::make_shared(std::move(expression_to_interpolate), std::move(interpolate_expression)); + + list_node->getNodes().push_back(std::move(interpolate_node)); + } + + return list_node; +} + +QueryTreeNodePtr QueryTreeBuilder::buildWindowList(const ASTPtr & window_definition_list) const +{ + auto list_node = std::make_shared(); + + auto & expression_list_typed = window_definition_list->as(); + list_node->getNodes().reserve(expression_list_typed.children.size()); + + for (auto & window_list_element : expression_list_typed.children) + { + const auto & window_list_element_typed = window_list_element->as(); + + auto window_node = buildWindow(window_list_element_typed.definition); + window_node->setAlias(window_list_element_typed.name); + + list_node->getNodes().push_back(std::move(window_node)); + } + + return list_node; +} + +QueryTreeNodePtr QueryTreeBuilder::buildExpressionList(const ASTPtr & expression_list) const +{ + auto list_node = std::make_shared(); + + auto & expression_list_typed = expression_list->as(); + list_node->getNodes().reserve(expression_list_typed.children.size()); + + for (auto & expression : expression_list_typed.children) + { + auto expression_node = buildExpression(expression); + list_node->getNodes().push_back(std::move(expression_node)); + } + + return list_node; +} + +QueryTreeNodePtr QueryTreeBuilder::buildExpression(const ASTPtr & expression) const +{ + QueryTreeNodePtr result; + + if (const auto * ast_identifier = expression->as()) + { + auto identifier = Identifier(ast_identifier->name_parts); + result = std::make_shared(std::move(identifier)); + } + else if (const auto * asterisk = expression->as()) + { + auto column_transformers = buildColumnTransformers(expression, 0 /*start_child_index*/); + result = std::make_shared(std::move(column_transformers)); + } + else if (const auto * qualified_asterisk = expression->as()) + { + auto & qualified_identifier = qualified_asterisk->children.at(0)->as(); + auto column_transformers = buildColumnTransformers(expression, 1 /*start_child_index*/); + 
result = std::make_shared(Identifier(qualified_identifier.name_parts), std::move(column_transformers)); + } + else if (const auto * ast_literal = expression->as()) + { + result = std::make_shared(ast_literal->value); + } + else if (const auto * function = expression->as()) + { + if (function->is_lambda_function) + { + const auto & lambda_arguments_and_expression = function->arguments->as().children; + auto & lambda_arguments_tuple = lambda_arguments_and_expression.at(0)->as(); + + auto lambda_arguments_nodes = std::make_shared(); + Names lambda_arguments; + NameSet lambda_arguments_set; + + if (lambda_arguments_tuple.arguments) + { + const auto & lambda_arguments_list = lambda_arguments_tuple.arguments->as().children; + for (const auto & lambda_argument : lambda_arguments_list) + { + const auto * lambda_argument_identifier = lambda_argument->as(); + + if (!lambda_argument_identifier) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Lambda {} argument is not identifier", + function->formatForErrorMessage()); + + if (lambda_argument_identifier->name_parts.size() > 1) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Lambda {} argument identifier must contain single part. Actual {}", + function->formatForErrorMessage(), + lambda_argument_identifier->full_name); + + const auto & argument_name = lambda_argument_identifier->name_parts[0]; + auto [_, inserted] = lambda_arguments_set.insert(argument_name); + if (!inserted) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Lambda {} multiple arguments with same name {}", + function->formatForErrorMessage(), + argument_name); + + lambda_arguments.push_back(argument_name); + } + } + + const auto & lambda_expression = lambda_arguments_and_expression.at(1); + auto lambda_expression_node = buildExpression(lambda_expression); + + result = std::make_shared(std::move(lambda_arguments), std::move(lambda_expression_node)); + } + else + { + auto function_node = std::make_shared(function->name); + + if (function->parameters) + { + const auto & function_parameters_list = function->parameters->as()->children; + for (const auto & argument : function_parameters_list) + function_node->getParameters().getNodes().push_back(buildExpression(argument)); + } + + if (function->arguments) + { + const auto & function_arguments_list = function->arguments->as()->children; + for (const auto & argument : function_arguments_list) + function_node->getArguments().getNodes().push_back(buildExpression(argument)); + } + + if (function->is_window_function) + { + if (function->window_definition) + function_node->getWindowNode() = buildWindow(function->window_definition); + else + function_node->getWindowNode() = std::make_shared(Identifier(function->window_name)); + } + + result = std::move(function_node); + } + } + else if (const auto * subquery = expression->as()) + { + auto subquery_query = subquery->children[0]; + auto query_node = buildSelectWithUnionExpression(subquery_query, true /*is_subquery*/, {} /*cte_name*/); + + result = std::move(query_node); + } + else if (const auto * with_element = expression->as()) + { + auto with_element_subquery = with_element->subquery->as().children.at(0); + auto query_node = buildSelectWithUnionExpression(with_element_subquery, true /*is_subquery*/, with_element->name /*cte_name*/); + + result = std::move(query_node); + } + else if (const auto * columns_regexp_matcher = expression->as()) + { + auto column_transformers = buildColumnTransformers(expression, 0 /*start_child_index*/); + result = std::make_shared(columns_regexp_matcher->getMatcher(), 
std::move(column_transformers)); + } + else if (const auto * columns_list_matcher = expression->as()) + { + Identifiers column_list_identifiers; + column_list_identifiers.reserve(columns_list_matcher->column_list->children.size()); + + for (auto & column_list_child : columns_list_matcher->column_list->children) + { + auto & column_list_identifier = column_list_child->as(); + column_list_identifiers.emplace_back(Identifier{column_list_identifier.name_parts}); + } + + auto column_transformers = buildColumnTransformers(expression, 0 /*start_child_index*/); + result = std::make_shared(std::move(column_list_identifiers), std::move(column_transformers)); + } + else if (const auto * qualified_columns_regexp_matcher = expression->as()) + { + auto & qualified_identifier = qualified_columns_regexp_matcher->children.at(0)->as(); + auto column_transformers = buildColumnTransformers(expression, 1 /*start_child_index*/); + result = std::make_shared(Identifier(qualified_identifier.name_parts), qualified_columns_regexp_matcher->getMatcher(), std::move(column_transformers)); + } + else if (const auto * qualified_columns_list_matcher = expression->as()) + { + auto & qualified_identifier = qualified_columns_list_matcher->children.at(0)->as(); + + Identifiers column_list_identifiers; + column_list_identifiers.reserve(qualified_columns_list_matcher->column_list->children.size()); + + for (auto & column_list_child : qualified_columns_list_matcher->column_list->children) + { + auto & column_list_identifier = column_list_child->as(); + column_list_identifiers.emplace_back(Identifier{column_list_identifier.name_parts}); + } + + auto column_transformers = buildColumnTransformers(expression, 1 /*start_child_index*/); + result = std::make_shared(Identifier(qualified_identifier.name_parts), std::move(column_list_identifiers), std::move(column_transformers)); + } + else + { + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, + "Invalid expression. Expected identifier, literal, matcher, function, subquery. 
Actual {}", + expression->formatForErrorMessage()); + } + + result->setAlias(expression->tryGetAlias()); + result->setOriginalAST(expression); + + return result; +} + +QueryTreeNodePtr QueryTreeBuilder::buildWindow(const ASTPtr & window_definition) const +{ + const auto & window_definition_typed = window_definition->as(); + WindowFrame window_frame; + + if (!window_definition_typed.frame_is_default) + { + window_frame.is_default = false; + window_frame.type = window_definition_typed.frame_type; + window_frame.begin_type = window_definition_typed.frame_begin_type; + window_frame.begin_preceding = window_definition_typed.frame_begin_preceding; + window_frame.end_type = window_definition_typed.frame_end_type; + window_frame.end_preceding = window_definition_typed.frame_end_preceding; + } + + auto window_node = std::make_shared(window_frame); + window_node->setParentWindowName(window_definition_typed.parent_window_name); + + if (window_definition_typed.partition_by) + window_node->getPartitionByNode() = buildExpressionList(window_definition_typed.partition_by); + + if (window_definition_typed.order_by) + window_node->getOrderByNode() = buildSortList(window_definition_typed.order_by); + + if (window_definition_typed.frame_begin_offset) + window_node->getFrameBeginOffsetNode() = buildExpression(window_definition_typed.frame_begin_offset); + + if (window_definition_typed.frame_end_offset) + window_node->getFrameEndOffsetNode() = buildExpression(window_definition_typed.frame_end_offset); + + window_node->setOriginalAST(window_definition); + + return window_node; +} + +QueryTreeNodePtr QueryTreeBuilder::buildJoinTree(const ASTPtr & tables_in_select_query) const +{ + if (!tables_in_select_query) + { + /** If no table is specified in SELECT query we substitute system.one table. 
+ * SELECT * FROM system.one; + */ + Identifier storage_identifier("system.one"); + return std::make_shared(storage_identifier); + } + + auto & tables = tables_in_select_query->as(); + + QueryTreeNodes table_expressions; + + for (const auto & table_element_untyped : tables.children) + { + const auto & table_element = table_element_untyped->as(); + + if (table_element.table_expression) + { + auto & table_expression = table_element.table_expression->as(); + std::optional table_expression_modifiers; + + if (table_expression.final || table_expression.sample_size) + { + bool has_final = table_expression.final; + std::optional sample_size_ratio; + std::optional sample_offset_ratio; + + if (table_expression.sample_size) + { + auto & ast_sample_size_ratio = table_expression.sample_size->as(); + sample_size_ratio = ast_sample_size_ratio.ratio; + + if (table_expression.sample_offset) + { + auto & ast_sample_offset_ratio = table_expression.sample_offset->as(); + sample_offset_ratio = ast_sample_offset_ratio.ratio; + } + } + + table_expression_modifiers = TableExpressionModifiers(has_final, sample_size_ratio, sample_offset_ratio); + } + + if (table_expression.database_and_table_name) + { + auto & table_identifier_typed = table_expression.database_and_table_name->as(); + auto storage_identifier = Identifier(table_identifier_typed.name_parts); + QueryTreeNodePtr table_identifier_node; + + if (table_expression_modifiers) + table_identifier_node = std::make_shared(storage_identifier, *table_expression_modifiers); + else + table_identifier_node = std::make_shared(storage_identifier); + + table_identifier_node->setAlias(table_identifier_typed.tryGetAlias()); + table_identifier_node->setOriginalAST(table_element.table_expression); + + table_expressions.push_back(std::move(table_identifier_node)); + } + else if (table_expression.subquery) + { + auto & subquery_expression = table_expression.subquery->as(); + const auto & select_with_union_query = subquery_expression.children[0]; + + auto node = buildSelectWithUnionExpression(select_with_union_query, true /*is_subquery*/, {} /*cte_name*/); + node->setAlias(subquery_expression.tryGetAlias()); + node->setOriginalAST(select_with_union_query); + + if (table_expression_modifiers) + { + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, + "Table expression modifiers {} are not supported for subquery {}", + table_expression_modifiers->formatForErrorMessage(), + node->formatASTForErrorMessage()); + } + + table_expressions.push_back(std::move(node)); + } + else if (table_expression.table_function) + { + auto & table_function_expression = table_expression.table_function->as(); + + auto node = std::make_shared(table_function_expression.name); + + if (table_function_expression.arguments) + { + const auto & function_arguments_list = table_function_expression.arguments->as().children; + for (const auto & argument : function_arguments_list) + { + if (argument->as() || argument->as() || argument->as()) + node->getArguments().getNodes().push_back(buildSelectOrUnionExpression(argument, false /*is_subquery*/, {} /*cte_name*/)); + else + node->getArguments().getNodes().push_back(buildExpression(argument)); + } + } + + if (table_expression_modifiers) + node->setTableExpressionModifiers(*table_expression_modifiers); + node->setAlias(table_function_expression.tryGetAlias()); + node->setOriginalAST(table_expression.table_function); + + table_expressions.push_back(std::move(node)); + } + else + { + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "Unsupported table expression node {}", 
table_element.table_expression->formatForErrorMessage()); + } + } + + if (table_element.table_join) + { + const auto & table_join = table_element.table_join->as(); + + auto right_table_expression = std::move(table_expressions.back()); + table_expressions.pop_back(); + + auto left_table_expression = std::move(table_expressions.back()); + table_expressions.pop_back(); + + QueryTreeNodePtr join_expression; + + if (table_join.using_expression_list) + join_expression = buildExpressionList(table_join.using_expression_list); + else if (table_join.on_expression) + join_expression = buildExpression(table_join.on_expression); + + const auto & settings = context->getSettingsRef(); + auto join_default_strictness = settings.join_default_strictness; + auto any_join_distinct_right_table_keys = settings.any_join_distinct_right_table_keys; + + JoinStrictness result_join_strictness = table_join.strictness; + JoinKind result_join_kind = table_join.kind; + + if (result_join_strictness == JoinStrictness::Unspecified && (result_join_kind != JoinKind::Cross && result_join_kind != JoinKind::Comma)) + { + if (join_default_strictness == JoinStrictness::Any) + result_join_strictness = JoinStrictness::Any; + else if (join_default_strictness == JoinStrictness::All) + result_join_strictness = JoinStrictness::All; + else + throw Exception(ErrorCodes::EXPECTED_ALL_OR_ANY, + "Expected ANY or ALL in JOIN section, because setting (join_default_strictness) is empty"); + } + + if (any_join_distinct_right_table_keys) + { + if (result_join_strictness == JoinStrictness::Any && result_join_kind == JoinKind::Inner) + { + result_join_strictness = JoinStrictness::Semi; + result_join_kind = JoinKind::Left; + } + + if (result_join_strictness == JoinStrictness::Any) + result_join_strictness = JoinStrictness::RightAny; + } + else if (result_join_strictness == JoinStrictness::Any && result_join_kind == JoinKind::Full) + { + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "ANY FULL JOINs are not implemented"); + } + + auto join_node = std::make_shared(std::move(left_table_expression), + std::move(right_table_expression), + std::move(join_expression), + table_join.locality, + result_join_strictness, + result_join_kind); + + /** Original AST is not set because it will contain only join part and does + * not include left table expression. + */ + table_expressions.emplace_back(std::move(join_node)); + } + + if (table_element.array_join) + { + auto & array_join_expression = table_element.array_join->as(); + bool is_left_array_join = array_join_expression.kind == ASTArrayJoin::Kind::Left; + + auto last_table_expression = std::move(table_expressions.back()); + table_expressions.pop_back(); + + auto array_join_expressions_list = buildExpressionList(array_join_expression.expression_list); + auto array_join_node = std::make_shared(std::move(last_table_expression), std::move(array_join_expressions_list), is_left_array_join); + + /** Original AST is not set because it will contain only array join part and does + * not include left table expression. 
+ */ + table_expressions.push_back(std::move(array_join_node)); + } + } + + if (table_expressions.empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Query FROM section cannot be empty"); + + if (table_expressions.size() > 1) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Query FROM section cannot have more than 1 root table expression"); + + return table_expressions.back(); +} + + +ColumnTransformersNodes QueryTreeBuilder::buildColumnTransformers(const ASTPtr & matcher_expression, size_t start_child_index) const +{ + ColumnTransformersNodes column_transformers; + size_t children_size = matcher_expression->children.size(); + + for (; start_child_index < children_size; ++start_child_index) + { + const auto & child = matcher_expression->children[start_child_index]; + + if (auto * apply_transformer = child->as()) + { + if (apply_transformer->lambda) + { + auto lambda_query_tree_node = buildExpression(apply_transformer->lambda); + column_transformers.emplace_back(std::make_shared(std::move(lambda_query_tree_node))); + } + else + { + auto function_node = std::make_shared(apply_transformer->func_name); + if (apply_transformer->parameters) + function_node->getParametersNode() = buildExpressionList(apply_transformer->parameters); + + column_transformers.emplace_back(std::make_shared(std::move(function_node))); + } + } + else if (auto * except_transformer = child->as()) + { + auto matcher = except_transformer->getMatcher(); + if (matcher) + { + column_transformers.emplace_back(std::make_shared(std::move(matcher))); + } + else + { + Names except_column_names; + except_column_names.reserve(except_transformer->children.size()); + + for (auto & except_transformer_child : except_transformer->children) + except_column_names.push_back(except_transformer_child->as().full_name); + + column_transformers.emplace_back(std::make_shared(std::move(except_column_names), except_transformer->is_strict)); + } + } + else if (auto * replace_transformer = child->as()) + { + std::vector replacements; + replacements.reserve(replace_transformer->children.size()); + + for (const auto & replace_transformer_child : replace_transformer->children) + { + auto & replacement = replace_transformer_child->as(); + replacements.emplace_back(ReplaceColumnTransformerNode::Replacement{replacement.name, buildExpression(replacement.expr)}); + } + + column_transformers.emplace_back(std::make_shared(replacements, replace_transformer->is_strict)); + } + else + { + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "Unsupported column matcher {}", child->formatForErrorMessage()); + } + } + + return column_transformers; +} + +} + +QueryTreeNodePtr buildQueryTree(ASTPtr query, ContextPtr context) +{ + QueryTreeBuilder builder(std::move(query), context); + return builder.getQueryTreeNode(); +} + +} diff --git a/src/Analyzer/QueryTreeBuilder.h b/src/Analyzer/QueryTreeBuilder.h new file mode 100644 index 00000000000..de0f6270230 --- /dev/null +++ b/src/Analyzer/QueryTreeBuilder.h @@ -0,0 +1,19 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace DB +{ + +/** Build query tree from AST. + * AST that represent query ASTSelectWithUnionQuery, ASTSelectIntersectExceptQuery, ASTSelectQuery. + * AST that represent a list of expressions ASTExpressionList. + * AST that represent expression ASTIdentifier, ASTAsterisk, ASTLiteral, ASTFunction. 
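+ *
+ * A minimal usage sketch (illustrative; `ast` and `context` below are assumed to be an already
+ * parsed ASTPtr of one of the forms listed above and a valid ContextPtr):
+ *
+ *     QueryTreeNodePtr query_tree = buildQueryTree(ast, context);
+ *
+ * The returned tree is not yet resolved; identifiers and table functions are resolved by the
+ * later query analysis passes.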
+ */ +QueryTreeNodePtr buildQueryTree(ASTPtr query, ContextPtr context); + +} diff --git a/src/Analyzer/QueryTreePassManager.cpp b/src/Analyzer/QueryTreePassManager.cpp new file mode 100644 index 00000000000..853b4a23f38 --- /dev/null +++ b/src/Analyzer/QueryTreePassManager.cpp @@ -0,0 +1,151 @@ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int BAD_ARGUMENTS; +} + +/** ClickHouse query tree pass manager. + * + * TODO: Support _shard_num into shardNum() rewriting. + * TODO: Support logical expressions optimizer. + * TODO: Support fuse sum count optimize_fuse_sum_count_avg, optimize_syntax_fuse_functions. + * TODO: Support setting convert_query_to_cnf. + * TODO: Support setting optimize_using_constraints. + * TODO: Support setting optimize_substitute_columns. + * TODO: Support GROUP BY injective function elimination. + * TODO: Support GROUP BY functions of other keys elimination. + * TODO: Support setting optimize_move_functions_out_of_any. + * TODO: Support setting optimize_aggregators_of_group_by_keys. + * TODO: Support setting optimize_duplicate_order_by_and_distinct. + * TODO: Support setting optimize_redundant_functions_in_order_by. + * TODO: Support setting optimize_monotonous_functions_in_order_by. + * TODO: Support setting optimize_if_transform_strings_to_enum. + * TODO: Support settings.optimize_syntax_fuse_functions. + * TODO: Support settings.optimize_or_like_chain. + * TODO: Add optimizations based on function semantics. Example: SELECT * FROM test_table WHERE id != id. (id is not nullable column). + */ + +QueryTreePassManager::QueryTreePassManager(ContextPtr context_) : WithContext(context_) {} + +void QueryTreePassManager::addPass(QueryTreePassPtr pass) +{ + passes.push_back(std::move(pass)); +} + +void QueryTreePassManager::run(QueryTreeNodePtr query_tree_node) +{ + auto current_context = getContext(); + size_t passes_size = passes.size(); + + for (size_t i = 0; i < passes_size; ++i) + passes[i]->run(query_tree_node, current_context); +} + +void QueryTreePassManager::run(QueryTreeNodePtr query_tree_node, size_t up_to_pass_index) +{ + size_t passes_size = passes.size(); + if (up_to_pass_index > passes_size) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Requested to run passes up to {} pass. There are only {} passes", + up_to_pass_index, + passes_size); + + auto current_context = getContext(); + for (size_t i = 0; i < up_to_pass_index; ++i) + passes[i]->run(query_tree_node, current_context); +} + +void QueryTreePassManager::dump(WriteBuffer & buffer) +{ + size_t passes_size = passes.size(); + + for (size_t i = 0; i < passes_size; ++i) + { + auto & pass = passes[i]; + buffer << "Pass " << (i + 1) << ' ' << pass->getName() << " - " << pass->getDescription(); + if (i + 1 != passes_size) + buffer << '\n'; + } +} + +void QueryTreePassManager::dump(WriteBuffer & buffer, size_t up_to_pass_index) +{ + size_t passes_size = passes.size(); + if (up_to_pass_index > passes_size) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Requested to dump passes up to {} pass. 
There are only {} passes", + up_to_pass_index, + passes_size); + + for (size_t i = 0; i < up_to_pass_index; ++i) + { + auto & pass = passes[i]; + buffer << "Pass " << (i + 1) << " " << pass->getName() << " - " << pass->getDescription(); + if (i + 1 != up_to_pass_index) + buffer << '\n'; + } +} + +void addQueryTreePasses(QueryTreePassManager & manager) +{ + auto context = manager.getContext(); + const auto & settings = context->getSettingsRef(); + + manager.addPass(std::make_shared()); + + if (settings.optimize_functions_to_subcolumns) + manager.addPass(std::make_shared()); + + if (settings.count_distinct_optimization) + manager.addPass(std::make_shared()); + + if (settings.optimize_rewrite_sum_if_to_count_if) + manager.addPass(std::make_shared()); + + if (settings.optimize_normalize_count_variants) + manager.addPass(std::make_shared()); + + manager.addPass(std::make_shared()); + + if (settings.optimize_arithmetic_operations_in_aggregate_functions) + manager.addPass(std::make_shared()); + + if (settings.optimize_injective_functions_inside_uniq) + manager.addPass(std::make_shared()); + + if (settings.optimize_multiif_to_if) + manager.addPass(std::make_shared()); + + manager.addPass(std::make_shared()); + + if (settings.optimize_if_chain_to_multiif) + manager.addPass(std::make_shared()); + + manager.addPass(std::make_shared()); + manager.addPass(std::make_shared()); +} + +} diff --git a/src/Analyzer/QueryTreePassManager.h b/src/Analyzer/QueryTreePassManager.h new file mode 100644 index 00000000000..3c67fc36178 --- /dev/null +++ b/src/Analyzer/QueryTreePassManager.h @@ -0,0 +1,49 @@ +#pragma once + +#include + +#include + +namespace DB +{ + +/** Query tree pass manager provide functionality to register and run passes + * on query tree. + */ +class QueryTreePassManager : public WithContext +{ +public: + explicit QueryTreePassManager(ContextPtr context_); + + /// Get registered passes + const std::vector & getPasses() const + { + return passes; + } + + /// Add query tree pass + void addPass(QueryTreePassPtr pass); + + /// Run query tree passes on query tree + void run(QueryTreeNodePtr query_tree_node); + + /** Run query tree passes on query tree up to up_to_pass_index. + * Throws exception if up_to_pass_index is greater than passes size. + */ + void run(QueryTreeNodePtr query_tree_node, size_t up_to_pass_index); + + /// Dump query tree passes + void dump(WriteBuffer & buffer); + + /** Dump query tree passes to up_to_pass_index. + * Throws exception if up_to_pass_index is greater than passes size. 
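+ *
+ * Example (illustrative; `pass_manager` and `buffer` are assumed locals, with passes already
+ * registered, e.g. via addQueryTreePasses): dump only the first two registered passes:
+ *
+ *     pass_manager.dump(buffer, 2);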
+ */ + void dump(WriteBuffer & buffer, size_t up_to_pass_index); + +private: + std::vector passes; +}; + +void addQueryTreePasses(QueryTreePassManager & manager); + +} diff --git a/src/Analyzer/SetUtils.cpp b/src/Analyzer/SetUtils.cpp new file mode 100644 index 00000000000..a72879d2145 --- /dev/null +++ b/src/Analyzer/SetUtils.cpp @@ -0,0 +1,182 @@ +#include + +#include + +#include +#include +#include + +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int INCORRECT_ELEMENT_OF_SET; + extern const int ILLEGAL_TYPE_OF_ARGUMENT; +} + +namespace +{ + +size_t getCompoundTypeDepth(const IDataType & type) +{ + size_t result = 0; + + const IDataType * current_type = &type; + + while (true) + { + WhichDataType which_type(*current_type); + + if (which_type.isArray()) + { + current_type = assert_cast(*current_type).getNestedType().get(); + ++result; + } + else if (which_type.isTuple()) + { + const auto & tuple_elements = assert_cast(*current_type).getElements(); + if (!tuple_elements.empty()) + current_type = tuple_elements.at(0).get(); + + ++result; + } + else + { + break; + } + } + + return result; +} + +template +Block createBlockFromCollection(const Collection & collection, const DataTypes & block_types, bool transform_null_in) +{ + size_t columns_size = block_types.size(); + MutableColumns columns(columns_size); + for (size_t i = 0; i < columns_size; ++i) + { + columns[i] = block_types[i]->createColumn(); + columns[i]->reserve(collection.size()); + } + + Row tuple_values; + + for (const auto & value : collection) + { + if (columns_size == 1) + { + auto field = convertFieldToType(value, *block_types[0]); + bool need_insert_null = transform_null_in && block_types[0]->isNullable(); + if (!field.isNull() || need_insert_null) + columns[0]->insert(std::move(field)); + + continue; + } + + if (value.getType() != Field::Types::Tuple) + throw Exception(ErrorCodes::INCORRECT_ELEMENT_OF_SET, + "Invalid type in set. 
Expected tuple, got {}", + value.getTypeName()); + + const auto & tuple = value.template get(); + size_t tuple_size = tuple.size(); + + if (tuple_size != columns_size) + throw Exception(ErrorCodes::INCORRECT_ELEMENT_OF_SET, + "Incorrect size of tuple in set: {} instead of {}", + tuple_size, + columns_size); + + if (tuple_values.empty()) + tuple_values.resize(tuple_size); + + size_t i = 0; + for (; i < tuple_size; ++i) + { + tuple_values[i] = convertFieldToType(tuple[i], *block_types[i]); + bool need_insert_null = transform_null_in && block_types[i]->isNullable(); + if (tuple_values[i].isNull() && !need_insert_null) + break; + } + + if (i == tuple_size) + for (i = 0; i < tuple_size; ++i) + columns[i]->insert(tuple_values[i]); + } + + Block res; + for (size_t i = 0; i < columns_size; ++i) + res.insert(ColumnWithTypeAndName{std::move(columns[i]), block_types[i], "argument_" + toString(i)}); + + return res; +} + +} + +SetPtr makeSetForConstantValue(const DataTypePtr & expression_type, const Field & value, const DataTypePtr & value_type, const Settings & settings) +{ + DataTypes set_element_types = {expression_type}; + const auto * lhs_tuple_type = typeid_cast(expression_type.get()); + + if (lhs_tuple_type && lhs_tuple_type->getElements().size() != 1) + set_element_types = lhs_tuple_type->getElements(); + + for (auto & set_element_type : set_element_types) + { + if (const auto * set_element_low_cardinality_type = typeid_cast(set_element_type.get())) + set_element_type = set_element_low_cardinality_type->getDictionaryType(); + } + + size_t lhs_type_depth = getCompoundTypeDepth(*expression_type); + size_t rhs_type_depth = getCompoundTypeDepth(*value_type); + + SizeLimits size_limits_for_set = {settings.max_rows_in_set, settings.max_bytes_in_set, settings.set_overflow_mode}; + bool tranform_null_in = settings.transform_null_in; + + Block result_block; + + if (lhs_type_depth == rhs_type_depth) + { + /// 1 in 1; (1, 2) in (1, 2); identity(tuple(tuple(tuple(1)))) in tuple(tuple(tuple(1))); etc. + + Array array{value}; + result_block = createBlockFromCollection(array, set_element_types, tranform_null_in); + } + else if (lhs_type_depth + 1 == rhs_type_depth) + { + /// 1 in (1, 2); (1, 2) in ((1, 2), (3, 4)) + + WhichDataType rhs_which_type(value_type); + + if (rhs_which_type.isArray()) + result_block = createBlockFromCollection(value.get(), set_element_types, tranform_null_in); + else if (rhs_which_type.isTuple()) + result_block = createBlockFromCollection(value.get(), set_element_types, tranform_null_in); + else + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Unsupported type at the right-side of IN. Expected Array or Tuple. Actual {}", + value_type->getName()); + } + else + { + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Unsupported types for IN. First argument type {}. Second argument type {}", + expression_type->getName(), + value_type->getName()); + } + + auto set = std::make_shared(size_limits_for_set, false /*fill_set_elements*/, tranform_null_in); + + set->setHeader(result_block.cloneEmpty().getColumnsWithTypeAndName()); + set->insertFromBlock(result_block.getColumnsWithTypeAndName()); + set->finishInsert(); + + return set; +} + +} diff --git a/src/Analyzer/SetUtils.h b/src/Analyzer/SetUtils.h new file mode 100644 index 00000000000..7afc8e5259c --- /dev/null +++ b/src/Analyzer/SetUtils.h @@ -0,0 +1,30 @@ +#pragma once + +#include + +#include + +#include + +namespace DB +{ + +class Set; +using SetPtr = std::shared_ptr; + +/** Make set for constant part of IN subquery. 
+ * Throws exception if parameters are not valid for IN function. + * + * Example: SELECT id FROM test_table WHERE id IN (1, 2, 3, 4); + * Example: SELECT id FROM test_table WHERE id IN ((1, 2), (3, 4)); + * + * @param expression_type - type of first argument of function IN. + * @param value - constant value of second argument of function IN. + * @param value_type - type of second argument of function IN. + * @param settings - query settings. + * + * @return SetPtr for constant value. + */ +SetPtr makeSetForConstantValue(const DataTypePtr & expression_type, const Field & value, const DataTypePtr & value_type, const Settings & settings); + +} diff --git a/src/Analyzer/SortNode.cpp b/src/Analyzer/SortNode.cpp new file mode 100644 index 00000000000..3f91724e9b7 --- /dev/null +++ b/src/Analyzer/SortNode.cpp @@ -0,0 +1,136 @@ +#include + +#include + +#include +#include + +#include +#include +#include + +namespace DB +{ + +const char * toString(SortDirection sort_direction) +{ + switch (sort_direction) + { + case SortDirection::ASCENDING: return "ASCENDING"; + case SortDirection::DESCENDING: return "DESCENDING"; + } +} + +SortNode::SortNode(QueryTreeNodePtr expression_, + SortDirection sort_direction_, + std::optional nulls_sort_direction_, + std::shared_ptr collator_, + bool with_fill_) + : IQueryTreeNode(children_size) + , sort_direction(sort_direction_) + , nulls_sort_direction(nulls_sort_direction_) + , collator(std::move(collator_)) + , with_fill(with_fill_) +{ + children[sort_expression_child_index] = std::move(expression_); +} + +void SortNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const +{ + buffer << std::string(indent, ' ') << "SORT id: " << format_state.getNodeId(this); + + buffer << ", sort_direction: " << toString(sort_direction); + if (nulls_sort_direction) + buffer << ", nulls_sort_direction: " << toString(*nulls_sort_direction); + + if (collator) + buffer << ", collator: " << collator->getLocale(); + + buffer << ", with_fill: " << with_fill; + + buffer << '\n' << std::string(indent + 2, ' ') << "EXPRESSION\n"; + getExpression()->dumpTreeImpl(buffer, format_state, indent + 4); + + if (hasFillFrom()) + { + buffer << '\n' << std::string(indent + 2, ' ') << "FILL FROM\n"; + getFillFrom()->dumpTreeImpl(buffer, format_state, indent + 4); + } + + if (hasFillTo()) + { + buffer << '\n' << std::string(indent + 2, ' ') << "FILL TO\n"; + getFillTo()->dumpTreeImpl(buffer, format_state, indent + 4); + } + + if (hasFillStep()) + { + buffer << '\n' << std::string(indent + 2, ' ') << "FILL STEP\n"; + getFillStep()->dumpTreeImpl(buffer, format_state, indent + 4); + } +} + +bool SortNode::isEqualImpl(const IQueryTreeNode & rhs) const +{ + const auto & rhs_typed = assert_cast(rhs); + if (sort_direction != rhs_typed.sort_direction || + nulls_sort_direction != rhs_typed.nulls_sort_direction || + with_fill != rhs_typed.with_fill) + return false; + + if (!collator && !rhs_typed.collator) + return true; + else if (collator && !rhs_typed.collator) + return false; + else if (!collator && rhs_typed.collator) + return false; + + return collator->getLocale() == rhs_typed.collator->getLocale(); +} + +void SortNode::updateTreeHashImpl(HashState & hash_state) const +{ + hash_state.update(sort_direction); + hash_state.update(nulls_sort_direction); + hash_state.update(with_fill); + + if (collator) + { + const auto & locale = collator->getLocale(); + + hash_state.update(locale.size()); + hash_state.update(locale); + } +} + +QueryTreeNodePtr SortNode::cloneImpl() const +{ + 
return std::make_shared(nullptr /*expression*/, sort_direction, nulls_sort_direction, collator, with_fill); +} + +ASTPtr SortNode::toASTImpl() const +{ + auto result = std::make_shared(); + result->direction = sort_direction == SortDirection::ASCENDING ? 1 : -1; + result->nulls_direction = result->direction; + if (nulls_sort_direction) + result->nulls_direction = *nulls_sort_direction == SortDirection::ASCENDING ? 1 : -1; + + result->nulls_direction_was_explicitly_specified = nulls_sort_direction.has_value(); + + result->with_fill = with_fill; + result->fill_from = hasFillFrom() ? getFillFrom()->toAST() : nullptr; + result->fill_to = hasFillTo() ? getFillTo()->toAST() : nullptr; + result->fill_step = hasFillStep() ? getFillStep()->toAST() : nullptr; + result->children.push_back(getExpression()->toAST()); + + if (collator) + { + result->children.push_back(std::make_shared(Field(collator->getLocale()))); + result->collation = result->children.back(); + } + + return result; +} + +} diff --git a/src/Analyzer/SortNode.h b/src/Analyzer/SortNode.h new file mode 100644 index 00000000000..04f9fe798e1 --- /dev/null +++ b/src/Analyzer/SortNode.h @@ -0,0 +1,155 @@ +#pragma once + +#include + +#include +#include + +namespace DB +{ + +/** Sort node represents sort description for expression that is part of ORDER BY in query tree. + * Example: SELECT * FROM test_table ORDER BY sort_column_1, sort_column_2; + * Sort node optionally contain collation, fill from, fill to, and fill step. + */ +class SortNode; +using SortNodePtr = std::shared_ptr; + +enum class SortDirection +{ + ASCENDING = 0, + DESCENDING = 1 +}; + +const char * toString(SortDirection sort_direction); + +class SortNode final : public IQueryTreeNode +{ +public: + /// Initialize sort node with sort expression + explicit SortNode(QueryTreeNodePtr expression_, + SortDirection sort_direction_ = SortDirection::ASCENDING, + std::optional nulls_sort_direction_ = {}, + std::shared_ptr collator_ = nullptr, + bool with_fill = false); + + /// Get sort expression + const QueryTreeNodePtr & getExpression() const + { + return children[sort_expression_child_index]; + } + + /// Get sort expression + QueryTreeNodePtr & getExpression() + { + return children[sort_expression_child_index]; + } + + /// Returns true if sort node has with fill, false otherwise + bool withFill() const + { + return with_fill; + } + + /// Returns true if sort node has fill from, false otherwise + bool hasFillFrom() const + { + return children[fill_from_child_index] != nullptr; + } + + /// Get fill from + const QueryTreeNodePtr & getFillFrom() const + { + return children[fill_from_child_index]; + } + + /// Get fill from + QueryTreeNodePtr & getFillFrom() + { + return children[fill_from_child_index]; + } + + /// Returns true if sort node has fill to, false otherwise + bool hasFillTo() const + { + return children[fill_to_child_index] != nullptr; + } + + /// Get fill to + const QueryTreeNodePtr & getFillTo() const + { + return children[fill_to_child_index]; + } + + /// Get fill to + QueryTreeNodePtr & getFillTo() + { + return children[fill_to_child_index]; + } + + /// Returns true if sort node has fill step, false otherwise + bool hasFillStep() const + { + return children[fill_step_child_index] != nullptr; + } + + /// Get fill step + const QueryTreeNodePtr & getFillStep() const + { + return children[fill_step_child_index]; + } + + /// Get fill step + QueryTreeNodePtr & getFillStep() + { + return children[fill_step_child_index]; + } + + /// Get collator + const std::shared_ptr & 
getCollator() const + { + return collator; + } + + /// Get sort direction + SortDirection getSortDirection() const + { + return sort_direction; + } + + /// Get nulls sort direction + std::optional getNullsSortDirection() const + { + return nulls_sort_direction; + } + + QueryTreeNodeType getNodeType() const override + { + return QueryTreeNodeType::SORT; + } + + void dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const override; + +protected: + bool isEqualImpl(const IQueryTreeNode & rhs) const override; + + void updateTreeHashImpl(HashState & hash_state) const override; + + QueryTreeNodePtr cloneImpl() const override; + + ASTPtr toASTImpl() const override; + +private: + static constexpr size_t sort_expression_child_index = 0; + static constexpr size_t fill_from_child_index = 1; + static constexpr size_t fill_to_child_index = 2; + static constexpr size_t fill_step_child_index = 3; + static constexpr size_t children_size = fill_step_child_index + 1; + + SortDirection sort_direction = SortDirection::ASCENDING; + std::optional nulls_sort_direction; + std::shared_ptr collator; + bool with_fill = false; +}; + +} diff --git a/src/Analyzer/TableExpressionModifiers.cpp b/src/Analyzer/TableExpressionModifiers.cpp new file mode 100644 index 00000000000..c8002f44c97 --- /dev/null +++ b/src/Analyzer/TableExpressionModifiers.cpp @@ -0,0 +1,66 @@ +#include + +#include + +#include +#include +#include +#include + +namespace DB +{ + +void TableExpressionModifiers::dump(WriteBuffer & buffer) const +{ + buffer << "final: " << has_final; + + if (sample_size_ratio) + buffer << ", sample_size: " << ASTSampleRatio::toString(*sample_size_ratio); + + if (sample_offset_ratio) + buffer << ", sample_offset: " << ASTSampleRatio::toString(*sample_offset_ratio); +} + +void TableExpressionModifiers::updateTreeHash(SipHash & hash_state) const +{ + hash_state.update(has_final); + hash_state.update(sample_size_ratio.has_value()); + hash_state.update(sample_offset_ratio.has_value()); + + if (sample_size_ratio.has_value()) + { + hash_state.update(sample_size_ratio->numerator); + hash_state.update(sample_size_ratio->denominator); + } + + if (sample_offset_ratio.has_value()) + { + hash_state.update(sample_offset_ratio->numerator); + hash_state.update(sample_offset_ratio->denominator); + } +} + +String TableExpressionModifiers::formatForErrorMessage() const +{ + WriteBufferFromOwnString buffer; + if (has_final) + buffer << "FINAL"; + + if (sample_size_ratio) + { + if (has_final) + buffer << ' '; + buffer << "SAMPLE " << ASTSampleRatio::toString(*sample_size_ratio); + } + + if (sample_offset_ratio) + { + if (has_final || sample_size_ratio) + buffer << ' '; + buffer << "OFFSET " << ASTSampleRatio::toString(*sample_offset_ratio); + } + + return buffer.str(); +} + +} diff --git a/src/Analyzer/TableExpressionModifiers.h b/src/Analyzer/TableExpressionModifiers.h new file mode 100644 index 00000000000..f61c2a61610 --- /dev/null +++ b/src/Analyzer/TableExpressionModifiers.h @@ -0,0 +1,80 @@ +#pragma once + +#include + +namespace DB +{ + +/** Modifiers that can be used for table, table function and subquery in JOIN TREE. 
+ * + * Example: SELECT * FROM test_table SAMPLE 0.1 OFFSET 0.1 FINAL + */ +class TableExpressionModifiers +{ +public: + using Rational = ASTSampleRatio::Rational; + + TableExpressionModifiers(bool has_final_, + std::optional sample_size_ratio_, + std::optional sample_offset_ratio_) + : has_final(has_final_) + , sample_size_ratio(sample_size_ratio_) + , sample_offset_ratio(sample_offset_ratio_) + {} + + /// Returns true if final is specified, false otherwise + bool hasFinal() const + { + return has_final; + } + + /// Returns true if sample size ratio is specified, false otherwise + bool hasSampleSizeRatio() const + { + return sample_size_ratio.has_value(); + } + + /// Get sample size ratio + std::optional getSampleSizeRatio() const + { + return sample_size_ratio; + } + + /// Returns true if sample offset ratio is specified, false otherwise + bool hasSampleOffsetRatio() const + { + return sample_offset_ratio.has_value(); + } + + /// Get sample offset ratio + std::optional getSampleOffsetRatio() const + { + return sample_offset_ratio; + } + + /// Dump into buffer + void dump(WriteBuffer & buffer) const; + + /// Update tree hash + void updateTreeHash(SipHash & hash_state) const; + + /// Format for error message + String formatForErrorMessage() const; + +private: + bool has_final = false; + std::optional sample_size_ratio; + std::optional sample_offset_ratio; +}; + +inline bool operator==(const TableExpressionModifiers & lhs, const TableExpressionModifiers & rhs) +{ + return lhs.hasFinal() == rhs.hasFinal() && lhs.getSampleSizeRatio() == rhs.getSampleSizeRatio() && lhs.getSampleOffsetRatio() == rhs.getSampleOffsetRatio(); +} + +inline bool operator!=(const TableExpressionModifiers & lhs, const TableExpressionModifiers & rhs) +{ + return !(lhs == rhs); +} + +} diff --git a/src/Analyzer/TableFunctionNode.cpp b/src/Analyzer/TableFunctionNode.cpp new file mode 100644 index 00000000000..c8cd05cf685 --- /dev/null +++ b/src/Analyzer/TableFunctionNode.cpp @@ -0,0 +1,136 @@ +#include + +#include +#include +#include + +#include + +#include + +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +TableFunctionNode::TableFunctionNode(String table_function_name_) + : IQueryTreeNode(children_size) + , table_function_name(table_function_name_) + , storage_id("system", "one") +{ + children[arguments_child_index] = std::make_shared(); +} + +void TableFunctionNode::resolve(TableFunctionPtr table_function_value, StoragePtr storage_value, ContextPtr context) +{ + table_function = std::move(table_function_value); + storage = std::move(storage_value); + storage_id = storage->getStorageID(); + storage_snapshot = storage->getStorageSnapshot(storage->getInMemoryMetadataPtr(), context); +} + +const StorageID & TableFunctionNode::getStorageID() const +{ + if (!storage) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Table function node {} is not resolved", table_function_name); + + return storage_id; +} + +const StorageSnapshotPtr & TableFunctionNode::getStorageSnapshot() const +{ + if (!storage) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Table function node {} is not resolved", table_function_name); + + return storage_snapshot; +} + +void TableFunctionNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const +{ + buffer << std::string(indent, ' ') << "TABLE_FUNCTION id: " << format_state.getNodeId(this); + + if (hasAlias()) + buffer << ", alias: " << getAlias(); + + buffer << ", table_function_name: " << table_function_name; + + if 
(table_expression_modifiers) + { + buffer << ", "; + table_expression_modifiers->dump(buffer); + } + + const auto & arguments = getArguments(); + if (!arguments.getNodes().empty()) + { + buffer << '\n' << std::string(indent + 2, ' ') << "ARGUMENTS\n"; + arguments.dumpTreeImpl(buffer, format_state, indent + 4); + } +} + +bool TableFunctionNode::isEqualImpl(const IQueryTreeNode & rhs) const +{ + const auto & rhs_typed = assert_cast(rhs); + if (table_function_name != rhs_typed.table_function_name) + return false; + + if (storage && rhs_typed.storage) + return storage_id == rhs_typed.storage_id; + + if (table_expression_modifiers && rhs_typed.table_expression_modifiers && table_expression_modifiers != rhs_typed.table_expression_modifiers) + return false; + else if (table_expression_modifiers && !rhs_typed.table_expression_modifiers) + return false; + else if (!table_expression_modifiers && rhs_typed.table_expression_modifiers) + return false; + + return true; +} + +void TableFunctionNode::updateTreeHashImpl(HashState & state) const +{ + state.update(table_function_name.size()); + state.update(table_function_name); + + if (storage) + { + auto full_name = storage_id.getFullNameNotQuoted(); + state.update(full_name.size()); + state.update(full_name); + } + + if (table_expression_modifiers) + table_expression_modifiers->updateTreeHash(state); +} + +QueryTreeNodePtr TableFunctionNode::cloneImpl() const +{ + auto result = std::make_shared(table_function_name); + + result->storage = storage; + result->storage_id = storage_id; + result->storage_snapshot = storage_snapshot; + result->table_expression_modifiers = table_expression_modifiers; + + return result; +} + +ASTPtr TableFunctionNode::toASTImpl() const +{ + auto table_function_ast = std::make_shared(); + + table_function_ast->name = table_function_name; + + const auto & arguments = getArguments(); + table_function_ast->children.push_back(arguments.toAST()); + table_function_ast->arguments = table_function_ast->children.back(); + + return table_function_ast; +} + +} diff --git a/src/Analyzer/TableFunctionNode.h b/src/Analyzer/TableFunctionNode.h new file mode 100644 index 00000000000..292ab740c5b --- /dev/null +++ b/src/Analyzer/TableFunctionNode.h @@ -0,0 +1,154 @@ +#pragma once + +#include +#include +#include + +#include +#include + +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +/** Table function node represents table function in query tree. + * Example: SELECT a FROM table_function(arguments...). + * + * In query tree table function arguments are represented by ListNode. + * + * Table function resolution must be done during query analysis pass. 
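+ *
+ * Illustrative sketch: for SELECT a FROM table_function(arguments...) the query tree builder
+ * creates TableFunctionNode("table_function") and fills getArguments() with the argument
+ * expressions; the analysis pass is then expected to call resolve(table_function, storage, context),
+ * after which getStorageID() and getStorageSnapshot() return resolved values instead of throwing.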
+ */ +class ITableFunction; +using TableFunctionPtr = std::shared_ptr; + +class TableFunctionNode; +using TableFunctionNodePtr = std::shared_ptr; + +class TableFunctionNode : public IQueryTreeNode +{ +public: + /// Construct table function node with table function name + explicit TableFunctionNode(String table_function_name); + + /// Get table function name + const String & getTableFunctionName() const + { + return table_function_name; + } + + /// Get arguments + const ListNode & getArguments() const + { + return children[arguments_child_index]->as(); + } + + /// Get arguments + ListNode & getArguments() + { + return children[arguments_child_index]->as(); + } + + /// Get arguments node + const QueryTreeNodePtr & getArgumentsNode() const + { + return children[arguments_child_index]; + } + + /// Get arguments node + QueryTreeNodePtr & getArgumentsNode() + { + return children[arguments_child_index]; + } + + /// Returns true, if table function is resolved, false otherwise + bool isResolved() const + { + return storage != nullptr && table_function != nullptr; + } + + /// Get table function, returns nullptr if table function node is not resolved + const TableFunctionPtr & getTableFunction() const + { + return table_function; + } + + /// Get storage, returns nullptr if table function node is not resolved + const StoragePtr & getStorage() const + { + return storage; + } + + /// Get storage, throws exception if table function node is not resolved + const StoragePtr & getStorageOrThrow() const + { + if (!storage) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Table function node is not resolved"); + + return storage; + } + + /// Resolve table function with table function, storage and context + void resolve(TableFunctionPtr table_function_value, StoragePtr storage_value, ContextPtr context); + + /// Get storage id, throws exception if function node is not resolved + const StorageID & getStorageID() const; + + /// Get storage snapshot, throws exception if function node is not resolved + const StorageSnapshotPtr & getStorageSnapshot() const; + + /// Return true if table function node has table expression modifiers, false otherwise + bool hasTableExpressionModifiers() const + { + return table_expression_modifiers.has_value(); + } + + /// Get table expression modifiers + const std::optional & getTableExpressionModifiers() const + { + return table_expression_modifiers; + } + + /// Set table expression modifiers + void setTableExpressionModifiers(TableExpressionModifiers table_expression_modifiers_value) + { + table_expression_modifiers = std::move(table_expression_modifiers_value); + } + + QueryTreeNodeType getNodeType() const override + { + return QueryTreeNodeType::TABLE_FUNCTION; + } + + void dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const override; + +protected: + bool isEqualImpl(const IQueryTreeNode & rhs) const override; + + void updateTreeHashImpl(HashState & state) const override; + + QueryTreeNodePtr cloneImpl() const override; + + ASTPtr toASTImpl() const override; + +private: + String table_function_name; + TableFunctionPtr table_function; + StoragePtr storage; + StorageID storage_id; + StorageSnapshotPtr storage_snapshot; + std::optional table_expression_modifiers; + + static constexpr size_t arguments_child_index = 0; + static constexpr size_t children_size = arguments_child_index + 1; +}; + +} + diff --git a/src/Analyzer/TableNode.cpp b/src/Analyzer/TableNode.cpp new file mode 100644 index 00000000000..0d9a351e9a2 --- /dev/null +++ 
b/src/Analyzer/TableNode.cpp @@ -0,0 +1,82 @@ +#include + +#include +#include +#include + +#include + +#include + +#include + +namespace DB +{ + +TableNode::TableNode(StoragePtr storage_, StorageID storage_id_, TableLockHolder storage_lock_, StorageSnapshotPtr storage_snapshot_) + : IQueryTreeNode(children_size) + , storage(std::move(storage_)) + , storage_id(std::move(storage_id_)) + , storage_lock(std::move(storage_lock_)) + , storage_snapshot(std::move(storage_snapshot_)) +{} + +TableNode::TableNode(StoragePtr storage_, TableLockHolder storage_lock_, StorageSnapshotPtr storage_snapshot_) + : TableNode(storage_, storage_->getStorageID(), std::move(storage_lock_), std::move(storage_snapshot_)) +{ +} + +void TableNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const +{ + buffer << std::string(indent, ' ') << "TABLE id: " << format_state.getNodeId(this); + + if (hasAlias()) + buffer << ", alias: " << getAlias(); + + buffer << ", table_name: " << storage_id.getFullNameNotQuoted(); + + if (table_expression_modifiers) + { + buffer << ", "; + table_expression_modifiers->dump(buffer); + } +} + +bool TableNode::isEqualImpl(const IQueryTreeNode & rhs) const +{ + const auto & rhs_typed = assert_cast(rhs); + + if (table_expression_modifiers && rhs_typed.table_expression_modifiers && table_expression_modifiers != rhs_typed.table_expression_modifiers) + return false; + else if (table_expression_modifiers && !rhs_typed.table_expression_modifiers) + return false; + else if (!table_expression_modifiers && rhs_typed.table_expression_modifiers) + return false; + + return storage_id == rhs_typed.storage_id; +} + +void TableNode::updateTreeHashImpl(HashState & state) const +{ + auto full_name = storage_id.getFullNameNotQuoted(); + state.update(full_name.size()); + state.update(full_name); + + if (table_expression_modifiers) + table_expression_modifiers->updateTreeHash(state); +} + +QueryTreeNodePtr TableNode::cloneImpl() const +{ + auto result_table_node = std::make_shared(storage, storage_id, storage_lock, storage_snapshot); + result_table_node->table_expression_modifiers = table_expression_modifiers; + + return result_table_node; +} + +ASTPtr TableNode::toASTImpl() const +{ + return std::make_shared(storage_id.getDatabaseName(), storage_id.getTableName()); +} + +} diff --git a/src/Analyzer/TableNode.h b/src/Analyzer/TableNode.h new file mode 100644 index 00000000000..c7feedd908f --- /dev/null +++ b/src/Analyzer/TableNode.h @@ -0,0 +1,101 @@ +#pragma once + +#include +#include +#include + +#include +#include + +#include +#include + +namespace DB +{ + +/** Table node represents table in query tree. + * Example: SELECT a FROM test_table. + * test_table - is identifier, that during query analysis pass must be resolved into table node. 
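+ *
+ * Illustrative sketch (variable names are assumptions): the resolving pass is expected to construct
+ * the node from an already opened table, roughly
+ *
+ *     auto table_node = std::make_shared<TableNode>(storage, storage_lock, storage_snapshot);
+ *
+ * after which getStorage(), getStorageID() and getStorageSnapshot() expose the resolved table, and
+ * FINAL / SAMPLE modifiers, if present, are attached via setTableExpressionModifiers().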
+ */ +class TableNode; +using TableNodePtr = std::shared_ptr; + +class TableNode : public IQueryTreeNode +{ +public: + /// Construct table node with storage, storage id, storage lock, storage snapshot + explicit TableNode(StoragePtr storage_, StorageID storage_id_, TableLockHolder storage_lock_, StorageSnapshotPtr storage_snapshot_); + + /// Construct table node with storage, storage lock, storage snapshot + explicit TableNode(StoragePtr storage_, TableLockHolder storage_lock_, StorageSnapshotPtr storage_snapshot_); + + /// Get storage + const StoragePtr & getStorage() const + { + return storage; + } + + /// Get storage id + const StorageID & getStorageID() const + { + return storage_id; + } + + /// Get storage snapshot + const StorageSnapshotPtr & getStorageSnapshot() const + { + return storage_snapshot; + } + + /// Get storage lock + const TableLockHolder & getStorageLock() const + { + return storage_lock; + } + + /// Return true if table node has table expression modifiers, false otherwise + bool hasTableExpressionModifiers() const + { + return table_expression_modifiers.has_value(); + } + + /// Get table expression modifiers + const std::optional & getTableExpressionModifiers() const + { + return table_expression_modifiers; + } + + /// Set table expression modifiers + void setTableExpressionModifiers(TableExpressionModifiers table_expression_modifiers_value) + { + table_expression_modifiers = std::move(table_expression_modifiers_value); + } + + QueryTreeNodeType getNodeType() const override + { + return QueryTreeNodeType::TABLE; + } + + void dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const override; + +protected: + bool isEqualImpl(const IQueryTreeNode & rhs) const override; + + void updateTreeHashImpl(HashState & state) const override; + + QueryTreeNodePtr cloneImpl() const override; + + ASTPtr toASTImpl() const override; + +private: + StoragePtr storage; + StorageID storage_id; + TableLockHolder storage_lock; + StorageSnapshotPtr storage_snapshot; + std::optional table_expression_modifiers; + + static constexpr size_t children_size = 0; +}; + +} + diff --git a/src/Analyzer/UnionNode.cpp b/src/Analyzer/UnionNode.cpp new file mode 100644 index 00000000000..67860438335 --- /dev/null +++ b/src/Analyzer/UnionNode.cpp @@ -0,0 +1,176 @@ +#include + +#include +#include + +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int TYPE_MISMATCH; + extern const int BAD_ARGUMENTS; +} + +UnionNode::UnionNode(SelectUnionMode union_mode_) + : IQueryTreeNode(children_size) + , union_mode(union_mode_) +{ + if (union_mode == SelectUnionMode::UNION_DEFAULT || + union_mode == SelectUnionMode::EXCEPT_DEFAULT || + union_mode == SelectUnionMode::INTERSECT_DEFAULT) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "UNION mode {} must be normalized", toString(union_mode)); + + children[queries_child_index] = std::make_shared(); +} + +NamesAndTypes UnionNode::computeProjectionColumns() const +{ + std::vector projections; + + NamesAndTypes query_node_projection; + + const auto & query_nodes = getQueries().getNodes(); + projections.reserve(query_nodes.size()); + + for (const auto & query_node : query_nodes) + { + if (auto * query_node_typed = query_node->as()) + query_node_projection = query_node_typed->getProjectionColumns(); + else if (auto * union_node_typed = query_node->as()) + query_node_projection = 
union_node_typed->computeProjectionColumns(); + + projections.push_back(query_node_projection); + + if (query_node_projection.size() != projections.front().size()) + throw Exception(ErrorCodes::TYPE_MISMATCH, "UNION different number of columns in queries"); + } + + NamesAndTypes result_columns; + + size_t projections_size = projections.size(); + DataTypes projection_column_types; + projection_column_types.resize(projections_size); + + size_t columns_size = query_node_projection.size(); + for (size_t column_index = 0; column_index < columns_size; ++column_index) + { + for (size_t projection_index = 0; projection_index < projections_size; ++projection_index) + projection_column_types[projection_index] = projections[projection_index][column_index].type; + + auto result_type = getLeastSupertype(projection_column_types); + result_columns.emplace_back(projections.front()[column_index].name, std::move(result_type)); + } + + return result_columns; +} + +void UnionNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const +{ + buffer << std::string(indent, ' ') << "UNION id: " << format_state.getNodeId(this); + + if (hasAlias()) + buffer << ", alias: " << getAlias(); + + if (is_subquery) + buffer << ", is_subquery: " << is_subquery; + + if (is_cte) + buffer << ", is_cte: " << is_cte; + + if (!cte_name.empty()) + buffer << ", cte_name: " << cte_name; + + if (constant_value) + { + buffer << ", constant_value: " << constant_value->getValue().dump(); + buffer << ", constant_value_type: " << constant_value->getType()->getName(); + } + + buffer << ", union_mode: " << toString(union_mode); + + buffer << '\n' << std::string(indent + 2, ' ') << "QUERIES\n"; + getQueriesNode()->dumpTreeImpl(buffer, format_state, indent + 4); +} + +bool UnionNode::isEqualImpl(const IQueryTreeNode & rhs) const +{ + const auto & rhs_typed = assert_cast(rhs); + if (constant_value && rhs_typed.constant_value && *constant_value != *rhs_typed.constant_value) + return false; + else if (constant_value && !rhs_typed.constant_value) + return false; + else if (!constant_value && rhs_typed.constant_value) + return false; + + return is_subquery == rhs_typed.is_subquery && is_cte == rhs_typed.is_cte && cte_name == rhs_typed.cte_name && + union_mode == rhs_typed.union_mode; +} + +void UnionNode::updateTreeHashImpl(HashState & state) const +{ + state.update(is_subquery); + state.update(is_cte); + + state.update(cte_name.size()); + state.update(cte_name); + + state.update(static_cast(union_mode)); + + if (constant_value) + { + auto constant_dump = applyVisitor(FieldVisitorToString(), constant_value->getValue()); + state.update(constant_dump.size()); + state.update(constant_dump); + + auto constant_value_type_name = constant_value->getType()->getName(); + state.update(constant_value_type_name.size()); + state.update(constant_value_type_name); + } +} + +QueryTreeNodePtr UnionNode::cloneImpl() const +{ + auto result_union_node = std::make_shared(union_mode); + + result_union_node->is_subquery = is_subquery; + result_union_node->is_cte = is_cte; + result_union_node->cte_name = cte_name; + result_union_node->constant_value = constant_value; + + return result_union_node; +} + +ASTPtr UnionNode::toASTImpl() const +{ + auto select_with_union_query = std::make_shared(); + select_with_union_query->union_mode = union_mode; + select_with_union_query->is_normalized = true; + select_with_union_query->children.push_back(getQueriesNode()->toAST()); + select_with_union_query->list_of_selects = 
select_with_union_query->children.back(); + + return select_with_union_query; +} + +} diff --git a/src/Analyzer/UnionNode.h b/src/Analyzer/UnionNode.h new file mode 100644 index 00000000000..9ef76591597 --- /dev/null +++ b/src/Analyzer/UnionNode.h @@ -0,0 +1,163 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include + +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int UNSUPPORTED_METHOD; +} + +/** Union node represents union of queries in query tree. + * Union node must be initialized with normalized union mode. + * + * Example: (SELECT id FROM test_table) UNION ALL (SELECT id FROM test_table_2); + * Example: (SELECT id FROM test_table) UNION DISTINCT (SELECT id FROM test_table_2); + * Example: (SELECT id FROM test_table) EXCEPT ALL (SELECT id FROM test_table_2); + * Example: (SELECT id FROM test_table) EXCEPT DISTINCT (SELECT id FROM test_table_2); + * Example: (SELECT id FROM test_table) INTERSECT ALL (SELECT id FROM test_table_2); + * Example: (SELECT id FROM test_table) INTERSECT DISTINCT (SELECT id FROM test_table_2); + * + * Union node can be used as CTE. + * Example: WITH cte_subquery AS ((SELECT id FROM test_table) UNION ALL (SELECT id FROM test_table_2)) SELECT * FROM cte_subquery; + * + * Union node can be used as scalar subquery. + * Example: SELECT (SELECT 1 UNION DISTINCT SELECT 1); + * + * During query analysis pass union node queries must be resolved. + */ +class UnionNode; +using UnionNodePtr = std::shared_ptr; + +class UnionNode final : public IQueryTreeNode +{ +public: + /// Construct union node with normalized union mode + explicit UnionNode(SelectUnionMode union_mode_); + + /// Returns true if union node is subquery, false otherwise + bool isSubquery() const + { + return is_subquery; + } + + /// Set union node is subquery value + void setIsSubquery(bool is_subquery_value) + { + is_subquery = is_subquery_value; + } + + /// Returns true if union node is CTE, false otherwise + bool isCTE() const + { + return is_cte; + } + + /// Set union node is CTE + void setIsCTE(bool is_cte_value) + { + is_cte = is_cte_value; + } + + /// Get union node CTE name + const std::string & getCTEName() const + { + return cte_name; + } + + /// Set union node CTE name + void setCTEName(std::string cte_name_value) + { + cte_name = std::move(cte_name_value); + } + + /// Get union mode + SelectUnionMode getUnionMode() const + { + return union_mode; + } + + /// Get union node queries + const ListNode & getQueries() const + { + return children[queries_child_index]->as(); + } + + /// Get union node queries + ListNode & getQueries() + { + return children[queries_child_index]->as(); + } + + /// Get union node queries node + const QueryTreeNodePtr & getQueriesNode() const + { + return children[queries_child_index]; + } + + /// Get union node queries node + QueryTreeNodePtr & getQueriesNode() + { + return children[queries_child_index]; + } + + /// Compute union node projection columns + NamesAndTypes computeProjectionColumns() const; + + QueryTreeNodeType getNodeType() const override + { + return QueryTreeNodeType::UNION; + } + + DataTypePtr getResultType() const override + { + if (constant_value) + return constant_value->getType(); + + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "Method getResultType is not supported for non scalar union node"); + } + + /// Perform constant folding for scalar union node + void performConstantFolding(ConstantValuePtr constant_folded_value) + { + constant_value = std::move(constant_folded_value); + } + + 
ConstantValuePtr getConstantValueOrNull() const override + { + return constant_value; + } + + void dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const override; + +protected: + bool isEqualImpl(const IQueryTreeNode & rhs) const override; + + void updateTreeHashImpl(HashState &) const override; + + QueryTreeNodePtr cloneImpl() const override; + + ASTPtr toASTImpl() const override; + +private: + bool is_subquery = false; + bool is_cte = false; + std::string cte_name; + SelectUnionMode union_mode; + ConstantValuePtr constant_value; + + static constexpr size_t queries_child_index = 0; + static constexpr size_t children_size = queries_child_index + 1; +}; + +} diff --git a/src/Analyzer/Utils.cpp b/src/Analyzer/Utils.cpp new file mode 100644 index 00000000000..b504a5b5787 --- /dev/null +++ b/src/Analyzer/Utils.cpp @@ -0,0 +1,329 @@ +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +bool isNodePartOfTree(const IQueryTreeNode * node, const IQueryTreeNode * root) +{ + std::vector nodes_to_process; + nodes_to_process.push_back(root); + + while (!nodes_to_process.empty()) + { + const auto * subtree_node = nodes_to_process.back(); + nodes_to_process.pop_back(); + + if (subtree_node == node) + return true; + + for (const auto & child : subtree_node->getChildren()) + { + if (child) + nodes_to_process.push_back(child.get()); + } + } + + return false; +} + +bool isNameOfInFunction(const std::string & function_name) +{ + bool is_special_function_in = function_name == "in" || + function_name == "globalIn" || + function_name == "notIn" || + function_name == "globalNotIn" || + function_name == "nullIn" || + function_name == "globalNullIn" || + function_name == "notNullIn" || + function_name == "globalNotNullIn" || + function_name == "inIgnoreSet" || + function_name == "globalInIgnoreSet" || + function_name == "notInIgnoreSet" || + function_name == "globalNotInIgnoreSet" || + function_name == "nullInIgnoreSet" || + function_name == "globalNullInIgnoreSet" || + function_name == "notNullInIgnoreSet" || + function_name == "globalNotNullInIgnoreSet"; + + return is_special_function_in; +} + +static ASTPtr convertIntoTableExpressionAST(const QueryTreeNodePtr & table_expression_node) +{ + ASTPtr table_expression_node_ast; + auto node_type = table_expression_node->getNodeType(); + + if (node_type == QueryTreeNodeType::IDENTIFIER) + { + const auto & identifier_node = table_expression_node->as(); + const auto & identifier = identifier_node.getIdentifier(); + + if (identifier.getPartsSize() == 1) + table_expression_node_ast = std::make_shared(identifier[0]); + else if (identifier.getPartsSize() == 2) + table_expression_node_ast = std::make_shared(identifier[0], identifier[1]); + else + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Identifier for table expression must contain 1 or 2 parts. 
Actual '{}'", + identifier.getFullName()); + } + else + { + table_expression_node_ast = table_expression_node->toAST(); + } + + auto result_table_expression = std::make_shared(); + result_table_expression->children.push_back(table_expression_node_ast); + + std::optional table_expression_modifiers; + + if (node_type == QueryTreeNodeType::QUERY || node_type == QueryTreeNodeType::UNION) + { + result_table_expression->subquery = result_table_expression->children.back(); + } + else if (node_type == QueryTreeNodeType::TABLE || node_type == QueryTreeNodeType::IDENTIFIER) + { + if (auto * table_node = table_expression_node->as()) + table_expression_modifiers = table_node->getTableExpressionModifiers(); + else if (auto * identifier_node = table_expression_node->as()) + table_expression_modifiers = identifier_node->getTableExpressionModifiers(); + + result_table_expression->database_and_table_name = result_table_expression->children.back(); + } + else if (node_type == QueryTreeNodeType::TABLE_FUNCTION) + { + if (auto * table_function_node = table_expression_node->as()) + table_expression_modifiers = table_function_node->getTableExpressionModifiers(); + + result_table_expression->table_function = result_table_expression->children.back(); + } + else + { + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Expected identifier, table, query, union or table function. Actual {}", + table_expression_node->formatASTForErrorMessage()); + } + + if (table_expression_modifiers) + { + result_table_expression->final = table_expression_modifiers->hasFinal(); + + const auto & sample_size_ratio = table_expression_modifiers->getSampleSizeRatio(); + if (sample_size_ratio.has_value()) + result_table_expression->sample_size = std::make_shared(*sample_size_ratio); + + const auto & sample_offset_ratio = table_expression_modifiers->getSampleOffsetRatio(); + if (sample_offset_ratio.has_value()) + result_table_expression->sample_offset = std::make_shared(*sample_offset_ratio); + } + + return result_table_expression; +} + +void addTableExpressionOrJoinIntoTablesInSelectQuery(ASTPtr & tables_in_select_query_ast, const QueryTreeNodePtr & table_expression) +{ + auto table_expression_node_type = table_expression->getNodeType(); + + switch (table_expression_node_type) + { + case QueryTreeNodeType::IDENTIFIER: + [[fallthrough]]; + case QueryTreeNodeType::TABLE: + [[fallthrough]]; + case QueryTreeNodeType::QUERY: + [[fallthrough]]; + case QueryTreeNodeType::UNION: + [[fallthrough]]; + case QueryTreeNodeType::TABLE_FUNCTION: + { + auto table_expression_ast = convertIntoTableExpressionAST(table_expression); + + auto tables_in_select_query_element_ast = std::make_shared(); + tables_in_select_query_element_ast->children.push_back(std::move(table_expression_ast)); + tables_in_select_query_element_ast->table_expression = tables_in_select_query_element_ast->children.back(); + + tables_in_select_query_ast->children.push_back(std::move(tables_in_select_query_element_ast)); + break; + } + case QueryTreeNodeType::ARRAY_JOIN: + [[fallthrough]]; + case QueryTreeNodeType::JOIN: + { + auto table_expression_tables_in_select_query_ast = table_expression->toAST(); + tables_in_select_query_ast->children.reserve(table_expression_tables_in_select_query_ast->children.size()); + for (auto && table_element_ast : table_expression_tables_in_select_query_ast->children) + tables_in_select_query_ast->children.push_back(std::move(table_element_ast)); + break; + } + default: + { + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Unexpected node type for table expression. 
Expected identifier, table, table function, query, union, join or array join. Actual {}", + table_expression->getNodeTypeName()); + } + } +} + +QueryTreeNodes extractTableExpressions(const QueryTreeNodePtr & join_tree_node) +{ + QueryTreeNodes result; + + std::deque nodes_to_process; + nodes_to_process.push_back(join_tree_node); + + while (!nodes_to_process.empty()) + { + auto node_to_process = std::move(nodes_to_process.front()); + nodes_to_process.pop_front(); + + auto node_type = node_to_process->getNodeType(); + + switch (node_type) + { + case QueryTreeNodeType::TABLE: + [[fallthrough]]; + case QueryTreeNodeType::QUERY: + [[fallthrough]]; + case QueryTreeNodeType::UNION: + [[fallthrough]]; + case QueryTreeNodeType::TABLE_FUNCTION: + { + result.push_back(std::move(node_to_process)); + break; + } + case QueryTreeNodeType::ARRAY_JOIN: + { + auto & array_join_node = node_to_process->as(); + nodes_to_process.push_front(array_join_node.getTableExpression()); + break; + } + case QueryTreeNodeType::JOIN: + { + auto & join_node = node_to_process->as(); + nodes_to_process.push_front(join_node.getRightTableExpression()); + nodes_to_process.push_front(join_node.getLeftTableExpression()); + break; + } + default: + { + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Unexpected node type for table expression. Expected table, table function, query, union, join or array join. Actual {}", + node_to_process->getNodeTypeName()); + } + } + } + + return result; +} + +namespace +{ + +void buildTableExpressionsStackImpl(const QueryTreeNodePtr & join_tree_node, QueryTreeNodes & result) +{ + auto node_type = join_tree_node->getNodeType(); + + switch (node_type) + { + case QueryTreeNodeType::TABLE: + [[fallthrough]]; + case QueryTreeNodeType::QUERY: + [[fallthrough]]; + case QueryTreeNodeType::UNION: + [[fallthrough]]; + case QueryTreeNodeType::TABLE_FUNCTION: + { + result.push_back(join_tree_node); + break; + } + case QueryTreeNodeType::ARRAY_JOIN: + { + auto & array_join_node = join_tree_node->as(); + buildTableExpressionsStackImpl(array_join_node.getTableExpression(), result); + result.push_back(join_tree_node); + break; + } + case QueryTreeNodeType::JOIN: + { + auto & join_node = join_tree_node->as(); + buildTableExpressionsStackImpl(join_node.getLeftTableExpression(), result); + buildTableExpressionsStackImpl(join_node.getRightTableExpression(), result); + result.push_back(join_tree_node); + break; + } + default: + { + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Unexpected node type for table expression. Expected table, table function, query, union, join or array join. 
Actual {}", + join_tree_node->getNodeTypeName()); + } + } +} + +} + +QueryTreeNodes buildTableExpressionsStack(const QueryTreeNodePtr & join_tree_node) +{ + QueryTreeNodes result; + buildTableExpressionsStackImpl(join_tree_node, result); + + return result; +} + +QueryTreeNodePtr getColumnSourceForJoinNodeWithUsing(const QueryTreeNodePtr & join_node) +{ + QueryTreeNodePtr column_source_node = join_node; + + while (true) + { + auto column_source_node_type = column_source_node->getNodeType(); + if (column_source_node_type == QueryTreeNodeType::TABLE || + column_source_node_type == QueryTreeNodeType::TABLE_FUNCTION || + column_source_node_type == QueryTreeNodeType::QUERY || + column_source_node_type == QueryTreeNodeType::UNION) + { + break; + } + else if (column_source_node_type == QueryTreeNodeType::ARRAY_JOIN) + { + auto & array_join_node = column_source_node->as(); + column_source_node = array_join_node.getTableExpression(); + continue; + } + else if (column_source_node_type == QueryTreeNodeType::JOIN) + { + auto & join_node_typed = column_source_node->as(); + column_source_node = isRight(join_node_typed.getKind()) ? join_node_typed.getRightTableExpression() : join_node_typed.getLeftTableExpression(); + continue; + } + else + { + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Unexpected node type for table expression. Expected table, table function, query, union, join or array join. Actual {}", + column_source_node->getNodeTypeName()); + } + } + + return column_source_node; +} + +} diff --git a/src/Analyzer/Utils.h b/src/Analyzer/Utils.h new file mode 100644 index 00000000000..325a7d2fcc8 --- /dev/null +++ b/src/Analyzer/Utils.h @@ -0,0 +1,39 @@ +#pragma once + +#include + +namespace DB +{ + +/// Returns true if node part of root tree, false otherwise +bool isNodePartOfTree(const IQueryTreeNode * node, const IQueryTreeNode * root); + +/// Returns true if function name is name of IN function or its variations, false otherwise +bool isNameOfInFunction(const std::string & function_name); + +/** Add table expression in tables in select query children. + * If table expression node is not of identifier node, table node, query node, table function node, join node or array join node type throws logical error exception. + */ +void addTableExpressionOrJoinIntoTablesInSelectQuery(ASTPtr & tables_in_select_query_ast, const QueryTreeNodePtr & table_expression); + +/// Extract table, table function, query, union from join tree +QueryTreeNodes extractTableExpressions(const QueryTreeNodePtr & join_tree_node); + +/** Build table expressions stack that consists from table, table function, query, union, join, array join from join tree. + * + * Example: SELECT * FROM t1 INNER JOIN t2 INNER JOIN t3. + * Result table expressions stack: + * 1. t1 INNER JOIN t2 INNER JOIN t3 + * 2. t3 + * 3. t1 INNER JOIN t2 + * 4. t2 + * 5. t1 + */ +QueryTreeNodes buildTableExpressionsStack(const QueryTreeNodePtr & join_tree_node); + +/** Get column source for JOIN node with USING. 
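+ * The source is found by descending from the join node towards its left table expression
+ * (or the right one for RIGHT joins), through nested joins and array joins, until a table,
+ * table function, query or union node is reached.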
+ * Example: SELECT id FROM test_table_1 AS t1 INNER JOIN test_table_2 AS t2 USING (id); + */ +QueryTreeNodePtr getColumnSourceForJoinNodeWithUsing(const QueryTreeNodePtr & join_node); + +} diff --git a/src/Analyzer/WindowFunctionsUtils.cpp b/src/Analyzer/WindowFunctionsUtils.cpp new file mode 100644 index 00000000000..fb411f2418c --- /dev/null +++ b/src/Analyzer/WindowFunctionsUtils.cpp @@ -0,0 +1,78 @@ +#include + +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int ILLEGAL_AGGREGATION; +} + +namespace +{ + +class CollectWindowFunctionNodeVisitor : public ConstInDepthQueryTreeVisitor +{ +public: + explicit CollectWindowFunctionNodeVisitor(QueryTreeNodes * window_function_nodes_) + : window_function_nodes(window_function_nodes_) + {} + + explicit CollectWindowFunctionNodeVisitor(String assert_no_window_functions_place_message_) + : assert_no_window_functions_place_message(std::move(assert_no_window_functions_place_message_)) + {} + + void visitImpl(const QueryTreeNodePtr & node) + { + auto * function_node = node->as(); + if (!function_node || !function_node->isWindowFunction()) + return; + + if (!assert_no_window_functions_place_message.empty()) + throw Exception(ErrorCodes::ILLEGAL_AGGREGATION, + "Window function {} is found {} in query", + function_node->formatASTForErrorMessage(), + assert_no_window_functions_place_message); + + if (window_function_nodes) + window_function_nodes->push_back(node); + } + + static bool needChildVisit(const QueryTreeNodePtr &, const QueryTreeNodePtr & child_node) + { + return !(child_node->getNodeType() == QueryTreeNodeType::QUERY || child_node->getNodeType() == QueryTreeNodeType::UNION); + } + +private: + QueryTreeNodes * window_function_nodes = nullptr; + String assert_no_window_functions_place_message; +}; + +} + +QueryTreeNodes collectWindowFunctionNodes(const QueryTreeNodePtr & node) +{ + QueryTreeNodes window_function_nodes; + CollectWindowFunctionNodeVisitor visitor(&window_function_nodes); + visitor.visit(node); + + return window_function_nodes; +} + +void collectWindowFunctionNodes(const QueryTreeNodePtr & node, QueryTreeNodes & result) +{ + CollectWindowFunctionNodeVisitor visitor(&result); + visitor.visit(node); +} + +void assertNoWindowFunctionNodes(const QueryTreeNodePtr & node, const String & assert_no_window_functions_place_message) +{ + CollectWindowFunctionNodeVisitor visitor(assert_no_window_functions_place_message); + visitor.visit(node); +} + +} diff --git a/src/Analyzer/WindowFunctionsUtils.h b/src/Analyzer/WindowFunctionsUtils.h new file mode 100644 index 00000000000..b6ff5f22f93 --- /dev/null +++ b/src/Analyzer/WindowFunctionsUtils.h @@ -0,0 +1,23 @@ +#pragma once + +#include + +namespace DB +{ + +/** Collect window function nodes in node children. + * Do not visit subqueries. + */ +QueryTreeNodes collectWindowFunctionNodes(const QueryTreeNodePtr & node); + +/** Collect window function nodes in node children and add them into result. + * Do not visit subqueries. + */ +void collectWindowFunctionNodes(const QueryTreeNodePtr & node, QueryTreeNodes & result); + +/** Assert that there are no window function nodes in node children. + * Do not visit subqueries. 
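+ *
+ * Usage sketch (the accessor and the place message below are illustrative):
+ *     assertNoWindowFunctionNodes(query_node->getWhere(), "in WHERE");
+ * If a window function is found, an ILLEGAL_AGGREGATION exception naming that function is thrown.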
+ */ +void assertNoWindowFunctionNodes(const QueryTreeNodePtr & node, const String & assert_no_window_functions_place_message); + +} diff --git a/src/Analyzer/WindowNode.cpp b/src/Analyzer/WindowNode.cpp new file mode 100644 index 00000000000..3e8537302e5 --- /dev/null +++ b/src/Analyzer/WindowNode.cpp @@ -0,0 +1,144 @@ +#include + +#include + +#include +#include + +#include + +namespace DB +{ + +WindowNode::WindowNode(WindowFrame window_frame_) + : IQueryTreeNode(children_size) + , window_frame(std::move(window_frame_)) +{ + children[partition_by_child_index] = std::make_shared(); + children[order_by_child_index] = std::make_shared(); +} + +void WindowNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const +{ + buffer << std::string(indent, ' ') << "WINDOW id: " << format_state.getNodeId(this); + + if (hasAlias()) + buffer << ", alias: " << getAlias(); + + if (!parent_window_name.empty()) + buffer << ", parent_window_name: " << parent_window_name; + + buffer << ", frame_type: " << window_frame.type; + + auto window_frame_bound_type_to_string = [](WindowFrame::BoundaryType boundary_type, bool boundary_preceding) + { + std::string value; + + if (boundary_type == WindowFrame::BoundaryType::Unbounded) + value = "unbounded"; + else if (boundary_type == WindowFrame::BoundaryType::Current) + value = "current"; + else if (boundary_type == WindowFrame::BoundaryType::Offset) + value = "offset"; + + if (boundary_type != WindowFrame::BoundaryType::Current) + { + if (boundary_preceding) + value += " preceding"; + else + value += " following"; + } + + return value; + }; + + buffer << ", frame_begin_type: " << window_frame_bound_type_to_string(window_frame.begin_type, window_frame.begin_preceding); + buffer << ", frame_end_type: " << window_frame_bound_type_to_string(window_frame.end_type, window_frame.end_preceding); + + if (hasPartitionBy()) + { + buffer << '\n' << std::string(indent + 2, ' ') << "PARTITION BY\n"; + getPartitionBy().dumpTreeImpl(buffer, format_state, indent + 4); + } + + if (hasOrderBy()) + { + buffer << '\n' << std::string(indent + 2, ' ') << "ORDER BY\n"; + getOrderBy().dumpTreeImpl(buffer, format_state, indent + 4); + } + + if (hasFrameBeginOffset()) + { + buffer << '\n' << std::string(indent + 2, ' ') << "FRAME BEGIN OFFSET\n"; + getFrameBeginOffsetNode()->dumpTreeImpl(buffer, format_state, indent + 4); + } + + if (hasFrameEndOffset()) + { + buffer << '\n' << std::string(indent + 2, ' ') << "FRAME END OFFSET\n"; + getFrameEndOffsetNode()->dumpTreeImpl(buffer, format_state, indent + 4); + } +} + +bool WindowNode::isEqualImpl(const IQueryTreeNode & rhs) const +{ + const auto & rhs_typed = assert_cast(rhs); + + return window_frame == rhs_typed.window_frame && parent_window_name == rhs_typed.parent_window_name; +} + +void WindowNode::updateTreeHashImpl(HashState & hash_state) const +{ + hash_state.update(window_frame.is_default); + hash_state.update(window_frame.type); + hash_state.update(window_frame.begin_type); + hash_state.update(window_frame.begin_preceding); + hash_state.update(window_frame.end_type); + hash_state.update(window_frame.end_preceding); + + hash_state.update(parent_window_name); +} + +QueryTreeNodePtr WindowNode::cloneImpl() const +{ + auto window_node = std::make_shared(window_frame); + window_node->parent_window_name = parent_window_name; + + return window_node; +} + +ASTPtr WindowNode::toASTImpl() const +{ + auto window_definition = std::make_shared(); + + window_definition->parent_window_name = parent_window_name; + + 
window_definition->children.push_back(getPartitionByNode()->toAST()); + window_definition->partition_by = window_definition->children.back(); + + window_definition->children.push_back(getOrderByNode()->toAST()); + window_definition->order_by = window_definition->children.back(); + + window_definition->frame_is_default = window_frame.is_default; + window_definition->frame_type = window_frame.type; + window_definition->frame_begin_type = window_frame.begin_type; + window_definition->frame_begin_preceding = window_frame.begin_preceding; + + if (hasFrameBeginOffset()) + { + window_definition->children.push_back(getFrameBeginOffsetNode()->toAST()); + window_definition->frame_begin_offset = window_definition->children.back(); + } + + window_definition->frame_end_type = window_frame.end_type; + window_definition->frame_end_preceding = window_frame.end_preceding; + if (hasFrameEndOffset()) + { + window_definition->children.push_back(getFrameEndOffsetNode()->toAST()); + window_definition->frame_end_offset = window_definition->children.back(); + } + + return window_definition; +} + +} diff --git a/src/Analyzer/WindowNode.h b/src/Analyzer/WindowNode.h new file mode 100644 index 00000000000..9dfb3e6ef2a --- /dev/null +++ b/src/Analyzer/WindowNode.h @@ -0,0 +1,191 @@ +#pragma once + +#include +#include + +#include + +namespace DB +{ + +/** Window node represents window function window description. + * + * Example: SELECT * FROM test_table WINDOW window AS (PARTITION BY id); + * window AS (PARTITION BY id) - window node. + * + * Example: SELECT count() OVER (PARTITION BY id) FROM test_table; + * PARTITION BY id - window node. + * + * Window node can also refer to its parent window node. + * Example: SELECT count() OVER (parent_window ORDER BY id) FROM test_table WINDOW parent_window AS (PARTITION BY id); + * parent_window ORDER BY id - window node. + * + * Window node initially initialized with window frame. + * + * If window frame has OFFSET begin type, additionally frame begin offset node must be initialized. + * If window frame has OFFSET end type, additionally frame end offset node must be initialized. + * During query analysis pass they must be resolved, validated and window node window frame offset constants must be updated. 
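+ *
+ * Example: SELECT count() OVER (ORDER BY id ROWS BETWEEN 2 PRECEDING AND CURRENT ROW) FROM test_table;
+ * here the frame begin boundary has OFFSET type, so the frame begin offset child must be initialized and resolved to the constant 2.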
+ */ +class WindowNode; +using WindowNodePtr = std::shared_ptr; + +class WindowNode final : public IQueryTreeNode +{ +public: + /// Initialize window node with window frame + explicit WindowNode(WindowFrame window_frame_); + + /// Get window node window frame + const WindowFrame & getWindowFrame() const + { + return window_frame; + } + + /// Get window node window frame + WindowFrame & getWindowFrame() + { + return window_frame; + } + + /// Returns true if window node has parent window name, false otherwise + bool hasParentWindowName() const + { + return parent_window_name.empty(); + } + + /// Get parent window name + const String & getParentWindowName() const + { + return parent_window_name; + } + + /// Set parent window name + void setParentWindowName(String parent_window_name_value) + { + parent_window_name = std::move(parent_window_name_value); + } + + /// Returns true if window node has order by, false otherwise + bool hasOrderBy() const + { + return !getOrderBy().getNodes().empty(); + } + + /// Get order by + const ListNode & getOrderBy() const + { + return children[order_by_child_index]->as(); + } + + /// Get order by + ListNode & getOrderBy() + { + return children[order_by_child_index]->as(); + } + + /// Get order by node + const QueryTreeNodePtr & getOrderByNode() const + { + return children[order_by_child_index]; + } + + /// Get order by node + QueryTreeNodePtr & getOrderByNode() + { + return children[order_by_child_index]; + } + + /// Returns true if window node has partition by, false otherwise + bool hasPartitionBy() const + { + return !getPartitionBy().getNodes().empty(); + } + + /// Get partition by + const ListNode & getPartitionBy() const + { + return children[partition_by_child_index]->as(); + } + + /// Get partition by + ListNode & getPartitionBy() + { + return children[partition_by_child_index]->as(); + } + + /// Get partition by node + const QueryTreeNodePtr & getPartitionByNode() const + { + return children[partition_by_child_index]; + } + + /// Get partition by node + QueryTreeNodePtr & getPartitionByNode() + { + return children[partition_by_child_index]; + } + + /// Returns true if window node has FRAME begin offset, false otherwise + bool hasFrameBeginOffset() const + { + return getFrameBeginOffsetNode() != nullptr; + } + + /// Get FRAME begin offset node + const QueryTreeNodePtr & getFrameBeginOffsetNode() const + { + return children[frame_begin_offset_child_index]; + } + + /// Get FRAME begin offset node + QueryTreeNodePtr & getFrameBeginOffsetNode() + { + return children[frame_begin_offset_child_index]; + } + + /// Returns true if window node has FRAME end offset, false otherwise + bool hasFrameEndOffset() const + { + return getFrameEndOffsetNode() != nullptr; + } + + /// Get FRAME end offset node + const QueryTreeNodePtr & getFrameEndOffsetNode() const + { + return children[frame_end_offset_child_index]; + } + + /// Get FRAME end offset node + QueryTreeNodePtr & getFrameEndOffsetNode() + { + return children[frame_end_offset_child_index]; + } + + QueryTreeNodeType getNodeType() const override + { + return QueryTreeNodeType::WINDOW; + } + + void dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const override; + +protected: + bool isEqualImpl(const IQueryTreeNode & rhs) const override; + + void updateTreeHashImpl(HashState & hash_state) const override; + + QueryTreeNodePtr cloneImpl() const override; + + ASTPtr toASTImpl() const override; + +private: + static constexpr size_t order_by_child_index = 0; + static constexpr size_t 
partition_by_child_index = 1; + static constexpr size_t frame_begin_offset_child_index = 3; + static constexpr size_t frame_end_offset_child_index = 4; + static constexpr size_t children_size = frame_end_offset_child_index + 1; + + WindowFrame window_frame; + String parent_window_name; +}; + +} diff --git a/src/Analyzer/examples/CMakeLists.txt b/src/Analyzer/examples/CMakeLists.txt new file mode 100644 index 00000000000..c6b1b0b3c5f --- /dev/null +++ b/src/Analyzer/examples/CMakeLists.txt @@ -0,0 +1,3 @@ +add_executable (query_analyzer query_analyzer.cpp) +target_include_directories (query_analyzer SYSTEM BEFORE PRIVATE ${SPARSEHASH_INCLUDE_DIR}) +target_link_libraries (query_analyzer PRIVATE dbms) diff --git a/src/Analyzer/examples/query_analyzer.cpp b/src/Analyzer/examples/query_analyzer.cpp new file mode 100644 index 00000000000..5a20b46b346 --- /dev/null +++ b/src/Analyzer/examples/query_analyzer.cpp @@ -0,0 +1,9 @@ +#include + +int main(int argc, char ** argv) +{ + (void)(argc); + (void)(argv); + + return 0; +} diff --git a/src/Analyzer/tests/CMakeLists.txt b/src/Analyzer/tests/CMakeLists.txt new file mode 100644 index 00000000000..e69de29bb2d diff --git a/src/Analyzer/tests/gtest_identifier.cpp b/src/Analyzer/tests/gtest_identifier.cpp new file mode 100644 index 00000000000..4f224dfbb07 --- /dev/null +++ b/src/Analyzer/tests/gtest_identifier.cpp @@ -0,0 +1,227 @@ +#include + +#include + +using namespace DB; + +TEST(Identifier, IdentifierBasics) +{ + { + Identifier identifier; + + ASSERT_TRUE(identifier.empty()); + ASSERT_TRUE(identifier.isEmpty()); + ASSERT_EQ(identifier.getPartsSize(), 0); + ASSERT_FALSE(identifier.isShort()); + ASSERT_FALSE(identifier.isCompound()); + ASSERT_FALSE(identifier.startsWith("test")); + ASSERT_FALSE(identifier.endsWith("test")); + ASSERT_EQ(identifier.begin(), identifier.end()); + ASSERT_EQ(identifier.getFullName(), ""); + } + { + Identifier identifier("value"); + + ASSERT_FALSE(identifier.empty()); + ASSERT_FALSE(identifier.isEmpty()); + ASSERT_EQ(identifier.getPartsSize(), 1); + ASSERT_TRUE(identifier.isShort()); + ASSERT_FALSE(identifier.isCompound()); + ASSERT_EQ(identifier.front(), "value"); + ASSERT_EQ(identifier.back(), "value"); + ASSERT_FALSE(identifier.startsWith("test")); + ASSERT_FALSE(identifier.endsWith("test")); + ASSERT_TRUE(identifier.startsWith("value")); + ASSERT_TRUE(identifier.endsWith("value")); + ASSERT_EQ(identifier[0], "value"); + ASSERT_NE(identifier.begin(), identifier.end()); + ASSERT_EQ(identifier.getFullName(), "value"); + } + { + Identifier identifier("value1.value2"); + + ASSERT_FALSE(identifier.empty()); + ASSERT_FALSE(identifier.isEmpty()); + ASSERT_EQ(identifier.getPartsSize(), 2); + ASSERT_FALSE(identifier.isShort()); + ASSERT_TRUE(identifier.isCompound()); + ASSERT_EQ(identifier.front(), "value1"); + ASSERT_EQ(identifier.back(), "value2"); + ASSERT_FALSE(identifier.startsWith("test")); + ASSERT_FALSE(identifier.endsWith("test")); + ASSERT_TRUE(identifier.startsWith("value1")); + ASSERT_TRUE(identifier.endsWith("value2")); + ASSERT_EQ(identifier[0], "value1"); + ASSERT_EQ(identifier[1], "value2"); + ASSERT_NE(identifier.begin(), identifier.end()); + ASSERT_EQ(identifier.getFullName(), "value1.value2"); + } + { + Identifier identifier1("value1.value2"); + Identifier identifier2("value1.value2"); + + ASSERT_EQ(identifier1, identifier2); + } + { + Identifier identifier1("value1.value2"); + Identifier identifier2("value1.value3"); + + ASSERT_NE(identifier1, identifier2); + } +} + +TEST(Identifier, IdentifierPopParts) +{ + 
{ + Identifier identifier("value1.value2.value3"); + + ASSERT_EQ(identifier.getFullName(), "value1.value2.value3"); + identifier.popLast(); + ASSERT_EQ(identifier.getFullName(), "value1.value2"); + identifier.popLast(); + ASSERT_EQ(identifier.getFullName(), "value1"); + identifier.popLast(); + ASSERT_EQ(identifier.getFullName(), ""); + ASSERT_TRUE(identifier.isEmpty()); + } + { + Identifier identifier("value1.value2.value3"); + + ASSERT_EQ(identifier.getFullName(), "value1.value2.value3"); + identifier.popFirst(); + ASSERT_EQ(identifier.getFullName(), "value2.value3"); + identifier.popFirst(); + ASSERT_EQ(identifier.getFullName(), "value3"); + identifier.popFirst(); + ASSERT_EQ(identifier.getFullName(), ""); + ASSERT_TRUE(identifier.isEmpty()); + } + { + Identifier identifier("value1.value2.value3"); + + ASSERT_EQ(identifier.getFullName(), "value1.value2.value3"); + identifier.popLast(); + ASSERT_EQ(identifier.getFullName(), "value1.value2"); + identifier.popFirst(); + ASSERT_EQ(identifier.getFullName(), "value2"); + identifier.popLast(); + ASSERT_EQ(identifier.getFullName(), ""); + ASSERT_TRUE(identifier.isEmpty()); + } +} + +TEST(Identifier, IdentifierViewBasics) +{ + { + Identifier identifier; + IdentifierView identifier_view(identifier); + + ASSERT_TRUE(identifier_view.empty()); + ASSERT_TRUE(identifier_view.isEmpty()); + ASSERT_EQ(identifier_view.getPartsSize(), 0); + ASSERT_FALSE(identifier_view.isShort()); + ASSERT_FALSE(identifier_view.isCompound()); + ASSERT_FALSE(identifier_view.startsWith("test")); + ASSERT_FALSE(identifier_view.endsWith("test")); + ASSERT_EQ(identifier_view.begin(), identifier_view.end()); + ASSERT_EQ(identifier_view.getFullName(), ""); + } + { + Identifier identifier("value"); + IdentifierView identifier_view(identifier); + + ASSERT_FALSE(identifier_view.empty()); + ASSERT_FALSE(identifier_view.isEmpty()); + ASSERT_EQ(identifier_view.getPartsSize(), 1); + ASSERT_TRUE(identifier_view.isShort()); + ASSERT_FALSE(identifier_view.isCompound()); + ASSERT_EQ(identifier_view.front(), "value"); + ASSERT_EQ(identifier_view.back(), "value"); + ASSERT_FALSE(identifier_view.startsWith("test")); + ASSERT_FALSE(identifier_view.endsWith("test")); + ASSERT_TRUE(identifier_view.startsWith("value")); + ASSERT_TRUE(identifier_view.endsWith("value")); + ASSERT_EQ(identifier_view[0], "value"); + ASSERT_NE(identifier_view.begin(), identifier_view.end()); + ASSERT_EQ(identifier_view.getFullName(), "value"); + } + { + Identifier identifier("value1.value2"); + IdentifierView identifier_view(identifier); + + ASSERT_FALSE(identifier_view.empty()); + ASSERT_FALSE(identifier_view.isEmpty()); + ASSERT_EQ(identifier_view.getPartsSize(), 2); + ASSERT_FALSE(identifier_view.isShort()); + ASSERT_TRUE(identifier_view.isCompound()); + ASSERT_FALSE(identifier_view.startsWith("test")); + ASSERT_FALSE(identifier_view.endsWith("test")); + ASSERT_TRUE(identifier_view.startsWith("value1")); + ASSERT_TRUE(identifier_view.endsWith("value2")); + ASSERT_EQ(identifier_view[0], "value1"); + ASSERT_EQ(identifier_view[1], "value2"); + ASSERT_NE(identifier_view.begin(), identifier_view.end()); + ASSERT_EQ(identifier_view.getFullName(), "value1.value2"); + } + { + Identifier identifier1("value1.value2"); + IdentifierView identifier_view1(identifier1); + + Identifier identifier2("value1.value2"); + IdentifierView identifier_view2(identifier2); + + ASSERT_EQ(identifier_view1, identifier_view2); + } + { + Identifier identifier1("value1.value2"); + IdentifierView identifier_view1(identifier1); + + Identifier 
identifier2("value1.value3"); + IdentifierView identifier_view2(identifier2); + + ASSERT_NE(identifier_view1, identifier_view2); + } +} + +TEST(Identifier, IdentifierViewPopParts) +{ + { + Identifier identifier("value1.value2.value3"); + IdentifierView identifier_view(identifier); + + ASSERT_EQ(identifier_view.getFullName(), "value1.value2.value3"); + identifier_view.popLast(); + ASSERT_EQ(identifier_view.getFullName(), "value1.value2"); + identifier_view.popLast(); + ASSERT_EQ(identifier_view.getFullName(), "value1"); + identifier_view.popLast(); + ASSERT_EQ(identifier_view.getFullName(), ""); + ASSERT_TRUE(identifier_view.isEmpty()); + } + { + Identifier identifier("value1.value2.value3"); + IdentifierView identifier_view(identifier); + + ASSERT_EQ(identifier_view.getFullName(), "value1.value2.value3"); + identifier_view.popFirst(); + ASSERT_EQ(identifier_view.getFullName(), "value2.value3"); + identifier_view.popFirst(); + ASSERT_EQ(identifier_view.getFullName(), "value3"); + identifier_view.popFirst(); + ASSERT_EQ(identifier_view.getFullName(), ""); + ASSERT_TRUE(identifier_view.isEmpty()); + } + { + Identifier identifier("value1.value2.value3"); + IdentifierView identifier_view(identifier); + + ASSERT_EQ(identifier_view.getFullName(), "value1.value2.value3"); + identifier_view.popLast(); + ASSERT_EQ(identifier_view.getFullName(), "value1.value2"); + identifier_view.popFirst(); + ASSERT_EQ(identifier_view.getFullName(), "value2"); + identifier_view.popLast(); + ASSERT_EQ(identifier_view.getFullName(), ""); + ASSERT_TRUE(identifier_view.isEmpty()); + } +} diff --git a/src/Analyzer/tests/gtest_query_tree_node.cpp b/src/Analyzer/tests/gtest_query_tree_node.cpp new file mode 100644 index 00000000000..079869b2a53 --- /dev/null +++ b/src/Analyzer/tests/gtest_query_tree_node.cpp @@ -0,0 +1,86 @@ +#include + +#include + +#include +#include +#include + +using namespace DB; + +class SourceNode final : public IQueryTreeNode +{ +public: + SourceNode() : IQueryTreeNode(0 /*children_size*/) {} + + QueryTreeNodeType getNodeType() const override + { + return QueryTreeNodeType::TABLE; + } + + void dumpTreeImpl(WriteBuffer &, FormatState &, size_t) const override + { + } + + bool isEqualImpl(const IQueryTreeNode &) const override + { + return true; + } + + void updateTreeHashImpl(HashState &) const override + { + } + + QueryTreeNodePtr cloneImpl() const override + { + return std::make_shared(); + } + + ASTPtr toASTImpl() const override + { + return nullptr; + } +}; + +TEST(QueryTreeNode, Clone) +{ + { + auto source_node = std::make_shared(); + + NameAndTypePair column_name_and_type("value", std::make_shared()); + auto column_node = std::make_shared(column_name_and_type, source_node); + + ASSERT_EQ(column_node->getColumnSource().get(), source_node.get()); + + auto cloned_column_node = column_node->clone(); + + /// If in subtree source was not cloned, source pointer must remain same + ASSERT_NE(column_node.get(), cloned_column_node.get()); + ASSERT_EQ(cloned_column_node->as().getColumnSource().get(), source_node.get()); + } + { + auto root_node = std::make_shared(); + auto source_node = std::make_shared(); + + NameAndTypePair column_name_and_type("value", std::make_shared()); + auto column_node = std::make_shared(column_name_and_type, source_node); + + root_node->getNodes().push_back(source_node); + root_node->getNodes().push_back(column_node); + + ASSERT_EQ(column_node->getColumnSource().get(), source_node.get()); + + auto cloned_root_node = std::static_pointer_cast(root_node->clone()); + auto 
cloned_source_node = cloned_root_node->getNodes()[0]; + auto cloned_column_node = std::static_pointer_cast(cloned_root_node->getNodes()[1]); + + /** If in subtree source was cloned. + * Source pointer for node that was cloned must remain same. + * Source pointer for cloned node must be updated. + */ + ASSERT_NE(column_node.get(), cloned_column_node.get()); + ASSERT_NE(source_node.get(), cloned_source_node.get()); + ASSERT_EQ(column_node->getColumnSource().get(), source_node.get()); + ASSERT_EQ(cloned_column_node->getColumnSource().get(), cloned_source_node.get()); + } +} diff --git a/src/Backups/BackupFactory.cpp b/src/Backups/BackupFactory.cpp index a23cc70658b..7c870737b1d 100644 --- a/src/Backups/BackupFactory.cpp +++ b/src/Backups/BackupFactory.cpp @@ -32,10 +32,12 @@ void BackupFactory::registerBackupEngine(const String & engine_name, const Creat } void registerBackupEnginesFileAndDisk(BackupFactory &); +void registerBackupEngineS3(BackupFactory &); void registerBackupEngines(BackupFactory & factory) { registerBackupEnginesFileAndDisk(factory); + registerBackupEngineS3(factory); } BackupFactory::BackupFactory() diff --git a/src/Backups/BackupIO_S3.cpp b/src/Backups/BackupIO_S3.cpp new file mode 100644 index 00000000000..12038a8a30c --- /dev/null +++ b/src/Backups/BackupIO_S3.cpp @@ -0,0 +1,375 @@ +#include + +#if USE_AWS_S3 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + + +namespace fs = std::filesystem; + +namespace DB +{ +namespace ErrorCodes +{ + extern const int S3_ERROR; + extern const int LOGICAL_ERROR; +} + +namespace +{ + std::shared_ptr + makeS3Client(const S3::URI & s3_uri, const String & access_key_id, const String & secret_access_key, const ContextPtr & context) + { + auto settings = context->getStorageS3Settings().getSettings(s3_uri.uri.toString()); + + Aws::Auth::AWSCredentials credentials(access_key_id, secret_access_key); + HeaderCollection headers; + if (access_key_id.empty()) + { + credentials = Aws::Auth::AWSCredentials(settings.auth_settings.access_key_id, settings.auth_settings.secret_access_key); + headers = settings.auth_settings.headers; + } + + S3::PocoHTTPClientConfiguration client_configuration = S3::ClientFactory::instance().createClientConfiguration( + settings.auth_settings.region, + context->getRemoteHostFilter(), + static_cast(context->getGlobalContext()->getSettingsRef().s3_max_redirects), + context->getGlobalContext()->getSettingsRef().enable_s3_requests_logging, + /* for_disk_s3 = */ false); + + client_configuration.endpointOverride = s3_uri.endpoint; + client_configuration.maxConnections = static_cast(context->getSettingsRef().s3_max_connections); + /// Increase connect timeout + client_configuration.connectTimeoutMs = 10 * 1000; + /// Requests in backups can be extremely long, set to one hour + client_configuration.requestTimeoutMs = 60 * 60 * 1000; + + return S3::ClientFactory::instance().create( + client_configuration, + s3_uri.is_virtual_hosted_style, + credentials.GetAWSAccessKeyId(), + credentials.GetAWSSecretKey(), + settings.auth_settings.server_side_encryption_customer_key_base64, + std::move(headers), + settings.auth_settings.use_environment_credentials.value_or( + context->getConfigRef().getBool("s3.use_environment_credentials", false)), + settings.auth_settings.use_insecure_imds_request.value_or( + context->getConfigRef().getBool("s3.use_insecure_imds_request", false))); + } + + Aws::Vector listObjects(Aws::S3::S3Client & client, const S3::URI & s3_uri, const 
String & file_name) + { + Aws::S3::Model::ListObjectsRequest request; + request.SetBucket(s3_uri.bucket); + request.SetPrefix(fs::path{s3_uri.key} / file_name); + request.SetMaxKeys(1); + auto outcome = client.ListObjects(request); + if (!outcome.IsSuccess()) + throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR); + return outcome.GetResult().GetContents(); + } +} + + +BackupReaderS3::BackupReaderS3( + const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, const ContextPtr & context_) + : s3_uri(s3_uri_) + , client(makeS3Client(s3_uri_, access_key_id_, secret_access_key_, context_)) + , max_single_read_retries(context_->getSettingsRef().s3_max_single_read_retries) + , read_settings(context_->getReadSettings()) +{ +} + +DataSourceDescription BackupReaderS3::getDataSourceDescription() const +{ + return DataSourceDescription{DataSourceType::S3, s3_uri.endpoint, false, false}; +} + + +BackupReaderS3::~BackupReaderS3() = default; + +bool BackupReaderS3::fileExists(const String & file_name) +{ + return !listObjects(*client, s3_uri, file_name).empty(); +} + +UInt64 BackupReaderS3::getFileSize(const String & file_name) +{ + auto objects = listObjects(*client, s3_uri, file_name); + if (objects.empty()) + throw Exception(ErrorCodes::S3_ERROR, "Object {} must exist"); + return objects[0].GetSize(); +} + +std::unique_ptr BackupReaderS3::readFile(const String & file_name) +{ + return std::make_unique( + client, s3_uri.bucket, fs::path(s3_uri.key) / file_name, s3_uri.version_id, max_single_read_retries, read_settings); +} + + +BackupWriterS3::BackupWriterS3( + const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, const ContextPtr & context_) + : s3_uri(s3_uri_) + , client(makeS3Client(s3_uri_, access_key_id_, secret_access_key_, context_)) + , max_single_read_retries(context_->getSettingsRef().s3_max_single_read_retries) + , read_settings(context_->getReadSettings()) + , rw_settings(context_->getStorageS3Settings().getSettings(s3_uri.uri.toString()).rw_settings) +{ + rw_settings.updateFromSettingsIfEmpty(context_->getSettingsRef()); +} + +DataSourceDescription BackupWriterS3::getDataSourceDescription() const +{ + return DataSourceDescription{DataSourceType::S3, s3_uri.endpoint, false, false}; +} + +bool BackupWriterS3::supportNativeCopy(DataSourceDescription data_source_description) const +{ + return getDataSourceDescription() == data_source_description; +} + + +void BackupWriterS3::copyObjectImpl( + const String & src_bucket, + const String & src_key, + const String & dst_bucket, + const String & dst_key, + std::optional head, + std::optional metadata) const +{ + Aws::S3::Model::CopyObjectRequest request; + request.SetCopySource(src_bucket + "/" + src_key); + request.SetBucket(dst_bucket); + request.SetKey(dst_key); + if (metadata) + { + request.SetMetadata(*metadata); + request.SetMetadataDirective(Aws::S3::Model::MetadataDirective::REPLACE); + } + + auto outcome = client->CopyObject(request); + + if (!outcome.IsSuccess() && outcome.GetError().GetExceptionName() == "EntityTooLarge") + { // Can't come here with MinIO, MinIO allows single part upload for large objects. 
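+ // AWS S3 returns EntityTooLarge for a single-part CopyObject above ~5 GB,
+ // so the same copy is retried as a multipart upload.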
+ copyObjectMultipartImpl(src_bucket, src_key, dst_bucket, dst_key, head, metadata); + return; + } + + if (!outcome.IsSuccess()) + throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR); + +} + +Aws::S3::Model::HeadObjectOutcome BackupWriterS3::requestObjectHeadData(const std::string & bucket_from, const std::string & key) const +{ + Aws::S3::Model::HeadObjectRequest request; + request.SetBucket(bucket_from); + request.SetKey(key); + + return client->HeadObject(request); +} + +void BackupWriterS3::copyObjectMultipartImpl( + const String & src_bucket, + const String & src_key, + const String & dst_bucket, + const String & dst_key, + std::optional head, + std::optional metadata) const +{ + if (!head) + head = requestObjectHeadData(src_bucket, src_key).GetResult(); + + size_t size = head->GetContentLength(); + + String multipart_upload_id; + + { + Aws::S3::Model::CreateMultipartUploadRequest request; + request.SetBucket(dst_bucket); + request.SetKey(dst_key); + if (metadata) + request.SetMetadata(*metadata); + + auto outcome = client->CreateMultipartUpload(request); + + if (!outcome.IsSuccess()) + throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR); + + multipart_upload_id = outcome.GetResult().GetUploadId(); + } + + std::vector part_tags; + + size_t upload_part_size = rw_settings.min_upload_part_size; + for (size_t position = 0, part_number = 1; position < size; ++part_number, position += upload_part_size) + { + Aws::S3::Model::UploadPartCopyRequest part_request; + part_request.SetCopySource(src_bucket + "/" + src_key); + part_request.SetBucket(dst_bucket); + part_request.SetKey(dst_key); + part_request.SetUploadId(multipart_upload_id); + part_request.SetPartNumber(static_cast(part_number)); + part_request.SetCopySourceRange(fmt::format("bytes={}-{}", position, std::min(size, position + upload_part_size) - 1)); + + auto outcome = client->UploadPartCopy(part_request); + if (!outcome.IsSuccess()) + { + Aws::S3::Model::AbortMultipartUploadRequest abort_request; + abort_request.SetBucket(dst_bucket); + abort_request.SetKey(dst_key); + abort_request.SetUploadId(multipart_upload_id); + client->AbortMultipartUpload(abort_request); + // In error case we throw exception later with first error from UploadPartCopy + } + if (!outcome.IsSuccess()) + throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR); + + auto etag = outcome.GetResult().GetCopyPartResult().GetETag(); + part_tags.push_back(etag); + } + + { + Aws::S3::Model::CompleteMultipartUploadRequest req; + req.SetBucket(dst_bucket); + req.SetKey(dst_key); + req.SetUploadId(multipart_upload_id); + + Aws::S3::Model::CompletedMultipartUpload multipart_upload; + for (size_t i = 0; i < part_tags.size(); ++i) + { + Aws::S3::Model::CompletedPart part; + multipart_upload.AddParts(part.WithETag(part_tags[i]).WithPartNumber(static_cast(i) + 1)); + } + + req.SetMultipartUpload(multipart_upload); + + auto outcome = client->CompleteMultipartUpload(req); + + if (!outcome.IsSuccess()) + throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR); + } +} + +void BackupWriterS3::copyFileNative(DiskPtr from_disk, const String & file_name_from, const String & file_name_to) +{ + if (!from_disk) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot natively copy data to disk without source disk"); + + auto objects = from_disk->getStorageObjects(file_name_from); + if (objects.size() > 1) + { + copyFileThroughBuffer(from_disk->readFile(file_name_from), file_name_to); + } + else + { + auto object_storage = 
from_disk->getObjectStorage(); + std::string source_bucket = object_storage->getObjectsNamespace(); + auto file_path = fs::path(s3_uri.key) / file_name_to; + + auto head = requestObjectHeadData(source_bucket, objects[0].absolute_path).GetResult(); + static constexpr int64_t multipart_upload_threashold = 5UL * 1024 * 1024 * 1024; + if (head.GetContentLength() >= multipart_upload_threashold) + { + copyObjectMultipartImpl( + source_bucket, objects[0].absolute_path, s3_uri.bucket, file_path, head); + } + else + { + copyObjectImpl( + source_bucket, objects[0].absolute_path, s3_uri.bucket, file_path, head); + } + } +} + + +BackupWriterS3::~BackupWriterS3() = default; + +bool BackupWriterS3::fileExists(const String & file_name) +{ + return !listObjects(*client, s3_uri, file_name).empty(); +} + +UInt64 BackupWriterS3::getFileSize(const String & file_name) +{ + auto objects = listObjects(*client, s3_uri, file_name); + if (objects.empty()) + throw Exception(ErrorCodes::S3_ERROR, "Object {} must exist"); + return objects[0].GetSize(); +} + +bool BackupWriterS3::fileContentsEqual(const String & file_name, const String & expected_file_contents) +{ + if (listObjects(*client, s3_uri, file_name).empty()) + return false; + + try + { + auto in = std::make_unique( + client, s3_uri.bucket, fs::path(s3_uri.key) / file_name, s3_uri.version_id, max_single_read_retries, read_settings); + String actual_file_contents(expected_file_contents.size(), ' '); + return (in->read(actual_file_contents.data(), actual_file_contents.size()) == actual_file_contents.size()) + && (actual_file_contents == expected_file_contents) && in->eof(); + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + return false; + } +} + +std::unique_ptr BackupWriterS3::writeFile(const String & file_name) +{ + return std::make_unique( + client, + s3_uri.bucket, + fs::path(s3_uri.key) / file_name, + rw_settings, + std::nullopt, + DBMS_DEFAULT_BUFFER_SIZE, + threadPoolCallbackRunner(IOThreadPool::get(), "BackupWriterS3")); +} + +void BackupWriterS3::removeFiles(const Strings & file_names) +{ + /// One call of DeleteObjects() cannot remove more than 1000 keys. + size_t chunk_size_limit = 1000; + + size_t current_position = 0; + while (current_position < file_names.size()) + { + std::vector current_chunk; + for (; current_position < file_names.size() && current_chunk.size() < chunk_size_limit; ++current_position) + { + Aws::S3::Model::ObjectIdentifier obj; + obj.SetKey(fs::path(s3_uri.key) / file_names[current_position]); + current_chunk.push_back(obj); + } + + Aws::S3::Model::Delete delkeys; + delkeys.SetObjects(current_chunk); + Aws::S3::Model::DeleteObjectsRequest request; + request.SetBucket(s3_uri.bucket); + request.SetDelete(delkeys); + + auto outcome = client->DeleteObjects(request); + if (!outcome.IsSuccess()) + throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR); + } +} + +} + +#endif diff --git a/src/Backups/BackupIO_S3.h b/src/Backups/BackupIO_S3.h new file mode 100644 index 00000000000..471ddcc06e6 --- /dev/null +++ b/src/Backups/BackupIO_S3.h @@ -0,0 +1,92 @@ +#pragma once + +#include "config.h" + +#if USE_AWS_S3 +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace DB +{ + +/// Represents a backup stored to AWS S3. 
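+/// These reader and writer classes back the S3 backup engine, e.g. (bucket, path and credentials below are illustrative):
+///     BACKUP TABLE test_table TO S3('https://bucket.s3.amazonaws.com/backups/day1.zip', 'access_key_id', 'secret_access_key');
+///     RESTORE TABLE test_table FROM S3('https://bucket.s3.amazonaws.com/backups/day1.zip', 'access_key_id', 'secret_access_key');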
+class BackupReaderS3 : public IBackupReader +{ +public: + BackupReaderS3(const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, const ContextPtr & context_); + ~BackupReaderS3() override; + + bool fileExists(const String & file_name) override; + UInt64 getFileSize(const String & file_name) override; + std::unique_ptr readFile(const String & file_name) override; + DataSourceDescription getDataSourceDescription() const override; + +private: + S3::URI s3_uri; + std::shared_ptr client; + UInt64 max_single_read_retries; + ReadSettings read_settings; +}; + + +class BackupWriterS3 : public IBackupWriter +{ +public: + BackupWriterS3(const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, const ContextPtr & context_); + ~BackupWriterS3() override; + + bool fileExists(const String & file_name) override; + UInt64 getFileSize(const String & file_name) override; + bool fileContentsEqual(const String & file_name, const String & expected_file_contents) override; + std::unique_ptr writeFile(const String & file_name) override; + void removeFiles(const Strings & file_names) override; + + DataSourceDescription getDataSourceDescription() const override; + bool supportNativeCopy(DataSourceDescription data_source_description) const override; + void copyFileNative(DiskPtr from_disk, const String & file_name_from, const String & file_name_to) override; + +private: + + Aws::S3::Model::HeadObjectOutcome requestObjectHeadData(const std::string & bucket_from, const std::string & key) const; + + void copyObjectImpl( + const String & src_bucket, + const String & src_key, + const String & dst_bucket, + const String & dst_key, + std::optional head = std::nullopt, + std::optional metadata = std::nullopt) const; + + void copyObjectMultipartImpl( + const String & src_bucket, + const String & src_key, + const String & dst_bucket, + const String & dst_key, + std::optional head = std::nullopt, + std::optional metadata = std::nullopt) const; + + S3::URI s3_uri; + std::shared_ptr client; + UInt64 max_single_read_retries; + ReadSettings read_settings; + S3Settings::ReadWriteSettings rw_settings; +}; + +} + +#endif diff --git a/src/Backups/BackupImpl.cpp b/src/Backups/BackupImpl.cpp index 8b648af44ec..2d58e993364 100644 --- a/src/Backups/BackupImpl.cpp +++ b/src/Backups/BackupImpl.cpp @@ -455,6 +455,7 @@ void BackupImpl::createLockFile() assert(uuid); auto out = writer->writeFile(lock_file_name); writeUUIDText(*uuid, *out); + out->finalize(); } bool BackupImpl::checkLockFile(bool throw_if_failed) const diff --git a/src/Backups/BackupSettings.cpp b/src/Backups/BackupSettings.cpp index a4b20e0b863..295ab723326 100644 --- a/src/Backups/BackupSettings.cpp +++ b/src/Backups/BackupSettings.cpp @@ -62,7 +62,6 @@ namespace #define LIST_OF_BACKUP_SETTINGS(M) \ M(String, id) \ M(String, compression_method) \ - M(Int64, compression_level) \ M(String, password) \ M(Bool, structure_only) \ M(Bool, async) \ @@ -72,6 +71,7 @@ namespace M(String, host_id) \ M(String, coordination_zk_path) \ M(OptionalUUID, backup_uuid) + /// M(Int64, compression_level) BackupSettings BackupSettings::fromBackupQuery(const ASTBackupQuery & query) { @@ -82,6 +82,9 @@ BackupSettings BackupSettings::fromBackupQuery(const ASTBackupQuery & query) const auto & settings = query.settings->as().changes; for (const auto & setting : settings) { + if (setting.name == "compression_level") + res.compression_level = static_cast(SettingFieldInt64{setting.value}.value); + else #define 
GET_SETTINGS_FROM_BACKUP_QUERY_HELPER(TYPE, NAME) \ if (setting.name == #NAME) \ res.NAME = SettingField##TYPE{setting.value}.value; \ diff --git a/src/Backups/registerBackupEngineS3.cpp b/src/Backups/registerBackupEngineS3.cpp new file mode 100644 index 00000000000..4d628e57b5c --- /dev/null +++ b/src/Backups/registerBackupEngineS3.cpp @@ -0,0 +1,129 @@ +#include "config.h" + +#include +#include + +#if USE_AWS_S3 +#include +#include +#include +#include +#include +#include +#endif + + +namespace DB +{ +namespace fs = std::filesystem; + +namespace ErrorCodes +{ + extern const int BAD_ARGUMENTS; + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; + extern const int SUPPORT_IS_DISABLED; +} + +#if USE_AWS_S3 +namespace +{ + String removeFileNameFromURL(String & url) + { + Poco::URI url2{url}; + String path = url2.getPath(); + size_t slash_pos = path.find_last_of('/'); + String file_name = path.substr(slash_pos + 1); + path.resize(slash_pos + 1); + url2.setPath(path); + url = url2.toString(); + return file_name; + } +} +#endif + + +void registerBackupEngineS3(BackupFactory & factory) +{ + auto creator_fn = []([[maybe_unused]] const BackupFactory::CreateParams & params) -> std::unique_ptr + { +#if USE_AWS_S3 + String backup_name = params.backup_info.toString(); + const String & id_arg = params.backup_info.id_arg; + const auto & args = params.backup_info.args; + + String s3_uri, access_key_id, secret_access_key; + + if (!id_arg.empty()) + { + const auto & config = params.context->getConfigRef(); + auto config_prefix = "named_collections." + id_arg; + + if (!config.has(config_prefix)) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "There is no collection named `{}` in config", id_arg); + + s3_uri = config.getString(config_prefix + ".url"); + access_key_id = config.getString(config_prefix + ".access_key_id", ""); + secret_access_key = config.getString(config_prefix + ".secret_access_key", ""); + + if (config.has(config_prefix + ".filename")) + s3_uri = fs::path(s3_uri) / config.getString(config_prefix + ".filename"); + + if (args.size() > 1) + throw Exception( + "Backup S3 requires 1 or 2 arguments: named_collection, [filename]", + ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); + + if (args.size() == 1) + s3_uri = fs::path(s3_uri) / args[0].safeGet(); + } + else + { + if ((args.size() != 1) && (args.size() != 3)) + throw Exception( + "Backup S3 requires 1 or 3 arguments: url, [access_key_id, secret_access_key]", + ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); + + s3_uri = args[0].safeGet(); + if (args.size() >= 3) + { + access_key_id = args[1].safeGet(); + secret_access_key = args[2].safeGet(); + } + } + + BackupImpl::ArchiveParams archive_params; + if (hasRegisteredArchiveFileExtension(s3_uri)) + { + if (params.is_internal_backup) + throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Using archives with backups on clusters is disabled"); + + archive_params.archive_name = removeFileNameFromURL(s3_uri); + archive_params.compression_method = params.compression_method; + archive_params.compression_level = params.compression_level; + archive_params.password = params.password; + } + else + { + if (!params.password.empty()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Password is not applicable, backup cannot be encrypted"); + } + + if (params.open_mode == IBackup::OpenMode::READ) + { + auto reader = std::make_shared(S3::URI{Poco::URI{s3_uri}}, access_key_id, secret_access_key, params.context); + return std::make_unique(backup_name, archive_params, params.base_backup_info, reader, params.context); + } + 
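
Note: the removeFileNameFromURL() helper above splits an archive URL into a directory prefix and a file name, so that the archive name can be passed separately to the backup engine. A rough standard-library-only equivalent, shown here as a sketch (splitUrlAndFileName is an illustrative name; unlike the real helper it does not parse the URL with Poco::URI, so query strings and fragments are ignored):

#include <string>
#include <utility>

/// Split "https://host/bucket/dir/backup.zip" into
/// {"https://host/bucket/dir/", "backup.zip"}.
std::pair<std::string, std::string> splitUrlAndFileName(const std::string & url)
{
    size_t slash_pos = url.find_last_of('/');
    if (slash_pos == std::string::npos)
        return {url, {}};
    return {url.substr(0, slash_pos + 1), url.substr(slash_pos + 1)};
}
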
else + { + auto writer = std::make_shared(S3::URI{Poco::URI{s3_uri}}, access_key_id, secret_access_key, params.context); + return std::make_unique(backup_name, archive_params, params.base_backup_info, writer, params.context, params.is_internal_backup, params.backup_coordination, params.backup_uuid); + } +#else + throw Exception("S3 support is disabled", ErrorCodes::SUPPORT_IS_DISABLED); +#endif + }; + + factory.registerBackupEngine("S3", creator_fn); +} + +} diff --git a/src/Bridge/IBridge.h b/src/Bridge/IBridge.h index 2f35c361cca..68af8860cb3 100644 --- a/src/Bridge/IBridge.h +++ b/src/Bridge/IBridge.h @@ -43,7 +43,7 @@ private: std::string hostname; size_t port; std::string log_level; - size_t max_server_connections; + unsigned max_server_connections; size_t http_timeout; Poco::Logger * log; diff --git a/src/BridgeHelper/IBridgeHelper.h b/src/BridgeHelper/IBridgeHelper.h index a3348c81b68..b7fac3f1303 100644 --- a/src/BridgeHelper/IBridgeHelper.h +++ b/src/BridgeHelper/IBridgeHelper.h @@ -43,7 +43,7 @@ protected: virtual String serviceFileName() const = 0; - virtual size_t getDefaultPort() const = 0; + virtual unsigned getDefaultPort() const = 0; virtual bool startBridgeManually() const = 0; diff --git a/src/BridgeHelper/LibraryBridgeHelper.h b/src/BridgeHelper/LibraryBridgeHelper.h index 447a4c713f4..1723d1f8fb4 100644 --- a/src/BridgeHelper/LibraryBridgeHelper.h +++ b/src/BridgeHelper/LibraryBridgeHelper.h @@ -23,7 +23,7 @@ protected: String serviceFileName() const override { return serviceAlias(); } - size_t getDefaultPort() const override { return DEFAULT_PORT; } + unsigned getDefaultPort() const override { return DEFAULT_PORT; } bool startBridgeManually() const override { return false; } diff --git a/src/BridgeHelper/XDBCBridgeHelper.h b/src/BridgeHelper/XDBCBridgeHelper.h index b62cb277ecb..139c1ab9726 100644 --- a/src/BridgeHelper/XDBCBridgeHelper.h +++ b/src/BridgeHelper/XDBCBridgeHelper.h @@ -109,7 +109,7 @@ protected: String getName() const override { return BridgeHelperMixin::getName(); } - size_t getDefaultPort() const override { return DEFAULT_PORT; } + unsigned getDefaultPort() const override { return DEFAULT_PORT; } String serviceAlias() const override { return BridgeHelperMixin::serviceAlias(); } diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 419a34fc2ab..ce2cc862b32 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -66,6 +66,8 @@ add_subdirectory (Storages) add_subdirectory (Parsers) add_subdirectory (IO) add_subdirectory (Functions) +add_subdirectory (Analyzer) +add_subdirectory (Planner) add_subdirectory (Interpreters) add_subdirectory (AggregateFunctions) add_subdirectory (Client) @@ -254,6 +256,9 @@ add_object_library(clickhouse_datatypes_serializations DataTypes/Serializations) add_object_library(clickhouse_databases Databases) add_object_library(clickhouse_databases_mysql Databases/MySQL) add_object_library(clickhouse_disks Disks) +add_object_library(clickhouse_analyzer Analyzer) +add_object_library(clickhouse_analyzer_passes Analyzer/Passes) +add_object_library(clickhouse_planner Planner) add_object_library(clickhouse_interpreters Interpreters) add_object_library(clickhouse_interpreters_cache Interpreters/Cache) add_object_library(clickhouse_interpreters_access Interpreters/Access) diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 0a2fbcf9f46..5d7de8ec799 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -1,7 +1,6 @@ #include #include -#include #include #include #include @@ -9,7 +8,6 @@ #include 
"config.h" #include -#include #include #include #include @@ -32,7 +30,6 @@ #include #include #include -#include #include #include @@ -70,10 +67,10 @@ #include #include #include -#include #include #include + namespace fs = std::filesystem; using namespace std::literals; @@ -340,7 +337,7 @@ ASTPtr ClientBase::parseQuery(const char *& pos, const char * end, bool allow_mu /// Consumes trailing semicolons and tries to consume the same-line trailing comment. -void ClientBase::adjustQueryEnd(const char *& this_query_end, const char * all_queries_end, int max_parser_depth) +void ClientBase::adjustQueryEnd(const char *& this_query_end, const char * all_queries_end, uint32_t max_parser_depth) { // We have to skip the trailing semicolon that might be left // after VALUES parsing or just after a normal semicolon-terminated query. @@ -553,7 +550,7 @@ try out_file_buf = wrapWriteBufferWithCompressionMethod( std::make_unique(out_file, DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY | O_EXCL | O_CREAT), compression_method, - compression_level + static_cast(compression_level) ); if (query_with_output->is_into_outfile_with_stdout) @@ -1605,6 +1602,8 @@ MultiQueryProcessingStage ClientBase::analyzeMultiQueryText( if (this_query_begin >= all_queries_end) return MultiQueryProcessingStage::QUERIES_END; + unsigned max_parser_depth = static_cast(global_context->getSettingsRef().max_parser_depth); + // If there are only comments left until the end of file, we just // stop. The parser can't handle this situation because it always // expects that there is some query that it can parse. @@ -1614,7 +1613,7 @@ MultiQueryProcessingStage ClientBase::analyzeMultiQueryText( // and it makes more sense to treat them as such. { Tokens tokens(this_query_begin, all_queries_end); - IParser::Pos token_iterator(tokens, global_context->getSettingsRef().max_parser_depth); + IParser::Pos token_iterator(tokens, max_parser_depth); if (!token_iterator.isValid()) return MultiQueryProcessingStage::QUERIES_END; } @@ -1635,7 +1634,7 @@ MultiQueryProcessingStage ClientBase::analyzeMultiQueryText( if (ignore_error) { Tokens tokens(this_query_begin, all_queries_end); - IParser::Pos token_iterator(tokens, global_context->getSettingsRef().max_parser_depth); + IParser::Pos token_iterator(tokens, max_parser_depth); while (token_iterator->type != TokenType::Semicolon && token_iterator.isValid()) ++token_iterator; this_query_begin = token_iterator->end; @@ -1675,7 +1674,7 @@ MultiQueryProcessingStage ClientBase::analyzeMultiQueryText( // after we have processed the query. But even this guess is // beneficial so that we see proper trailing comments in "echo" and // server log. - adjustQueryEnd(this_query_end, all_queries_end, global_context->getSettingsRef().max_parser_depth); + adjustQueryEnd(this_query_end, all_queries_end, max_parser_depth); return MultiQueryProcessingStage::EXECUTE_QUERY; } @@ -1869,7 +1868,9 @@ bool ClientBase::executeMultiQuery(const String & all_queries_text) if (insert_ast && isSyncInsertWithData(*insert_ast, global_context)) { this_query_end = insert_ast->end; - adjustQueryEnd(this_query_end, all_queries_end, global_context->getSettingsRef().max_parser_depth); + adjustQueryEnd( + this_query_end, all_queries_end, + static_cast(global_context->getSettingsRef().max_parser_depth)); } // Report error. 
@@ -1925,7 +1926,7 @@ bool ClientBase::processQueryText(const String & text) String ClientBase::prompt() const { - return boost::replace_all_copy(prompt_by_server_display_name, "{database}", config().getString("database", "default")); + return prompt_by_server_display_name; } @@ -2350,7 +2351,7 @@ void ClientBase::init(int argc, char ** argv) if (options.count("print-profile-events")) config().setBool("print-profile-events", true); if (options.count("profile-events-delay-ms")) - config().setInt("profile-events-delay-ms", options["profile-events-delay-ms"].as()); + config().setUInt64("profile-events-delay-ms", options["profile-events-delay-ms"].as()); if (options.count("progress")) config().setBool("progress", true); if (options.count("echo")) diff --git a/src/Client/ClientBase.h b/src/Client/ClientBase.h index 01edb65e135..27f29f24949 100644 --- a/src/Client/ClientBase.h +++ b/src/Client/ClientBase.h @@ -72,7 +72,7 @@ protected: void processParsedSingleQuery(const String & full_query, const String & query_to_execute, ASTPtr parsed_query, std::optional echo_query_ = {}, bool report_error = false); - static void adjustQueryEnd(const char *& this_query_end, const char * all_queries_end, int max_parser_depth); + static void adjustQueryEnd(const char *& this_query_end, const char * all_queries_end, uint32_t max_parser_depth); ASTPtr parseQuery(const char *& pos, const char * end, bool allow_multi_statements) const; static void setupSignalHandler(); diff --git a/src/Client/HedgedConnections.cpp b/src/Client/HedgedConnections.cpp index f1802467b57..c7392a86a7e 100644 --- a/src/Client/HedgedConnections.cpp +++ b/src/Client/HedgedConnections.cpp @@ -338,7 +338,7 @@ HedgedConnections::ReplicaLocation HedgedConnections::getReadyReplicaLocation(As offset_states[location.offset].replicas[location.index].change_replica_timeout.reset(); offset_states[location.offset].replicas[location.index].is_change_replica_timeout_expired = true; offset_states[location.offset].next_replica_in_process = true; - offsets_queue.push(location.offset); + offsets_queue.push(static_cast(location.offset)); ProfileEvents::increment(ProfileEvents::HedgedRequestsChangeReplica); startNewReplica(); } diff --git a/src/Client/HedgedConnectionsFactory.cpp b/src/Client/HedgedConnectionsFactory.cpp index 81067f51d29..bed73b1c200 100644 --- a/src/Client/HedgedConnectionsFactory.cpp +++ b/src/Client/HedgedConnectionsFactory.cpp @@ -362,7 +362,7 @@ void HedgedConnectionsFactory::removeReplicaFromEpoll(int index, int fd) timeout_fd_to_replica_index.erase(replicas[index].change_replica_timeout.getDescriptor()); } -int HedgedConnectionsFactory::numberOfProcessingReplicas() const +size_t HedgedConnectionsFactory::numberOfProcessingReplicas() const { if (epoll.empty()) return 0; @@ -381,7 +381,7 @@ HedgedConnectionsFactory::State HedgedConnectionsFactory::setBestUsableReplica(C && result.is_usable && !replicas[i].is_ready && (!skip_replicas_with_two_level_aggregation_incompatibility || !isTwoLevelAggregationIncompatible(&*result.entry))) - indexes.push_back(i); + indexes.push_back(static_cast(i)); } if (indexes.empty()) diff --git a/src/Client/HedgedConnectionsFactory.h b/src/Client/HedgedConnectionsFactory.h index c5e8d493efa..194e962d549 100644 --- a/src/Client/HedgedConnectionsFactory.h +++ b/src/Client/HedgedConnectionsFactory.h @@ -70,7 +70,7 @@ public: const ConnectionTimeouts & getConnectionTimeouts() const { return timeouts; } - int numberOfProcessingReplicas() const; + size_t numberOfProcessingReplicas() const; /// Tell Factory to not 
return connections with two level aggregation incompatibility. void skipReplicasWithTwoLevelAggregationIncompatibility() { skip_replicas_with_two_level_aggregation_incompatibility = true; } diff --git a/src/Client/LocalConnection.cpp b/src/Client/LocalConnection.cpp index 7ac68324915..476386889d2 100644 --- a/src/Client/LocalConnection.cpp +++ b/src/Client/LocalConnection.cpp @@ -6,8 +6,6 @@ #include #include #include -#include -#include namespace DB diff --git a/src/Client/MultiplexedConnections.cpp b/src/Client/MultiplexedConnections.cpp index 72cd4c46477..87eda765a7a 100644 --- a/src/Client/MultiplexedConnections.cpp +++ b/src/Client/MultiplexedConnections.cpp @@ -393,24 +393,38 @@ MultiplexedConnections::ReplicaState & MultiplexedConnections::getReplicaForRead Poco::Net::Socket::SocketList write_list; Poco::Net::Socket::SocketList except_list; - for (const ReplicaState & state : replica_states) - { - Connection * connection = state.connection; - if (connection != nullptr) - read_list.push_back(*connection->socket); - } - auto timeout = is_draining ? drain_timeout : receive_timeout; - int n = Poco::Net::Socket::select( - read_list, - write_list, - except_list, - timeout); + int n = 0; + + /// EINTR loop + while (true) + { + read_list.clear(); + for (const ReplicaState & state : replica_states) + { + Connection * connection = state.connection; + if (connection != nullptr) + read_list.push_back(*connection->socket); + } + + /// poco returns 0 on EINTR, let's reset errno to ensure that EINTR came from select(). + errno = 0; + + n = Poco::Net::Socket::select( + read_list, + write_list, + except_list, + timeout); + if (n <= 0 && errno == EINTR) + continue; + break; + } /// We treat any error as timeout for simplicity. /// And we also check if read_list is still empty just in case. if (n <= 0 || read_list.empty()) { + const auto & addresses = dumpAddressesUnlocked(); for (ReplicaState & state : replica_states) { Connection * connection = state.connection; @@ -423,7 +437,7 @@ MultiplexedConnections::ReplicaState & MultiplexedConnections::getReplicaForRead throw Exception(ErrorCodes::TIMEOUT_EXCEEDED, "Timeout ({} ms) exceeded while reading from {}", timeout.totalMilliseconds(), - dumpAddressesUnlocked()); + addresses); } } diff --git a/src/Client/QueryFuzzer.cpp b/src/Client/QueryFuzzer.cpp index 6c5f5850b92..d5cd4ef1548 100644 --- a/src/Client/QueryFuzzer.cpp +++ b/src/Client/QueryFuzzer.cpp @@ -81,9 +81,9 @@ Field QueryFuzzer::getRandomField(int type) { static constexpr UInt64 scales[] = {0, 1, 2, 10}; return DecimalField( - bad_int64_values[fuzz_rand() % (sizeof(bad_int64_values) - / sizeof(*bad_int64_values))], - scales[fuzz_rand() % (sizeof(scales) / sizeof(*scales))]); + bad_int64_values[fuzz_rand() % (sizeof(bad_int64_values) / sizeof(*bad_int64_values))], + static_cast(scales[fuzz_rand() % (sizeof(scales) / sizeof(*scales))]) + ); } default: assert(false); diff --git a/src/Columns/ColumnArray.cpp b/src/Columns/ColumnArray.cpp index bb56baf9216..d34ae640962 100644 --- a/src/Columns/ColumnArray.cpp +++ b/src/Columns/ColumnArray.cpp @@ -277,13 +277,13 @@ void ColumnArray::updateWeakHash32(WeakHash32 & hash) const { /// This row improves hash a little bit according to integration tests. /// It is the same as to use previous hash value as the first element of array. - hash_data[i] = intHashCRC32(hash_data[i]); + hash_data[i] = static_cast(intHashCRC32(hash_data[i])); for (size_t row = prev_offset; row < offsets_data[i]; ++row) /// It is probably not the best way to combine hashes. 
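
Note: the EINTR loop added to MultiplexedConnections above restarts select() when it was interrupted by a signal; since Poco reports that case as a return value of 0, errno is cleared beforehand to distinguish EINTR from a genuine timeout. The same retry pattern with plain poll(2), as a standalone sketch rather than the actual connection code:

#include <cerrno>
#include <poll.h>

/// Wait for readiness, restarting the call if a signal interrupts it.
/// Returns the number of ready descriptors, 0 on timeout, -1 on error.
int pollRetryOnEINTR(pollfd * fds, nfds_t count, int timeout_ms)
{
    while (true)
    {
        int ready = ::poll(fds, count, timeout_ms);
        if (ready < 0 && errno == EINTR)
            continue;   /// interrupted by a signal: try again
        return ready;
    }
}
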
/// But much better then xor which lead to similar hash for arrays like [1], [1, 1, 1], [1, 1, 1, 1, 1], ... /// Much better implementation - to add offsets as an optional argument to updateWeakHash32. - hash_data[i] = intHashCRC32(internal_hash_data[row], hash_data[i]); + hash_data[i] = static_cast(intHashCRC32(internal_hash_data[row], hash_data[i])); prev_offset = offsets_data[i]; } @@ -569,8 +569,8 @@ void ColumnArray::expand(const IColumn::Filter & mask, bool inverted) if (mask.size() < offsets_data.size()) throw Exception("Mask size should be no less than data size.", ErrorCodes::LOGICAL_ERROR); - int index = mask.size() - 1; - int from = offsets_data.size() - 1; + ssize_t index = mask.size() - 1; + ssize_t from = offsets_data.size() - 1; offsets_data.resize(mask.size()); UInt64 last_offset = offsets_data[from]; while (index >= 0) diff --git a/src/Columns/ColumnArray.h b/src/Columns/ColumnArray.h index 4e951ec28b8..5970802f598 100644 --- a/src/Columns/ColumnArray.h +++ b/src/Columns/ColumnArray.h @@ -176,6 +176,9 @@ public: void getIndicesOfNonDefaultRows(Offsets & indices, size_t from, size_t limit) const override; + void finalize() override { data->finalize(); } + bool isFinalized() const override { return data->isFinalized(); } + bool isCollationSupported() const override { return getData().isCollationSupported(); } size_t getNumberOfDimensions() const; diff --git a/src/Columns/ColumnCompressed.cpp b/src/Columns/ColumnCompressed.cpp index 292c6968b86..cdf604d89f7 100644 --- a/src/Columns/ColumnCompressed.cpp +++ b/src/Columns/ColumnCompressed.cpp @@ -27,8 +27,8 @@ std::shared_ptr> ColumnCompressed::compressBuffer(const void * data, si auto compressed_size = LZ4_compress_default( reinterpret_cast(data), compressed.data(), - data_size, - max_dest_size); + static_cast(data_size), + static_cast(max_dest_size)); if (compressed_size <= 0) throw Exception(ErrorCodes::CANNOT_COMPRESS, "Cannot compress column"); @@ -51,8 +51,8 @@ void ColumnCompressed::decompressBuffer( auto processed_size = LZ4_decompress_safe( reinterpret_cast(compressed_data), reinterpret_cast(decompressed_data), - compressed_size, - decompressed_size); + static_cast(compressed_size), + static_cast(decompressed_size)); if (processed_size <= 0) throw Exception(ErrorCodes::CANNOT_DECOMPRESS, "Cannot decompress column"); diff --git a/src/Columns/ColumnConst.cpp b/src/Columns/ColumnConst.cpp index 1864c0194f7..ca691d16f36 100644 --- a/src/Columns/ColumnConst.cpp +++ b/src/Columns/ColumnConst.cpp @@ -148,7 +148,7 @@ void ColumnConst::updateWeakHash32(WeakHash32 & hash) const size_t data_hash = element_hash.getData()[0]; for (auto & value : hash.getData()) - value = intHashCRC32(data_hash, value); + value = static_cast(intHashCRC32(data_hash, value)); } void ColumnConst::compareColumn( diff --git a/src/Columns/ColumnDecimal.cpp b/src/Columns/ColumnDecimal.cpp index 63b76dbb230..33efe440220 100644 --- a/src/Columns/ColumnDecimal.cpp +++ b/src/Columns/ColumnDecimal.cpp @@ -109,7 +109,7 @@ void ColumnDecimal::updateWeakHash32(WeakHash32 & hash) const while (begin < end) { - *hash_data = intHashCRC32(*begin, *hash_data); + *hash_data = static_cast(intHashCRC32(*begin, *hash_data)); ++begin; ++hash_data; } diff --git a/src/Columns/ColumnFixedString.cpp b/src/Columns/ColumnFixedString.cpp index 7038579d436..a45d4a0b5f1 100644 --- a/src/Columns/ColumnFixedString.cpp +++ b/src/Columns/ColumnFixedString.cpp @@ -277,8 +277,8 @@ void ColumnFixedString::expand(const IColumn::Filter & mask, bool inverted) if (mask.size() < size()) throw 
Exception("Mask size should be no less than data size.", ErrorCodes::LOGICAL_ERROR); - int index = mask.size() - 1; - int from = size() - 1; + ssize_t index = mask.size() - 1; + ssize_t from = size() - 1; chars.resize_fill(mask.size() * n, 0); while (index >= 0) { diff --git a/src/Columns/ColumnLowCardinality.cpp b/src/Columns/ColumnLowCardinality.cpp index 17e9bd97669..0981a5b01fa 100644 --- a/src/Columns/ColumnLowCardinality.cpp +++ b/src/Columns/ColumnLowCardinality.cpp @@ -46,7 +46,7 @@ namespace HashMap hash_map; for (auto val : index) - hash_map.insert({val, hash_map.size()}); + hash_map.insert({val, static_cast(hash_map.size())}); auto res_col = ColumnVector::create(); auto & data = res_col->getData(); @@ -632,7 +632,7 @@ void ColumnLowCardinality::Index::convertPositions() /// TODO: Optimize with SSE? for (size_t i = 0; i < size; ++i) - new_data[i] = data[i]; + new_data[i] = static_cast(data[i]); positions = std::move(new_positions); size_of_type = sizeof(IndexType); @@ -717,7 +717,7 @@ void ColumnLowCardinality::Index::insertPositionsRange(const IColumn & column, U positions_data.resize(size + limit); for (UInt64 i = 0; i < limit; ++i) - positions_data[size + i] = column_data[offset + i]; + positions_data[size + i] = static_cast(column_data[offset + i]); }; callForType(std::move(copy), size_of_type); @@ -789,7 +789,7 @@ void ColumnLowCardinality::Index::updateWeakHash(WeakHash32 & hash, WeakHash32 & auto size = data.size(); for (size_t i = 0; i < size; ++i) - hash_data[i] = intHashCRC32(dict_hash_data[data[i]], hash_data[i]); + hash_data[i] = static_cast(intHashCRC32(dict_hash_data[data[i]], hash_data[i])); }; callForType(std::move(update_weak_hash), size_of_type); diff --git a/src/Columns/ColumnMap.h b/src/Columns/ColumnMap.h index a3e171008ff..1e03633ced7 100644 --- a/src/Columns/ColumnMap.h +++ b/src/Columns/ColumnMap.h @@ -93,6 +93,8 @@ public: bool structureEquals(const IColumn & rhs) const override; double getRatioOfDefaultRows(double sample_ratio) const override; void getIndicesOfNonDefaultRows(Offsets & indices, size_t from, size_t limit) const override; + void finalize() override { nested->finalize(); } + bool isFinalized() const override { return nested->isFinalized(); } const ColumnArray & getNestedColumn() const { return assert_cast(*nested); } ColumnArray & getNestedColumn() { return assert_cast(*nested); } diff --git a/src/Columns/ColumnObject.cpp b/src/Columns/ColumnObject.cpp index 86586559ff7..bf4630137d5 100644 --- a/src/Columns/ColumnObject.cpp +++ b/src/Columns/ColumnObject.cpp @@ -732,8 +732,8 @@ void ColumnObject::get(size_t n, Field & res) const { assert(n < size()); res = Object(); - auto & object = res.get(); + for (const auto & entry : subcolumns) { auto it = object.try_emplace(entry->path.getPath()).first; @@ -744,7 +744,6 @@ void ColumnObject::get(size_t n, Field & res) const void ColumnObject::insertFrom(const IColumn & src, size_t n) { insert(src[n]); - finalize(); } void ColumnObject::insertRangeFrom(const IColumn & src, size_t start, size_t length) @@ -792,9 +791,8 @@ MutableColumnPtr ColumnObject::applyForSubcolumns(Func && func) const { if (!isFinalized()) { - auto finalized = IColumn::mutate(getPtr()); + auto finalized = cloneFinalized(); auto & finalized_object = assert_cast(*finalized); - finalized_object.finalize(); return finalized_object.applyForSubcolumns(std::forward(func)); } diff --git a/src/Columns/ColumnObject.h b/src/Columns/ColumnObject.h index f32356fed6e..8fcf3d41fba 100644 --- a/src/Columns/ColumnObject.h +++ 
b/src/Columns/ColumnObject.h @@ -198,10 +198,6 @@ public: Subcolumns & getSubcolumns() { return subcolumns; } PathsInData getKeys() const; - /// Finalizes all subcolumns. - void finalize(); - bool isFinalized() const; - /// Part of interface const char * getFamilyName() const override { return "Object"; } @@ -219,12 +215,17 @@ public: void popBack(size_t length) override; Field operator[](size_t n) const override; void get(size_t n, Field & res) const override; + ColumnPtr permute(const Permutation & perm, size_t limit) const override; ColumnPtr filter(const Filter & filter, ssize_t result_size_hint) const override; ColumnPtr index(const IColumn & indexes, size_t limit) const override; ColumnPtr replicate(const Offsets & offsets) const override; MutableColumnPtr cloneResized(size_t new_size) const override; + /// Finalizes all subcolumns. + void finalize() override; + bool isFinalized() const override; + /// Order of rows in ColumnObject is undefined. void getPermutation(PermutationSortDirection, PermutationSortStability, size_t, int, Permutation & res) const override; void compareColumn(const IColumn & rhs, size_t rhs_row_num, @@ -264,9 +265,7 @@ private: template MutableColumnPtr applyForSubcolumns(Func && func) const; - /// For given subcolumn return subcolumn from the same Nested type. /// It's used to get shared sized of Nested to insert correct default values. const Subcolumns::Node * getLeafOfTheSameNested(const Subcolumns::NodePtr & entry) const; }; - } diff --git a/src/Columns/ColumnString.cpp b/src/Columns/ColumnString.cpp index 9c8082dcd22..982951f05b0 100644 --- a/src/Columns/ColumnString.cpp +++ b/src/Columns/ColumnString.cpp @@ -168,8 +168,8 @@ void ColumnString::expand(const IColumn::Filter & mask, bool inverted) /// We cannot change only offsets, because each string should end with terminating zero byte. /// So, we will insert one zero byte when mask value is zero. - int index = mask.size() - 1; - int from = offsets_data.size() - 1; + ssize_t index = mask.size() - 1; + ssize_t from = offsets_data.size() - 1; /// mask.size() - offsets_data.size() should be equal to the number of zeros in mask /// (if not, one of exceptions below will throw) and we can calculate the resulting chars size. 
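
Note: the expand() implementations above switch their backward-iteration counters from int to ssize_t: the loop runs until the index drops below zero, so the counter must be signed and wide enough for large columns. A small sketch of the expansion itself, walking from the back and filling masked-out rows with a default value (generic std::vector and std::ptrdiff_t instead of PaddedPODArray and ssize_t; names are illustrative):

#include <cstddef>
#include <stdexcept>
#include <vector>

/// Expand `data` to mask.size() rows: rows with mask[i] != 0 take the next value
/// from the original data (in order), rows with mask[i] == 0 become `default_value`.
template <typename T>
void expandByMask(std::vector<T> & data, const std::vector<unsigned char> & mask, T default_value)
{
    if (mask.size() < data.size())
        throw std::logic_error("Mask size should be no less than data size.");

    std::ptrdiff_t from = static_cast<std::ptrdiff_t>(data.size()) - 1;   /// last original value
    std::ptrdiff_t index = static_cast<std::ptrdiff_t>(mask.size()) - 1;  /// last output slot
    data.resize(mask.size());

    while (index >= 0)
    {
        if (mask[index])
        {
            if (from < 0)
                throw std::logic_error("Too many set values in mask.");
            data[index] = data[from];   /// move the existing value to its new slot
            --from;
        }
        else
            data[index] = default_value;
        --index;
    }
}
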
UInt64 last_offset = offsets_data[from] + (mask.size() - offsets_data.size()); diff --git a/src/Columns/ColumnTuple.cpp b/src/Columns/ColumnTuple.cpp index 3577b6dee28..d8a43bf510d 100644 --- a/src/Columns/ColumnTuple.cpp +++ b/src/Columns/ColumnTuple.cpp @@ -570,4 +570,15 @@ void ColumnTuple::getIndicesOfNonDefaultRows(Offsets & indices, size_t from, siz return getIndicesOfNonDefaultRowsImpl(indices, from, limit); } +void ColumnTuple::finalize() +{ + for (auto & column : columns) + column->finalize(); +} + +bool ColumnTuple::isFinalized() const +{ + return std::all_of(columns.begin(), columns.end(), [](const auto & column) { return column->isFinalized(); }); +} + } diff --git a/src/Columns/ColumnTuple.h b/src/Columns/ColumnTuple.h index 385de7db1e7..96395d4edfb 100644 --- a/src/Columns/ColumnTuple.h +++ b/src/Columns/ColumnTuple.h @@ -103,6 +103,8 @@ public: ColumnPtr compress() const override; double getRatioOfDefaultRows(double sample_ratio) const override; void getIndicesOfNonDefaultRows(Offsets & indices, size_t from, size_t limit) const override; + void finalize() override; + bool isFinalized() const override; size_t tupleSize() const { return columns.size(); } diff --git a/src/Columns/ColumnUnique.h b/src/Columns/ColumnUnique.h index bba10bfebf0..a1579a46ae0 100644 --- a/src/Columns/ColumnUnique.h +++ b/src/Columns/ColumnUnique.h @@ -550,7 +550,7 @@ MutableColumnPtr ColumnUnique::uniqueInsertRangeImpl( auto insert_key = [&](StringRef ref, ReverseIndex & cur_index) -> MutableColumnPtr { auto inserted_pos = cur_index.insert(ref); - positions[num_added_rows] = inserted_pos; + positions[num_added_rows] = static_cast(inserted_pos); if (inserted_pos == next_position) return update_position(next_position); @@ -562,9 +562,9 @@ MutableColumnPtr ColumnUnique::uniqueInsertRangeImpl( auto row = start + num_added_rows; if (null_map && (*null_map)[row]) - positions[num_added_rows] = getNullValueIndex(); + positions[num_added_rows] = static_cast(getNullValueIndex()); else if (column->compareAt(getNestedTypeDefaultValueIndex(), row, *src_column, 1) == 0) - positions[num_added_rows] = getNestedTypeDefaultValueIndex(); + positions[num_added_rows] = static_cast(getNestedTypeDefaultValueIndex()); else { auto ref = src_column->getDataAt(row); @@ -576,7 +576,7 @@ MutableColumnPtr ColumnUnique::uniqueInsertRangeImpl( if (insertion_point == reverse_index.lastInsertionPoint()) res = insert_key(ref, *secondary_index); else - positions[num_added_rows] = insertion_point; + positions[num_added_rows] = static_cast(insertion_point); } else res = insert_key(ref, reverse_index); diff --git a/src/Columns/ColumnVector.cpp b/src/Columns/ColumnVector.cpp index cb570c87498..30e7423fde0 100644 --- a/src/Columns/ColumnVector.cpp +++ b/src/Columns/ColumnVector.cpp @@ -12,12 +12,14 @@ #include #include #include +#include #include #include #include #include #include +#include #include #include @@ -25,6 +27,10 @@ # include #endif +#if USE_MULTITARGET_CODE +# include +#endif + #if USE_EMBEDDED_COMPILER #include #include @@ -84,7 +90,7 @@ void ColumnVector::updateWeakHash32(WeakHash32 & hash) const while (begin < end) { - *hash_data = hashCRC32(*begin, *hash_data); + *hash_data = static_cast(hashCRC32(*begin, *hash_data)); ++begin; ++hash_data; } @@ -471,6 +477,128 @@ void ColumnVector::insertRangeFrom(const IColumn & src, size_t start, size_t memcpy(data.data() + old_size, &src_vec.data[start], length * sizeof(data[0])); } +static inline UInt64 blsr(UInt64 mask) +{ +#ifdef __BMI__ + return _blsr_u64(mask); +#else + return mask 
& (mask-1); +#endif +} + +DECLARE_DEFAULT_CODE( +template +inline void doFilterAligned(const UInt8 *& filt_pos, const UInt8 *& filt_end_aligned, const T *& data_pos, Container & res_data) +{ + while (filt_pos < filt_end_aligned) + { + UInt64 mask = bytes64MaskToBits64Mask(filt_pos); + + if (0xffffffffffffffff == mask) + { + res_data.insert(data_pos, data_pos + SIMD_ELEMENTS); + } + else + { + while (mask) + { + size_t index = std::countr_zero(mask); + res_data.push_back(data_pos[index]); + mask = blsr(mask); + } + } + + filt_pos += SIMD_ELEMENTS; + data_pos += SIMD_ELEMENTS; + } +} +) + +namespace +{ +template +void resize(Container & res_data, size_t reserve_size) +{ +#if defined(MEMORY_SANITIZER) + res_data.resize_fill(reserve_size, static_cast(0)); // MSan doesn't recognize that all allocated memory is written by AVX-512 intrinsics. +#else + res_data.resize(reserve_size); +#endif +} +} + +DECLARE_AVX512VBMI2_SPECIFIC_CODE( +template +inline void compressStoreAVX512(const void *src, void *dst, const UInt64 mask) +{ + __m512i vsrc = _mm512_loadu_si512(src); + if constexpr (ELEMENT_WIDTH == 1) + _mm512_mask_compressstoreu_epi8(dst, static_cast<__mmask64>(mask), vsrc); + else if constexpr (ELEMENT_WIDTH == 2) + _mm512_mask_compressstoreu_epi16(dst, static_cast<__mmask32>(mask), vsrc); + else if constexpr (ELEMENT_WIDTH == 4) + _mm512_mask_compressstoreu_epi32(dst, static_cast<__mmask16>(mask), vsrc); + else if constexpr (ELEMENT_WIDTH == 8) + _mm512_mask_compressstoreu_epi64(dst, static_cast<__mmask8>(mask), vsrc); +} + +template +inline void doFilterAligned(const UInt8 *& filt_pos, const UInt8 *& filt_end_aligned, const T *& data_pos, Container & res_data) +{ + static constexpr size_t VEC_LEN = 64; /// AVX512 vector length - 64 bytes + static constexpr size_t ELEMENT_WIDTH = sizeof(T); + static constexpr size_t ELEMENTS_PER_VEC = VEC_LEN / ELEMENT_WIDTH; + static constexpr UInt64 KMASK = 0xffffffffffffffff >> (64 - ELEMENTS_PER_VEC); + + size_t current_offset = res_data.size(); + size_t reserve_size = res_data.size(); + size_t alloc_size = SIMD_ELEMENTS * 2; + + while (filt_pos < filt_end_aligned) + { + /// to avoid calling resize too frequently, resize to reserve buffer. + if (reserve_size - current_offset < SIMD_ELEMENTS) + { + reserve_size += alloc_size; + resize(res_data, reserve_size); + alloc_size *= 2; + } + + UInt64 mask = bytes64MaskToBits64Mask(filt_pos); + + if (0xffffffffffffffff == mask) + { + for (size_t i = 0; i < SIMD_ELEMENTS; i += ELEMENTS_PER_VEC) + _mm512_storeu_si512(reinterpret_cast(&res_data[current_offset + i]), + _mm512_loadu_si512(reinterpret_cast(data_pos + i))); + current_offset += SIMD_ELEMENTS; + } + else + { + if (mask) + { + for (size_t i = 0; i < SIMD_ELEMENTS; i += ELEMENTS_PER_VEC) + { + compressStoreAVX512(reinterpret_cast(data_pos + i), + reinterpret_cast(&res_data[current_offset]), mask & KMASK); + current_offset += std::popcount(mask & KMASK); + /// prepare mask for next iter, if ELEMENTS_PER_VEC = 64, no next iter + if (ELEMENTS_PER_VEC < 64) + { + mask >>= ELEMENTS_PER_VEC; + } + } + } + } + + filt_pos += SIMD_ELEMENTS; + data_pos += SIMD_ELEMENTS; + } + /// resize to the real size. + res_data.resize(current_offset); +} +) + template ColumnPtr ColumnVector::filter(const IColumn::Filter & filt, ssize_t result_size_hint) const { @@ -491,36 +619,18 @@ ColumnPtr ColumnVector::filter(const IColumn::Filter & filt, ssize_t result_s /** A slightly more optimized version. 
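
Note: the default doFilterAligned() above checks 64 filter bytes at a time: if the 64-bit mask (produced by bytes64MaskToBits64Mask) is all ones, the whole block is copied; otherwise only the set bits are visited, using std::countr_zero to find the next selected row and mask & (mask - 1) (BLSR) to clear it. The core bit loop in isolation, as a sketch with illustrative names:

#include <bit>
#include <cstddef>
#include <cstdint>
#include <vector>

/// Append the selected elements of a 64-element block to `out`.
/// Bit i of `mask` corresponds to block[i].
template <typename T>
void appendSelected(const T * block, uint64_t mask, std::vector<T> & out)
{
    if (mask == ~uint64_t{0})
    {
        out.insert(out.end(), block, block + 64);   /// the whole block passes the filter
        return;
    }
    while (mask)
    {
        size_t index = std::countr_zero(mask);      /// position of the lowest set bit
        out.push_back(block[index]);
        mask &= mask - 1;                           /// clear that bit (BLSR)
    }
}
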
* Based on the assumption that often pieces of consecutive values * completely pass or do not pass the filter. - * Therefore, we will optimistically check the parts of `SIMD_BYTES` values. + * Therefore, we will optimistically check the parts of `SIMD_ELEMENTS` values. */ - static constexpr size_t SIMD_BYTES = 64; - const UInt8 * filt_end_aligned = filt_pos + size / SIMD_BYTES * SIMD_BYTES; + static constexpr size_t SIMD_ELEMENTS = 64; + const UInt8 * filt_end_aligned = filt_pos + size / SIMD_ELEMENTS * SIMD_ELEMENTS; - while (filt_pos < filt_end_aligned) - { - UInt64 mask = bytes64MaskToBits64Mask(filt_pos); - - if (0xffffffffffffffff == mask) - { - res_data.insert(data_pos, data_pos + SIMD_BYTES); - } - else - { - while (mask) - { - size_t index = std::countr_zero(mask); - res_data.push_back(data_pos[index]); - #ifdef __BMI__ - mask = _blsr_u64(mask); - #else - mask = mask & (mask-1); - #endif - } - } - - filt_pos += SIMD_BYTES; - data_pos += SIMD_BYTES; - } +#if USE_MULTITARGET_CODE + static constexpr bool VBMI2_CAPABLE = sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 || sizeof(T) == 8; + if (VBMI2_CAPABLE && isArchSupported(TargetArch::AVX512VBMI2)) + TargetSpecific::AVX512VBMI2::doFilterAligned(filt_pos, filt_end_aligned, data_pos, res_data); + else +#endif + TargetSpecific::Default::doFilterAligned(filt_pos, filt_end_aligned, data_pos, res_data); while (filt_pos < filt_end) { diff --git a/src/Columns/ColumnVector.h b/src/Columns/ColumnVector.h index 0f388ef8ac3..a601dd8b405 100644 --- a/src/Columns/ColumnVector.h +++ b/src/Columns/ColumnVector.h @@ -7,11 +7,15 @@ #include #include #include +#include #include #include #include "config.h" +#if USE_MULTITARGET_CODE +# include +#endif namespace DB { @@ -391,6 +395,127 @@ protected: Container data; }; +DECLARE_DEFAULT_CODE( +template +inline void vectorIndexImpl(const Container & data, const PaddedPODArray & indexes, size_t limit, Container & res_data) +{ + for (size_t i = 0; i < limit; ++i) + res_data[i] = data[indexes[i]]; +} +); + +DECLARE_AVX512VBMI_SPECIFIC_CODE( +template +inline void vectorIndexImpl(const Container & data, const PaddedPODArray & indexes, size_t limit, Container & res_data) +{ + static constexpr UInt64 MASK64 = 0xffffffffffffffff; + const size_t limit64 = limit & ~63; + size_t pos = 0; + size_t data_size = data.size(); + + auto data_pos = reinterpret_cast(data.data()); + auto indexes_pos = reinterpret_cast(indexes.data()); + auto res_pos = reinterpret_cast(res_data.data()); + + if (limit == 0) + return; /// nothing to do, just return + + if (data_size <= 64) + { + /// one single mask load for table size <= 64 + __mmask64 last_mask = MASK64 >> (64 - data_size); + __m512i table1 = _mm512_maskz_loadu_epi8(last_mask, data_pos); + + /// 64 bytes table lookup using one single permutexvar_epi8 + while (pos < limit64) + { + __m512i vidx = _mm512_loadu_epi8(indexes_pos + pos); + __m512i out = _mm512_permutexvar_epi8(vidx, table1); + _mm512_storeu_epi8(res_pos + pos, out); + pos += 64; + } + /// tail handling + if (limit > limit64) + { + __mmask64 tail_mask = MASK64 >> (limit64 + 64 - limit); + __m512i vidx = _mm512_maskz_loadu_epi8(tail_mask, indexes_pos + pos); + __m512i out = _mm512_permutexvar_epi8(vidx, table1); + _mm512_mask_storeu_epi8(res_pos + pos, tail_mask, out); + } + } + else if (data_size <= 128) + { + /// table size (64, 128] requires 2 zmm load + __mmask64 last_mask = MASK64 >> (128 - data_size); + __m512i table1 = _mm512_loadu_epi8(data_pos); + __m512i table2 = _mm512_maskz_loadu_epi8(last_mask, data_pos 
+ 64); + + /// 128 bytes table lookup using one single permute2xvar_epi8 + while (pos < limit64) + { + __m512i vidx = _mm512_loadu_epi8(indexes_pos + pos); + __m512i out = _mm512_permutex2var_epi8(table1, vidx, table2); + _mm512_storeu_epi8(res_pos + pos, out); + pos += 64; + } + if (limit > limit64) + { + __mmask64 tail_mask = MASK64 >> (limit64 + 64 - limit); + __m512i vidx = _mm512_maskz_loadu_epi8(tail_mask, indexes_pos + pos); + __m512i out = _mm512_permutex2var_epi8(table1, vidx, table2); + _mm512_mask_storeu_epi8(res_pos + pos, tail_mask, out); + } + } + else + { + if (data_size > 256) + { + /// byte index will not exceed 256 boundary. + data_size = 256; + } + + __m512i table1 = _mm512_loadu_epi8(data_pos); + __m512i table2 = _mm512_loadu_epi8(data_pos + 64); + __m512i table3, table4; + if (data_size <= 192) + { + /// only 3 tables need to load if size <= 192 + __mmask64 last_mask = MASK64 >> (192 - data_size); + table3 = _mm512_maskz_loadu_epi8(last_mask, data_pos + 128); + table4 = _mm512_setzero_si512(); + } + else + { + __mmask64 last_mask = MASK64 >> (256 - data_size); + table3 = _mm512_loadu_epi8(data_pos + 128); + table4 = _mm512_maskz_loadu_epi8(last_mask, data_pos + 192); + } + + /// 256 bytes table lookup can use: 2 permute2xvar_epi8 plus 1 blender with MSB + while (pos < limit64) + { + __m512i vidx = _mm512_loadu_epi8(indexes_pos + pos); + __m512i tmp1 = _mm512_permutex2var_epi8(table1, vidx, table2); + __m512i tmp2 = _mm512_permutex2var_epi8(table3, vidx, table4); + __mmask64 msb = _mm512_movepi8_mask(vidx); + __m512i out = _mm512_mask_blend_epi8(msb, tmp1, tmp2); + _mm512_storeu_epi8(res_pos + pos, out); + pos += 64; + } + if (limit > limit64) + { + __mmask64 tail_mask = MASK64 >> (limit64 + 64 - limit); + __m512i vidx = _mm512_maskz_loadu_epi8(tail_mask, indexes_pos + pos); + __m512i tmp1 = _mm512_permutex2var_epi8(table1, vidx, table2); + __m512i tmp2 = _mm512_permutex2var_epi8(table3, vidx, table4); + __mmask64 msb = _mm512_movepi8_mask(vidx); + __m512i out = _mm512_mask_blend_epi8(msb, tmp1, tmp2); + _mm512_mask_storeu_epi8(res_pos + pos, tail_mask, out); + } + } +} +); + template template ColumnPtr ColumnVector::indexImpl(const PaddedPODArray & indexes, size_t limit) const @@ -399,8 +524,18 @@ ColumnPtr ColumnVector::indexImpl(const PaddedPODArray & indexes, size_ auto res = this->create(limit); typename Self::Container & res_data = res->getData(); - for (size_t i = 0; i < limit; ++i) - res_data[i] = data[indexes[i]]; +#if USE_MULTITARGET_CODE + if constexpr (sizeof(T) == 1 && sizeof(Type) == 1) + { + /// VBMI optimization only applicable for (U)Int8 types + if (isArchSupported(TargetArch::AVX512VBMI)) + { + TargetSpecific::AVX512VBMI::vectorIndexImpl(data, indexes, limit, res_data); + return res; + } + } +#endif + TargetSpecific::Default::vectorIndexImpl(data, indexes, limit, res_data); return res; } diff --git a/src/Columns/ColumnVectorHelper.h b/src/Columns/ColumnVectorHelper.h index 36cbfbf640e..b8ea6ca427f 100644 --- a/src/Columns/ColumnVectorHelper.h +++ b/src/Columns/ColumnVectorHelper.h @@ -28,13 +28,17 @@ public: template const char * getRawDataBegin() const { - return reinterpret_cast, 15, 16> *>(reinterpret_cast(this) + sizeof(*this))->raw_data(); + return reinterpret_cast, PADDING_FOR_SIMD - 1, PADDING_FOR_SIMD> *>( + reinterpret_cast(this) + sizeof(*this)) + ->raw_data(); } template void insertRawData(const char * ptr) { - return reinterpret_cast, 15, 16> *>(reinterpret_cast(this) + sizeof(*this))->push_back_raw(ptr); + return reinterpret_cast, 
PADDING_FOR_SIMD - 1, PADDING_FOR_SIMD> *>( + reinterpret_cast(this) + sizeof(*this)) + ->push_back_raw(ptr); } }; diff --git a/src/Columns/ColumnsDateTime.h b/src/Columns/ColumnsDateTime.h new file mode 100644 index 00000000000..90d21ed5ff7 --- /dev/null +++ b/src/Columns/ColumnsDateTime.h @@ -0,0 +1,20 @@ +#pragma once + +#include +#include +#include +#include +#include + + +namespace DB +{ + +/** Convenience typedefs for columns of SQL types Date, Date32, DateTime and DateTime64. */ + +using ColumnDate = DataTypeDate::ColumnType; +using ColumnDate32 = DataTypeDate32::ColumnType; +using ColumnDateTime = DataTypeDateTime::ColumnType; +using ColumnDateTime64 = DataTypeDateTime64::ColumnType; + +} diff --git a/src/Columns/IColumn.h b/src/Columns/IColumn.h index 19f3dea4f82..461e41e3eec 100644 --- a/src/Columns/IColumn.h +++ b/src/Columns/IColumn.h @@ -85,8 +85,8 @@ public: [[nodiscard]] virtual MutablePtr cloneEmpty() const { return cloneResized(0); } /// Creates column with the same type and specified size. - /// If size is less current size, then data is cut. - /// If size is greater, than default values are appended. + /// If size is less than current size, then data is cut. + /// If size is greater, then default values are appended. [[nodiscard]] virtual MutablePtr cloneResized(size_t /*size*/) const { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot cloneResized() column {}", getName()); } /// Returns number of values in column. @@ -453,6 +453,16 @@ public: return getPtr(); } + /// Some columns may require finalization before using of other operations. + virtual void finalize() {} + virtual bool isFinalized() const { return true; } + + MutablePtr cloneFinalized() const + { + auto finalized = IColumn::mutate(getPtr()); + finalized->finalize(); + return finalized; + } [[nodiscard]] static MutablePtr mutate(Ptr ptr) { diff --git a/src/Columns/MaskOperations.cpp b/src/Columns/MaskOperations.cpp index 3120828921f..e320e1d57a3 100644 --- a/src/Columns/MaskOperations.cpp +++ b/src/Columns/MaskOperations.cpp @@ -22,8 +22,8 @@ void expandDataByMask(PaddedPODArray & data, const PaddedPODArray & ma if (mask.size() < data.size()) throw Exception("Mask size should be no less than data size.", ErrorCodes::LOGICAL_ERROR); - int from = data.size() - 1; - int index = mask.size() - 1; + ssize_t from = data.size() - 1; + ssize_t index = mask.size() - 1; data.resize(mask.size()); while (index >= 0) { @@ -317,7 +317,7 @@ int checkShortCircuitArguments(const ColumnsWithTypeAndName & arguments) for (size_t i = 0; i != arguments.size(); ++i) { if (checkAndGetShortCircuitArgument(arguments[i].column)) - last_short_circuit_argument_index = i; + last_short_circuit_argument_index = static_cast(i); } return last_short_circuit_argument_index; diff --git a/src/Columns/tests/gtest_column_vector.cpp b/src/Columns/tests/gtest_column_vector.cpp new file mode 100644 index 00000000000..5017d687791 --- /dev/null +++ b/src/Columns/tests/gtest_column_vector.cpp @@ -0,0 +1,157 @@ +#include +#include +#include +#include +#include +#include + +using namespace DB; + +static pcg64 rng(randomSeed()); +static constexpr int error_code = 12345; +static constexpr size_t TEST_RUNS = 500; +static constexpr size_t MAX_ROWS = 10000; +static const std::vector filter_ratios = {1, 2, 5, 11, 32, 64, 100, 1000}; +static const size_t K = filter_ratios.size(); + +template +static MutableColumnPtr createColumn(size_t n) +{ + auto column = ColumnVector::create(); + auto & values = column->getData(); + + for (size_t i = 0; i < n; ++i) + 
values.push_back(static_cast(i)); + + return column; +} + +bool checkFilter(const PaddedPODArray &flit, const IColumn & src, const IColumn & dst) +{ + size_t n = flit.size(); + size_t dst_size = dst.size(); + size_t j = 0; /// index of dest + for (size_t i = 0; i < n; ++i) + { + if (flit[i] != 0) + { + if ((dst_size <= j) || (src.compareAt(i, j, dst, 0) != 0)) + return false; + j++; + } + } + return dst_size == j; /// filtered size check +} + +template +static void testFilter() +{ + auto test_case = [&](size_t rows, size_t filter_ratio) + { + auto vector_column = createColumn(rows); + PaddedPODArray flit(rows); + for (size_t i = 0; i < rows; ++i) + flit[i] = rng() % filter_ratio == 0; + auto res_column = vector_column->filter(flit, -1); + + if (!checkFilter(flit, *vector_column, *res_column)) + throw Exception(error_code, "VectorColumn filter failure, type: {}", typeid(T).name()); + }; + + try + { + for (size_t i = 0; i < TEST_RUNS; ++i) + { + size_t rows = rng() % MAX_ROWS + 1; + size_t filter_ratio = filter_ratios[rng() % K]; + + test_case(rows, filter_ratio); + } + } + catch (const Exception & e) + { + FAIL() << e.displayText(); + } +} + +TEST(ColumnVector, Filter) +{ + testFilter(); + testFilter(); + testFilter(); + testFilter(); + testFilter(); + testFilter(); + testFilter(); + testFilter(); + testFilter(); +} + +template +static MutableColumnPtr createIndexColumn(size_t limit, size_t rows) +{ + auto column = ColumnVector::create(); + auto & values = column->getData(); + auto max = std::numeric_limits::max(); + limit = limit > max ? max : limit; + + for (size_t i = 0; i < rows; ++i) + { + T val = rng() % limit; + values.push_back(val); + } + + return column; +} + +template +static void testIndex() +{ + static const std::vector column_sizes = {64, 128, 196, 256, 512}; + + auto test_case = [&](size_t rows, size_t index_rows, size_t limit) + { + auto vector_column = createColumn(rows); + auto index_column = createIndexColumn(rows, index_rows); + auto res_column = vector_column->index(*index_column, limit); + if (limit == 0) + limit = index_column->size(); + + /// check results + if (limit != res_column->size()) + throw Exception(error_code, "ColumnVector index size not match to limit: {} {}", typeid(T).name(), typeid(IndexType).name()); + for (size_t i = 0; i < limit; ++i) + { + /// vector_column data is the same as index, so indexed column's value will equals to index_column. + if (res_column->get64(i) != index_column->get64(i)) + throw Exception(error_code, "ColumnVector index fail: {} {}", typeid(T).name(), typeid(IndexType).name()); + } + }; + + try + { + test_case(0, 0, 0); /// test for zero length index + for (size_t i = 0; i < TEST_RUNS; ++i) + { + /// make sure rows distribute in (column_sizes[r-1], colulmn_sizes[r]] + size_t row_idx = rng() % column_sizes.size(); + size_t row_base = row_idx > 0 ? 
column_sizes[row_idx - 1] : 0; + size_t rows = row_base + (rng() % (column_sizes[row_idx] - row_base) + 1); + size_t index_rows = rng() % MAX_ROWS + 1; + + test_case(rows, index_rows, 0); + test_case(rows, index_rows, static_cast(0.5 * index_rows)); + } + } + catch (const Exception & e) + { + FAIL() << e.displayText(); + } +} + +TEST(ColumnVector, Index) +{ + testIndex(); + testIndex(); + testIndex(); +} diff --git a/src/Columns/tests/gtest_weak_hash_32.cpp b/src/Columns/tests/gtest_weak_hash_32.cpp index 2fa6c0ea8ac..8027bd4d6cc 100644 --- a/src/Columns/tests/gtest_weak_hash_32.cpp +++ b/src/Columns/tests/gtest_weak_hash_32.cpp @@ -164,7 +164,7 @@ TEST(WeakHash32, ColumnVectorU32) for (int idx [[maybe_unused]] : {1, 2}) { - for (uint64_t i = 0; i < 65536; ++i) + for (uint32_t i = 0; i < 65536; ++i) data.push_back(i << 16u); } @@ -181,7 +181,7 @@ TEST(WeakHash32, ColumnVectorI32) for (int idx [[maybe_unused]] : {1, 2}) { - for (int64_t i = -32768; i < 32768; ++i) + for (int32_t i = -32768; i < 32768; ++i) data.push_back(i << 16); //-V610 } @@ -240,7 +240,7 @@ TEST(WeakHash32, ColumnVectorU128) val.items[0] = i << 32u; val.items[1] = i << 32u; data.push_back(val); - eq_data.push_back(i); + eq_data.push_back(static_cast(i)); } } @@ -274,7 +274,7 @@ TEST(WeakHash32, ColumnDecimal32) for (int idx [[maybe_unused]] : {1, 2}) { - for (int64_t i = -32768; i < 32768; ++i) + for (int32_t i = -32768; i < 32768; ++i) data.push_back(i << 16); //-V610 } @@ -326,7 +326,7 @@ TEST(WeakHash32, ColumnString1) for (int idx [[maybe_unused]] : {1, 2}) { - for (int64_t i = 0; i < 65536; ++i) + for (int32_t i = 0; i < 65536; ++i) { data.push_back(i); auto str = std::to_string(i); @@ -359,7 +359,7 @@ TEST(WeakHash32, ColumnString2) { size_t max_size = 3000; char letter = 'a'; - for (int64_t i = 0; i < 65536; ++i) + for (int32_t i = 0; i < 65536; ++i) { data.push_back(i); size_t s = (i % max_size) + 1; @@ -401,7 +401,7 @@ TEST(WeakHash32, ColumnString3) char letter = 'a'; for (int64_t i = 0; i < 65536; ++i) { - data.push_back(i); + data.push_back(static_cast(i)); size_t s = (i % max_size) + 1; std::string str(s,'\0'); str[0] = letter; @@ -430,7 +430,7 @@ TEST(WeakHash32, ColumnFixedString) char letter = 'a'; for (int64_t i = 0; i < 65536; ++i) { - data.push_back(i); + data.push_back(static_cast(i)); size_t s = (i % max_size) + 1; std::string str(s, letter); col->insertData(str.data(), str.size()); @@ -471,7 +471,7 @@ TEST(WeakHash32, ColumnArray) UInt32 cur = 0; for (int64_t i = 0; i < 65536; ++i) { - eq_data.push_back(i); + eq_data.push_back(static_cast(i)); size_t s = (i % max_size) + 1; cur_off += s; @@ -505,9 +505,9 @@ TEST(WeakHash32, ColumnArray2) UInt64 cur_off = 0; for (int idx [[maybe_unused]] : {1, 2}) { - for (int64_t i = 0; i < 1000; ++i) + for (int32_t i = 0; i < 1000; ++i) { - for (size_t j = 0; j < 1000; ++j) + for (uint32_t j = 0; j < 1000; ++j) { eq_data.push_back(i * 1000 + j); @@ -556,7 +556,7 @@ TEST(WeakHash32, ColumnArrayArray) UInt32 cur = 1; for (int64_t i = 0; i < 3000; ++i) { - eq_data.push_back(i); + eq_data.push_back(static_cast(i)); size_t s = (i % max_size) + 1; cur_off2 += s; @@ -667,7 +667,7 @@ TEST(WeakHash32, ColumnTupleUInt64UInt64) { data1.push_back(l); data2.push_back(i << 32u); - eq.push_back(l * 65536 + i); + eq.push_back(static_cast(l * 65536 + i)); } } @@ -695,7 +695,7 @@ TEST(WeakHash32, ColumnTupleUInt64String) size_t max_size = 3000; char letter = 'a'; - for (int64_t i = 0; i < 65536; ++i) + for (int32_t i = 0; i < 65536; ++i) { data1.push_back(l); eq.push_back(l * 65536 
+ i); @@ -737,7 +737,7 @@ TEST(WeakHash32, ColumnTupleUInt64FixedString) for (int64_t i = 0; i < 65536; ++i) { data1.push_back(l); - eq.push_back(l * 65536 + i); + eq.push_back(static_cast(l * 65536 + i)); size_t s = (i % max_size) + 1; std::string str(s, letter); @@ -778,7 +778,7 @@ TEST(WeakHash32, ColumnTupleUInt64Array) auto l = idx % 2; UInt32 cur = 0; - for (int64_t i = 0; i < 65536; ++i) + for (int32_t i = 0; i < 65536; ++i) { data1.push_back(l); eq_data.push_back(l * 65536 + i); diff --git a/src/Common/Arena.h b/src/Common/Arena.h index b706f3b3413..17d53acd8f7 100644 --- a/src/Common/Arena.h +++ b/src/Common/Arena.h @@ -34,8 +34,7 @@ namespace DB class Arena : private boost::noncopyable { private: - /// Padding allows to use 'memcpySmallAllowReadWriteOverflow15' instead of 'memcpy'. - static constexpr size_t pad_right = 15; + static constexpr size_t pad_right = PADDING_FOR_SIMD - 1; /// Contiguous MemoryChunk of memory and pointer to free space inside it. Member of single-linked list. struct alignas(16) MemoryChunk : private Allocator /// empty base optimization diff --git a/src/Common/CaresPTRResolver.cpp b/src/Common/CaresPTRResolver.cpp index a02909309b6..99b4c34dfbd 100644 --- a/src/Common/CaresPTRResolver.cpp +++ b/src/Common/CaresPTRResolver.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include "ares.h" #include "netdb.h" @@ -40,6 +41,8 @@ namespace DB } } + std::mutex CaresPTRResolver::mutex; + CaresPTRResolver::CaresPTRResolver(CaresPTRResolver::provider_token) : channel(nullptr) { /* @@ -73,6 +76,8 @@ namespace DB std::unordered_set CaresPTRResolver::resolve(const std::string & ip) { + std::lock_guard guard(mutex); + std::unordered_set ptr_records; resolve(ip, ptr_records); @@ -83,6 +88,8 @@ namespace DB std::unordered_set CaresPTRResolver::resolve_v6(const std::string & ip) { + std::lock_guard guard(mutex); + std::unordered_set ptr_records; resolve_v6(ip, ptr_records); @@ -110,23 +117,83 @@ namespace DB void CaresPTRResolver::wait() { - timeval * tvp, tv; - fd_set read_fds; - fd_set write_fds; - int nfds; + int sockets[ARES_GETSOCK_MAXNUM]; + pollfd pollfd[ARES_GETSOCK_MAXNUM]; - for (;;) + while (true) { - FD_ZERO(&read_fds); - FD_ZERO(&write_fds); - nfds = ares_fds(channel, &read_fds,&write_fds); - if (nfds == 0) + auto readable_sockets = get_readable_sockets(sockets, pollfd); + auto timeout = calculate_timeout(); + + int number_of_fds_ready = 0; + if (!readable_sockets.empty()) + { + number_of_fds_ready = poll(readable_sockets.data(), static_cast(readable_sockets.size()), static_cast(timeout)); + } + + if (number_of_fds_ready > 0) + { + process_readable_sockets(readable_sockets); + } + else + { + process_possible_timeout(); + break; + } + } + } + + std::span CaresPTRResolver::get_readable_sockets(int * sockets, pollfd * pollfd) + { + int sockets_bitmask = ares_getsock(channel, sockets, ARES_GETSOCK_MAXNUM); + + int number_of_sockets_to_poll = 0; + + for (int i = 0; i < ARES_GETSOCK_MAXNUM; i++, number_of_sockets_to_poll++) + { + pollfd[i].events = 0; + pollfd[i].revents = 0; + + if (ARES_GETSOCK_READABLE(sockets_bitmask, i)) + { + pollfd[i].fd = sockets[i]; + pollfd[i].events = POLLIN; + } + else { break; } - tvp = ares_timeout(channel, nullptr, &tv); - select(nfds, &read_fds, &write_fds, nullptr, tvp); - ares_process(channel, &read_fds, &write_fds); + } + + return std::span(pollfd, number_of_sockets_to_poll); + } + + int64_t CaresPTRResolver::calculate_timeout() + { + timeval tv; + if (auto * tvp = ares_timeout(channel, nullptr, &tv)) + { + auto timeout = 
tvp->tv_sec * 1000 + tvp->tv_usec / 1000; + + return timeout; + } + + return 0; + } + + void CaresPTRResolver::process_possible_timeout() + { + /* Call ares_process() unconditonally here, even if we simply timed out + above, as otherwise the ares name resolve won't timeout! */ + ares_process_fd(channel, ARES_SOCKET_BAD, ARES_SOCKET_BAD); + } + + void CaresPTRResolver::process_readable_sockets(std::span readable_sockets) + { + for (auto readable_socket : readable_sockets) + { + auto fd = readable_socket.revents & POLLIN ? readable_socket.fd : ARES_SOCKET_BAD; + ares_process_fd(channel, fd, ARES_SOCKET_BAD); } } } diff --git a/src/Common/CaresPTRResolver.h b/src/Common/CaresPTRResolver.h index e5182d34682..9df6d7aeb72 100644 --- a/src/Common/CaresPTRResolver.h +++ b/src/Common/CaresPTRResolver.h @@ -1,5 +1,8 @@ #pragma once +#include +#include +#include #include "DNSPTRResolver.h" using ares_channel = struct ares_channeldata *; @@ -20,7 +23,6 @@ namespace DB * Allow only DNSPTRProvider to instantiate this class * */ struct provider_token {}; - public: explicit CaresPTRResolver(provider_token); ~CaresPTRResolver() override; @@ -36,7 +38,17 @@ namespace DB void resolve_v6(const std::string & ip, std::unordered_set & response); + std::span get_readable_sockets(int * sockets, pollfd * pollfd); + + int64_t calculate_timeout(); + + void process_possible_timeout(); + + void process_readable_sockets(std::span readable_sockets); + ares_channel channel; + + static std::mutex mutex; }; } diff --git a/src/Common/CombinedCardinalityEstimator.h b/src/Common/CombinedCardinalityEstimator.h index 3f4b481dce9..1911cafeaa2 100644 --- a/src/Common/CombinedCardinalityEstimator.h +++ b/src/Common/CombinedCardinalityEstimator.h @@ -65,7 +65,7 @@ public: private: using Small = SmallSet; using Medium = HashContainer; - using Large = HyperLogLogCounter; + using Large = HyperLogLogCounter; public: CombinedCardinalityEstimator() diff --git a/src/Common/CpuId.h b/src/Common/CpuId.h index 167fa22faf6..1e54ccf62b3 100644 --- a/src/Common/CpuId.h +++ b/src/Common/CpuId.h @@ -82,6 +82,7 @@ inline bool cpuid(UInt32 op, UInt32 * res) noexcept /// NOLINT OP(AVX512BW) \ OP(AVX512VL) \ OP(AVX512VBMI) \ + OP(AVX512VBMI2) \ OP(PREFETCHWT1) \ OP(SHA) \ OP(ADX) \ @@ -302,6 +303,11 @@ bool haveAVX512VBMI() noexcept return haveAVX512F() && ((CpuInfo(0x7, 0).registers.ecx >> 1) & 1u); } +bool haveAVX512VBMI2() noexcept +{ + return haveAVX512F() && ((CpuInfo(0x7, 0).registers.ecx >> 6) & 1u); +} + bool haveRDRAND() noexcept { return CpuInfo(0x0).registers.eax >= 0x7 && ((CpuInfo(0x1).registers.ecx >> 30) & 1u); diff --git a/src/Common/DNSPTRResolverProvider.cpp b/src/Common/DNSPTRResolverProvider.cpp index 97d601a3a78..91ce4dbb938 100644 --- a/src/Common/DNSPTRResolverProvider.cpp +++ b/src/Common/DNSPTRResolverProvider.cpp @@ -5,8 +5,10 @@ namespace DB { std::shared_ptr DNSPTRResolverProvider::get() { - return std::make_shared( + static auto resolver = std::make_shared( CaresPTRResolver::provider_token {} ); + + return resolver; } } diff --git a/src/Common/DateLUTImpl.cpp b/src/Common/DateLUTImpl.cpp index c4b32a3466b..6eb8b47b114 100644 --- a/src/Common/DateLUTImpl.cpp +++ b/src/Common/DateLUTImpl.cpp @@ -149,9 +149,9 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_) /// Fill lookup table for years and months. 
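
Note: CpuId.h above gains haveAVX512VBMI2(), which requires AVX-512F and reads bit 6 of ECX from CPUID leaf 7, sub-leaf 0. A standalone sketch of the same feature check using the compiler's <cpuid.h> helper (x86 with GCC/Clang only; the function name is illustrative, and the real code additionally dispatches through isArchSupported(TargetArch::AVX512VBMI2)):

#include <cpuid.h>

/// True if the CPU reports the AVX-512 VBMI2 instruction set
/// (CPUID.(EAX=7, ECX=0):ECX bit 6). Production code should also check
/// AVX-512F and OS support for the wide registers before using it.
bool cpuHasAVX512VBMI2()
{
    unsigned eax = 0, ebx = 0, ecx = 0, edx = 0;
    if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
        return false;   /// leaf 7 is not supported at all
    return (ecx >> 6) & 1u;
}
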
size_t year_months_lut_index = 0; - size_t first_day_of_last_month = 0; + unsigned first_day_of_last_month = 0; - for (size_t day = 0; day < DATE_LUT_SIZE; ++day) + for (unsigned day = 0; day < DATE_LUT_SIZE; ++day) { const Values & values = lut[day]; diff --git a/src/Common/DateLUTImpl.h b/src/Common/DateLUTImpl.h index a0d5a976f35..3afbb6735dc 100644 --- a/src/Common/DateLUTImpl.h +++ b/src/Common/DateLUTImpl.h @@ -73,7 +73,7 @@ private: return LUTIndex(0); if (index >= DATE_LUT_SIZE) return LUTIndex(DATE_LUT_SIZE - 1); - return LUTIndex{index}; + return LUTIndex{static_cast(index)}; } template @@ -229,12 +229,12 @@ private: if (t >= lut[guess].date) { if (guess + 1 >= DATE_LUT_SIZE || t < lut[guess + 1].date) - return LUTIndex(guess); + return LUTIndex(static_cast(guess)); - return LUTIndex(guess + 1); + return LUTIndex(static_cast(guess) + 1); } - return LUTIndex(guess ? guess - 1 : 0); + return LUTIndex(guess ? static_cast(guess) - 1 : 0); } static inline LUTIndex toLUTIndex(DayNum d) @@ -272,11 +272,11 @@ private: if (likely(offset_is_whole_number_of_hours_during_epoch)) { if (likely(x >= 0)) - return x / divisor * divisor; + return static_cast(x / divisor * divisor); /// Integer division for negative numbers rounds them towards zero (up). /// We will shift the number so it will be rounded towards -inf (down). - return (x + 1 - divisor) / divisor * divisor; + return static_cast((x + 1 - divisor) / divisor * divisor); } Time date = find(x).date; @@ -285,7 +285,7 @@ private: { if (unlikely(res < 0)) return 0; - return res; + return static_cast(res); } else return res; @@ -509,7 +509,7 @@ public: if (time >= lut[index].time_at_offset_change()) time += lut[index].amount_of_offset_change(); - unsigned res = time / 3600; + unsigned res = static_cast(time / 3600); /// In case time was changed backwards at the start of next day, we will repeat the hour 23. return res <= 23 ? res : 23; @@ -548,8 +548,8 @@ public: { Time res = t % 60; if (likely(res >= 0)) - return res; - return res + 60; + return static_cast(res); + return static_cast(res) + 60; } LUTIndex index = findIndex(t); @@ -895,6 +895,19 @@ public: return toRelativeHourNum(lut[toLUTIndex(v)].date); } + /// The same formula is used for positive time (after Unix epoch) and negative time (before Unix epoch). + /// It’s needed for correct work of dateDiff function. 
+ inline Time toStableRelativeHourNum(Time t) const + { + return (t + DATE_LUT_ADD + 86400 - offset_at_start_of_epoch) / 3600 - (DATE_LUT_ADD / 3600); + } + + template + inline Time toStableRelativeHourNum(DateOrTime v) const + { + return toStableRelativeHourNum(lut[toLUTIndex(v)].date); + } + inline Time toRelativeMinuteNum(Time t) const /// NOLINT { return (t + DATE_LUT_ADD) / 60 - (DATE_LUT_ADD / 60); @@ -960,7 +973,7 @@ public: if constexpr (std::is_same_v) return DayNum(4 + (d - 4) / days * days); else - return ExtendedDayNum(4 + (d - 4) / days * days); + return ExtendedDayNum(static_cast(4 + (d - 4) / days * days)); } template @@ -970,9 +983,9 @@ public: if (days == 1) return toDate(d); if constexpr (std::is_same_v) - return lut_saturated[toLUTIndex(ExtendedDayNum(d / days * days))].date; + return lut_saturated[toLUTIndex(ExtendedDayNum(static_cast(d / days * days)))].date; else - return lut[toLUTIndex(ExtendedDayNum(d / days * days))].date; + return lut[toLUTIndex(ExtendedDayNum(static_cast(d / days * days)))].date; } template @@ -1021,7 +1034,7 @@ public: { if (unlikely(res < 0)) return 0; - return res; + return static_cast(res); } else return res; @@ -1034,8 +1047,8 @@ public: if (likely(offset_is_whole_number_of_minutes_during_epoch)) { if (likely(t >= 0)) - return t / divisor * divisor; - return (t + 1 - divisor) / divisor * divisor; + return static_cast(t / divisor * divisor); + return static_cast((t + 1 - divisor) / divisor * divisor); } Time date = find(t).date; @@ -1044,7 +1057,7 @@ public: { if (unlikely(res < 0)) return 0; - return res; + return static_cast(res); } else return res; @@ -1058,7 +1071,7 @@ public: if (seconds % 60 == 0) return toStartOfMinuteInterval(t, seconds / 60); - return roundDown(t, seconds); + return static_cast(roundDown(t, seconds)); } inline LUTIndex makeLUTIndex(Int16 year, UInt8 month, UInt8 day_of_month) const @@ -1236,9 +1249,9 @@ public: return lut[new_index].date + time; } - inline NO_SANITIZE_UNDEFINED Time addWeeks(Time t, Int32 delta) const + inline NO_SANITIZE_UNDEFINED Time addWeeks(Time t, Int64 delta) const { - return addDays(t, static_cast(delta) * 7); + return addDays(t, delta * 7); } inline UInt8 saturateDayOfMonth(Int16 year, UInt8 month, UInt8 day_of_month) const @@ -1318,9 +1331,9 @@ public: } template - inline auto addQuarters(DateOrTime d, Int32 delta) const + inline auto addQuarters(DateOrTime d, Int64 delta) const { - return addMonths(d, static_cast(delta) * 3); + return addMonths(d, delta * 3); } template diff --git a/src/Common/Dwarf.h b/src/Common/Dwarf.h index 09178c66d47..ef6364b6b18 100644 --- a/src/Common/Dwarf.h +++ b/src/Common/Dwarf.h @@ -218,7 +218,7 @@ private: // Offset from start to first attribute uint8_t attr_offset; // Offset within debug info. - uint32_t offset; + uint64_t offset; uint64_t code; DIEAbbreviation abbr; }; @@ -252,10 +252,10 @@ private: uint8_t unit_type = DW_UT_compile; // DW_UT_compile or DW_UT_skeleton uint8_t addr_size = 0; // Offset in .debug_info of this compilation unit. - uint32_t offset = 0; - uint32_t size = 0; + uint64_t offset = 0; + uint64_t size = 0; // Offset in .debug_info for the first DIE in this compilation unit. 
- uint32_t first_die = 0; + uint64_t first_die = 0; uint64_t abbrev_offset = 0; // The beginning of the CU's contribution to .debug_addr diff --git a/src/Common/HashTable/Hash.h b/src/Common/HashTable/Hash.h index 4d798173698..f36ab576766 100644 --- a/src/Common/HashTable/Hash.h +++ b/src/Common/HashTable/Hash.h @@ -48,6 +48,9 @@ inline DB::UInt64 intHash64(DB::UInt64 x) #include #endif +/// NOTE: Intel intrinsic can be confusing. +/// - https://code.google.com/archive/p/sse-intrinsics/wikis/PmovIntrinsicBug.wiki +/// - https://stackoverflow.com/questions/15752770/mm-crc32-u64-poorly-defined inline DB::UInt64 intHashCRC32(DB::UInt64 x) { #ifdef __SSE4_2__ @@ -56,16 +59,16 @@ inline DB::UInt64 intHashCRC32(DB::UInt64 x) return __crc32cd(-1U, x); #else /// On other platforms we do not have CRC32. NOTE This can be confusing. + /// NOTE: consider using intHash32() return intHash64(x); #endif } - inline DB::UInt64 intHashCRC32(DB::UInt64 x, DB::UInt64 updated_value) { #ifdef __SSE4_2__ return _mm_crc32_u64(updated_value, x); #elif defined(__aarch64__) && defined(__ARM_FEATURE_CRC32) - return __crc32cd(updated_value, x); + return __crc32cd(static_cast(updated_value), x); #else /// On other platforms we do not have CRC32. NOTE This can be confusing. return intHash64(x) ^ updated_value; @@ -123,14 +126,14 @@ inline UInt32 updateWeakHash32(const DB::UInt8 * pos, size_t size, DB::UInt32 up } reinterpret_cast(&value)[7] = size; - return intHashCRC32(value, updated_value); + return static_cast(intHashCRC32(value, updated_value)); } const auto * end = pos + size; while (pos + 8 <= end) { auto word = unalignedLoad(pos); - updated_value = intHashCRC32(word, updated_value); + updated_value = static_cast(intHashCRC32(word, updated_value)); pos += 8; } @@ -148,7 +151,7 @@ inline UInt32 updateWeakHash32(const DB::UInt8 * pos, size_t size, DB::UInt32 up /// Use least byte to store tail length. 
word |= tail_size; /// Now word is '\3\0\0\0\0XYZ' - updated_value = intHashCRC32(word, updated_value); + updated_value = static_cast(intHashCRC32(word, updated_value)); } return updated_value; @@ -302,8 +305,8 @@ struct UInt128HashCRC32 size_t operator()(UInt128 x) const { UInt64 crc = -1ULL; - crc = __crc32cd(crc, x.items[0]); - crc = __crc32cd(crc, x.items[1]); + crc = __crc32cd(static_cast(crc), x.items[0]); + crc = __crc32cd(static_cast(crc), x.items[1]); return crc; } }; @@ -358,10 +361,10 @@ struct UInt256HashCRC32 size_t operator()(UInt256 x) const { UInt64 crc = -1ULL; - crc = __crc32cd(crc, x.items[0]); - crc = __crc32cd(crc, x.items[1]); - crc = __crc32cd(crc, x.items[2]); - crc = __crc32cd(crc, x.items[3]); + crc = __crc32cd(static_cast(crc), x.items[0]); + crc = __crc32cd(static_cast(crc), x.items[1]); + crc = __crc32cd(static_cast(crc), x.items[2]); + crc = __crc32cd(static_cast(crc), x.items[3]); return crc; } }; @@ -423,7 +426,7 @@ inline DB::UInt32 intHash32(DB::UInt64 key) key = key + (key << 6); key = key ^ ((key >> 22) | (key << 42)); - return key; + return static_cast(key); } diff --git a/src/Common/HashTable/TwoLevelHashTable.h b/src/Common/HashTable/TwoLevelHashTable.h index b8d5eedd430..5acc8b19195 100644 --- a/src/Common/HashTable/TwoLevelHashTable.h +++ b/src/Common/HashTable/TwoLevelHashTable.h @@ -44,8 +44,8 @@ protected: public: using Impl = ImplTable; - static constexpr size_t NUM_BUCKETS = 1ULL << BITS_FOR_BUCKET; - static constexpr size_t MAX_BUCKET = NUM_BUCKETS - 1; + static constexpr UInt32 NUM_BUCKETS = 1ULL << BITS_FOR_BUCKET; + static constexpr UInt32 MAX_BUCKET = NUM_BUCKETS - 1; size_t hash(const Key & x) const { return Hash::operator()(x); } @@ -286,13 +286,13 @@ public: void write(DB::WriteBuffer & wb) const { - for (size_t i = 0; i < NUM_BUCKETS; ++i) + for (UInt32 i = 0; i < NUM_BUCKETS; ++i) impls[i].write(wb); } void writeText(DB::WriteBuffer & wb) const { - for (size_t i = 0; i < NUM_BUCKETS; ++i) + for (UInt32 i = 0; i < NUM_BUCKETS; ++i) { if (i != 0) DB::writeChar(',', wb); @@ -302,13 +302,13 @@ public: void read(DB::ReadBuffer & rb) { - for (size_t i = 0; i < NUM_BUCKETS; ++i) + for (UInt32 i = 0; i < NUM_BUCKETS; ++i) impls[i].read(rb); } void readText(DB::ReadBuffer & rb) { - for (size_t i = 0; i < NUM_BUCKETS; ++i) + for (UInt32 i = 0; i < NUM_BUCKETS; ++i) { if (i != 0) DB::assertChar(',', rb); @@ -320,7 +320,7 @@ public: size_t size() const { size_t res = 0; - for (size_t i = 0; i < NUM_BUCKETS; ++i) + for (UInt32 i = 0; i < NUM_BUCKETS; ++i) res += impls[i].size(); return res; @@ -328,7 +328,7 @@ public: bool empty() const { - for (size_t i = 0; i < NUM_BUCKETS; ++i) + for (UInt32 i = 0; i < NUM_BUCKETS; ++i) if (!impls[i].empty()) return false; @@ -338,7 +338,7 @@ public: size_t getBufferSizeInBytes() const { size_t res = 0; - for (size_t i = 0; i < NUM_BUCKETS; ++i) + for (UInt32 i = 0; i < NUM_BUCKETS; ++i) res += impls[i].getBufferSizeInBytes(); return res; diff --git a/src/Common/HashTable/TwoLevelStringHashTable.h b/src/Common/HashTable/TwoLevelStringHashTable.h index 77acca5e707..ea1914348b2 100644 --- a/src/Common/HashTable/TwoLevelStringHashTable.h +++ b/src/Common/HashTable/TwoLevelStringHashTable.h @@ -13,8 +13,8 @@ public: using Key = StringRef; using Impl = ImplTable; - static constexpr size_t NUM_BUCKETS = 1ULL << BITS_FOR_BUCKET; - static constexpr size_t MAX_BUCKET = NUM_BUCKETS - 1; + static constexpr UInt32 NUM_BUCKETS = 1ULL << BITS_FOR_BUCKET; + static constexpr UInt32 MAX_BUCKET = NUM_BUCKETS - 1; // TODO: 
currently hashing contains redundant computations when doing distributed or external aggregations size_t hash(const Key & x) const @@ -175,13 +175,13 @@ public: void write(DB::WriteBuffer & wb) const { - for (size_t i = 0; i < NUM_BUCKETS; ++i) + for (UInt32 i = 0; i < NUM_BUCKETS; ++i) impls[i].write(wb); } void writeText(DB::WriteBuffer & wb) const { - for (size_t i = 0; i < NUM_BUCKETS; ++i) + for (UInt32 i = 0; i < NUM_BUCKETS; ++i) { if (i != 0) DB::writeChar(',', wb); @@ -191,13 +191,13 @@ public: void read(DB::ReadBuffer & rb) { - for (size_t i = 0; i < NUM_BUCKETS; ++i) + for (UInt32 i = 0; i < NUM_BUCKETS; ++i) impls[i].read(rb); } void readText(DB::ReadBuffer & rb) { - for (size_t i = 0; i < NUM_BUCKETS; ++i) + for (UInt32 i = 0; i < NUM_BUCKETS; ++i) { if (i != 0) DB::assertChar(',', rb); @@ -208,7 +208,7 @@ public: size_t size() const { size_t res = 0; - for (size_t i = 0; i < NUM_BUCKETS; ++i) + for (UInt32 i = 0; i < NUM_BUCKETS; ++i) res += impls[i].size(); return res; @@ -216,7 +216,7 @@ public: bool empty() const { - for (size_t i = 0; i < NUM_BUCKETS; ++i) + for (UInt32 i = 0; i < NUM_BUCKETS; ++i) if (!impls[i].empty()) return false; @@ -226,7 +226,7 @@ public: size_t getBufferSizeInBytes() const { size_t res = 0; - for (size_t i = 0; i < NUM_BUCKETS; ++i) + for (UInt32 i = 0; i < NUM_BUCKETS; ++i) res += impls[i].getBufferSizeInBytes(); return res; diff --git a/src/Common/HyperLogLogCounter.h b/src/Common/HyperLogLogCounter.h index 36db00a5982..32c04d85d57 100644 --- a/src/Common/HyperLogLogCounter.h +++ b/src/Common/HyperLogLogCounter.h @@ -264,7 +264,8 @@ enum class HyperLogLogMode /// of Algorithms). template < UInt8 precision, - typename Hash = IntHash32, + typename Key = UInt64, + typename Hash = IntHash32, typename HashValueType = UInt32, typename DenominatorType = double, typename BiasEstimator = TrivialBiasEstimator, @@ -409,7 +410,9 @@ private: inline HashValueType getHash(Value key) const { - return Hash::operator()(key); + /// NOTE: this should be OK, since value is the same as key for HLL. + return static_cast( + Hash::operator()(static_cast(key))); } /// Update maximum rank for current bucket. @@ -532,6 +535,7 @@ private: template < UInt8 precision, + typename Key, typename Hash, typename HashValueType, typename DenominatorType, @@ -542,6 +546,7 @@ template details::LogLUT HyperLogLogCounter < precision, + Key, Hash, HashValueType, DenominatorType, @@ -555,6 +560,7 @@ details::LogLUT HyperLogLogCounter /// Serialization format must not be changed. 
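The HyperLogLogCounter signature above gains an explicit Key template parameter so the hash functor is instantiated for the key type and the stored value is cast to it before hashing. For orientation, the core update step such a counter performs with that hash is shown below; this is a deliberately simplified sketch (fixed 32-bit hash, plain byte-per-bucket rank storage, no bias correction or mode switching), not the ClickHouse implementation:

#include <array>
#include <bit>
#include <cstdint>

/// Simplified HyperLogLog bucket update: the top `precision` bits of the hash
/// select a bucket, the rank of the remaining bits is kept as a running maximum.
template <uint8_t precision>
struct TinyHLL
{
    static constexpr uint32_t num_buckets = 1u << precision;
    std::array<uint8_t, num_buckets> ranks{};

    void insertHash(uint32_t hash)
    {
        uint32_t bucket = hash >> (32 - precision);
        /// Shift the remaining bits to the top and set a sentinel bit so the rank is bounded.
        uint32_t rest = (hash << precision) | (1u << (precision - 1));
        uint8_t rank = static_cast<uint8_t>(std::countl_zero(rest) + 1);
        if (rank > ranks[bucket])
            ranks[bucket] = rank;
    }
};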
using HLL12 = HyperLogLogCounter< 12, + UInt64, IntHash32, UInt32, double, diff --git a/src/Common/HyperLogLogWithSmallSetOptimization.h b/src/Common/HyperLogLogWithSmallSetOptimization.h index 39c00660ebe..1d2408186de 100644 --- a/src/Common/HyperLogLogWithSmallSetOptimization.h +++ b/src/Common/HyperLogLogWithSmallSetOptimization.h @@ -26,7 +26,7 @@ class HyperLogLogWithSmallSetOptimization : private boost::noncopyable { private: using Small = SmallSet; - using Large = HyperLogLogCounter; + using Large = HyperLogLogCounter; using LargeValueType = typename Large::value_type; Small small; diff --git a/src/Common/JSONParsers/DummyJSONParser.h b/src/Common/JSONParsers/DummyJSONParser.h index 3cedd59decd..50c112affe2 100644 --- a/src/Common/JSONParsers/DummyJSONParser.h +++ b/src/Common/JSONParsers/DummyJSONParser.h @@ -3,6 +3,7 @@ #include #include #include +#include "ElementTypes.h" namespace DB @@ -25,6 +26,7 @@ struct DummyJSONParser { public: Element() = default; + static ElementType type() { return ElementType::NULL_VALUE; } static bool isInt64() { return false; } static bool isUInt64() { return false; } static bool isDouble() { return false; } diff --git a/src/Common/JSONParsers/ElementTypes.h b/src/Common/JSONParsers/ElementTypes.h new file mode 100644 index 00000000000..44e4c850a2f --- /dev/null +++ b/src/Common/JSONParsers/ElementTypes.h @@ -0,0 +1,17 @@ +#pragma once + +namespace DB +{ +// Enum values match simdjson's for fast conversion +enum class ElementType +{ + ARRAY = '[', + OBJECT = '{', + INT64 = 'l', + UINT64 = 'u', + DOUBLE = 'd', + STRING = '"', + BOOL = 't', + NULL_VALUE = 'n' +}; +} diff --git a/src/Common/JSONParsers/RapidJSONParser.h b/src/Common/JSONParsers/RapidJSONParser.h index 01730bc0692..6c5ea938bfe 100644 --- a/src/Common/JSONParsers/RapidJSONParser.h +++ b/src/Common/JSONParsers/RapidJSONParser.h @@ -6,7 +6,7 @@ # include # include # include - +# include "ElementTypes.h" namespace DB { @@ -26,6 +26,20 @@ struct RapidJSONParser ALWAYS_INLINE Element() = default; ALWAYS_INLINE Element(const rapidjson::Value & value_) : ptr(&value_) {} /// NOLINT + ALWAYS_INLINE ElementType type() const + { + switch (ptr->GetType()) + { + case rapidjson::kNumberType: return ptr->IsDouble() ? ElementType::DOUBLE : (ptr->IsUint64() ? 
ElementType::UINT64 : ElementType::INT64); + case rapidjson::kStringType: return ElementType::STRING; + case rapidjson::kArrayType: return ElementType::ARRAY; + case rapidjson::kObjectType: return ElementType::OBJECT; + case rapidjson::kTrueType: return ElementType::BOOL; + case rapidjson::kFalseType: return ElementType::BOOL; + case rapidjson::kNullType: return ElementType::NULL_VALUE; + } + } + ALWAYS_INLINE bool isInt64() const { return ptr->IsInt64(); } ALWAYS_INLINE bool isUInt64() const { return ptr->IsUint64(); } ALWAYS_INLINE bool isDouble() const { return ptr->IsDouble(); } diff --git a/src/Common/JSONParsers/SimdJSONParser.h b/src/Common/JSONParsers/SimdJSONParser.h index 14eb3cd6d78..f0f8f91109f 100644 --- a/src/Common/JSONParsers/SimdJSONParser.h +++ b/src/Common/JSONParsers/SimdJSONParser.h @@ -7,7 +7,7 @@ # include # include # include - +# include "ElementTypes.h" namespace DB { @@ -31,6 +31,21 @@ struct SimdJSONParser ALWAYS_INLINE Element() {} /// NOLINT ALWAYS_INLINE Element(const simdjson::dom::element & element_) : element(element_) {} /// NOLINT + ALWAYS_INLINE ElementType type() const + { + switch (element.type()) + { + case simdjson::dom::element_type::INT64: return ElementType::INT64; + case simdjson::dom::element_type::UINT64: return ElementType::UINT64; + case simdjson::dom::element_type::DOUBLE: return ElementType::DOUBLE; + case simdjson::dom::element_type::STRING: return ElementType::STRING; + case simdjson::dom::element_type::ARRAY: return ElementType::ARRAY; + case simdjson::dom::element_type::OBJECT: return ElementType::OBJECT; + case simdjson::dom::element_type::BOOL: return ElementType::BOOL; + case simdjson::dom::element_type::NULL_VALUE: return ElementType::NULL_VALUE; + } + } + ALWAYS_INLINE bool isInt64() const { return element.type() == simdjson::dom::element_type::INT64; } ALWAYS_INLINE bool isUInt64() const { return element.type() == simdjson::dom::element_type::UINT64; } ALWAYS_INLINE bool isDouble() const { return element.type() == simdjson::dom::element_type::DOUBLE; } diff --git a/src/Common/MemoryTracker.cpp b/src/Common/MemoryTracker.cpp index 8bd31681706..b530410ec63 100644 --- a/src/Common/MemoryTracker.cpp +++ b/src/Common/MemoryTracker.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include #include "config.h" @@ -86,6 +87,8 @@ inline std::string_view toDescription(OvercommitResult result) namespace ProfileEvents { extern const Event QueryMemoryLimitExceeded; + extern const Event MemoryAllocatorPurge; + extern const Event MemoryAllocatorPurgeTimeMicroseconds; } using namespace std::chrono_literals; @@ -229,7 +232,10 @@ void MemoryTracker::allocImpl(Int64 size, bool throw_if_memory_exceeded, MemoryT { if (free_memory_in_allocator_arenas.exchange(-current_free_memory_in_allocator_arenas) > 0) { + Stopwatch watch; mallctl("arena." 
STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", nullptr, nullptr, nullptr, 0); + ProfileEvents::increment(ProfileEvents::MemoryAllocatorPurge); + ProfileEvents::increment(ProfileEvents::MemoryAllocatorPurgeTimeMicroseconds, watch.elapsedMicroseconds()); } } @@ -432,7 +438,7 @@ void MemoryTracker::reset() void MemoryTracker::setRSS(Int64 rss_, Int64 free_memory_in_allocator_arenas_) { - Int64 new_amount = rss_; // - free_memory_in_allocator_arenas_; + Int64 new_amount = rss_; total_memory_tracker.amount.store(new_amount, std::memory_order_relaxed); free_memory_in_allocator_arenas.store(free_memory_in_allocator_arenas_, std::memory_order_relaxed); diff --git a/src/Common/OptimizedRegularExpression.cpp b/src/Common/OptimizedRegularExpression.cpp index 60efab69433..e95bc42a1ea 100644 --- a/src/Common/OptimizedRegularExpression.cpp +++ b/src/Common/OptimizedRegularExpression.cpp @@ -506,8 +506,16 @@ unsigned OptimizedRegularExpressionImpl::match(const char * subject DB::PODArrayWithStackMemory pieces(limit); - if (!re2->Match(StringPieceType(subject, subject_size), 0, subject_size, RegexType::UNANCHORED, pieces.data(), pieces.size())) + if (!re2->Match( + StringPieceType(subject, subject_size), + 0, + subject_size, + RegexType::UNANCHORED, + pieces.data(), + static_cast(pieces.size()))) + { return 0; + } else { matches.resize(limit); diff --git a/src/Common/OvercommitTracker.cpp b/src/Common/OvercommitTracker.cpp index c7730667f55..bb477d6019d 100644 --- a/src/Common/OvercommitTracker.cpp +++ b/src/Common/OvercommitTracker.cpp @@ -5,6 +5,7 @@ #include #include + namespace ProfileEvents { extern const Event MemoryOvercommitWaitTimeMicroseconds; @@ -170,7 +171,8 @@ void UserOvercommitTracker::pickQueryToExcludeImpl() GlobalOvercommitTracker::GlobalOvercommitTracker(DB::ProcessList * process_list_) : OvercommitTracker(process_list_) -{} +{ +} void GlobalOvercommitTracker::pickQueryToExcludeImpl() { @@ -180,16 +182,16 @@ void GlobalOvercommitTracker::pickQueryToExcludeImpl() // This is guaranteed by locking global_mutex in OvercommitTracker::needToStopQuery. 
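The MemoryTracker hunk above wraps the jemalloc purge in a Stopwatch and exports both an event counter and the elapsed microseconds through the two new profile events declared further down. The same pattern, sketched with std::chrono and plain atomics standing in for Stopwatch/ProfileEvents (the counter names below are placeholders, not the real ProfileEvents symbols):

#include <jemalloc/jemalloc.h>
#include <atomic>
#include <chrono>
#include <cstdint>
#include <cstdio>

/// Placeholder counters; in ClickHouse these are ProfileEvents::MemoryAllocatorPurge
/// and ProfileEvents::MemoryAllocatorPurgeTimeMicroseconds.
std::atomic<uint64_t> purge_count{0};
std::atomic<uint64_t> purge_time_us{0};

void purgeAllArenas()
{
    auto start = std::chrono::steady_clock::now();

    /// Ask jemalloc to return dirty pages of all arenas back to the OS.
    char command[64];
    std::snprintf(command, sizeof(command), "arena.%d.purge", MALLCTL_ARENAS_ALL);
    mallctl(command, nullptr, nullptr, nullptr, 0);

    auto elapsed_us = std::chrono::duration_cast<std::chrono::microseconds>(
        std::chrono::steady_clock::now() - start).count();

    purge_count.fetch_add(1, std::memory_order_relaxed);
    purge_time_us.fetch_add(static_cast<uint64_t>(elapsed_us), std::memory_order_relaxed);
}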
for (auto const & query : process_list->processes) { - if (query.isKilled()) + if (query->isKilled()) continue; Int64 user_soft_limit = 0; - if (auto const * user_process_list = query.getUserProcessList()) + if (auto const * user_process_list = query->getUserProcessList()) user_soft_limit = user_process_list->user_memory_tracker.getSoftLimit(); if (user_soft_limit == 0) continue; - auto * memory_tracker = query.getMemoryTracker(); + auto * memory_tracker = query->getMemoryTracker(); if (!memory_tracker) continue; auto ratio = memory_tracker->getOvercommitRatio(user_soft_limit); diff --git a/src/Common/PODArray.cpp b/src/Common/PODArray.cpp index c1edc5bafad..07c3cf1af1a 100644 --- a/src/Common/PODArray.cpp +++ b/src/Common/PODArray.cpp @@ -6,14 +6,13 @@ namespace DB /// Used for left padding of PODArray when empty const char empty_pod_array[empty_pod_array_size]{}; -template class PODArray, 15, 16>; -template class PODArray, 15, 16>; -template class PODArray, 15, 16>; -template class PODArray, 15, 16>; - -template class PODArray, 15, 16>; -template class PODArray, 15, 16>; -template class PODArray, 15, 16>; -template class PODArray, 15, 16>; +template class PODArray, PADDING_FOR_SIMD - 1, PADDING_FOR_SIMD>; +template class PODArray, PADDING_FOR_SIMD - 1, PADDING_FOR_SIMD>; +template class PODArray, PADDING_FOR_SIMD - 1, PADDING_FOR_SIMD>; +template class PODArray, PADDING_FOR_SIMD - 1, PADDING_FOR_SIMD>; +template class PODArray, PADDING_FOR_SIMD - 1, PADDING_FOR_SIMD>; +template class PODArray, PADDING_FOR_SIMD - 1, PADDING_FOR_SIMD>; +template class PODArray, PADDING_FOR_SIMD - 1, PADDING_FOR_SIMD>; +template class PODArray, PADDING_FOR_SIMD - 1, PADDING_FOR_SIMD>; } diff --git a/src/Common/PODArray.h b/src/Common/PODArray.h index 0baefad39e2..ea3115677fc 100644 --- a/src/Common/PODArray.h +++ b/src/Common/PODArray.h @@ -502,7 +502,7 @@ public: template void insertSmallAllowReadWriteOverflow15(It1 from_begin, It2 from_end, TAllocatorParams &&... allocator_params) { - static_assert(pad_right_ >= 15); + static_assert(pad_right_ >= PADDING_FOR_SIMD - 1); static_assert(sizeof(T) == sizeof(*from_begin)); insertPrepare(from_begin, from_end, std::forward(allocator_params)...); size_t bytes_to_copy = this->byte_size(from_end - from_begin); @@ -778,14 +778,13 @@ void swap(PODArray & lhs, P /// Prevent implicit template instantiation of PODArray for common numeric types -extern template class PODArray, 15, 16>; -extern template class PODArray, 15, 16>; -extern template class PODArray, 15, 16>; -extern template class PODArray, 15, 16>; - -extern template class PODArray, 15, 16>; -extern template class PODArray, 15, 16>; -extern template class PODArray, 15, 16>; -extern template class PODArray, 15, 16>; +extern template class PODArray, PADDING_FOR_SIMD - 1, PADDING_FOR_SIMD>; +extern template class PODArray, PADDING_FOR_SIMD - 1, PADDING_FOR_SIMD>; +extern template class PODArray, PADDING_FOR_SIMD - 1, PADDING_FOR_SIMD>; +extern template class PODArray, PADDING_FOR_SIMD - 1, PADDING_FOR_SIMD>; +extern template class PODArray, PADDING_FOR_SIMD - 1, PADDING_FOR_SIMD>; +extern template class PODArray, PADDING_FOR_SIMD - 1, PADDING_FOR_SIMD>; +extern template class PODArray, PADDING_FOR_SIMD - 1, PADDING_FOR_SIMD>; +extern template class PODArray, PADDING_FOR_SIMD - 1, PADDING_FOR_SIMD>; } diff --git a/src/Common/PODArray_fwd.h b/src/Common/PODArray_fwd.h index ec7b3bf4331..56e84d68285 100644 --- a/src/Common/PODArray_fwd.h +++ b/src/Common/PODArray_fwd.h @@ -4,6 +4,7 @@ * PODArray. 
*/ +#include #include #include @@ -22,7 +23,7 @@ class PODArray; /** For columns. Padding is enough to read and write xmm-register at the address of the last element. */ template > -using PaddedPODArray = PODArray; +using PaddedPODArray = PODArray; /** A helper for declaring PODArray that uses inline memory. * The initial size is set to use all the inline bytes, since using less would diff --git a/src/Common/PoolWithFailoverBase.h b/src/Common/PoolWithFailoverBase.h index 42b5b3d0990..9939a5738da 100644 --- a/src/Common/PoolWithFailoverBase.h +++ b/src/Common/PoolWithFailoverBase.h @@ -339,7 +339,7 @@ struct PoolWithFailoverBase::PoolState Int64 config_priority = 1; /// Priority from the GetPriorityFunc. Int64 priority = 0; - UInt32 random = 0; + UInt64 random = 0; void randomize() { @@ -353,7 +353,7 @@ struct PoolWithFailoverBase::PoolState } private: - std::minstd_rand rng = std::minstd_rand(randomSeed()); + std::minstd_rand rng = std::minstd_rand(static_cast(randomSeed())); }; template diff --git a/src/Common/ProfileEvents.cpp b/src/Common/ProfileEvents.cpp index 46bec669626..2f801e496fa 100644 --- a/src/Common/ProfileEvents.cpp +++ b/src/Common/ProfileEvents.cpp @@ -229,6 +229,8 @@ The server successfully detected this situation and will download merged part fr M(UserTimeMicroseconds, "Total time spent in processing (queries and other tasks) threads executing CPU instructions in user space. This include time CPU pipeline was stalled due to cache misses, branch mispredictions, hyper-threading, etc.") \ M(SystemTimeMicroseconds, "Total time spent in processing (queries and other tasks) threads executing CPU instructions in OS kernel space. This include time CPU pipeline was stalled due to cache misses, branch mispredictions, hyper-threading, etc.") \ M(MemoryOvercommitWaitTimeMicroseconds, "Total time spent in waiting for memory to be freed in OvercommitTracker.") \ + M(MemoryAllocatorPurge, "Total number of times memory allocator purge was requested") \ + M(MemoryAllocatorPurgeTimeMicroseconds, "Total number of times memory allocator purge was requested") \ M(SoftPageFaults, "The number of soft page faults in query execution threads. Soft page fault usually means a miss in the memory allocator cache which required a new memory mapping from the OS and subsequent allocation of a page of physical memory.") \ M(HardPageFaults, "The number of hard page faults in query execution threads. 
High values indicate either that you forgot to turn off swap on your server, or eviction of memory pages of the ClickHouse binary during very high memory pressure, or successful usage of the 'mmap' read method for the tables data.") \ \ diff --git a/src/Common/QueryProfiler.cpp b/src/Common/QueryProfiler.cpp index 7266b9b9553..b50e0c0ab49 100644 --- a/src/Common/QueryProfiler.cpp +++ b/src/Common/QueryProfiler.cpp @@ -132,11 +132,11 @@ QueryProfilerBase::QueryProfilerBase(UInt64 thread_id, int clock_t sev.sigev_signo = pause_signal; #if defined(OS_FREEBSD) - sev._sigev_un._threadid = thread_id; + sev._sigev_un._threadid = static_cast(thread_id); #elif defined(USE_MUSL) - sev.sigev_notify_thread_id = thread_id; + sev.sigev_notify_thread_id = static_cast(thread_id); #else - sev._sigev_un._tid = thread_id; + sev._sigev_un._tid = static_cast(thread_id); #endif timer_t local_timer_id; if (timer_create(clock_type, &sev, &local_timer_id)) diff --git a/src/Common/RadixSort.h b/src/Common/RadixSort.h index 9ca43bee30c..739bec8d9dd 100644 --- a/src/Common/RadixSort.h +++ b/src/Common/RadixSort.h @@ -273,13 +273,13 @@ private: { /// Replace the histograms with the accumulated sums: the value in position i is the sum of the previous positions minus one. - size_t sums[NUM_PASSES] = {0}; + CountType sums[NUM_PASSES] = {0}; for (size_t i = 0; i < HISTOGRAM_SIZE; ++i) { for (size_t pass = 0; pass < NUM_PASSES; ++pass) { - size_t tmp = histograms[pass * HISTOGRAM_SIZE + i] + sums[pass]; + CountType tmp = histograms[pass * HISTOGRAM_SIZE + i] + sums[pass]; histograms[pass * HISTOGRAM_SIZE + i] = sums[pass] - 1; sums[pass] = tmp; } diff --git a/src/Common/StringSearcher.h b/src/Common/StringSearcher.h index b8f8a9d3a88..048e7a1f34c 100644 --- a/src/Common/StringSearcher.h +++ b/src/Common/StringSearcher.h @@ -40,7 +40,7 @@ public: #ifdef __SSE2__ protected: static constexpr auto n = sizeof(__m128i); - const int page_size = ::getPageSize(); + const Int64 page_size = ::getPageSize(); bool pageSafe(const void * const ptr) const { diff --git a/src/Common/SymbolIndex.cpp b/src/Common/SymbolIndex.cpp index e217d23cc27..6f31009b1d2 100644 --- a/src/Common/SymbolIndex.cpp +++ b/src/Common/SymbolIndex.cpp @@ -99,23 +99,25 @@ void updateResources(ElfW(Addr) base_address, std::string_view object_name, std: name = name.substr((name[0] == '_') + strlen("binary_")); name = name.substr(0, name.size() - strlen("_start")); - resources.emplace(name, SymbolIndex::ResourcesBlob{ - base_address, - object_name, - std::string_view{char_address, 0}, // NOLINT - }); + auto & resource = resources[name]; + if (!resource.base_address || resource.base_address == base_address) + { + resource.base_address = base_address; + resource.start = std::string_view{char_address, 0}; // NOLINT(bugprone-string-constructor) + resource.object_name = object_name; + } } - else if (name.ends_with("_end")) + if (name.ends_with("_end")) { name = name.substr((name[0] == '_') + strlen("binary_")); name = name.substr(0, name.size() - strlen("_end")); - auto it = resources.find(name); - if (it != resources.end() && it->second.base_address == base_address && it->second.data.empty()) + auto & resource = resources[name]; + if (!resource.base_address || resource.base_address == base_address) { - const char * start = it->second.data.data(); - assert(char_address >= start); - it->second.data = std::string_view{start, static_cast(char_address - start)}; + resource.base_address = base_address; + resource.end = std::string_view{char_address, 0}; // 
NOLINT(bugprone-string-constructor) + resource.object_name = object_name; } } } diff --git a/src/Common/SymbolIndex.h b/src/Common/SymbolIndex.h index f2b40f02ead..47162331946 100644 --- a/src/Common/SymbolIndex.h +++ b/src/Common/SymbolIndex.h @@ -51,7 +51,7 @@ public: std::string_view getResource(String name) const { if (auto it = data.resources.find(name); it != data.resources.end()) - return it->second.data; + return it->second.data(); return {}; } @@ -63,11 +63,18 @@ public: { /// Symbol can be presented in multiple shared objects, /// base_address will be used to compare only symbols from the same SO. - ElfW(Addr) base_address; + ElfW(Addr) base_address = 0; /// Just a human name of the SO. std::string_view object_name; /// Data blob. - std::string_view data; + std::string_view start; + std::string_view end; + + std::string_view data() const + { + assert(end.data() >= start.data()); + return std::string_view{start.data(), static_cast(end.data() - start.data())}; + } }; using Resources = std::unordered_map; diff --git a/src/Common/TargetSpecific.cpp b/src/Common/TargetSpecific.cpp index 9a445ea0fc1..1ab499027bf 100644 --- a/src/Common/TargetSpecific.cpp +++ b/src/Common/TargetSpecific.cpp @@ -21,6 +21,8 @@ UInt32 getSupportedArchs() result |= static_cast(TargetArch::AVX512BW); if (Cpu::CpuFlagsCache::have_AVX512VBMI) result |= static_cast(TargetArch::AVX512VBMI); + if (Cpu::CpuFlagsCache::have_AVX512VBMI2) + result |= static_cast(TargetArch::AVX512VBMI2); return result; } @@ -39,8 +41,9 @@ String toString(TargetArch arch) case TargetArch::AVX: return "avx"; case TargetArch::AVX2: return "avx2"; case TargetArch::AVX512F: return "avx512f"; - case TargetArch::AVX512BW: return "avx512bw"; - case TargetArch::AVX512VBMI: return "avx512vbmi"; + case TargetArch::AVX512BW: return "avx512bw"; + case TargetArch::AVX512VBMI: return "avx512vbmi"; + case TargetArch::AVX512VBMI2: return "avx512vbmi"; } UNREACHABLE(); diff --git a/src/Common/TargetSpecific.h b/src/Common/TargetSpecific.h index f078c0e3ffc..250642f6ee4 100644 --- a/src/Common/TargetSpecific.h +++ b/src/Common/TargetSpecific.h @@ -31,7 +31,7 @@ * int funcImpl() { * return 2; * } - * ) // DECLARE_DEFAULT_CODE + * ) // DECLARE_AVX2_SPECIFIC_CODE * * int func() { * #if USE_MULTITARGET_CODE @@ -80,8 +80,9 @@ enum class TargetArch : UInt32 AVX = (1 << 1), AVX2 = (1 << 2), AVX512F = (1 << 3), - AVX512BW = (1 << 4), - AVX512VBMI = (1 << 5), + AVX512BW = (1 << 4), + AVX512VBMI = (1 << 5), + AVX512VBMI2 = (1 << 6), }; /// Runtime detection. 
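The TargetSpecific additions above register AVX512VBMI2 both as a TargetArch bit and as a new family of *_SPECIFIC_CODE macros, so the same source can be compiled once per instruction set and the right variant picked at run time from CPUID flags. A minimal sketch of that dispatch idea using plain function attributes and the compiler's CPU-feature builtin (not the ClickHouse macros themselves):

#include <cstddef>
#include <cstdint>

/// GCC/Clang builtin; ClickHouse uses its own CPUID flags cache instead.
inline bool cpuHasAvx2()
{
    return __builtin_cpu_supports("avx2");
}

/// Same loop compiled twice: once with AVX2 enabled for this function only, once generically.
__attribute__((target("avx2")))
uint64_t sumImplAVX2(const uint64_t * data, size_t size)
{
    uint64_t sum = 0;
    for (size_t i = 0; i < size; ++i)  /// auto-vectorized with AVX2 for this function
        sum += data[i];
    return sum;
}

uint64_t sumImplDefault(const uint64_t * data, size_t size)
{
    uint64_t sum = 0;
    for (size_t i = 0; i < size; ++i)
        sum += data[i];
    return sum;
}

uint64_t sum(const uint64_t * data, size_t size)
{
    if (cpuHasAvx2())
        return sumImplAVX2(data, size);
    return sumImplDefault(data, size);
}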
@@ -100,6 +101,7 @@ String toString(TargetArch arch); #if defined(__clang__) +#define AVX512VBMI2_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,avx512bw,avx512vl,avx512vbmi,avx512vbmi2"))) #define AVX512VBMI_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,avx512bw,avx512vl,avx512vbmi"))) #define AVX512BW_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,avx512bw"))) #define AVX512_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f"))) @@ -108,6 +110,8 @@ String toString(TargetArch arch); #define SSE42_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt"))) #define DEFAULT_FUNCTION_SPECIFIC_ATTRIBUTE +# define BEGIN_AVX512VBMI2_SPECIFIC_CODE \ + _Pragma("clang attribute push(__attribute__((target(\"sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,avx512bw,avx512vl,avx512vbmi,avx512vbmi2\"))),apply_to=function)") # define BEGIN_AVX512VBMI_SPECIFIC_CODE \ _Pragma("clang attribute push(__attribute__((target(\"sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,avx512bw,avx512vl,avx512vbmi\"))),apply_to=function)") # define BEGIN_AVX512BW_SPECIFIC_CODE \ @@ -129,6 +133,7 @@ String toString(TargetArch arch); # define DUMMY_FUNCTION_DEFINITION [[maybe_unused]] void _dummy_function_definition(); #else +#define AVX512VBMI2_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,avx512bw,avx512vl,avx512vbmi,avx512vbmi2,tune=native"))) #define AVX512VBMI_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,avx512bw,avx512vl,avx512vbmi,tune=native"))) #define AVX512BW_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,avx512bw,tune=native"))) #define AVX512_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,tune=native"))) @@ -137,6 +142,9 @@ String toString(TargetArch arch); #define SSE42_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt",tune=native))) #define DEFAULT_FUNCTION_SPECIFIC_ATTRIBUTE +# define BEGIN_AVX512VBMI2_SPECIFIC_CODE \ + _Pragma("GCC push_options") \ + _Pragma("GCC target(\"sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,avx512bw,avx512vl,avx512vbmi,avx512vbmi2,tune=native\")") # define BEGIN_AVX512VBMI_SPECIFIC_CODE \ _Pragma("GCC push_options") \ _Pragma("GCC target(\"sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,avx512bw,avx512vl,avx512vbmi,tune=native\")") @@ -217,6 +225,16 @@ namespace TargetSpecific::AVX512VBMI { \ } \ END_TARGET_SPECIFIC_CODE +#define DECLARE_AVX512VBMI2_SPECIFIC_CODE(...) \ +BEGIN_AVX512VBMI2_SPECIFIC_CODE \ +namespace TargetSpecific::AVX512VBMI2 { \ + DUMMY_FUNCTION_DEFINITION \ + using namespace DB::TargetSpecific::AVX512VBMI2; \ + __VA_ARGS__ \ +} \ +END_TARGET_SPECIFIC_CODE + + #else #define USE_MULTITARGET_CODE 0 @@ -229,6 +247,7 @@ END_TARGET_SPECIFIC_CODE #define DECLARE_AVX512F_SPECIFIC_CODE(...) #define DECLARE_AVX512BW_SPECIFIC_CODE(...) #define DECLARE_AVX512VBMI_SPECIFIC_CODE(...) +#define DECLARE_AVX512VBMI2_SPECIFIC_CODE(...) 
#endif @@ -245,8 +264,9 @@ DECLARE_SSE42_SPECIFIC_CODE (__VA_ARGS__) \ DECLARE_AVX_SPECIFIC_CODE (__VA_ARGS__) \ DECLARE_AVX2_SPECIFIC_CODE (__VA_ARGS__) \ DECLARE_AVX512F_SPECIFIC_CODE(__VA_ARGS__) \ -DECLARE_AVX512BW_SPECIFIC_CODE(__VA_ARGS__) \ -DECLARE_AVX512VBMI_SPECIFIC_CODE(__VA_ARGS__) +DECLARE_AVX512BW_SPECIFIC_CODE (__VA_ARGS__) \ +DECLARE_AVX512VBMI_SPECIFIC_CODE (__VA_ARGS__) \ +DECLARE_AVX512VBMI2_SPECIFIC_CODE (__VA_ARGS__) DECLARE_DEFAULT_CODE( constexpr auto BuildArch = TargetArch::Default; /// NOLINT @@ -276,6 +296,9 @@ DECLARE_AVX512VBMI_SPECIFIC_CODE( constexpr auto BuildArch = TargetArch::AVX512VBMI; /// NOLINT ) // DECLARE_AVX512VBMI_SPECIFIC_CODE +DECLARE_AVX512VBMI2_SPECIFIC_CODE( + constexpr auto BuildArch = TargetArch::AVX512VBMI2; /// NOLINT +) // DECLARE_AVX512VBMI2_SPECIFIC_CODE /** Runtime Dispatch helpers for class members. * diff --git a/src/Common/ThreadPool.cpp b/src/Common/ThreadPool.cpp index 0b89139fa53..b70b1fc5e60 100644 --- a/src/Common/ThreadPool.cpp +++ b/src/Common/ThreadPool.cpp @@ -87,7 +87,7 @@ void ThreadPoolImpl::setQueueSize(size_t value) template template -ReturnType ThreadPoolImpl::scheduleImpl(Job job, int priority, std::optional wait_microseconds, bool propagate_opentelemetry_tracing_context) +ReturnType ThreadPoolImpl::scheduleImpl(Job job, ssize_t priority, std::optional wait_microseconds, bool propagate_opentelemetry_tracing_context) { auto on_error = [&](const std::string & reason) { @@ -163,19 +163,19 @@ ReturnType ThreadPoolImpl::scheduleImpl(Job job, int priority, std::opti } template -void ThreadPoolImpl::scheduleOrThrowOnError(Job job, int priority) +void ThreadPoolImpl::scheduleOrThrowOnError(Job job, ssize_t priority) { scheduleImpl(std::move(job), priority, std::nullopt); } template -bool ThreadPoolImpl::trySchedule(Job job, int priority, uint64_t wait_microseconds) noexcept +bool ThreadPoolImpl::trySchedule(Job job, ssize_t priority, uint64_t wait_microseconds) noexcept { return scheduleImpl(std::move(job), priority, wait_microseconds); } template -void ThreadPoolImpl::scheduleOrThrow(Job job, int priority, uint64_t wait_microseconds, bool propagate_opentelemetry_tracing_context) +void ThreadPoolImpl::scheduleOrThrow(Job job, ssize_t priority, uint64_t wait_microseconds, bool propagate_opentelemetry_tracing_context) { scheduleImpl(std::move(job), priority, wait_microseconds, propagate_opentelemetry_tracing_context); } diff --git a/src/Common/ThreadPool.h b/src/Common/ThreadPool.h index 76ada9e0d75..f19a412db37 100644 --- a/src/Common/ThreadPool.h +++ b/src/Common/ThreadPool.h @@ -50,13 +50,13 @@ public: /// NOTE: Probably you should call wait() if exception was thrown. If some previously scheduled jobs are using some objects, /// located on stack of current thread, the stack must not be unwinded until all jobs finished. However, /// if ThreadPool is a local object, it will wait for all scheduled jobs in own destructor. - void scheduleOrThrowOnError(Job job, int priority = 0); + void scheduleOrThrowOnError(Job job, ssize_t priority = 0); /// Similar to scheduleOrThrowOnError(...). Wait for specified amount of time and schedule a job or return false. - bool trySchedule(Job job, int priority = 0, uint64_t wait_microseconds = 0) noexcept; + bool trySchedule(Job job, ssize_t priority = 0, uint64_t wait_microseconds = 0) noexcept; /// Similar to scheduleOrThrowOnError(...). Wait for specified amount of time and schedule a job or throw an exception. 
- void scheduleOrThrow(Job job, int priority = 0, uint64_t wait_microseconds = 0, bool propagate_opentelemetry_tracing_context = true); + void scheduleOrThrow(Job job, ssize_t priority = 0, uint64_t wait_microseconds = 0, bool propagate_opentelemetry_tracing_context = true); /// Wait for all currently active jobs to be done. /// You may call schedule and wait many times in arbitrary order. @@ -96,10 +96,10 @@ private: struct JobWithPriority { Job job; - int priority; + ssize_t priority; DB::OpenTelemetry::TracingContextOnThread thread_trace_context; - JobWithPriority(Job job_, int priority_, const DB::OpenTelemetry::TracingContextOnThread& thread_trace_context_) + JobWithPriority(Job job_, ssize_t priority_, const DB::OpenTelemetry::TracingContextOnThread& thread_trace_context_) : job(job_), priority(priority_), thread_trace_context(thread_trace_context_) {} bool operator< (const JobWithPriority & rhs) const @@ -113,7 +113,7 @@ private: std::exception_ptr first_exception; template - ReturnType scheduleImpl(Job job, int priority, std::optional wait_microseconds, bool propagate_opentelemetry_tracing_context = true); + ReturnType scheduleImpl(Job job, ssize_t priority, std::optional wait_microseconds, bool propagate_opentelemetry_tracing_context = true); void worker(typename std::list::iterator thread_it); @@ -178,7 +178,10 @@ public: func = std::forward(func), args = std::make_tuple(std::forward(args)...)]() mutable /// mutable is needed to destroy capture { - SCOPE_EXIT(state->event.set()); + SCOPE_EXIT( + state->thread_id = std::thread::id(); + state->event.set(); + ); state->thread_id = std::this_thread::get_id(); diff --git a/src/Common/ThreadProfileEvents.cpp b/src/Common/ThreadProfileEvents.cpp index baa77468a13..76a4d8b1adf 100644 --- a/src/Common/ThreadProfileEvents.cpp +++ b/src/Common/ThreadProfileEvents.cpp @@ -121,7 +121,7 @@ TasksStatsCounters::TasksStatsCounters(const UInt64 tid, const MetricsProvider p stats_getter = [metrics_provider = std::make_shared(), tid]() { ::taskstats result{}; - metrics_provider->getStat(result, tid); + metrics_provider->getStat(result, static_cast(tid)); return result; }; break; @@ -526,7 +526,7 @@ void PerfEventsCounters::finalizeProfileEvents(ProfileEvents::Counters & profile continue; constexpr ssize_t bytes_to_read = sizeof(current_values[0]); - const int bytes_read = read(fd, ¤t_values[i], bytes_to_read); + const ssize_t bytes_read = read(fd, ¤t_values[i], bytes_to_read); if (bytes_read != bytes_to_read) { diff --git a/src/Common/Throttler.cpp b/src/Common/Throttler.cpp index 2c9279e21e1..b38777efc03 100644 --- a/src/Common/Throttler.cpp +++ b/src/Common/Throttler.cpp @@ -3,7 +3,6 @@ #include #include #include -#include namespace ProfileEvents { @@ -21,63 +20,56 @@ namespace ErrorCodes /// Just 10^9. static constexpr auto NS = 1000000000UL; -/// Tracking window. Actually the size is not really important. We just want to avoid -/// throttles when there are no actions for a long period time. 
-static const double window_ns = 1ULL * NS; +static const size_t default_burst_seconds = 1; + +Throttler::Throttler(size_t max_speed_, const std::shared_ptr & parent_) + : max_speed(max_speed_) + , max_burst(max_speed_ * default_burst_seconds) + , limit_exceeded_exception_message("") + , tokens(max_burst) + , parent(parent_) +{} + +Throttler::Throttler(size_t max_speed_, size_t limit_, const char * limit_exceeded_exception_message_, + const std::shared_ptr & parent_) + : max_speed(max_speed_) + , max_burst(max_speed_ * default_burst_seconds) + , limit(limit_) + , limit_exceeded_exception_message(limit_exceeded_exception_message_) + , tokens(max_burst) + , parent(parent_) +{} void Throttler::add(size_t amount) { - size_t new_count; - /// This outer variable is always equal to smoothed_speed. - /// We use to avoid race condition. - double current_speed = 0; - + // Values obtained under lock to be checked after release + size_t count_value; + double tokens_value; { std::lock_guard lock(mutex); - auto now = clock_gettime_ns_adjusted(prev_ns); - /// If prev_ns is equal to zero (first `add` call) we known nothing about speed - /// and don't track anything. - if (max_speed && prev_ns != 0) + if (max_speed) { - /// Time spent to process the amount of bytes - double time_spent = now - prev_ns; - - /// The speed in bytes per second is equal to amount / time_spent in seconds - auto new_speed = amount / (time_spent / NS); - - /// We want to make old values of speed less important for our smoothed value - /// so we decay it's value with coef. - auto decay_coeff = std::pow(0.5, time_spent / window_ns); - - /// Weighted average between previous and new speed - smoothed_speed = smoothed_speed * decay_coeff + (1 - decay_coeff) * new_speed; - current_speed = smoothed_speed; + double delta_seconds = prev_ns ? 
static_cast(now - prev_ns) / NS : 0; + tokens = std::min(tokens + max_speed * delta_seconds - amount, max_burst); } - count += amount; - new_count = count; + count_value = count; + tokens_value = tokens; prev_ns = now; } - if (limit && new_count > limit) + if (limit && count_value > limit) throw Exception(limit_exceeded_exception_message + std::string(" Maximum: ") + toString(limit), ErrorCodes::LIMIT_EXCEEDED); - if (max_speed && current_speed > max_speed) + /// Wait unless there is positive amount of tokens - throttling + if (max_speed && tokens_value < 0) { - /// If we was too fast then we have to sleep until our smoothed speed became <= max_speed - int64_t sleep_time = static_cast(-window_ns * std::log2(max_speed / current_speed)); - - if (sleep_time > 0) - { - accumulated_sleep += sleep_time; - - sleepForNanoseconds(sleep_time); - - accumulated_sleep -= sleep_time; - - ProfileEvents::increment(ProfileEvents::ThrottlerSleepMicroseconds, sleep_time / 1000UL); - } + int64_t sleep_time = static_cast(-tokens_value / max_speed * NS); + accumulated_sleep += sleep_time; + sleepForNanoseconds(sleep_time); + accumulated_sleep -= sleep_time; + ProfileEvents::increment(ProfileEvents::ThrottlerSleepMicroseconds, sleep_time / 1000UL); } if (parent) @@ -89,9 +81,9 @@ void Throttler::reset() std::lock_guard lock(mutex); count = 0; - accumulated_sleep = 0; - smoothed_speed = 0; + tokens = max_burst; prev_ns = 0; + // NOTE: do not zero `accumulated_sleep` to avoid races } bool Throttler::isThrottling() const diff --git a/src/Common/Throttler.h b/src/Common/Throttler.h index 6d44ad6ca5f..9b6eff13506 100644 --- a/src/Common/Throttler.h +++ b/src/Common/Throttler.h @@ -10,25 +10,26 @@ namespace DB { -/** Allows you to limit the speed of something (in entities per second) using sleep. - * Specifics of work: - * Tracks exponentially (pow of 1/2) smoothed speed with hardcoded window. - * See more comments in .cpp file. - * - * Also allows you to set a limit on the maximum number of entities. If exceeded, an exception will be thrown. +/** Allows you to limit the speed of something (in tokens per second) using sleep. + * Implemented using Token Bucket Throttling algorithm. + * Also allows you to set a limit on the maximum number of tokens. If exceeded, an exception will be thrown. */ class Throttler { public: - explicit Throttler(size_t max_speed_, const std::shared_ptr & parent_ = nullptr) - : max_speed(max_speed_), limit_exceeded_exception_message(""), parent(parent_) {} + Throttler(size_t max_speed_, size_t max_burst_, const std::shared_ptr & parent_ = nullptr) + : max_speed(max_speed_), max_burst(max_burst_), limit_exceeded_exception_message(""), tokens(max_burst), parent(parent_) {} + + explicit Throttler(size_t max_speed_, const std::shared_ptr & parent_ = nullptr); + + Throttler(size_t max_speed_, size_t max_burst_, size_t limit_, const char * limit_exceeded_exception_message_, + const std::shared_ptr & parent_ = nullptr) + : max_speed(max_speed_), max_burst(max_burst_), limit(limit_), limit_exceeded_exception_message(limit_exceeded_exception_message_), tokens(max_burst), parent(parent_) {} Throttler(size_t max_speed_, size_t limit_, const char * limit_exceeded_exception_message_, - const std::shared_ptr & parent_ = nullptr) - : max_speed(max_speed_), limit(limit_), limit_exceeded_exception_message(limit_exceeded_exception_message_), parent(parent_) {} + const std::shared_ptr & parent_ = nullptr); - /// Calculates the smoothed speed, sleeps if required and throws exception on - /// limit overflow. 
+ /// Use `amount` tokens, sleeps if required or throws exception on limit overflow. void add(size_t amount); /// Not thread safe @@ -45,15 +46,14 @@ public: private: size_t count{0}; - const size_t max_speed{0}; - const uint64_t limit{0}; /// 0 - not limited. + const size_t max_speed{0}; /// in tokens per second. + const size_t max_burst{0}; /// in tokens. + const uint64_t limit{0}; /// 0 - not limited. const char * limit_exceeded_exception_message = nullptr; std::mutex mutex; - std::atomic accumulated_sleep{0}; - /// Smoothed value of current speed. Updated in `add` method. - double smoothed_speed{0}; - /// previous `add` call time (in nanoseconds) - uint64_t prev_ns{0}; + std::atomic accumulated_sleep{0}; // Accumulated sleep time over all waiting threads + double tokens{0}; /// Amount of tokens available in token bucket. Updated in `add` method. + uint64_t prev_ns{0}; /// Previous `add` call time (in nanoseconds). /// Used to implement a hierarchy of throttlers std::shared_ptr parent; diff --git a/src/Common/UTF8Helpers.h b/src/Common/UTF8Helpers.h index ce90af3d5ce..623a62a6f79 100644 --- a/src/Common/UTF8Helpers.h +++ b/src/Common/UTF8Helpers.h @@ -99,7 +99,10 @@ requires (sizeof(CharT) == 1) size_t convertCodePointToUTF8(int code_point, CharT * out_bytes, size_t out_length) { static const Poco::UTF8Encoding utf8; - int res = utf8.convert(code_point, reinterpret_cast(out_bytes), out_length); + int res = utf8.convert( + code_point, + reinterpret_cast(out_bytes), + static_cast(out_length)); assert(res >= 0); return res; } @@ -109,7 +112,9 @@ requires (sizeof(CharT) == 1) std::optional convertUTF8ToCodePoint(const CharT * in_bytes, size_t in_length) { static const Poco::UTF8Encoding utf8; - int res = utf8.queryConvert(reinterpret_cast(in_bytes), in_length); + int res = utf8.queryConvert( + reinterpret_cast(in_bytes), + static_cast(in_length)); if (res >= 0) return res; diff --git a/src/Common/Volnitsky.h b/src/Common/Volnitsky.h index 6f5948b6564..a27fd36f704 100644 --- a/src/Common/Volnitsky.h +++ b/src/Common/Volnitsky.h @@ -404,7 +404,8 @@ public: /// And also adding from the end guarantees that we will find first occurrence because we will lookup bigger offsets first. for (auto i = static_cast(needle_size - sizeof(VolnitskyTraits::Ngram)); i >= 0; --i) { - bool ok = VolnitskyTraits::putNGram(needle + i, i + 1, needle, needle_size, callback); + bool ok = VolnitskyTraits::putNGram( + needle + i, static_cast(i + 1), needle, needle_size, callback); /** `putNGramUTF8CaseInsensitive` does not work if characters with lower and upper cases * are represented by different number of bytes or code points. 
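The Throttler rewrite above switches from an exponentially smoothed speed estimate to a token bucket: tokens refill at max_speed per second up to max_burst, each add(amount) consumes amount tokens, and a negative balance is turned into a proportional sleep. A compact sketch of that accounting, with std::chrono in place of clock_gettime_ns_adjusted, a single level, and without the hard limit check:

#include <algorithm>
#include <chrono>
#include <cstddef>
#include <mutex>
#include <thread>

/// Minimal token-bucket throttler sketch (no parent throttler, no hard limit).
class TokenBucket
{
public:
    TokenBucket(size_t max_speed_, size_t max_burst_)
        : max_speed(max_speed_), max_burst(max_burst_), tokens(static_cast<double>(max_burst_)) {}

    void add(size_t amount)
    {
        double tokens_after;
        {
            std::lock_guard lock(mutex);
            auto now = std::chrono::steady_clock::now();
            double elapsed_seconds = has_prev ? std::chrono::duration<double>(now - prev).count() : 0.0;
            /// Refill at max_speed per second, spend `amount`, cap at the bucket capacity.
            tokens = std::min(tokens + max_speed * elapsed_seconds - amount, static_cast<double>(max_burst));
            tokens_after = tokens;
            prev = now;
            has_prev = true;
        }

        /// A negative balance means we spent tokens we do not have yet:
        /// sleep until they would have been refilled at max_speed per second.
        if (tokens_after < 0)
            std::this_thread::sleep_for(std::chrono::duration<double>(-tokens_after / max_speed));
    }

private:
    const size_t max_speed;   /// tokens per second
    const size_t max_burst;   /// bucket capacity
    double tokens;
    bool has_prev = false;
    std::chrono::steady_clock::time_point prev;
    std::mutex mutex;
};

Compared with the old smoothed-speed approach, short bursts up to max_burst pass without sleeping, and the sleep time is strictly proportional to the overdraft.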
diff --git a/src/Common/ZooKeeper/TestKeeper.cpp b/src/Common/ZooKeeper/TestKeeper.cpp index 098dc522eeb..134374f98d0 100644 --- a/src/Common/ZooKeeper/TestKeeper.cpp +++ b/src/Common/ZooKeeper/TestKeeper.cpp @@ -218,7 +218,7 @@ std::pair TestKeeperCreateRequest::process(TestKeeper::Contai created_node.stat.ctime = std::chrono::system_clock::now().time_since_epoch() / std::chrono::milliseconds(1); created_node.stat.mtime = created_node.stat.ctime; created_node.stat.numChildren = 0; - created_node.stat.dataLength = data.length(); + created_node.stat.dataLength = static_cast(data.length()); created_node.data = data; created_node.is_ephemeral = is_ephemeral; created_node.is_sequental = is_sequential; diff --git a/src/Common/ZooKeeper/ZooKeeper.cpp b/src/Common/ZooKeeper/ZooKeeper.cpp index 406d8b27c39..ab5d918e1f0 100644 --- a/src/Common/ZooKeeper/ZooKeeper.cpp +++ b/src/Common/ZooKeeper/ZooKeeper.cpp @@ -777,19 +777,34 @@ bool ZooKeeper::waitForDisappear(const std::string & path, const WaitCondition & return false; } -void ZooKeeper::waitForEphemeralToDisappearIfAny(const std::string & path) +void ZooKeeper::handleEphemeralNodeExistence(const std::string & path, const std::string & fast_delete_if_equal_value) { zkutil::EventPtr eph_node_disappeared = std::make_shared(); String content; - if (!tryGet(path, content, nullptr, eph_node_disappeared)) + Coordination::Stat stat; + if (!tryGet(path, content, &stat, eph_node_disappeared)) return; - int32_t timeout_ms = 3 * args.session_timeout_ms; - if (!eph_node_disappeared->tryWait(timeout_ms)) - throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, - "Ephemeral node {} still exists after {}s, probably it's owned by someone else. " - "Either session_timeout_ms in client's config is different from server's config or it's a bug. " - "Node data: '{}'", path, timeout_ms / 1000, content); + if (content == fast_delete_if_equal_value) + { + auto code = tryRemove(path, stat.version); + if (code != Coordination::Error::ZOK && code != Coordination::Error::ZNONODE) + throw Coordination::Exception(code, path); + } + else + { + LOG_WARNING(log, "Ephemeral node ('{}') already exists but it isn't owned by us. Will wait until it disappears", path); + int32_t timeout_ms = 3 * args.session_timeout_ms; + if (!eph_node_disappeared->tryWait(timeout_ms)) + throw DB::Exception( + DB::ErrorCodes::LOGICAL_ERROR, + "Ephemeral node {} still exists after {}s, probably it's owned by someone else. " + "Either session_timeout_ms in client's config is different from server's config or it's a bug. " + "Node data: '{}'", + path, + timeout_ms / 1000, + content); + } } ZooKeeperPtr ZooKeeper::startNewSession() const diff --git a/src/Common/ZooKeeper/ZooKeeper.h b/src/Common/ZooKeeper/ZooKeeper.h index 5098788fb2e..968d10ad9a5 100644 --- a/src/Common/ZooKeeper/ZooKeeper.h +++ b/src/Common/ZooKeeper/ZooKeeper.h @@ -45,7 +45,7 @@ struct ShuffleHost { String host; Int64 priority = 0; - UInt32 random = 0; + UInt64 random = 0; void randomize() { @@ -393,9 +393,11 @@ public: /// The function returns true if waited and false if waiting was interrupted by condition. bool waitForDisappear(const std::string & path, const WaitCondition & condition = {}); - /// Wait for the ephemeral node created in previous session to disappear. - /// Throws LOGICAL_ERROR if node still exists after 2x session_timeout. - void waitForEphemeralToDisappearIfAny(const std::string & path); + /// Checks if a the ephemeral node exists. 
These nodes are removed automatically by ZK when the session ends + /// If the node exists and its value is equal to fast_delete_if_equal_value it will remove it + /// If the node exists and its value is different, it will wait for it to disappear. It will throw a LOGICAL_ERROR if the node doesn't + /// disappear automatically after 3x session_timeout. + void handleEphemeralNodeExistence(const std::string & path, const std::string & fast_delete_if_equal_value); /// Async interface (a small subset of operations is implemented). /// @@ -609,7 +611,7 @@ public: catch (...) { ProfileEvents::increment(ProfileEvents::CannotRemoveEphemeralNode); - DB::tryLogCurrentException(__PRETTY_FUNCTION__, "Cannot remove " + path + ": "); + DB::tryLogCurrentException(__PRETTY_FUNCTION__, "Cannot remove " + path); } } diff --git a/src/Common/ZooKeeper/ZooKeeperCommon.cpp b/src/Common/ZooKeeper/ZooKeeperCommon.cpp index a565a322790..d49800b1abe 100644 --- a/src/Common/ZooKeeper/ZooKeeperCommon.cpp +++ b/src/Common/ZooKeeper/ZooKeeperCommon.cpp @@ -724,7 +724,7 @@ void ZooKeeperRequest::createLogElements(LogElements & elems) const elem.has_watch = has_watch; elem.op_num = static_cast(getOpNum()); elem.path = getPath(); - elem.request_idx = elems.size() - 1; + elem.request_idx = static_cast(elems.size() - 1); } @@ -762,7 +762,7 @@ void ZooKeeperCheckRequest::createLogElements(LogElements & elems) const void ZooKeeperMultiRequest::createLogElements(LogElements & elems) const { ZooKeeperRequest::createLogElements(elems); - elems.back().requests_size = requests.size(); + elems.back().requests_size = static_cast(requests.size()); for (const auto & request : requests) { auto & req = dynamic_cast(*request); diff --git a/src/Common/ZooKeeper/ZooKeeperImpl.cpp b/src/Common/ZooKeeper/ZooKeeperImpl.cpp index f70dac74a6a..ebab18b5ed7 100644 --- a/src/Common/ZooKeeper/ZooKeeperImpl.cpp +++ b/src/Common/ZooKeeper/ZooKeeperImpl.cpp @@ -546,7 +546,7 @@ void ZooKeeper::sendAuth(const String & scheme, const String & data) if (read_xid != AUTH_XID) throw Exception(Error::ZMARSHALLINGERROR, "Unexpected event received in reply to auth request: {}", read_xid); - int32_t actual_length = in->count() - count_before_event; + int32_t actual_length = static_cast(in->count() - count_before_event); if (length != actual_length) throw Exception(Error::ZMARSHALLINGERROR, "Response length doesn't match. Expected: {}, actual: {}", length, actual_length); @@ -821,7 +821,7 @@ void ZooKeeper::receiveEvent() } } - int32_t actual_length = in->count() - count_before_event; + int32_t actual_length = static_cast(in->count() - count_before_event); if (length != actual_length) throw Exception(Error::ZMARSHALLINGERROR, "Response length doesn't match. 
Expected: {}, actual: {}", length, actual_length); diff --git a/src/Common/examples/arena_with_free_lists.cpp b/src/Common/examples/arena_with_free_lists.cpp index dcb777abc42..4f209ccb5b2 100644 --- a/src/Common/examples/arena_with_free_lists.cpp +++ b/src/Common/examples/arena_with_free_lists.cpp @@ -176,11 +176,11 @@ struct Dictionary { case AttributeUnderlyingTypeTest::UInt8: std::get>(attribute.arrays)[idx] = value.get(); break; case AttributeUnderlyingTypeTest::UInt16: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingTypeTest::UInt32: std::get>(attribute.arrays)[idx] = value.get(); break; + case AttributeUnderlyingTypeTest::UInt32: std::get>(attribute.arrays)[idx] = static_cast(value.get()); break; case AttributeUnderlyingTypeTest::UInt64: std::get>(attribute.arrays)[idx] = value.get(); break; case AttributeUnderlyingTypeTest::Int8: std::get>(attribute.arrays)[idx] = value.get(); break; case AttributeUnderlyingTypeTest::Int16: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingTypeTest::Int32: std::get>(attribute.arrays)[idx] = value.get(); break; + case AttributeUnderlyingTypeTest::Int32: std::get>(attribute.arrays)[idx] = static_cast(value.get()); break; case AttributeUnderlyingTypeTest::Int64: std::get>(attribute.arrays)[idx] = value.get(); break; case AttributeUnderlyingTypeTest::Float32: std::get>(attribute.arrays)[idx] = static_cast(value.get()); break; case AttributeUnderlyingTypeTest::Float64: std::get>(attribute.arrays)[idx] = value.get(); break; diff --git a/src/Common/examples/array_cache.cpp b/src/Common/examples/array_cache.cpp index f6bbbba4cbc..c1267d3c9cf 100644 --- a/src/Common/examples/array_cache.cpp +++ b/src/Common/examples/array_cache.cpp @@ -46,9 +46,9 @@ int main(int argc, char ** argv) size_t cache_size = DB::parse(argv[1]); size_t num_threads = DB::parse(argv[2]); - size_t num_iterations = DB::parse(argv[3]); + int num_iterations = DB::parse(argv[3]); size_t region_max_size = DB::parse(argv[4]); - size_t max_key = DB::parse(argv[5]); + int max_key = DB::parse(argv[5]); using Cache = ArrayCache; Cache cache(cache_size); @@ -60,7 +60,7 @@ int main(int argc, char ** argv) { pcg64 generator(randomSeed()); - for (size_t j = 0; j < num_iterations; ++j) + for (int j = 0; j < num_iterations; ++j) { size_t size = std::uniform_int_distribution(1, region_max_size)(generator); int key = std::uniform_int_distribution(1, max_key)(generator); diff --git a/src/Common/examples/average.cpp b/src/Common/examples/average.cpp index d2802717fc8..749bad848cc 100644 --- a/src/Common/examples/average.cpp +++ b/src/Common/examples/average.cpp @@ -425,7 +425,7 @@ Float NO_INLINE microsort(const PODArray & keys, const PODArray & for (size_t i = 1; i < HISTOGRAM_SIZE; ++i) positions[i] = positions[i - 1] + count[i - 1]; - for (size_t i = 0; i < size; ++i) + for (UInt32 i = 0; i < size; ++i) *positions[keys[i]]++ = i; /// Update states. 
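The ZooKeeper change a few hunks above replaces waitForEphemeralToDisappearIfAny() with handleEphemeralNodeExistence(), which can reclaim a stale node immediately when its value matches the caller's marker instead of always waiting for the old session to expire. Condensed control flow of that function, written against the zkutil calls that appear in the patch; logging and exact error handling are trimmed, and session_timeout_ms is assumed to come from the client arguments as in the original:

#include <Common/ZooKeeper/ZooKeeper.h>
#include <Poco/Event.h>
#include <cstdint>
#include <memory>
#include <stdexcept>
#include <string>

/// Sketch of the decision flow only.
void handleEphemeralNode(zkutil::ZooKeeper & zk, const std::string & path,
                         const std::string & fast_delete_if_equal_value, int32_t session_timeout_ms)
{
    auto node_disappeared = std::make_shared<Poco::Event>();
    std::string content;
    Coordination::Stat stat;

    if (!zk.tryGet(path, content, &stat, node_disappeared))
        return;  /// The node is already gone.

    if (content == fast_delete_if_equal_value)
    {
        /// The node was left by a previous incarnation of this caller: delete it right away,
        /// guarded by the version so a freshly created node from someone else is never removed.
        zk.tryRemove(path, stat.version);
    }
    else
    {
        /// Owned by another session: wait for that session to expire (up to 3x session timeout).
        if (!node_disappeared->tryWait(3 * session_timeout_ms))
            throw std::runtime_error("Ephemeral node " + path + " still exists, data: " + content);
    }
}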
diff --git a/src/Common/examples/parallel_aggregation.cpp b/src/Common/examples/parallel_aggregation.cpp index f54c4cee12c..bd252b330f3 100644 --- a/src/Common/examples/parallel_aggregation.cpp +++ b/src/Common/examples/parallel_aggregation.cpp @@ -492,7 +492,7 @@ int main(int argc, char ** argv) watch.restart(); - for (size_t i = 0; i < MapTwoLevel::NUM_BUCKETS; ++i) + for (unsigned i = 0; i < MapTwoLevel::NUM_BUCKETS; ++i) pool.scheduleOrThrowOnError([&] { merge2(maps.data(), num_threads, i); }); pool.wait(); @@ -545,7 +545,7 @@ int main(int argc, char ** argv) watch.restart(); - for (size_t i = 0; i < MapTwoLevel::NUM_BUCKETS; ++i) + for (unsigned i = 0; i < MapTwoLevel::NUM_BUCKETS; ++i) pool.scheduleOrThrowOnError([&] { merge2(maps.data(), num_threads, i); }); pool.wait(); diff --git a/src/Common/filesystemHelpers.cpp b/src/Common/filesystemHelpers.cpp index 4c60a6ddac0..07a08dc7fbc 100644 --- a/src/Common/filesystemHelpers.cpp +++ b/src/Common/filesystemHelpers.cpp @@ -258,7 +258,7 @@ size_t getSizeFromFileDescriptor(int fd, const String & file_name) return buf.st_size; } -int getINodeNumberFromPath(const String & path) +Int64 getINodeNumberFromPath(const String & path) { struct stat file_stat; if (stat(path.data(), &file_stat)) diff --git a/src/Common/filesystemHelpers.h b/src/Common/filesystemHelpers.h index 9faaabb42f2..0e6e16941bb 100644 --- a/src/Common/filesystemHelpers.h +++ b/src/Common/filesystemHelpers.h @@ -74,7 +74,7 @@ std::optional tryGetSizeFromFilePath(const String & path); /// Get inode number for a file path. /// Will not work correctly if filesystem does not support inodes. -int getINodeNumberFromPath(const String & path); +Int64 getINodeNumberFromPath(const String & path); } diff --git a/src/Common/formatIPv6.cpp b/src/Common/formatIPv6.cpp index 2e08828f724..bc1878c0bc6 100644 --- a/src/Common/formatIPv6.cpp +++ b/src/Common/formatIPv6.cpp @@ -80,7 +80,7 @@ static void printInteger(char *& out, T value) void formatIPv6(const unsigned char * src, char *& dst, uint8_t zeroed_tail_bytes_count) { - struct { int base, len; } best{-1, 0}, cur{-1, 0}; + struct { Int64 base, len; } best{-1, 0}, cur{-1, 0}; std::array words{}; /** Preprocess: @@ -122,14 +122,18 @@ void formatIPv6(const unsigned char * src, char *& dst, uint8_t zeroed_tail_byte best.base = -1; /// Format the result. - for (const int i : collections::range(0, words.size())) + for (const size_t i : collections::range(0, words.size())) { /// Are we inside the best run of 0x00's? - if (best.base != -1 && i >= best.base && i < (best.base + best.len)) + if (best.base != -1) { - if (i == best.base) - *dst++ = ':'; - continue; + size_t best_base = static_cast(best.base); + if (i >= best_base && i < (best_base + best.len)) + { + if (i == best_base) + *dst++ = ':'; + continue; + } } /// Are we following an initial run of 0x00s or any real hex? 
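Side note on the formatIPv6.cpp hunk above: it concerns the usual "::" shortening of IPv6 text output, where best.base/best.len track the longest run of zero 16-bit groups; the patch makes the loop index size_t and casts best.base only once a run is known to exist, so signed and unsigned values are no longer compared directly. A rough sketch of the run-finding idea, assuming the eight groups are already decoded (the struct and function names below are invented for the example):

    #include <array>
    #include <cstdint>

    struct ZeroRun { std::int64_t base = -1; std::uint64_t len = 0; };

    /// Longest run of zero 16-bit groups; base stays -1 if there is none.
    /// The formatter prints "::" once for the winning run instead of its zero groups.
    ZeroRun findLongestZeroRun(const std::array<std::uint16_t, 8> & words)
    {
        ZeroRun best;
        ZeroRun cur;
        for (std::uint64_t i = 0; i < words.size(); ++i)
        {
            if (words[i] == 0)
            {
                if (cur.base == -1)
                {
                    cur.base = static_cast<std::int64_t>(i);
                    cur.len = 1;
                }
                else
                    ++cur.len;
            }
            else
            {
                if (cur.base != -1 && cur.len > best.len)
                    best = cur;
                cur = {};
            }
        }
        if (cur.base != -1 && cur.len > best.len)
            best = cur;
        return best;
    }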
diff --git a/src/Common/formatIPv6.h b/src/Common/formatIPv6.h index 83b9d6e9fb1..31d5e83760a 100644 --- a/src/Common/formatIPv6.h +++ b/src/Common/formatIPv6.h @@ -5,6 +5,7 @@ #include #include #include +#include #include #include @@ -55,8 +56,11 @@ inline bool parseIPv4(const char * src, unsigned char * dst) } if (*(src - 1) != '\0') return false; - +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + reverseMemcpy(dst, &result, sizeof(result)); +#else memcpy(dst, &result, sizeof(result)); +#endif return true; } diff --git a/src/Common/getCurrentProcessFDCount.cpp b/src/Common/getCurrentProcessFDCount.cpp index 4c0f328c853..6217d92fbc1 100644 --- a/src/Common/getCurrentProcessFDCount.cpp +++ b/src/Common/getCurrentProcessFDCount.cpp @@ -7,10 +7,10 @@ #include -int getCurrentProcessFDCount() +Int64 getCurrentProcessFDCount() { namespace fs = std::filesystem; - int result = -1; + Int64 result = -1; #if defined(OS_LINUX) || defined(OS_DARWIN) using namespace DB; diff --git a/src/Common/getCurrentProcessFDCount.h b/src/Common/getCurrentProcessFDCount.h index 583b99f6c13..f6273580df7 100644 --- a/src/Common/getCurrentProcessFDCount.h +++ b/src/Common/getCurrentProcessFDCount.h @@ -1,5 +1,7 @@ #pragma once +#include + /// Get current process file descriptor count /// @return -1 os doesn't support "lsof" command or some error occurs. -int getCurrentProcessFDCount(); +Int64 getCurrentProcessFDCount(); diff --git a/src/Common/getNumberOfPhysicalCPUCores.cpp b/src/Common/getNumberOfPhysicalCPUCores.cpp index 7bb68b324b2..7a1f10b6435 100644 --- a/src/Common/getNumberOfPhysicalCPUCores.cpp +++ b/src/Common/getNumberOfPhysicalCPUCores.cpp @@ -48,7 +48,7 @@ static unsigned getNumberOfPhysicalCPUCoresImpl() /// Let's limit ourself to the number of physical cores. /// But if the number of logical cores is small - maybe it is a small machine /// or very limited cloud instance and it is reasonable to use all the cores. 
- if (cpu_count >= 8) + if (cpu_count >= 32) cpu_count /= 2; #endif diff --git a/src/Common/intExp.h b/src/Common/intExp.h index 3529990ef3b..69b0f09975a 100644 --- a/src/Common/intExp.h +++ b/src/Common/intExp.h @@ -47,6 +47,11 @@ namespace common constexpr inline int exp10_i32(int x) { + if (x < 0) + return 0; + if (x > 9) + return std::numeric_limits::max(); + constexpr int values[] = { 1, @@ -65,6 +70,11 @@ constexpr inline int exp10_i32(int x) constexpr inline int64_t exp10_i64(int x) { + if (x < 0) + return 0; + if (x > 18) + return std::numeric_limits::max(); + constexpr int64_t values[] = { 1LL, @@ -92,6 +102,11 @@ constexpr inline int64_t exp10_i64(int x) constexpr inline Int128 exp10_i128(int x) { + if (x < 0) + return 0; + if (x > 38) + return std::numeric_limits::max(); + constexpr Int128 values[] = { static_cast(1LL), @@ -140,6 +155,11 @@ constexpr inline Int128 exp10_i128(int x) inline Int256 exp10_i256(int x) { + if (x < 0) + return 0; + if (x > 76) + return std::numeric_limits::max(); + using Int256 = Int256; static constexpr Int256 i10e18{1000000000000000000ll}; static const Int256 values[] = { @@ -231,8 +251,10 @@ inline Int256 exp10_i256(int x) template constexpr inline T intExp10OfSize(int x) { - if constexpr (sizeof(T) <= 8) - return intExp10(x); + if constexpr (sizeof(T) <= 4) + return static_cast(common::exp10_i32(x)); + else if constexpr (sizeof(T) <= 8) + return common::exp10_i64(x); else if constexpr (sizeof(T) <= 16) return common::exp10_i128(x); else diff --git a/src/Common/mysqlxx/PoolWithFailover.cpp b/src/Common/mysqlxx/PoolWithFailover.cpp index f3dee1a6776..0ed231cdf8d 100644 --- a/src/Common/mysqlxx/PoolWithFailover.cpp +++ b/src/Common/mysqlxx/PoolWithFailover.cpp @@ -42,8 +42,8 @@ PoolWithFailover::PoolWithFailover( /// which triggers massive re-constructing of connection pools. /// The state of PRNGs like std::mt19937 is considered to be quite heavy /// thus here we attempt to optimize its construction. - static thread_local std::mt19937 rnd_generator( - std::hash{}(std::this_thread::get_id()) + std::clock()); + static thread_local std::mt19937 rnd_generator(static_cast( + std::hash{}(std::this_thread::get_id()) + std::clock())); for (auto & [_, replicas] : replicas_by_priority) { if (replicas.size() > 1) diff --git a/src/Common/mysqlxx/Value.cpp b/src/Common/mysqlxx/Value.cpp index 85b63b722a2..6954080f864 100644 --- a/src/Common/mysqlxx/Value.cpp +++ b/src/Common/mysqlxx/Value.cpp @@ -124,7 +124,7 @@ double Value::readFloatText(const char * buf, size_t length) const case 'E': { ++buf; - Int32 exponent = readIntText(buf, end - buf); + Int32 exponent = static_cast(readIntText(buf, end - buf)); x *= preciseExp10(exponent); if (negative) x = -x; diff --git a/src/Common/mysqlxx/mysqlxx/Row.h b/src/Common/mysqlxx/mysqlxx/Row.h index 6ef40ff2060..5690389d1cf 100644 --- a/src/Common/mysqlxx/mysqlxx/Row.h +++ b/src/Common/mysqlxx/mysqlxx/Row.h @@ -44,9 +44,9 @@ public: /** Получить значение по индексу. * Здесь используется int, а не unsigned, чтобы не было неоднозначности с тем же методом, принимающим const char *. 
*/ - Value operator[] (int n) const + Value operator[] (size_t n) const { - if (unlikely(static_cast(n) >= res->getNumFields())) + if (unlikely(n >= res->getNumFields())) throw Exception("Index of column is out of range."); return Value(row[n], lengths[n], res); } diff --git a/src/Common/mysqlxx/mysqlxx/Value.h b/src/Common/mysqlxx/mysqlxx/Value.h index 46fcee0753f..892e5a19d93 100644 --- a/src/Common/mysqlxx/mysqlxx/Value.h +++ b/src/Common/mysqlxx/mysqlxx/Value.h @@ -242,8 +242,8 @@ template <> inline unsigned char Value::get() cons template <> inline char8_t Value::get() const { return getUInt(); } template <> inline short Value::get() const { return getInt(); } /// NOLINT template <> inline unsigned short Value::get() const { return getUInt(); } /// NOLINT -template <> inline int Value::get() const { return getInt(); } -template <> inline unsigned int Value::get() const { return getUInt(); } +template <> inline int Value::get() const { return static_cast(getInt()); } +template <> inline unsigned int Value::get() const { return static_cast(getUInt()); } template <> inline long Value::get() const { return getInt(); } /// NOLINT template <> inline unsigned long Value::get() const { return getUInt(); } /// NOLINT template <> inline long long Value::get() const { return getInt(); } /// NOLINT diff --git a/src/Common/parseGlobs.cpp b/src/Common/parseGlobs.cpp index 8e9195f9842..f8d331c2b76 100644 --- a/src/Common/parseGlobs.cpp +++ b/src/Common/parseGlobs.cpp @@ -68,14 +68,14 @@ std::string makeRegexpPatternFromGlobs(const std::string & initial_str_with_glob output_width = std::max(range_begin_width, range_end_width); if (leading_zeros) - oss_for_replacing << std::setfill('0') << std::setw(output_width); + oss_for_replacing << std::setfill('0') << std::setw(static_cast(output_width)); oss_for_replacing << range_begin; for (size_t i = range_begin + 1; i <= range_end; ++i) { oss_for_replacing << '|'; if (leading_zeros) - oss_for_replacing << std::setfill('0') << std::setw(output_width); + oss_for_replacing << std::setfill('0') << std::setw(static_cast(output_width)); oss_for_replacing << i; } } @@ -90,17 +90,23 @@ std::string makeRegexpPatternFromGlobs(const std::string & initial_str_with_glob oss_for_replacing << escaped_with_globs.substr(current_index); std::string almost_res = oss_for_replacing.str(); WriteBufferFromOwnString buf_final_processing; + char previous = ' '; for (const auto & letter : almost_res) { - if ((letter == '?') || (letter == '*')) + if (previous == '*' && letter == '*') + { + buf_final_processing << "[^{}]"; + } + else if ((letter == '?') || (letter == '*')) { buf_final_processing << "[^/]"; /// '?' 
is any symbol except '/' if (letter == '?') continue; } - if ((letter == '.') || (letter == '{') || (letter == '}')) + else if ((letter == '.') || (letter == '{') || (letter == '}')) buf_final_processing << '\\'; buf_final_processing << letter; + previous = letter; } return buf_final_processing.str(); } diff --git a/src/Common/parseRemoteDescription.cpp b/src/Common/parseRemoteDescription.cpp index 1f614945491..c8434b0993e 100644 --- a/src/Common/parseRemoteDescription.cpp +++ b/src/Common/parseRemoteDescription.cpp @@ -82,8 +82,8 @@ std::vector parseRemoteDescription(const String & description, size_t l, /// Either the numeric interval (8..10) or equivalent expression in brackets if (description[i] == '{') { - int cnt = 1; - int last_dot = -1; /// The rightmost pair of points, remember the index of the right of the two + ssize_t cnt = 1; + ssize_t last_dot = -1; /// The rightmost pair of points, remember the index of the right of the two size_t m; std::vector buffer; bool have_splitter = false; diff --git a/src/Common/safe_cast.h b/src/Common/safe_cast.h new file mode 100644 index 00000000000..133808ca259 --- /dev/null +++ b/src/Common/safe_cast.h @@ -0,0 +1,22 @@ +#pragma once + +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +template +To safe_cast(From from) +{ + constexpr auto max = std::numeric_limits::max(); + if (from > max) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Overflow ({} > {})", from, max); + return static_cast(from); +} + +} diff --git a/src/Common/tests/gtest_DateLUTImpl.cpp b/src/Common/tests/gtest_DateLUTImpl.cpp index 49013625ed3..6ae934b2296 100644 --- a/src/Common/tests/gtest_DateLUTImpl.cpp +++ b/src/Common/tests/gtest_DateLUTImpl.cpp @@ -58,7 +58,7 @@ FailuresCount countFailures(const ::testing::TestResult & test_result) const size_t count = test_result.total_part_count(); for (size_t i = 0; i < count; ++i) { - const auto & part = test_result.GetTestPartResult(i); + const auto & part = test_result.GetTestPartResult(static_cast(i)); if (part.nonfatally_failed()) { ++failures.non_fatal; @@ -134,6 +134,7 @@ TEST(DateLUTTest, TimeValuesInMiddleOfRange) EXPECT_EQ(lut.toRelativeMonthNum(time), 24237 /*unsigned*/); EXPECT_EQ(lut.toRelativeQuarterNum(time), 8078 /*unsigned*/); EXPECT_EQ(lut.toRelativeHourNum(time), 435736 /*time_t*/); + EXPECT_EQ(lut.toStableRelativeHourNum(time), 435757 /*time_t*/); EXPECT_EQ(lut.toRelativeMinuteNum(time), 26144180 /*time_t*/); EXPECT_EQ(lut.toStartOfMinuteInterval(time, 6), 1568650680 /*time_t*/); EXPECT_EQ(lut.toStartOfSecondInterval(time, 7), 1568650811 /*time_t*/); @@ -196,6 +197,7 @@ TEST(DateLUTTest, TimeValuesAtLeftBoderOfRange) EXPECT_EQ(lut.toRelativeMonthNum(time), 23641 /*unsigned*/); // ? EXPECT_EQ(lut.toRelativeQuarterNum(time), 7880 /*unsigned*/); // ? 
EXPECT_EQ(lut.toRelativeHourNum(time), 0 /*time_t*/); + EXPECT_EQ(lut.toStableRelativeHourNum(time), 24 /*time_t*/); EXPECT_EQ(lut.toRelativeMinuteNum(time), 0 /*time_t*/); EXPECT_EQ(lut.toStartOfMinuteInterval(time, 6), 0 /*time_t*/); EXPECT_EQ(lut.toStartOfSecondInterval(time, 7), 0 /*time_t*/); @@ -259,6 +261,7 @@ TEST(DateLUTTest, TimeValuesAtRightBoderOfRangeOfOldLUT) EXPECT_EQ(lut.toRelativeMonthNum(time), 25273 /*unsigned*/); EXPECT_EQ(lut.toRelativeQuarterNum(time), 8424 /*unsigned*/); EXPECT_EQ(lut.toRelativeHourNum(time), 1192873 /*time_t*/); + EXPECT_EQ(lut.toStableRelativeHourNum(time), 1192897 /*time_t*/); EXPECT_EQ(lut.toRelativeMinuteNum(time), 71572397 /*time_t*/); EXPECT_EQ(lut.toStartOfMinuteInterval(time, 6), 4294343520 /*time_t*/); EXPECT_EQ(lut.toStartOfSecondInterval(time, 7), 4294343872 /*time_t*/); diff --git a/src/Common/tests/gtest_hash_table.cpp b/src/Common/tests/gtest_hash_table.cpp index b06ee5a666e..fd0b2495fde 100644 --- a/src/Common/tests/gtest_hash_table.cpp +++ b/src/Common/tests/gtest_hash_table.cpp @@ -216,27 +216,27 @@ TEST(HashTable, Erase) using Cont = HashSet, HashTableGrowerWithPrecalculation<1>>; Cont cont; - for (size_t i = 0; i < 5000; ++i) + for (int i = 0; i < 5000; ++i) { cont.insert(i); } - for (size_t i = 0; i < 2500; ++i) + for (int i = 0; i < 2500; ++i) { cont.erase(i); } - for (size_t i = 5000; i < 10000; ++i) + for (int i = 5000; i < 10000; ++i) { cont.insert(i); } - for (size_t i = 5000; i < 10000; ++i) + for (int i = 5000; i < 10000; ++i) { cont.erase(i); } - for (size_t i = 2500; i < 5000; ++i) + for (int i = 2500; i < 5000; ++i) { cont.erase(i); } diff --git a/src/Common/tests/gtest_lru_hash_map.cpp b/src/Common/tests/gtest_lru_hash_map.cpp index 562ee667b7b..f45a503be43 100644 --- a/src/Common/tests/gtest_lru_hash_map.cpp +++ b/src/Common/tests/gtest_lru_hash_map.cpp @@ -26,7 +26,7 @@ void testInsert(size_t elements_to_insert_size, size_t map_size) std::vector expected; for (size_t i = 0; i < elements_to_insert_size; ++i) - map.insert(i, i); + map.insert(static_cast(i), static_cast(i)); for (size_t i = elements_to_insert_size - map_size; i < elements_to_insert_size; ++i) expected.emplace_back(i); diff --git a/src/Common/tests/gtest_pod_array.cpp b/src/Common/tests/gtest_pod_array.cpp index 82a6f7589b8..b0c1aab0732 100644 --- a/src/Common/tests/gtest_pod_array.cpp +++ b/src/Common/tests/gtest_pod_array.cpp @@ -532,7 +532,7 @@ TEST(Common, PODNoOverallocation) } } - EXPECT_EQ(capacities, (std::vector{4065, 8161, 16353, 32737, 65505, 131041, 262113, 524257, 1048545})); + EXPECT_EQ(capacities, (std::vector{3969, 8065, 16257, 32641, 65409, 130945, 262017, 524161, 1048449})); } template diff --git a/src/Common/waitForPid.cpp b/src/Common/waitForPid.cpp index 73d88c68adb..2cf80de644d 100644 --- a/src/Common/waitForPid.cpp +++ b/src/Common/waitForPid.cpp @@ -54,7 +54,7 @@ namespace DB static int syscall_pidfd_open(pid_t pid) { - return syscall(SYS_pidfd_open, pid, 0); + return static_cast(syscall(SYS_pidfd_open, pid, 0)); } static bool supportsPidFdOpen() @@ -170,7 +170,8 @@ bool waitForPid(pid_t pid, size_t timeout_in_seconds) /// If timeout is positive try waitpid without block in loop until /// process is normally terminated or waitpid return error - int timeout_in_ms = timeout_in_seconds * 1000; + /// NOTE: timeout casted to int, since poll() accept int for timeout + int timeout_in_ms = static_cast(timeout_in_seconds * 1000); while (timeout_in_ms > 0) { int waitpid_res = HANDLE_EINTR(waitpid(pid, &status, WNOHANG)); diff --git 
a/src/Compression/CompressedReadBufferBase.cpp b/src/Compression/CompressedReadBufferBase.cpp index 9101caf568e..0492b7faec5 100644 --- a/src/Compression/CompressedReadBufferBase.cpp +++ b/src/Compression/CompressedReadBufferBase.cpp @@ -279,7 +279,7 @@ static void readHeaderAndGetCodec(const char * compressed_buffer, size_t size_de void CompressedReadBufferBase::decompressTo(char * to, size_t size_decompressed, size_t size_compressed_without_checksum) { readHeaderAndGetCodec(compressed_buffer, size_decompressed, codec, allow_different_codecs); - codec->decompress(compressed_buffer, size_compressed_without_checksum, to); + codec->decompress(compressed_buffer, static_cast(size_compressed_without_checksum), to); } void CompressedReadBufferBase::decompress(BufferBase::Buffer & to, size_t size_decompressed, size_t size_compressed_without_checksum) @@ -300,7 +300,7 @@ void CompressedReadBufferBase::decompress(BufferBase::Buffer & to, size_t size_d to = BufferBase::Buffer(compressed_buffer + header_size, compressed_buffer + size_compressed_without_checksum); } else - codec->decompress(compressed_buffer, size_compressed_without_checksum, to.begin()); + codec->decompress(compressed_buffer, static_cast(size_compressed_without_checksum), to.begin()); } void CompressedReadBufferBase::flushAsynchronousDecompressRequests() const diff --git a/src/Compression/CompressedWriteBuffer.cpp b/src/Compression/CompressedWriteBuffer.cpp index 6c1dbd9e00c..82beeea37cd 100644 --- a/src/Compression/CompressedWriteBuffer.cpp +++ b/src/Compression/CompressedWriteBuffer.cpp @@ -3,6 +3,7 @@ #include #include +#include #include #include "CompressedWriteBuffer.h" @@ -22,7 +23,8 @@ void CompressedWriteBuffer::nextImpl() if (!offset()) return; - size_t decompressed_size = offset(); + chassert(offset() <= INT_MAX); + UInt32 decompressed_size = static_cast(offset()); UInt32 compressed_reserve_size = codec->getCompressedReserveSize(decompressed_size); /** During compression we need buffer with capacity >= compressed_reserve_size + CHECKSUM_SIZE. diff --git a/src/Compression/CompressionCodecDoubleDelta.cpp b/src/Compression/CompressionCodecDoubleDelta.cpp index 816f242672a..c1278cb88de 100644 --- a/src/Compression/CompressionCodecDoubleDelta.cpp +++ b/src/Compression/CompressionCodecDoubleDelta.cpp @@ -353,7 +353,7 @@ UInt32 compressDataForType(const char * source, UInt32 source_size, char * dest) writer.flush(); - return (dest - dest_start) + (writer.count() + 7) / 8; + return static_cast((dest - dest_start) + (writer.count() + 7) / 8); } template @@ -414,7 +414,7 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest, if (write_spec.data_bits != 0) { const UInt8 sign = reader.readBit(); - double_delta = reader.readBits(write_spec.data_bits - 1) + 1; + double_delta = static_cast(reader.readBits(write_spec.data_bits - 1) + 1); if (sign) { /// It's well defined for unsigned data types. diff --git a/src/Compression/CompressionCodecEncrypted.cpp b/src/Compression/CompressionCodecEncrypted.cpp index a9550c9e28d..02b640ae402 100644 --- a/src/Compression/CompressionCodecEncrypted.cpp +++ b/src/Compression/CompressionCodecEncrypted.cpp @@ -7,6 +7,7 @@ #include #include #include +#include // This depends on BoringSSL-specific API, notably . 
#if USE_SSL @@ -480,7 +481,8 @@ UInt32 CompressionCodecEncrypted::doCompressData(const char * source, UInt32 sou if (out_len != source_size + tag_size) throw Exception(ErrorCodes::LOGICAL_ERROR, "Can't encrypt data, length after encryption {} is wrong, expected {}", out_len, source_size + tag_size); - return out_len + keyid_size + nonce_size; + size_t out_size = out_len + keyid_size + nonce_size; + return safe_cast(out_size); } void CompressionCodecEncrypted::doDecompressData(const char * source, UInt32 source_size, char * dest, UInt32 uncompressed_size) const diff --git a/src/Compression/CompressionCodecFPC.cpp b/src/Compression/CompressionCodecFPC.cpp index 247eb73b65b..48eba210b60 100644 --- a/src/Compression/CompressionCodecFPC.cpp +++ b/src/Compression/CompressionCodecFPC.cpp @@ -453,9 +453,9 @@ UInt32 CompressionCodecFPC::doCompressData(const char * source, UInt32 source_si switch (float_width) { case sizeof(Float64): - return HEADER_SIZE + FPCOperation(destination, level).encode(src); + return static_cast(HEADER_SIZE + FPCOperation(destination, level).encode(src)); case sizeof(Float32): - return HEADER_SIZE + FPCOperation(destination, level).encode(src); + return static_cast(HEADER_SIZE + FPCOperation(destination, level).encode(src)); default: break; } diff --git a/src/Compression/CompressionCodecGorilla.cpp b/src/Compression/CompressionCodecGorilla.cpp index 0ca3e5660e0..0da6ff46dbc 100644 --- a/src/Compression/CompressionCodecGorilla.cpp +++ b/src/Compression/CompressionCodecGorilla.cpp @@ -259,7 +259,7 @@ UInt32 compressDataForType(const char * source, UInt32 source_size, char * dest, writer.flush(); - return (dest - dest_start) + (writer.count() + 7) / 8; + return static_cast((dest - dest_start) + (writer.count() + 7) / 8); } template @@ -320,7 +320,7 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest) ErrorCodes::CANNOT_DECOMPRESS); } - xored_data = reader.readBits(curr_xored_info.data_bits); + xored_data = static_cast(reader.readBits(curr_xored_info.data_bits)); xored_data <<= curr_xored_info.trailing_zero_bits; curr_value = prev_value ^ xored_data; } diff --git a/src/Compression/CompressionCodecLZ4.cpp b/src/Compression/CompressionCodecLZ4.cpp index 12f138dc95a..5b93e7ef60f 100644 --- a/src/Compression/CompressionCodecLZ4.cpp +++ b/src/Compression/CompressionCodecLZ4.cpp @@ -134,7 +134,7 @@ void registerCodecLZ4HC(CompressionCodecFactory & factory) if (!literal) throw Exception("LZ4HC codec argument must be integer", ErrorCodes::ILLEGAL_CODEC_PARAMETER); - level = literal->value.safeGet(); + level = static_cast(literal->value.safeGet()); } return std::make_shared(level); diff --git a/src/Compression/CompressionCodecMultiple.cpp b/src/Compression/CompressionCodecMultiple.cpp index 8ad054673e1..628c2d97d86 100644 --- a/src/Compression/CompressionCodecMultiple.cpp +++ b/src/Compression/CompressionCodecMultiple.cpp @@ -48,7 +48,7 @@ UInt32 CompressionCodecMultiple::getMaxCompressedDataSize(UInt32 uncompressed_si compressed_size = codec->getCompressedReserveSize(compressed_size); /// TotalCodecs ByteForEachCodec data - return sizeof(UInt8) + codecs.size() + compressed_size; + return static_cast(sizeof(UInt8) + codecs.size() + compressed_size); } UInt32 CompressionCodecMultiple::doCompressData(const char * source, UInt32 source_size, char * dest) const @@ -73,7 +73,7 @@ UInt32 CompressionCodecMultiple::doCompressData(const char * source, UInt32 sour memcpy(&dest[1 + codecs.size()], uncompressed_buf.data(), source_size); - return 1 + codecs.size() + 
source_size; + return static_cast(1 + codecs.size() + source_size); } void CompressionCodecMultiple::doDecompressData(const char * source, UInt32 source_size, char * dest, UInt32 decompressed_size) const diff --git a/src/Compression/CompressionCodecT64.cpp b/src/Compression/CompressionCodecT64.cpp index bfcebad9676..cc8ce24476f 100644 --- a/src/Compression/CompressionCodecT64.cpp +++ b/src/Compression/CompressionCodecT64.cpp @@ -550,7 +550,7 @@ void decompressData(const char * src, UInt32 bytes_size, char * dst, UInt32 unco UInt32 num_bits = getValuableBitsNumber(min, max); if (!num_bits) { - T min_value = min; + T min_value = static_cast(min); for (UInt32 i = 0; i < num_elements; ++i, dst += sizeof(T)) unalignedStore(dst, min_value); return; @@ -572,14 +572,14 @@ void decompressData(const char * src, UInt32 bytes_size, char * dst, UInt32 unco T upper_max [[maybe_unused]] = 0; T sign_bit [[maybe_unused]] = 0; if (num_bits < 64) - upper_min = static_cast(min) >> num_bits << num_bits; + upper_min = static_cast(static_cast(min) >> num_bits << num_bits); if constexpr (is_signed_v) { if (min < 0 && max >= 0 && num_bits < 64) { - sign_bit = 1ull << (num_bits - 1); - upper_max = static_cast(max) >> num_bits << num_bits; + sign_bit = static_cast(1ull << (num_bits - 1)); + upper_max = static_cast(static_cast(max) >> num_bits << num_bits); } } diff --git a/src/Compression/CompressionCodecZSTD.cpp b/src/Compression/CompressionCodecZSTD.cpp index b47c8c4b080..f1c50840e54 100644 --- a/src/Compression/CompressionCodecZSTD.cpp +++ b/src/Compression/CompressionCodecZSTD.cpp @@ -65,7 +65,7 @@ void CompressionCodecZSTD::updateHash(SipHash & hash) const UInt32 CompressionCodecZSTD::getMaxCompressedDataSize(UInt32 uncompressed_size) const { - return ZSTD_compressBound(uncompressed_size); + return static_cast(ZSTD_compressBound(uncompressed_size)); } @@ -84,7 +84,7 @@ UInt32 CompressionCodecZSTD::doCompressData(const char * source, UInt32 source_s if (ZSTD_isError(compressed_size)) throw Exception("Cannot compress block with ZSTD: " + std::string(ZSTD_getErrorName(compressed_size)), ErrorCodes::CANNOT_COMPRESS); - return compressed_size; + return static_cast(compressed_size); } @@ -124,18 +124,20 @@ void registerCodecZSTD(CompressionCodecFactory & factory) if (!literal) throw Exception("ZSTD codec argument must be integer", ErrorCodes::ILLEGAL_CODEC_PARAMETER); - level = literal->value.safeGet(); + level = static_cast(literal->value.safeGet()); if (level > ZSTD_maxCLevel()) - throw Exception( - "ZSTD codec can't have level more than " + toString(ZSTD_maxCLevel()) + ", given " + toString(level), - ErrorCodes::ILLEGAL_CODEC_PARAMETER); + { + throw Exception(ErrorCodes::ILLEGAL_CODEC_PARAMETER, + "ZSTD codec can't have level more than {}, given {}", + ZSTD_maxCLevel(), level); + } if (arguments->children.size() > 1) { const auto * window_literal = children[1]->as(); if (!window_literal) throw Exception("ZSTD codec second argument must be integer", ErrorCodes::ILLEGAL_CODEC_PARAMETER); - const int window_log = window_literal->value.safeGet(); + const int window_log = static_cast(window_literal->value.safeGet()); ZSTD_bounds window_log_bounds = ZSTD_cParam_getBounds(ZSTD_c_windowLog); if (ZSTD_isError(window_log_bounds.error)) diff --git a/src/Compression/getCompressionCodecForFile.cpp b/src/Compression/getCompressionCodecForFile.cpp index f9365862c5b..027ee0ac57a 100644 --- a/src/Compression/getCompressionCodecForFile.cpp +++ b/src/Compression/getCompressionCodecForFile.cpp @@ -13,9 +13,9 @@ namespace DB using 
Checksum = CityHash_v1_0_2::uint128; -CompressionCodecPtr getCompressionCodecForFile(const DataPartStoragePtr & data_part_storage, const String & relative_path) +CompressionCodecPtr getCompressionCodecForFile(const IDataPartStorage & data_part_storage, const String & relative_path) { - auto read_buffer = data_part_storage->readFile(relative_path, {}, std::nullopt, std::nullopt); + auto read_buffer = data_part_storage.readFile(relative_path, {}, std::nullopt, std::nullopt); read_buffer->ignore(sizeof(Checksum)); UInt8 header_size = ICompressionCodec::getHeaderSize(); diff --git a/src/Compression/getCompressionCodecForFile.h b/src/Compression/getCompressionCodecForFile.h index ad855684128..b6f22750e4d 100644 --- a/src/Compression/getCompressionCodecForFile.h +++ b/src/Compression/getCompressionCodecForFile.h @@ -11,6 +11,6 @@ namespace DB /// clickhouse fashion (with checksums, headers for each block, etc). This /// method should be used as fallback when we cannot deduce compression codec /// from metadata. -CompressionCodecPtr getCompressionCodecForFile(const DataPartStoragePtr & data_part_storage, const String & relative_path); +CompressionCodecPtr getCompressionCodecForFile(const IDataPartStorage & data_part_storage, const String & relative_path); } diff --git a/src/Compression/tests/gtest_compressionCodec.cpp b/src/Compression/tests/gtest_compressionCodec.cpp index 2df3edb23ad..9b44c60cd81 100644 --- a/src/Compression/tests/gtest_compressionCodec.cpp +++ b/src/Compression/tests/gtest_compressionCodec.cpp @@ -391,7 +391,7 @@ CodecTestSequence generateSeq(Generator gen, const char* gen_name, B Begin = 0, for (auto i = Begin; i < End; i += direction) { - const T v = gen(static_cast(i)); + const T v = static_cast(gen(i)); unalignedStoreLE(write_pos, v); write_pos += sizeof(v); @@ -464,13 +464,15 @@ void testTranscoding(Timer & timer, ICompressionCodec & codec, const CodecTestSe { const auto & source_data = test_sequence.serialized_data; - const UInt32 encoded_max_size = codec.getCompressedReserveSize(source_data.size()); + const UInt32 encoded_max_size = codec.getCompressedReserveSize( + static_cast(source_data.size())); PODArray encoded(encoded_max_size); timer.start(); assert(source_data.data() != nullptr); // Codec assumes that source buffer is not null. 
- const UInt32 encoded_size = codec.compress(source_data.data(), source_data.size(), encoded.data()); + const UInt32 encoded_size = codec.compress( + source_data.data(), static_cast(source_data.size()), encoded.data()); timer.report("encoding"); encoded.resize(encoded_size); @@ -478,7 +480,8 @@ void testTranscoding(Timer & timer, ICompressionCodec & codec, const CodecTestSe PODArray decoded(source_data.size()); timer.start(); - const UInt32 decoded_size = codec.decompress(encoded.data(), encoded.size(), decoded.data()); + const UInt32 decoded_size = codec.decompress( + encoded.data(), static_cast(encoded.size()), decoded.data()); timer.report("decoding"); decoded.resize(decoded_size); @@ -542,10 +545,12 @@ TEST_P(CodecTestCompatibility, Encoding) const auto & source_data = data_sequence.serialized_data; // Just encode the data with codec - const UInt32 encoded_max_size = codec->getCompressedReserveSize(source_data.size()); + const UInt32 encoded_max_size = codec->getCompressedReserveSize( + static_cast(source_data.size())); PODArray encoded(encoded_max_size); - const UInt32 encoded_size = codec->compress(source_data.data(), source_data.size(), encoded.data()); + const UInt32 encoded_size = codec->compress( + source_data.data(), static_cast(source_data.size()), encoded.data()); encoded.resize(encoded_size); SCOPED_TRACE(::testing::Message("encoded: ") << AsHexString(encoded)); @@ -560,7 +565,8 @@ TEST_P(CodecTestCompatibility, Decoding) const auto codec = makeCodec(codec_spec.codec_statement, expected.data_type); PODArray decoded(expected.serialized_data.size()); - const UInt32 decoded_size = codec->decompress(encoded_data.c_str(), encoded_data.size(), decoded.data()); + const UInt32 decoded_size = codec->decompress( + encoded_data.c_str(), static_cast(encoded_data.size()), decoded.data()); decoded.resize(decoded_size); ASSERT_TRUE(EqualByteContainers(expected.data_type->getSizeOfValueInMemory(), expected.serialized_data, decoded)); @@ -716,7 +722,7 @@ template struct RandomGenerator { explicit RandomGenerator(T seed = 0, T value_min = std::numeric_limits::min(), T value_max = std::numeric_limits::max()) - : random_engine(seed), + : random_engine(static_cast(seed)), distribution(value_min, value_max) { } @@ -1043,7 +1049,7 @@ INSTANTIATE_TEST_SUITE_P(RandomInt, ::testing::Combine( DefaultCodecsToTest, ::testing::Values( - generateSeq(G(RandomGenerator(0))), + generateSeq(G(RandomGenerator(0))), generateSeq(G(RandomGenerator(0))), generateSeq(G(RandomGenerator(0, 0, 1000'000'000))), generateSeq(G(RandomGenerator(0, 0, 1000'000'000))) @@ -1206,8 +1212,7 @@ auto PrimesWithMultiplierGenerator = [](int multiplier = 1) }; static const size_t count = sizeof(vals)/sizeof(vals[0]); - using T = decltype(i); - return static_cast(vals[i % count] * static_cast(multiplier)); + return static_cast(vals[i % count]) * multiplier; }; }; diff --git a/src/Coordination/KeeperDispatcher.cpp b/src/Coordination/KeeperDispatcher.cpp index d725ecb5cfe..6e9116d4b75 100644 --- a/src/Coordination/KeeperDispatcher.cpp +++ b/src/Coordination/KeeperDispatcher.cpp @@ -1,14 +1,21 @@ #include + +#include +#include + +#include #include #include -#include -#include -#include -#include -#include #include #include + +#include +#include +#include +#include +#include + namespace CurrentMetrics { extern const Metric KeeperAliveConnections; @@ -32,9 +39,7 @@ KeeperDispatcher::KeeperDispatcher() : responses_queue(std::numeric_limits::max()) , configuration_and_settings(std::make_shared()) , log(&Poco::Logger::get("KeeperDispatcher")) 
-{ -} - +{} void KeeperDispatcher::requestThread() { @@ -191,7 +196,13 @@ void KeeperDispatcher::snapshotThread() try { - task.create_snapshot(std::move(task.snapshot)); + auto snapshot_path = task.create_snapshot(std::move(task.snapshot)); + + if (snapshot_path.empty()) + continue; + + if (isLeader()) + snapshot_s3.uploadSnapshot(snapshot_path); } catch (...) { @@ -285,7 +296,9 @@ void KeeperDispatcher::initialize(const Poco::Util::AbstractConfiguration & conf responses_thread = ThreadFromGlobalPool([this] { responseThread(); }); snapshot_thread = ThreadFromGlobalPool([this] { snapshotThread(); }); - server = std::make_unique(configuration_and_settings, config, responses_queue, snapshots_queue); + snapshot_s3.startup(config); + + server = std::make_unique(configuration_and_settings, config, responses_queue, snapshots_queue, snapshot_s3); try { @@ -312,7 +325,6 @@ void KeeperDispatcher::initialize(const Poco::Util::AbstractConfiguration & conf /// Start it after keeper server start session_cleaner_thread = ThreadFromGlobalPool([this] { sessionCleanerTask(); }); update_configuration_thread = ThreadFromGlobalPool([this] { updateConfigurationThread(); }); - updateConfiguration(config); LOG_DEBUG(log, "Dispatcher initialized"); } @@ -415,6 +427,8 @@ void KeeperDispatcher::shutdown() if (server) server->shutdown(); + snapshot_s3.shutdown(); + CurrentMetrics::set(CurrentMetrics::KeeperAliveConnections, 0); } @@ -678,6 +692,8 @@ void KeeperDispatcher::updateConfiguration(const Poco::Util::AbstractConfigurati if (!push_result) throw Exception(ErrorCodes::SYSTEM_ERROR, "Cannot push configuration update to queue"); } + + snapshot_s3.updateS3Configuration(config); } void KeeperDispatcher::updateKeeperStatLatency(uint64_t process_time_ms) diff --git a/src/Coordination/KeeperDispatcher.h b/src/Coordination/KeeperDispatcher.h index 3b524b24ed7..0003867adbe 100644 --- a/src/Coordination/KeeperDispatcher.h +++ b/src/Coordination/KeeperDispatcher.h @@ -14,6 +14,7 @@ #include #include #include +#include namespace DB { @@ -76,6 +77,8 @@ private: /// Counter for new session_id requests. std::atomic internal_session_id_counter{0}; + KeeperSnapshotManagerS3 snapshot_s3; + /// Thread put requests to raft void requestThread(); /// Thread put responses for subscribed sessions diff --git a/src/Coordination/KeeperServer.cpp b/src/Coordination/KeeperServer.cpp index 7a0cee746c6..a097cb57bc6 100644 --- a/src/Coordination/KeeperServer.cpp +++ b/src/Coordination/KeeperServer.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -105,7 +106,8 @@ KeeperServer::KeeperServer( const KeeperConfigurationAndSettingsPtr & configuration_and_settings_, const Poco::Util::AbstractConfiguration & config, ResponsesQueue & responses_queue_, - SnapshotsQueue & snapshots_queue_) + SnapshotsQueue & snapshots_queue_, + KeeperSnapshotManagerS3 & snapshot_manager_s3) : server_id(configuration_and_settings_->server_id) , coordination_settings(configuration_and_settings_->coordination_settings) , log(&Poco::Logger::get("KeeperServer")) @@ -125,6 +127,7 @@ KeeperServer::KeeperServer( configuration_and_settings_->snapshot_storage_path, coordination_settings, keeper_context, + config.getBool("keeper_server.upload_snapshot_on_exit", true) ? 
&snapshot_manager_s3 : nullptr, checkAndGetSuperdigest(configuration_and_settings_->super_digest)); state_manager = nuraft::cs_new( @@ -281,8 +284,9 @@ void KeeperServer::launchRaftServer(const Poco::Util::AbstractConfiguration & co params.client_req_timeout_ = getValueOrMaxInt32AndLogWarning(coordination_settings->operation_timeout_ms.totalMilliseconds(), "operation_timeout_ms", log); params.auto_forwarding_ = coordination_settings->auto_forwarding; - params.auto_forwarding_req_timeout_ - = std::max(coordination_settings->operation_timeout_ms.totalMilliseconds() * 2, std::numeric_limits::max()); + params.auto_forwarding_req_timeout_ = std::max( + static_cast(coordination_settings->operation_timeout_ms.totalMilliseconds() * 2), + std::numeric_limits::max()); params.auto_forwarding_req_timeout_ = getValueOrMaxInt32AndLogWarning(coordination_settings->operation_timeout_ms.totalMilliseconds() * 2, "operation_timeout_ms", log); params.max_append_size_ diff --git a/src/Coordination/KeeperServer.h b/src/Coordination/KeeperServer.h index 6873ef2a01e..a33e29b4540 100644 --- a/src/Coordination/KeeperServer.h +++ b/src/Coordination/KeeperServer.h @@ -71,7 +71,8 @@ public: const KeeperConfigurationAndSettingsPtr & settings_, const Poco::Util::AbstractConfiguration & config_, ResponsesQueue & responses_queue_, - SnapshotsQueue & snapshots_queue_); + SnapshotsQueue & snapshots_queue_, + KeeperSnapshotManagerS3 & snapshot_manager_s3); /// Load state machine from the latest snapshot and load log storage. Start NuRaft with required settings. void startup(const Poco::Util::AbstractConfiguration & config, bool enable_ipv6 = true); diff --git a/src/Coordination/KeeperSnapshotManager.h b/src/Coordination/KeeperSnapshotManager.h index c00ce9421e7..52647712083 100644 --- a/src/Coordination/KeeperSnapshotManager.h +++ b/src/Coordination/KeeperSnapshotManager.h @@ -87,7 +87,7 @@ public: }; using KeeperStorageSnapshotPtr = std::shared_ptr; -using CreateSnapshotCallback = std::function; +using CreateSnapshotCallback = std::function; using SnapshotMetaAndStorage = std::pair; diff --git a/src/Coordination/KeeperSnapshotManagerS3.cpp b/src/Coordination/KeeperSnapshotManagerS3.cpp new file mode 100644 index 00000000000..2e19d496407 --- /dev/null +++ b/src/Coordination/KeeperSnapshotManagerS3.cpp @@ -0,0 +1,311 @@ +#include + +#if USE_AWS_S3 +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +namespace fs = std::filesystem; + +namespace DB +{ + +struct KeeperSnapshotManagerS3::S3Configuration +{ + S3Configuration(S3::URI uri_, S3::AuthSettings auth_settings_, std::shared_ptr client_) + : uri(std::move(uri_)) + , auth_settings(std::move(auth_settings_)) + , client(std::move(client_)) + {} + + S3::URI uri; + S3::AuthSettings auth_settings; + std::shared_ptr client; +}; + +KeeperSnapshotManagerS3::KeeperSnapshotManagerS3() + : snapshots_s3_queue(std::numeric_limits::max()) + , log(&Poco::Logger::get("KeeperSnapshotManagerS3")) + , uuid(UUIDHelpers::generateV4()) +{} + +void KeeperSnapshotManagerS3::updateS3Configuration(const Poco::Util::AbstractConfiguration & config) +{ + try + { + const std::string config_prefix = "keeper_server.s3_snapshot"; + + if (!config.has(config_prefix)) + { + std::lock_guard client_lock{snapshot_s3_client_mutex}; + if (snapshot_s3_client) + LOG_INFO(log, "S3 configuration was removed"); + snapshot_s3_client = nullptr; + return; + } + + auto auth_settings = 
S3::AuthSettings::loadFromConfig(config_prefix, config); + + auto endpoint = config.getString(config_prefix + ".endpoint"); + auto new_uri = S3::URI{Poco::URI(endpoint)}; + + { + std::lock_guard client_lock{snapshot_s3_client_mutex}; + // if client is not changed (same auth settings, same endpoint) we don't need to update + if (snapshot_s3_client && snapshot_s3_client->client && auth_settings == snapshot_s3_client->auth_settings + && snapshot_s3_client->uri.uri == new_uri.uri) + return; + } + + LOG_INFO(log, "S3 configuration was updated"); + + auto credentials = Aws::Auth::AWSCredentials(auth_settings.access_key_id, auth_settings.secret_access_key); + HeaderCollection headers = auth_settings.headers; + + static constexpr size_t s3_max_redirects = 10; + static constexpr bool enable_s3_requests_logging = false; + + if (!new_uri.key.empty()) + { + LOG_ERROR(log, "Invalid endpoint defined for S3, it shouldn't contain key, endpoint: {}", endpoint); + return; + } + + S3::PocoHTTPClientConfiguration client_configuration = S3::ClientFactory::instance().createClientConfiguration( + auth_settings.region, + RemoteHostFilter(), s3_max_redirects, + enable_s3_requests_logging, + /* for_disk_s3 = */ false); + + client_configuration.endpointOverride = new_uri.endpoint; + + auto client = S3::ClientFactory::instance().create( + client_configuration, + new_uri.is_virtual_hosted_style, + credentials.GetAWSAccessKeyId(), + credentials.GetAWSSecretKey(), + auth_settings.server_side_encryption_customer_key_base64, + std::move(headers), + auth_settings.use_environment_credentials.value_or(false), + auth_settings.use_insecure_imds_request.value_or(false)); + + auto new_client = std::make_shared(std::move(new_uri), std::move(auth_settings), std::move(client)); + + { + std::lock_guard client_lock{snapshot_s3_client_mutex}; + snapshot_s3_client = std::move(new_client); + } + LOG_INFO(log, "S3 client was updated"); + } + catch (...) 
+ { + LOG_ERROR(log, "Failed to create an S3 client for snapshots"); + tryLogCurrentException(__PRETTY_FUNCTION__); + } +} +std::shared_ptr KeeperSnapshotManagerS3::getSnapshotS3Client() const +{ + std::lock_guard lock{snapshot_s3_client_mutex}; + return snapshot_s3_client; +} + +void KeeperSnapshotManagerS3::uploadSnapshotImpl(const std::string & snapshot_path) +{ + try + { + auto s3_client = getSnapshotS3Client(); + if (s3_client == nullptr) + return; + + S3Settings::ReadWriteSettings read_write_settings; + read_write_settings.upload_part_size_multiply_parts_count_threshold = 10000; + + const auto create_writer = [&](const auto & key) + { + return WriteBufferFromS3 + { + s3_client->client, + s3_client->uri.bucket, + key, + read_write_settings + }; + }; + + const auto file_exists = [&](const auto & key) + { + Aws::S3::Model::HeadObjectRequest request; + request.SetBucket(s3_client->uri.bucket); + request.SetKey(key); + auto outcome = s3_client->client->HeadObject(request); + + if (outcome.IsSuccess()) + return true; + + const auto & error = outcome.GetError(); + if (error.GetErrorType() != Aws::S3::S3Errors::NO_SUCH_KEY && error.GetErrorType() != Aws::S3::S3Errors::RESOURCE_NOT_FOUND) + throw S3Exception(error.GetErrorType(), "Failed to verify existence of lock file: {}", error.GetMessage()); + + return false; + }; + + + LOG_INFO(log, "Will try to upload snapshot on {} to S3", snapshot_path); + ReadBufferFromFile snapshot_file(snapshot_path); + + auto snapshot_name = fs::path(snapshot_path).filename().string(); + auto lock_file = fmt::format(".{}_LOCK", snapshot_name); + + if (file_exists(snapshot_name)) + { + LOG_ERROR(log, "Snapshot {} already exists", snapshot_name); + return; + } + + // First we need to verify that there isn't already a lock file for the snapshot we want to upload + // Only leader uploads a snapshot, but there can be a rare case where we have 2 leaders in NuRaft + if (file_exists(lock_file)) + { + LOG_ERROR(log, "Lock file for {} already, exists. Probably a different node is already uploading the snapshot", snapshot_name); + return; + } + + // We write our UUID to lock file + LOG_DEBUG(log, "Trying to create a lock file"); + WriteBufferFromS3 lock_writer = create_writer(lock_file); + writeUUIDText(uuid, lock_writer); + lock_writer.finalize(); + + // We read back the written UUID, if it's the same we can upload the file + ReadBufferFromS3 lock_reader + { + s3_client->client, + s3_client->uri.bucket, + lock_file, + "", + 1, + {} + }; + + std::string read_uuid; + readStringUntilEOF(read_uuid, lock_reader); + + if (read_uuid != toString(uuid)) + { + LOG_ERROR(log, "Failed to create a lock file"); + return; + } + + SCOPE_EXIT( + { + LOG_INFO(log, "Removing lock file"); + try + { + Aws::S3::Model::DeleteObjectRequest delete_request; + delete_request.SetBucket(s3_client->uri.bucket); + delete_request.SetKey(lock_file); + auto delete_outcome = s3_client->client->DeleteObject(delete_request); + if (!delete_outcome.IsSuccess()) + throw S3Exception(delete_outcome.GetError().GetMessage(), delete_outcome.GetError().GetErrorType()); + } + catch (...) + { + LOG_INFO(log, "Failed to delete lock file for {} from S3", snapshot_path); + tryLogCurrentException(__PRETTY_FUNCTION__); + } + }); + + WriteBufferFromS3 snapshot_writer = create_writer(snapshot_name); + copyData(snapshot_file, snapshot_writer); + snapshot_writer.finalize(); + + LOG_INFO(log, "Successfully uploaded {} to S3", snapshot_path); + } + catch (...) 
+ { + LOG_INFO(log, "Failure during upload of {} to S3", snapshot_path); + tryLogCurrentException(__PRETTY_FUNCTION__); + } +} + +void KeeperSnapshotManagerS3::snapshotS3Thread() +{ + setThreadName("KeeperS3SnpT"); + + while (!shutdown_called) + { + std::string snapshot_path; + if (!snapshots_s3_queue.pop(snapshot_path)) + break; + + if (shutdown_called) + break; + + uploadSnapshotImpl(snapshot_path); + } +} + +void KeeperSnapshotManagerS3::uploadSnapshot(const std::string & path, bool async_upload) +{ + if (getSnapshotS3Client() == nullptr) + return; + + if (async_upload) + { + if (!snapshots_s3_queue.push(path)) + LOG_WARNING(log, "Failed to add snapshot {} to S3 queue", path); + + return; + } + + uploadSnapshotImpl(path); +} + +void KeeperSnapshotManagerS3::startup(const Poco::Util::AbstractConfiguration & config) +{ + updateS3Configuration(config); + snapshot_s3_thread = ThreadFromGlobalPool([this] { snapshotS3Thread(); }); +} + +void KeeperSnapshotManagerS3::shutdown() +{ + if (shutdown_called) + return; + + LOG_DEBUG(log, "Shutting down KeeperSnapshotManagerS3"); + shutdown_called = true; + + try + { + snapshots_s3_queue.finish(); + if (snapshot_s3_thread.joinable()) + snapshot_s3_thread.join(); + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + } + + LOG_INFO(log, "KeeperSnapshotManagerS3 shut down"); +} + +} + +#endif diff --git a/src/Coordination/KeeperSnapshotManagerS3.h b/src/Coordination/KeeperSnapshotManagerS3.h new file mode 100644 index 00000000000..5b62d114aae --- /dev/null +++ b/src/Coordination/KeeperSnapshotManagerS3.h @@ -0,0 +1,68 @@ +#pragma once + +#include "config.h" + +#include + +#if USE_AWS_S3 +#include +#include +#include + +#include +#endif + +namespace DB +{ + +#if USE_AWS_S3 +class KeeperSnapshotManagerS3 +{ +public: + KeeperSnapshotManagerS3(); + + void updateS3Configuration(const Poco::Util::AbstractConfiguration & config); + void uploadSnapshot(const std::string & path, bool async_upload = true); + + void startup(const Poco::Util::AbstractConfiguration & config); + void shutdown(); +private: + using SnapshotS3Queue = ConcurrentBoundedQueue; + SnapshotS3Queue snapshots_s3_queue; + + /// Upload new snapshots to S3 + ThreadFromGlobalPool snapshot_s3_thread; + + struct S3Configuration; + mutable std::mutex snapshot_s3_client_mutex; + std::shared_ptr snapshot_s3_client; + + std::atomic shutdown_called{false}; + + Poco::Logger * log; + + UUID uuid; + + std::shared_ptr getSnapshotS3Client() const; + + void uploadSnapshotImpl(const std::string & snapshot_path); + + /// Thread upload snapshots to S3 in the background + void snapshotS3Thread(); +}; +#else +class KeeperSnapshotManagerS3 +{ +public: + KeeperSnapshotManagerS3() = default; + + void updateS3Configuration(const Poco::Util::AbstractConfiguration &) {} + void uploadSnapshot(const std::string &, [[maybe_unused]] bool async_upload = true) {} + + void startup(const Poco::Util::AbstractConfiguration &) {} + + void shutdown() {} +}; +#endif + +} diff --git a/src/Coordination/KeeperStateMachine.cpp b/src/Coordination/KeeperStateMachine.cpp index c5a66ce29ca..ee5bfa48357 100644 --- a/src/Coordination/KeeperStateMachine.cpp +++ b/src/Coordination/KeeperStateMachine.cpp @@ -44,6 +44,7 @@ KeeperStateMachine::KeeperStateMachine( const std::string & snapshots_path_, const CoordinationSettingsPtr & coordination_settings_, const KeeperContextPtr & keeper_context_, + KeeperSnapshotManagerS3 * snapshot_manager_s3_, const std::string & superdigest_) : coordination_settings(coordination_settings_) , 
snapshot_manager( @@ -59,6 +60,7 @@ KeeperStateMachine::KeeperStateMachine( , log(&Poco::Logger::get("KeeperStateMachine")) , superdigest(superdigest_) , keeper_context(keeper_context_) + , snapshot_manager_s3(snapshot_manager_s3_) { } @@ -400,13 +402,22 @@ void KeeperStateMachine::create_snapshot(nuraft::snapshot & s, nuraft::async_res } when_done(ret, exception); + + return ret ? latest_snapshot_path : ""; }; if (keeper_context->server_state == KeeperContext::Phase::SHUTDOWN) { LOG_INFO(log, "Creating a snapshot during shutdown because 'create_snapshot_on_exit' is enabled."); - snapshot_task.create_snapshot(std::move(snapshot_task.snapshot)); + auto snapshot_path = snapshot_task.create_snapshot(std::move(snapshot_task.snapshot)); + + if (!snapshot_path.empty() && snapshot_manager_s3) + { + LOG_INFO(log, "Uploading snapshot {} during shutdown because 'upload_snapshot_on_exit' is enabled.", snapshot_path); + snapshot_manager_s3->uploadSnapshot(snapshot_path, /* asnyc_upload */ false); + } + return; } diff --git a/src/Coordination/KeeperStateMachine.h b/src/Coordination/KeeperStateMachine.h index fbd4fdc5ac2..ffc7fce1cfe 100644 --- a/src/Coordination/KeeperStateMachine.h +++ b/src/Coordination/KeeperStateMachine.h @@ -2,11 +2,13 @@ #include #include +#include +#include #include + #include #include #include -#include namespace DB @@ -26,6 +28,7 @@ public: const std::string & snapshots_path_, const CoordinationSettingsPtr & coordination_settings_, const KeeperContextPtr & keeper_context_, + KeeperSnapshotManagerS3 * snapshot_manager_s3_, const std::string & superdigest_ = ""); /// Read state from the latest snapshot @@ -146,6 +149,8 @@ private: const std::string superdigest; KeeperContextPtr keeper_context; + + KeeperSnapshotManagerS3 * snapshot_manager_s3; }; } diff --git a/src/Coordination/KeeperStorage.cpp b/src/Coordination/KeeperStorage.cpp index a30a32b5735..875dccfd705 100644 --- a/src/Coordination/KeeperStorage.cpp +++ b/src/Coordination/KeeperStorage.cpp @@ -922,7 +922,7 @@ struct KeeperStorageCreateRequestProcessor final : public KeeperStorageRequestPr stat.version = 0; stat.aversion = 0; stat.cversion = 0; - stat.dataLength = request.data.length(); + stat.dataLength = static_cast(request.data.length()); stat.ephemeralOwner = request.is_ephemeral ? session_id : 0; new_deltas.emplace_back( @@ -1222,7 +1222,7 @@ struct KeeperStorageSetRequestProcessor final : public KeeperStorageRequestProce value.stat.version++; value.stat.mzxid = zxid; value.stat.mtime = time; - value.stat.dataLength = data.length(); + value.stat.dataLength = static_cast(data.length()); value.setData(data); }, request.version}); diff --git a/src/Coordination/SummingStateMachine.cpp b/src/Coordination/SummingStateMachine.cpp index ae3d2b06d75..4151b727744 100644 --- a/src/Coordination/SummingStateMachine.cpp +++ b/src/Coordination/SummingStateMachine.cpp @@ -71,10 +71,10 @@ void SummingStateMachine::createSnapshotInternal(nuraft::snapshot & s) snapshots[s.get_last_log_idx()] = ctx; // Maintain last 3 snapshots only. 
- int num = snapshots.size(); + ssize_t num = snapshots.size(); auto entry = snapshots.begin(); - for (int ii = 0; ii < num - MAX_SNAPSHOTS; ++ii) + for (ssize_t ii = 0; ii < num - MAX_SNAPSHOTS; ++ii) { if (entry == snapshots.end()) break; diff --git a/src/Coordination/ZooKeeperDataReader.cpp b/src/Coordination/ZooKeeperDataReader.cpp index 6702c4cc718..3b803c18dbf 100644 --- a/src/Coordination/ZooKeeperDataReader.cpp +++ b/src/Coordination/ZooKeeperDataReader.cpp @@ -119,7 +119,7 @@ int64_t deserializeStorageData(KeeperStorage & storage, ReadBuffer & in, Poco::L Coordination::read(node.stat.pzxid, in); if (!path.empty()) { - node.stat.dataLength = node.getData().length(); + node.stat.dataLength = static_cast(node.getData().length()); node.seq_num = node.stat.cversion; storage.container.insertOrReplace(path, node); diff --git a/src/Coordination/tests/gtest_coordination.cpp b/src/Coordination/tests/gtest_coordination.cpp index 5bb1ecc7c85..fa4c42dd82a 100644 --- a/src/Coordination/tests/gtest_coordination.cpp +++ b/src/Coordination/tests/gtest_coordination.cpp @@ -941,7 +941,7 @@ TEST_P(CoordinationTest, SnapshotableHashMapTrySnapshot) EXPECT_EQ(itr->active_in_map, true); itr = std::next(itr); EXPECT_EQ(itr, map_snp.end()); - for (size_t i = 0; i < 5; ++i) + for (int i = 0; i < 5; ++i) { EXPECT_TRUE(map_snp.insert("/hello" + std::to_string(i), i).second); } @@ -1318,7 +1318,7 @@ void testLogAndStateMachine(Coordination::CoordinationSettingsPtr settings, uint ResponsesQueue queue(std::numeric_limits::max()); SnapshotsQueue snapshots_queue{1}; - auto state_machine = std::make_shared(queue, snapshots_queue, "./snapshots", settings, keeper_context); + auto state_machine = std::make_shared(queue, snapshots_queue, "./snapshots", settings, keeper_context, nullptr); state_machine->init(); DB::KeeperLogStore changelog("./logs", settings->rotate_log_storage_interval, true, enable_compression); changelog.init(state_machine->last_commit_index() + 1, settings->reserved_log_items); @@ -1359,7 +1359,7 @@ void testLogAndStateMachine(Coordination::CoordinationSettingsPtr settings, uint } SnapshotsQueue snapshots_queue1{1}; - auto restore_machine = std::make_shared(queue, snapshots_queue1, "./snapshots", settings, keeper_context); + auto restore_machine = std::make_shared(queue, snapshots_queue1, "./snapshots", settings, keeper_context, nullptr); restore_machine->init(); EXPECT_EQ(restore_machine->last_commit_index(), total_logs - total_logs % settings->snapshot_distance); @@ -1471,7 +1471,7 @@ TEST_P(CoordinationTest, TestEphemeralNodeRemove) ResponsesQueue queue(std::numeric_limits::max()); SnapshotsQueue snapshots_queue{1}; - auto state_machine = std::make_shared(queue, snapshots_queue, "./snapshots", settings, keeper_context); + auto state_machine = std::make_shared(queue, snapshots_queue, "./snapshots", settings, keeper_context, nullptr); state_machine->init(); std::shared_ptr request_c = std::make_shared(); @@ -1982,7 +1982,7 @@ TEST_P(CoordinationTest, TestListRequestTypes) KeeperStorage storage{500, "", keeper_context}; - int64_t zxid = 0; + int32_t zxid = 0; static constexpr std::string_view test_path = "/list_request_type/node"; diff --git a/src/Core/DecimalComparison.h b/src/Core/DecimalComparison.h index 530722a2519..953c98f80b4 100644 --- a/src/Core/DecimalComparison.h +++ b/src/Core/DecimalComparison.h @@ -230,7 +230,7 @@ private: if constexpr (is_decimal) y = b.value; else - y = b; + y = static_cast(b); if constexpr (_check_overflow) { diff --git a/src/Core/DecimalFunctions.h 
b/src/Core/DecimalFunctions.h index 0f2158fb83b..263f78af5cc 100644 --- a/src/Core/DecimalFunctions.h +++ b/src/Core/DecimalFunctions.h @@ -241,7 +241,7 @@ inline DecimalComponents split(const DecimalType & decimal, UInt32 * If scale is to big, result is undefined. */ template -inline typename DecimalType::NativeType getWholePart(const DecimalType & decimal, size_t scale) +inline typename DecimalType::NativeType getWholePart(const DecimalType & decimal, UInt32 scale) { if (scale == 0) return decimal.value; @@ -273,7 +273,7 @@ inline typename DecimalType::NativeType getFractionalPartWithScaleMultiplier( * If scale is to big, result is undefined. */ template -inline typename DecimalType::NativeType getFractionalPart(const DecimalType & decimal, size_t scale) +inline typename DecimalType::NativeType getFractionalPart(const DecimalType & decimal, UInt32 scale) { if (scale == 0) return 0; @@ -283,7 +283,7 @@ inline typename DecimalType::NativeType getFractionalPart(const DecimalType & de /// Decimal to integer/float conversion template -ReturnType convertToImpl(const DecimalType & decimal, size_t scale, To & result) +ReturnType convertToImpl(const DecimalType & decimal, UInt32 scale, To & result) { using DecimalNativeType = typename DecimalType::NativeType; static constexpr bool throw_exception = std::is_void_v; @@ -334,7 +334,7 @@ ReturnType convertToImpl(const DecimalType & decimal, size_t scale, To & result) template -To convertTo(const DecimalType & decimal, size_t scale) +To convertTo(const DecimalType & decimal, UInt32 scale) { To result; convertToImpl(decimal, scale, result); @@ -342,7 +342,7 @@ To convertTo(const DecimalType & decimal, size_t scale) } template -bool tryConvertTo(const DecimalType & decimal, size_t scale, To & result) +bool tryConvertTo(const DecimalType & decimal, UInt32 scale, To & result) { return convertToImpl(decimal, scale, result); } diff --git a/src/Core/Defines.h b/src/Core/Defines.h index 80efe4f77bf..ae3701c18f0 100644 --- a/src/Core/Defines.h +++ b/src/Core/Defines.h @@ -14,17 +14,20 @@ /// The size of the I/O buffer by default. #define DBMS_DEFAULT_BUFFER_SIZE 1048576ULL +#define PADDING_FOR_SIMD 64 + /** Which blocks by default read the data (by number of rows). * Smaller values give better cache locality, less consumption of RAM, but more overhead to process the query. */ -#define DEFAULT_BLOCK_SIZE 65505 /// 65536 minus 16 + 15 bytes padding that we usually have in arrays +#define DEFAULT_BLOCK_SIZE 65409 /// 65536 - PADDING_FOR_SIMD - (PADDING_FOR_SIMD - 1) bytes padding that we usually have in arrays /** Which blocks should be formed for insertion into the table, if we control the formation of blocks. * (Sometimes the blocks are inserted exactly such blocks that have been read / transmitted from the outside, and this parameter does not affect their size.) * More than DEFAULT_BLOCK_SIZE, because in some tables a block of data on the disk is created for each block (quite a big thing), * and if the parts were small, then it would be costly then to combine them. */ -#define DEFAULT_INSERT_BLOCK_SIZE 1048545 /// 1048576 minus 16 + 15 bytes padding that we usually have in arrays +#define DEFAULT_INSERT_BLOCK_SIZE \ + 1048449 /// 1048576 - PADDING_FOR_SIMD - (PADDING_FOR_SIMD - 1) bytes padding that we usually have in arrays /** The same, but for merge operations. Less DEFAULT_BLOCK_SIZE for saving RAM (since all the columns are read). * Significantly less, since there are 10-way mergers. 
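Side note on the Core/Defines.h hunk above: the default block sizes are now derived from PADDING_FOR_SIMD = 64 instead of the old hard-coded "16 + 15 bytes" padding, i.e. 65536 - 64 - 63 = 65409 and 1048576 - 64 - 63 = 1048449; this is also why the expected capacities in gtest_pod_array.cpp move from the 65505 family to the 65409 family earlier in this patch. A trivial compile-time check of that arithmetic:

    #include <cstdint>

    constexpr std::uint64_t padding_for_simd = 64;

    constexpr std::uint64_t default_block_size        = 65536   - padding_for_simd - (padding_for_simd - 1);
    constexpr std::uint64_t default_insert_block_size = 1048576 - padding_for_simd - (padding_for_simd - 1);

    static_assert(default_block_size == 65409, "65536 - 64 - 63");
    static_assert(default_insert_block_size == 1048449, "1048576 - 64 - 63");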
diff --git a/src/Core/MySQL/MySQLReplication.cpp b/src/Core/MySQL/MySQLReplication.cpp index 87566eade54..6f3ac1b40e9 100644 --- a/src/Core/MySQL/MySQLReplication.cpp +++ b/src/Core/MySQL/MySQLReplication.cpp @@ -161,7 +161,7 @@ namespace MySQLReplication /// https://dev.mysql.com/doc/internals/en/table-map-event.html void TableMapEvent::parseImpl(ReadBuffer & payload) { - column_count = readLengthEncodedNumber(payload); + column_count = static_cast(readLengthEncodedNumber(payload)); for (auto i = 0U; i < column_count; ++i) { UInt8 v = 0x00; @@ -283,7 +283,7 @@ namespace MySQLReplication void RowsEvent::parseImpl(ReadBuffer & payload) { - number_columns = readLengthEncodedNumber(payload); + number_columns = static_cast(readLengthEncodedNumber(payload)); size_t columns_bitmap_size = (number_columns + 7) / 8; switch (header.type) { @@ -494,7 +494,7 @@ namespace MySQLReplication readBigEndianStrict(payload, reinterpret_cast(&uintpart), 6); intpart = uintpart - 0x800000000000L; ltime = intpart; - frac = std::abs(intpart % (1L << 24)); + frac = static_cast(std::abs(intpart % (1L << 24))); break; } default: @@ -536,7 +536,7 @@ namespace MySQLReplication readBigEndianStrict(payload, reinterpret_cast(&val), 5); readTimeFractionalPart(payload, fsp, meta); - UInt32 year_month = readBits(val, 1, 17, 40); + UInt32 year_month = static_cast(readBits(val, 1, 17, 40)); time_t date_time = DateLUT::instance().makeDateTime( year_month / 13, year_month % 13, readBits(val, 18, 5, 40) , readBits(val, 23, 5, 40), readBits(val, 28, 6, 40), readBits(val, 34, 6, 40) @@ -625,7 +625,7 @@ namespace MySQLReplication { UInt32 val = 0; readBigEndianStrict(payload, reinterpret_cast(&val), 4); - res *= intExp10OfSize(digits_per_integer); + res *= intExp10OfSize(static_cast(digits_per_integer)); res += (val ^ mask); } } @@ -638,7 +638,7 @@ namespace MySQLReplication { UInt32 val = 0; readBigEndianStrict(payload, reinterpret_cast(&val), 4); - res *= intExp10OfSize(digits_per_integer); + res *= intExp10OfSize(static_cast(digits_per_integer)); res += (val ^ mask); } @@ -651,7 +651,7 @@ namespace MySQLReplication if (to_read) //-V547 { readBigEndianStrict(payload, reinterpret_cast(&val), to_read); - res *= intExp10OfSize(compressed_decimals); + res *= intExp10OfSize(static_cast(compressed_decimals)); res += (val ^ (mask & compressed_integer_align_numbers[compressed_decimals])); } } diff --git a/src/Core/MySQL/PacketsReplication.cpp b/src/Core/MySQL/PacketsReplication.cpp index ec5e8868cc5..74c6ca2d81f 100644 --- a/src/Core/MySQL/PacketsReplication.cpp +++ b/src/Core/MySQL/PacketsReplication.cpp @@ -57,7 +57,7 @@ void BinlogDumpGTID::writePayloadImpl(WriteBuffer & buffer) const const UInt64 position = 4; buffer.write(reinterpret_cast(&position), 8); - UInt32 gtid_size = gtid_datas.size(); + UInt32 gtid_size = static_cast(gtid_datas.size()); buffer.write(reinterpret_cast(>id_size), 4); buffer.write(gtid_datas.data(), gtid_datas.size()); } diff --git a/src/Core/PostgreSQL/insertPostgreSQLValue.cpp b/src/Core/PostgreSQL/insertPostgreSQLValue.cpp index 61a0a91ab2e..e5f3a0f91c3 100644 --- a/src/Core/PostgreSQL/insertPostgreSQLValue.cpp +++ b/src/Core/PostgreSQL/insertPostgreSQLValue.cpp @@ -100,7 +100,7 @@ void insertPostgreSQLValue( readDateTimeText(time, in, assert_cast(data_type.get())->getTimeZone()); if (time < 0) time = 0; - assert_cast(column).insertValue(time); + assert_cast(column).insertValue(static_cast(time)); break; } case ExternalResultDescription::ValueType::vtDateTime64: diff --git a/src/Core/PostgreSQLProtocol.h 
b/src/Core/PostgreSQLProtocol.h index 908518eff5e..994494fc92f 100644 --- a/src/Core/PostgreSQLProtocol.h +++ b/src/Core/PostgreSQLProtocol.h @@ -336,7 +336,12 @@ public: Int32 size() const override { // message length part + (1 + sizes of other fields + 1) + null byte in the end of the message - return 4 + (1 + enum_to_string[severity].size() + 1) + (1 + sql_state.size() + 1) + (1 + message.size() + 1) + 1; + return static_cast( + 4 + + (1 + enum_to_string[severity].size() + 1) + + (1 + sql_state.size() + 1) + + (1 + message.size() + 1) + + 1); } MessageType getMessageType() const override @@ -518,7 +523,7 @@ public: Int32 size() const override { - return 4 + name.size() + 1 + value.size() + 1; + return static_cast(4 + name.size() + 1 + value.size() + 1); } MessageType getMessageType() const override @@ -633,7 +638,7 @@ public: // + object ID of the table (Int32 and always zero) + attribute number of the column (Int16 and always zero) // + type object id (Int32) + data type size (Int16) // + type modifier (Int32 and always -1) + format code (Int16) - return (name.size() + 1) + 4 + 2 + 4 + 2 + 4 + 2; + return static_cast((name.size() + 1) + 4 + 2 + 4 + 2 + 4 + 2); } }; @@ -682,7 +687,7 @@ public: Int32 size() const override { - return str.size(); + return static_cast(str.size()); } }; @@ -762,7 +767,7 @@ public: Int32 size() const override { - return 4 + value.size() + 1; + return static_cast(4 + value.size() + 1); } MessageType getMessageType() const override diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 17826566a83..844280ab77b 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -93,6 +93,7 @@ static constexpr UInt64 operator""_GiB(unsigned long long value) M(Bool, s3_truncate_on_insert, false, "Enables or disables truncate before insert in s3 engine tables.", 0) \ M(Bool, s3_create_new_file_on_insert, false, "Enables or disables creating a new file on each insert in s3 engine tables", 0) \ M(Bool, s3_check_objects_after_upload, false, "Check each uploaded object to s3 with head request to be sure that upload was successful", 0) \ + M(Bool, s3_allow_parallel_part_upload, true, "Use multiple threads for s3 multipart upload. It may lead to slightly higher memory usage", 0) \ M(Bool, enable_s3_requests_logging, false, "Enable very explicit logging of S3 requests. Makes sense for debug only.", 0) \ M(UInt64, hdfs_replication, 0, "The actual number of replications can be specified when the hdfs file is created.", 0) \ M(Bool, hdfs_truncate_on_insert, false, "Enables or disables truncate before insert in s3 engine tables", 0) \ @@ -302,6 +303,7 @@ static constexpr UInt64 operator""_GiB(unsigned long long value) M(Float, opentelemetry_start_trace_probability, 0., "Probability to start an OpenTelemetry trace for an incoming query.", 0) \ M(Bool, opentelemetry_trace_processors, false, "Collect OpenTelemetry spans for processors.", 0) \ M(Bool, prefer_column_name_to_alias, false, "Prefer using column names instead of aliases if possible.", 0) \ + M(Bool, allow_experimental_analyzer, false, "Allow experimental analyzer", 0) \ M(Bool, prefer_global_in_and_join, false, "If enabled, all IN/JOIN operators will be rewritten as GLOBAL IN/JOIN. It's useful when the to-be-joined tables are only available on the initiator and we need to always scatter their data on-the-fly during distributed processing with the GLOBAL keyword. 
It's also useful to reduce the need to access the external sources joining external tables.", 0) \ \ \ @@ -331,8 +333,8 @@ static constexpr UInt64 operator""_GiB(unsigned long long value) M(UInt64, max_bytes_before_remerge_sort, 1000000000, "In case of ORDER BY with LIMIT, when memory usage is higher than specified threshold, perform additional steps of merging blocks before final merge to keep just top LIMIT rows.", 0) \ M(Float, remerge_sort_lowered_memory_bytes_ratio, 2., "If memory usage after remerge does not reduced by this ratio, remerge will be disabled.", 0) \ \ - M(UInt64, max_result_rows, 0, "Limit on result size in rows. Also checked for intermediate data sent from remote servers.", 0) \ - M(UInt64, max_result_bytes, 0, "Limit on result size in bytes (uncompressed). Also checked for intermediate data sent from remote servers.", 0) \ + M(UInt64, max_result_rows, 0, "Limit on result size in rows. The query will stop after processing a block of data if the threshold is met, but it will not cut the last block of the result, therefore the result size can be larger than the threshold.", 0) \ + M(UInt64, max_result_bytes, 0, "Limit on result size in bytes (uncompressed). The query will stop after processing a block of data if the threshold is met, but it will not cut the last block of the result, therefore the result size can be larger than the threshold. Caveats: the result size in memory is taken into account for this threshold. Even if the result size is small, it can reference larger data structures in memory, representing dictionaries of LowCardinality columns, and Arenas of AggregateFunction columns, so the threshold can be exceeded despite the small result size. The setting is fairly low level and should be used with caution.", 0) \ M(OverflowMode, result_overflow_mode, OverflowMode::THROW, "What to do when the limit is exceeded.", 0) \ \ /* TODO: Check also when merging and finalizing aggregate functions. 
*/ \ diff --git a/src/Core/tests/gtest_DecimalFunctions.cpp b/src/Core/tests/gtest_DecimalFunctions.cpp index 1712785488e..08f25ddd97c 100644 --- a/src/Core/tests/gtest_DecimalFunctions.cpp +++ b/src/Core/tests/gtest_DecimalFunctions.cpp @@ -17,18 +17,13 @@ struct DecimalUtilsSplitAndCombineTestParam DecimalUtils::DecimalComponents components; }; -std::ostream & operator << (std::ostream & ostr, const DecimalUtilsSplitAndCombineTestParam & param) -{ - return ostr << param.description; -} - class DecimalUtilsSplitAndCombineTest : public ::testing::TestWithParam {}; template void testSplit(const DecimalUtilsSplitAndCombineTestParam & param) { - const DecimalType decimal_value = param.decimal_value; + const DecimalType decimal_value(static_cast(param.decimal_value.value)); const auto & actual_components = DecimalUtils::split(decimal_value, param.scale); EXPECT_EQ(param.components.whole, actual_components.whole); @@ -39,21 +34,28 @@ template void testDecimalFromComponents(const DecimalUtilsSplitAndCombineTestParam & param) { EXPECT_EQ(param.decimal_value, - DecimalUtils::decimalFromComponents(param.components.whole, param.components.fractional, param.scale)); + DecimalUtils::decimalFromComponents( + static_cast(param.components.whole), + static_cast(param.components.fractional), + param.scale)); } template void testGetWhole(const DecimalUtilsSplitAndCombineTestParam & param) { EXPECT_EQ(param.components.whole, - DecimalUtils::getWholePart(DecimalType{param.decimal_value}, param.scale)); + DecimalUtils::getWholePart( + DecimalType{static_cast(param.decimal_value.value)}, + param.scale)); } template void testGetFractional(const DecimalUtilsSplitAndCombineTestParam & param) { EXPECT_EQ(param.components.fractional, - DecimalUtils::getFractionalPart(DecimalType{param.decimal_value}, param.scale)); + DecimalUtils::getFractionalPart( + DecimalType{static_cast(param.decimal_value.value)}, + param.scale)); } // Unfortunately typed parametrized tests () are not supported in this version of gtest, so I have to emulate by hand. @@ -144,6 +146,17 @@ TEST_P(DecimalUtilsSplitAndCombineForDateTime64Test, getFractionalPartDateTime64 } +namespace std // NOLINT(cert-dcl58-cpp) +{ + +std::ostream & operator << (std::ostream & ostr, const DecimalUtilsSplitAndCombineTestParam & param) // NOLINT(cert-dcl58-cpp) +{ + return ostr << param.description; +} + +} + + // Intentionally small values that fit into 32-bit in order to cover Decimal32, Decimal64 and Decimal128 with single set of data. 
INSTANTIATE_TEST_SUITE_P(Basic, DecimalUtilsSplitAndCombineTest, diff --git a/src/Daemon/BaseDaemon.cpp b/src/Daemon/BaseDaemon.cpp index 2dddfd6874a..758f85e688f 100644 --- a/src/Daemon/BaseDaemon.cpp +++ b/src/Daemon/BaseDaemon.cpp @@ -575,7 +575,8 @@ void BaseDaemon::closeFDs() { int max_fd = -1; #if defined(_SC_OPEN_MAX) - max_fd = sysconf(_SC_OPEN_MAX); + // fd cannot be > INT_MAX + max_fd = static_cast(sysconf(_SC_OPEN_MAX)); if (max_fd == -1) #endif max_fd = 256; /// bad fallback diff --git a/src/Daemon/SentryWriter.cpp b/src/Daemon/SentryWriter.cpp index a6033af6fe3..bb330162818 100644 --- a/src/Daemon/SentryWriter.cpp +++ b/src/Daemon/SentryWriter.cpp @@ -189,7 +189,7 @@ void SentryWriter::onFault(int sig, const std::string & error_message, const Sta sentry_value_set_by_key(sentry_frame, "filename", sentry_value_new_string(current_frame.file.value().c_str())); if (current_frame.line.has_value()) - sentry_value_set_by_key(sentry_frame, "lineno", sentry_value_new_int32(current_frame.line.value())); + sentry_value_set_by_key(sentry_frame, "lineno", sentry_value_new_int32(static_cast(current_frame.line.value()))); sentry_value_append(sentry_frames, sentry_frame); } diff --git a/src/DataTypes/DataTypeAggregateFunction.cpp b/src/DataTypes/DataTypeAggregateFunction.cpp index 8c0f0b95025..7056fcff42f 100644 --- a/src/DataTypes/DataTypeAggregateFunction.cpp +++ b/src/DataTypes/DataTypeAggregateFunction.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -241,6 +242,23 @@ static DataTypePtr create(const ASTPtr & arguments) return std::make_shared(function, argument_types, params_row, version); } +void setVersionToAggregateFunctions(DataTypePtr & type, bool if_empty, std::optional revision) +{ + auto callback = [revision, if_empty](DataTypePtr & column_type) + { + const auto * aggregate_function_type = typeid_cast(column_type.get()); + if (aggregate_function_type && aggregate_function_type->isVersioned()) + { + if (revision) + aggregate_function_type->updateVersionFromRevision(*revision, if_empty); + else + aggregate_function_type->setVersion(0, if_empty); + } + }; + + callOnNestedSimpleTypes(type, callback); +} + void registerDataTypeAggregateFunction(DataTypeFactory & factory) { diff --git a/src/DataTypes/DataTypeAggregateFunction.h b/src/DataTypes/DataTypeAggregateFunction.h index 39fbfb62917..4a92e6c5703 100644 --- a/src/DataTypes/DataTypeAggregateFunction.h +++ b/src/DataTypes/DataTypeAggregateFunction.h @@ -70,8 +70,6 @@ public: bool isVersioned() const { return function->isVersioned(); } - size_t getVersionFromRevision(size_t revision) const { return function->getVersionFromRevision(revision); } - /// Version is not empty only if it was parsed from AST or implicitly cast to 0 or version according /// to server revision. 
/// It is ok to have an empty version value here - then for serialization a default (latest) @@ -84,6 +82,13 @@ public: version = version_; } + + void updateVersionFromRevision(size_t revision, bool if_empty) const + { + setVersion(function->getVersionFromRevision(revision), if_empty); + } }; +void setVersionToAggregateFunctions(DataTypePtr & type, bool if_empty, std::optional revision = std::nullopt); + } diff --git a/src/DataTypes/DataTypeArray.h b/src/DataTypes/DataTypeArray.h index 122ac8e03a3..033a657c845 100644 --- a/src/DataTypes/DataTypeArray.h +++ b/src/DataTypes/DataTypeArray.h @@ -48,6 +48,7 @@ public: bool textCanContainOnlyValidUTF8() const override { return nested->textCanContainOnlyValidUTF8(); } bool isComparable() const override { return nested->isComparable(); } bool canBeComparedWithCollation() const override { return nested->canBeComparedWithCollation(); } + bool hasDynamicSubcolumns() const override { return nested->hasDynamicSubcolumns(); } bool isValueUnambiguouslyRepresentedInContiguousMemoryRegion() const override { diff --git a/src/DataTypes/DataTypeEnum.cpp b/src/DataTypes/DataTypeEnum.cpp index c58e186b980..aab328eaa33 100644 --- a/src/DataTypes/DataTypeEnum.cpp +++ b/src/DataTypes/DataTypeEnum.cpp @@ -102,7 +102,7 @@ bool DataTypeEnum::textCanContainOnlyValidUTF8() const if (pos + length > end) return false; - if (Poco::UTF8Encoding::isLegal(reinterpret_cast(pos), length)) + if (Poco::UTF8Encoding::isLegal(reinterpret_cast(pos), static_cast(length))) pos += length; else return false; diff --git a/src/DataTypes/DataTypeMap.cpp b/src/DataTypes/DataTypeMap.cpp index 42ec739c33b..d49c205fc59 100644 --- a/src/DataTypes/DataTypeMap.cpp +++ b/src/DataTypes/DataTypeMap.cpp @@ -22,6 +22,27 @@ namespace ErrorCodes extern const int BAD_ARGUMENTS; } +DataTypeMap::DataTypeMap(const DataTypePtr & nested_) + : nested(nested_) +{ + const auto * type_array = typeid_cast(nested.get()); + if (!type_array) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Expected Array(Tuple(key, value)) type, got {}", nested->getName()); + + const auto * type_tuple = typeid_cast(type_array->getNestedType().get()); + if (!type_tuple) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Expected Array(Tuple(key, value)) type, got {}", nested->getName()); + + if (type_tuple->getElements().size() != 2) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Expected Array(Tuple(key, value)) type, got {}", nested->getName()); + + key_type = type_tuple->getElement(0); + value_type = type_tuple->getElement(1); + assertKeyType(); +} DataTypeMap::DataTypeMap(const DataTypes & elems_) { diff --git a/src/DataTypes/DataTypeMap.h b/src/DataTypes/DataTypeMap.h index 479008031fe..2ab5c602a25 100644 --- a/src/DataTypes/DataTypeMap.h +++ b/src/DataTypes/DataTypeMap.h @@ -23,6 +23,7 @@ private: public: static constexpr bool is_parametric = true; + explicit DataTypeMap(const DataTypePtr & nested_); explicit DataTypeMap(const DataTypes & elems); DataTypeMap(const DataTypePtr & key_type_, const DataTypePtr & value_type_); @@ -40,6 +41,7 @@ public: bool isComparable() const override { return key_type->isComparable() && value_type->isComparable(); } bool isParametric() const override { return true; } bool haveSubtypes() const override { return true; } + bool hasDynamicSubcolumns() const override { return nested->hasDynamicSubcolumns(); } const DataTypePtr & getKeyType() const { return key_type; } const DataTypePtr & getValueType() const { return value_type; } diff --git a/src/DataTypes/DataTypeObject.h 
b/src/DataTypes/DataTypeObject.h index 503947c3738..2f6ad961512 100644 --- a/src/DataTypes/DataTypeObject.h +++ b/src/DataTypes/DataTypeObject.h @@ -36,6 +36,7 @@ public: bool haveSubtypes() const override { return false; } bool equals(const IDataType & rhs) const override; bool isParametric() const override { return true; } + bool hasDynamicSubcolumns() const override { return true; } SerializationPtr doGetDefaultSerialization() const override; diff --git a/src/DataTypes/DataTypeTuple.cpp b/src/DataTypes/DataTypeTuple.cpp index ef05cd440c0..87cbac4cfb2 100644 --- a/src/DataTypes/DataTypeTuple.cpp +++ b/src/DataTypes/DataTypeTuple.cpp @@ -247,6 +247,11 @@ bool DataTypeTuple::haveMaximumSizeOfValue() const return std::all_of(elems.begin(), elems.end(), [](auto && elem) { return elem->haveMaximumSizeOfValue(); }); } +bool DataTypeTuple::hasDynamicSubcolumns() const +{ + return std::any_of(elems.begin(), elems.end(), [](auto && elem) { return elem->hasDynamicSubcolumns(); }); +} + bool DataTypeTuple::isComparable() const { return std::all_of(elems.begin(), elems.end(), [](auto && elem) { return elem->isComparable(); }); diff --git a/src/DataTypes/DataTypeTuple.h b/src/DataTypes/DataTypeTuple.h index 5abec18bd3b..152f21015f5 100644 --- a/src/DataTypes/DataTypeTuple.h +++ b/src/DataTypes/DataTypeTuple.h @@ -50,6 +50,7 @@ public: bool isComparable() const override; bool textCanContainOnlyValidUTF8() const override; bool haveMaximumSizeOfValue() const override; + bool hasDynamicSubcolumns() const override; size_t getMaximumSizeOfValueInMemory() const override; size_t getSizeOfValueInMemory() const override; diff --git a/src/DataTypes/IDataType.h b/src/DataTypes/IDataType.h index c93128ced95..4db944c8c3f 100644 --- a/src/DataTypes/IDataType.h +++ b/src/DataTypes/IDataType.h @@ -291,6 +291,9 @@ public: /// Strings, Numbers, Date, DateTime, Nullable virtual bool canBeInsideLowCardinality() const { return false; } + /// Object, Array(Object), Tuple(..., Object, ...) + virtual bool hasDynamicSubcolumns() const { return false; } + /// Updates avg_value_size_hint for newly read column. Uses to optimize deserialization. Zero expected for first column. 
static void updateAvgValueSizeHint(const IColumn & column, double & avg_value_size_hint); diff --git a/src/DataTypes/Native.h b/src/DataTypes/Native.h index 40086b14a0c..9782c5d64e9 100644 --- a/src/DataTypes/Native.h +++ b/src/DataTypes/Native.h @@ -224,7 +224,7 @@ static inline std::pair nativeCastToCommon(llvm::I size_t rhs_bit_width = rhs->getType()->getIntegerBitWidth() + (!rhs_is_signed && lhs_is_signed); size_t max_bit_width = std::max(lhs_bit_width, rhs_bit_width); - common = b.getIntNTy(max_bit_width); + common = b.getIntNTy(static_cast(max_bit_width)); } else { diff --git a/src/DataTypes/ObjectUtils.cpp b/src/DataTypes/ObjectUtils.cpp index e5d8d05acb5..e711b34ffa9 100644 --- a/src/DataTypes/ObjectUtils.cpp +++ b/src/DataTypes/ObjectUtils.cpp @@ -1,17 +1,19 @@ -#include #include #include #include #include +#include #include #include #include #include #include #include +#include #include #include #include +#include #include #include #include @@ -105,10 +107,11 @@ Array createEmptyArrayField(size_t num_dimensions) DataTypePtr getDataTypeByColumn(const IColumn & column) { auto idx = column.getDataType(); - if (WhichDataType(idx).isSimple()) + WhichDataType which(idx); + if (which.isSimple()) return DataTypeFactory::instance().get(String(magic_enum::enum_name(idx))); - if (WhichDataType(idx).isNothing()) + if (which.isNothing()) return std::make_shared(); if (const auto * column_array = checkAndGetColumn(&column)) @@ -132,41 +135,124 @@ static auto extractVector(const std::vector & vec) return res; } -void convertObjectsToTuples(Block & block, const NamesAndTypesList & extended_storage_columns) +static DataTypePtr recreateTupleWithElements(const DataTypeTuple & type_tuple, const DataTypes & elements) { - std::unordered_map storage_columns_map; - for (const auto & [name, type] : extended_storage_columns) - storage_columns_map[name] = type; - - for (auto & column : block) - { - if (!isObject(column.type)) - continue; - - const auto & column_object = assert_cast(*column.column); - if (!column_object.isFinalized()) - throw Exception(ErrorCodes::LOGICAL_ERROR, - "Cannot convert to tuple column '{}' from type {}. Column should be finalized first", - column.name, column.type->getName()); - - std::tie(column.column, column.type) = unflattenObjectToTuple(column_object); - - auto it = storage_columns_map.find(column.name); - if (it == storage_columns_map.end()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Column '{}' not found in storage", column.name); - - /// Check that constructed Tuple type and type in storage are compatible. - getLeastCommonTypeForObject({column.type, it->second}, true); - } + return type_tuple.haveExplicitNames() + ? 
std::make_shared(elements, type_tuple.getElementNames()) + : std::make_shared(elements); } -void deduceTypesOfObjectColumns(const StorageSnapshotPtr & storage_snapshot, Block & block) +static std::pair convertObjectColumnToTuple( + const ColumnObject & column_object, const DataTypeObject & type_object) { - if (!storage_snapshot->object_columns.empty()) + if (!column_object.isFinalized()) { - auto options = GetColumnsOptions(GetColumnsOptions::AllPhysical).withExtendedObjects(); - auto storage_columns = storage_snapshot->getColumns(options); - convertObjectsToTuples(block, storage_columns); + auto finalized = column_object.cloneFinalized(); + const auto & finalized_object = assert_cast(*finalized); + return convertObjectColumnToTuple(finalized_object, type_object); + } + + const auto & subcolumns = column_object.getSubcolumns(); + + PathsInData tuple_paths; + DataTypes tuple_types; + Columns tuple_columns; + + for (const auto & entry : subcolumns) + { + tuple_paths.emplace_back(entry->path); + tuple_types.emplace_back(entry->data.getLeastCommonType()); + tuple_columns.emplace_back(entry->data.getFinalizedColumnPtr()); + } + + return unflattenTuple(tuple_paths, tuple_types, tuple_columns); +} + +static std::pair recursivlyConvertDynamicColumnToTuple( + const ColumnPtr & column, const DataTypePtr & type) +{ + if (!type->hasDynamicSubcolumns()) + return {column, type}; + + if (const auto * type_object = typeid_cast(type.get())) + { + const auto & column_object = assert_cast(*column); + return convertObjectColumnToTuple(column_object, *type_object); + } + + if (const auto * type_array = typeid_cast(type.get())) + { + const auto & column_array = assert_cast(*column); + auto [new_column, new_type] = recursivlyConvertDynamicColumnToTuple( + column_array.getDataPtr(), type_array->getNestedType()); + + return + { + ColumnArray::create(new_column, column_array.getOffsetsPtr()), + std::make_shared(std::move(new_type)), + }; + } + + if (const auto * type_map = typeid_cast(type.get())) + { + const auto & column_map = assert_cast(*column); + auto [new_column, new_type] = recursivlyConvertDynamicColumnToTuple( + column_map.getNestedColumnPtr(), type_map->getNestedType()); + + return + { + ColumnMap::create(new_column), + std::make_shared(std::move(new_type)), + }; + } + + if (const auto * type_tuple = typeid_cast(type.get())) + { + const auto & tuple_columns = assert_cast(*column).getColumns(); + const auto & tuple_types = type_tuple->getElements(); + + assert(tuple_columns.size() == tuple_types.size()); + const size_t tuple_size = tuple_types.size(); + + Columns new_tuple_columns(tuple_size); + DataTypes new_tuple_types(tuple_size); + + for (size_t i = 0; i < tuple_size; ++i) + { + std::tie(new_tuple_columns[i], new_tuple_types[i]) + = recursivlyConvertDynamicColumnToTuple(tuple_columns[i], tuple_types[i]); + } + + return + { + ColumnTuple::create(new_tuple_columns), + recreateTupleWithElements(*type_tuple, new_tuple_types) + }; + } + + throw Exception(ErrorCodes::LOGICAL_ERROR, "Type {} unexpectedly has dynamic columns", type->getName()); +} + +void convertDynamicColumnsToTuples(Block & block, const StorageSnapshotPtr & storage_snapshot) +{ + for (auto & column : block) + { + if (!column.type->hasDynamicSubcolumns()) + continue; + + std::tie(column.column, column.type) + = recursivlyConvertDynamicColumnToTuple(column.column, column.type); + + GetColumnsOptions options(GetColumnsOptions::AllPhysical); + auto storage_column = storage_snapshot->tryGetColumn(options, column.name); + if (!storage_column) + 
throw Exception(ErrorCodes::LOGICAL_ERROR, "Column '{}' not found in storage", column.name); + + auto storage_column_concrete = storage_snapshot->getColumn(options.withExtendedObjects(), column.name); + + /// Check that constructed Tuple type and type in storage are compatible. + getLeastCommonTypeForDynamicColumns( + storage_column->type, {column.type, storage_column_concrete.type}, true); } } @@ -217,24 +303,8 @@ void checkObjectHasNoAmbiguosPaths(const PathsInData & paths) } } -DataTypePtr getLeastCommonTypeForObject(const DataTypes & types, bool check_ambiguos_paths) +static DataTypePtr getLeastCommonTypeForObject(const DataTypes & types, bool check_ambiguos_paths) { - if (types.empty()) - return nullptr; - - bool all_equal = true; - for (size_t i = 1; i < types.size(); ++i) - { - if (!types[i]->equals(*types[0])) - { - all_equal = false; - break; - } - } - - if (all_equal) - return types[0]; - /// Types of subcolumns by path from all tuples. std::unordered_map subcolumns_types; @@ -287,19 +357,139 @@ DataTypePtr getLeastCommonTypeForObject(const DataTypes & types, bool check_ambi return unflattenTuple(tuple_paths, tuple_types); } -NameSet getNamesOfObjectColumns(const NamesAndTypesList & columns_list) -{ - NameSet res; - for (const auto & [name, type] : columns_list) - if (isObject(type)) - res.insert(name); +static DataTypePtr getLeastCommonTypeForDynamicColumnsImpl( + const DataTypePtr & type_in_storage, const DataTypes & concrete_types, bool check_ambiguos_paths); - return res; +template +static DataTypePtr getLeastCommonTypeForColumnWithNestedType( + const Type & type, const DataTypes & concrete_types, bool check_ambiguos_paths) +{ + DataTypes nested_types; + nested_types.reserve(concrete_types.size()); + + for (const auto & concrete_type : concrete_types) + { + const auto * type_with_nested_conctete = typeid_cast(concrete_type.get()); + if (!type_with_nested_conctete) + throw Exception(ErrorCodes::TYPE_MISMATCH, "Expected {} type, got {}", demangle(typeid(Type).name()), concrete_type->getName()); + + nested_types.push_back(type_with_nested_conctete->getNestedType()); + } + + return std::make_shared( + getLeastCommonTypeForDynamicColumnsImpl( + type.getNestedType(), nested_types, check_ambiguos_paths)); } -bool hasObjectColumns(const ColumnsDescription & columns) +static DataTypePtr getLeastCommonTypeForTuple( + const DataTypeTuple & type, const DataTypes & concrete_types, bool check_ambiguos_paths) { - return std::any_of(columns.begin(), columns.end(), [](const auto & column) { return isObject(column.type); }); + const auto & element_types = type.getElements(); + DataTypes new_element_types(element_types.size()); + + for (size_t i = 0; i < element_types.size(); ++i) + { + DataTypes concrete_element_types; + concrete_element_types.reserve(concrete_types.size()); + + for (const auto & type_concrete : concrete_types) + { + const auto * type_tuple_conctete = typeid_cast(type_concrete.get()); + if (!type_tuple_conctete) + throw Exception(ErrorCodes::TYPE_MISMATCH, "Expected Tuple type, got {}", type_concrete->getName()); + + concrete_element_types.push_back(type_tuple_conctete->getElement(i)); + } + + new_element_types[i] = getLeastCommonTypeForDynamicColumnsImpl( + element_types[i], concrete_element_types, check_ambiguos_paths); + } + + return recreateTupleWithElements(type, new_element_types); +} + +static DataTypePtr getLeastCommonTypeForDynamicColumnsImpl( + const DataTypePtr & type_in_storage, const DataTypes & concrete_types, bool check_ambiguos_paths) +{ + if 
(!type_in_storage->hasDynamicSubcolumns()) + return type_in_storage; + + if (isObject(type_in_storage)) + return getLeastCommonTypeForObject(concrete_types, check_ambiguos_paths); + + if (const auto * type_array = typeid_cast(type_in_storage.get())) + return getLeastCommonTypeForColumnWithNestedType(*type_array, concrete_types, check_ambiguos_paths); + + if (const auto * type_map = typeid_cast(type_in_storage.get())) + return getLeastCommonTypeForColumnWithNestedType(*type_map, concrete_types, check_ambiguos_paths); + + if (const auto * type_tuple = typeid_cast(type_in_storage.get())) + return getLeastCommonTypeForTuple(*type_tuple, concrete_types, check_ambiguos_paths); + + throw Exception(ErrorCodes::LOGICAL_ERROR, "Type {} unexpectedly has dynamic columns", type_in_storage->getName()); +} + +DataTypePtr getLeastCommonTypeForDynamicColumns( + const DataTypePtr & type_in_storage, const DataTypes & concrete_types, bool check_ambiguos_paths) +{ + if (concrete_types.empty()) + return nullptr; + + bool all_equal = true; + for (size_t i = 1; i < concrete_types.size(); ++i) + { + if (!concrete_types[i]->equals(*concrete_types[0])) + { + all_equal = false; + break; + } + } + + if (all_equal) + return concrete_types[0]; + + return getLeastCommonTypeForDynamicColumnsImpl(type_in_storage, concrete_types, check_ambiguos_paths); +} + +DataTypePtr createConcreteEmptyDynamicColumn(const DataTypePtr & type_in_storage) +{ + if (!type_in_storage->hasDynamicSubcolumns()) + return type_in_storage; + + if (isObject(type_in_storage)) + return std::make_shared( + DataTypes{std::make_shared()}, Names{ColumnObject::COLUMN_NAME_DUMMY}); + + if (const auto * type_array = typeid_cast(type_in_storage.get())) + return std::make_shared( + createConcreteEmptyDynamicColumn(type_array->getNestedType())); + + if (const auto * type_map = typeid_cast(type_in_storage.get())) + return std::make_shared( + createConcreteEmptyDynamicColumn(type_map->getNestedType())); + + if (const auto * type_tuple = typeid_cast(type_in_storage.get())) + { + const auto & elements = type_tuple->getElements(); + DataTypes new_elements; + new_elements.reserve(elements.size()); + + for (const auto & element : elements) + new_elements.push_back(createConcreteEmptyDynamicColumn(element)); + + return recreateTupleWithElements(*type_tuple, new_elements); + } + + throw Exception(ErrorCodes::LOGICAL_ERROR, "Type {} unexpectedly has dynamic columns", type_in_storage->getName()); +} + +bool hasDynamicSubcolumns(const ColumnsDescription & columns) +{ + return std::any_of(columns.begin(), columns.end(), + [](const auto & column) + { + return column.type->hasDynamicSubcolumns(); + }); } void extendObjectColumns(NamesAndTypesList & columns_list, const ColumnsDescription & object_columns, bool with_subcolumns) @@ -320,16 +510,20 @@ void extendObjectColumns(NamesAndTypesList & columns_list, const ColumnsDescript columns_list.splice(columns_list.end(), std::move(subcolumns_list)); } -void updateObjectColumns(ColumnsDescription & object_columns, const NamesAndTypesList & new_columns) +void updateObjectColumns( + ColumnsDescription & object_columns, + const ColumnsDescription & storage_columns, + const NamesAndTypesList & new_columns) { for (const auto & new_column : new_columns) { auto object_column = object_columns.tryGetColumn(GetColumnsOptions::All, new_column.name); if (object_column && !object_column->type->equals(*new_column.type)) { + auto storage_column = storage_columns.getColumn(GetColumnsOptions::All, new_column.name); 
object_columns.modify(new_column.name, [&](auto & column) { - column.type = getLeastCommonTypeForObject({object_column->type, new_column.type}); + column.type = getLeastCommonTypeForDynamicColumns(storage_column.type, {object_column->type, new_column.type}); }); } } @@ -745,13 +939,6 @@ void replaceMissedSubcolumnsByConstants( addConstantToWithClause(query, name, type); } -void finalizeObjectColumns(const MutableColumns & columns) -{ - for (const auto & column : columns) - if (auto * column_object = typeid_cast(column.get())) - column_object->finalize(); -} - Field FieldVisitorReplaceScalars::operator()(const Array & x) const { if (num_dimensions_to_keep == 0) @@ -768,11 +955,13 @@ size_t FieldVisitorToNumberOfDimensions::operator()(const Array & x) { const size_t size = x.size(); size_t dimensions = 0; + for (size_t i = 0; i < size; ++i) { size_t element_dimensions = applyVisitor(*this, x[i]); if (i > 0 && element_dimensions != dimensions) need_fold_dimension = true; + dimensions = std::max(dimensions, element_dimensions); } @@ -783,12 +972,13 @@ Field FieldVisitorFoldDimension::operator()(const Array & x) const { if (num_dimensions_to_fold == 0) return x; + const size_t size = x.size(); Array res(size); for (size_t i = 0; i < size; ++i) - { res[i] = applyVisitor(FieldVisitorFoldDimension(num_dimensions_to_fold - 1), x[i]); - } + return res; } + } diff --git a/src/DataTypes/ObjectUtils.h b/src/DataTypes/ObjectUtils.h index c60d5bec208..bd15edfe851 100644 --- a/src/DataTypes/ObjectUtils.h +++ b/src/DataTypes/ObjectUtils.h @@ -39,27 +39,31 @@ Array createEmptyArrayField(size_t num_dimensions); DataTypePtr getDataTypeByColumn(const IColumn & column); /// Converts Object types and columns to Tuples in @columns_list and @block -/// and checks that types are consistent with types in @extended_storage_columns. -void convertObjectsToTuples(Block & block, const NamesAndTypesList & extended_storage_columns); -void deduceTypesOfObjectColumns(const StorageSnapshotPtr & storage_snapshot, Block & block); +/// and checks that types are consistent with types in @storage_snapshot. +void convertDynamicColumnsToTuples(Block & block, const StorageSnapshotPtr & storage_snapshot); /// Checks that each path is not the prefix of any other path. void checkObjectHasNoAmbiguosPaths(const PathsInData & paths); /// Receives several Tuple types and deduces the least common type among them. -DataTypePtr getLeastCommonTypeForObject(const DataTypes & types, bool check_ambiguos_paths = false); +DataTypePtr getLeastCommonTypeForDynamicColumns( + const DataTypePtr & type_in_storage, const DataTypes & types, bool check_ambiguos_paths = false); + +DataTypePtr createConcreteEmptyDynamicColumn(const DataTypePtr & type_in_storage); /// Converts types of object columns to tuples in @columns_list /// according to @object_columns and adds all tuple's subcolumns if needed. void extendObjectColumns(NamesAndTypesList & columns_list, const ColumnsDescription & object_columns, bool with_subcolumns); -NameSet getNamesOfObjectColumns(const NamesAndTypesList & columns_list); -bool hasObjectColumns(const ColumnsDescription & columns); -void finalizeObjectColumns(const MutableColumns & columns); +/// Checks whether @columns contain any column with dynamic subcolumns. +bool hasDynamicSubcolumns(const ColumnsDescription & columns); /// Updates types of objects in @object_columns inplace /// according to types in new_columns. 
-void updateObjectColumns(ColumnsDescription & object_columns, const NamesAndTypesList & new_columns); +void updateObjectColumns( + ColumnsDescription & object_columns, + const ColumnsDescription & storage_columns, + const NamesAndTypesList & new_columns); using DataTypeTuplePtr = std::shared_ptr; @@ -142,13 +146,15 @@ public: { if (num_dimensions_to_fold == 0) return x; - Array res(1,x); + + Array res(1, x); for (size_t i = 1; i < num_dimensions_to_fold; ++i) { Array new_res; new_res.push_back(std::move(res)); res = std::move(new_res); } + return res; } @@ -163,7 +169,7 @@ private: /// columns-like objects from entry to which Iterator points. /// columns-like object should have fields "name" and "type". template -ColumnsDescription getObjectColumns( +ColumnsDescription getConcreteObjectColumns( Iterator begin, Iterator end, const ColumnsDescription & storage_columns, EntryColumnsGetter && entry_columns_getter) @@ -176,14 +182,8 @@ ColumnsDescription getObjectColumns( /// dummy column will be removed. for (const auto & column : storage_columns) { - if (isObject(column.type)) - { - auto tuple_type = std::make_shared( - DataTypes{std::make_shared()}, - Names{ColumnObject::COLUMN_NAME_DUMMY}); - - types_in_entries[column.name].push_back(std::move(tuple_type)); - } + if (column.type->hasDynamicSubcolumns()) + types_in_entries[column.name].push_back(createConcreteEmptyDynamicColumn(column.type)); } for (auto it = begin; it != end; ++it) @@ -192,14 +192,17 @@ ColumnsDescription getObjectColumns( for (const auto & column : entry_columns) { auto storage_column = storage_columns.tryGetPhysical(column.name); - if (storage_column && isObject(storage_column->type)) + if (storage_column && storage_column->type->hasDynamicSubcolumns()) types_in_entries[column.name].push_back(column.type); } } ColumnsDescription res; for (const auto & [name, types] : types_in_entries) - res.add({name, getLeastCommonTypeForObject(types)}); + { + auto storage_column = storage_columns.getPhysical(name); + res.add({name, getLeastCommonTypeForDynamicColumns(storage_column.type, types)}); + } return res; } diff --git a/src/DataTypes/Serializations/ISerialization.h b/src/DataTypes/Serializations/ISerialization.h index 1193c15b939..d64b41253f5 100644 --- a/src/DataTypes/Serializations/ISerialization.h +++ b/src/DataTypes/Serializations/ISerialization.h @@ -249,7 +249,9 @@ public: }; /// Call before serializeBinaryBulkWithMultipleStreams chain to write something before first mark. + /// Column may be used only to retrieve the structure. 
virtual void serializeBinaryBulkStatePrefix( + const IColumn & /*column*/, SerializeBinaryBulkSettings & /*settings*/, SerializeBinaryBulkStatePtr & /*state*/) const {} diff --git a/src/DataTypes/Serializations/SerializationArray.cpp b/src/DataTypes/Serializations/SerializationArray.cpp index eb93b5049a0..143a3264381 100644 --- a/src/DataTypes/Serializations/SerializationArray.cpp +++ b/src/DataTypes/Serializations/SerializationArray.cpp @@ -246,11 +246,13 @@ void SerializationArray::enumerateStreams( } void SerializationArray::serializeBinaryBulkStatePrefix( + const IColumn & column, SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const { settings.path.push_back(Substream::ArrayElements); - nested->serializeBinaryBulkStatePrefix(settings, state); + const auto & column_array = assert_cast(column); + nested->serializeBinaryBulkStatePrefix(column_array.getData(), settings, state); settings.path.pop_back(); } diff --git a/src/DataTypes/Serializations/SerializationArray.h b/src/DataTypes/Serializations/SerializationArray.h index 84e37acbaad..860461d667f 100644 --- a/src/DataTypes/Serializations/SerializationArray.h +++ b/src/DataTypes/Serializations/SerializationArray.h @@ -41,6 +41,7 @@ public: const SubstreamData & data) const override; void serializeBinaryBulkStatePrefix( + const IColumn & column, SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const override; diff --git a/src/DataTypes/Serializations/SerializationDate.cpp b/src/DataTypes/Serializations/SerializationDate.cpp index 60db191a9dc..678817017e0 100644 --- a/src/DataTypes/Serializations/SerializationDate.cpp +++ b/src/DataTypes/Serializations/SerializationDate.cpp @@ -76,9 +76,9 @@ void SerializationDate::serializeTextCSV(const IColumn & column, size_t row_num, void SerializationDate::deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings &) const { - LocalDate value; + DayNum value; readCSV(value, istr); - assert_cast(column).getData().push_back(value.getDayNum()); + assert_cast(column).getData().push_back(value); } } diff --git a/src/DataTypes/Serializations/SerializationDateTime.cpp b/src/DataTypes/Serializations/SerializationDateTime.cpp index fd56c1baebd..7238d3ce190 100644 --- a/src/DataTypes/Serializations/SerializationDateTime.cpp +++ b/src/DataTypes/Serializations/SerializationDateTime.cpp @@ -75,7 +75,7 @@ void SerializationDateTime::deserializeTextEscaped(IColumn & column, ReadBuffer readText(x, istr, settings, time_zone, utc_time_zone); if (x < 0) x = 0; - assert_cast(column).getData().push_back(x); + assert_cast(column).getData().push_back(static_cast(x)); } void SerializationDateTime::serializeTextQuoted(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const @@ -99,7 +99,9 @@ void SerializationDateTime::deserializeTextQuoted(IColumn & column, ReadBuffer & } if (x < 0) x = 0; - assert_cast(column).getData().push_back(x); /// It's important to do this at the end - for exception safety. + + /// It's important to do this at the end - for exception safety. 
+ assert_cast(column).getData().push_back(static_cast(x)); } void SerializationDateTime::serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const @@ -123,7 +125,7 @@ void SerializationDateTime::deserializeTextJSON(IColumn & column, ReadBuffer & i } if (x < 0) x = 0; - assert_cast(column).getData().push_back(x); + assert_cast(column).getData().push_back(static_cast(x)); } void SerializationDateTime::serializeTextCSV(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const @@ -153,7 +155,7 @@ void SerializationDateTime::deserializeTextCSV(IColumn & column, ReadBuffer & is if (x < 0) x = 0; - assert_cast(column).getData().push_back(x); + assert_cast(column).getData().push_back(static_cast(x)); } } diff --git a/src/DataTypes/Serializations/SerializationInfoTuple.cpp b/src/DataTypes/Serializations/SerializationInfoTuple.cpp index d0fa5572a48..6c326743e8a 100644 --- a/src/DataTypes/Serializations/SerializationInfoTuple.cpp +++ b/src/DataTypes/Serializations/SerializationInfoTuple.cpp @@ -124,7 +124,7 @@ void SerializationInfoTuple::fromJSON(const Poco::JSON::Object & object) "Expected: {}, got: {}", elems.size(), subcolumns->size()); for (size_t i = 0; i < elems.size(); ++i) - elems[i]->fromJSON(*subcolumns->getObject(i)); + elems[i]->fromJSON(*subcolumns->getObject(static_cast(i))); } } diff --git a/src/DataTypes/Serializations/SerializationLowCardinality.cpp b/src/DataTypes/Serializations/SerializationLowCardinality.cpp index dfe0188c8e7..c70bb1e1465 100644 --- a/src/DataTypes/Serializations/SerializationLowCardinality.cpp +++ b/src/DataTypes/Serializations/SerializationLowCardinality.cpp @@ -221,6 +221,7 @@ struct DeserializeStateLowCardinality : public ISerialization::DeserializeBinary }; void SerializationLowCardinality::serializeBinaryBulkStatePrefix( + const IColumn & /*column*/, SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const { @@ -385,13 +386,13 @@ namespace } else if (map[val] == 0 && val != zero_pos_value) { - map[val] = cur_pos; + map[val] = static_cast(cur_pos); ++cur_pos; } } else { - T shifted_val = val - dict_size; + T shifted_val = static_cast(val - dict_size); if (cur_overflowed_pos == 0) { zero_pos_overflowed_value = shifted_val; @@ -399,7 +400,7 @@ namespace } else if (overflow_map[shifted_val] == 0 && shifted_val != zero_pos_overflowed_value) { - overflow_map[shifted_val] = cur_overflowed_pos; + overflow_map[shifted_val] = static_cast(cur_overflowed_pos); ++cur_overflowed_pos; } } @@ -429,7 +430,7 @@ namespace if (val < dict_size) val = map[val]; else - val = overflow_map[val - dict_size] + cur_pos; + val = overflow_map[val - dict_size] + static_cast(cur_pos); } return {std::move(dictionary_map), std::move(additional_keys_map)}; diff --git a/src/DataTypes/Serializations/SerializationLowCardinality.h b/src/DataTypes/Serializations/SerializationLowCardinality.h index cc090f2044e..1d0c3226faf 100644 --- a/src/DataTypes/Serializations/SerializationLowCardinality.h +++ b/src/DataTypes/Serializations/SerializationLowCardinality.h @@ -23,6 +23,7 @@ public: const SubstreamData & data) const override; void serializeBinaryBulkStatePrefix( + const IColumn & column, SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const override; diff --git a/src/DataTypes/Serializations/SerializationMap.cpp b/src/DataTypes/Serializations/SerializationMap.cpp index 958e33fbaf4..cd0a99c0c68 100644 --- 
a/src/DataTypes/Serializations/SerializationMap.cpp +++ b/src/DataTypes/Serializations/SerializationMap.cpp @@ -270,10 +270,11 @@ void SerializationMap::enumerateStreams( } void SerializationMap::serializeBinaryBulkStatePrefix( + const IColumn & column, SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const { - nested->serializeBinaryBulkStatePrefix(settings, state); + nested->serializeBinaryBulkStatePrefix(extractNestedColumn(column), settings, state); } void SerializationMap::serializeBinaryBulkStateSuffix( diff --git a/src/DataTypes/Serializations/SerializationMap.h b/src/DataTypes/Serializations/SerializationMap.h index 42f99ca7991..864ac1f3a99 100644 --- a/src/DataTypes/Serializations/SerializationMap.h +++ b/src/DataTypes/Serializations/SerializationMap.h @@ -37,6 +37,7 @@ public: const SubstreamData & data) const override; void serializeBinaryBulkStatePrefix( + const IColumn & column, SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const override; diff --git a/src/DataTypes/Serializations/SerializationNamed.cpp b/src/DataTypes/Serializations/SerializationNamed.cpp index 4dac4b3a922..ca60948ce68 100644 --- a/src/DataTypes/Serializations/SerializationNamed.cpp +++ b/src/DataTypes/Serializations/SerializationNamed.cpp @@ -17,11 +17,12 @@ void SerializationNamed::enumerateStreams( } void SerializationNamed::serializeBinaryBulkStatePrefix( + const IColumn & column, SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const { addToPath(settings.path); - nested_serialization->serializeBinaryBulkStatePrefix(settings, state); + nested_serialization->serializeBinaryBulkStatePrefix(column, settings, state); settings.path.pop_back(); } diff --git a/src/DataTypes/Serializations/SerializationNamed.h b/src/DataTypes/Serializations/SerializationNamed.h index 2a2c7c0dfc7..52bbb039442 100644 --- a/src/DataTypes/Serializations/SerializationNamed.h +++ b/src/DataTypes/Serializations/SerializationNamed.h @@ -31,6 +31,7 @@ public: const SubstreamData & data) const override; void serializeBinaryBulkStatePrefix( + const IColumn & column, SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const override; diff --git a/src/DataTypes/Serializations/SerializationNullable.cpp b/src/DataTypes/Serializations/SerializationNullable.cpp index 560b73bc827..c46fde27ddb 100644 --- a/src/DataTypes/Serializations/SerializationNullable.cpp +++ b/src/DataTypes/Serializations/SerializationNullable.cpp @@ -70,11 +70,13 @@ void SerializationNullable::enumerateStreams( } void SerializationNullable::serializeBinaryBulkStatePrefix( + const IColumn & column, SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const { settings.path.push_back(Substream::NullableElements); - nested->serializeBinaryBulkStatePrefix(settings, state); + const auto & column_nullable = assert_cast(column); + nested->serializeBinaryBulkStatePrefix(column_nullable.getNestedColumn(), settings, state); settings.path.pop_back(); } diff --git a/src/DataTypes/Serializations/SerializationNullable.h b/src/DataTypes/Serializations/SerializationNullable.h index ea3958065e7..9aabbe299cc 100644 --- a/src/DataTypes/Serializations/SerializationNullable.h +++ b/src/DataTypes/Serializations/SerializationNullable.h @@ -19,6 +19,7 @@ public: const SubstreamData & data) const override; void serializeBinaryBulkStatePrefix( + const IColumn & column, SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const override; diff 
--git a/src/DataTypes/Serializations/SerializationObject.cpp b/src/DataTypes/Serializations/SerializationObject.cpp index b893407e7a5..98a94886f67 100644 --- a/src/DataTypes/Serializations/SerializationObject.cpp +++ b/src/DataTypes/Serializations/SerializationObject.cpp @@ -13,8 +13,6 @@ #include #include -#include - #include #include #include @@ -30,6 +28,7 @@ namespace ErrorCodes extern const int NOT_IMPLEMENTED; extern const int INCORRECT_DATA; extern const int CANNOT_READ_ALL_DATA; + extern const int ARGUMENT_OUT_OF_BOUND; extern const int LOGICAL_ERROR; } @@ -141,7 +140,6 @@ void SerializationObject::checkSerializationIsSupported(const TSettings template struct SerializationObject::SerializeStateObject : public ISerialization::SerializeBinaryBulkState { - bool is_first = true; DataTypePtr nested_type; SerializationPtr nested_serialization; SerializeBinaryBulkStatePtr nested_state; @@ -158,6 +156,7 @@ struct SerializationObject::DeserializeStateObject : public ISerializati template void SerializationObject::serializeBinaryBulkStatePrefix( + const IColumn & column, SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const { @@ -166,15 +165,34 @@ void SerializationObject::serializeBinaryBulkStatePrefix( throw Exception(ErrorCodes::NOT_IMPLEMENTED, "DataTypeObject doesn't support serialization with non-trivial state"); + const auto & column_object = assert_cast(column); + if (!column_object.isFinalized()) + { + auto finalized = column_object.cloneFinalized(); + serializeBinaryBulkStatePrefix(*finalized, settings, state); + return; + } + settings.path.push_back(Substream::ObjectStructure); auto * stream = settings.getter(settings.path); - settings.path.pop_back(); if (!stream) throw Exception(ErrorCodes::LOGICAL_ERROR, "Missing stream for kind of binary serialization"); + auto [tuple_column, tuple_type] = unflattenObjectToTuple(column_object); + writeIntBinary(static_cast(BinarySerializationKind::TUPLE), *stream); - state = std::make_shared(); + writeStringBinary(tuple_type->getName(), *stream); + + auto state_object = std::make_shared(); + state_object->nested_type = tuple_type; + state_object->nested_serialization = tuple_type->getDefaultSerialization(); + + settings.path.back() = Substream::ObjectData; + state_object->nested_serialization->serializeBinaryBulkStatePrefix(*tuple_column, settings, state_object->nested_state); + + state = std::move(state_object); + settings.path.pop_back(); } template @@ -261,33 +279,14 @@ void SerializationObject::serializeBinaryBulkWithMultipleStreams( if (!column_object.isFinalized()) { - auto finalized_object = column_object.clone(); - assert_cast(*finalized_object).finalize(); - serializeBinaryBulkWithMultipleStreams(*finalized_object, offset, limit, settings, state); + auto finalized = column_object.cloneFinalized(); + serializeBinaryBulkWithMultipleStreams(*finalized, offset, limit, settings, state); return; } auto [tuple_column, tuple_type] = unflattenObjectToTuple(column_object); - if (state_object->is_first) - { - /// Actually it's a part of serializeBinaryBulkStatePrefix, - /// but it cannot be done there, because we have to know the - /// structure of column. 
- - settings.path.push_back(Substream::ObjectStructure); - if (auto * stream = settings.getter(settings.path)) - writeStringBinary(tuple_type->getName(), *stream); - - state_object->nested_type = tuple_type; - state_object->nested_serialization = tuple_type->getDefaultSerialization(); - state_object->is_first = false; - - settings.path.back() = Substream::ObjectData; - state_object->nested_serialization->serializeBinaryBulkStatePrefix(settings, state_object->nested_state); - settings.path.pop_back(); - } - else if (!state_object->nested_type->equals(*tuple_type)) + if (!state_object->nested_type->equals(*tuple_type)) { throw Exception(ErrorCodes::LOGICAL_ERROR, "Types of internal column of Object mismatched. Expected: {}, Got: {}", @@ -411,18 +410,63 @@ void SerializationObject::serializeTextImpl(const IColumn & column, size writeChar('{', ostr); for (auto it = subcolumns.begin(); it != subcolumns.end(); ++it) { + const auto & entry = *it; if (it != subcolumns.begin()) writeCString(",", ostr); - writeDoubleQuoted((*it)->path.getPath(), ostr); + writeDoubleQuoted(entry->path.getPath(), ostr); writeChar(':', ostr); - - auto serialization = (*it)->data.getLeastCommonType()->getDefaultSerialization(); - serialization->serializeTextJSON((*it)->data.getFinalizedColumn(), row_num, ostr, settings); + serializeTextFromSubcolumn(entry->data, row_num, ostr, settings); } writeChar('}', ostr); } +template +void SerializationObject::serializeTextFromSubcolumn( + const ColumnObject::Subcolumn & subcolumn, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const +{ + const auto & least_common_type = subcolumn.getLeastCommonType(); + + if (subcolumn.isFinalized()) + { + const auto & finalized_column = subcolumn.getFinalizedColumn(); + auto info = least_common_type->getSerializationInfo(finalized_column); + auto serialization = least_common_type->getSerialization(*info); + serialization->serializeTextJSON(finalized_column, row_num, ostr, settings); + return; + } + + size_t ind = row_num; + if (ind < subcolumn.getNumberOfDefaultsInPrefix()) + { + /// Suboptimal, but it should happen rarely. + auto tmp_column = subcolumn.getLeastCommonType()->createColumn(); + tmp_column->insertDefault(); + + auto info = least_common_type->getSerializationInfo(*tmp_column); + auto serialization = least_common_type->getSerialization(*info); + serialization->serializeTextJSON(*tmp_column, 0, ostr, settings); + return; + } + + ind -= subcolumn.getNumberOfDefaultsInPrefix(); + for (const auto & part : subcolumn.getData()) + { + if (ind < part->size()) + { + auto part_type = getDataTypeByColumn(*part); + auto info = part_type->getSerializationInfo(*part); + auto serialization = part_type->getSerialization(*info); + serialization->serializeTextJSON(*part, ind, ostr, settings); + return; + } + + ind -= part->size(); + } + + throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Index ({}) for text serialization is out of range", row_num); +} + template void SerializationObject::serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const { diff --git a/src/DataTypes/Serializations/SerializationObject.h b/src/DataTypes/Serializations/SerializationObject.h index ff72c84faaa..47a7127cd1c 100644 --- a/src/DataTypes/Serializations/SerializationObject.h +++ b/src/DataTypes/Serializations/SerializationObject.h @@ -8,7 +8,7 @@ namespace DB { /** Serialization for data type Object. - * Supported only test serialization/deserialization. 
+ * Supported only text serialization/deserialization. * and binary bulk serialization/deserialization without position independent * encoding, i.e. serialization/deserialization into Native format. */ @@ -31,6 +31,7 @@ public: */ void serializeBinaryBulkStatePrefix( + const IColumn & column, SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const override; @@ -104,6 +105,7 @@ private: void deserializeTextImpl(IColumn & column, Reader && reader) const; void serializeTextImpl(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const; + void serializeTextFromSubcolumn(const ColumnObject::Subcolumn & subcolumn, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const; /// Pool of parser objects to make SerializationObject thread safe. mutable SimpleObjectPool parsers_pool; diff --git a/src/DataTypes/Serializations/SerializationSparse.cpp b/src/DataTypes/Serializations/SerializationSparse.cpp index 855bdfa1b3e..cd09cd7be5a 100644 --- a/src/DataTypes/Serializations/SerializationSparse.cpp +++ b/src/DataTypes/Serializations/SerializationSparse.cpp @@ -178,11 +178,16 @@ void SerializationSparse::enumerateStreams( } void SerializationSparse::serializeBinaryBulkStatePrefix( + const IColumn & column, SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const { settings.path.push_back(Substream::SparseElements); - nested->serializeBinaryBulkStatePrefix(settings, state); + if (const auto * column_sparse = typeid_cast(&column)) + nested->serializeBinaryBulkStatePrefix(column_sparse->getValuesColumn(), settings, state); + else + nested->serializeBinaryBulkStatePrefix(column, settings, state); + settings.path.pop_back(); } diff --git a/src/DataTypes/Serializations/SerializationSparse.h b/src/DataTypes/Serializations/SerializationSparse.h index dc2f63c5a05..c157fe7ce98 100644 --- a/src/DataTypes/Serializations/SerializationSparse.h +++ b/src/DataTypes/Serializations/SerializationSparse.h @@ -33,6 +33,7 @@ public: const SubstreamData & data) const override; void serializeBinaryBulkStatePrefix( + const IColumn & column, SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const override; diff --git a/src/DataTypes/Serializations/SerializationTuple.cpp b/src/DataTypes/Serializations/SerializationTuple.cpp index c2c40cbb507..8ffb1fe86bc 100644 --- a/src/DataTypes/Serializations/SerializationTuple.cpp +++ b/src/DataTypes/Serializations/SerializationTuple.cpp @@ -314,6 +314,7 @@ struct DeserializeBinaryBulkStateTuple : public ISerialization::DeserializeBinar void SerializationTuple::serializeBinaryBulkStatePrefix( + const IColumn & column, SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const { @@ -321,7 +322,7 @@ void SerializationTuple::serializeBinaryBulkStatePrefix( tuple_state->states.resize(elems.size()); for (size_t i = 0; i < elems.size(); ++i) - elems[i]->serializeBinaryBulkStatePrefix(settings, tuple_state->states[i]); + elems[i]->serializeBinaryBulkStatePrefix(extractElementColumn(column, i), settings, tuple_state->states[i]); state = std::move(tuple_state); } diff --git a/src/DataTypes/Serializations/SerializationTuple.h b/src/DataTypes/Serializations/SerializationTuple.h index d1caeb73dad..db0339bc996 100644 --- a/src/DataTypes/Serializations/SerializationTuple.h +++ b/src/DataTypes/Serializations/SerializationTuple.h @@ -39,6 +39,7 @@ public: const SubstreamData & data) const override; void serializeBinaryBulkStatePrefix( + const IColumn 
& column, SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const override; diff --git a/src/DataTypes/Serializations/SerializationWrapper.cpp b/src/DataTypes/Serializations/SerializationWrapper.cpp index 7c50c1c6e26..c83de614751 100644 --- a/src/DataTypes/Serializations/SerializationWrapper.cpp +++ b/src/DataTypes/Serializations/SerializationWrapper.cpp @@ -13,10 +13,11 @@ void SerializationWrapper::enumerateStreams( } void SerializationWrapper::serializeBinaryBulkStatePrefix( + const IColumn & column, SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const { - nested_serialization->serializeBinaryBulkStatePrefix(settings, state); + nested_serialization->serializeBinaryBulkStatePrefix(column, settings, state); } void SerializationWrapper::serializeBinaryBulkStateSuffix( diff --git a/src/DataTypes/Serializations/SerializationWrapper.h b/src/DataTypes/Serializations/SerializationWrapper.h index d010c6b5314..46941f150e1 100644 --- a/src/DataTypes/Serializations/SerializationWrapper.h +++ b/src/DataTypes/Serializations/SerializationWrapper.h @@ -26,6 +26,7 @@ public: const SubstreamData & data) const override; void serializeBinaryBulkStatePrefix( + const IColumn & column, SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const override; diff --git a/src/DataTypes/Serializations/tests/gtest_json_parser.cpp b/src/DataTypes/Serializations/tests/gtest_json_parser.cpp index 4dddb3cd03d..9b0c8e44d02 100644 --- a/src/DataTypes/Serializations/tests/gtest_json_parser.cpp +++ b/src/DataTypes/Serializations/tests/gtest_json_parser.cpp @@ -69,7 +69,7 @@ static std::ostream & operator<<(std::ostream & ostr, const JSONPathAndValue & p bool first = true; for (const auto & part : path_and_value.path.getParts()) { - ostr << (first ? "{" : ", {") << part.key << ", " << part.is_nested << ", " << part.anonymous_array_level << "}"; + ostr << (first ? "{" : ", {") << part.key << ", " << part.is_nested << ", " << static_cast(part.anonymous_array_level) << "}"; first = false; } diff --git a/src/DataTypes/Serializations/tests/gtest_object_serialization.cpp b/src/DataTypes/Serializations/tests/gtest_object_serialization.cpp index f1fbbe115e2..fc7432d5bf6 100644 --- a/src/DataTypes/Serializations/tests/gtest_object_serialization.cpp +++ b/src/DataTypes/Serializations/tests/gtest_object_serialization.cpp @@ -31,7 +31,7 @@ TEST(SerializationObject, FromString) settings.getter = [&out](const auto &) { return &out; }; writeIntBinary(static_cast(1), out); - serialization->serializeBinaryBulkStatePrefix(settings, state); + serialization->serializeBinaryBulkStatePrefix(*column_string, settings, state); serialization->serializeBinaryBulkWithMultipleStreams(*column_string, 0, column_string->size(), settings, state); serialization->serializeBinaryBulkStateSuffix(settings, state); } diff --git a/src/DataTypes/getLeastSupertype.cpp b/src/DataTypes/getLeastSupertype.cpp index fee3cf1553e..82c8cadc6a1 100644 --- a/src/DataTypes/getLeastSupertype.cpp +++ b/src/DataTypes/getLeastSupertype.cpp @@ -447,8 +447,8 @@ DataTypePtr getLeastSupertype(const DataTypes & types) /// For String and FixedString, or for different FixedStrings, the common type is String. /// No other types are compatible with Strings. TODO Enums? 
{ - UInt32 have_string = type_ids.count(TypeIndex::String); - UInt32 have_fixed_string = type_ids.count(TypeIndex::FixedString); + size_t have_string = type_ids.count(TypeIndex::String); + size_t have_fixed_string = type_ids.count(TypeIndex::FixedString); if (have_string || have_fixed_string) { @@ -462,10 +462,10 @@ DataTypePtr getLeastSupertype(const DataTypes & types) /// For Date and DateTime/DateTime64, the common type is DateTime/DateTime64. No other types are compatible. { - UInt32 have_date = type_ids.count(TypeIndex::Date); - UInt32 have_date32 = type_ids.count(TypeIndex::Date32); - UInt32 have_datetime = type_ids.count(TypeIndex::DateTime); - UInt32 have_datetime64 = type_ids.count(TypeIndex::DateTime64); + size_t have_date = type_ids.count(TypeIndex::Date); + size_t have_date32 = type_ids.count(TypeIndex::Date32); + size_t have_datetime = type_ids.count(TypeIndex::DateTime); + size_t have_datetime64 = type_ids.count(TypeIndex::DateTime64); if (have_date || have_date32 || have_datetime || have_datetime64) { @@ -526,26 +526,24 @@ DataTypePtr getLeastSupertype(const DataTypes & types) /// Decimals { - UInt32 have_decimal32 = type_ids.count(TypeIndex::Decimal32); - UInt32 have_decimal64 = type_ids.count(TypeIndex::Decimal64); - UInt32 have_decimal128 = type_ids.count(TypeIndex::Decimal128); + size_t have_decimal32 = type_ids.count(TypeIndex::Decimal32); + size_t have_decimal64 = type_ids.count(TypeIndex::Decimal64); + size_t have_decimal128 = type_ids.count(TypeIndex::Decimal128); if (have_decimal32 || have_decimal64 || have_decimal128) { - UInt32 num_supported = have_decimal32 + have_decimal64 + have_decimal128; + size_t num_supported = have_decimal32 + have_decimal64 + have_decimal128; std::vector int_ids = {TypeIndex::Int8, TypeIndex::UInt8, TypeIndex::Int16, TypeIndex::UInt16, - TypeIndex::Int32, TypeIndex::UInt32, TypeIndex::Int64, TypeIndex::UInt64}; - std::vector num_ints(int_ids.size(), 0); + TypeIndex::Int32, TypeIndex::UInt32, TypeIndex::Int64, TypeIndex::UInt64}; TypeIndex max_int = TypeIndex::Nothing; - for (size_t i = 0; i < int_ids.size(); ++i) + for (auto int_id : int_ids) { - UInt32 num = type_ids.count(int_ids[i]); - num_ints[i] = num; + size_t num = type_ids.count(int_id); num_supported += num; if (num) - max_int = int_ids[i]; + max_int = int_id; } if (num_supported != type_ids.size()) diff --git a/src/DataTypes/transformTypesRecursively.cpp b/src/DataTypes/transformTypesRecursively.cpp index 3544c7e477d..57128966565 100644 --- a/src/DataTypes/transformTypesRecursively.cpp +++ b/src/DataTypes/transformTypesRecursively.cpp @@ -175,4 +175,10 @@ void transformTypesRecursively(DataTypes & types, std::function callback) +{ + DataTypes types = {type}; + transformTypesRecursively(types, [callback](auto & data_types){ callback(data_types[0]); }, {}); +} + } diff --git a/src/DataTypes/transformTypesRecursively.h b/src/DataTypes/transformTypesRecursively.h index 5cb8f095494..54e6f2102ad 100644 --- a/src/DataTypes/transformTypesRecursively.h +++ b/src/DataTypes/transformTypesRecursively.h @@ -14,4 +14,6 @@ namespace DB /// Function transform_complex_types will be applied to complex types (Array/Map/Tuple) after recursive call to their nested types. 
void transformTypesRecursively(DataTypes & types, std::function transform_simple_types, std::function transform_complex_types); +void callOnNestedSimpleTypes(DataTypePtr & type, std::function callback); + } diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index 796142884a3..88aa086fe65 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -716,8 +716,12 @@ ASTPtr DatabaseOnDisk::getCreateQueryFromStorage(const String & table_name, cons auto ast_storage = std::make_shared(); ast_storage->set(ast_storage->engine, ast_engine); - auto create_table_query = DB::getCreateQueryFromStorage(storage, ast_storage, false, - getContext()->getSettingsRef().max_parser_depth, throw_on_error); + unsigned max_parser_depth = static_cast(getContext()->getSettingsRef().max_parser_depth); + auto create_table_query = DB::getCreateQueryFromStorage(storage, + ast_storage, + false, + max_parser_depth, + throw_on_error); create_table_query->set(create_table_query->as()->comment, std::make_shared("SYSTEM TABLE is built on the fly.")); diff --git a/src/Databases/MySQL/DatabaseMySQL.cpp b/src/Databases/MySQL/DatabaseMySQL.cpp index 80301732ff8..5f59f6497e2 100644 --- a/src/Databases/MySQL/DatabaseMySQL.cpp +++ b/src/Databases/MySQL/DatabaseMySQL.cpp @@ -164,8 +164,13 @@ ASTPtr DatabaseMySQL::getCreateTableQueryImpl(const String & table_name, Context std::erase_if(storage_children, [&](const ASTPtr & element) { return element.get() == ast_storage->settings; }); ast_storage->settings = nullptr; } - auto create_table_query = DB::getCreateQueryFromStorage(storage, table_storage_define, true, - getContext()->getSettingsRef().max_parser_depth, throw_on_error); + + unsigned max_parser_depth = static_cast(getContext()->getSettingsRef().max_parser_depth); + auto create_table_query = DB::getCreateQueryFromStorage(storage, + table_storage_define, + true, + max_parser_depth, + throw_on_error); return create_table_query; } diff --git a/src/Databases/MySQL/MaterializedMySQLSyncThread.cpp b/src/Databases/MySQL/MaterializedMySQLSyncThread.cpp index 604dc220fed..ed9199a359f 100644 --- a/src/Databases/MySQL/MaterializedMySQLSyncThread.cpp +++ b/src/Databases/MySQL/MaterializedMySQLSyncThread.cpp @@ -478,8 +478,9 @@ static inline UInt32 randomNumber() { std::mt19937 rng; rng.seed(std::random_device()()); - std::uniform_int_distribution dist6(std::numeric_limits::min(), std::numeric_limits::max()); - return dist6(rng); + std::uniform_int_distribution dist6( + std::numeric_limits::min(), std::numeric_limits::max()); + return static_cast(dist6(rng)); } bool MaterializedMySQLSyncThread::prepareSynchronized(MaterializeMetadata & metadata) @@ -679,11 +680,11 @@ static void writeFieldsToColumn( if (write_data_to_null_map(value, index)) { if (value.getType() == Field::Types::UInt64) - casted_int32_column->insertValue(value.get()); + casted_int32_column->insertValue(static_cast(value.get())); else if (value.getType() == Field::Types::Int64) { /// For MYSQL_TYPE_INT24 - const Int32 & num = value.get(); + const Int32 & num = static_cast(value.get()); casted_int32_column->insertValue(num & 0x800000 ? 
num | 0xFF000000 : num); } else diff --git a/src/Databases/SQLite/DatabaseSQLite.cpp b/src/Databases/SQLite/DatabaseSQLite.cpp index 44a392ce1f2..83c5ebe00d3 100644 --- a/src/Databases/SQLite/DatabaseSQLite.cpp +++ b/src/Databases/SQLite/DatabaseSQLite.cpp @@ -192,8 +192,10 @@ ASTPtr DatabaseSQLite::getCreateTableQueryImpl(const String & table_name, Contex /// Add table_name to engine arguments storage_engine_arguments->children.insert(storage_engine_arguments->children.begin() + 1, std::make_shared(table_id.table_name)); + unsigned max_parser_depth = static_cast(getContext()->getSettingsRef().max_parser_depth); auto create_table_query = DB::getCreateQueryFromStorage(storage, table_storage_define, true, - getContext()->getSettingsRef().max_parser_depth, throw_on_error); + max_parser_depth, + throw_on_error); return create_table_query; } diff --git a/src/Dictionaries/CassandraSource.cpp b/src/Dictionaries/CassandraSource.cpp index fd5982443fa..878921c53d0 100644 --- a/src/Dictionaries/CassandraSource.cpp +++ b/src/Dictionaries/CassandraSource.cpp @@ -32,7 +32,7 @@ CassandraSource::CassandraSource( , has_more_pages(cass_true) { description.init(sample_block); - cassandraCheck(cass_statement_set_paging_size(statement, max_block_size)); + cassandraCheck(cass_statement_set_paging_size(statement, static_cast(max_block_size))); } void CassandraSource::insertValue(IColumn & column, ValueType type, const CassValue * cass_value) diff --git a/src/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.cpp b/src/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.cpp index f91bbaa12a6..68bd6142416 100644 --- a/src/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.cpp +++ b/src/Dictionaries/Embedded/GeodataProviders/HierarchyFormatReader.cpp @@ -31,7 +31,7 @@ bool RegionsHierarchyFormatReader::readNext(RegionEntry & entry) UInt64 population_big = 0; DB::readIntText(population_big, *input); population = population_big > std::numeric_limits::max() ? 
std::numeric_limits::max() - : population_big; + : static_cast(population_big); } DB::assertChar('\n', *input); diff --git a/src/Dictionaries/RedisSource.cpp b/src/Dictionaries/RedisSource.cpp index 4208d5fa63b..50a73e52010 100644 --- a/src/Dictionaries/RedisSource.cpp +++ b/src/Dictionaries/RedisSource.cpp @@ -109,7 +109,7 @@ namespace DB readDateTimeText(time, in); if (time < 0) time = 0; - assert_cast(column).insertValue(time); + assert_cast(column).insertValue(static_cast(time)); break; } case ValueType::vtUUID: diff --git a/src/Dictionaries/SSDCacheDictionaryStorage.h b/src/Dictionaries/SSDCacheDictionaryStorage.h index 5f73352a4c9..428b4321ffd 100644 --- a/src/Dictionaries/SSDCacheDictionaryStorage.h +++ b/src/Dictionaries/SSDCacheDictionaryStorage.h @@ -675,7 +675,7 @@ public: pointers.push_back(&requests.back()); } - AIOContext aio_context(read_from_file_buffer_blocks_size); + AIOContext aio_context(static_cast(read_from_file_buffer_blocks_size)); PaddedPODArray processed(requests.size(), false); PaddedPODArray events; @@ -735,7 +735,8 @@ public: ++to_pop; /// add new io tasks - const int new_tasks_count = std::min(read_from_file_buffer_blocks_size - (to_push - to_pop), requests.size() - to_push); + const int new_tasks_count = static_cast(std::min( + read_from_file_buffer_blocks_size - (to_push - to_pop), requests.size() - to_push)); int pushed = 0; while (new_tasks_count > 0 && (pushed = io_submit(aio_context.ctx, new_tasks_count, &pointers[to_push])) <= 0) diff --git a/src/Disks/DiskDecorator.cpp b/src/Disks/DiskDecorator.cpp index 73540aaa0ab..af17289c8af 100644 --- a/src/Disks/DiskDecorator.cpp +++ b/src/Disks/DiskDecorator.cpp @@ -241,6 +241,11 @@ DiskObjectStoragePtr DiskDecorator::createDiskObjectStorage() return delegate->createDiskObjectStorage(); } +ObjectStoragePtr DiskDecorator::getObjectStorage() +{ + return delegate->getObjectStorage(); +} + DiskPtr DiskDecorator::getNestedDisk() const { if (const auto * decorator = dynamic_cast(delegate.get())) diff --git a/src/Disks/DiskDecorator.h b/src/Disks/DiskDecorator.h index dcd12ab4bbf..25278f905ba 100644 --- a/src/Disks/DiskDecorator.h +++ b/src/Disks/DiskDecorator.h @@ -89,6 +89,7 @@ public: void getRemotePathsRecursive(const String & path, std::vector & paths_map) override { return delegate->getRemotePathsRecursive(path, paths_map); } DiskObjectStoragePtr createDiskObjectStorage() override; + ObjectStoragePtr getObjectStorage() override; NameSet getCacheLayersNames() const override { return delegate->getCacheLayersNames(); } MetadataStoragePtr getMetadataStorage() override { return delegate->getMetadataStorage(); } diff --git a/src/Disks/IDisk.cpp b/src/Disks/IDisk.cpp index 3704a511478..8a6bea2565b 100644 --- a/src/Disks/IDisk.cpp +++ b/src/Disks/IDisk.cpp @@ -24,13 +24,13 @@ bool IDisk::isDirectoryEmpty(const String & path) const return !iterateDirectory(path)->isValid(); } -void IDisk::copyFile(const String & from_file_path, IDisk & to_disk, const String & to_file_path) +void IDisk::copyFile(const String & from_file_path, IDisk & to_disk, const String & to_file_path, const WriteSettings & settings) /// NOLINT { LOG_DEBUG(&Poco::Logger::get("IDisk"), "Copying from {} (path: {}) {} to {} (path: {}) {}.", getName(), getPath(), from_file_path, to_disk.getName(), to_disk.getPath(), to_file_path); auto in = readFile(from_file_path); - auto out = to_disk.writeFile(to_file_path); + auto out = to_disk.writeFile(to_file_path, DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Rewrite, settings); copyData(*in, *out); out->finalize(); } @@ 
-56,15 +56,15 @@ void IDisk::removeSharedFiles(const RemoveBatchRequest & files, bool keep_all_ba using ResultsCollector = std::vector>; -void asyncCopy(IDisk & from_disk, String from_path, IDisk & to_disk, String to_path, Executor & exec, ResultsCollector & results, bool copy_root_dir) +void asyncCopy(IDisk & from_disk, String from_path, IDisk & to_disk, String to_path, Executor & exec, ResultsCollector & results, bool copy_root_dir, const WriteSettings & settings) { if (from_disk.isFile(from_path)) { auto result = exec.execute( - [&from_disk, from_path, &to_disk, to_path]() + [&from_disk, from_path, &to_disk, to_path, &settings]() { setThreadName("DiskCopier"); - from_disk.copyFile(from_path, to_disk, fs::path(to_path) / fileName(from_path)); + from_disk.copyFile(from_path, to_disk, fs::path(to_path) / fileName(from_path), settings); }); results.push_back(std::move(result)); @@ -80,7 +80,7 @@ void asyncCopy(IDisk & from_disk, String from_path, IDisk & to_disk, String to_p } for (auto it = from_disk.iterateDirectory(from_path); it->isValid(); it->next()) - asyncCopy(from_disk, it->path(), to_disk, dest, exec, results, true); + asyncCopy(from_disk, it->path(), to_disk, dest, exec, results, true, settings); } } @@ -89,7 +89,12 @@ void IDisk::copyThroughBuffers(const String & from_path, const std::shared_ptrgetExecutor(); ResultsCollector results; - asyncCopy(*this, from_path, *to_disk, to_path, exec, results, copy_root_dir); + WriteSettings settings; + /// Disable parallel write. We already copy in parallel. + /// Avoid high memory usage. See test_s3_zero_copy_ttl/test.py::test_move_and_s3_memory_usage + settings.s3_allow_parallel_part_upload = false; + + asyncCopy(*this, from_path, *to_disk, to_path, exec, results, copy_root_dir, settings); for (auto & result : results) result.wait(); diff --git a/src/Disks/IDisk.h b/src/Disks/IDisk.h index ba843235345..66a5c55f7f7 100644 --- a/src/Disks/IDisk.h +++ b/src/Disks/IDisk.h @@ -181,7 +181,11 @@ public: virtual void copyDirectoryContent(const String & from_dir, const std::shared_ptr & to_disk, const String & to_dir); /// Copy file `from_file_path` to `to_file_path` located at `to_disk`. - virtual void copyFile(const String & from_file_path, IDisk & to_disk, const String & to_file_path); + virtual void copyFile( /// NOLINT + const String & from_file_path, + IDisk & to_disk, + const String & to_file_path, + const WriteSettings & settings = {}); /// List files at `path` and add their names to `file_names` virtual void listFiles(const String & path, std::vector & file_names) const = 0; @@ -366,6 +370,14 @@ public: /// Return current disk revision. virtual UInt64 getRevision() const { return 0; } + virtual ObjectStoragePtr getObjectStorage() + { + throw Exception( + ErrorCodes::NOT_IMPLEMENTED, + "Method getObjectStorage() is not implemented for disk type: {}", + getDataSourceDescription().type); + } + /// Create disk object storage according to disk type. /// For example for DiskLocal create DiskObjectStorage(LocalObjectStorage), /// for DiskObjectStorage create just a copy. 
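A note on the new WriteSettings parameter threaded through IDisk::copyFile above: copyThroughBuffers already copies files in parallel, so it explicitly turns off per-part parallel S3 uploads to avoid nested parallelism and the extra memory it costs. A minimal caller-side sketch of the extended signature follows; the disk references, the paths and the WriteSettings include path are assumptions for illustration, not part of this patch.

#include <Disks/IDisk.h>
#include <IO/WriteSettings.h>   /// assumed location of DB::WriteSettings

using namespace DB;

/// Copy a single file between two disks without per-part parallel upload.
void copySingleFile(IDisk & from_disk, IDisk & to_disk)
{
    WriteSettings settings;
    /// The caller (e.g. copyThroughBuffers) already copies files in parallel,
    /// so avoid a second level of parallelism inside each S3 multipart upload.
    settings.s3_allow_parallel_part_upload = false;

    from_disk.copyFile("data/part.bin", to_disk, "data/part.bin", settings);
}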
diff --git a/src/Disks/IO/AsynchronousReadIndirectBufferFromRemoteFS.h b/src/Disks/IO/AsynchronousReadIndirectBufferFromRemoteFS.h index ba041efe24a..899d06b4ed7 100644 --- a/src/Disks/IO/AsynchronousReadIndirectBufferFromRemoteFS.h +++ b/src/Disks/IO/AsynchronousReadIndirectBufferFromRemoteFS.h @@ -68,7 +68,7 @@ private: IAsynchronousReader & reader; - Int32 priority; + Int64 priority; std::shared_ptr impl; diff --git a/src/Disks/IStoragePolicy.cpp b/src/Disks/IStoragePolicy.cpp index 2ba6df4be8f..c843ee11563 100644 --- a/src/Disks/IStoragePolicy.cpp +++ b/src/Disks/IStoragePolicy.cpp @@ -31,4 +31,33 @@ VolumePtr IStoragePolicy::getVolumeByName(const String & volume_name) const return volume; } +size_t IStoragePolicy::getVolumeIndexByDiskName(const String & disk_name) const +{ + auto index = tryGetVolumeIndexByDiskName(disk_name); + if (!index) + throw Exception(ErrorCodes::UNKNOWN_DISK, + "No disk {} in policy {}", backQuote(disk_name), backQuote(getName())); + + return *index; +} + +VolumePtr IStoragePolicy::tryGetVolumeByDiskName(const String & disk_name) const +{ + auto index = tryGetVolumeIndexByDiskName(disk_name); + if (!index) + return nullptr; + + return getVolume(*index); +} + +VolumePtr IStoragePolicy::getVolumeByDiskName(const String & disk_name) const +{ + auto volume = tryGetVolumeByDiskName(disk_name); + if (!volume) + throw Exception(ErrorCodes::UNKNOWN_DISK, + "No disk {} in policy {}", backQuote(disk_name), backQuote(getName())); + + return volume; +} + } diff --git a/src/Disks/IStoragePolicy.h b/src/Disks/IStoragePolicy.h index 8d14a26691b..a6a5fe5f692 100644 --- a/src/Disks/IStoragePolicy.h +++ b/src/Disks/IStoragePolicy.h @@ -4,6 +4,7 @@ #include #include +#include #include namespace DB @@ -55,12 +56,15 @@ public: /// Get volume by index. virtual VolumePtr getVolume(size_t index) const = 0; virtual VolumePtr tryGetVolumeByName(const String & volume_name) const = 0; - virtual VolumePtr tryGetVolumeByDisk(const DiskPtr & disk_ptr) const = 0; VolumePtr getVolumeByName(const String & volume_name) const; /// Checks if storage policy can be replaced by another one. virtual void checkCompatibleWith(const StoragePolicyPtr & new_storage_policy) const = 0; - /// Find volume index, which contains disk - virtual size_t getVolumeIndexByDisk(const DiskPtr & disk_ptr) const = 0; + /// Finds a volume index, which contains disk + virtual std::optional tryGetVolumeIndexByDiskName(const String & disk_name) const = 0; + size_t getVolumeIndexByDiskName(const String & disk_name) const; + /// Finds a volume which contains a specified disk. 
+ VolumePtr tryGetVolumeByDiskName(const String & disk_name) const; + VolumePtr getVolumeByDiskName(const String & disk_name) const; /// Check if we have any volume with stopped merges virtual bool hasAnyVolumeWithDisabledMerges() const = 0; virtual bool containsVolume(const String & volume_name) const = 0; diff --git a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp index b3dcfdafa9e..c3549701ec1 100644 --- a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp +++ b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp @@ -141,7 +141,7 @@ std::unique_ptr AzureObjectStorage::writeObject( /// NO return std::make_unique(std::move(buffer), std::move(finalize_callback), object.absolute_path); } -void AzureObjectStorage::listPrefix(const std::string & path, RelativePathsWithSize & children) const +void AzureObjectStorage::findAllFiles(const std::string & path, RelativePathsWithSize & children) const { auto client_ptr = client.get(); diff --git a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h index 47ac0d6badd..6fd41dae2ec 100644 --- a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h +++ b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h @@ -84,7 +84,7 @@ public: size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, const WriteSettings & write_settings = {}) override; - void listPrefix(const std::string & path, RelativePathsWithSize & children) const override; + void findAllFiles(const std::string & path, RelativePathsWithSize & children) const override; /// Remove file. Throws exception if file doesn't exists or it's a directory. void removeObject(const StoredObject & object) override; diff --git a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp index 7e829847846..f3d3f049dc1 100644 --- a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp +++ b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp @@ -282,9 +282,9 @@ std::unique_ptr CachedObjectStorage::cloneObjectStorage( return object_storage->cloneObjectStorage(new_namespace, config, config_prefix, context); } -void CachedObjectStorage::listPrefix(const std::string & path, RelativePathsWithSize & children) const +void CachedObjectStorage::findAllFiles(const std::string & path, RelativePathsWithSize & children) const { - object_storage->listPrefix(path, children); + object_storage->findAllFiles(path, children); } ObjectMetadata CachedObjectStorage::getObjectMetadata(const std::string & path) const diff --git a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h index 68ded61a9f1..64e6eed45bb 100644 --- a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h +++ b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h @@ -72,7 +72,7 @@ public: const std::string & config_prefix, ContextPtr context) override; - void listPrefix(const std::string & path, RelativePathsWithSize & children) const override; + void findAllFiles(const std::string & path, RelativePathsWithSize & children) const override; ObjectMetadata getObjectMetadata(const std::string & path) const override; diff --git a/src/Disks/ObjectStorages/DiskObjectStorage.cpp b/src/Disks/ObjectStorages/DiskObjectStorage.cpp index fb13ed7eec8..8814d12d6eb 100644 --- a/src/Disks/ObjectStorages/DiskObjectStorage.cpp +++ 
b/src/Disks/ObjectStorages/DiskObjectStorage.cpp @@ -82,6 +82,11 @@ DiskTransactionPtr DiskObjectStorage::createTransaction() return std::make_shared(*this); } +ObjectStoragePtr DiskObjectStorage::getObjectStorage() +{ + return object_storage; +} + DiskTransactionPtr DiskObjectStorage::createObjectStorageTransaction() { return std::make_shared( diff --git a/src/Disks/ObjectStorages/DiskObjectStorage.h b/src/Disks/ObjectStorages/DiskObjectStorage.h index 14fb84d7a15..333fcb258e4 100644 --- a/src/Disks/ObjectStorages/DiskObjectStorage.h +++ b/src/Disks/ObjectStorages/DiskObjectStorage.h @@ -166,6 +166,8 @@ public: UInt64 getRevision() const override; + ObjectStoragePtr getObjectStorage() override; + DiskObjectStoragePtr createDiskObjectStorage() override; bool supportsCache() const override; diff --git a/src/Disks/ObjectStorages/DiskObjectStorageMetadata.cpp b/src/Disks/ObjectStorages/DiskObjectStorageMetadata.cpp index 56cc20098ba..dc4898559c0 100644 --- a/src/Disks/ObjectStorages/DiskObjectStorageMetadata.cpp +++ b/src/Disks/ObjectStorages/DiskObjectStorageMetadata.cpp @@ -67,14 +67,6 @@ void DiskObjectStorageMetadata::deserialize(ReadBuffer & buf) } } -void DiskObjectStorageMetadata::createFromSingleObject(const std::string & relative_path, size_t bytes_size, size_t ref_count_, bool read_only_) -{ - storage_objects.emplace_back(relative_path, bytes_size); - total_size = bytes_size; - ref_count = ref_count_; - read_only = read_only_; -} - void DiskObjectStorageMetadata::deserializeFromString(const std::string & data) { ReadBufferFromString buf(data); diff --git a/src/Disks/ObjectStorages/DiskObjectStorageMetadata.h b/src/Disks/ObjectStorages/DiskObjectStorageMetadata.h index 09e0f4ee85b..d3ea5795dd3 100644 --- a/src/Disks/ObjectStorages/DiskObjectStorageMetadata.h +++ b/src/Disks/ObjectStorages/DiskObjectStorageMetadata.h @@ -50,7 +50,6 @@ public: void deserialize(ReadBuffer & buf); void deserializeFromString(const std::string & data); - void createFromSingleObject(const std::string & relative_path, size_t bytes_size, size_t ref_count_, bool is_read_only_); void serialize(WriteBuffer & buf, bool sync) const; std::string serializeToString() const; diff --git a/src/Disks/ObjectStorages/DiskObjectStorageRemoteMetadataRestoreHelper.cpp b/src/Disks/ObjectStorages/DiskObjectStorageRemoteMetadataRestoreHelper.cpp index 65f0d24035a..4ea42616ba2 100644 --- a/src/Disks/ObjectStorages/DiskObjectStorageRemoteMetadataRestoreHelper.cpp +++ b/src/Disks/ObjectStorages/DiskObjectStorageRemoteMetadataRestoreHelper.cpp @@ -390,7 +390,7 @@ void DiskObjectStorageRemoteMetadataRestoreHelper::restoreFiles(IObjectStorage * }; RelativePathsWithSize children; - source_object_storage->listPrefix(restore_information.source_path, children); + source_object_storage->findAllFiles(restore_information.source_path, children); restore_files(children); @@ -540,7 +540,7 @@ void DiskObjectStorageRemoteMetadataRestoreHelper::restoreFileOperations(IObject }; RelativePathsWithSize children; - source_object_storage->listPrefix(restore_information.source_path + "operations/", children); + source_object_storage->findAllFiles(restore_information.source_path + "operations/", children); restore_file_operations(children); if (restore_information.detached) diff --git a/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp b/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp index 2a75668dd76..b55fb2c4fa5 100644 --- a/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp +++ 
b/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp @@ -599,7 +599,7 @@ std::unique_ptr DiskObjectStorageTransaction::writeFile auto write_operation = std::make_unique(object_storage, metadata_storage, object); std::function create_metadata_callback; - if (autocommit) + if (autocommit) { create_metadata_callback = [tx = shared_from_this(), mode, path, blob_name] (size_t count) { diff --git a/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.cpp b/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.cpp index 2f82458ecd8..80c4bb2bc64 100644 --- a/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.cpp +++ b/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.cpp @@ -101,18 +101,6 @@ std::unique_ptr HDFSObjectStorage::writeObject( /// NOL } -void HDFSObjectStorage::listPrefix(const std::string & path, RelativePathsWithSize & children) const -{ - const size_t begin_of_path = path.find('/', path.find("//") + 2); - int32_t num_entries; - auto * files_list = hdfsListDirectory(hdfs_fs.get(), path.substr(begin_of_path).c_str(), &num_entries); - if (num_entries == -1) - throw Exception(ErrorCodes::HDFS_ERROR, "HDFSDelete failed with path: " + path); - - for (int32_t i = 0; i < num_entries; ++i) - children.emplace_back(files_list[i].mName, files_list[i].mSize); -} - /// Remove file. Throws exception if file doesn't exists or it's a directory. void HDFSObjectStorage::removeObject(const StoredObject & object) { diff --git a/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.h b/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.h index 82cddfb9122..4064a5c5b7f 100644 --- a/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.h +++ b/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.h @@ -85,8 +85,6 @@ public: size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, const WriteSettings & write_settings = {}) override; - void listPrefix(const std::string & path, RelativePathsWithSize & children) const override; - /// Remove file. Throws exception if file doesn't exists or it's a directory. 
void removeObject(const StoredObject & object) override; diff --git a/src/Disks/ObjectStorages/IMetadataStorage.h b/src/Disks/ObjectStorages/IMetadataStorage.h index 3d6c772157d..597d7744c78 100644 --- a/src/Disks/ObjectStorages/IMetadataStorage.h +++ b/src/Disks/ObjectStorages/IMetadataStorage.h @@ -11,10 +11,16 @@ #include #include #include +#include namespace DB { +namespace ErrorCodes +{ + extern const int NOT_IMPLEMENTED; +} + class IMetadataStorage; /// Tries to provide some "transactions" interface, which allow @@ -33,32 +39,71 @@ public: /// General purpose methods /// Write metadata string to file - virtual void writeStringToFile(const std::string & path, const std::string & data) = 0; + virtual void writeStringToFile(const std::string & /* path */, const std::string & /* data */) + { + throwNotImplemented(); + } - virtual void setLastModified(const std::string & path, const Poco::Timestamp & timestamp) = 0; + virtual void setLastModified(const std::string & /* path */, const Poco::Timestamp & /* timestamp */) + { + throwNotImplemented(); + } virtual bool supportsChmod() const = 0; - virtual void chmod(const String & path, mode_t mode) = 0; + virtual void chmod(const String & /* path */, mode_t /* mode */) + { + throwNotImplemented(); + } - virtual void setReadOnly(const std::string & path) = 0; + virtual void setReadOnly(const std::string & /* path */) + { + throwNotImplemented(); + } - virtual void unlinkFile(const std::string & path) = 0; + virtual void unlinkFile(const std::string & /* path */) + { + throwNotImplemented(); + } - virtual void createDirectory(const std::string & path) = 0; + virtual void createDirectory(const std::string & /* path */) + { + throwNotImplemented(); + } - virtual void createDirectoryRecursive(const std::string & path) = 0; + virtual void createDirectoryRecursive(const std::string & /* path */) + { + throwNotImplemented(); + } - virtual void removeDirectory(const std::string & path) = 0; + virtual void removeDirectory(const std::string & /* path */) + { + throwNotImplemented(); + } - virtual void removeRecursive(const std::string & path) = 0; + virtual void removeRecursive(const std::string & /* path */) + { + throwNotImplemented(); + } - virtual void createHardLink(const std::string & path_from, const std::string & path_to) = 0; + virtual void createHardLink(const std::string & /* path_from */, const std::string & /* path_to */) + { + throwNotImplemented(); + } - virtual void moveFile(const std::string & path_from, const std::string & path_to) = 0; + virtual void moveFile(const std::string & /* path_from */, const std::string & /* path_to */) + { + throwNotImplemented(); + } - virtual void moveDirectory(const std::string & path_from, const std::string & path_to) = 0; + virtual void moveDirectory(const std::string & /* path_from */, const std::string & /* path_to */) + { + throwNotImplemented(); + } - virtual void replaceFile(const std::string & path_from, const std::string & path_to) = 0; + virtual void replaceFile(const std::string & /* path_from */, const std::string & /* path_to */) + { + throwNotImplemented(); + } /// Metadata related methods @@ -69,7 +114,10 @@ public: virtual void createMetadataFile(const std::string & path, const std::string & blob_name, uint64_t size_in_bytes) = 0; /// Add to new blob to metadata file (way to implement appends) - virtual void addBlobToMetadata(const std::string & path, const std::string & blob_name, uint64_t size_in_bytes) = 0; + virtual void addBlobToMetadata(const std::string & /* path */, const 
std::string & /* blob_name */, uint64_t /* size_in_bytes */) + { + throwNotImplemented(); + } /// Unlink metadata file and do something special if required /// By default just remove file (unlink file). @@ -79,6 +127,12 @@ public: } virtual ~IMetadataTransaction() = default; + +private: + [[noreturn]] static void throwNotImplemented() + { + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Operation is not implemented"); + } }; using MetadataTransactionPtr = std::shared_ptr; @@ -106,12 +160,18 @@ public: virtual Poco::Timestamp getLastModified(const std::string & path) const = 0; - virtual time_t getLastChanged(const std::string & path) const = 0; + virtual time_t getLastChanged(const std::string & /* path */) const + { + throwNotImplemented(); + } virtual bool supportsChmod() const = 0; virtual bool supportsStat() const = 0; - virtual struct stat stat(const String & path) const = 0; + virtual struct stat stat(const String & /* path */) const + { + throwNotImplemented(); + } virtual std::vector listDirectory(const std::string & path) const = 0; @@ -120,20 +180,32 @@ public: virtual uint32_t getHardlinkCount(const std::string & path) const = 0; /// Read metadata file to string from path - virtual std::string readFileToString(const std::string & path) const = 0; + virtual std::string readFileToString(const std::string & /* path */) const + { + throwNotImplemented(); + } virtual ~IMetadataStorage() = default; /// ==== More specific methods. Previous were almost general purpose. ==== /// Read multiple metadata files into strings and return mapping from file_path -> metadata - virtual std::unordered_map getSerializedMetadata(const std::vector & file_paths) const = 0; + virtual std::unordered_map getSerializedMetadata(const std::vector & /* file_paths */) const + { + throwNotImplemented(); + } /// Return object information (absolute_path, bytes_size, ...) for metadata path. /// object_storage_path is absolute. virtual StoredObjects getStorageObjects(const std::string & path) const = 0; virtual std::string getObjectStorageRootPath() const = 0; + +private: + [[noreturn]] static void throwNotImplemented() + { + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Operation is not implemented"); + } }; using MetadataStoragePtr = std::shared_ptr; diff --git a/src/Disks/ObjectStorages/IObjectStorage.cpp b/src/Disks/ObjectStorages/IObjectStorage.cpp index 9d6610ee326..3f8ac566603 100644 --- a/src/Disks/ObjectStorages/IObjectStorage.cpp +++ b/src/Disks/ObjectStorages/IObjectStorage.cpp @@ -14,6 +14,17 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } +void IObjectStorage::findAllFiles(const std::string &, RelativePathsWithSize &) const +{ + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "findAllFiles() is not supported"); +} +void IObjectStorage::getDirectoryContents(const std::string &, + RelativePathsWithSize &, + std::vector &) const +{ + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "getDirectoryContents() is not supported"); +} + IAsynchronousReader & IObjectStorage::getThreadPoolReader() { auto context = Context::getGlobalContextInstance(); diff --git a/src/Disks/ObjectStorages/IObjectStorage.h b/src/Disks/ObjectStorages/IObjectStorage.h index 52e1a2cb270..9451ae31b07 100644 --- a/src/Disks/ObjectStorages/IObjectStorage.h +++ b/src/Disks/ObjectStorages/IObjectStorage.h @@ -65,8 +65,32 @@ public: /// Object exists or not virtual bool exists(const StoredObject & object) const = 0; - /// List on prefix, return children (relative paths) with their sizes. 
- virtual void listPrefix(const std::string & path, RelativePathsWithSize & children) const = 0; + /// List all objects with specific prefix. + /// + /// For example if you do this over filesystem, you should skip folders and + /// return files only, so something like on local filesystem: + /// + /// find . -type f + /// + /// @param children - out files (relative paths) with their sizes. + /// + /// NOTE: It makes sense only for real object storages (S3, Azure), since + /// it is used only for one of the following: + /// - send_metadata (to restore metadata) + /// - see DiskObjectStorage::restoreMetadataIfNeeded() + /// - MetadataStorageFromPlainObjectStorage - only for s3_plain disk + virtual void findAllFiles(const std::string & path, RelativePathsWithSize & children) const; + + /// Analog of directory content for object storage (object storage does not + /// have "directory" definition, but it can be emulated with usage of + /// "delimiter"), so this is analog of: + /// + /// find . -maxdepth 1 $path + /// + /// Return files in @files and directories in @directories + virtual void getDirectoryContents(const std::string & path, + RelativePathsWithSize & files, + std::vector & directories) const; /// Get object metadata if supported. It should be possible to receive /// at least size of object diff --git a/src/Disks/ObjectStorages/LocalObjectStorage.cpp b/src/Disks/ObjectStorages/LocalObjectStorage.cpp index dbb3a7c2aba..67e2cc2d74b 100644 --- a/src/Disks/ObjectStorages/LocalObjectStorage.cpp +++ b/src/Disks/ObjectStorages/LocalObjectStorage.cpp @@ -104,13 +104,6 @@ std::unique_ptr LocalObjectStorage::writeObject( /// NO return std::make_unique(path, buf_size, flags); } -void LocalObjectStorage::listPrefix(const std::string & path, RelativePathsWithSize & children) const -{ - fs::directory_iterator end_it; - for (auto it = fs::directory_iterator(path); it != end_it; ++it) - children.emplace_back(it->path().filename(), it->file_size()); -} - void LocalObjectStorage::removeObject(const StoredObject & object) { /// For local object storage files are actually removed when "metadata" is removed. 
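To make the listPrefix split above concrete: findAllFiles() walks the whole prefix and returns files only (the `find . -type f` analogy from the comment), while getDirectoryContents() returns exactly one level, with pseudo-directories reconstructed from the key delimiter. A caller-side sketch under the interface declared in this diff; the object storage instance and the "store/abc/" prefix are hypothetical.

#include <string>
#include <vector>
#include <Disks/ObjectStorages/IObjectStorage.h>

using namespace DB;

void listExamples(const IObjectStorage & object_storage)
{
    /// Recursive listing, files only: analog of `find <prefix> -type f`.
    RelativePathsWithSize all_files;
    object_storage.findAllFiles("store/abc/", all_files);

    /// Single level: analog of `find <prefix> -maxdepth 1`; "directories"
    /// are emulated from the '/' delimiter in object keys.
    RelativePathsWithSize files;
    std::vector<std::string> directories;
    object_storage.getDirectoryContents("store/abc/", files, directories);
}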
diff --git a/src/Disks/ObjectStorages/LocalObjectStorage.h b/src/Disks/ObjectStorages/LocalObjectStorage.h index 0e4c71b4a47..b04e3fa6285 100644 --- a/src/Disks/ObjectStorages/LocalObjectStorage.h +++ b/src/Disks/ObjectStorages/LocalObjectStorage.h @@ -45,8 +45,6 @@ public: size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, const WriteSettings & write_settings = {}) override; - void listPrefix(const std::string & path, RelativePathsWithSize & children) const override; - void removeObject(const StoredObject & object) override; void removeObjects(const StoredObjects & objects) override; diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp index 35cd3be15d2..259f6e01fd7 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp @@ -12,7 +12,6 @@ namespace DB namespace ErrorCodes { - extern const int NOT_IMPLEMENTED; extern const int LOGICAL_ERROR; } @@ -33,194 +32,102 @@ const std::string & MetadataStorageFromPlainObjectStorage::getPath() const { return object_storage_root_path; } +std::filesystem::path MetadataStorageFromPlainObjectStorage::getAbsolutePath(const std::string & path) const +{ + return fs::path(object_storage_root_path) / path; +} bool MetadataStorageFromPlainObjectStorage::exists(const std::string & path) const { - auto object = StoredObject::create(*object_storage, fs::path(object_storage_root_path) / path); + auto object = StoredObject::create(*object_storage, getAbsolutePath(path)); return object_storage->exists(object); } bool MetadataStorageFromPlainObjectStorage::isFile(const std::string & path) const { /// NOTE: This check is inaccurate and has excessive API calls - return !isDirectory(path) && exists(path); + return exists(path) && !isDirectory(path); } bool MetadataStorageFromPlainObjectStorage::isDirectory(const std::string & path) const { - std::string directory = path; + std::string directory = getAbsolutePath(path); trimRight(directory); directory += "/"; /// NOTE: This check is far from ideal, since it work only if the directory /// really has files, and has excessive API calls - RelativePathsWithSize children; - object_storage->listPrefix(directory, children); - return !children.empty(); -} - -Poco::Timestamp MetadataStorageFromPlainObjectStorage::getLastModified(const std::string &) const -{ - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "getLastModified is not implemented for MetadataStorageFromPlainObjectStorage"); -} - -struct stat MetadataStorageFromPlainObjectStorage::stat(const std::string &) const -{ - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "stat is not implemented for MetadataStorageFromPlainObjectStorage"); -} - -time_t MetadataStorageFromPlainObjectStorage::getLastChanged(const std::string &) const -{ - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "getLastChanged is not implemented for MetadataStorageFromPlainObjectStorage"); + RelativePathsWithSize files; + std::vector directories; + object_storage->getDirectoryContents(directory, files, directories); + return !files.empty() || !directories.empty(); } uint64_t MetadataStorageFromPlainObjectStorage::getFileSize(const String & path) const { RelativePathsWithSize children; - object_storage->listPrefix(path, children); + object_storage->findAllFiles(getAbsolutePath(path), children); if (children.empty()) return 0; if (children.size() != 1) - throw Exception(ErrorCodes::LOGICAL_ERROR, "listPrefix() return multiple paths 
({}) for {}", children.size(), path); + throw Exception(ErrorCodes::LOGICAL_ERROR, "findAllFiles() return multiple paths ({}) for {}", children.size(), path); return children.front().bytes_size; } std::vector MetadataStorageFromPlainObjectStorage::listDirectory(const std::string & path) const { - RelativePathsWithSize children; - object_storage->listPrefix(path, children); + RelativePathsWithSize files; + std::vector directories; + object_storage->getDirectoryContents(getAbsolutePath(path), files, directories); std::vector result; - for (const auto & path_size : children) - { + for (const auto & path_size : files) result.push_back(path_size.relative_path); - } + for (const auto & directory : directories) + result.push_back(directory); return result; } DirectoryIteratorPtr MetadataStorageFromPlainObjectStorage::iterateDirectory(const std::string & path) const { - /// NOTE: this is not required for BACKUP/RESTORE, but this is a first step - /// towards MergeTree on plain S3. + /// Required for MergeTree auto paths = listDirectory(path); std::vector fs_paths(paths.begin(), paths.end()); return std::make_unique(std::move(fs_paths)); } -std::string MetadataStorageFromPlainObjectStorage::readFileToString(const std::string &) const -{ - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "readFileToString is not implemented for MetadataStorageFromPlainObjectStorage"); -} - -std::unordered_map MetadataStorageFromPlainObjectStorage::getSerializedMetadata(const std::vector &) const -{ - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "getSerializedMetadata is not implemented for MetadataStorageFromPlainObjectStorage"); -} - StoredObjects MetadataStorageFromPlainObjectStorage::getStorageObjects(const std::string & path) const { std::string blob_name = object_storage->generateBlobNameForPath(path); - - std::string object_path = fs::path(object_storage_root_path) / blob_name; - size_t object_size = getFileSize(object_path); - - auto object = StoredObject::create(*object_storage, object_path, object_size, /* exists */true); + size_t object_size = getFileSize(blob_name); + auto object = StoredObject::create(*object_storage, getAbsolutePath(blob_name), object_size, /* exists */true); return {std::move(object)}; } -uint32_t MetadataStorageFromPlainObjectStorage::getHardlinkCount(const std::string &) const -{ - return 1; -} - const IMetadataStorage & MetadataStorageFromPlainObjectStorageTransaction::getStorageForNonTransactionalReads() const { return metadata_storage; } -void MetadataStorageFromPlainObjectStorageTransaction::writeStringToFile(const std::string &, const std::string & /* data */) -{ - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "writeStringToFile is not implemented for MetadataStorageFromPlainObjectStorage"); -} - -void MetadataStorageFromPlainObjectStorageTransaction::setLastModified(const std::string &, const Poco::Timestamp &) -{ - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "setLastModified is not implemented for MetadataStorageFromPlainObjectStorage"); -} - void MetadataStorageFromPlainObjectStorageTransaction::unlinkFile(const std::string & path) { - auto object = StoredObject::create(*metadata_storage.object_storage, fs::path(metadata_storage.object_storage_root_path) / path); + auto object = StoredObject::create(*metadata_storage.object_storage, metadata_storage.getAbsolutePath(path)); metadata_storage.object_storage->removeObject(object); } -void MetadataStorageFromPlainObjectStorageTransaction::removeRecursive(const std::string &) -{ - throw Exception(ErrorCodes::NOT_IMPLEMENTED, 
"removeRecursive is not implemented for MetadataStorageFromPlainObjectStorage"); -} - void MetadataStorageFromPlainObjectStorageTransaction::createDirectory(const std::string &) { /// Noop. It is an Object Storage not a filesystem. } - void MetadataStorageFromPlainObjectStorageTransaction::createDirectoryRecursive(const std::string &) { /// Noop. It is an Object Storage not a filesystem. } - -void MetadataStorageFromPlainObjectStorageTransaction::removeDirectory(const std::string &) -{ - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "removeDirectory is not implemented for MetadataStorageFromPlainObjectStorage"); -} - -void MetadataStorageFromPlainObjectStorageTransaction::moveFile(const std::string & /* path_from */, const std::string & /* path_to */) -{ - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "moveFile is not implemented for MetadataStorageFromPlainObjectStorage"); -} - -void MetadataStorageFromPlainObjectStorageTransaction::moveDirectory(const std::string & /* path_from */, const std::string & /* path_to */) -{ - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "moveDirectory is not implemented for MetadataStorageFromPlainObjectStorage"); -} - -void MetadataStorageFromPlainObjectStorageTransaction::replaceFile(const std::string & /* path_from */, const std::string & /* path_to */) -{ - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "replaceFile is not implemented for MetadataStorageFromPlainObjectStorage"); -} - -void MetadataStorageFromPlainObjectStorageTransaction::chmod(const String &, mode_t) -{ - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "chmod is not implemented for MetadataStorageFromPlainObjectStorage"); -} - -void MetadataStorageFromPlainObjectStorageTransaction::setReadOnly(const std::string &) -{ - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "setReadOnly is not implemented for MetadataStorageFromPlainObjectStorage"); -} - -void MetadataStorageFromPlainObjectStorageTransaction::createHardLink(const std::string & /* path_from */, const std::string & /* path_to */) -{ - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "createHardLink is not implemented for MetadataStorageFromPlainObjectStorage"); -} - -void MetadataStorageFromPlainObjectStorageTransaction::createEmptyMetadataFile(const std::string &) -{ - /// Noop, no separate metadata. -} - -void MetadataStorageFromPlainObjectStorageTransaction::createMetadataFile( - const std::string &, const std::string & /* blob_name */, uint64_t /* size_in_bytes */) -{ - /// Noop, no separate metadata. -} - void MetadataStorageFromPlainObjectStorageTransaction::addBlobToMetadata( const std::string &, const std::string & /* blob_name */, uint64_t /* size_in_bytes */) { /// Noop, local metadata files is only one file, it is the metadata file itself. } - void MetadataStorageFromPlainObjectStorageTransaction::unlinkMetadata(const std::string &) { /// Noop, no separate metadata. 
diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.h b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.h index bd993918413..99cc960b9e4 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.h +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.h @@ -45,31 +45,32 @@ public: uint64_t getFileSize(const String & path) const override; - Poco::Timestamp getLastModified(const std::string & path) const override; - - time_t getLastChanged(const std::string & path) const override; - - bool supportsChmod() const override { return false; } - - bool supportsStat() const override { return false; } - - struct stat stat(const String & path) const override; - std::vector listDirectory(const std::string & path) const override; DirectoryIteratorPtr iterateDirectory(const std::string & path) const override; - std::string readFileToString(const std::string & path) const override; - - std::unordered_map getSerializedMetadata(const std::vector & file_paths) const override; - - uint32_t getHardlinkCount(const std::string & path) const override; - DiskPtr getDisk() const { return {}; } StoredObjects getStorageObjects(const std::string & path) const override; std::string getObjectStorageRootPath() const override { return object_storage_root_path; } + + Poco::Timestamp getLastModified(const std::string & /* path */) const override + { + /// Required by MergeTree + return {}; + } + + uint32_t getHardlinkCount(const std::string & /* path */) const override + { + return 1; + } + + bool supportsChmod() const override { return false; } + bool supportsStat() const override { return false; } + +private: + std::filesystem::path getAbsolutePath(const std::string & path) const; }; class MetadataStorageFromPlainObjectStorageTransaction final : public IMetadataTransaction @@ -83,47 +84,34 @@ public: : metadata_storage(metadata_storage_) {} - ~MetadataStorageFromPlainObjectStorageTransaction() override = default; - - const IMetadataStorage & getStorageForNonTransactionalReads() const final; - - void commit() final {} - - void writeStringToFile(const std::string & path, const std::string & data) override; - - void createEmptyMetadataFile(const std::string & path) override; - - void createMetadataFile(const std::string & path, const std::string & blob_name, uint64_t size_in_bytes) override; + const IMetadataStorage & getStorageForNonTransactionalReads() const override; void addBlobToMetadata(const std::string & path, const std::string & blob_name, uint64_t size_in_bytes) override; - void setLastModified(const std::string & path, const Poco::Timestamp & timestamp) override; + void createEmptyMetadataFile(const std::string & /* path */) override + { + /// No metadata, no need to create anything. 
+ } - bool supportsChmod() const override { return false; } - - void chmod(const String & path, mode_t mode) override; - - void setReadOnly(const std::string & path) override; - - void unlinkFile(const std::string & path) override; + void createMetadataFile(const std::string & /* path */, const std::string & /* blob_name */, uint64_t /* size_in_bytes */) override + { + /// Noop + } void createDirectory(const std::string & path) override; void createDirectoryRecursive(const std::string & path) override; - void removeDirectory(const std::string & path) override; - - void removeRecursive(const std::string & path) override; - - void createHardLink(const std::string & path_from, const std::string & path_to) override; - - void moveFile(const std::string & path_from, const std::string & path_to) override; - - void moveDirectory(const std::string & path_from, const std::string & path_to) override; - - void replaceFile(const std::string & path_from, const std::string & path_to) override; + void unlinkFile(const std::string & path) override; void unlinkMetadata(const std::string & path) override; + + void commit() override + { + /// Nothing to commit. + } + + bool supportsChmod() const override { return false; } }; } diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp index 213f744d84f..0c421ee03d7 100644 --- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp +++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp @@ -28,7 +28,7 @@ #include #include - +#include #include #include @@ -230,7 +230,9 @@ std::unique_ptr S3ObjectStorage::writeObject( /// NOLIN throw Exception(ErrorCodes::BAD_ARGUMENTS, "S3 doesn't support append to files"); auto settings_ptr = s3_settings.get(); - auto scheduler = threadPoolCallbackRunner(getThreadPoolWriter(), "VFSWrite"); + ThreadPoolCallbackRunner scheduler; + if (write_settings.s3_allow_parallel_part_upload) + scheduler = threadPoolCallbackRunner(getThreadPoolWriter(), "VFSWrite"); auto s3_buffer = std::make_unique( client.get(), @@ -246,7 +248,7 @@ std::unique_ptr S3ObjectStorage::writeObject( /// NOLIN std::move(s3_buffer), std::move(finalize_callback), object.absolute_path); } -void S3ObjectStorage::listPrefix(const std::string & path, RelativePathsWithSize & children) const +void S3ObjectStorage::findAllFiles(const std::string & path, RelativePathsWithSize & children) const { auto settings_ptr = s3_settings.get(); auto client_ptr = client.get(); @@ -277,6 +279,49 @@ void S3ObjectStorage::listPrefix(const std::string & path, RelativePathsWithSize } while (outcome.GetResult().GetIsTruncated()); } +void S3ObjectStorage::getDirectoryContents(const std::string & path, + RelativePathsWithSize & files, + std::vector & directories) const +{ + auto settings_ptr = s3_settings.get(); + auto client_ptr = client.get(); + + Aws::S3::Model::ListObjectsV2Request request; + request.SetBucket(bucket); + request.SetPrefix(path); + request.SetMaxKeys(settings_ptr->list_object_keys_size); + request.SetDelimiter("/"); + + Aws::S3::Model::ListObjectsV2Outcome outcome; + do + { + ProfileEvents::increment(ProfileEvents::S3ListObjects); + ProfileEvents::increment(ProfileEvents::DiskS3ListObjects); + outcome = client_ptr->ListObjectsV2(request); + throwIfError(outcome); + + auto result = outcome.GetResult(); + auto result_objects = result.GetContents(); + auto result_common_prefixes = result.GetCommonPrefixes(); + + if (result_objects.empty() && result_common_prefixes.empty()) + break; + + for (const auto & object : result_objects) + 
files.emplace_back(object.GetKey(), object.GetSize()); + + for (const auto & common_prefix : result_common_prefixes) + { + std::string directory = common_prefix.GetPrefix(); + /// Make it compatible with std::filesystem::path::filename() + trimRight(directory, '/'); + directories.emplace_back(directory); + } + + request.SetContinuationToken(outcome.GetResult().GetNextContinuationToken()); + } while (outcome.GetResult().GetIsTruncated()); +} + void S3ObjectStorage::removeObjectImpl(const StoredObject & object, bool if_exists) { auto client_ptr = client.get(); @@ -482,7 +527,7 @@ void S3ObjectStorage::copyObjectMultipartImpl( part_request.SetBucket(dst_bucket); part_request.SetKey(dst_key); part_request.SetUploadId(multipart_upload_id); - part_request.SetPartNumber(part_number); + part_request.SetPartNumber(static_cast(part_number)); part_request.SetCopySourceRange(fmt::format("bytes={}-{}", position, std::min(size, position + upload_part_size) - 1)); auto outcome = client_ptr->UploadPartCopy(part_request); @@ -515,7 +560,7 @@ void S3ObjectStorage::copyObjectMultipartImpl( for (size_t i = 0; i < part_tags.size(); ++i) { Aws::S3::Model::CompletedPart part; - multipart_upload.AddParts(part.WithETag(part_tags[i]).WithPartNumber(i + 1)); + multipart_upload.AddParts(part.WithETag(part_tags[i]).WithPartNumber(static_cast(i) + 1)); } req.SetMultipartUpload(multipart_upload); diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.h b/src/Disks/ObjectStorages/S3/S3ObjectStorage.h index a193653db9a..6b1e8289b15 100644 --- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.h +++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.h @@ -105,7 +105,10 @@ public: size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, const WriteSettings & write_settings = {}) override; - void listPrefix(const std::string & path, RelativePathsWithSize & children) const override; + void findAllFiles(const std::string & path, RelativePathsWithSize & children) const override; + void getDirectoryContents(const std::string & path, + RelativePathsWithSize & files, + std::vector & directories) const override; /// Uses `DeleteObjectRequest`. 
void removeObject(const StoredObject & object) override; diff --git a/src/Disks/ObjectStorages/S3/diskSettings.cpp b/src/Disks/ObjectStorages/S3/diskSettings.cpp index 1635cb5c552..e61987163d2 100644 --- a/src/Disks/ObjectStorages/S3/diskSettings.cpp +++ b/src/Disks/ObjectStorages/S3/diskSettings.cpp @@ -116,7 +116,8 @@ std::unique_ptr getClient(const Poco::Util::AbstractConfigura { S3::PocoHTTPClientConfiguration client_configuration = S3::ClientFactory::instance().createClientConfiguration( config.getString(config_prefix + ".region", ""), - context->getRemoteHostFilter(), context->getGlobalContext()->getSettingsRef().s3_max_redirects, + context->getRemoteHostFilter(), + static_cast(context->getGlobalContext()->getSettingsRef().s3_max_redirects), context->getGlobalContext()->getSettingsRef().enable_s3_requests_logging, /* for_disk_s3 = */ true); diff --git a/src/Disks/ObjectStorages/Web/MetadataStorageFromStaticFilesWebServer.cpp b/src/Disks/ObjectStorages/Web/MetadataStorageFromStaticFilesWebServer.cpp index 06e36a2ddd8..12c2cd16a9f 100644 --- a/src/Disks/ObjectStorages/Web/MetadataStorageFromStaticFilesWebServer.cpp +++ b/src/Disks/ObjectStorages/Web/MetadataStorageFromStaticFilesWebServer.cpp @@ -12,7 +12,6 @@ namespace DB namespace ErrorCodes { - extern const int NOT_IMPLEMENTED; extern const int FILE_DOESNT_EXIST; extern const int NETWORK_ERROR; } @@ -55,6 +54,9 @@ bool MetadataStorageFromStaticFilesWebServer::exists(const std::string & path) c path, [](const auto & file, const std::string & path_) { return file.first < path_; } ); + if (it == object_storage.files.end()) + return false; + if (startsWith(it->first, path) || (it != object_storage.files.begin() && startsWith(std::prev(it)->first, path))) return true; @@ -165,91 +167,11 @@ DirectoryIteratorPtr MetadataStorageFromStaticFilesWebServer::iterateDirectory(c return std::make_unique(std::move(dir_file_paths)); } -std::string MetadataStorageFromStaticFilesWebServer::readFileToString(const std::string &) const -{ - WebObjectStorage::throwNotAllowed(); -} - -Poco::Timestamp MetadataStorageFromStaticFilesWebServer::getLastModified(const std::string &) const -{ - return {}; -} - -time_t MetadataStorageFromStaticFilesWebServer::getLastChanged(const std::string &) const -{ - return {}; -} - -uint32_t MetadataStorageFromStaticFilesWebServer::getHardlinkCount(const std::string &) const -{ - return 1; -} - const IMetadataStorage & MetadataStorageFromStaticFilesWebServerTransaction::getStorageForNonTransactionalReads() const { return metadata_storage; } -void MetadataStorageFromStaticFilesWebServerTransaction::writeStringToFile(const std::string &, const std::string &) -{ - WebObjectStorage::throwNotAllowed(); -} - -void MetadataStorageFromStaticFilesWebServerTransaction::setLastModified(const std::string &, const Poco::Timestamp &) -{ - WebObjectStorage::throwNotAllowed(); -} - -void MetadataStorageFromStaticFilesWebServerTransaction::unlinkFile(const std::string &) -{ - WebObjectStorage::throwNotAllowed(); -} - -void MetadataStorageFromStaticFilesWebServerTransaction::removeRecursive(const std::string &) -{ - WebObjectStorage::throwNotAllowed(); -} - -void MetadataStorageFromStaticFilesWebServerTransaction::removeDirectory(const std::string &) -{ - WebObjectStorage::throwNotAllowed(); -} - -void MetadataStorageFromStaticFilesWebServerTransaction::moveFile(const std::string &, const std::string &) -{ - WebObjectStorage::throwNotAllowed(); -} - -void MetadataStorageFromStaticFilesWebServerTransaction::moveDirectory(const std::string &, 
const std::string &) -{ - WebObjectStorage::throwNotAllowed(); -} - -void MetadataStorageFromStaticFilesWebServerTransaction::replaceFile(const std::string &, const std::string &) -{ - WebObjectStorage::throwNotAllowed(); -} - -void MetadataStorageFromStaticFilesWebServerTransaction::setReadOnly(const std::string &) -{ - WebObjectStorage::throwNotAllowed(); -} - -void MetadataStorageFromStaticFilesWebServerTransaction::createHardLink(const std::string &, const std::string &) -{ - WebObjectStorage::throwNotAllowed(); -} - -void MetadataStorageFromStaticFilesWebServerTransaction::addBlobToMetadata(const std::string &, const std::string &, uint64_t) -{ - WebObjectStorage::throwNotAllowed(); -} - -void MetadataStorageFromStaticFilesWebServerTransaction::unlinkMetadata(const std::string &) -{ - WebObjectStorage::throwNotAllowed(); -} - void MetadataStorageFromStaticFilesWebServerTransaction::createDirectory(const std::string &) { /// Noop. @@ -260,30 +182,4 @@ void MetadataStorageFromStaticFilesWebServerTransaction::createDirectoryRecursiv /// Noop. } -void MetadataStorageFromStaticFilesWebServerTransaction::createEmptyMetadataFile(const std::string & /* path */) -{ - /// Noop. -} - -void MetadataStorageFromStaticFilesWebServerTransaction::createMetadataFile( - const std::string & /* path */, const std::string & /* blob_name */, uint64_t /* size_in_bytes */) -{ - /// Noop. -} - -void MetadataStorageFromStaticFilesWebServerTransaction::commit() -{ - /// Noop. -} - -std::unordered_map MetadataStorageFromStaticFilesWebServer::getSerializedMetadata(const std::vector &) const -{ - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "getSerializedMetadata is not implemented for MetadataStorageFromStaticFilesWebServer"); -} - -void MetadataStorageFromStaticFilesWebServerTransaction::chmod(const String &, mode_t) -{ - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "chmod is not implemented for MetadataStorageFromStaticFilesWebServer"); -} - } diff --git a/src/Disks/ObjectStorages/Web/MetadataStorageFromStaticFilesWebServer.h b/src/Disks/ObjectStorages/Web/MetadataStorageFromStaticFilesWebServer.h index 27a1ae8b8fa..338a2690b8f 100644 --- a/src/Disks/ObjectStorages/Web/MetadataStorageFromStaticFilesWebServer.h +++ b/src/Disks/ObjectStorages/Web/MetadataStorageFromStaticFilesWebServer.h @@ -36,29 +36,28 @@ public: uint64_t getFileSize(const String & path) const override; - Poco::Timestamp getLastModified(const std::string & path) const override; - - time_t getLastChanged(const std::string & path) const override; - std::vector listDirectory(const std::string & path) const override; DirectoryIteratorPtr iterateDirectory(const std::string & path) const override; - std::string readFileToString(const std::string & path) const override; - - std::unordered_map getSerializedMetadata(const std::vector & file_paths) const override; - - uint32_t getHardlinkCount(const std::string & path) const override; - StoredObjects getStorageObjects(const std::string & path) const override; std::string getObjectStorageRootPath() const override { return ""; } + struct stat stat(const String & /* path */) const override { return {}; } + + Poco::Timestamp getLastModified(const std::string & /* path */) const override + { + /// Required by MergeTree + return {}; + } + uint32_t getHardlinkCount(const std::string & /* path */) const override + { + return 1; + } + bool supportsChmod() const override { return false; } - bool supportsStat() const override { return false; } - - struct stat stat(const String &) const override { return {}; } }; 
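
The header above now keeps only trivial inline bodies (getLastModified() returning a default timestamp, getHardlinkCount() returning 1) and drops the long list of out-of-line throwNotAllowed() overrides. A hedged sketch of that pattern with stand-in names (this is not the real IMetadataStorage/IMetadataTransaction interface): the base class supplies throwing defaults for mutating operations, so a read-only backend only declares what it genuinely supports, and its "create" and commit steps are deliberate no-ops.

#include <stdexcept>
#include <string>

/// Stand-in for a metadata transaction interface: mutating operations throw by
/// default, so read-only backends do not need dozens of boilerplate overrides.
class MetadataTransactionBase
{
public:
    virtual ~MetadataTransactionBase() = default;

    virtual void createEmptyMetadataFile(const std::string & path) = 0;
    virtual void commit() = 0;

    /// Default implementations: anything that would mutate remote state throws.
    virtual void unlinkFile(const std::string &) { throwNotAllowed(); }
    virtual void moveFile(const std::string &, const std::string &) { throwNotAllowed(); }

protected:
    [[noreturn]] static void throwNotAllowed()
    {
        throw std::runtime_error("Only read-only operations are supported");
    }
};

/// Read-only transaction: "creating" metadata is a no-op because the static
/// listing already describes every object, and there is nothing to commit.
class ReadOnlyMetadataTransaction final : public MetadataTransactionBase
{
public:
    void createEmptyMetadataFile(const std::string & /* path */) override { /* Noop */ }
    void commit() override { /* Nothing to commit */ }
};

int main()
{
    ReadOnlyMetadataTransaction tx;
    tx.createEmptyMetadataFile("store/abc/data.bin");  /// accepted: no metadata to write
    tx.commit();                                       /// accepted: nothing to commit
    try { tx.unlinkFile("store/abc/data.bin"); }       /// rejected: storage is read-only
    catch (const std::exception &) { /* "Only read-only operations are supported" */ }
    return 0;
}
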
class MetadataStorageFromStaticFilesWebServerTransaction final : public IMetadataTransaction @@ -73,47 +72,28 @@ public: : metadata_storage(metadata_storage_) {} - ~MetadataStorageFromStaticFilesWebServerTransaction() override = default; - const IMetadataStorage & getStorageForNonTransactionalReads() const override; - void commit() override; + void createEmptyMetadataFile(const std::string & /* path */) override + { + /// No metadata, no need to create anything. + } - void writeStringToFile(const std::string & path, const std::string & data) override; - - void createEmptyMetadataFile(const std::string & path) override; - - void createMetadataFile(const std::string & path, const std::string & blob_name, uint64_t size_in_bytes) override; - - void addBlobToMetadata(const std::string & path, const std::string & blob_name, uint64_t size_in_bytes) override; - - void setLastModified(const std::string & path, const Poco::Timestamp & timestamp) override; - - void setReadOnly(const std::string & path) override; - - void unlinkFile(const std::string & path) override; + void createMetadataFile(const std::string & /* path */, const std::string & /* blob_name */, uint64_t /* size_in_bytes */) override + { + /// Noop + } void createDirectory(const std::string & path) override; void createDirectoryRecursive(const std::string & path) override; - void removeDirectory(const std::string & path) override; - - void removeRecursive(const std::string & path) override; - - void createHardLink(const std::string & path_from, const std::string & path_to) override; - - void moveFile(const std::string & path_from, const std::string & path_to) override; - - void moveDirectory(const std::string & path_from, const std::string & path_to) override; - - void replaceFile(const std::string & path_from, const std::string & path_to) override; - - void unlinkMetadata(const std::string & path) override; + void commit() override + { + /// Nothing to commit. 
+ } bool supportsChmod() const override { return false; } - - void chmod(const String &, mode_t) override; }; } diff --git a/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp b/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp index 71bde110fa6..f97409cfc6c 100644 --- a/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp +++ b/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp @@ -178,17 +178,6 @@ std::unique_ptr WebObjectStorage::readObject( /// NOLINT } } -void WebObjectStorage::listPrefix(const std::string & path, RelativePathsWithSize & children) const -{ - for (const auto & [file_path, file_info] : files) - { - if (file_info.type == FileType::File && file_path.starts_with(path)) - { - children.emplace_back(file_path, file_info.size); - } - } -} - void WebObjectStorage::throwNotAllowed() { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Only read-only operations are supported"); diff --git a/src/Disks/ObjectStorages/Web/WebObjectStorage.h b/src/Disks/ObjectStorages/Web/WebObjectStorage.h index 2fda5e576aa..2dab8fdb62d 100644 --- a/src/Disks/ObjectStorages/Web/WebObjectStorage.h +++ b/src/Disks/ObjectStorages/Web/WebObjectStorage.h @@ -55,8 +55,6 @@ public: size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, const WriteSettings & write_settings = {}) override; - void listPrefix(const std::string & path, RelativePathsWithSize & children) const override; - void removeObject(const StoredObject & object) override; void removeObjects(const StoredObjects & objects) override; diff --git a/src/Disks/StoragePolicy.cpp b/src/Disks/StoragePolicy.cpp index 10513c6beae..10524ffcc0f 100644 --- a/src/Disks/StoragePolicy.cpp +++ b/src/Disks/StoragePolicy.cpp @@ -26,7 +26,6 @@ namespace ErrorCodes extern const int BAD_ARGUMENTS; extern const int EXCESSIVE_ELEMENT_IN_CONFIG; extern const int NO_ELEMENTS_IN_CONFIG; - extern const int UNKNOWN_DISK; extern const int UNKNOWN_POLICY; extern const int UNKNOWN_VOLUME; extern const int LOGICAL_ERROR; @@ -311,22 +310,12 @@ void StoragePolicy::checkCompatibleWith(const StoragePolicyPtr & new_storage_pol } -size_t StoragePolicy::getVolumeIndexByDisk(const DiskPtr & disk_ptr) const +std::optional StoragePolicy::tryGetVolumeIndexByDiskName(const String & disk_name) const { - auto it = volume_index_by_disk_name.find(disk_ptr->getName()); + auto it = volume_index_by_disk_name.find(disk_name); if (it != volume_index_by_disk_name.end()) return it->second; - else - throw Exception("No disk " + backQuote(disk_ptr->getName()) + " in policy " + backQuote(name), ErrorCodes::UNKNOWN_DISK); -} - - -VolumePtr StoragePolicy::tryGetVolumeByDisk(const DiskPtr & disk_ptr) const -{ - auto it = volume_index_by_disk_name.find(disk_ptr->getName()); - if (it == volume_index_by_disk_name.end()) - return nullptr; - return getVolume(it->second); + return {}; } diff --git a/src/Disks/StoragePolicy.h b/src/Disks/StoragePolicy.h index fd0169a6ebe..9631f1c2e52 100644 --- a/src/Disks/StoragePolicy.h +++ b/src/Disks/StoragePolicy.h @@ -68,7 +68,7 @@ public: ReservationPtr reserve(UInt64 bytes, size_t min_volume_index) const override; /// Find volume index, which contains disk - size_t getVolumeIndexByDisk(const DiskPtr & disk_ptr) const override; + std::optional tryGetVolumeIndexByDiskName(const String & disk_name) const override; /// Reserves 0 bytes on disk with max available space /// Do not use this function when it is possible to predict size. @@ -85,9 +85,6 @@ public: VolumePtr tryGetVolumeByName(const String & volume_name) const override; - /// Finds a volume which contains a specified disk. 
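
For the StoragePolicy change above, a small illustrative sketch of the API shape this diff moves to (the class and its container are stand-ins, not the real StoragePolicy): the by-disk lookup now reports "disk not in this policy" through std::optional instead of throwing UNKNOWN_DISK, and tryGetVolumeByDisk() can disappear because callers combine the optional index with an ordinary getVolume() call themselves.

#include <cstddef>
#include <iostream>
#include <map>
#include <optional>
#include <string>

/// Stand-in for the lookup table a storage policy keeps (disk name -> volume index).
class PolicySketch
{
public:
    explicit PolicySketch(std::map<std::string, size_t> index_) : index(std::move(index_)) {}

    /// New shape: an empty optional means the disk is not part of this policy;
    /// the caller decides whether that is an error.
    std::optional<size_t> tryGetVolumeIndexByDiskName(const std::string & disk_name) const
    {
        if (auto it = index.find(disk_name); it != index.end())
            return it->second;
        return {};
    }

private:
    std::map<std::string, size_t> index;
};

int main()
{
    PolicySketch policy({{"disk_hot", 0}, {"disk_cold", 1}});

    if (auto volume_index = policy.tryGetVolumeIndexByDiskName("disk_hot"))
        std::cout << "disk_hot belongs to volume #" << *volume_index << '\n';

    if (!policy.tryGetVolumeIndexByDiskName("disk_missing"))
        std::cout << "disk_missing is not part of this policy\n";

    return 0;
}
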
- VolumePtr tryGetVolumeByDisk(const DiskPtr & disk_ptr) const override; - /// Checks if storage policy can be replaced by another one. void checkCompatibleWith(const StoragePolicyPtr & new_storage_policy) const override; diff --git a/src/Formats/CapnProtoUtils.cpp b/src/Formats/CapnProtoUtils.cpp index b8702380aa7..a7ff065aca5 100644 --- a/src/Formats/CapnProtoUtils.cpp +++ b/src/Formats/CapnProtoUtils.cpp @@ -138,7 +138,7 @@ static String getCapnProtoFullTypeName(const capnp::Type & type) auto enum_schema = type.asEnum(); String enum_name = "Enum("; auto enumerants = enum_schema.getEnumerants(); - for (size_t i = 0; i != enumerants.size(); ++i) + for (unsigned i = 0; i != enumerants.size(); ++i) { enum_name += String(enumerants[i].getProto().getName()) + " = " + std::to_string(enumerants[i].getOrdinal()); if (i + 1 != enumerants.size()) diff --git a/src/Formats/EscapingRuleUtils.cpp b/src/Formats/EscapingRuleUtils.cpp index f1a97c84fec..e80ab50968d 100644 --- a/src/Formats/EscapingRuleUtils.cpp +++ b/src/Formats/EscapingRuleUtils.cpp @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -18,6 +19,7 @@ #include #include #include +#include #include @@ -453,23 +455,51 @@ void transformInferredJSONTypesIfNeeded(DataTypePtr & first, DataTypePtr & secon second = std::move(types[1]); } -DataTypePtr tryInferDateOrDateTime(const std::string_view & field, const FormatSettings & settings) +bool tryInferDate(const std::string_view & field) { - if (settings.try_infer_dates) + ReadBufferFromString buf(field); + DayNum tmp; + return tryReadDateText(tmp, buf) && buf.eof(); +} + +bool tryInferDateTime(const std::string_view & field, const FormatSettings & settings) +{ + ReadBufferFromString buf(field); + Float64 tmp_float; + /// Check if it's just a number, and if so, don't try to infer DateTime from it, + /// because we can interpret this number as a timestamp and it will lead to + /// inferring DateTime instead of simple Int64/Float64 in some cases. 
+ if (tryReadFloatText(tmp_float, buf) && buf.eof()) + return false; + + buf.seek(0, SEEK_SET); /// Return position to the beginning + DateTime64 tmp; + switch (settings.date_time_input_format) { - ReadBufferFromString buf(field); - DayNum tmp; - if (tryReadDateText(tmp, buf) && buf.eof()) - return makeNullable(std::make_shared()); + case FormatSettings::DateTimeInputFormat::Basic: + if (tryReadDateTime64Text(tmp, 9, buf) && buf.eof()) + return true; + break; + case FormatSettings::DateTimeInputFormat::BestEffort: + if (tryParseDateTime64BestEffort(tmp, 9, buf, DateLUT::instance(), DateLUT::instance("UTC")) && buf.eof()) + return true; + break; + case FormatSettings::DateTimeInputFormat::BestEffortUS: + if (tryParseDateTime64BestEffortUS(tmp, 9, buf, DateLUT::instance(), DateLUT::instance("UTC")) && buf.eof()) + return true; + break; } - if (settings.try_infer_datetimes) - { - ReadBufferFromString buf(field); - DateTime64 tmp; - if (tryReadDateTime64Text(tmp, 9, buf) && buf.eof()) - return makeNullable(std::make_shared(9)); - } + return false; +} + +DataTypePtr tryInferDateOrDateTime(const std::string_view & field, const FormatSettings & settings) +{ + if (settings.try_infer_dates && tryInferDate(field)) + return makeNullable(std::make_shared()); + + if (settings.try_infer_datetimes && tryInferDateTime(field, settings)) + return makeNullable(std::make_shared(9)); return nullptr; } @@ -829,7 +859,7 @@ String getAdditionalFormatInfoByEscapingRule(const FormatSettings & settings, Fo result += fmt::format( ", use_best_effort_in_schema_inference={}, bool_true_representation={}, bool_false_representation={}," " null_representation={}, delimiter={}, tuple_delimiter={}", - settings.tsv.use_best_effort_in_schema_inference, + settings.csv.use_best_effort_in_schema_inference, settings.bool_true_representation, settings.bool_false_representation, settings.csv.null_representation, @@ -846,4 +876,19 @@ String getAdditionalFormatInfoByEscapingRule(const FormatSettings & settings, Fo return result; } + +void checkSupportedDelimiterAfterField(FormatSettings::EscapingRule escaping_rule, const String & delimiter, const DataTypePtr & type) +{ + if (escaping_rule != FormatSettings::EscapingRule::Escaped) + return; + + bool is_supported_delimiter_after_string = !delimiter.empty() && (delimiter.front() == '\t' || delimiter.front() == '\n'); + if (is_supported_delimiter_after_string) + return; + + /// Nullptr means that field is skipped and it's equivalent to String + if (!type || isString(removeNullable(removeLowCardinality(type)))) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "'Escaped' serialization requires delimiter after String field to start with '\\t' or '\\n'"); +} + } diff --git a/src/Formats/EscapingRuleUtils.h b/src/Formats/EscapingRuleUtils.h index 901679b6a05..c8b710002a5 100644 --- a/src/Formats/EscapingRuleUtils.h +++ b/src/Formats/EscapingRuleUtils.h @@ -77,6 +77,8 @@ void transformInferredTypesIfNeeded(DataTypePtr & first, DataTypePtr & second, c void transformInferredJSONTypesIfNeeded(DataTypes & types, const FormatSettings & settings, const std::unordered_set * numbers_parsed_from_json_strings = nullptr); void transformInferredJSONTypesIfNeeded(DataTypePtr & first, DataTypePtr & second, const FormatSettings & settings); -String getAdditionalFormatInfoByEscapingRule(const FormatSettings & settings,FormatSettings::EscapingRule escaping_rule); +String getAdditionalFormatInfoByEscapingRule(const FormatSettings & settings, FormatSettings::EscapingRule escaping_rule); + +void 
checkSupportedDelimiterAfterField(FormatSettings::EscapingRule escaping_rule, const String & delimiter, const DataTypePtr & type); } diff --git a/src/Formats/FormatFactory.cpp b/src/Formats/FormatFactory.cpp index bfe651dd1af..a882fcf5009 100644 --- a/src/Formats/FormatFactory.cpp +++ b/src/Formats/FormatFactory.cpp @@ -303,7 +303,7 @@ InputFormatPtr FormatFactory::getInputFormat( static void addExistingProgressToOutputFormat(OutputFormatPtr format, ContextPtr context) { - auto * element_id = context->getProcessListElement(); + auto element_id = context->getProcessListElement(); if (element_id) { /// While preparing the query there might have been progress (for example in subscalar subqueries) so add it here diff --git a/src/Formats/NativeReader.cpp b/src/Formats/NativeReader.cpp index 2500158374d..98688bf03b7 100644 --- a/src/Formats/NativeReader.cpp +++ b/src/Formats/NativeReader.cpp @@ -145,12 +145,7 @@ Block NativeReader::read() readBinary(type_name, istr); column.type = data_type_factory.get(type_name); - const auto * aggregate_function_data_type = typeid_cast(column.type.get()); - if (aggregate_function_data_type && aggregate_function_data_type->isVersioned()) - { - auto version = aggregate_function_data_type->getVersionFromRevision(server_revision); - aggregate_function_data_type->setVersion(version, /*if_empty=*/ true); - } + setVersionToAggregateFunctions(column.type, true, server_revision); SerializationPtr serialization; if (server_revision >= DBMS_MIN_REVISION_WITH_CUSTOM_SERIALIZATION) diff --git a/src/Formats/NativeWriter.cpp b/src/Formats/NativeWriter.cpp index 9d4cfb68d56..c4dea371afd 100644 --- a/src/Formats/NativeWriter.cpp +++ b/src/Formats/NativeWriter.cpp @@ -11,9 +11,8 @@ #include #include -#include -#include #include +#include #include namespace DB @@ -59,7 +58,7 @@ static void writeData(const ISerialization & serialization, const ColumnPtr & co settings.low_cardinality_max_dictionary_size = 0; //-V1048 ISerialization::SerializeBinaryBulkStatePtr state; - serialization.serializeBinaryBulkStatePrefix(settings, state); + serialization.serializeBinaryBulkStatePrefix(*full_column, settings, state); serialization.serializeBinaryBulkWithMultipleStreams(*full_column, offset, limit, settings, state); serialization.serializeBinaryBulkStateSuffix(settings, state); } @@ -116,19 +115,7 @@ void NativeWriter::write(const Block & block) writeStringBinary(column.name, ostr); bool include_version = client_revision >= DBMS_MIN_REVISION_WITH_AGGREGATE_FUNCTIONS_VERSIONING; - const auto * aggregate_function_data_type = typeid_cast(column.type.get()); - if (aggregate_function_data_type && aggregate_function_data_type->isVersioned()) - { - if (include_version) - { - auto version = aggregate_function_data_type->getVersionFromRevision(client_revision); - aggregate_function_data_type->setVersion(version, /* if_empty */true); - } - else - { - aggregate_function_data_type->setVersion(0, /* if_empty */false); - } - } + setVersionToAggregateFunctions(column.type, include_version, include_version ? 
std::optional(client_revision) : std::nullopt); /// Type String type_name = column.type->getName(); diff --git a/src/Formats/ProtobufSerializer.cpp b/src/Formats/ProtobufSerializer.cpp index 567a2a9ee98..2f56c4242e5 100644 --- a/src/Formats/ProtobufSerializer.cpp +++ b/src/Formats/ProtobufSerializer.cpp @@ -864,7 +864,7 @@ namespace case FieldTypeId::TYPE_ENUM: { write_function = [this](std::string_view str) { writeInt(stringToProtobufEnumValue(str)); }; - read_function = [this](PaddedPODArray & str) { protobufEnumValueToStringAppend(readInt(), str); }; + read_function = [this](PaddedPODArray & str) { protobufEnumValueToStringAppend(static_cast(readInt()), str); }; default_function = [this]() -> String { return field_descriptor.default_value_enum()->name(); }; break; } @@ -1029,7 +1029,7 @@ namespace case FieldTypeId::TYPE_ENUM: { this->write_function = [this](NumberType value) { writeInt(enumDataTypeValueToProtobufEnumValue(value)); }; - this->read_function = [this]() -> NumberType { return protobufEnumValueToEnumDataTypeValue(readInt()); }; + this->read_function = [this]() -> NumberType { return protobufEnumValueToEnumDataTypeValue(static_cast(readInt())); }; this->default_function = [this]() -> NumberType { return protobufEnumValueToEnumDataTypeValue(this->field_descriptor.default_value_enum()->number()); }; break; } @@ -1539,10 +1539,13 @@ namespace read_function = [this]() -> UInt32 { readStr(text_buffer); - return stringToDateTime(text_buffer, date_lut); + return static_cast(stringToDateTime(text_buffer, date_lut)); }; - default_function = [this]() -> UInt32 { return stringToDateTime(field_descriptor.default_value_string(), date_lut); }; + default_function = [this]() -> UInt32 + { + return static_cast(stringToDateTime(field_descriptor.default_value_string(), date_lut)); + }; break; } diff --git a/src/Formats/newLineSegmentationEngine.cpp b/src/Formats/newLineSegmentationEngine.cpp new file mode 100644 index 00000000000..a605bba7e5b --- /dev/null +++ b/src/Formats/newLineSegmentationEngine.cpp @@ -0,0 +1,50 @@ +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +std::pair newLineFileSegmentationEngine(ReadBuffer & in, DB::Memory<> & memory, size_t min_bytes, size_t max_rows) +{ + char * pos = in.position(); + bool need_more_data = true; + size_t number_of_rows = 0; + + while (loadAtPosition(in, memory, pos) && need_more_data) + { + pos = find_first_symbols<'\r', '\n'>(pos, in.buffer().end()); + if (pos > in.buffer().end()) + throw Exception("Position in buffer is out of bounds. 
There must be a bug.", ErrorCodes::LOGICAL_ERROR); + else if (pos == in.buffer().end()) + continue; + + ++number_of_rows; + if ((memory.size() + static_cast(pos - in.position()) >= min_bytes) || (number_of_rows == max_rows)) + need_more_data = false; + + if (*pos == '\n') + { + ++pos; + if (loadAtPosition(in, memory, pos) && *pos == '\r') + ++pos; + } + else if (*pos == '\r') + { + ++pos; + if (loadAtPosition(in, memory, pos) && *pos == '\n') + ++pos; + } + } + + saveUpToPosition(in, memory, pos); + + return {loadAtPosition(in, memory, pos), number_of_rows}; +} + +} diff --git a/src/Formats/newLineSegmentationEngine.h b/src/Formats/newLineSegmentationEngine.h new file mode 100644 index 00000000000..598f808b798 --- /dev/null +++ b/src/Formats/newLineSegmentationEngine.h @@ -0,0 +1,9 @@ +#pragma once + +#include +#include + +namespace DB +{ + std::pair newLineFileSegmentationEngine(ReadBuffer & in, DB::Memory<> & memory, size_t min_bytes, size_t max_rows); +} diff --git a/src/Formats/registerFormats.cpp b/src/Formats/registerFormats.cpp index 593e4568be1..ba40fe442ab 100644 --- a/src/Formats/registerFormats.cpp +++ b/src/Formats/registerFormats.cpp @@ -18,6 +18,7 @@ void registerFileSegmentationEngineJSONCompactEachRow(FormatFactory & factory); #if USE_HIVE void registerFileSegmentationEngineHiveText(FormatFactory & factory); #endif +void registerFileSegmentationEngineLineAsString(FormatFactory & factory); /// Formats for both input/output. @@ -153,6 +154,7 @@ void registerFormats() #if USE_HIVE registerFileSegmentationEngineHiveText(factory); #endif + registerFileSegmentationEngineLineAsString(factory); registerInputFormatNative(factory); diff --git a/src/Functions/CMakeLists.txt b/src/Functions/CMakeLists.txt index ad36c51447f..c84e23da85b 100644 --- a/src/Functions/CMakeLists.txt +++ b/src/Functions/CMakeLists.txt @@ -22,6 +22,7 @@ list (APPEND PUBLIC_LIBS ch_contrib::metrohash ch_contrib::murmurhash ch_contrib::hashidsxx + ch_contrib::morton_nd ) list (APPEND PRIVATE_LIBS diff --git a/src/Functions/CRC.cpp b/src/Functions/CRC.cpp index 92f0130c19b..91b549873a3 100644 --- a/src/Functions/CRC.cpp +++ b/src/Functions/CRC.cpp @@ -15,7 +15,7 @@ struct CRCBase { for (size_t i = 0; i < 256; ++i) { - T c = i; + T c = static_cast(i); for (size_t j = 0; j < 8; ++j) c = c & 1 ? 
polynomial ^ (c >> 1) : c >> 1; tab[i] = c; @@ -58,7 +58,7 @@ struct CRC32ZLIBImpl static UInt32 makeCRC(const unsigned char *buf, size_t size) { - return crc32_z(0L, buf, size); + return static_cast(crc32_z(0L, buf, size)); } }; diff --git a/src/Functions/CustomWeekTransforms.h b/src/Functions/CustomWeekTransforms.h index b690463d456..413c81d1400 100644 --- a/src/Functions/CustomWeekTransforms.h +++ b/src/Functions/CustomWeekTransforms.h @@ -62,10 +62,7 @@ struct ToStartOfWeekImpl static inline UInt16 execute(Int64 t, UInt8 week_mode, const DateLUTImpl & time_zone) { - if (t < 0) - return 0; - - return time_zone.toFirstDayNumOfWeek(DayNum(std::min(Int32(time_zone.toDayNum(t)), Int32(DATE_LUT_MAX_DAY_NUM))), week_mode); + return time_zone.toFirstDayNumOfWeek(time_zone.toDayNum(t), week_mode); } static inline UInt16 execute(UInt32 t, UInt8 week_mode, const DateLUTImpl & time_zone) { @@ -73,10 +70,7 @@ struct ToStartOfWeekImpl } static inline UInt16 execute(Int32 d, UInt8 week_mode, const DateLUTImpl & time_zone) { - if (d < 0) - return 0; - - return time_zone.toFirstDayNumOfWeek(DayNum(std::min(d, Int32(DATE_LUT_MAX_DAY_NUM))), week_mode); + return time_zone.toFirstDayNumOfWeek(ExtendedDayNum(d), week_mode); } static inline UInt16 execute(UInt16 d, UInt8 week_mode, const DateLUTImpl & time_zone) { @@ -134,14 +128,17 @@ struct WeekTransformer void vector(const FromVectorType & vec_from, ToVectorType & vec_to, UInt8 week_mode, const DateLUTImpl & time_zone) const { + using ValueType = typename ToVectorType::value_type; size_t size = vec_from.size(); vec_to.resize(size); for (size_t i = 0; i < size; ++i) + { if constexpr (is_extended_result) - vec_to[i] = transform.executeExtendedResult(vec_from[i], week_mode, time_zone); + vec_to[i] = static_cast(transform.executeExtendedResult(vec_from[i], week_mode, time_zone)); else - vec_to[i] = transform.execute(vec_from[i], week_mode, time_zone); + vec_to[i] = static_cast(transform.execute(vec_from[i], week_mode, time_zone)); + } } private: diff --git a/src/Functions/DateTimeTransforms.h b/src/Functions/DateTimeTransforms.h index 217f158cc8e..aa1e1f86569 100644 --- a/src/Functions/DateTimeTransforms.h +++ b/src/Functions/DateTimeTransforms.h @@ -55,15 +55,15 @@ struct ToDateImpl static inline UInt16 execute(Int64 t, const DateLUTImpl & time_zone) { - return t < 0 ? 0 : std::min(Int32(time_zone.toDayNum(t)), Int32(DATE_LUT_MAX_DAY_NUM)); + return UInt16(time_zone.toDayNum(t)); } static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) { - return time_zone.toDayNum(t); + return UInt16(time_zone.toDayNum(t)); } - static inline UInt16 execute(Int32 t, const DateLUTImpl &) + static inline UInt16 execute(Int32, const DateLUTImpl &) { - return t < 0 ? 
0 : std::min(t, Int32(DATE_LUT_MAX_DAY_NUM)); + throwDateIsNotSupported(name); } static inline UInt16 execute(UInt16 d, const DateLUTImpl &) { @@ -104,30 +104,19 @@ struct ToStartOfDayImpl static inline UInt32 execute(const DecimalUtils::DecimalComponents & t, const DateLUTImpl & time_zone) { - if (t.whole < 0 || (t.whole >= 0 && t.fractional < 0)) - return 0; - - return time_zone.toDate(std::min(t.whole, Int64(0xffffffff))); + return static_cast(time_zone.toDate(static_cast(t.whole))); } static inline UInt32 execute(UInt32 t, const DateLUTImpl & time_zone) { - return time_zone.toDate(t); + return static_cast(time_zone.toDate(t)); } static inline UInt32 execute(Int32 d, const DateLUTImpl & time_zone) { - if (d < 0) - return 0; - - auto date_time = time_zone.fromDayNum(ExtendedDayNum(d)); - if (date_time <= 0xffffffff) - return date_time; - else - return time_zone.toDate(0xffffffff); + return static_cast(time_zone.toDate(ExtendedDayNum(d))); } static inline UInt32 execute(UInt16 d, const DateLUTImpl & time_zone) { - auto date_time = time_zone.fromDayNum(ExtendedDayNum(d)); - return date_time < 0xffffffff ? date_time : time_zone.toDate(0xffffffff); + return static_cast(time_zone.toDate(DayNum(d))); } static inline DecimalUtils::DecimalComponents executeExtendedResult(const DecimalUtils::DecimalComponents & t, const DateLUTImpl & time_zone) { @@ -147,16 +136,17 @@ struct ToMondayImpl static inline UInt16 execute(Int64 t, const DateLUTImpl & time_zone) { - return t < 0 ? 0 : time_zone.toFirstDayNumOfWeek(ExtendedDayNum( - std::min(Int32(time_zone.toDayNum(t)), Int32(DATE_LUT_MAX_DAY_NUM)))); + //return time_zone.toFirstDayNumOfWeek(time_zone.toDayNum(t)); + return time_zone.toFirstDayNumOfWeek(t); } static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) { + //return time_zone.toFirstDayNumOfWeek(time_zone.toDayNum(t)); return time_zone.toFirstDayNumOfWeek(t); } static inline UInt16 execute(Int32 d, const DateLUTImpl & time_zone) { - return d < 0 ? 0 : time_zone.toFirstDayNumOfWeek(ExtendedDayNum(std::min(d, Int32(DATE_LUT_MAX_DAY_NUM)))); + return time_zone.toFirstDayNumOfWeek(ExtendedDayNum(d)); } static inline UInt16 execute(UInt16 d, const DateLUTImpl & time_zone) { @@ -179,15 +169,15 @@ struct ToStartOfMonthImpl static inline UInt16 execute(Int64 t, const DateLUTImpl & time_zone) { - return t < 0 ? 0 : time_zone.toFirstDayNumOfMonth(ExtendedDayNum(std::min(Int32(time_zone.toDayNum(t)), Int32(DATE_LUT_MAX_DAY_NUM)))); + return time_zone.toFirstDayNumOfMonth(time_zone.toDayNum(t)); } static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) { - return time_zone.toFirstDayNumOfMonth(ExtendedDayNum(time_zone.toDayNum(t))); + return time_zone.toFirstDayNumOfMonth(time_zone.toDayNum(t)); } static inline UInt16 execute(Int32 d, const DateLUTImpl & time_zone) { - return d < 0 ? 0 : time_zone.toFirstDayNumOfMonth(ExtendedDayNum(std::min(d, Int32(DATE_LUT_MAX_DAY_NUM)))); + return time_zone.toFirstDayNumOfMonth(ExtendedDayNum(d)); } static inline UInt16 execute(UInt16 d, const DateLUTImpl & time_zone) { @@ -211,11 +201,7 @@ struct ToLastDayOfMonthImpl static inline UInt16 execute(Int64 t, const DateLUTImpl & time_zone) { - if (t < 0) - return 0; - - /// 0xFFF9 is Int value for 2149-05-31 -- the last day where we can actually find LastDayOfMonth. This will also be the return value. 
- return time_zone.toLastDayNumOfMonth(ExtendedDayNum(std::min(Int32(time_zone.toDayNum(t)), Int32(0xFFF9)))); + return time_zone.toLastDayNumOfMonth(time_zone.toDayNum(t)); } static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) { @@ -223,16 +209,11 @@ struct ToLastDayOfMonthImpl } static inline UInt16 execute(Int32 d, const DateLUTImpl & time_zone) { - if (d < 0) - return 0; - - /// 0xFFF9 is Int value for 2149-05-31 -- the last day where we can actually find LastDayOfMonth. This will also be the return value. - return time_zone.toLastDayNumOfMonth(ExtendedDayNum(std::min(d, Int32(0xFFF9)))); + return time_zone.toLastDayNumOfMonth(ExtendedDayNum(d)); } static inline UInt16 execute(UInt16 d, const DateLUTImpl & time_zone) { - /// 0xFFF9 is Int value for 2149-05-31 -- the last day where we can actually find LastDayOfMonth. This will also be the return value. - return time_zone.toLastDayNumOfMonth(DayNum(std::min(d, UInt16(0xFFF9)))); + return time_zone.toLastDayNumOfMonth(DayNum(d)); } static inline Int64 executeExtendedResult(Int64 t, const DateLUTImpl & time_zone) { @@ -251,7 +232,7 @@ struct ToStartOfQuarterImpl static inline UInt16 execute(Int64 t, const DateLUTImpl & time_zone) { - return t < 0 ? 0 : time_zone.toFirstDayNumOfQuarter(ExtendedDayNum(std::min(Int64(time_zone.toDayNum(t)), Int64(DATE_LUT_MAX_DAY_NUM)))); + return time_zone.toFirstDayNumOfQuarter(time_zone.toDayNum(t)); } static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) { @@ -259,7 +240,7 @@ struct ToStartOfQuarterImpl } static inline UInt16 execute(Int32 d, const DateLUTImpl & time_zone) { - return d < 0 ? 0 : time_zone.toFirstDayNumOfQuarter(ExtendedDayNum(std::min(d, Int32(DATE_LUT_MAX_DAY_NUM)))); + return time_zone.toFirstDayNumOfQuarter(ExtendedDayNum(d)); } static inline UInt16 execute(UInt16 d, const DateLUTImpl & time_zone) { @@ -282,7 +263,7 @@ struct ToStartOfYearImpl static inline UInt16 execute(Int64 t, const DateLUTImpl & time_zone) { - return t < 0 ? 0 : time_zone.toFirstDayNumOfYear(ExtendedDayNum(std::min(Int32(time_zone.toDayNum(t)), Int32(DATE_LUT_MAX_DAY_NUM)))); + return time_zone.toFirstDayNumOfYear(time_zone.toDayNum(t)); } static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) { @@ -290,7 +271,7 @@ struct ToStartOfYearImpl } static inline UInt16 execute(Int32 d, const DateLUTImpl & time_zone) { - return d < 0 ? 
0 : time_zone.toFirstDayNumOfYear(ExtendedDayNum(std::min(d, Int32(DATE_LUT_MAX_DAY_NUM)))); + return time_zone.toFirstDayNumOfYear(ExtendedDayNum(d)); } static inline UInt16 execute(UInt16 d, const DateLUTImpl & time_zone) { @@ -316,11 +297,11 @@ struct ToTimeImpl static UInt32 execute(const DecimalUtils::DecimalComponents & t, const DateLUTImpl & time_zone) { - return time_zone.toTime(t.whole) + 86400; + return static_cast(time_zone.toTime(t.whole) + 86400); } static inline UInt32 execute(UInt32 t, const DateLUTImpl & time_zone) { - return time_zone.toTime(t) + 86400; + return static_cast(time_zone.toTime(t) + 86400); } static inline UInt32 execute(Int32, const DateLUTImpl &) { @@ -340,10 +321,7 @@ struct ToStartOfMinuteImpl static inline UInt32 execute(const DecimalUtils::DecimalComponents & t, const DateLUTImpl & time_zone) { - if (t.whole < 0 || (t.whole >= 0 && t.fractional < 0)) - return 0; - - return time_zone.toStartOfMinute(std::min(t.whole, Int64(0xffffffff))); + return static_cast(time_zone.toStartOfMinute(t.whole)); } static inline UInt32 execute(UInt32 t, const DateLUTImpl & time_zone) { @@ -541,7 +519,7 @@ struct ToStartOfFiveMinutesImpl static inline UInt32 execute(const DecimalUtils::DecimalComponents & t, const DateLUTImpl & time_zone) { - return time_zone.toStartOfFiveMinutes(t.whole); + return static_cast(time_zone.toStartOfFiveMinutes(t.whole)); } static inline UInt32 execute(UInt32 t, const DateLUTImpl & time_zone) { @@ -573,7 +551,7 @@ struct ToStartOfTenMinutesImpl static inline UInt32 execute(const DecimalUtils::DecimalComponents & t, const DateLUTImpl & time_zone) { - return time_zone.toStartOfTenMinutes(t.whole); + return static_cast(time_zone.toStartOfTenMinutes(t.whole)); } static inline UInt32 execute(UInt32 t, const DateLUTImpl & time_zone) { @@ -605,7 +583,7 @@ struct ToStartOfFifteenMinutesImpl static inline UInt32 execute(const DecimalUtils::DecimalComponents & t, const DateLUTImpl & time_zone) { - return time_zone.toStartOfFifteenMinutes(t.whole); + return static_cast(time_zone.toStartOfFifteenMinutes(t.whole)); } static inline UInt32 execute(UInt32 t, const DateLUTImpl & time_zone) { @@ -638,7 +616,7 @@ struct TimeSlotImpl static inline UInt32 execute(const DecimalUtils::DecimalComponents & t, const DateLUTImpl &) { - return t.whole / 1800 * 1800; + return static_cast(t.whole / 1800 * 1800); } static inline UInt32 execute(UInt32 t, const DateLUTImpl &) @@ -677,10 +655,7 @@ struct ToStartOfHourImpl static inline UInt32 execute(const DecimalUtils::DecimalComponents & t, const DateLUTImpl & time_zone) { - if (t.whole < 0 || (t.whole >= 0 && t.fractional < 0)) - return 0; - - return time_zone.toStartOfHour(std::min(t.whole, Int64(0xffffffff))); + return static_cast(time_zone.toStartOfHour(t.whole)); } static inline UInt32 execute(UInt32 t, const DateLUTImpl & time_zone) @@ -1034,21 +1009,39 @@ struct ToISOWeekImpl using FactorTransform = ToISOYearImpl; }; +enum class ResultPrecision +{ + Standard, + Extended +}; + +/// Standard precision results (precision_ == ResultPrecision::Standard) potentially lead to overflows when returning values. +/// This mode is used by SQL functions "toRelative*Num()" which cannot easily be changed due to backward compatibility. +/// According to documentation, these functions merely need to compute the time difference to a deterministic, fixed point in the past. +/// As a future TODO, we should fix their behavior in a backwards-compatible way. 
+/// See https://github.com/ClickHouse/ClickHouse/issues/41977#issuecomment-1267536814. +template struct ToRelativeYearNumImpl { static constexpr auto name = "toRelativeYearNum"; - static inline UInt16 execute(Int64 t, const DateLUTImpl & time_zone) + static inline auto execute(Int64 t, const DateLUTImpl & time_zone) { - return time_zone.toYear(t); + if constexpr (precision_ == ResultPrecision::Extended) + return static_cast(time_zone.toYear(t)); + else + return static_cast(time_zone.toYear(t)); } static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) { return time_zone.toYear(static_cast(t)); } - static inline UInt16 execute(Int32 d, const DateLUTImpl & time_zone) + static inline auto execute(Int32 d, const DateLUTImpl & time_zone) { - return time_zone.toYear(ExtendedDayNum(d)); + if constexpr (precision_ == ResultPrecision::Extended) + return static_cast(time_zone.toYear(ExtendedDayNum(d))); + else + return static_cast(time_zone.toYear(ExtendedDayNum(d))); } static inline UInt16 execute(UInt16 d, const DateLUTImpl & time_zone) { @@ -1058,21 +1051,28 @@ struct ToRelativeYearNumImpl using FactorTransform = ZeroTransform; }; +template struct ToRelativeQuarterNumImpl { static constexpr auto name = "toRelativeQuarterNum"; - static inline UInt16 execute(Int64 t, const DateLUTImpl & time_zone) + static inline auto execute(Int64 t, const DateLUTImpl & time_zone) { - return time_zone.toRelativeQuarterNum(t); + if constexpr (precision_ == ResultPrecision::Extended) + return static_cast(time_zone.toRelativeQuarterNum(t)); + else + return static_cast(time_zone.toRelativeQuarterNum(t)); } static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) { return time_zone.toRelativeQuarterNum(static_cast(t)); } - static inline UInt16 execute(Int32 d, const DateLUTImpl & time_zone) + static inline auto execute(Int32 d, const DateLUTImpl & time_zone) { - return time_zone.toRelativeQuarterNum(ExtendedDayNum(d)); + if constexpr (precision_ == ResultPrecision::Extended) + return static_cast(time_zone.toRelativeQuarterNum(ExtendedDayNum(d))); + else + return static_cast(time_zone.toRelativeQuarterNum(ExtendedDayNum(d))); } static inline UInt16 execute(UInt16 d, const DateLUTImpl & time_zone) { @@ -1082,21 +1082,28 @@ struct ToRelativeQuarterNumImpl using FactorTransform = ZeroTransform; }; +template struct ToRelativeMonthNumImpl { static constexpr auto name = "toRelativeMonthNum"; - static inline UInt16 execute(Int64 t, const DateLUTImpl & time_zone) + static inline auto execute(Int64 t, const DateLUTImpl & time_zone) { - return time_zone.toRelativeMonthNum(t); + if constexpr (precision_ == ResultPrecision::Extended) + return static_cast(time_zone.toRelativeMonthNum(t)); + else + return static_cast(time_zone.toRelativeMonthNum(t)); } static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) { return time_zone.toRelativeMonthNum(static_cast(t)); } - static inline UInt16 execute(Int32 d, const DateLUTImpl & time_zone) + static inline auto execute(Int32 d, const DateLUTImpl & time_zone) { - return time_zone.toRelativeMonthNum(ExtendedDayNum(d)); + if constexpr (precision_ == ResultPrecision::Extended) + return static_cast(time_zone.toRelativeMonthNum(ExtendedDayNum(d))); + else + return static_cast(time_zone.toRelativeMonthNum(ExtendedDayNum(d))); } static inline UInt16 execute(UInt16 d, const DateLUTImpl & time_zone) { @@ -1106,21 +1113,28 @@ struct ToRelativeMonthNumImpl using FactorTransform = ZeroTransform; }; +template struct ToRelativeWeekNumImpl { static constexpr auto 
name = "toRelativeWeekNum"; - static inline UInt16 execute(Int64 t, const DateLUTImpl & time_zone) + static inline auto execute(Int64 t, const DateLUTImpl & time_zone) { - return time_zone.toRelativeWeekNum(t); + if constexpr (precision_ == ResultPrecision::Extended) + return static_cast(time_zone.toRelativeWeekNum(t)); + else + return static_cast(time_zone.toRelativeWeekNum(t)); } static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) { return time_zone.toRelativeWeekNum(static_cast(t)); } - static inline UInt16 execute(Int32 d, const DateLUTImpl & time_zone) + static inline auto execute(Int32 d, const DateLUTImpl & time_zone) { - return time_zone.toRelativeWeekNum(ExtendedDayNum(d)); + if constexpr (precision_ == ResultPrecision::Extended) + return static_cast(time_zone.toRelativeWeekNum(ExtendedDayNum(d))); + else + return static_cast(time_zone.toRelativeWeekNum(ExtendedDayNum(d))); } static inline UInt16 execute(UInt16 d, const DateLUTImpl & time_zone) { @@ -1130,21 +1144,28 @@ struct ToRelativeWeekNumImpl using FactorTransform = ZeroTransform; }; +template struct ToRelativeDayNumImpl { static constexpr auto name = "toRelativeDayNum"; - static inline UInt16 execute(Int64 t, const DateLUTImpl & time_zone) + static inline auto execute(Int64 t, const DateLUTImpl & time_zone) { - return time_zone.toDayNum(t); + if constexpr (precision_ == ResultPrecision::Extended) + return static_cast(time_zone.toDayNum(t)); + else + return static_cast(time_zone.toDayNum(t)); } static inline UInt16 execute(UInt32 t, const DateLUTImpl & time_zone) { return time_zone.toDayNum(static_cast(t)); } - static inline UInt16 execute(Int32 d, const DateLUTImpl &) + static inline auto execute(Int32 d, const DateLUTImpl &) { - return static_cast(d); + if constexpr (precision_ == ResultPrecision::Extended) + return static_cast(static_cast(d)); + else + return static_cast(static_cast(d)); } static inline UInt16 execute(UInt16 d, const DateLUTImpl &) { @@ -1154,55 +1175,75 @@ struct ToRelativeDayNumImpl using FactorTransform = ZeroTransform; }; - +template struct ToRelativeHourNumImpl { static constexpr auto name = "toRelativeHourNum"; - static inline UInt32 execute(Int64 t, const DateLUTImpl & time_zone) + static inline auto execute(Int64 t, const DateLUTImpl & time_zone) { - return time_zone.toRelativeHourNum(t); + if constexpr (precision_ == ResultPrecision::Extended) + return static_cast(time_zone.toStableRelativeHourNum(t)); + else + return static_cast(time_zone.toRelativeHourNum(t)); } static inline UInt32 execute(UInt32 t, const DateLUTImpl & time_zone) { - return time_zone.toRelativeHourNum(static_cast(t)); + if constexpr (precision_ == ResultPrecision::Extended) + return static_cast(time_zone.toStableRelativeHourNum(static_cast(t))); + else + return static_cast(time_zone.toRelativeHourNum(static_cast(t))); } - static inline UInt32 execute(Int32 d, const DateLUTImpl & time_zone) + static inline auto execute(Int32 d, const DateLUTImpl & time_zone) { - return time_zone.toRelativeHourNum(ExtendedDayNum(d)); + if constexpr (precision_ == ResultPrecision::Extended) + return static_cast(time_zone.toStableRelativeHourNum(ExtendedDayNum(d))); + else + return static_cast(time_zone.toRelativeHourNum(ExtendedDayNum(d))); } static inline UInt32 execute(UInt16 d, const DateLUTImpl & time_zone) { - return time_zone.toRelativeHourNum(DayNum(d)); + if constexpr (precision_ == ResultPrecision::Extended) + return static_cast(time_zone.toStableRelativeHourNum(DayNum(d))); + else + return 
static_cast(time_zone.toRelativeHourNum(DayNum(d))); } using FactorTransform = ZeroTransform; }; +template struct ToRelativeMinuteNumImpl { static constexpr auto name = "toRelativeMinuteNum"; - static inline UInt32 execute(Int64 t, const DateLUTImpl & time_zone) + static inline auto execute(Int64 t, const DateLUTImpl & time_zone) { - return time_zone.toRelativeMinuteNum(t); + if constexpr (precision_ == ResultPrecision::Extended) + return static_cast(time_zone.toRelativeMinuteNum(t)); + else + return static_cast(time_zone.toRelativeMinuteNum(t)); } static inline UInt32 execute(UInt32 t, const DateLUTImpl & time_zone) { - return time_zone.toRelativeMinuteNum(static_cast(t)); + return static_cast(time_zone.toRelativeMinuteNum(static_cast(t))); } - static inline UInt32 execute(Int32 d, const DateLUTImpl & time_zone) + static inline auto execute(Int32 d, const DateLUTImpl & time_zone) { - return time_zone.toRelativeMinuteNum(ExtendedDayNum(d)); + if constexpr (precision_ == ResultPrecision::Extended) + return static_cast(time_zone.toRelativeMinuteNum(ExtendedDayNum(d))); + else + return static_cast(time_zone.toRelativeMinuteNum(ExtendedDayNum(d))); } static inline UInt32 execute(UInt16 d, const DateLUTImpl & time_zone) { - return time_zone.toRelativeMinuteNum(DayNum(d)); + return static_cast(time_zone.toRelativeMinuteNum(DayNum(d))); } using FactorTransform = ZeroTransform; }; +template struct ToRelativeSecondNumImpl { static constexpr auto name = "toRelativeSecondNum"; @@ -1215,13 +1256,16 @@ struct ToRelativeSecondNumImpl { return t; } - static inline UInt32 execute(Int32 d, const DateLUTImpl & time_zone) + static inline auto execute(Int32 d, const DateLUTImpl & time_zone) { - return time_zone.fromDayNum(ExtendedDayNum(d)); + if constexpr (precision_ == ResultPrecision::Extended) + return static_cast(time_zone.fromDayNum(ExtendedDayNum(d))); + else + return static_cast(time_zone.fromDayNum(ExtendedDayNum(d))); } static inline UInt32 execute(UInt16 d, const DateLUTImpl & time_zone) { - return time_zone.fromDayNum(DayNum(d)); + return static_cast(time_zone.fromDayNum(DayNum(d))); } using FactorTransform = ZeroTransform; @@ -1306,14 +1350,17 @@ struct Transformer template static void vector(const FromTypeVector & vec_from, ToTypeVector & vec_to, const DateLUTImpl & time_zone, const Transform & transform) { + using ValueType = typename ToTypeVector::value_type; size_t size = vec_from.size(); vec_to.resize(size); for (size_t i = 0; i < size; ++i) + { if constexpr (is_extended_result) - vec_to[i] = transform.executeExtendedResult(vec_from[i], time_zone); + vec_to[i] = static_cast(transform.executeExtendedResult(vec_from[i], time_zone)); else - vec_to[i] = transform.execute(vec_from[i], time_zone); + vec_to[i] = static_cast(transform.execute(vec_from[i], time_zone)); + } } }; diff --git a/src/Functions/DivisionUtils.h b/src/Functions/DivisionUtils.h index e120595c4d9..98e5c690eb9 100644 --- a/src/Functions/DivisionUtils.h +++ b/src/Functions/DivisionUtils.h @@ -163,7 +163,7 @@ struct ModuloImpl return static_cast(int_a % static_cast(int_b)); } else - return IntegerAType(a) % IntegerBType(b); + return static_cast(IntegerAType(a) % IntegerBType(b)); } } diff --git a/src/Functions/FunctionBinaryArithmetic.h b/src/Functions/FunctionBinaryArithmetic.h index baa3c65537d..c921b0425d3 100644 --- a/src/Functions/FunctionBinaryArithmetic.h +++ b/src/Functions/FunctionBinaryArithmetic.h @@ -39,6 +39,7 @@ #include #include #include +#include #include #if USE_EMBEDDED_COMPILER @@ -415,8 +416,8 @@ public: { for 
(size_t i = 0; i < size; ++i) c[i] = applyScaled( - unwrap(a, i), - unwrap(b, i), + static_cast(unwrap(a, i)), + static_cast(unwrap(b, i)), scale_a); return; } @@ -424,8 +425,8 @@ public: { for (size_t i = 0; i < size; ++i) c[i] = applyScaled( - unwrap(a, i), - unwrap(b, i), + static_cast(unwrap(a, i)), + static_cast(unwrap(b, i)), scale_b); return; } @@ -436,8 +437,8 @@ public: { for (size_t i = 0; i < size; ++i) c[i] = applyScaled( - unwrap(a, i), - unwrap(b, i), + static_cast(unwrap(a, i)), + static_cast(unwrap(b, i)), scale_a); return; } @@ -445,8 +446,8 @@ public: { for (size_t i = 0; i < size; ++i) c[i] = applyScaled( - unwrap(a, i), - unwrap(b, i), + static_cast(unwrap(a, i)), + static_cast(unwrap(b, i)), scale_b); return; } @@ -456,12 +457,20 @@ public: { processWithRightNullmapImpl(a, b, c, size, right_nullmap, [&scale_a](const auto & left, const auto & right) { - return applyScaledDiv(left, right, scale_a); + return applyScaledDiv( + static_cast(left), right, scale_a); }); return; } - processWithRightNullmapImpl(a, b, c, size, right_nullmap, [](const auto & left, const auto & right){ return apply(left, right); }); + processWithRightNullmapImpl( + a, b, c, size, right_nullmap, + [](const auto & left, const auto & right) + { + return apply( + static_cast(left), + static_cast(right)); + }); } template @@ -899,10 +908,10 @@ class FunctionBinaryArithmetic : public IFunction std::swap(new_arguments[0], new_arguments[1]); /// Change interval argument type to its representation - new_arguments[1].type = std::make_shared>(); + if (WhichDataType(new_arguments[1].type).isInterval()) + new_arguments[1].type = std::make_shared>(); auto function = function_builder->build(new_arguments); - return function->execute(new_arguments, result_type, input_rows_count); } @@ -995,8 +1004,10 @@ class FunctionBinaryArithmetic : public IFunction /// non-vector result if (col_left_const && col_right_const) { - const NativeResultType const_a = helperGetOrConvert(col_left_const, left); - const NativeResultType const_b = helperGetOrConvert(col_right_const, right); + const NativeResultType const_a = static_cast( + helperGetOrConvert(col_left_const, left)); + const NativeResultType const_b = static_cast( + helperGetOrConvert(col_right_const, right)); ResultType res = {}; if (!right_nullmap || !(*right_nullmap)[0]) @@ -1020,14 +1031,16 @@ class FunctionBinaryArithmetic : public IFunction } else if (col_left_const && col_right) { - const NativeResultType const_a = helperGetOrConvert(col_left_const, left); + const NativeResultType const_a = static_cast( + helperGetOrConvert(col_left_const, left)); helperInvokeEither( const_a, col_right->getData(), vec_res, scale_a, scale_b, right_nullmap); } else if (col_left && col_right_const) { - const NativeResultType const_b = helperGetOrConvert(col_right_const, right); + const NativeResultType const_b = static_cast( + helperGetOrConvert(col_right_const, right)); helperInvokeEither( col_left->getData(), const_b, vec_res, scale_a, scale_b, right_nullmap); @@ -1778,25 +1791,32 @@ public: // const +|- variable if (left.column && isColumnConst(*left.column)) { + auto left_type = removeNullable(removeLowCardinality(left.type)); + auto right_type = removeNullable(removeLowCardinality(right.type)); + auto ret_type = removeNullable(removeLowCardinality(return_type)); + auto transform = [&](const Field & point) { ColumnsWithTypeAndName columns_with_constant - = {{left.column->cloneResized(1), left.type, left.name}, - {right.type->createColumnConst(1, point), right.type, right.name}}; + 
= {{left_type->createColumnConst(1, (*left.column)[0]), left_type, left.name}, + {right_type->createColumnConst(1, point), right_type, right.name}}; - auto col = Base::executeImpl(columns_with_constant, return_type, 1); + /// This is a bit dangerous to call Base::executeImpl cause it ignores `use Default Implementation For XXX` flags. + /// It was possible to check monotonicity for nullable right type which result to exception. + /// Adding removeNullable above fixes the issue, but some other inconsistency may left. + auto col = Base::executeImpl(columns_with_constant, ret_type, 1); Field point_transformed; col->get(0, point_transformed); return point_transformed; }; - transform(left_point); - transform(right_point); + + bool is_positive_monotonicity = applyVisitor(FieldVisitorAccurateLess(), left_point, right_point) + == applyVisitor(FieldVisitorAccurateLess(), transform(left_point), transform(right_point)); if (name_view == "plus") { // Check if there is an overflow - if (applyVisitor(FieldVisitorAccurateLess(), left_point, right_point) - == applyVisitor(FieldVisitorAccurateLess(), transform(left_point), transform(right_point))) + if (is_positive_monotonicity) return {true, true, false, true}; else return {false, true, false, false}; @@ -1804,8 +1824,7 @@ public: else { // Check if there is an overflow - if (applyVisitor(FieldVisitorAccurateLess(), left_point, right_point) - != applyVisitor(FieldVisitorAccurateLess(), transform(left_point), transform(right_point))) + if (!is_positive_monotonicity) return {true, false, false, true}; else return {false, false, false, false}; @@ -1814,13 +1833,17 @@ public: // variable +|- constant else if (right.column && isColumnConst(*right.column)) { + auto left_type = removeNullable(removeLowCardinality(left.type)); + auto right_type = removeNullable(removeLowCardinality(right.type)); + auto ret_type = removeNullable(removeLowCardinality(return_type)); + auto transform = [&](const Field & point) { ColumnsWithTypeAndName columns_with_constant - = {{left.type->createColumnConst(1, point), left.type, left.name}, - {right.column->cloneResized(1), right.type, right.name}}; + = {{left_type->createColumnConst(1, point), left_type, left.name}, + {right_type->createColumnConst(1, (*right.column)[0]), right_type, right.name}}; - auto col = Base::executeImpl(columns_with_constant, return_type, 1); + auto col = Base::executeImpl(columns_with_constant, ret_type, 1); Field point_transformed; col->get(0, point_transformed); return point_transformed; diff --git a/src/Functions/FunctionDateOrDateTimeAddInterval.h b/src/Functions/FunctionDateOrDateTimeAddInterval.h index 29dcf87316d..2259cc71f07 100644 --- a/src/Functions/FunctionDateOrDateTimeAddInterval.h +++ b/src/Functions/FunctionDateOrDateTimeAddInterval.h @@ -63,7 +63,7 @@ struct AddNanosecondsImpl static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl &, UInt16 = 0) { Int64 multiplier = DecimalUtils::scaleMultiplier(9); - return t * multiplier + delta; + return static_cast(t * multiplier + delta); } static inline NO_SANITIZE_UNDEFINED DateTime64 execute(UInt16, Int64, const DateLUTImpl &, UInt16 = 0) @@ -107,7 +107,7 @@ struct AddMicrosecondsImpl static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl &, UInt16 = 0) { Int64 multiplier = DecimalUtils::scaleMultiplier(6); - return t * multiplier + delta; + return static_cast(t * multiplier + delta); } static inline NO_SANITIZE_UNDEFINED DateTime64 execute(UInt16, Int64, const DateLUTImpl &, 
UInt16 = 0) @@ -151,7 +151,7 @@ struct AddMillisecondsImpl static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl &, UInt16 = 0) { Int64 multiplier = DecimalUtils::scaleMultiplier(3); - return t * multiplier + delta; + return static_cast(t * multiplier + delta); } static inline NO_SANITIZE_UNDEFINED DateTime64 execute(UInt16, Int64, const DateLUTImpl &, UInt16 = 0) @@ -183,7 +183,7 @@ struct AddSecondsImpl static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl &, UInt16 = 0) { - return t + delta; + return static_cast(t + delta); } static inline NO_SANITIZE_UNDEFINED Int64 execute(Int32 d, Int64 delta, const DateLUTImpl & time_zone, UInt16 = 0) @@ -194,7 +194,7 @@ struct AddSecondsImpl static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt16 d, Int64 delta, const DateLUTImpl & time_zone, UInt16 = 0) { - return time_zone.fromDayNum(DayNum(d)) + delta; + return static_cast(time_zone.fromDayNum(DayNum(d)) + delta); } }; @@ -216,7 +216,7 @@ struct AddMinutesImpl static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl &, UInt16 = 0) { - return t + delta * 60; + return static_cast(t + delta * 60); } static inline NO_SANITIZE_UNDEFINED Int64 execute(Int32 d, Int64 delta, const DateLUTImpl & time_zone, UInt16 = 0) @@ -227,7 +227,7 @@ struct AddMinutesImpl static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt16 d, Int64 delta, const DateLUTImpl & time_zone, UInt16 = 0) { - return time_zone.fromDayNum(DayNum(d)) + delta * 60; + return static_cast(time_zone.fromDayNum(DayNum(d)) + delta * 60); } }; @@ -249,7 +249,7 @@ struct AddHoursImpl static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl &, UInt16 = 0) { - return t + delta * 3600; + return static_cast(t + delta * 3600); } static inline NO_SANITIZE_UNDEFINED Int64 execute(Int32 d, Int64 delta, const DateLUTImpl & time_zone, UInt16 = 0) @@ -260,7 +260,7 @@ struct AddHoursImpl static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt16 d, Int64 delta, const DateLUTImpl & time_zone, UInt16 = 0) { - return time_zone.fromDayNum(DayNum(d)) + delta * 3600; + return static_cast(time_zone.fromDayNum(DayNum(d)) + delta * 3600); } }; @@ -284,7 +284,7 @@ struct AddDaysImpl static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl & time_zone, UInt16 = 0) { - return time_zone.addDays(t, delta); + return static_cast(time_zone.addDays(t, delta)); } static inline NO_SANITIZE_UNDEFINED UInt16 execute(UInt16 d, Int64 delta, const DateLUTImpl &, UInt16 = 0) @@ -294,7 +294,7 @@ struct AddDaysImpl static inline NO_SANITIZE_UNDEFINED Int32 execute(Int32 d, Int64 delta, const DateLUTImpl &, UInt16 = 0) { - return d + delta; + return static_cast(d + delta); } }; @@ -303,32 +303,32 @@ struct AddWeeksImpl static constexpr auto name = "addWeeks"; static inline NO_SANITIZE_UNDEFINED DecimalUtils::DecimalComponents - execute(DecimalUtils::DecimalComponents t, Int32 delta, const DateLUTImpl & time_zone, UInt16 = 0) + execute(DecimalUtils::DecimalComponents t, Int64 delta, const DateLUTImpl & time_zone, UInt16 = 0) { return {time_zone.addWeeks(t.whole, delta), t.fractional}; } static inline NO_SANITIZE_UNDEFINED DateTime64 - execute(DateTime64 t, Int32 delta, const DateLUTImpl & time_zone, UInt16 scale = 0) + execute(DateTime64 t, Int64 delta, const DateLUTImpl & time_zone, UInt16 scale = 0) { auto multiplier = DecimalUtils::scaleMultiplier(scale); auto d = std::div(t, multiplier); return 
time_zone.addDays(d.quot, delta * 7) * multiplier + d.rem; } - static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt32 t, Int32 delta, const DateLUTImpl & time_zone, UInt16 = 0) + static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl & time_zone, UInt16 = 0) { - return time_zone.addWeeks(t, delta); + return static_cast(time_zone.addWeeks(t, delta)); } - static inline NO_SANITIZE_UNDEFINED UInt16 execute(UInt16 d, Int32 delta, const DateLUTImpl &, UInt16 = 0) + static inline NO_SANITIZE_UNDEFINED UInt16 execute(UInt16 d, Int64 delta, const DateLUTImpl &, UInt16 = 0) { - return d + delta * 7; + return static_cast(d + delta * 7); } - static inline NO_SANITIZE_UNDEFINED Int32 execute(Int32 d, Int32 delta, const DateLUTImpl &, UInt16 = 0) + static inline NO_SANITIZE_UNDEFINED Int32 execute(Int32 d, Int64 delta, const DateLUTImpl &, UInt16 = 0) { - return d + delta * 7; + return static_cast(d + delta * 7); } }; @@ -352,7 +352,7 @@ struct AddMonthsImpl static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl & time_zone, UInt16 = 0) { - return time_zone.addMonths(t, delta); + return static_cast(time_zone.addMonths(t, delta)); } static inline NO_SANITIZE_UNDEFINED UInt16 execute(UInt16 d, Int64 delta, const DateLUTImpl & time_zone, UInt16 = 0) @@ -371,30 +371,30 @@ struct AddQuartersImpl static constexpr auto name = "addQuarters"; static inline DecimalUtils::DecimalComponents - execute(DecimalUtils::DecimalComponents t, Int32 delta, const DateLUTImpl & time_zone, UInt16 = 0) + execute(DecimalUtils::DecimalComponents t, Int64 delta, const DateLUTImpl & time_zone, UInt16 = 0) { return {time_zone.addQuarters(t.whole, delta), t.fractional}; } static inline NO_SANITIZE_UNDEFINED DateTime64 - execute(DateTime64 t, Int32 delta, const DateLUTImpl & time_zone, UInt16 scale = 0) + execute(DateTime64 t, Int64 delta, const DateLUTImpl & time_zone, UInt16 scale = 0) { auto multiplier = DecimalUtils::scaleMultiplier(scale); auto d = std::div(t, multiplier); return time_zone.addQuarters(d.quot, delta) * multiplier + d.rem; } - static inline UInt32 execute(UInt32 t, Int32 delta, const DateLUTImpl & time_zone, UInt16 = 0) + static inline UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl & time_zone, UInt16 = 0) { - return time_zone.addQuarters(t, delta); + return static_cast(time_zone.addQuarters(t, delta)); } - static inline UInt16 execute(UInt16 d, Int32 delta, const DateLUTImpl & time_zone, UInt16 = 0) + static inline UInt16 execute(UInt16 d, Int64 delta, const DateLUTImpl & time_zone, UInt16 = 0) { return time_zone.addQuarters(DayNum(d), delta); } - static inline Int32 execute(Int32 d, Int32 delta, const DateLUTImpl & time_zone, UInt16 = 0) + static inline Int32 execute(Int32 d, Int64 delta, const DateLUTImpl & time_zone, UInt16 = 0) { return time_zone.addQuarters(ExtendedDayNum(d), delta); } @@ -420,7 +420,7 @@ struct AddYearsImpl static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl & time_zone, UInt16 = 0) { - return time_zone.addYears(t, delta); + return static_cast(time_zone.addYears(t, delta)); } static inline NO_SANITIZE_UNDEFINED UInt16 execute(UInt16 d, Int64 delta, const DateLUTImpl & time_zone, UInt16 = 0) diff --git a/src/Functions/FunctionIfBase.h b/src/Functions/FunctionIfBase.h index 2b0f8289914..4c9ecf78a12 100644 --- a/src/Functions/FunctionIfBase.h +++ b/src/Functions/FunctionIfBase.h @@ -67,7 +67,7 @@ public: b.SetInsertPoint(join); - auto * phi = b.CreatePHI(toNativeType(b, 
return_type), returns.size()); + auto * phi = b.CreatePHI(toNativeType(b, return_type), static_cast(returns.size())); for (const auto & [block, value] : returns) phi->addIncoming(value, block); diff --git a/src/Functions/FunctionSQLJSON.h b/src/Functions/FunctionSQLJSON.h index dce953ddc6f..3a0f4f483b3 100644 --- a/src/Functions/FunctionSQLJSON.h +++ b/src/Functions/FunctionSQLJSON.h @@ -164,7 +164,7 @@ public: /// 2. Create ASTPtr /// 3. Parser(Tokens, ASTPtr) -> complete AST /// 4. Execute functions: call getNextItem on generator and handle each item - uint32_t parse_depth = getContext()->getSettingsRef().max_parser_depth; + unsigned parse_depth = static_cast(getContext()->getSettingsRef().max_parser_depth); #if USE_SIMDJSON if (getContext()->getSettingsRef().allow_simdjson) return FunctionSQLJSONHelpers::Executor::run(arguments, result_type, input_rows_count, parse_depth); diff --git a/src/Functions/FunctionSnowflake.h b/src/Functions/FunctionSnowflake.h index f4a62e509ed..998db98890a 100644 --- a/src/Functions/FunctionSnowflake.h +++ b/src/Functions/FunctionSnowflake.h @@ -109,7 +109,8 @@ public: for (size_t i = 0; i < input_rows_count; ++i) { - result_data[i] = ((source_data[i] >> time_shift) + snowflake_epoch) / 1000; + result_data[i] = static_cast( + ((source_data[i] >> time_shift) + snowflake_epoch) / 1000); } return res_column; } diff --git a/src/Functions/FunctionUnixTimestamp64.h b/src/Functions/FunctionUnixTimestamp64.h index 8c248d79c4b..d869ccccca8 100644 --- a/src/Functions/FunctionUnixTimestamp64.h +++ b/src/Functions/FunctionUnixTimestamp64.h @@ -57,7 +57,7 @@ public: const auto & source_data = typeid_cast &>(col).getData(); - const Int32 scale_diff = typeid_cast(*src.type).getScale() - target_scale; + const Int32 scale_diff = static_cast(typeid_cast(*src.type).getScale() - target_scale); if (scale_diff == 0) { for (size_t i = 0; i < input_rows_count; ++i) @@ -140,7 +140,7 @@ public: ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { - auto result_column = ColumnDecimal::create(input_rows_count, target_scale); + auto result_column = ColumnDecimal::create(input_rows_count, static_cast(target_scale)); if (!((executeType(result_column, arguments, input_rows_count)) || (executeType(result_column, arguments, input_rows_count)) diff --git a/src/Functions/FunctionsAES.h b/src/Functions/FunctionsAES.h index 94ff55d180b..ad129a315b3 100644 --- a/src/Functions/FunctionsAES.h +++ b/src/Functions/FunctionsAES.h @@ -2,6 +2,7 @@ #include "config.h" +#include #include #include #include @@ -336,7 +337,7 @@ private: if (EVP_EncryptInit_ex(evp_ctx, evp_cipher, nullptr, nullptr, nullptr) != 1) onError("Failed to initialize encryption context with cipher"); - if (EVP_CIPHER_CTX_ctrl(evp_ctx, EVP_CTRL_AEAD_SET_IVLEN, iv_value.size, nullptr) != 1) + if (EVP_CIPHER_CTX_ctrl(evp_ctx, EVP_CTRL_AEAD_SET_IVLEN, safe_cast(iv_value.size), nullptr) != 1) onError("Failed to set custom IV length to " + std::to_string(iv_value.size)); if (EVP_EncryptInit_ex(evp_ctx, nullptr, nullptr, @@ -350,7 +351,7 @@ private: const auto aad_data = aad_column->getDataAt(row_idx); int tmp_len = 0; if (aad_data.size != 0 && EVP_EncryptUpdate(evp_ctx, nullptr, &tmp_len, - reinterpret_cast(aad_data.data), aad_data.size) != 1) + reinterpret_cast(aad_data.data), safe_cast(aad_data.size)) != 1) onError("Failed to set AAD data"); } } @@ -636,7 +637,7 @@ private: onError("Failed to initialize cipher context 1"); // 1.a.1 : Set custom IV length - if 
(EVP_CIPHER_CTX_ctrl(evp_ctx, EVP_CTRL_AEAD_SET_IVLEN, iv_value.size, nullptr) != 1) + if (EVP_CIPHER_CTX_ctrl(evp_ctx, EVP_CTRL_AEAD_SET_IVLEN, safe_cast(iv_value.size), nullptr) != 1) onError("Failed to set custom IV length to " + std::to_string(iv_value.size)); // 1.a.1 : Init CTX with key and IV @@ -651,7 +652,7 @@ private: StringRef aad_data = aad_column->getDataAt(row_idx); int tmp_len = 0; if (aad_data.size != 0 && EVP_DecryptUpdate(evp_ctx, nullptr, &tmp_len, - reinterpret_cast(aad_data.data), aad_data.size) != 1) + reinterpret_cast(aad_data.data), safe_cast(aad_data.size)) != 1) onError("Failed to sed AAD data"); } } diff --git a/src/Functions/FunctionsCodingUUID.cpp b/src/Functions/FunctionsCodingUUID.cpp index 9309c4cdbeb..f6dacc77045 100644 --- a/src/Functions/FunctionsCodingUUID.cpp +++ b/src/Functions/FunctionsCodingUUID.cpp @@ -13,36 +13,151 @@ #include #include -namespace DB -{ +#include -namespace ErrorCodes +namespace DB::ErrorCodes { - extern const int ILLEGAL_TYPE_OF_ARGUMENT; - extern const int ILLEGAL_COLUMN; +extern const int ARGUMENT_OUT_OF_BOUND; +extern const int ILLEGAL_COLUMN; +extern const int ILLEGAL_TYPE_OF_ARGUMENT; +extern const int LOGICAL_ERROR; +extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; } +namespace +{ +enum class Representation +{ + BigEndian, + LittleEndian +}; + +std::pair determineBinaryStartIndexWithIncrement(const ptrdiff_t num_bytes, const Representation representation) +{ + if (representation == Representation::BigEndian) + return {0, 1}; + else if (representation == Representation::LittleEndian) + return {num_bytes - 1, -1}; + + throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "{} is not handled yet", magic_enum::enum_name(representation)); +} + +void formatHex(const std::span src, UInt8 * dst, const Representation representation) +{ + const auto src_size = std::ssize(src); + const auto [src_start_index, src_increment] = determineBinaryStartIndexWithIncrement(src_size, representation); + for (int src_pos = src_start_index, dst_pos = 0; src_pos >= 0 && src_pos < src_size; src_pos += src_increment, dst_pos += 2) + writeHexByteLowercase(src[src_pos], dst + dst_pos); +} + +void parseHex(const UInt8 * __restrict src, const std::span dst, const Representation representation) +{ + const auto dst_size = std::ssize(dst); + const auto [dst_start_index, dst_increment] = determineBinaryStartIndexWithIncrement(dst_size, representation); + const auto * src_as_char = reinterpret_cast(src); + for (auto dst_pos = dst_start_index, src_pos = 0; dst_pos >= 0 && dst_pos < dst_size; dst_pos += dst_increment, src_pos += 2) + dst[dst_pos] = unhex2(src_as_char + src_pos); +} + +class UUIDSerializer +{ +public: + enum class Variant + { + Default = 1, + Microsoft = 2 + }; + + explicit UUIDSerializer(const Variant variant) + : first_half_binary_representation(variant == Variant::Microsoft ? 
Representation::LittleEndian : Representation::BigEndian) + { + if (variant != Variant::Default && variant != Variant::Microsoft) + throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "{} is not handled yet", magic_enum::enum_name(variant)); + } + + void deserialize(const UInt8 * src16, UInt8 * dst36) const + { + formatHex({src16, 4}, &dst36[0], first_half_binary_representation); + dst36[8] = '-'; + formatHex({src16 + 4, 2}, &dst36[9], first_half_binary_representation); + dst36[13] = '-'; + formatHex({src16 + 6, 2}, &dst36[14], first_half_binary_representation); + dst36[18] = '-'; + formatHex({src16 + 8, 2}, &dst36[19], Representation::BigEndian); + dst36[23] = '-'; + formatHex({src16 + 10, 6}, &dst36[24], Representation::BigEndian); + } + + void serialize(const UInt8 * src36, UInt8 * dst16) const + { + /// If string is not like UUID - implementation specific behaviour. + parseHex(&src36[0], {dst16 + 0, 4}, first_half_binary_representation); + parseHex(&src36[9], {dst16 + 4, 2}, first_half_binary_representation); + parseHex(&src36[14], {dst16 + 6, 2}, first_half_binary_representation); + parseHex(&src36[19], {dst16 + 8, 2}, Representation::BigEndian); + parseHex(&src36[24], {dst16 + 10, 6}, Representation::BigEndian); + } + +private: + Representation first_half_binary_representation; +}; + +void checkArgumentCount(const DB::DataTypes & arguments, const std::string_view function_name) +{ + if (const auto argument_count = std::ssize(arguments); argument_count < 1 || argument_count > 2) + throw DB::Exception( + DB::ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Number of arguments for function {} doesn't match: passed {}, should be 1 or 2", + function_name, + argument_count); +} + +void checkFormatArgument(const DB::DataTypes & arguments, const std::string_view function_name) +{ + if (const auto argument_count = std::ssize(arguments); + argument_count > 1 && !DB::WhichDataType(arguments[1]).isInt8() && !DB::WhichDataType(arguments[1]).isUInt8()) + throw DB::Exception( + DB::ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of second argument of function {}, expected Int8 or UInt8 type", + arguments[1]->getName(), + function_name); +} + +UUIDSerializer::Variant parseVariant(const DB::ColumnsWithTypeAndName & arguments) +{ + if (arguments.size() < 2) + return UUIDSerializer::Variant::Default; + + const auto representation = static_cast>(arguments[1].column->getInt(0)); + const auto as_enum = magic_enum::enum_cast(representation); + if (!as_enum) + throw DB::Exception(DB::ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Expected UUID variant, got {}", representation); + + return *as_enum; +} +} + +namespace DB +{ constexpr size_t uuid_bytes_length = 16; constexpr size_t uuid_text_length = 36; class FunctionUUIDNumToString : public IFunction { - public: static constexpr auto name = "UUIDNumToString"; static FunctionPtr create(ContextPtr) { return std::make_shared(); } - String getName() const override - { - return name; - } - - size_t getNumberOfArguments() const override { return 1; } + String getName() const override { return name; } + size_t getNumberOfArguments() const override { return 0; } bool isInjective(const ColumnsWithTypeAndName &) const override { return true; } bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; } + bool isVariadic() const override { return true; } DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override { + checkArgumentCount(arguments, name); + const auto * ptr = 
checkAndGetDataType(arguments[0].get()); if (!ptr || ptr->getN() != uuid_bytes_length) throw Exception("Illegal type " + arguments[0]->getName() + @@ -50,6 +165,8 @@ public: ", expected FixedString(" + toString(uuid_bytes_length) + ")", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + checkFormatArgument(arguments, name); + return std::make_shared(); } @@ -59,7 +176,7 @@ public: { const ColumnWithTypeAndName & col_type_name = arguments[0]; const ColumnPtr & column = col_type_name.column; - + const auto variant = parseVariant(arguments); if (const auto * col_in = checkAndGetColumn(column.get())) { if (col_in->getN() != uuid_bytes_length) @@ -82,9 +199,10 @@ public: size_t src_offset = 0; size_t dst_offset = 0; + const UUIDSerializer uuid_serializer(variant); for (size_t i = 0; i < size; ++i) { - formatUUID(&vec_in[src_offset], &vec_res[dst_offset]); + uuid_serializer.deserialize(&vec_in[src_offset], &vec_res[dst_offset]); src_offset += uuid_bytes_length; dst_offset += uuid_text_length; vec_res[dst_offset] = 0; @@ -104,55 +222,33 @@ public: class FunctionUUIDStringToNum : public IFunction { -private: - static void parseHex(const UInt8 * __restrict src, UInt8 * __restrict dst, const size_t num_bytes) - { - size_t src_pos = 0; - size_t dst_pos = 0; - for (; dst_pos < num_bytes; ++dst_pos) - { - dst[dst_pos] = unhex2(reinterpret_cast(&src[src_pos])); - src_pos += 2; - } - } - - static void parseUUID(const UInt8 * src36, UInt8 * dst16) - { - /// If string is not like UUID - implementation specific behaviour. - - parseHex(&src36[0], &dst16[0], 4); - parseHex(&src36[9], &dst16[4], 2); - parseHex(&src36[14], &dst16[6], 2); - parseHex(&src36[19], &dst16[8], 2); - parseHex(&src36[24], &dst16[10], 6); - } - public: static constexpr auto name = "UUIDStringToNum"; static FunctionPtr create(ContextPtr) { return std::make_shared(); } - String getName() const override - { - return name; - } - - size_t getNumberOfArguments() const override { return 1; } + String getName() const override { return name; } + size_t getNumberOfArguments() const override { return 0; } bool isInjective(const ColumnsWithTypeAndName &) const override { return true; } bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; } + bool isVariadic() const override { return true; } DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override { + checkArgumentCount(arguments, name); + /// String or FixedString(36) if (!isString(arguments[0])) { const auto * ptr = checkAndGetDataType(arguments[0].get()); if (!ptr || ptr->getN() != uuid_text_length) throw Exception("Illegal type " + arguments[0]->getName() + - " of argument of function " + getName() + + " of first argument of function " + getName() + ", expected FixedString(" + toString(uuid_text_length) + ")", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); } + checkFormatArgument(arguments, name); + return std::make_shared(uuid_bytes_length); } @@ -163,6 +259,7 @@ public: const ColumnWithTypeAndName & col_type_name = arguments[0]; const ColumnPtr & column = col_type_name.column; + const UUIDSerializer uuid_serializer(parseVariant(arguments)); if (const auto * col_in = checkAndGetColumn(column.get())) { const auto & vec_in = col_in->getChars(); @@ -184,7 +281,7 @@ public: size_t string_size = offsets_in[i] - src_offset; if (string_size == uuid_text_length + 1) - parseUUID(&vec_in[src_offset], &vec_res[dst_offset]); + uuid_serializer.serialize(&vec_in[src_offset], &vec_res[dst_offset]); else memset(&vec_res[dst_offset], 0, 
uuid_bytes_length); @@ -216,7 +313,7 @@ public: for (size_t i = 0; i < size; ++i) { - parseUUID(&vec_in[src_offset], &vec_res[dst_offset]); + uuid_serializer.serialize(&vec_in[src_offset], &vec_res[dst_offset]); src_offset += uuid_text_length; dst_offset += uuid_bytes_length; } diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index 8cbe3b0e532..dd494d821bf 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -101,7 +101,7 @@ inline UInt32 extractToDecimalScale(const ColumnWithTypeAndName & named_column) Field field; named_column.column->get(0, field); - return field.get(); + return static_cast(field.get()); } /// Function toUnixTimestamp has exactly the same implementation as toDateTime of String type. @@ -302,11 +302,6 @@ struct ConvertImpl } }; -/** Conversion of Date32 to Date: check bounds. - */ -template struct ConvertImpl - : DateTimeTransformImpl {}; - /** Conversion of DateTime to Date: throw off time component. */ template struct ConvertImpl @@ -325,17 +320,12 @@ struct ToDateTimeImpl static UInt32 execute(UInt16 d, const DateLUTImpl & time_zone) { - auto date_time = time_zone.fromDayNum(ExtendedDayNum(d)); - return date_time <= 0xffffffff ? UInt32(date_time) : UInt32(0xffffffff); + return static_cast(time_zone.fromDayNum(DayNum(d))); } - static UInt32 execute(Int32 d, const DateLUTImpl & time_zone) + static Int64 execute(Int32 d, const DateLUTImpl & time_zone) { - if (d < 0) - return 0; - - auto date_time = time_zone.fromDayNum(ExtendedDayNum(d)); - return date_time <= 0xffffffff ? date_time : 0xffffffff; + return time_zone.fromDayNum(ExtendedDayNum(d)); } static UInt32 execute(UInt32 dt, const DateLUTImpl & /*time_zone*/) @@ -343,21 +333,10 @@ struct ToDateTimeImpl return dt; } - static UInt32 execute(Int64 d, const DateLUTImpl & time_zone) + // TODO: return UInt32 ??? + static Int64 execute(Int64 dt64, const DateLUTImpl & /*time_zone*/) { - if (d < 0) - return 0; - - auto date_time = time_zone.toDate(d); - return date_time <= 0xffffffff ? date_time : 0xffffffff; - } - - static UInt32 execute(const DecimalUtils::DecimalComponents & t, const DateLUTImpl & /*time_zone*/) - { - if (t.whole < 0 || (t.whole >= 0 && t.fractional < 0)) - return 0; - - return std::min(t.whole, Int64(0xFFFFFFFF)); + return dt64; } }; @@ -377,12 +356,9 @@ struct ToDateTransform32Or64 static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone) { // since converting to Date, no need in values outside of default LUT range. - if (from < 0) - return 0; - return (from < DATE_LUT_MAX_DAY_NUM) ? from - : std::min(Int32(time_zone.toDayNum(from)), Int32(DATE_LUT_MAX_DAY_NUM)); + : time_zone.toDayNum(std::min(time_t(from), time_t(0xFFFFFFFF))); } }; @@ -397,14 +373,9 @@ struct ToDateTransform32Or64Signed /// The function should be monotonic (better for query optimizations), so we saturate instead of overflow. if (from < 0) return 0; - - auto day_num = time_zone.toDayNum(ExtendedDayNum(static_cast(from))); - return day_num < DATE_LUT_MAX_DAY_NUM ? day_num : DATE_LUT_MAX_DAY_NUM; - return (from < DATE_LUT_MAX_DAY_NUM) - ? from - : std::min(Int32(time_zone.toDayNum(static_cast(from))), Int32(0xFFFFFFFF)); - + ? static_cast(from) + : time_zone.toDayNum(std::min(time_t(from), time_t(0xFFFFFFFF))); } }; @@ -434,8 +405,8 @@ struct ToDate32Transform32Or64 static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone) { return (from < DATE_LUT_MAX_EXTEND_DAY_NUM) - ? 
from - : std::min(Int32(time_zone.toDayNum(from)), Int32(DATE_LUT_MAX_EXTEND_DAY_NUM)); + ? static_cast(from) + : time_zone.toDayNum(std::min(time_t(from), time_t(0xFFFFFFFF))); } }; @@ -451,7 +422,7 @@ struct ToDate32Transform32Or64Signed return daynum_min_offset; return (from < DATE_LUT_MAX_EXTEND_DAY_NUM) ? static_cast(from) - : time_zone.toDayNum(std::min(Int64(from), Int64(0xFFFFFFFF))); + : time_zone.toDayNum(std::min(time_t(Int64(from)), time_t(0xFFFFFFFF))); } }; @@ -477,49 +448,35 @@ struct ToDate32Transform8Or16Signed */ template struct ConvertImpl : DateTimeTransformImpl> {}; - template struct ConvertImpl : DateTimeTransformImpl> {}; - template struct ConvertImpl : DateTimeTransformImpl> {}; - template struct ConvertImpl : DateTimeTransformImpl> {}; - template struct ConvertImpl : DateTimeTransformImpl> {}; - template struct ConvertImpl : DateTimeTransformImpl> {}; - template struct ConvertImpl : DateTimeTransformImpl> {}; - template struct ConvertImpl : DateTimeTransformImpl> {}; template struct ConvertImpl : DateTimeTransformImpl> {}; - template struct ConvertImpl : DateTimeTransformImpl> {}; - template struct ConvertImpl : DateTimeTransformImpl> {}; - template struct ConvertImpl : DateTimeTransformImpl> {}; - template struct ConvertImpl : DateTimeTransformImpl> {}; - template struct ConvertImpl : DateTimeTransformImpl> {}; - template struct ConvertImpl : DateTimeTransformImpl> {}; - template struct ConvertImpl : DateTimeTransformImpl> {}; @@ -531,7 +488,7 @@ struct ToDateTimeTransform64 static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl &) { - return std::min(Int64(from), Int64(0xFFFFFFFF)); + return static_cast(std::min(time_t(from), time_t(0xFFFFFFFF))); } }; @@ -553,12 +510,11 @@ struct ToDateTimeTransform64Signed { static constexpr auto name = "toDateTime"; - static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & /* time_zone */) + static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl &) { if (from < 0) return 0; - - return std::min(Int64(from), Int64(0xFFFFFFFF)); + return static_cast(std::min(time_t(from), time_t(0xFFFFFFFF))); } }; @@ -618,8 +574,8 @@ struct ToDateTime64TransformSigned NO_SANITIZE_UNDEFINED DateTime64::NativeType execute(FromType from, const DateLUTImpl &) const { - from = std::max(from, LUT_MIN_TIME); - from = std::min(from, LUT_MAX_TIME); + from = static_cast(std::max(from, LUT_MIN_TIME)); + from = static_cast(std::min(from, LUT_MAX_TIME)); return DecimalUtils::decimalFromComponentsWithMultiplier(from, 0, scale_multiplier); } }; @@ -678,6 +634,8 @@ struct FromDateTime64Transform } }; +/** Conversion of DateTime64 to Date or DateTime: discards fractional part. 
+ */ template struct ConvertImpl : DateTimeTransformImpl> {}; template struct ConvertImpl @@ -701,7 +659,7 @@ struct ToDateTime64Transform DateTime64::NativeType execute(Int32 d, const DateLUTImpl & time_zone) const { - const auto dt = time_zone.fromDayNum(ExtendedDayNum(d)); + const auto dt = ToDateTimeImpl::execute(d, time_zone); return DecimalUtils::decimalFromComponentsWithMultiplier(dt, 0, scale_multiplier); } @@ -979,7 +937,7 @@ inline void convertFromTime(DataTypeDate::FieldType & x, time_t & template <> inline void convertFromTime(DataTypeDate32::FieldType & x, time_t & time) { - x = time; + x = static_cast(time); } template <> @@ -990,7 +948,7 @@ inline void convertFromTime(DataTypeDateTime::FieldType & x, t else if (unlikely(time > 0xFFFFFFFF)) x = 0xFFFFFFFF; else - x = time; + x = static_cast(time); } /** Conversion of strings to numbers, dates, datetimes: through parsing. @@ -1070,7 +1028,7 @@ inline bool tryParseImpl(DataTypeDateTime::FieldType & x, Read time_t tmp = 0; if (!tryReadDateTimeText(tmp, rb, *time_zone)) return false; - x = tmp; + x = static_cast(tmp); return true; } @@ -1855,7 +1813,7 @@ private: { /// Account for optional timezone argument. if (arguments.size() != 2 && arguments.size() != 3) - throw Exception{"Function " + getName() + " expects 2 or 3 arguments for DateTime64.", + throw Exception{"Function " + getName() + " expects 2 or 3 arguments for DataTypeDateTime64.", ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION}; } else if (arguments.size() != 2) @@ -2215,6 +2173,10 @@ struct ToNumberMonotonicity const size_t size_of_from = type.getSizeOfValueInMemory(); const size_t size_of_to = sizeof(T); + /// Do not support 128 bit integers and decimals for now. + if (size_of_from > sizeof(Int64)) + return {}; + const bool left_in_first_half = left.isNull() ? 
from_is_unsigned : (left.get() >= 0); @@ -2285,15 +2247,24 @@ struct ToDateMonotonicity { auto which = WhichDataType(type); if (which.isDateOrDate32() || which.isDateTime() || which.isDateTime64() || which.isInt8() || which.isInt16() || which.isUInt8() || which.isUInt16()) + { return { .is_monotonic = true, .is_always_monotonic = true }; + } else if ( - (which.isUInt() && ((left.isNull() || left.get() < 0xFFFF) && (right.isNull() || right.get() >= 0xFFFF))) - || (which.isInt() && ((left.isNull() || left.get() < 0xFFFF) && (right.isNull() || right.get() >= 0xFFFF))) - || (which.isFloat() && ((left.isNull() || left.get() < 0xFFFF) && (right.isNull() || right.get() >= 0xFFFF))) - || !type.isValueRepresentedByNumber()) + ((left.getType() == Field::Types::UInt64 || left.isNull()) && (right.getType() == Field::Types::UInt64 || right.isNull()) + && ((left.isNull() || left.get() < 0xFFFF) && (right.isNull() || right.get() >= 0xFFFF))) + || ((left.getType() == Field::Types::Int64 || left.isNull()) && (right.getType() == Field::Types::Int64 || right.isNull()) + && ((left.isNull() || left.get() < 0xFFFF) && (right.isNull() || right.get() >= 0xFFFF))) + || (((left.getType() == Field::Types::Float64 || left.isNull()) && (right.getType() == Field::Types::Float64 || right.isNull()) + && ((left.isNull() || left.get() < 0xFFFF) && (right.isNull() || right.get() >= 0xFFFF)))) + || !isNativeNumber(type)) + { return {}; + } else + { return { .is_monotonic = true, .is_always_monotonic = true }; + } } }; @@ -3389,9 +3360,8 @@ private: { return [] (ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable * nullable_source, size_t input_rows_count) { - auto res = ConvertImplGenericFromString::execute(arguments, result_type, nullable_source, input_rows_count); - auto & res_object = assert_cast(res->assumeMutableRef()); - res_object.finalize(); + auto res = ConvertImplGenericFromString::execute(arguments, result_type, nullable_source, input_rows_count)->assumeMutable(); + res->finalize(); return res; }; } diff --git a/src/Functions/FunctionsHashing.h b/src/Functions/FunctionsHashing.h index 33b6b075ebe..ec0a489471b 100644 --- a/src/Functions/FunctionsHashing.h +++ b/src/Functions/FunctionsHashing.h @@ -15,6 +15,7 @@ #include #include +#include #include #include @@ -636,10 +637,10 @@ struct ImplBLAKE3 static void apply(const char * begin, const size_t size, unsigned char* out_char_data) { #if defined(MEMORY_SANITIZER) - auto err_msg = blake3_apply_shim_msan_compat(begin, size, out_char_data); + auto err_msg = blake3_apply_shim_msan_compat(begin, safe_cast(size), out_char_data); __msan_unpoison(out_char_data, length); #else - auto err_msg = blake3_apply_shim(begin, size, out_char_data); + auto err_msg = blake3_apply_shim(begin, safe_cast(size), out_char_data); #endif if (err_msg != nullptr) { diff --git a/src/Functions/FunctionsJSON.cpp b/src/Functions/FunctionsJSON.cpp index aefc82d2f5d..c856419c9e8 100644 --- a/src/Functions/FunctionsJSON.cpp +++ b/src/Functions/FunctionsJSON.cpp @@ -25,7 +25,6 @@ #include #include #include -#include #include #include #include @@ -40,6 +39,7 @@ #include #include +#include #include @@ -191,7 +191,7 @@ private: for (const auto i : collections::range(first_index_argument, first_index_argument + num_index_arguments)) { const auto & column = columns[i]; - if (!isString(column.type) && !isInteger(column.type)) + if (!isString(column.type) && !isNativeInteger(column.type)) throw Exception{"The argument " + std::to_string(i + 1) + " of function " + 
String(function_name) + " should be a string specifying key or an integer specifying index, illegal type: " + column.type->getName(), ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT}; @@ -231,7 +231,7 @@ private: { case MoveType::ConstIndex: { - if (!moveToElementByIndex(res_element, moves[j].index, key)) + if (!moveToElementByIndex(res_element, static_cast(moves[j].index), key)) return false; break; } @@ -245,7 +245,7 @@ private: case MoveType::Index: { Int64 index = (*arguments[j + 1].column)[row].get(); - if (!moveToElementByIndex(res_element, index, key)) + if (!moveToElementByIndex(res_element, static_cast(index), key)) return false; break; } @@ -623,24 +623,32 @@ public: static bool insertResultToColumn(IColumn & dest, const Element & element, std::string_view) { UInt8 type; - if (element.isInt64()) - type = 'i'; - else if (element.isUInt64()) - type = 'u'; - else if (element.isDouble()) - type = 'd'; - else if (element.isBool()) - type = 'b'; - else if (element.isString()) - type = '"'; - else if (element.isArray()) - type = '['; - else if (element.isObject()) - type = '{'; - else if (element.isNull()) - type = 0; - else - return false; + switch (element.type()) + { + case ElementType::INT64: + type = 'i'; + break; + case ElementType::UINT64: + type = 'u'; + break; + case ElementType::DOUBLE: + type = 'd'; + break; + case ElementType::STRING: + type = '"'; + break; + case ElementType::ARRAY: + type = '['; + break; + case ElementType::OBJECT: + type = '{'; + break; + case ElementType::NULL_VALUE: + type = 0; + break; + default: + return false; + } ColumnVector & col_vec = assert_cast &>(dest); col_vec.insertValue(type); @@ -666,34 +674,51 @@ public: { NumberType value; - if (element.isInt64()) + switch (element.type()) { - if (!accurate::convertNumeric(element.getInt64(), value)) + case ElementType::DOUBLE: + if constexpr (std::is_floating_point_v) + { + /// We permit inaccurate conversion of double to float. + /// Example: double 0.1 from JSON is not representable in float. + /// But it will be more convenient for user to perform conversion. + value = static_cast(element.getDouble()); + } + else if (!accurate::convertNumeric(element.getDouble(), value)) + return false; + break; + case ElementType::UINT64: + if (!accurate::convertNumeric(element.getUInt64(), value)) + return false; + break; + case ElementType::INT64: + if (!accurate::convertNumeric(element.getInt64(), value)) + return false; + break; + case ElementType::BOOL: + if constexpr (is_integer && convert_bool_to_integer) + { + value = static_cast(element.getBool()); + break; + } return false; - } - else if (element.isUInt64()) - { - if (!accurate::convertNumeric(element.getUInt64(), value)) - return false; - } - else if (element.isDouble()) - { - if constexpr (std::is_floating_point_v) - { - /// We permit inaccurate conversion of double to float. - /// Example: double 0.1 from JSON is not representable in float. - /// But it will be more convenient for user to perform conversion. 
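/// A standalone illustration (not ClickHouse code) of the conversion policy shown in the hunk
/// above: a JSON double may be narrowed to float even when the value is not exactly
/// representable (e.g. 0.1), while integer conversions must be exact or the extraction fails.
/// The helper name convert_exact is hypothetical and exists only for this sketch.
#include <cstdint>
#include <iostream>
#include <optional>

/// Exact integer conversion: succeeds only if the value survives a round trip and keeps its sign.
template <typename To, typename From>
std::optional<To> convert_exact(From value)
{
    To narrowed = static_cast<To>(value);
    if (static_cast<From>(narrowed) != value || (narrowed < To{}) != (value < From{}))
        return std::nullopt;
    return narrowed;
}

int main()
{
    double d = 0.1;                           /// not exactly representable as float
    float f = static_cast<float>(d);          /// permitted, lossy by design
    std::cout << (static_cast<double>(f) == d) << '\n';                          /// 0: round trip is lossy
    std::cout << convert_exact<int32_t>(int64_t{1} << 40).has_value() << '\n';   /// 0: out of range, rejected
    std::cout << int{convert_exact<uint8_t>(int64_t{200}).value_or(0)} << '\n';  /// 200: exact, accepted
}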
- value = static_cast(element.getDouble()); + case ElementType::STRING: { + auto rb = ReadBufferFromMemory{element.getString()}; + if constexpr (std::is_floating_point_v) + { + if (!tryReadFloatText(value, rb) || !rb.eof()) + return false; + } + else + { + if (!tryReadIntText(value, rb) || !rb.eof()) + return false; + } + break; } - else if (!accurate::convertNumeric(element.getDouble(), value)) + default: return false; } - else if (element.isBool() && is_integer && convert_bool_to_integer) - { - value = static_cast(element.getBool()); - } - else - return false; auto & col_vec = assert_cast &>(dest); col_vec.insertValue(value); @@ -719,9 +744,25 @@ using JSONExtractInt64Impl = JSONExtractNumericImpl; template using JSONExtractUInt64Impl = JSONExtractNumericImpl; template +using JSONExtractInt128Impl = JSONExtractNumericImpl; +template +using JSONExtractUInt128Impl = JSONExtractNumericImpl; +template +using JSONExtractInt256Impl = JSONExtractNumericImpl; +template +using JSONExtractUInt256Impl = JSONExtractNumericImpl; +template using JSONExtractFloat32Impl = JSONExtractNumericImpl; template using JSONExtractFloat64Impl = JSONExtractNumericImpl; +template +using JSONExtractDecimal32Impl = JSONExtractNumericImpl; +template +using JSONExtractDecimal64Impl = JSONExtractNumericImpl; +template +using JSONExtractDecimal128Impl = JSONExtractNumericImpl; +template +using JSONExtractDecimal256Impl = JSONExtractNumericImpl; template @@ -739,11 +780,22 @@ public: static bool insertResultToColumn(IColumn & dest, const Element & element, std::string_view) { - if (!element.isBool()) - return false; + bool value; + switch (element.type()) + { + case ElementType::BOOL: + value = element.getBool(); + break; + case ElementType::INT64: + case ElementType::UINT64: + value = element.getUInt64() != 0; + break; + default: + return false; + } auto & col_vec = assert_cast &>(dest); - col_vec.insertValue(static_cast(element.getBool())); + col_vec.insertValue(static_cast(value)); return true; } }; @@ -845,12 +897,35 @@ struct JSONExtractTree explicit DecimalNode(DataTypePtr data_type_) : data_type(data_type_) {} bool insertResultToColumn(IColumn & dest, const Element & element) override { - if (!element.isDouble()) - return false; - const auto * type = assert_cast *>(data_type.get()); - auto result = convertToDecimal, DataTypeDecimal>(element.getDouble(), type->getScale()); - assert_cast &>(dest).insert(result); + + DecimalType value{}; + + switch (element.type()) + { + case ElementType::DOUBLE: + value = convertToDecimal, DataTypeDecimal>( + element.getDouble(), type->getScale()); + break; + case ElementType::UINT64: + value = convertToDecimal, DataTypeDecimal>( + element.getUInt64(), type->getScale()); + break; + case ElementType::INT64: + value = convertToDecimal, DataTypeDecimal>( + element.getInt64(), type->getScale()); + break; + case ElementType::STRING: { + auto rb = ReadBufferFromMemory{element.getString()}; + if (!SerializationDecimal::tryReadText(value, rb, DecimalUtils::max_precision, type->getScale())) + return false; + break; + } + default: + return false; + } + + assert_cast &>(dest).insert(value); return true; } private: @@ -1088,10 +1163,14 @@ struct JSONExtractTree case TypeIndex::UInt16: return std::make_unique>(); case TypeIndex::UInt32: return std::make_unique>(); case TypeIndex::UInt64: return std::make_unique>(); + case TypeIndex::UInt128: return std::make_unique>(); + case TypeIndex::UInt256: return std::make_unique>(); case TypeIndex::Int8: return std::make_unique>(); case TypeIndex::Int16: 
return std::make_unique>(); case TypeIndex::Int32: return std::make_unique>(); case TypeIndex::Int64: return std::make_unique>(); + case TypeIndex::Int128: return std::make_unique>(); + case TypeIndex::Int256: return std::make_unique>(); case TypeIndex::Float32: return std::make_unique>(); case TypeIndex::Float64: return std::make_unique>(); case TypeIndex::String: return std::make_unique(); diff --git a/src/Functions/FunctionsLanguageClassification.cpp b/src/Functions/FunctionsLanguageClassification.cpp index ecc958a0a0c..ebdb2f1afaa 100644 --- a/src/Functions/FunctionsLanguageClassification.cpp +++ b/src/Functions/FunctionsLanguageClassification.cpp @@ -83,7 +83,10 @@ struct FunctionDetectLanguageImpl if (UTF8::isValidUTF8(str, str_len)) { - auto lang = CLD2::DetectLanguage(reinterpret_cast(str), str_len, true, &is_reliable); + auto lang = CLD2::DetectLanguage( + reinterpret_cast(str), + static_cast(str_len), + true, &is_reliable); res = codeISO(LanguageCode(lang)); } else @@ -178,7 +181,10 @@ public: if (UTF8::isValidUTF8(str, str_len)) { - CLD2::DetectLanguageSummary(reinterpret_cast(str), str_len, true, result_lang_top3, pc, bytes, &is_reliable); + CLD2::DetectLanguageSummary( + reinterpret_cast(str), + static_cast(str_len), + true, result_lang_top3, pc, bytes, &is_reliable); for (size_t j = 0; j < top_N; ++j) { diff --git a/src/Functions/FunctionsLogical.cpp b/src/Functions/FunctionsLogical.cpp index 2ac7688737f..7e52c55e5b0 100644 --- a/src/Functions/FunctionsLogical.cpp +++ b/src/Functions/FunctionsLogical.cpp @@ -168,10 +168,7 @@ public: inline ResultValueType apply(const size_t i) const { const auto a = !!vec[i]; - if constexpr (Op::isSaturable()) - return Op::isSaturatedValue(a) ? a : Op::apply(a, next.apply(i)); - else - return Op::apply(a, next.apply(i)); + return Op::apply(a, next.apply(i)); } private: diff --git a/src/Functions/FunctionsLogical.h b/src/Functions/FunctionsLogical.h index d7e8ffb0c9f..22471a151d2 100644 --- a/src/Functions/FunctionsLogical.h +++ b/src/Functions/FunctionsLogical.h @@ -193,7 +193,7 @@ public: auto * next = b.GetInsertBlock(); auto * stop = llvm::BasicBlock::Create(next->getContext(), "", next->getParent()); b.SetInsertPoint(stop); - auto * phi = b.CreatePHI(b.getInt8Ty(), values.size()); + auto * phi = b.CreatePHI(b.getInt8Ty(), static_cast(values.size())); for (size_t i = 0; i < types.size(); ++i) { b.SetInsertPoint(next); diff --git a/src/Functions/FunctionsRandom.cpp b/src/Functions/FunctionsRandom.cpp index 96c41225242..6078312537f 100644 --- a/src/Functions/FunctionsRandom.cpp +++ b/src/Functions/FunctionsRandom.cpp @@ -37,7 +37,7 @@ namespace UInt32 next() { current = current * a + c; - return current >> 16; + return static_cast(current >> 16); } }; @@ -124,7 +124,7 @@ void RandImpl::execute(char * output, size_t size) char * end = output + size; constexpr int vec_size = 4; - constexpr int safe_overwrite = 15; + constexpr int safe_overwrite = PADDING_FOR_SIMD - 1; constexpr int bytes_per_write = 4 * sizeof(UInt64x4); UInt64 rand_seed = randomSeed(); diff --git a/src/Functions/FunctionsRound.h b/src/Functions/FunctionsRound.h index ccab6e9feca..283f1ea5a43 100644 --- a/src/Functions/FunctionsRound.h +++ b/src/Functions/FunctionsRound.h @@ -178,7 +178,7 @@ struct IntegerRoundingComputation return; } } - *out = compute(*in, scale); + *out = compute(*in, static_cast(scale)); } static ALWAYS_INLINE void compute(const T * __restrict in, T scale, T * __restrict out) requires(!std::integral) @@ -436,7 +436,7 @@ public: scale_arg = in_scale - 
scale_arg; if (scale_arg > 0) { - auto scale = intExp10OfSize(scale_arg); + auto scale = intExp10OfSize(scale_arg); const NativeType * __restrict p_in = reinterpret_cast(in.data()); const NativeType * end_in = reinterpret_cast(in.data()) + in.size(); diff --git a/src/Functions/FunctionsStringHash.cpp b/src/Functions/FunctionsStringHash.cpp index e7dbe4087f2..949503e2367 100644 --- a/src/Functions/FunctionsStringHash.cpp +++ b/src/Functions/FunctionsStringHash.cpp @@ -35,13 +35,13 @@ struct Hash #ifdef __SSE4_2__ return _mm_crc32_u64(crc, val); #elif defined(__aarch64__) && defined(__ARM_FEATURE_CRC32) - return __crc32cd(crc, val); + return __crc32cd(static_cast(crc), val); #else throw Exception("String hash is not implemented without sse4.2 support", ErrorCodes::NOT_IMPLEMENTED); #endif } - static UInt64 crc32u32(UInt64 crc [[maybe_unused]], UInt32 val [[maybe_unused]]) + static UInt64 crc32u32(UInt32 crc [[maybe_unused]], UInt32 val [[maybe_unused]]) { #ifdef __SSE4_2__ return _mm_crc32_u32(crc, val); @@ -52,7 +52,7 @@ struct Hash #endif } - static UInt64 crc32u16(UInt64 crc [[maybe_unused]], UInt16 val [[maybe_unused]]) + static UInt64 crc32u16(UInt32 crc [[maybe_unused]], UInt16 val [[maybe_unused]]) { #ifdef __SSE4_2__ return _mm_crc32_u16(crc, val); @@ -63,7 +63,7 @@ struct Hash #endif } - static UInt64 crc32u8(UInt64 crc [[maybe_unused]], UInt8 val [[maybe_unused]]) + static UInt64 crc32u8(UInt32 crc [[maybe_unused]], UInt8 val [[maybe_unused]]) { #ifdef __SSE4_2__ return _mm_crc32_u8(crc, val); @@ -84,7 +84,7 @@ struct Hash if constexpr (CaseInsensitive) x |= 0x20u; /// see toLowerIfAlphaASCII from StringUtils.h - crc = crc32u8(crc, x); + crc = crc32u8(static_cast(crc), x); --size; ++start; } @@ -96,7 +96,7 @@ struct Hash if constexpr (CaseInsensitive) x |= 0x2020u; - crc = crc32u16(crc, x); + crc = crc32u16(static_cast(crc), x); size -= 2; start += 2; } @@ -108,7 +108,7 @@ struct Hash if constexpr (CaseInsensitive) x |= 0x20202020u; - crc = crc32u32(crc, x); + crc = crc32u32(static_cast(crc), x); size -= 4; start += 4; } diff --git a/src/Functions/FunctionsTimeWindow.cpp b/src/Functions/FunctionsTimeWindow.cpp index 286ed4a729d..a47fc71c335 100644 --- a/src/Functions/FunctionsTimeWindow.cpp +++ b/src/Functions/FunctionsTimeWindow.cpp @@ -1,6 +1,7 @@ #include #include +#include #include #include #include @@ -157,7 +158,7 @@ struct TimeWindowImpl const auto & interval_column = arguments[1]; const auto & from_datatype = *time_column.type.get(); const auto which_type = WhichDataType(from_datatype); - const auto * time_column_vec = checkAndGetColumn(time_column.column.get()); + const auto * time_column_vec = checkAndGetColumn(time_column.column.get()); const DateLUTImpl & time_zone = extractTimeZoneFromFunctionArguments(arguments, 2, 0); if (!which_type.isDateTime() || !time_column_vec) throw Exception( @@ -198,7 +199,7 @@ struct TimeWindowImpl } template - static ColumnPtr executeTumble(const ColumnUInt32 & time_column, UInt64 num_units, const DateLUTImpl & time_zone) + static ColumnPtr executeTumble(const ColumnDateTime & time_column, UInt64 num_units, const DateLUTImpl & time_zone) { const auto & time_data = time_column.getData(); size_t size = time_column.size(); @@ -342,7 +343,7 @@ struct TimeWindowImpl const auto & hop_interval_column = arguments[1]; const auto & window_interval_column = arguments[2]; const auto & from_datatype = *time_column.type.get(); - const auto * time_column_vec = checkAndGetColumn(time_column.column.get()); + const auto * time_column_vec = 
checkAndGetColumn(time_column.column.get()); const DateLUTImpl & time_zone = extractTimeZoneFromFunctionArguments(arguments, 3, 0); if (!WhichDataType(from_datatype).isDateTime() || !time_column_vec) throw Exception( @@ -402,7 +403,7 @@ struct TimeWindowImpl template static ColumnPtr - executeHop(const ColumnUInt32 & time_column, UInt64 hop_num_units, UInt64 window_num_units, const DateLUTImpl & time_zone) + executeHop(const ColumnDateTime & time_column, UInt64 hop_num_units, UInt64 window_num_units, const DateLUTImpl & time_zone) { const auto & time_data = time_column.getData(); size_t size = time_column.size(); @@ -491,7 +492,7 @@ struct TimeWindowImpl const auto & hop_interval_column = arguments[1]; const auto & window_interval_column = arguments[2]; const auto & from_datatype = *time_column.type.get(); - const auto * time_column_vec = checkAndGetColumn(time_column.column.get()); + const auto * time_column_vec = checkAndGetColumn(time_column.column.get()); const DateLUTImpl & time_zone = extractTimeZoneFromFunctionArguments(arguments, 3, 0); if (!WhichDataType(from_datatype).isDateTime() || !time_column_vec) throw Exception( @@ -551,7 +552,7 @@ struct TimeWindowImpl template static ColumnPtr - executeHopSlice(const ColumnUInt32 & time_column, UInt64 hop_num_units, UInt64 window_num_units, const DateLUTImpl & time_zone) + executeHopSlice(const ColumnDateTime & time_column, UInt64 hop_num_units, UInt64 window_num_units, const DateLUTImpl & time_zone) { Int64 gcd_num_units = std::gcd(hop_num_units, window_num_units); diff --git a/src/Functions/FunctionsTimeWindow.h b/src/Functions/FunctionsTimeWindow.h index 3ea397e4c7d..4346e691046 100644 --- a/src/Functions/FunctionsTimeWindow.h +++ b/src/Functions/FunctionsTimeWindow.h @@ -64,7 +64,7 @@ struct ToStartOfTransform; { static UInt32 execute(UInt32 t, UInt64 delta, const DateLUTImpl & time_zone) { - return time_zone.toStartOfDayInterval(time_zone.toDayNum(t), delta); + return static_cast(time_zone.toStartOfDayInterval(time_zone.toDayNum(t), delta)); } }; @@ -74,7 +74,7 @@ struct ToStartOfTransform; { \ static UInt32 execute(UInt32 t, UInt64 delta, const DateLUTImpl & time_zone) \ { \ - return time_zone.toStartOf##INTERVAL_KIND##Interval(t, delta); \ + return static_cast(time_zone.toStartOf##INTERVAL_KIND##Interval(t, delta)); \ } \ }; TRANSFORM_TIME(Hour) @@ -114,7 +114,7 @@ template<> \ template <> \ struct AddTime \ { \ - static inline auto execute(UInt16 d, UInt64 delta, const DateLUTImpl & time_zone) \ + static inline auto execute(UInt16 d, Int64 delta, const DateLUTImpl & time_zone) \ { \ return time_zone.add##INTERVAL_KIND##s(ExtendedDayNum(d), delta); \ } \ @@ -127,14 +127,18 @@ template<> \ template <> struct AddTime { - static inline NO_SANITIZE_UNDEFINED ExtendedDayNum execute(UInt16 d, UInt64 delta, const DateLUTImpl &) { return ExtendedDayNum(d + delta * 7);} + static inline NO_SANITIZE_UNDEFINED ExtendedDayNum execute(UInt16 d, UInt64 delta, const DateLUTImpl &) + { + return ExtendedDayNum(static_cast(d + delta * 7)); + } }; #define ADD_TIME(INTERVAL_KIND, INTERVAL) \ template <> \ struct AddTime \ { \ - static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl &) { return t + delta * INTERVAL; } \ + static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl &) \ + { return static_cast(t + delta * INTERVAL); } \ }; ADD_TIME(Day, 86400) ADD_TIME(Hour, 3600) diff --git a/src/Functions/GregorianDate.h b/src/Functions/GregorianDate.h index ef2b9e6eede..a64ae752509 
100644 --- a/src/Functions/GregorianDate.h +++ b/src/Functions/GregorianDate.h @@ -38,7 +38,7 @@ namespace DB * integral type which should be at least 32 bits wide, and * should preferably signed. */ - explicit GregorianDate(is_integer auto mjd); + explicit GregorianDate(is_integer auto modified_julian_day); /** Convert to Modified Julian Day. The type T is an integral type * which should be at least 32 bits wide, and should preferably @@ -89,7 +89,8 @@ namespace DB * integral type which should be at least 32 bits wide, and * should preferably signed. */ - explicit OrdinalDate(is_integer auto mjd); + template + explicit OrdinalDate(DayT modified_julian_day); /** Convert to Modified Julian Day. The type T is an integral * type which should be at least 32 bits wide, and should @@ -257,9 +258,9 @@ namespace DB } template - GregorianDate::GregorianDate(is_integer auto mjd) + GregorianDate::GregorianDate(is_integer auto modified_julian_day) { - const OrdinalDate ord(mjd); + const OrdinalDate ord(modified_julian_day); const MonthDay md(gd::is_leap_year(ord.year()), ord.dayOfYear()); year_ = ord.year(); month_ = md.month(); @@ -329,9 +330,24 @@ namespace DB } template - OrdinalDate::OrdinalDate(is_integer auto mjd) + template + OrdinalDate::OrdinalDate(DayT modified_julian_day) { - const auto a = mjd + 678575; + /// This function supports day number from -678941 to 2973119 (which represent 0000-01-01 and 9999-12-31 respectively). + + if constexpr (is_signed_v && std::numeric_limits::lowest() < -678941) + if (modified_julian_day < -678941) + throw Exception( + ErrorCodes::CANNOT_FORMAT_DATETIME, + "Value cannot be represented as date because it's out of range"); + + if constexpr (std::numeric_limits::max() > 2973119) + if (modified_julian_day > 2973119) + throw Exception( + ErrorCodes::CANNOT_FORMAT_DATETIME, + "Value cannot be represented as date because it's out of range"); + + const auto a = modified_julian_day + 678575; const auto quad_cent = gd::div(a, 146097); const auto b = gd::mod(a, 146097); const auto cent = gd::min(gd::div(b, 36524), 3); @@ -339,8 +355,9 @@ namespace DB const auto quad = gd::div(c, 1461); const auto d = gd::mod(c, 1461); const auto y = gd::min(gd::div(d, 365), 3); + day_of_year_ = d - y * 365 + 1; - year_ = quad_cent * 400 + cent * 100 + quad * 4 + y + 1; + year_ = static_cast(quad_cent * 400 + cent * 100 + quad * 4 + y + 1); } template diff --git a/src/Functions/JSONPath/Generator/GeneratorJSONPath.h b/src/Functions/JSONPath/Generator/GeneratorJSONPath.h index fe00f06bbbf..3d646a6ff31 100644 --- a/src/Functions/JSONPath/Generator/GeneratorJSONPath.h +++ b/src/Functions/JSONPath/Generator/GeneratorJSONPath.h @@ -89,7 +89,7 @@ public: for (size_t i = current_visitor; i < visitors.size(); ++i) { status = visitors[i]->visit(current); - current_visitor = i; + current_visitor = static_cast(i); if (status == VisitorStatus::Error || status == VisitorStatus::Ignore) { break; diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp b/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp index bc153b9d747..03c006774c0 100644 --- a/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp +++ b/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp @@ -46,7 +46,7 @@ bool ParserJSONPathRange::parseImpl(Pos & pos, ASTPtr & node, Expected & expecte { return false; } - range_indices.first = number_ptr->as()->value.get(); + range_indices.first = static_cast(number_ptr->as()->value.get()); if (pos->type == TokenType::Comma || pos->type == TokenType::ClosingSquareBracket) { @@ 
-63,7 +63,7 @@ bool ParserJSONPathRange::parseImpl(Pos & pos, ASTPtr & node, Expected & expecte { return false; } - range_indices.second = number_ptr->as()->value.get(); + range_indices.second = static_cast(number_ptr->as()->value.get()); } else { diff --git a/src/Functions/LowerUpperUTF8Impl.h b/src/Functions/LowerUpperUTF8Impl.h index 3a050e2bd6f..b8fd20d56de 100644 --- a/src/Functions/LowerUpperUTF8Impl.h +++ b/src/Functions/LowerUpperUTF8Impl.h @@ -104,7 +104,7 @@ struct LowerUpperUTF8Impl /** Converts a single code point starting at `src` to desired case, storing result starting at `dst`. * `src` and `dst` are incremented by corresponding sequence lengths. */ - static void toCase(const UInt8 *& src, const UInt8 * src_end, UInt8 *& dst) + static bool toCase(const UInt8 *& src, const UInt8 * src_end, UInt8 *& dst, bool partial) { if (src[0] <= ascii_upper_bound) { @@ -136,6 +136,11 @@ struct LowerUpperUTF8Impl static const Poco::UTF8Encoding utf8; size_t src_sequence_length = UTF8::seqLength(*src); + /// In case partial buffer was passed (due to SSE optimization) + /// we cannot convert it with current src_end, but we may have more + /// bytes to convert and eventually got correct symbol. + if (partial && src_sequence_length > static_cast(src_end-src)) + return false; auto src_code_point = UTF8::convertUTF8ToCodePoint(src, src_end - src); if (src_code_point) @@ -152,7 +157,7 @@ struct LowerUpperUTF8Impl { src += dst_sequence_length; dst += dst_sequence_length; - return; + return true; } } } @@ -161,6 +166,8 @@ struct LowerUpperUTF8Impl ++dst; ++src; } + + return true; } private: @@ -229,16 +236,13 @@ private: const UInt8 * expected_end = std::min(src + bytes_sse, row_end); while (src < expected_end) - toCase(src, expected_end, dst); - - /// adjust src_end_sse by pushing it forward or backward - const auto diff = src - expected_end; - if (diff != 0) { - if (src_end_sse + diff < src_end) - src_end_sse += diff; - else - src_end_sse -= bytes_sse - diff; + if (!toCase(src, expected_end, dst, /* partial= */ true)) + { + /// Fallback to handling byte by byte. 
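/// A standalone sketch of the boundary check introduced in the hunk above: when case conversion
/// runs over fixed-size chunks, a multi-byte UTF-8 sequence may be cut at the chunk edge, and the
/// converter must report "partial" so the caller retries byte by byte with the full row.
/// The helper names utf8_seq_length and fits_before are hypothetical, not the ClickHouse API.
#include <cstddef>
#include <cstdint>
#include <iostream>

/// Sequence length implied by a UTF-8 leading byte (rough analogue of UTF8::seqLength).
std::size_t utf8_seq_length(std::uint8_t first)
{
    if (first < 0x80) return 1;
    if ((first & 0xE0) == 0xC0) return 2;
    if ((first & 0xF0) == 0xE0) return 3;
    if ((first & 0xF8) == 0xF0) return 4;
    return 1;  /// continuation or invalid byte is handled as a single byte
}

/// True if the code point starting at src fits before end; false means "partial buffer, retry".
bool fits_before(const std::uint8_t * src, const std::uint8_t * end)
{
    return utf8_seq_length(*src) <= static_cast<std::size_t>(end - src);
}

int main()
{
    const std::uint8_t text[] = {0xD0, 0x94, 0xD0};        /// "Д" followed by a truncated sequence
    std::cout << fits_before(text, text + 3) << '\n';       /// 1: first code point is complete
    std::cout << fits_before(text + 2, text + 3) << '\n';   /// 0: leading byte 0xD0 needs 2 bytes
}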
+ src_end_sse = src; + break; + } } } } @@ -255,7 +259,7 @@ private: chassert(row_end >= src); while (src < row_end) - toCase(src, row_end, dst); + toCase(src, row_end, dst, /* partial= */ false); ++offset_it; } } diff --git a/src/Functions/MultiMatchAllIndicesImpl.h b/src/Functions/MultiMatchAllIndicesImpl.h index 3490c854f22..8e355405093 100644 --- a/src/Functions/MultiMatchAllIndicesImpl.h +++ b/src/Functions/MultiMatchAllIndicesImpl.h @@ -116,7 +116,7 @@ struct MultiMatchAllIndicesImpl err = hs_scan( regexps->getDB(), reinterpret_cast(haystack_data.data()) + offset, - length, + static_cast(length), 0, smart_scratch.get(), on_match, @@ -227,7 +227,7 @@ struct MultiMatchAllIndicesImpl err = hs_scan( regexps->getDB(), reinterpret_cast(haystack_data.data()) + prev_haystack_offset, - cur_haystack_length, + static_cast(cur_haystack_length), 0, smart_scratch.get(), on_match, diff --git a/src/Functions/MultiMatchAnyImpl.h b/src/Functions/MultiMatchAnyImpl.h index 2d4db261bb4..4b02e78dc25 100644 --- a/src/Functions/MultiMatchAnyImpl.h +++ b/src/Functions/MultiMatchAnyImpl.h @@ -136,7 +136,7 @@ struct MultiMatchAnyImpl err = hs_scan( regexps->getDB(), reinterpret_cast(haystack_data.data()) + offset, - length, + static_cast(length), 0, smart_scratch.get(), on_match, @@ -260,7 +260,7 @@ struct MultiMatchAnyImpl err = hs_scan( regexps->getDB(), reinterpret_cast(haystack_data.data()) + prev_haystack_offset, - cur_haystack_length, + static_cast(cur_haystack_length), 0, smart_scratch.get(), on_match, diff --git a/src/Functions/PolygonUtils.h b/src/Functions/PolygonUtils.h index 0ffe05fbffc..37517313879 100644 --- a/src/Functions/PolygonUtils.h +++ b/src/Functions/PolygonUtils.h @@ -625,7 +625,7 @@ UInt128 sipHash128(Polygon && polygon) auto hash_ring = [&hash](const auto & ring) { - UInt32 size = ring.size(); + UInt32 size = static_cast(ring.size()); hash.update(size); hash.update(reinterpret_cast(ring.data()), size * sizeof(ring[0])); }; diff --git a/src/Functions/Regexps.h b/src/Functions/Regexps.h index 1e40c845788..c1ff83d04fe 100644 --- a/src/Functions/Regexps.h +++ b/src/Functions/Regexps.h @@ -207,7 +207,7 @@ inline Regexps constructRegexps(const std::vector & str_patterns, [[mayb { ids.reset(new unsigned int[patterns.size()]); for (size_t i = 0; i < patterns.size(); ++i) - ids[i] = i + 1; + ids[i] = static_cast(i + 1); } hs_error_t err; @@ -216,7 +216,7 @@ inline Regexps constructRegexps(const std::vector & str_patterns, [[mayb patterns.data(), flags.data(), ids.get(), - patterns.size(), + static_cast(patterns.size()), HS_MODE_BLOCK, nullptr, &db, @@ -227,7 +227,7 @@ inline Regexps constructRegexps(const std::vector & str_patterns, [[mayb flags.data(), ids.get(), ext_exprs_ptrs.data(), - patterns.size(), + static_cast(patterns.size()), HS_MODE_BLOCK, nullptr, &db, diff --git a/src/Functions/ReplaceRegexpImpl.h b/src/Functions/ReplaceRegexpImpl.h index 88bc48a6d8c..a1d17ce9da1 100644 --- a/src/Functions/ReplaceRegexpImpl.h +++ b/src/Functions/ReplaceRegexpImpl.h @@ -189,7 +189,7 @@ struct ReplaceRegexpImpl /// Cannot perform search for whole columns. Will process each string separately. for (size_t i = 0; i < size; ++i) { - int from = i > 0 ? offsets[i - 1] : 0; + size_t from = i > 0 ? 
offsets[i - 1] : 0; re2_st::StringPiece input(reinterpret_cast(data.data() + from), offsets[i] - from - 1); processString(input, res_data, res_offset, searcher, num_captures, instructions); @@ -220,7 +220,7 @@ struct ReplaceRegexpImpl for (size_t i = 0; i < size; ++i) { - int from = i * n; + size_t from = i * n; re2_st::StringPiece input(reinterpret_cast(data.data() + from), n); processString(input, res_data, res_offset, searcher, num_captures, instructions); diff --git a/src/Functions/URL/CMakeLists.txt b/src/Functions/URL/CMakeLists.txt index 6328476543d..0e148e87604 100644 --- a/src/Functions/URL/CMakeLists.txt +++ b/src/Functions/URL/CMakeLists.txt @@ -2,6 +2,7 @@ include("${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake") add_headers_and_sources(clickhouse_functions_url .) add_library(clickhouse_functions_url OBJECT ${clickhouse_functions_url_sources} ${clickhouse_functions_url_headers}) target_link_libraries(clickhouse_functions_url PRIVATE dbms) +set_source_files_properties(tldLookup.generated.cpp PROPERTIES COMPILE_FLAGS -Wno-shorten-64-to-32) if (OMIT_HEAVY_DEBUG_SYMBOLS) target_compile_options(clickhouse_functions_url PRIVATE "-g0") diff --git a/src/Functions/URL/ExtractFirstSignificantSubdomain.h b/src/Functions/URL/ExtractFirstSignificantSubdomain.h index 73137da474f..0d1b1cac8ef 100644 --- a/src/Functions/URL/ExtractFirstSignificantSubdomain.h +++ b/src/Functions/URL/ExtractFirstSignificantSubdomain.h @@ -16,7 +16,7 @@ struct FirstSignificantSubdomainDefaultLookup } }; -template +template struct ExtractFirstSignificantSubdomain { static size_t getReserveLengthForElement() { return 10; } @@ -35,7 +35,7 @@ struct ExtractFirstSignificantSubdomain Pos tmp; size_t domain_length; - ExtractDomain::execute(data, size, tmp, domain_length); + ExtractDomain::execute(data, size, tmp, domain_length); if (domain_length == 0) return; @@ -105,7 +105,7 @@ struct ExtractFirstSignificantSubdomain Pos tmp; size_t domain_length; - ExtractDomain::execute(data, size, tmp, domain_length); + ExtractDomain::execute(data, size, tmp, domain_length); if (domain_length == 0) return; diff --git a/src/Functions/URL/cutToFirstSignificantSubdomain.cpp b/src/Functions/URL/cutToFirstSignificantSubdomain.cpp index dddfbe4f4dd..7bf09d1eb00 100644 --- a/src/Functions/URL/cutToFirstSignificantSubdomain.cpp +++ b/src/Functions/URL/cutToFirstSignificantSubdomain.cpp @@ -6,7 +6,7 @@ namespace DB { -template +template struct CutToFirstSignificantSubdomain { static size_t getReserveLengthForElement() { return 15; } @@ -19,7 +19,7 @@ struct CutToFirstSignificantSubdomain Pos tmp_data; size_t tmp_length; Pos domain_end; - ExtractFirstSignificantSubdomain::execute(data, size, tmp_data, tmp_length, &domain_end); + ExtractFirstSignificantSubdomain::execute(data, size, tmp_data, tmp_length, &domain_end); if (tmp_length == 0) return; @@ -30,15 +30,47 @@ struct CutToFirstSignificantSubdomain }; struct NameCutToFirstSignificantSubdomain { static constexpr auto name = "cutToFirstSignificantSubdomain"; }; -using FunctionCutToFirstSignificantSubdomain = FunctionStringToString>, NameCutToFirstSignificantSubdomain>; +using FunctionCutToFirstSignificantSubdomain = FunctionStringToString>, NameCutToFirstSignificantSubdomain>; struct NameCutToFirstSignificantSubdomainWithWWW { static constexpr auto name = "cutToFirstSignificantSubdomainWithWWW"; }; -using FunctionCutToFirstSignificantSubdomainWithWWW = FunctionStringToString>, NameCutToFirstSignificantSubdomainWithWWW>; +using FunctionCutToFirstSignificantSubdomainWithWWW = 
FunctionStringToString>, NameCutToFirstSignificantSubdomainWithWWW>; + +struct NameCutToFirstSignificantSubdomainRFC { static constexpr auto name = "cutToFirstSignificantSubdomainRFC"; }; +using FunctionCutToFirstSignificantSubdomainRFC = FunctionStringToString>, NameCutToFirstSignificantSubdomainRFC>; + +struct NameCutToFirstSignificantSubdomainWithWWWRFC { static constexpr auto name = "cutToFirstSignificantSubdomainWithWWWRFC"; }; +using FunctionCutToFirstSignificantSubdomainWithWWWRFC = FunctionStringToString>, NameCutToFirstSignificantSubdomainWithWWWRFC>; REGISTER_FUNCTION(CutToFirstSignificantSubdomain) { - factory.registerFunction(); - factory.registerFunction(); + factory.registerFunction( + { + R"(Returns the part of the domain that includes top-level subdomains up to the "first significant subdomain" (see documentation of the `firstSignificantSubdomain`).)", + Documentation::Examples{ + {"cutToFirstSignificantSubdomain1", "SELECT cutToFirstSignificantSubdomain('https://news.clickhouse.com.tr/')"}, + {"cutToFirstSignificantSubdomain2", "SELECT cutToFirstSignificantSubdomain('www.tr')"}, + {"cutToFirstSignificantSubdomain3", "SELECT cutToFirstSignificantSubdomain('tr')"}, + }, + Documentation::Categories{"URL"} + }); + factory.registerFunction( + { + R"(Returns the part of the domain that includes top-level subdomains up to the "first significant subdomain", without stripping "www".)", + Documentation::Examples{}, + Documentation::Categories{"URL"} + }); + factory.registerFunction( + { + R"(Similar to `cutToFirstSignificantSubdomain` but follows stricter rules to be compatible with RFC 3986 and less performant.)", + Documentation::Examples{}, + Documentation::Categories{"URL"} + }); + factory.registerFunction( + { + R"(Similar to `cutToFirstSignificantSubdomainWithWWW` but follows stricter rules to be compatible with RFC 3986 and less performant.)", + Documentation::Examples{}, + Documentation::Categories{"URL"} + }); } } diff --git a/src/Functions/URL/cutToFirstSignificantSubdomainCustom.cpp b/src/Functions/URL/cutToFirstSignificantSubdomainCustom.cpp index a2e51200910..e81921d69ff 100644 --- a/src/Functions/URL/cutToFirstSignificantSubdomainCustom.cpp +++ b/src/Functions/URL/cutToFirstSignificantSubdomainCustom.cpp @@ -5,7 +5,7 @@ namespace DB { -template +template struct CutToFirstSignificantSubdomainCustom { static size_t getReserveLengthForElement() { return 15; } @@ -18,7 +18,7 @@ struct CutToFirstSignificantSubdomainCustom Pos tmp_data; size_t tmp_length; Pos domain_end; - ExtractFirstSignificantSubdomain::executeCustom(tld_lookup, data, size, tmp_data, tmp_length, &domain_end); + ExtractFirstSignificantSubdomain::executeCustom(tld_lookup, data, size, tmp_data, tmp_length, &domain_end); if (tmp_length == 0) return; @@ -29,15 +29,54 @@ struct CutToFirstSignificantSubdomainCustom }; struct NameCutToFirstSignificantSubdomainCustom { static constexpr auto name = "cutToFirstSignificantSubdomainCustom"; }; -using FunctionCutToFirstSignificantSubdomainCustom = FunctionCutToFirstSignificantSubdomainCustomImpl, NameCutToFirstSignificantSubdomainCustom>; +using FunctionCutToFirstSignificantSubdomainCustom = FunctionCutToFirstSignificantSubdomainCustomImpl, NameCutToFirstSignificantSubdomainCustom>; struct NameCutToFirstSignificantSubdomainCustomWithWWW { static constexpr auto name = "cutToFirstSignificantSubdomainCustomWithWWW"; }; -using FunctionCutToFirstSignificantSubdomainCustomWithWWW = FunctionCutToFirstSignificantSubdomainCustomImpl, 
NameCutToFirstSignificantSubdomainCustomWithWWW>; +using FunctionCutToFirstSignificantSubdomainCustomWithWWW = FunctionCutToFirstSignificantSubdomainCustomImpl, NameCutToFirstSignificantSubdomainCustomWithWWW>; + +struct NameCutToFirstSignificantSubdomainCustomRFC { static constexpr auto name = "cutToFirstSignificantSubdomainCustomRFC"; }; +using FunctionCutToFirstSignificantSubdomainCustomRFC = FunctionCutToFirstSignificantSubdomainCustomImpl, NameCutToFirstSignificantSubdomainCustomRFC>; + +struct NameCutToFirstSignificantSubdomainCustomWithWWWRFC { static constexpr auto name = "cutToFirstSignificantSubdomainCustomWithWWWRFC"; }; +using FunctionCutToFirstSignificantSubdomainCustomWithWWWRFC = FunctionCutToFirstSignificantSubdomainCustomImpl, NameCutToFirstSignificantSubdomainCustomWithWWWRFC>; REGISTER_FUNCTION(CutToFirstSignificantSubdomainCustom) { - factory.registerFunction(); - factory.registerFunction(); + factory.registerFunction( + { + R"( +Returns the part of the domain that includes top-level subdomains up to the first significant subdomain. Accepts custom TLD list name. + +Can be useful if you need fresh TLD list or you have custom. + )", + Documentation::Examples{ + {"cutToFirstSignificantSubdomainCustom", "SELECT cutToFirstSignificantSubdomainCustom('bar.foo.there-is-no-such-domain', 'public_suffix_list');"}, + }, + Documentation::Categories{"URL"} + }); + factory.registerFunction( + { + R"( +Returns the part of the domain that includes top-level subdomains up to the first significant subdomain without stripping `www`. +Accepts custom TLD list name from config. + +Can be useful if you need fresh TLD list or you have custom. + )", + Documentation::Examples{{"cutToFirstSignificantSubdomainCustomWithWWW", "SELECT cutToFirstSignificantSubdomainCustomWithWWW('www.foo', 'public_suffix_list')"}}, + Documentation::Categories{"URL"} + }); + factory.registerFunction( + { + R"(Similar to `cutToFirstSignificantSubdomainCustom` but follows stricter rules according to RFC 3986.)", + Documentation::Examples{}, + Documentation::Categories{"URL"} + }); + factory.registerFunction( + { + R"(Similar to `cutToFirstSignificantSubdomainCustomWithWWW` but follows stricter rules according to RFC 3986.)", + Documentation::Examples{}, + Documentation::Categories{"URL"} + }); } } diff --git a/src/Functions/URL/domain.cpp b/src/Functions/URL/domain.cpp index 1d781b37943..fce7cea4693 100644 --- a/src/Functions/URL/domain.cpp +++ b/src/Functions/URL/domain.cpp @@ -7,12 +7,31 @@ namespace DB { struct NameDomain { static constexpr auto name = "domain"; }; -using FunctionDomain = FunctionStringToString>, NameDomain>; +using FunctionDomain = FunctionStringToString>, NameDomain>; +struct NameDomainRFC { static constexpr auto name = "domainRFC"; }; +using FunctionDomainRFC = FunctionStringToString>, NameDomainRFC>; REGISTER_FUNCTION(Domain) { - factory.registerFunction(); + factory.registerFunction( + { + R"( +Extracts the hostname from a URL. + +The URL can be specified with or without a scheme. +If the argument can't be parsed as URL, the function returns an empty string. 
+ )", + Documentation::Examples{{"domain", "SELECT domain('svn+ssh://some.svn-hosting.com:80/repo/trunk')"}}, + Documentation::Categories{"URL"} + }); + + factory.registerFunction( + { + R"(Similar to `domain` but follows stricter rules to be compatible with RFC 3986 and less performant.)", + Documentation::Examples{}, + Documentation::Categories{"URL"} + }); } } diff --git a/src/Functions/URL/domain.h b/src/Functions/URL/domain.h index 1245bb20182..64362edf2c3 100644 --- a/src/Functions/URL/domain.h +++ b/src/Functions/URL/domain.h @@ -20,6 +20,115 @@ inline std::string_view checkAndReturnHost(const Pos & pos, const Pos & dot_pos, return std::string_view(start_of_host, pos - start_of_host); } +/// Extracts host from given url (RPC). +/// +/// @return empty string view if the host is not valid (i.e. it does not have dot, or there no symbol after dot). +inline std::string_view getURLHostRFC(const char * data, size_t size) +{ + Pos pos = data; + Pos end = data + size; + + if (*pos == '/' && *(pos + 1) == '/') + { + pos += 2; + } + else + { + Pos scheme_end = data + std::min(size, 16UL); + for (++pos; pos < scheme_end; ++pos) + { + if (!isAlphaNumericASCII(*pos)) + { + switch (*pos) + { + case '.': + case '-': + case '+': + break; + case ' ': /// restricted symbols + case '\t': + case '<': + case '>': + case '%': + case '{': + case '}': + case '|': + case '\\': + case '^': + case '~': + case '[': + case ']': + case ';': + case '=': + case '&': + return std::string_view{}; + default: + goto exloop; + } + } + } +exloop: if ((scheme_end - pos) > 2 && *pos == ':' && *(pos + 1) == '/' && *(pos + 2) == '/') + pos += 3; + else + pos = data; + } + + Pos dot_pos = nullptr; + Pos colon_pos = nullptr; + bool has_at_symbol = false; + bool has_terminator_after_colon = false; + const auto * start_of_host = pos; + for (; pos < end; ++pos) + { + switch (*pos) + { + case '.': + if (has_at_symbol || colon_pos == nullptr) + dot_pos = pos; + break; + case ':': + if (has_at_symbol || colon_pos) goto done; + colon_pos = pos; + break; + case '/': /// end symbols + case '?': + case '#': + goto done; + case '@': /// myemail@gmail.com + if (has_terminator_after_colon) return std::string_view{}; + if (has_at_symbol) goto done; + has_at_symbol = true; + start_of_host = pos + 1; + break; + case ' ': /// restricted symbols in whole URL + case '\t': + case '<': + case '>': + case '%': + case '{': + case '}': + case '|': + case '\\': + case '^': + case '~': + case '[': + case ']': + case ';': + case '=': + case '&': + if (colon_pos == nullptr) + return std::string_view{}; + else + has_terminator_after_colon = true; + } + } + +done: + if (!has_at_symbol) + pos = colon_pos ? colon_pos : pos; + return checkAndReturnHost(pos, dot_pos, start_of_host); +} + /// Extracts host from given url. /// /// @return empty string view if the host is not valid (i.e. it does not have dot, or there no symbol after dot). 
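As an aside on the two parsing paths above (illustrative only, not part of the patch): the `domain`/`domainRFC` pair registered in domain.cpp ultimately calls getURLHost and getURLHostRFC, so the lenient and the RFC 3986-conforming parsers can be compared with plain queries. The first URL comes from the documentation example in this diff; the second is a made-up input with a userinfo part, where the two variants may or may not agree, and exact results depend on the server build.

    SELECT domain('svn+ssh://some.svn-hosting.com:80/repo/trunk');      -- lenient parser
    SELECT domainRFC('svn+ssh://some.svn-hosting.com:80/repo/trunk');   -- RFC 3986-conforming parser
    -- hypothetical input with a userinfo component; results for the two variants may differ here
    SELECT domain('https://user:pass@news.clickhouse.com.tr/path');
    SELECT domainRFC('https://user:pass@news.clickhouse.com.tr/path');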
@@ -113,14 +222,18 @@ exloop: if ((scheme_end - pos) > 2 && *pos == ':' && *(pos + 1) == '/' && *(pos return checkAndReturnHost(pos, dot_pos, start_of_host); } -template +template struct ExtractDomain { static size_t getReserveLengthForElement() { return 15; } static void execute(Pos data, size_t size, Pos & res_data, size_t & res_size) { - std::string_view host = getURLHost(data, size); + std::string_view host; + if constexpr (conform_rfc) + host = getURLHostRFC(data, size); + else + host = getURLHost(data, size); if (host.empty()) { diff --git a/src/Functions/URL/domainWithoutWWW.cpp b/src/Functions/URL/domainWithoutWWW.cpp index 53ff5bc919e..48401e5e6e5 100644 --- a/src/Functions/URL/domainWithoutWWW.cpp +++ b/src/Functions/URL/domainWithoutWWW.cpp @@ -6,12 +6,31 @@ namespace DB { struct NameDomainWithoutWWW { static constexpr auto name = "domainWithoutWWW"; }; -using FunctionDomainWithoutWWW = FunctionStringToString>, NameDomainWithoutWWW>; +using FunctionDomainWithoutWWW = FunctionStringToString>, NameDomainWithoutWWW>; + +struct NameDomainWithoutWWWRFC { static constexpr auto name = "domainWithoutWWWRFC"; }; +using FunctionDomainWithoutWWWRFC = FunctionStringToString>, NameDomainWithoutWWWRFC>; REGISTER_FUNCTION(DomainWithoutWWW) { - factory.registerFunction(); + factory.registerFunction( + { + R"( +Extracts the hostname from a URL, removing the leading "www." if present. + +The URL can be specified with or without a scheme. +If the argument can't be parsed as URL, the function returns an empty string. + )", + Documentation::Examples{{"domainWithoutWWW", "SELECT domainWithoutWWW('https://www.clickhouse.com')"}}, + Documentation::Categories{"URL"} + }); + factory.registerFunction( + { + R"(Similar to `domainWithoutWWW` but follows stricter rules to be compatible with RFC 3986 and less performant.)", + Documentation::Examples{}, + Documentation::Categories{"URL"} + }); } } diff --git a/src/Functions/URL/firstSignificantSubdomain.cpp b/src/Functions/URL/firstSignificantSubdomain.cpp index d3aeb90771f..62307ef816c 100644 --- a/src/Functions/URL/firstSignificantSubdomain.cpp +++ b/src/Functions/URL/firstSignificantSubdomain.cpp @@ -7,12 +7,35 @@ namespace DB { struct NameFirstSignificantSubdomain { static constexpr auto name = "firstSignificantSubdomain"; }; +using FunctionFirstSignificantSubdomain = FunctionStringToString>, NameFirstSignificantSubdomain>; -using FunctionFirstSignificantSubdomain = FunctionStringToString>, NameFirstSignificantSubdomain>; +struct NameFirstSignificantSubdomainRFC { static constexpr auto name = "firstSignificantSubdomainRFC"; }; +using FunctionFirstSignificantSubdomainRFC = FunctionStringToString>, NameFirstSignificantSubdomainRFC>; REGISTER_FUNCTION(FirstSignificantSubdomain) { - factory.registerFunction(); + factory.registerFunction( + { + R"( +Returns the "first significant subdomain". + +The first significant subdomain is a second-level domain if it is 'com', 'net', 'org', or 'co'. +Otherwise, it is a third-level domain. + +For example, firstSignificantSubdomain('https://news.clickhouse.com/') = 'clickhouse', firstSignificantSubdomain ('https://news.clickhouse.com.tr/') = 'clickhouse'. + +The list of "insignificant" second-level domains and other implementation details may change in the future. 
+ )", + Documentation::Examples{{"firstSignificantSubdomain", "SELECT firstSignificantSubdomain('https://news.clickhouse.com/')"}}, + Documentation::Categories{"URL"} + }); + + factory.registerFunction( + { + R"(Returns the "first significant subdomain" according to RFC 1034.)", + Documentation::Examples{}, + Documentation::Categories{"URL"} + }); } } diff --git a/src/Functions/URL/firstSignificantSubdomainCustom.cpp b/src/Functions/URL/firstSignificantSubdomainCustom.cpp index f43b42d0309..c07aa2b3ac8 100644 --- a/src/Functions/URL/firstSignificantSubdomainCustom.cpp +++ b/src/Functions/URL/firstSignificantSubdomainCustom.cpp @@ -7,12 +7,15 @@ namespace DB { struct NameFirstSignificantSubdomainCustom { static constexpr auto name = "firstSignificantSubdomainCustom"; }; +using FunctionFirstSignificantSubdomainCustom = FunctionCutToFirstSignificantSubdomainCustomImpl, NameFirstSignificantSubdomainCustom>; -using FunctionFirstSignificantSubdomainCustom = FunctionCutToFirstSignificantSubdomainCustomImpl, NameFirstSignificantSubdomainCustom>; +struct NameFirstSignificantSubdomainCustomRFC { static constexpr auto name = "firstSignificantSubdomainCustomRFC"; }; +using FunctionFirstSignificantSubdomainCustomRFC = FunctionCutToFirstSignificantSubdomainCustomImpl, NameFirstSignificantSubdomainCustomRFC>; REGISTER_FUNCTION(FirstSignificantSubdomainCustom) { factory.registerFunction(); + factory.registerFunction(); } } diff --git a/src/Functions/URL/port.cpp b/src/Functions/URL/port.cpp index 85b060ca987..52fa4077c18 100644 --- a/src/Functions/URL/port.cpp +++ b/src/Functions/URL/port.cpp @@ -18,12 +18,9 @@ namespace ErrorCodes extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; } -struct FunctionPort : public IFunction +template +struct FunctionPortImpl : public IFunction { - static constexpr auto name = "port"; - static FunctionPtr create(ContextPtr) { return std::make_shared(); } - - String getName() const override { return name; } bool isVariadic() const override { return true; } size_t getNumberOfArguments() const override { return 0; } bool useDefaultImplementationForConstants() const override { return true; } @@ -94,7 +91,12 @@ private: const char * p = reinterpret_cast(buf.data()) + offset; const char * end = p + size; - std::string_view host = getURLHost(p, size); + std::string_view host; + if constexpr (conform_rfc) + host = getURLHostRFC(p, size); + else + host = getURLHost(p, size); + if (host.empty()) return default_port; if (host.size() == size) @@ -121,9 +123,34 @@ private: } }; +struct FunctionPort : public FunctionPortImpl +{ + static constexpr auto name = "port"; + String getName() const override { return name; } + static FunctionPtr create(ContextPtr) { return std::make_shared(); } +}; + +struct FunctionPortRFC : public FunctionPortImpl +{ + static constexpr auto name = "portRFC"; + String getName() const override { return name; } + static FunctionPtr create(ContextPtr) { return std::make_shared(); } +}; + REGISTER_FUNCTION(Port) { - factory.registerFunction(); + factory.registerFunction( + { + R"(Returns the port or `default_port` if there is no port in the URL (or in case of validation error).)", + Documentation::Examples{}, + Documentation::Categories{"URL"} + }); + factory.registerFunction( + { + R"(Similar to `port`, but conforms to RFC 3986.)", + Documentation::Examples{}, + Documentation::Categories{"URL"} + }); } } diff --git a/src/Functions/URL/topLevelDomain.cpp b/src/Functions/URL/topLevelDomain.cpp index 9937618cae9..ed9b40d4b73 100644 --- 
a/src/Functions/URL/topLevelDomain.cpp +++ b/src/Functions/URL/topLevelDomain.cpp @@ -5,13 +5,18 @@ namespace DB { +template struct ExtractTopLevelDomain { static size_t getReserveLengthForElement() { return 5; } static void execute(Pos data, size_t size, Pos & res_data, size_t & res_size) { - std::string_view host = getURLHost(data, size); + std::string_view host; + if constexpr (conform_rfc) + host = getURLHostRFC(data, size); + else + host = getURLHost(data, size); res_data = data; res_size = 0; @@ -41,11 +46,30 @@ struct ExtractTopLevelDomain }; struct NameTopLevelDomain { static constexpr auto name = "topLevelDomain"; }; -using FunctionTopLevelDomain = FunctionStringToString, NameTopLevelDomain>; +using FunctionTopLevelDomain = FunctionStringToString>, NameTopLevelDomain>; + +struct NameTopLevelDomainRFC { static constexpr auto name = "topLevelDomainRFC"; }; +using FunctionTopLevelDomainRFC = FunctionStringToString>, NameTopLevelDomainRFC>; REGISTER_FUNCTION(TopLevelDomain) { - factory.registerFunction(); + factory.registerFunction( + { + R"( +Extracts the the top-level domain from a URL. + +Returns an empty string if the argument cannot be parsed as a URL or does not contain a top-level domain. + )", + Documentation::Examples{{"topLevelDomain", "SELECT topLevelDomain('svn+ssh://www.some.svn-hosting.com:80/repo/trunk')"}}, + Documentation::Categories{"URL"} + }); + + factory.registerFunction( + { + R"(Similar to topLevelDomain, but conforms to RFC 3986.)", + Documentation::Examples{}, + Documentation::Categories{"URL"} + }); } } diff --git a/src/Functions/array/FunctionArrayMapped.h b/src/Functions/array/FunctionArrayMapped.h index 6d500cc15c4..dfed7cedcf0 100644 --- a/src/Functions/array/FunctionArrayMapped.h +++ b/src/Functions/array/FunctionArrayMapped.h @@ -185,8 +185,10 @@ public: const auto * data_type_function = checkAndGetDataType(arguments[0].type.get()); if (!data_type_function) - throw Exception("First argument for function " + getName() + " must be a function", - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "First argument for function {} must be a function. Actual {}", + getName(), + arguments[0].type->getName()); /// The types of the remaining arguments are already checked in getLambdaArgumentTypes. 
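A hedged illustration of the sharper diagnostic added to FunctionArrayMapped.h (not part of the patch; the exact wording, and which validation step fires first, depends on the analyzer): passing a non-function value as the first argument of a higher-order function such as arrayMap should now report the argument's actual type.

    SELECT arrayMap(1, [1, 2, 3]);
    -- expected to be rejected with a message along the lines of:
    --   First argument for function arrayMap must be a function. Actual UInt8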
diff --git a/src/Functions/array/arrayAggregation.cpp b/src/Functions/array/arrayAggregation.cpp index 7b72060f0c0..c8eae78dfaa 100644 --- a/src/Functions/array/arrayAggregation.cpp +++ b/src/Functions/array/arrayAggregation.cpp @@ -223,7 +223,7 @@ struct ArrayAggregateImpl if (unlikely(result_scale > DecimalUtils::max_precision)) throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Scale {} is out of bounds", result_scale); - res[i] = DecimalUtils::convertTo(product, result_scale); + res[i] = DecimalUtils::convertTo(product, static_cast(result_scale)); } else { @@ -332,7 +332,7 @@ struct ArrayAggregateImpl if (unlikely(result_scale > DecimalUtils::max_precision)) throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Scale {} is out of bounds", result_scale); - res[i] = DecimalUtils::convertTo(aggregate_value, result_scale); + res[i] = DecimalUtils::convertTo(aggregate_value, static_cast(result_scale)); } else { diff --git a/src/Functions/array/arrayCount.cpp b/src/Functions/array/arrayCount.cpp index cb902206e8b..f7ded051e5e 100644 --- a/src/Functions/array/arrayCount.cpp +++ b/src/Functions/array/arrayCount.cpp @@ -49,7 +49,7 @@ struct ArrayCountImpl size_t pos = 0; for (size_t i = 0; i < offsets.size(); ++i) { - out_counts[i] = offsets[i] - pos; + out_counts[i] = static_cast(offsets[i] - pos); pos = offsets[i]; } @@ -73,7 +73,7 @@ struct ArrayCountImpl if (filter[pos]) ++count; } - out_counts[i] = count; + out_counts[i] = static_cast(count); } return out_column; diff --git a/src/Functions/array/arrayElement.cpp b/src/Functions/array/arrayElement.cpp index acd8f89ffe5..59224096d3c 100644 --- a/src/Functions/array/arrayElement.cpp +++ b/src/Functions/array/arrayElement.cpp @@ -1025,12 +1025,14 @@ ColumnPtr FunctionArrayElement::executeMap( if (col_const_map) values_array = ColumnConst::create(values_array, input_rows_count); + const auto & type_map = assert_cast(*arguments[0].type); + /// Prepare arguments to call arrayElement for array with values and calculated indices at previous step. 
ColumnsWithTypeAndName new_arguments = { { values_array, - std::make_shared(result_type), + std::make_shared(type_map.getValueType()), "" }, { @@ -1086,7 +1088,9 @@ ColumnPtr FunctionArrayElement::executeImpl(const ColumnsWithTypeAndName & argum col_array = checkAndGetColumn(arguments[0].column.get()); if (col_array) + { is_array_of_nullable = isColumnNullable(col_array->getData()); + } else { col_const_array = checkAndGetColumnConstData(arguments[0].column.get()); diff --git a/src/Functions/array/arrayEnumerate.cpp b/src/Functions/array/arrayEnumerate.cpp index b20f91fe2dd..666e01899bd 100644 --- a/src/Functions/array/arrayEnumerate.cpp +++ b/src/Functions/array/arrayEnumerate.cpp @@ -60,7 +60,7 @@ public: for (auto off : offsets) { for (ColumnArray::Offset j = prev_off; j < off; ++j) - res_values[j] = j - prev_off + 1; + res_values[j] = static_cast(j - prev_off + 1); prev_off = off; } diff --git a/src/Functions/array/arrayEnumerateRanked.cpp b/src/Functions/array/arrayEnumerateRanked.cpp index 7c4b755e020..d19781f97c3 100644 --- a/src/Functions/array/arrayEnumerateRanked.cpp +++ b/src/Functions/array/arrayEnumerateRanked.cpp @@ -38,7 +38,7 @@ ArraysDepths getArraysDepths(const ColumnsWithTypeAndName & arguments) if (depths.size() < array_num && prev_array_depth) depths.emplace_back(prev_array_depth); - prev_array_depth = type_array->getNumberOfDimensions(); + prev_array_depth = static_cast(type_array->getNumberOfDimensions()); ++array_num; } else @@ -55,7 +55,7 @@ ArraysDepths getArraysDepths(const ColumnsWithTypeAndName & arguments) if (i == 0) { - clear_depth = value; + clear_depth = static_cast(value); } else { diff --git a/src/Functions/array/arrayFirstLastIndex.cpp b/src/Functions/array/arrayFirstLastIndex.cpp index f7355eb2b38..effcb04ab48 100644 --- a/src/Functions/array/arrayFirstLastIndex.cpp +++ b/src/Functions/array/arrayFirstLastIndex.cpp @@ -61,7 +61,7 @@ struct ArrayFirstLastIndexImpl if constexpr (strategy == ArrayFirstLastIndexStrategy::First) out_index[offset_index] = 1; else - out_index[offset_index] = end_offset - start_offset; + out_index[offset_index] = static_cast(end_offset - start_offset); } else { @@ -113,7 +113,7 @@ struct ArrayFirstLastIndexImpl } } - out_index[offset_index] = result_index; + out_index[offset_index] = static_cast(result_index); } return out_column; diff --git a/src/Functions/array/arrayReduce.cpp b/src/Functions/array/arrayReduce.cpp index fd16f1fc986..c93e67d4b1c 100644 --- a/src/Functions/array/arrayReduce.cpp +++ b/src/Functions/array/arrayReduce.cpp @@ -152,13 +152,6 @@ ColumnPtr FunctionArrayReduce::executeImpl(const ColumnsWithTypeAndName & argume MutableColumnPtr result_holder = result_type->createColumn(); IColumn & res_col = *result_holder; - /// AggregateFunction's states should be inserted into column using specific way - auto * res_col_aggregate_function = typeid_cast(&res_col); - - if (!res_col_aggregate_function && agg_func.isState()) - throw Exception("State function " + agg_func.getName() + " inserts results into non-state column " - + result_type->getName(), ErrorCodes::ILLEGAL_COLUMN); - PODArray places(input_rows_count); for (size_t i = 0; i < input_rows_count; ++i) { @@ -190,10 +183,9 @@ ColumnPtr FunctionArrayReduce::executeImpl(const ColumnsWithTypeAndName & argume } for (size_t i = 0; i < input_rows_count; ++i) - if (!res_col_aggregate_function) - agg_func.insertResultInto(places[i], res_col, arena.get()); - else - res_col_aggregate_function->insertFrom(places[i]); + /// We should use insertMergeResultInto to insert 
result into ColumnAggregateFunction + /// correctly if result contains AggregateFunction's states + agg_func.insertMergeResultInto(places[i], res_col, arena.get()); return result_holder; } diff --git a/src/Functions/array/arrayReduceInRanges.cpp b/src/Functions/array/arrayReduceInRanges.cpp index d2a382e86ba..11d5e03eb3d 100644 --- a/src/Functions/array/arrayReduceInRanges.cpp +++ b/src/Functions/array/arrayReduceInRanges.cpp @@ -202,13 +202,6 @@ ColumnPtr FunctionArrayReduceInRanges::executeImpl( result_arr->getOffsets().insert(ranges_offsets->begin(), ranges_offsets->end()); - /// AggregateFunction's states should be inserted into column using specific way - auto * res_col_aggregate_function = typeid_cast(&result_data); - - if (!res_col_aggregate_function && agg_func.isState()) - throw Exception("State function " + agg_func.getName() + " inserts results into non-state column " - + result_type->getName(), ErrorCodes::ILLEGAL_COLUMN); - /// Perform the aggregation size_t begin = 0; @@ -379,11 +372,9 @@ ColumnPtr FunctionArrayReduceInRanges::executeImpl( for (size_t k = local_begin; k < local_end; ++k) true_func->add(place, aggregate_arguments, begin + k, arena.get()); } - - if (!res_col_aggregate_function) - agg_func.insertResultInto(place, result_data, arena.get()); - else - res_col_aggregate_function->insertFrom(place); + /// We should use insertMergeResultInto to insert result into ColumnAggregateFunction + /// correctly if result contains AggregateFunction's states + agg_func.insertMergeResultInto(place, result_data, arena.get()); } } diff --git a/src/Functions/array/arrayUniq.cpp b/src/Functions/array/arrayUniq.cpp index ff75efaae71..a43c21508d9 100644 --- a/src/Functions/array/arrayUniq.cpp +++ b/src/Functions/array/arrayUniq.cpp @@ -233,7 +233,7 @@ void FunctionArrayUniq::executeMethodImpl( method.emplaceKey(set, j, pool); } - res_values[i] = set.size() + found_null; + res_values[i] = static_cast(set.size() + found_null); prev_off = off; } } diff --git a/src/Functions/array/range.cpp b/src/Functions/array/range.cpp index 6b3d8ad1139..3b5bb686e60 100644 --- a/src/Functions/array/range.cpp +++ b/src/Functions/array/range.cpp @@ -97,7 +97,7 @@ private: for (size_t row_idx = 0, rows = in->size(); row_idx < rows; ++row_idx) { for (size_t elem_idx = 0, elems = in_data[row_idx]; elem_idx < elems; ++elem_idx) - out_data[offset + elem_idx] = elem_idx; + out_data[offset + elem_idx] = static_cast(elem_idx); offset += in_data[row_idx]; out_offsets[row_idx] = offset; @@ -153,7 +153,7 @@ private: { for (size_t st = start, ed = end_data[row_idx]; st < ed; st += step) { - out_data[offset++] = st; + out_data[offset++] = static_cast(st); if (st > st + step) throw Exception{"A call to function " + getName() + " overflows, investigate the values of arguments you are passing", @@ -212,7 +212,7 @@ private: { for (size_t st = start_data[row_idx], ed = end_data[row_idx]; st < ed; st += step) { - out_data[offset++] = st; + out_data[offset++] = static_cast(st); if (st > st + step) throw Exception{"A call to function " + getName() + " overflows, investigate the values of arguments you are passing", @@ -271,7 +271,7 @@ private: { for (size_t st = start, ed = end_data[row_idx]; st < ed; st += step_data[row_idx]) { - out_data[offset++] = st; + out_data[offset++] = static_cast(st); if (st > st + step_data[row_idx]) throw Exception{"A call to function " + getName() + " overflows, investigate the values of arguments you are passing", @@ -333,7 +333,7 @@ private: { for (size_t st = start_data[row_idx], ed = 
end_start[row_idx]; st < ed; st += step_data[row_idx]) { - out_data[offset++] = st; + out_data[offset++] = static_cast(st); if (st > st + step_data[row_idx]) throw Exception{"A call to function " + getName() + " overflows, investigate the values of arguments you are passing", @@ -407,7 +407,7 @@ private: if ((res = executeConstStartStep(column_ptrs[1], start, step, input_rows_count)) || (res = executeConstStartStep(column_ptrs[1], start, step, input_rows_count)) || - (res = executeConstStartStep(column_ptrs[1], start, step, input_rows_count)) || + (res = executeConstStartStep(column_ptrs[1], static_cast(start), static_cast(step), input_rows_count)) || (res = executeConstStartStep(column_ptrs[1], start, step, input_rows_count))) { } @@ -418,7 +418,7 @@ private: if ((res = executeConstStart(column_ptrs[1], column_ptrs[2], start, input_rows_count)) || (res = executeConstStart(column_ptrs[1], column_ptrs[2], start, input_rows_count)) || - (res = executeConstStart(column_ptrs[1], column_ptrs[2], start, input_rows_count)) || + (res = executeConstStart(column_ptrs[1], column_ptrs[2], static_cast(start), input_rows_count)) || (res = executeConstStart(column_ptrs[1], column_ptrs[2], start, input_rows_count))) { } @@ -429,7 +429,7 @@ private: if ((res = executeConstStep(column_ptrs[0], column_ptrs[1], step, input_rows_count)) || (res = executeConstStep(column_ptrs[0], column_ptrs[1], step, input_rows_count)) || - (res = executeConstStep(column_ptrs[0], column_ptrs[1], step, input_rows_count)) || + (res = executeConstStep(column_ptrs[0], column_ptrs[1], static_cast(step), input_rows_count)) || (res = executeConstStep(column_ptrs[0], column_ptrs[1], step, input_rows_count))) { } diff --git a/src/Functions/blockSerializedSize.cpp b/src/Functions/blockSerializedSize.cpp index d406984c51c..35be65f3fed 100644 --- a/src/Functions/blockSerializedSize.cpp +++ b/src/Functions/blockSerializedSize.cpp @@ -54,7 +54,7 @@ public: auto serialization = elem.type->getDefaultSerialization(); - serialization->serializeBinaryBulkStatePrefix(settings, state); + serialization->serializeBinaryBulkStatePrefix(*full_column, settings, state); serialization->serializeBinaryBulkWithMultipleStreams(*full_column, 0 /** offset */, 0 /** limit */, settings, state); diff --git a/src/Functions/dateDiff.cpp b/src/Functions/dateDiff.cpp index b8bf3c11698..f5a4b50fb54 100644 --- a/src/Functions/dateDiff.cpp +++ b/src/Functions/dateDiff.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include #include @@ -44,7 +45,6 @@ namespace */ class FunctionDateDiff : public IFunction { - using ColumnDateTime64 = ColumnDecimal; public: static constexpr auto name = "dateDiff"; static FunctionPtr create(ContextPtr) { return std::make_shared(); } @@ -61,25 +61,30 @@ public: DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override { if (arguments.size() != 3 && arguments.size() != 4) - throw Exception("Number of arguments for function " + getName() + " doesn't match: passed " - + toString(arguments.size()) + ", should be 3 or 4", - ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Number of arguments for function {} doesn't match: passed {}, should be 3 or 4", + getName(), arguments.size()); if (!isString(arguments[0])) - throw Exception("First argument for function " + getName() + " (unit) must be String", - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "First argument for function {} (unit) must be String", + 
getName()); - if (!isDate(arguments[1]) && !isDateTime(arguments[1]) && !isDateTime64(arguments[1])) - throw Exception("Second argument for function " + getName() + " must be Date or DateTime", - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + if (!isDate(arguments[1]) && !isDate32(arguments[1]) && !isDateTime(arguments[1]) && !isDateTime64(arguments[1])) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Second argument for function {} must be Date, Date32, DateTime or DateTime64", + getName()); - if (!isDate(arguments[2]) && !isDateTime(arguments[2]) && !isDateTime64(arguments[2])) - throw Exception("Third argument for function " + getName() + " must be Date or DateTime", - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + if (!isDate(arguments[2]) && !isDate32(arguments[2]) && !isDateTime(arguments[2]) && !isDateTime64(arguments[2])) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Third argument for function {} must be Date, Date32, DateTime or DateTime64", + getName() + ); if (arguments.size() == 4 && !isString(arguments[3])) - throw Exception("Fourth argument for function " + getName() + " (timezone) must be String", - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Fourth argument for function {} (timezone) must be String", + getName()); return std::make_shared(); } @@ -91,7 +96,9 @@ public: { const auto * unit_column = checkAndGetColumnConst(arguments[0].column.get()); if (!unit_column) - throw Exception("First argument for function " + getName() + " must be constant String", ErrorCodes::ILLEGAL_COLUMN); + throw Exception(ErrorCodes::ILLEGAL_COLUMN, + "First argument for function {} must be constant String", + getName()); String unit = Poco::toLower(unit_column->getValue()); @@ -105,23 +112,24 @@ public: const auto & timezone_y = extractTimeZoneFromFunctionArguments(arguments, 3, 2); if (unit == "year" || unit == "yy" || unit == "yyyy") - dispatchForColumns(x, y, timezone_x, timezone_y, res->getData()); + dispatchForColumns>(x, y, timezone_x, timezone_y, res->getData()); else if (unit == "quarter" || unit == "qq" || unit == "q") - dispatchForColumns(x, y, timezone_x, timezone_y, res->getData()); + dispatchForColumns>(x, y, timezone_x, timezone_y, res->getData()); else if (unit == "month" || unit == "mm" || unit == "m") - dispatchForColumns(x, y, timezone_x, timezone_y, res->getData()); + dispatchForColumns>(x, y, timezone_x, timezone_y, res->getData()); else if (unit == "week" || unit == "wk" || unit == "ww") - dispatchForColumns(x, y, timezone_x, timezone_y, res->getData()); + dispatchForColumns>(x, y, timezone_x, timezone_y, res->getData()); else if (unit == "day" || unit == "dd" || unit == "d") - dispatchForColumns(x, y, timezone_x, timezone_y, res->getData()); + dispatchForColumns>(x, y, timezone_x, timezone_y, res->getData()); else if (unit == "hour" || unit == "hh" || unit == "h") - dispatchForColumns(x, y, timezone_x, timezone_y, res->getData()); + dispatchForColumns>(x, y, timezone_x, timezone_y, res->getData()); else if (unit == "minute" || unit == "mi" || unit == "n") - dispatchForColumns(x, y, timezone_x, timezone_y, res->getData()); + dispatchForColumns>(x, y, timezone_x, timezone_y, res->getData()); else if (unit == "second" || unit == "ss" || unit == "s") - dispatchForColumns(x, y, timezone_x, timezone_y, res->getData()); + dispatchForColumns>(x, y, timezone_x, timezone_y, res->getData()); else - throw Exception("Function " + getName() + " does not support '" + unit + "' unit", ErrorCodes::BAD_ARGUMENTS); + throw 
Exception(ErrorCodes::BAD_ARGUMENTS, + "Function {} does not support '{}' unit", getName(), unit); return res; } @@ -133,20 +141,26 @@ private: const DateLUTImpl & timezone_x, const DateLUTImpl & timezone_y, ColumnInt64::Container & result) const { - if (const auto * x_vec_16 = checkAndGetColumn(&x)) + if (const auto * x_vec_16 = checkAndGetColumn(&x)) dispatchForSecondColumn(*x_vec_16, y, timezone_x, timezone_y, result); - else if (const auto * x_vec_32 = checkAndGetColumn(&x)) + else if (const auto * x_vec_32 = checkAndGetColumn(&x)) dispatchForSecondColumn(*x_vec_32, y, timezone_x, timezone_y, result); + else if (const auto * x_vec_32_s = checkAndGetColumn(&x)) + dispatchForSecondColumn(*x_vec_32_s, y, timezone_x, timezone_y, result); else if (const auto * x_vec_64 = checkAndGetColumn(&x)) dispatchForSecondColumn(*x_vec_64, y, timezone_x, timezone_y, result); - else if (const auto * x_const_16 = checkAndGetColumnConst(&x)) + else if (const auto * x_const_16 = checkAndGetColumnConst(&x)) dispatchConstForSecondColumn(x_const_16->getValue(), y, timezone_x, timezone_y, result); - else if (const auto * x_const_32 = checkAndGetColumnConst(&x)) + else if (const auto * x_const_32 = checkAndGetColumnConst(&x)) dispatchConstForSecondColumn(x_const_32->getValue(), y, timezone_x, timezone_y, result); + else if (const auto * x_const_32_s = checkAndGetColumnConst(&x)) + dispatchConstForSecondColumn(x_const_32_s->getValue(), y, timezone_x, timezone_y, result); else if (const auto * x_const_64 = checkAndGetColumnConst(&x)) dispatchConstForSecondColumn(x_const_64->getValue>(), y, timezone_x, timezone_y, result); else - throw Exception("Illegal column for first argument of function " + getName() + ", must be Date, DateTime or DateTime64", ErrorCodes::ILLEGAL_COLUMN); + throw Exception(ErrorCodes::ILLEGAL_COLUMN, + "Illegal column for first argument of function {}, must be Date, Date32, DateTime or DateTime64", + getName()); } template @@ -155,20 +169,26 @@ private: const DateLUTImpl & timezone_x, const DateLUTImpl & timezone_y, ColumnInt64::Container & result) const { - if (const auto * y_vec_16 = checkAndGetColumn(&y)) + if (const auto * y_vec_16 = checkAndGetColumn(&y)) vectorVector(x, *y_vec_16, timezone_x, timezone_y, result); - else if (const auto * y_vec_32 = checkAndGetColumn(&y)) + else if (const auto * y_vec_32 = checkAndGetColumn(&y)) vectorVector(x, *y_vec_32, timezone_x, timezone_y, result); + else if (const auto * y_vec_32_s = checkAndGetColumn(&y)) + vectorVector(x, *y_vec_32_s, timezone_x, timezone_y, result); else if (const auto * y_vec_64 = checkAndGetColumn(&y)) vectorVector(x, *y_vec_64, timezone_x, timezone_y, result); - else if (const auto * y_const_16 = checkAndGetColumnConst(&y)) + else if (const auto * y_const_16 = checkAndGetColumnConst(&y)) vectorConstant(x, y_const_16->getValue(), timezone_x, timezone_y, result); - else if (const auto * y_const_32 = checkAndGetColumnConst(&y)) + else if (const auto * y_const_32 = checkAndGetColumnConst(&y)) vectorConstant(x, y_const_32->getValue(), timezone_x, timezone_y, result); + else if (const auto * y_const_32_s = checkAndGetColumnConst(&y)) + vectorConstant(x, y_const_32_s->getValue(), timezone_x, timezone_y, result); else if (const auto * y_const_64 = checkAndGetColumnConst(&y)) vectorConstant(x, y_const_64->getValue>(), timezone_x, timezone_y, result); else - throw Exception("Illegal column for second argument of function " + getName() + ", must be Date or DateTime", ErrorCodes::ILLEGAL_COLUMN); + throw 
Exception(ErrorCodes::ILLEGAL_COLUMN, + "Illegal column for second argument of function {}, must be Date, Date32, DateTime or DateTime64", + getName()); } template @@ -177,14 +197,18 @@ private: const DateLUTImpl & timezone_x, const DateLUTImpl & timezone_y, ColumnInt64::Container & result) const { - if (const auto * y_vec_16 = checkAndGetColumn(&y)) + if (const auto * y_vec_16 = checkAndGetColumn(&y)) constantVector(x, *y_vec_16, timezone_x, timezone_y, result); - else if (const auto * y_vec_32 = checkAndGetColumn(&y)) + else if (const auto * y_vec_32 = checkAndGetColumn(&y)) constantVector(x, *y_vec_32, timezone_x, timezone_y, result); + else if (const auto * y_vec_32_s = checkAndGetColumn(&y)) + constantVector(x, *y_vec_32_s, timezone_x, timezone_y, result); else if (const auto * y_vec_64 = checkAndGetColumn(&y)) constantVector(x, *y_vec_64, timezone_x, timezone_y, result); else - throw Exception("Illegal column for second argument of function " + getName() + ", must be Date or DateTime", ErrorCodes::ILLEGAL_COLUMN); + throw Exception(ErrorCodes::ILLEGAL_COLUMN, + "Illegal column for second argument of function {}, must be Date, Date32, DateTime or DateTime64", + getName()); } template diff --git a/src/Functions/dateName.cpp b/src/Functions/dateName.cpp index 3911b1cf838..36c0be49190 100644 --- a/src/Functions/dateName.cpp +++ b/src/Functions/dateName.cpp @@ -4,6 +4,7 @@ #include #include +#include #include #include #include @@ -34,6 +35,11 @@ template <> struct DataTypeToTimeTypeMap using TimeType = UInt16; }; +template <> struct DataTypeToTimeTypeMap +{ + using TimeType = Int32; +}; + template <> struct DataTypeToTimeTypeMap { using TimeType = UInt32; @@ -72,7 +78,7 @@ public: ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Number of arguments for function {} doesn't match: passed {}", getName(), - toString(arguments.size())); + arguments.size()); if (!WhichDataType(arguments[0].type).isString()) throw Exception( @@ -83,7 +89,7 @@ public: WhichDataType first_argument_type(arguments[1].type); - if (!(first_argument_type.isDate() || first_argument_type.isDateTime() || first_argument_type.isDateTime64())) + if (!(first_argument_type.isDate() || first_argument_type.isDateTime() || first_argument_type.isDate32() || first_argument_type.isDateTime64())) throw Exception( ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of 2 argument of function {}. 
Must be a date or a date with time", @@ -108,6 +114,7 @@ public: ColumnPtr res; if (!((res = executeType(arguments, result_type)) + || (res = executeType(arguments, result_type)) || (res = executeType(arguments, result_type)) || (res = executeType(arguments, result_type)))) throw Exception( diff --git a/src/Functions/divide/divideImpl.cpp b/src/Functions/divide/divideImpl.cpp index 6c151dfefb5..940f4b35df9 100644 --- a/src/Functions/divide/divideImpl.cpp +++ b/src/Functions/divide/divideImpl.cpp @@ -18,7 +18,7 @@ namespace NAMESPACE template void divideImpl(const A * __restrict a_pos, B b, ResultType * __restrict c_pos, size_t size) { - libdivide::divider divider(b); + libdivide::divider divider(static_cast(b)); const A * a_end = a_pos + size; #if defined(__SSE2__) diff --git a/src/Functions/errorCodeToName.cpp b/src/Functions/errorCodeToName.cpp index 1736311c6cc..0025d38c8f2 100644 --- a/src/Functions/errorCodeToName.cpp +++ b/src/Functions/errorCodeToName.cpp @@ -45,7 +45,8 @@ public: for (size_t i = 0; i < input_rows_count; ++i) { const Int64 error_code = input_column.getInt(i); - std::string_view error_name = ErrorCodes::getName(error_code); + std::string_view error_name = + ErrorCodes::getName(static_cast(error_code)); col_res->insertData(error_name.data(), error_name.size()); } diff --git a/src/Functions/extractAllGroups.h b/src/Functions/extractAllGroups.h index 06b16181c94..e077086a359 100644 --- a/src/Functions/extractAllGroups.h +++ b/src/Functions/extractAllGroups.h @@ -136,7 +136,8 @@ public: const auto * end = pos + current_row.size(); while (pos < end && regexp->Match({pos, static_cast(end - pos)}, - 0, end - pos, regexp->UNANCHORED, matched_groups.data(), matched_groups.size())) + 0, end - pos, regexp->UNANCHORED, + matched_groups.data(), static_cast(matched_groups.size()))) { // 1 is to exclude group #0 which is whole re match. for (size_t group = 1; group <= groups_count; ++group) @@ -179,7 +180,8 @@ public: const auto * end = pos + current_row.size; while (pos < end && regexp->Match({pos, static_cast(end - pos)}, - 0, end - pos, regexp->UNANCHORED, matched_groups.data(), matched_groups.size())) + 0, end - pos, regexp->UNANCHORED, matched_groups.data(), + static_cast(matched_groups.size()))) { // 1 is to exclude group #0 which is whole re match. for (size_t group = 1; group <= groups_count; ++group) diff --git a/src/Functions/extractGroups.cpp b/src/Functions/extractGroups.cpp index eb6e609a4be..8ec389827db 100644 --- a/src/Functions/extractGroups.cpp +++ b/src/Functions/extractGroups.cpp @@ -90,7 +90,8 @@ public: std::string_view current_row = column_haystack->getDataAt(i).toView(); if (re2->Match(re2_st::StringPiece(current_row.data(), current_row.size()), - 0, current_row.size(), re2_st::RE2::UNANCHORED, matched_groups.data(), matched_groups.size())) + 0, current_row.size(), re2_st::RE2::UNANCHORED, matched_groups.data(), + static_cast(matched_groups.size()))) { // 1 is to exclude group #0 which is whole re match. 
for (size_t group = 1; group <= groups_count; ++group) diff --git a/src/Functions/formatDateTime.cpp b/src/Functions/formatDateTime.cpp index 09071c5c1a0..4db04d61d84 100644 --- a/src/Functions/formatDateTime.cpp +++ b/src/Functions/formatDateTime.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include #include @@ -45,6 +46,7 @@ template <> struct ActionValueTypeMap { using ActionValueTyp template <> struct ActionValueTypeMap { using ActionValueType = UInt32; }; template <> struct ActionValueTypeMap { using ActionValueType = UInt32; }; template <> struct ActionValueTypeMap { using ActionValueType = UInt16; }; +template <> struct ActionValueTypeMap { using ActionValueType = Int32; }; template <> struct ActionValueTypeMap { using ActionValueType = UInt32; }; // TODO(vnemkov): to add sub-second format instruction, make that DateTime64 and do some math in Action. template <> struct ActionValueTypeMap { using ActionValueType = Int64; }; @@ -315,44 +317,39 @@ public: if constexpr (support_integer) { if (arguments.size() != 1 && arguments.size() != 2 && arguments.size() != 3) - throw Exception( - "Number of arguments for function " + getName() + " doesn't match: passed " + toString(arguments.size()) - + ", should be 1, 2 or 3", - ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Number of arguments for function {} doesn't match: passed {}, should be 1, 2 or 3", + getName(), arguments.size()); if (arguments.size() == 1 && !isInteger(arguments[0].type)) - throw Exception( - "Illegal type " + arguments[0].type->getName() + " of 1 argument of function " + getName() - + " when arguments size is 1. Should be integer", - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); - if (arguments.size() > 1 && !(isInteger(arguments[0].type) || isDate(arguments[0].type) || isDateTime(arguments[0].type) || isDateTime64(arguments[0].type))) - throw Exception( - "Illegal type " + arguments[0].type->getName() + " of 1 argument of function " + getName() - + " when arguments size is 2 or 3. Should be a integer or a date with time", - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of first argument of function {} when arguments size is 1. Should be integer", + arguments[0].type->getName(), getName()); + if (arguments.size() > 1 && !(isInteger(arguments[0].type) || isDate(arguments[0].type) || isDateTime(arguments[0].type) || isDate32(arguments[0].type) || isDateTime64(arguments[0].type))) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of first argument of function {} when arguments size is 2 or 3. Should be a integer or a date with time", + arguments[0].type->getName(), getName()); } else { if (arguments.size() != 2 && arguments.size() != 3) - throw Exception( - "Number of arguments for function " + getName() + " doesn't match: passed " + toString(arguments.size()) - + ", should be 2 or 3", - ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); - if (!isDate(arguments[0].type) && !isDateTime(arguments[0].type) && !isDateTime64(arguments[0].type)) - throw Exception( - "Illegal type " + arguments[0].type->getName() + " of 1 argument of function " + getName() - + ". 
Should be a date or a date with time", - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Number of arguments for function {} doesn't match: passed {}, should be 2 or 3", + getName(), arguments.size()); + if (!isDate(arguments[0].type) && !isDateTime(arguments[0].type) && !isDate32(arguments[0].type) && !isDateTime64(arguments[0].type)) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of first argument of function {}. Should be a date or a date with time", + arguments[0].type->getName(), getName()); } if (arguments.size() == 2 && !WhichDataType(arguments[1].type).isString()) - throw Exception( - "Illegal type " + arguments[1].type->getName() + " of 2 argument of function " + getName() + ". Must be String.", - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of second argument of function {}. Must be String.", + arguments[1].type->getName(), getName()); if (arguments.size() == 3 && !WhichDataType(arguments[2].type).isString()) - throw Exception( - "Illegal type " + arguments[2].type->getName() + " of 3 argument of function " + getName() + ". Must be String.", - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of third argument of function {}. Must be String.", + arguments[2].type->getName(), getName()); if (arguments.size() == 1) return std::make_shared(); @@ -373,10 +370,9 @@ public: return true; })) { - throw Exception( - "Illegal column " + arguments[0].column->getName() + " of function " + getName() - + ", must be Integer or DateTime when arguments size is 1.", - ErrorCodes::ILLEGAL_COLUMN); + throw Exception(ErrorCodes::ILLEGAL_COLUMN, + "Illegal column {} of function {}, must be Integer, Date, Date32, DateTime or DateTime64 when arguments size is 1.", + arguments[0].column->getName(), getName()); } } else @@ -385,32 +381,31 @@ public: { using FromDataType = std::decay_t; if (!(res = executeType(arguments, result_type))) - throw Exception( - "Illegal column " + arguments[0].column->getName() + " of function " + getName() - + ", must be Integer or DateTime.", - ErrorCodes::ILLEGAL_COLUMN); + throw Exception(ErrorCodes::ILLEGAL_COLUMN, + "Illegal column {} of function {}, must be Integer, Date, Date32, DateTime or DateTime64.", + arguments[0].column->getName(), getName()); return true; })) { if (!((res = executeType(arguments, result_type)) + || (res = executeType(arguments, result_type)) || (res = executeType(arguments, result_type)) || (res = executeType(arguments, result_type)))) - throw Exception( - "Illegal column " + arguments[0].column->getName() + " of function " + getName() - + ", must be Integer or DateTime.", - ErrorCodes::ILLEGAL_COLUMN); + throw Exception(ErrorCodes::ILLEGAL_COLUMN, + "Illegal column {} of function {}, must be Integer or DateTime.", + arguments[0].column->getName(), getName()); } } } else { if (!((res = executeType(arguments, result_type)) + || (res = executeType(arguments, result_type)) || (res = executeType(arguments, result_type)) || (res = executeType(arguments, result_type)))) - throw Exception( - "Illegal column " + arguments[0].column->getName() + " of function " + getName() - + ", must be Date or DateTime.", - ErrorCodes::ILLEGAL_COLUMN); + throw Exception(ErrorCodes::ILLEGAL_COLUMN, + "Illegal column {} of function {}, must be Date or DateTime.", + arguments[0].column->getName(), getName()); } return res; @@ -425,10 +420,9 @@ public: const 
ColumnConst * pattern_column = checkAndGetColumnConst(arguments[1].column.get()); if (!pattern_column) - throw Exception("Illegal column " + arguments[1].column->getName() - + " of second ('format') argument of function " + getName() - + ". Must be constant string.", - ErrorCodes::ILLEGAL_COLUMN); + throw Exception(ErrorCodes::ILLEGAL_COLUMN, + "Illegal column {} of second ('format') argument of function {}. Must be constant string.", + arguments[1].column->getName(), getName()); String pattern = pattern_column->getValue(); @@ -499,7 +493,7 @@ public: else { for (auto & instruction : instructions) - instruction.perform(pos, vec[i], time_zone); + instruction.perform(pos, static_cast(vec[i]), time_zone); } dst_offsets[i] = pos - begin; @@ -712,12 +706,14 @@ public: // Unimplemented case 'U': [[fallthrough]]; case 'W': - throw Exception("Wrong pattern '" + pattern + "', symbol '" + *pos + " is not implemented ' for function " + getName(), - ErrorCodes::NOT_IMPLEMENTED); + throw Exception(ErrorCodes::NOT_IMPLEMENTED, + "Wrong pattern '{}', symbol '{}' is not implemented for function {}", + pattern, *pos, getName()); default: - throw Exception( - "Wrong pattern '" + pattern + "', unexpected symbol '" + *pos + "' for function " + getName(), ErrorCodes::ILLEGAL_COLUMN); + throw Exception(ErrorCodes::ILLEGAL_COLUMN, + "Wrong pattern '{}', unexpected symbol '{}' for function {}", + pattern, *pos, getName()); } ++pos; diff --git a/src/Functions/grouping.h b/src/Functions/grouping.h index b9ef6ffc107..830c509f1f5 100644 --- a/src/Functions/grouping.h +++ b/src/Functions/grouping.h @@ -13,6 +13,11 @@ namespace DB { +namespace ErrorCodes +{ + extern const int UNSUPPORTED_METHOD; +} + class FunctionGroupingBase : public IFunction { protected: @@ -71,6 +76,22 @@ public: } }; +class FunctionGrouping : public FunctionGroupingBase +{ +public: + explicit FunctionGrouping(bool force_compatibility_) + : FunctionGroupingBase(ColumnNumbers(), force_compatibility_) + {} + + String getName() const override { return "grouping"; } + + ColumnPtr executeImpl(const ColumnsWithTypeAndName &, const DataTypePtr &, size_t) const override + { + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, + "Method executeImpl is not supported for 'grouping' function"); + } +}; + class FunctionGroupingOrdinary : public FunctionGroupingBase { public: diff --git a/src/Functions/initializeAggregation.cpp b/src/Functions/initializeAggregation.cpp index b7dcce9c188..08352553b9c 100644 --- a/src/Functions/initializeAggregation.cpp +++ b/src/Functions/initializeAggregation.cpp @@ -17,7 +17,6 @@ namespace DB namespace ErrorCodes { extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; - extern const int ILLEGAL_COLUMN; extern const int ILLEGAL_TYPE_OF_ARGUMENT; extern const int BAD_ARGUMENTS; } @@ -114,13 +113,6 @@ ColumnPtr FunctionInitializeAggregation::executeImpl(const ColumnsWithTypeAndNam MutableColumnPtr result_holder = result_type->createColumn(); IColumn & res_col = *result_holder; - /// AggregateFunction's states should be inserted into column using specific way - auto * res_col_aggregate_function = typeid_cast(&res_col); - - if (!res_col_aggregate_function && agg_func.isState()) - throw Exception("State function " + agg_func.getName() + " inserts results into non-state column " - + result_type->getName(), ErrorCodes::ILLEGAL_COLUMN); - PODArray places(input_rows_count); for (size_t i = 0; i < input_rows_count; ++i) { @@ -151,10 +143,9 @@ ColumnPtr FunctionInitializeAggregation::executeImpl(const ColumnsWithTypeAndNam } for (size_t i = 0; 
i < input_rows_count; ++i) - if (!res_col_aggregate_function) - agg_func.insertResultInto(places[i], res_col, arena.get()); - else - res_col_aggregate_function->insertFrom(places[i]); + /// We should use insertMergeResultInto to insert result into ColumnAggregateFunction + /// correctly if result contains AggregateFunction's states + agg_func.insertMergeResultInto(places[i], res_col, arena.get()); return result_holder; } diff --git a/src/Functions/makeDate.cpp b/src/Functions/makeDate.cpp index e2d93c0fdc9..7ebca71af13 100644 --- a/src/Functions/makeDate.cpp +++ b/src/Functions/makeDate.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include @@ -149,7 +150,7 @@ struct MakeDateTraits { static constexpr auto name = "makeDate"; using ReturnDataType = DataTypeDate; - using ReturnColumnType = ColumnUInt16; + using ReturnColumnType = ColumnDate; static constexpr auto MIN_YEAR = 1970; static constexpr auto MAX_YEAR = 2149; @@ -162,7 +163,7 @@ struct MakeDate32Traits { static constexpr auto name = "makeDate32"; using ReturnDataType = DataTypeDate32; - using ReturnColumnType = ColumnInt32; + using ReturnColumnType = ColumnDate32; static constexpr auto MIN_YEAR = 1900; static constexpr auto MAX_YEAR = 2299; @@ -267,7 +268,7 @@ public: Columns converted_arguments; convertRequiredArguments(arguments, converted_arguments); - auto res_column = ColumnUInt32::create(input_rows_count); + auto res_column = ColumnDateTime::create(input_rows_count); auto & result_data = res_column->getData(); const auto & year_data = typeid_cast(*converted_arguments[0]).getData(); @@ -294,7 +295,7 @@ public: else if (unlikely(date_time > 0x0ffffffffll)) date_time = 0x0ffffffffll; - result_data[i] = date_time; + result_data[i] = static_cast(date_time); } return res_column; @@ -365,7 +366,7 @@ public: fraction_data = &typeid_cast(*converted_arguments[6]).getData(); } - auto res_column = ColumnDecimal::create(input_rows_count, precision); + auto res_column = ColumnDateTime64::create(input_rows_count, static_cast(precision)); auto & result_data = res_column->getData(); const auto & year_data = typeid_cast(*converted_arguments[0]).getData(); @@ -411,7 +412,10 @@ public: fraction = max_fraction; } - result_data[i] = DecimalUtils::decimalFromComponents(date_time, static_cast(fraction), precision); + result_data[i] = DecimalUtils::decimalFromComponents( + date_time, + static_cast(fraction), + static_cast(precision)); } return res_column; diff --git a/src/Functions/minus.cpp b/src/Functions/minus.cpp index 3668e4afc18..04877a42b18 100644 --- a/src/Functions/minus.cpp +++ b/src/Functions/minus.cpp @@ -23,7 +23,7 @@ struct MinusImpl return static_cast(static_cast(a)) - static_cast(static_cast(b)); } else - return static_cast(a) - b; + return static_cast(a) - static_cast(b); } /// Apply operation and check overflow. It's used for Deciamal operations. @returns true if overflowed, false otherwise. 
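A small sanity check for the MinusImpl change above (illustrative only, not part of the patch): the per-operand static_casts appear to only silence compiler conversion warnings, since integer subtraction is already performed in the promoted ResultType. That promotion is visible from SQL through toTypeName; on current builds UInt8 minus UInt8 is expected to come back as Int16, so the query below should return -1 rather than a wrapped unsigned value.

    SELECT toUInt8(1) - toUInt8(2) AS diff, toTypeName(diff);
    -- expected: diff = -1, toTypeName(diff) = 'Int16'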
diff --git a/src/Functions/modulo.cpp b/src/Functions/modulo.cpp index 9a3aa12037f..b2411899160 100644 --- a/src/Functions/modulo.cpp +++ b/src/Functions/modulo.cpp @@ -80,7 +80,7 @@ struct ModuloByConstantImpl || (std::is_signed_v && std::is_signed_v && b < std::numeric_limits::lowest()))) { for (size_t i = 0; i < size; ++i) - dst[i] = src[i]; + dst[i] = static_cast(src[i]); return; } @@ -101,16 +101,19 @@ struct ModuloByConstantImpl if (b & (b - 1)) { - libdivide::divider divider(b); + libdivide::divider divider(static_cast(b)); for (size_t i = 0; i < size; ++i) - dst[i] = src[i] - (src[i] / divider) * b; /// NOTE: perhaps, the division semantics with the remainder of negative numbers is not preserved. + { + /// NOTE: perhaps, the division semantics with the remainder of negative numbers is not preserved. + dst[i] = static_cast(src[i] - (src[i] / divider) * b); + } } else { // gcc libdivide doesn't work well for pow2 division auto mask = b - 1; for (size_t i = 0; i < size; ++i) - dst[i] = src[i] & mask; + dst[i] = static_cast(src[i] & mask); } } diff --git a/src/Functions/mortonDecode.cpp b/src/Functions/mortonDecode.cpp new file mode 100644 index 00000000000..337fd5e3a38 --- /dev/null +++ b/src/Functions/mortonDecode.cpp @@ -0,0 +1,433 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#if USE_MULTITARGET_CODE && defined(__BMI2__) +#include +#endif + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int ILLEGAL_TYPE_OF_ARGUMENT; + extern const int ILLEGAL_COLUMN; + extern const int ARGUMENT_OUT_OF_BOUND; +} + +#define EXTRACT_VECTOR(INDEX) \ + auto col##INDEX = ColumnUInt64::create(); \ + auto & vec##INDEX = col##INDEX->getData(); \ + vec##INDEX.resize(input_rows_count); + +#define DECODE(ND, ...) \ + if (nd == (ND)) \ + { \ + for (size_t i = 0; i < input_rows_count; i++) \ + { \ + auto res = MortonND_##ND##D_Dec.Decode(col_code->getUInt(i)); \ + __VA_ARGS__ \ + } \ + } + +#define MASK(IDX, ...) \ + ((mask) ? 
shrink(mask->getColumn((IDX)).getUInt(0), std::get(__VA_ARGS__)) : std::get(__VA_ARGS__)) + +#define EXECUTE() \ + size_t nd; \ + const auto * col_const = typeid_cast(arguments[0].column.get()); \ + const auto * mask = typeid_cast(col_const->getDataColumnPtr().get()); \ + if (mask) \ + nd = mask->tupleSize(); \ + else \ + nd = col_const->getUInt(0); \ + auto non_const_arguments = arguments; \ + non_const_arguments[1].column = non_const_arguments[1].column->convertToFullColumnIfConst(); \ + const ColumnPtr & col_code = non_const_arguments[1].column; \ + Columns tuple_columns(nd); \ + EXTRACT_VECTOR(0) \ + if (nd == 1) \ + { \ + if (mask) \ + { \ + for (size_t i = 0; i < input_rows_count; i++) \ + { \ + vec0[i] = shrink(mask->getColumn(0).getUInt(0), col_code->getUInt(i)); \ + } \ + tuple_columns[0] = std::move(col0); \ + } \ + else \ + { \ + for (size_t i = 0; i < input_rows_count; i++) \ + { \ + vec0[i] = col_code->getUInt(i); \ + } \ + tuple_columns[0] = std::move(col0); \ + } \ + return ColumnTuple::create(tuple_columns); \ + } \ + EXTRACT_VECTOR(1) \ + DECODE(2, \ + vec0[i] = MASK(0, res); \ + vec1[i] = MASK(1, res);) \ + EXTRACT_VECTOR(2) \ + DECODE(3, \ + vec0[i] = MASK(0, res); \ + vec1[i] = MASK(1, res); \ + vec2[i] = MASK(2, res);) \ + EXTRACT_VECTOR(3) \ + DECODE(4, \ + vec0[i] = MASK(0, res); \ + vec1[i] = MASK(1, res); \ + vec2[i] = MASK(2, res); \ + vec3[i] = MASK(3, res);) \ + EXTRACT_VECTOR(4) \ + DECODE(5, \ + vec0[i] = MASK(0, res); \ + vec1[i] = MASK(1, res); \ + vec2[i] = MASK(2, res); \ + vec3[i] = MASK(3, res); \ + vec4[i] = MASK(4, res);) \ + EXTRACT_VECTOR(5) \ + DECODE(6, \ + vec0[i] = MASK(0, res); \ + vec1[i] = MASK(1, res); \ + vec2[i] = MASK(2, res); \ + vec3[i] = MASK(3, res); \ + vec4[i] = MASK(4, res); \ + vec5[i] = MASK(5, res);) \ + EXTRACT_VECTOR(6) \ + DECODE(7, \ + vec0[i] = MASK(0, res); \ + vec1[i] = MASK(1, res); \ + vec2[i] = MASK(2, res); \ + vec3[i] = MASK(3, res); \ + vec4[i] = MASK(4, res); \ + vec5[i] = MASK(5, res); \ + vec6[i] = MASK(6, res);) \ + EXTRACT_VECTOR(7) \ + DECODE(8, \ + vec0[i] = MASK(0, res); \ + vec1[i] = MASK(1, res); \ + vec2[i] = MASK(2, res); \ + vec3[i] = MASK(3, res); \ + vec4[i] = MASK(4, res); \ + vec5[i] = MASK(5, res); \ + vec6[i] = MASK(6, res); \ + vec7[i] = MASK(7, res);) \ + switch (nd) \ + { \ + case 2: \ + tuple_columns[0] = std::move(col0); \ + tuple_columns[1] = std::move(col1); \ + break; \ + case 3: \ + tuple_columns[0] = std::move(col0); \ + tuple_columns[1] = std::move(col1); \ + tuple_columns[2] = std::move(col2); \ + return ColumnTuple::create(tuple_columns); \ + case 4: \ + tuple_columns[0] = std::move(col0); \ + tuple_columns[1] = std::move(col1); \ + tuple_columns[2] = std::move(col2); \ + tuple_columns[3] = std::move(col3); \ + return ColumnTuple::create(tuple_columns); \ + case 5: \ + tuple_columns[0] = std::move(col0); \ + tuple_columns[1] = std::move(col1); \ + tuple_columns[2] = std::move(col2); \ + tuple_columns[3] = std::move(col3); \ + tuple_columns[4] = std::move(col4); \ + return ColumnTuple::create(tuple_columns); \ + case 6: \ + tuple_columns[0] = std::move(col0); \ + tuple_columns[1] = std::move(col1); \ + tuple_columns[2] = std::move(col2); \ + tuple_columns[3] = std::move(col3); \ + tuple_columns[4] = std::move(col4); \ + tuple_columns[5] = std::move(col5); \ + return ColumnTuple::create(tuple_columns); \ + case 7: \ + tuple_columns[0] = std::move(col0); \ + tuple_columns[1] = std::move(col1); \ + tuple_columns[2] = std::move(col2); \ + tuple_columns[3] = std::move(col3); \ + 
tuple_columns[4] = std::move(col4); \ + tuple_columns[5] = std::move(col5); \ + tuple_columns[6] = std::move(col6); \ + return ColumnTuple::create(tuple_columns); \ + case 8: \ + tuple_columns[0] = std::move(col0); \ + tuple_columns[1] = std::move(col1); \ + tuple_columns[2] = std::move(col2); \ + tuple_columns[3] = std::move(col3); \ + tuple_columns[4] = std::move(col4); \ + tuple_columns[5] = std::move(col5); \ + tuple_columns[6] = std::move(col6); \ + tuple_columns[7] = std::move(col7); \ + return ColumnTuple::create(tuple_columns); \ + } \ + return ColumnTuple::create(tuple_columns); + +DECLARE_DEFAULT_CODE( +constexpr auto MortonND_2D_Dec = mortonnd::MortonNDLutDecoder<2, 32, 8>(); +constexpr auto MortonND_3D_Dec = mortonnd::MortonNDLutDecoder<3, 21, 8>(); +constexpr auto MortonND_4D_Dec = mortonnd::MortonNDLutDecoder<4, 16, 8>(); +constexpr auto MortonND_5D_Dec = mortonnd::MortonNDLutDecoder<5, 12, 8>(); +constexpr auto MortonND_6D_Dec = mortonnd::MortonNDLutDecoder<6, 10, 8>(); +constexpr auto MortonND_7D_Dec = mortonnd::MortonNDLutDecoder<7, 9, 8>(); +constexpr auto MortonND_8D_Dec = mortonnd::MortonNDLutDecoder<8, 8, 8>(); +class FunctionMortonDecode : public IFunction +{ +public: + static constexpr auto name = "mortonDecode"; + static FunctionPtr create(ContextPtr) + { + return std::make_shared(); + } + + String getName() const override + { + return name; + } + + size_t getNumberOfArguments() const override + { + return 2; + } + + bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; } + + ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {0}; } + + DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override + { + UInt64 tuple_size = 0; + const auto * col_const = typeid_cast(arguments[0].column.get()); + if (!col_const) + throw Exception(ErrorCodes::ILLEGAL_COLUMN, + "Illegal column type {} of function {}, should be a constant (UInt or Tuple)", + arguments[0].type->getName(), getName()); + if (!WhichDataType(arguments[1].type).isNativeUInt()) + throw Exception(ErrorCodes::ILLEGAL_COLUMN, + "Illegal column type {} of function {}, should be a native UInt", + arguments[1].type->getName(), getName()); + const auto * mask = typeid_cast(col_const->getDataColumnPtr().get()); + if (mask) + { + tuple_size = mask->tupleSize(); + } + else if (WhichDataType(arguments[0].type).isNativeUInt()) + { + tuple_size = col_const->getUInt(0); + } + else + throw Exception(ErrorCodes::ILLEGAL_COLUMN, + "Illegal column type {} of function {}, should be UInt or Tuple", + arguments[0].type->getName(), getName()); + if (tuple_size > 8 || tuple_size < 1) + throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, + "Illegal first argument for function {}, should be a number in range 1-8 or a Tuple of such size", + getName()); + if (mask) + { + const auto * type_tuple = typeid_cast(arguments[0].type.get()); + for (size_t i = 0; i < tuple_size; i++) + { + if (!WhichDataType(type_tuple->getElement(i)).isNativeUInt()) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of argument in tuple for function {}, should be a native UInt", + type_tuple->getElement(i)->getName(), getName()); + auto ratio = mask->getColumn(i).getUInt(0); + if (ratio > 8 || ratio < 1) + throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, + "Illegal argument {} in tuple for function {}, should be a number in range 1-8", + ratio, getName()); + } + } + DataTypes types(tuple_size); + for (size_t i = 0; i < 
tuple_size; i++) + { + types[i] = std::make_shared(); + } + return std::make_shared(types); + } + + static UInt64 shrink(UInt64 ratio, UInt64 value) + { + switch (ratio) + { + case 1: + return value; + case 2: + return std::get<1>(MortonND_2D_Dec.Decode(value)); + case 3: + return std::get<2>(MortonND_3D_Dec.Decode(value)); + case 4: + return std::get<3>(MortonND_4D_Dec.Decode(value)); + case 5: + return std::get<4>(MortonND_5D_Dec.Decode(value)); + case 6: + return std::get<5>(MortonND_6D_Dec.Decode(value)); + case 7: + return std::get<6>(MortonND_7D_Dec.Decode(value)); + case 8: + return std::get<7>(MortonND_8D_Dec.Decode(value)); + } + return value; + } + + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override + { + EXECUTE() + } +}; +) // DECLARE_DEFAULT_CODE + +#if defined(MORTON_ND_BMI2_ENABLED) +#undef DECODE +#define DECODE(ND, ...) \ + if (nd == (ND)) \ + { \ + for (size_t i = 0; i < input_rows_count; i++) \ + { \ + auto res = MortonND_##ND##D::Decode(col_code->getUInt(i)); \ + __VA_ARGS__ \ + } \ + } + +DECLARE_AVX2_SPECIFIC_CODE( +using MortonND_2D = mortonnd::MortonNDBmi<2, uint64_t>; +using MortonND_3D = mortonnd::MortonNDBmi<3, uint64_t>; +using MortonND_4D = mortonnd::MortonNDBmi<4, uint64_t>; +using MortonND_5D = mortonnd::MortonNDBmi<5, uint64_t>; +using MortonND_6D = mortonnd::MortonNDBmi<6, uint64_t>; +using MortonND_7D = mortonnd::MortonNDBmi<7, uint64_t>; +using MortonND_8D = mortonnd::MortonNDBmi<8, uint64_t>; +class FunctionMortonDecode: public TargetSpecific::Default::FunctionMortonDecode +{ + static UInt64 shrink(UInt64 ratio, UInt64 value) + { + switch (ratio) + { + case 1: + return value; + case 2: + return std::get<1>(MortonND_2D::Decode(value)); + case 3: + return std::get<2>(MortonND_3D::Decode(value)); + case 4: + return std::get<3>(MortonND_4D::Decode(value)); + case 5: + return std::get<4>(MortonND_5D::Decode(value)); + case 6: + return std::get<5>(MortonND_6D::Decode(value)); + case 7: + return std::get<6>(MortonND_7D::Decode(value)); + case 8: + return std::get<7>(MortonND_8D::Decode(value)); + } + return value; + } + + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override + { + EXECUTE() + } +}; +) +#endif // MORTON_ND_BMI2_ENABLED + +#undef DECODE +#undef MASK +#undef EXTRACT_VECTOR +#undef EXECUTE + +class FunctionMortonDecode: public TargetSpecific::Default::FunctionMortonDecode +{ +public: + explicit FunctionMortonDecode(ContextPtr context) : selector(context) + { + selector.registerImplementation(); + +#if USE_MULTITARGET_CODE && defined(MORTON_ND_BMI2_ENABLED) + selector.registerImplementation(); +#endif + } + + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override + { + return selector.selectAndExecute(arguments, result_type, input_rows_count); + } + + static FunctionPtr create(ContextPtr context) + { + return std::make_shared(context); + } + +private: + ImplementationSelector selector; +}; + +REGISTER_FUNCTION(MortonDecode) +{ + factory.registerFunction({ + R"( +Decodes a Morton encoding (ZCurve) into the corresponding unsigned integer tuple + +The function has two modes of operation: +- Simple +- Expanded + +Simple: accepts a resulting tuple size as a first argument and the code as a second argument. 
+[example:simple] +Will decode into: `(1,2,3,4)` +The resulting tuple size cannot be more than 8 + +Expanded: accepts a range mask (tuple) as a first argument and the code as a second argument. +Each number in mask configures the amount of range shrink +1 - no shrink +2 - 2x shrink +3 - 3x shrink +.... +Up to 8x shrink. +[example:range_shrank] +Note: see mortonEncode() docs on why range change might be beneficial. +Still limited to 8 numbers at most. + +Morton code for one argument is always the argument itself (as a tuple). +[example:identity] +Produces: `(1)` + +You can shrink one argument too: +[example:identity_shrank] +Produces: `(128)` + +The function accepts a column of codes as a second argument: +[example:from_table] + +The range tuple must be a constant: +[example:from_table_range] +)", + Documentation::Examples{ + {"simple", "SELECT mortonDecode(4, 2149)"}, + {"range_shrank", "SELECT mortonDecode((1,2), 1572864)"}, + {"identity", "SELECT mortonDecode(1, 1)"}, + {"identity_shrank", "SELECT mortonDecode(tuple(2), 32768)"}, + {"from_table", "SELECT mortonDecode(2, code) FROM table"}, + {"from_table_range", "SELECT mortonDecode((1,2), code) FROM table"}, + }, + Documentation::Categories {"ZCurve", "Morton coding"} + }); +} + +} diff --git a/src/Functions/mortonEncode.cpp b/src/Functions/mortonEncode.cpp new file mode 100644 index 00000000000..4bdd237fa9c --- /dev/null +++ b/src/Functions/mortonEncode.cpp @@ -0,0 +1,393 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#if USE_MULTITARGET_CODE && defined(__BMI2__) +#include +#endif + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int ILLEGAL_TYPE_OF_ARGUMENT; + extern const int ARGUMENT_OUT_OF_BOUND; + extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION; +} + +#define EXTRACT_VECTOR(INDEX) \ + const ColumnPtr & col##INDEX = non_const_arguments[(INDEX) + vectorStartIndex].column; + +#define ENCODE(ND, ...) \ + if (nd == (ND)) \ + { \ + for (size_t i = 0; i < input_rows_count; i++) \ + { \ + vec_res[i] = MortonND_##ND##D_Enc.Encode(__VA_ARGS__); \ + } \ + return col_res; \ + } + +#define EXPAND(IDX, ...) \ + (mask) ? expand(mask->getColumn(IDX).getUInt(0), __VA_ARGS__) : __VA_ARGS__ + +#define MASK(ND, IDX, ...) 
\ + (EXPAND(IDX, __VA_ARGS__) & MortonND_##ND##D_Enc.InputMask()) + +#define EXECUTE() \ + size_t nd = arguments.size(); \ + size_t vectorStartIndex = 0; \ + const auto * const_col = typeid_cast(arguments[0].column.get()); \ + const ColumnTuple * mask; \ + if (const_col) \ + mask = typeid_cast(const_col->getDataColumnPtr().get()); \ + else \ + mask = typeid_cast(arguments[0].column.get()); \ + if (mask) \ + { \ + nd = mask->tupleSize(); \ + vectorStartIndex = 1; \ + for (size_t i = 0; i < nd; i++) \ + { \ + auto ratio = mask->getColumn(i).getUInt(0); \ + if (ratio > 8 || ratio < 1) \ + throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, \ + "Illegal argument {} of function {}, should be a number in range 1-8", \ + arguments[0].column->getName(), getName()); \ + } \ + } \ + \ + auto non_const_arguments = arguments; \ + for (auto & argument : non_const_arguments) \ + argument.column = argument.column->convertToFullColumnIfConst(); \ + \ + auto col_res = ColumnUInt64::create(); \ + ColumnUInt64::Container & vec_res = col_res->getData(); \ + vec_res.resize(input_rows_count); \ + \ + EXTRACT_VECTOR(0) \ + if (nd == 1) \ + { \ + for (size_t i = 0; i < input_rows_count; i++) \ + { \ + vec_res[i] = EXPAND(0, col0->getUInt(i)); \ + } \ + return col_res; \ + } \ + \ + EXTRACT_VECTOR(1) \ + ENCODE(2, \ + MASK(2, 0, col0->getUInt(i)), \ + MASK(2, 1, col1->getUInt(i))) \ + EXTRACT_VECTOR(2) \ + ENCODE(3, \ + MASK(3, 0, col0->getUInt(i)), \ + MASK(3, 1, col1->getUInt(i)), \ + MASK(3, 2, col2->getUInt(i))) \ + EXTRACT_VECTOR(3) \ + ENCODE(4, \ + MASK(4, 0, col0->getUInt(i)), \ + MASK(4, 1, col1->getUInt(i)), \ + MASK(4, 2, col2->getUInt(i)), \ + MASK(4, 3, col3->getUInt(i))) \ + EXTRACT_VECTOR(4) \ + ENCODE(5, \ + MASK(5, 0, col0->getUInt(i)), \ + MASK(5, 1, col1->getUInt(i)), \ + MASK(5, 2, col2->getUInt(i)), \ + MASK(5, 3, col3->getUInt(i)), \ + MASK(5, 4, col4->getUInt(i))) \ + EXTRACT_VECTOR(5) \ + ENCODE(6, \ + MASK(6, 0, col0->getUInt(i)), \ + MASK(6, 1, col1->getUInt(i)), \ + MASK(6, 2, col2->getUInt(i)), \ + MASK(6, 3, col3->getUInt(i)), \ + MASK(6, 4, col4->getUInt(i)), \ + MASK(6, 5, col5->getUInt(i))) \ + EXTRACT_VECTOR(6) \ + ENCODE(7, \ + MASK(7, 0, col0->getUInt(i)), \ + MASK(7, 1, col1->getUInt(i)), \ + MASK(7, 2, col2->getUInt(i)), \ + MASK(7, 3, col3->getUInt(i)), \ + MASK(7, 4, col4->getUInt(i)), \ + MASK(7, 5, col5->getUInt(i)), \ + MASK(7, 6, col6->getUInt(i))) \ + EXTRACT_VECTOR(7) \ + ENCODE(8, \ + MASK(8, 0, col0->getUInt(i)), \ + MASK(8, 1, col1->getUInt(i)), \ + MASK(8, 2, col2->getUInt(i)), \ + MASK(8, 3, col3->getUInt(i)), \ + MASK(8, 4, col4->getUInt(i)), \ + MASK(8, 5, col5->getUInt(i)), \ + MASK(8, 6, col6->getUInt(i)), \ + MASK(8, 7, col7->getUInt(i))) \ + \ + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, \ + "Illegal number of UInt arguments of function {}, max: 8", \ + getName()); \ + +DECLARE_DEFAULT_CODE( +constexpr auto MortonND_2D_Enc = mortonnd::MortonNDLutEncoder<2, 32, 8>(); +constexpr auto MortonND_3D_Enc = mortonnd::MortonNDLutEncoder<3, 21, 8>(); +constexpr auto MortonND_4D_Enc = mortonnd::MortonNDLutEncoder<4, 16, 8>(); +constexpr auto MortonND_5D_Enc = mortonnd::MortonNDLutEncoder<5, 12, 8>(); +constexpr auto MortonND_6D_Enc = mortonnd::MortonNDLutEncoder<6, 10, 8>(); +constexpr auto MortonND_7D_Enc = mortonnd::MortonNDLutEncoder<7, 9, 8>(); +constexpr auto MortonND_8D_Enc = mortonnd::MortonNDLutEncoder<8, 8, 8>(); +class FunctionMortonEncode : public IFunction +{ +public: + static constexpr auto name = "mortonEncode"; + static FunctionPtr 
create(ContextPtr) + { + return std::make_shared(); + } + + String getName() const override + { + return name; + } + + bool isVariadic() const override + { + return true; + } + + size_t getNumberOfArguments() const override + { + return 0; + } + + bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; } + + bool useDefaultImplementationForConstants() const override { return true; } + + DataTypePtr getReturnTypeImpl(const DB::DataTypes & arguments) const override + { + size_t vectorStartIndex = 0; + if (arguments.empty()) + throw Exception(ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION, + "At least one UInt argument is required for function {}", + getName()); + if (WhichDataType(arguments[0]).isTuple()) + { + vectorStartIndex = 1; + const auto * type_tuple = typeid_cast(arguments[0].get()); + auto tuple_size = type_tuple->getElements().size(); + if (tuple_size != (arguments.size() - 1)) + throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, + "Illegal argument {} for function {}, tuple size should be equal to number of UInt arguments", + arguments[0]->getName(), getName()); + for (size_t i = 0; i < tuple_size; i++) + { + if (!WhichDataType(type_tuple->getElement(i)).isNativeUInt()) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of argument in tuple for function {}, should be a native UInt", + type_tuple->getElement(i)->getName(), getName()); + } + } + + for (size_t i = vectorStartIndex; i < arguments.size(); i++) + { + const auto & arg = arguments[i]; + if (!WhichDataType(arg).isNativeUInt()) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of argument of function {}, should be a native UInt", + arg->getName(), getName()); + } + return std::make_shared(); + } + + static UInt64 expand(UInt64 ratio, UInt64 value) + { + switch (ratio) + { + case 1: + return value; + case 2: + return MortonND_2D_Enc.Encode(0, value & MortonND_2D_Enc.InputMask()); + case 3: + return MortonND_3D_Enc.Encode(0, 0, value & MortonND_3D_Enc.InputMask()); + case 4: + return MortonND_4D_Enc.Encode(0, 0, 0, value & MortonND_4D_Enc.InputMask()); + case 5: + return MortonND_5D_Enc.Encode(0, 0, 0, 0, value & MortonND_5D_Enc.InputMask()); + case 6: + return MortonND_6D_Enc.Encode(0, 0, 0, 0, 0, value & MortonND_6D_Enc.InputMask()); + case 7: + return MortonND_7D_Enc.Encode(0, 0, 0, 0, 0, 0, value & MortonND_7D_Enc.InputMask()); + case 8: + return MortonND_8D_Enc.Encode(0, 0, 0, 0, 0, 0, 0, value & MortonND_8D_Enc.InputMask()); + } + return value; + } + + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override + { + EXECUTE() + } +}; +) // DECLARE_DEFAULT_CODE + +#if defined(MORTON_ND_BMI2_ENABLED) +#undef ENCODE +#define ENCODE(ND, ...) \ + if (nd == (ND)) \ + { \ + for (size_t i = 0; i < input_rows_count; i++) \ + { \ + vec_res[i] = MortonND_##ND##D::Encode(__VA_ARGS__); \ + } \ + return col_res; \ + } + +#undef MASK +#define MASK(ND, IDX, ...) 
\ + (EXPAND(IDX, __VA_ARGS__)) + +DECLARE_AVX2_SPECIFIC_CODE( +using MortonND_2D = mortonnd::MortonNDBmi<2, uint64_t>; +using MortonND_3D = mortonnd::MortonNDBmi<3, uint64_t>; +using MortonND_4D = mortonnd::MortonNDBmi<4, uint64_t>; +using MortonND_5D = mortonnd::MortonNDBmi<5, uint64_t>; +using MortonND_6D = mortonnd::MortonNDBmi<6, uint64_t>; +using MortonND_7D = mortonnd::MortonNDBmi<7, uint64_t>; +using MortonND_8D = mortonnd::MortonNDBmi<8, uint64_t>; + +class FunctionMortonEncode : public TargetSpecific::Default::FunctionMortonEncode +{ +public: + static UInt64 expand(UInt64 ratio, UInt64 value) + { + switch (ratio) + { + case 1: + return value; + case 2: + return MortonND_2D::Encode(0, value); + case 3: + return MortonND_3D::Encode(0, 0, value); + case 4: + return MortonND_4D::Encode(0, 0, 0, value); + case 5: + return MortonND_5D::Encode(0, 0, 0, 0, value); + case 6: + return MortonND_6D::Encode(0, 0, 0, 0, 0, value); + case 7: + return MortonND_7D::Encode(0, 0, 0, 0, 0, 0, value); + case 8: + return MortonND_8D::Encode(0, 0, 0, 0, 0, 0, 0, value); + } + return value; + } + + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override + { + EXECUTE() + } +}; +) // DECLARE_AVX2_SPECIFIC_CODE +#endif // MORTON_ND_BMI2_ENABLED + +#undef ENCODE +#undef MASK +#undef EXTRACT_VECTOR +#undef EXPAND +#undef EXECUTE + +class FunctionMortonEncode: public TargetSpecific::Default::FunctionMortonEncode +{ +public: + explicit FunctionMortonEncode(ContextPtr context) : selector(context) + { + selector.registerImplementation(); + +#if USE_MULTITARGET_CODE && defined(MORTON_ND_BMI2_ENABLED) + selector.registerImplementation(); +#endif + } + + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override + { + return selector.selectAndExecute(arguments, result_type, input_rows_count); + } + + static FunctionPtr create(ContextPtr context) + { + return std::make_shared(context); + } + +private: + ImplementationSelector selector; +}; + +REGISTER_FUNCTION(MortonEncode) +{ + factory.registerFunction({ + R"( +Calculates Morton encoding (ZCurve) for a list of unsigned integers + +The function has two modes of operation: +- Simple +- Expanded + +Simple: accepts up to 8 unsigned integers as arguments and produces a UInt64 code. +[example:simple] + +Expanded: accepts a range mask (tuple) as a first argument and up to 8 unsigned integers as other arguments. +Each number in mask configures the amount of range expansion +1 - no expansion +2 - 2x expansion +3 - 3x expansion +.... +Up to 8x expansion. +[example:range_expanded] +Note: tuple size must be equal to the number of the other arguments + +Range expansion can be beneficial when you need a similar distribution for arguments with wildly different ranges (or cardinality) +For example: 'IP Address' (0...FFFFFFFF) and 'Country code' (0...FF) + +Morton encoding for one argument is always the argument itself. +[example:identity] +Produces: `1` + +You can expand one argument too: +[example:identity_expanded] +Produces: `32768` + +The function also accepts columns as arguments: +[example:from_table] + +But the range tuple must still be a constant: +[example:from_table_range] + +Please note that you can fit only so much bits of information into Morton code as UInt64 has. 
+Two arguments will have a range of maximum 2^32 (64/2) each +Three arguments: range of max 2^21 (64/3) each +And so on, all overflow will be clamped to zero +)", + Documentation::Examples{ + {"simple", "SELECT mortonEncode(1, 2, 3)"}, + {"range_expanded", "SELECT mortonEncode((1,2), 1024, 16)"}, + {"identity", "SELECT mortonEncode(1)"}, + {"identity_expanded", "SELECT mortonEncode(tuple(2), 128)"}, + {"from_table", "SELECT mortonEncode(n1, n2) FROM table"}, + {"from_table_range", "SELECT mortonEncode((1,2), n1, n2) FROM table"}, + }, + Documentation::Categories {"ZCurve", "Morton coding"} + }); +} + +} diff --git a/src/Functions/now64.cpp b/src/Functions/now64.cpp index c5225d3317f..a6df4235d60 100644 --- a/src/Functions/now64.cpp +++ b/src/Functions/now64.cpp @@ -130,7 +130,7 @@ public: ". Expected const integer.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); - scale = argument.column->get64(0); + scale = static_cast(argument.column->get64(0)); } if (arguments.size() == 2) { diff --git a/src/Functions/nowInBlock.cpp b/src/Functions/nowInBlock.cpp index db72e791587..b1764590fda 100644 --- a/src/Functions/nowInBlock.cpp +++ b/src/Functions/nowInBlock.cpp @@ -2,7 +2,7 @@ #include #include #include -#include +#include namespace DB @@ -74,7 +74,7 @@ public: ColumnPtr executeImpl(const ColumnsWithTypeAndName &, const DataTypePtr &, size_t input_rows_count) const override { - return ColumnUInt32::create(input_rows_count, time(nullptr)); + return ColumnDateTime::create(input_rows_count, static_cast(time(nullptr))); } }; diff --git a/src/Functions/plus.cpp b/src/Functions/plus.cpp index 4b81c23584c..cd9cf6cec5c 100644 --- a/src/Functions/plus.cpp +++ b/src/Functions/plus.cpp @@ -25,7 +25,7 @@ struct PlusImpl return static_cast(static_cast(a)) + static_cast(static_cast(b)); } else - return static_cast(a) + b; + return static_cast(a) + static_cast(b); } /// Apply operation and check overflow. It's used for Deciamal operations. @returns true if overflowed, false otherwise. 
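
Editor's note: the mortonEncode documentation above describes the bit budget of a UInt64 Morton code (two arguments get up to 2^32 each, three get 2^21, and so on). The sketch below is only a naive illustration of that point for the 2D case, interleaving two 32-bit values into one 64-bit code; it is not the morton-nd LUT/BMI2 encoder that this patch actually uses.

#include <cassert>
#include <cstdint>

// Naive 2D Morton (Z-curve) interleaving: even bits come from x, odd bits from y.
static std::uint64_t mortonEncode2D(std::uint32_t x, std::uint32_t y)
{
    std::uint64_t code = 0;
    for (int bit = 0; bit < 32; ++bit)
    {
        code |= (static_cast<std::uint64_t>(x >> bit) & 1) << (2 * bit);     // even bit positions
        code |= (static_cast<std::uint64_t>(y >> bit) & 1) << (2 * bit + 1); // odd bit positions
    }
    return code;
}

int main()
{
    // Interleaving x = 0b01 and y = 0b10 yields 0b1001 = 9.
    assert(mortonEncode2D(1, 2) == 9);
    return 0;
}
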
diff --git a/src/Functions/pointInEllipses.cpp b/src/Functions/pointInEllipses.cpp index f69886ad71f..07b7f013cac 100644 --- a/src/Functions/pointInEllipses.cpp +++ b/src/Functions/pointInEllipses.cpp @@ -102,7 +102,7 @@ private: Float64 ellipse_data[4]; for (const auto idx : collections::range(0, 4)) { - int arg_idx = 2 + 4 * ellipse_idx + idx; + size_t arg_idx = 2 + 4 * ellipse_idx + idx; const auto * column = arguments[arg_idx].column.get(); if (const auto * col = checkAndGetColumnConst>(column)) { diff --git a/src/Functions/randDistribution.cpp b/src/Functions/randDistribution.cpp new file mode 100644 index 00000000000..94dad4fdc89 --- /dev/null +++ b/src/Functions/randDistribution.cpp @@ -0,0 +1,472 @@ +#include +#include +#include +#include "Common/Exception.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int ILLEGAL_TYPE_OF_ARGUMENT; + extern const int ILLEGAL_COLUMN; + extern const int BAD_ARGUMENTS; + extern const int LOGICAL_ERROR; +} + +namespace +{ +struct UniformDistribution +{ + using ReturnType = DataTypeFloat64; + static constexpr const char * getName() { return "randUniform"; } + static constexpr size_t getNumberOfArguments() { return 2; } + + static void generate(Float64 min, Float64 max, ColumnFloat64::Container & container) + { + auto distribution = std::uniform_real_distribution<>(min, max); + for (auto & elem : container) + elem = distribution(thread_local_rng); + } +}; + +struct NormalDistribution +{ + using ReturnType = DataTypeFloat64; + static constexpr const char * getName() { return "randNormal"; } + static constexpr size_t getNumberOfArguments() { return 2; } + + static void generate(Float64 mean, Float64 variance, ColumnFloat64::Container & container) + { + auto distribution = std::normal_distribution<>(mean, variance); + for (auto & elem : container) + elem = distribution(thread_local_rng); + } +}; + +struct LogNormalDistribution +{ + using ReturnType = DataTypeFloat64; + static constexpr const char * getName() { return "randLogNormal"; } + static constexpr size_t getNumberOfArguments() { return 2; } + + static void generate(Float64 mean, Float64 variance, ColumnFloat64::Container & container) + { + auto distribution = std::lognormal_distribution<>(mean, variance); + for (auto & elem : container) + elem = distribution(thread_local_rng); + } +}; + +struct ExponentialDistribution +{ + using ReturnType = DataTypeFloat64; + static constexpr const char * getName() { return "randExponential"; } + static constexpr size_t getNumberOfArguments() { return 1; } + + static void generate(Float64 lambda, ColumnFloat64::Container & container) + { + auto distribution = std::exponential_distribution<>(lambda); + for (auto & elem : container) + elem = distribution(thread_local_rng); + } +}; + +struct ChiSquaredDistribution +{ + using ReturnType = DataTypeFloat64; + static constexpr const char * getName() { return "randChiSquared"; } + static constexpr size_t getNumberOfArguments() { return 1; } + + static void generate(Float64 degree_of_freedom, ColumnFloat64::Container & container) + { + auto distribution = std::chi_squared_distribution<>(degree_of_freedom); + for (auto & elem : container) + elem = distribution(thread_local_rng); + } +}; + +struct StudentTDistribution +{ + using ReturnType = DataTypeFloat64; + static constexpr const char * getName() { return "randStudentT"; } + static constexpr size_t getNumberOfArguments() { return 1; } + + static void 
generate(Float64 degree_of_freedom, ColumnFloat64::Container & container) + { + auto distribution = std::student_t_distribution<>(degree_of_freedom); + for (auto & elem : container) + elem = distribution(thread_local_rng); + } +}; + +struct FisherFDistribution +{ + using ReturnType = DataTypeFloat64; + static constexpr const char * getName() { return "randFisherF"; } + static constexpr size_t getNumberOfArguments() { return 2; } + + static void generate(Float64 d1, Float64 d2, ColumnFloat64::Container & container) + { + auto distribution = std::fisher_f_distribution<>(d1, d2); + for (auto & elem : container) + elem = distribution(thread_local_rng); + } +}; + +struct BernoulliDistribution +{ + using ReturnType = DataTypeUInt8; + static constexpr const char * getName() { return "randBernoulli"; } + static constexpr size_t getNumberOfArguments() { return 1; } + + static void generate(Float64 p, ColumnUInt8::Container & container) + { + if (p < 0.0f || p > 1.0f) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Argument of function {} should be inside [0, 1] because it is a probability", getName()); + + auto distribution = std::bernoulli_distribution(p); + for (auto & elem : container) + elem = static_cast(distribution(thread_local_rng)); + } +}; + +struct BinomialDistribution +{ + using ReturnType = DataTypeUInt64; + static constexpr const char * getName() { return "randBinomial"; } + static constexpr size_t getNumberOfArguments() { return 2; } + + static void generate(UInt64 t, Float64 p, ColumnUInt64::Container & container) + { + if (p < 0.0f || p > 1.0f) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Argument of function {} should be inside [0, 1] because it is a probability", getName()); + + auto distribution = std::binomial_distribution(t, p); + for (auto & elem : container) + elem = static_cast(distribution(thread_local_rng)); + } +}; + +struct NegativeBinomialDistribution +{ + using ReturnType = DataTypeUInt64; + static constexpr const char * getName() { return "randNegativeBinomial"; } + static constexpr size_t getNumberOfArguments() { return 2; } + + static void generate(UInt64 t, Float64 p, ColumnUInt64::Container & container) + { + if (p < 0.0f || p > 1.0f) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Argument of function {} should be inside [0, 1] because it is a probability", getName()); + + auto distribution = std::negative_binomial_distribution(t, p); + for (auto & elem : container) + elem = static_cast(distribution(thread_local_rng)); + } +}; + +struct PoissonDistribution +{ + using ReturnType = DataTypeUInt64; + static constexpr const char * getName() { return "randPoisson"; } + static constexpr size_t getNumberOfArguments() { return 1; } + + static void generate(UInt64 n, ColumnUInt64::Container & container) + { + auto distribution = std::poisson_distribution(n); + for (auto & elem : container) + elem = static_cast(distribution(thread_local_rng)); + } +}; + +} + +/** Function which will generate values according to the specified distribution + * Accepts only constant arguments + * Similar to the functions rand and rand64 an additional 'tag' argument could be added to the + * end of arguments list (this argument will be ignored) which will guarantee that functions are not sticked together + * during optimisations. 
+ * Example: SELECT randNormal(0, 1, 1), randNormal(0, 1, 2) FROM numbers(10) + * This query will return two different columns + */ +template +class FunctionRandomDistribution : public IFunction +{ +private: + + template + ResultType getParameterFromConstColumn(size_t parameter_number, const ColumnsWithTypeAndName & arguments) const + { + if (parameter_number >= arguments.size()) + throw Exception( + ErrorCodes::LOGICAL_ERROR, "Parameter number ({}) is greater than the size of arguments ({}). This is a bug", parameter_number, arguments.size()); + + const IColumn * col = arguments[parameter_number].column.get(); + + if (!isColumnConst(*col)) + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Parameter number {} of function must be constant.", parameter_number, getName()); + + auto parameter = applyVisitor(FieldVisitorConvertToNumber(), assert_cast(*col).getField()); + + if (isNaN(parameter) || !std::isfinite(parameter)) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter number {} of function {} cannot be NaN of infinite", parameter_number, getName()); + + return parameter; + } + +public: + static FunctionPtr create(ContextPtr) + { + return std::make_shared>(); + } + + static constexpr auto name = Distribution::getName(); + String getName() const override { return name; } + size_t getNumberOfArguments() const override { return Distribution::getNumberOfArguments(); } + bool isVariadic() const override { return true; } + bool isDeterministic() const override { return false; } + bool isDeterministicInScopeOfQuery() const override { return false; } + bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; } + + DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override + { + auto desired = Distribution::getNumberOfArguments(); + if (arguments.size() != desired && arguments.size() != desired + 1) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Wrong number of arguments for function {}. 
Should be {} or {}", getName(), desired, desired + 1); + + for (size_t i = 0; i < Distribution::getNumberOfArguments(); ++i) + { + const auto & type = arguments[i]; + WhichDataType which(type); + if (!which.isFloat() && !which.isNativeUInt()) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of argument of function {}, expected Float64 or integer", type->getName(), getName()); + } + + return std::make_shared(); + } + + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & /*result_type*/, size_t input_rows_count) const override + { + if constexpr (std::is_same_v) + { + auto res_column = ColumnUInt8::create(input_rows_count); + auto & res_data = res_column->getData(); + Distribution::generate(getParameterFromConstColumn(0, arguments), res_data); + return res_column; + } + else if constexpr (std::is_same_v || std::is_same_v) + { + auto res_column = ColumnUInt64::create(input_rows_count); + auto & res_data = res_column->getData(); + Distribution::generate(getParameterFromConstColumn(0, arguments), getParameterFromConstColumn(1, arguments), res_data); + return res_column; + } + else if constexpr (std::is_same_v) + { + auto res_column = ColumnUInt64::create(input_rows_count); + auto & res_data = res_column->getData(); + Distribution::generate(getParameterFromConstColumn(0, arguments), res_data); + return res_column; + } + else + { + auto res_column = ColumnFloat64::create(input_rows_count); + auto & res_data = res_column->getData(); + if constexpr (Distribution::getNumberOfArguments() == 1) + { + Distribution::generate(getParameterFromConstColumn(0, arguments), res_data); + } + else if constexpr (Distribution::getNumberOfArguments() == 2) + { + Distribution::generate(getParameterFromConstColumn(0, arguments), getParameterFromConstColumn(1, arguments), res_data); + } + else + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, "More than two argument specified for function {}", getName()); + } + + return res_column; + } + } +}; + + +REGISTER_FUNCTION(Distribution) +{ + factory.registerFunction>( + { + R"( +Returns a random number from the uniform distribution in the specified range. +Accepts two parameters - minimum bound and maximum bound. + +Typical usage: +[example:typical] +)", + Documentation::Examples{ + {"typical", "SELECT randUniform(0, 1) FROM numbers(100000);"}}, + Documentation::Categories{"Distribution"} + }); + + factory.registerFunction>( + { + R"( +Returns a random number from the normal distribution. +Accepts two parameters - mean and variance. + +Typical usage: +[example:typical] +)", + Documentation::Examples{ + {"typical", "SELECT randNormal(0, 5) FROM numbers(100000);"}}, + Documentation::Categories{"Distribution"} + }); + + + factory.registerFunction>( + { + R"( +Returns a random number from the lognormal distribution (a distribution of a random variable whose logarithm is normally distributed). +Accepts two parameters - mean and variance. + +Typical usage: +[example:typical] +)", + Documentation::Examples{ + {"typical", "SELECT randLogNormal(0, 5) FROM numbers(100000);"}}, + Documentation::Categories{"Distribution"} + }); + + + factory.registerFunction>( + { + R"( +Returns a random number from the exponential distribution. +Accepts one parameter. 
+ +Typical usage: +[example:typical] +)", + Documentation::Examples{ + {"typical", "SELECT randExponential(0, 5) FROM numbers(100000);"}}, + Documentation::Categories{"Distribution"} + }); + + + factory.registerFunction>( + { + R"( +Returns a random number from the chi-squared distribution (a distribution of a sum of the squares of k independent standard normal random variables). +Accepts one parameter - degree of freedom. + +Typical usage: +[example:typical] +)", + Documentation::Examples{ + {"typical", "SELECT randChiSquared(5) FROM numbers(100000);"}}, + Documentation::Categories{"Distribution"} + }); + + factory.registerFunction>( + { + R"( +Returns a random number from the t-distribution. +Accepts one parameter - degree of freedom. + +Typical usage: +[example:typical] +)", + Documentation::Examples{ + {"typical", "SELECT randStudentT(5) FROM numbers(100000);"}}, + Documentation::Categories{"Distribution"} + }); + + + factory.registerFunction>( + { + R"( +Returns a random number from the f-distribution. +The F-distribution is the distribution of X = (S1 / d1) / (S2 / d2) where d1 and d2 are degrees of freedom. +Accepts two parameters - degrees of freedom. + +Typical usage: +[example:typical] +)", + Documentation::Examples{ + {"typical", "SELECT randFisherF(5) FROM numbers(100000);"}}, + Documentation::Categories{"Distribution"} + }); + + + factory.registerFunction>( + { + R"( +Returns a random number from the Bernoulli distribution. +Accepts two parameters - probability of success. + +Typical usage: +[example:typical] +)", + Documentation::Examples{ + {"typical", "SELECT randBernoulli(0.1) FROM numbers(100000);"}}, + Documentation::Categories{"Distribution"} + }); + + + factory.registerFunction>( + { + R"( +Returns a random number from the binomial distribution. +Accepts two parameters - number of experiments and probability of success in each experiment. + +Typical usage: +[example:typical] +)", + Documentation::Examples{ + {"typical", "SELECT randBinomial(10, 0.1) FROM numbers(100000);"}}, + Documentation::Categories{"Distribution"} + }); + + + factory.registerFunction>( + { + R"( +Returns a random number from the negative binomial distribution. +Accepts two parameters - number of experiments and probability of success in each experiment. + +Typical usage: +[example:typical] +)", + Documentation::Examples{ + {"typical", "SELECT randNegativeBinomial(10, 0.1) FROM numbers(100000);"}}, + Documentation::Categories{"Distribution"} + }); + + + factory.registerFunction>( + { + R"( +Returns a random number from the poisson distribution. +Accepts two parameters - the mean number of occurrences. + +Typical usage: +[example:typical] +)", + Documentation::Examples{ + {"typical", "SELECT randPoisson(3) FROM numbers(100000);"}}, + Documentation::Categories{"Distribution"} + }); +} + +} diff --git a/src/Functions/randomStringUTF8.cpp b/src/Functions/randomStringUTF8.cpp index 043db179d71..bcaa603b85d 100644 --- a/src/Functions/randomStringUTF8.cpp +++ b/src/Functions/randomStringUTF8.cpp @@ -116,8 +116,8 @@ public: { UInt64 rand = rng(); - UInt32 code_point1 = generate_code_point(rand); - UInt32 code_point2 = generate_code_point(rand >> 32); + UInt32 code_point1 = generate_code_point(static_cast(rand)); + UInt32 code_point2 = generate_code_point(static_cast(rand >> 32u)); /// We have padding in column buffers that we can overwrite. 
size_t length1 = UTF8::convertCodePointToUTF8(code_point1, pos, sizeof(int)); diff --git a/src/Functions/runningConcurrency.cpp b/src/Functions/runningConcurrency.cpp index 37fa11bce8f..c759476006f 100644 --- a/src/Functions/runningConcurrency.cpp +++ b/src/Functions/runningConcurrency.cpp @@ -43,6 +43,7 @@ namespace DB const typename ColVecArg::Container & vec_end = col_end->getData(); using ColVecConc = typename ConcurrencyDataType::ColumnType; + using FieldType = typename ConcurrencyDataType::FieldType; typename ColVecConc::MutablePtr col_concurrency = ColVecConc::create(input_rows_count); typename ColVecConc::Container & vec_concurrency = col_concurrency->getData(); @@ -74,7 +75,7 @@ namespace DB ongoing_until.erase( ongoing_until.begin(), ongoing_until.upper_bound(begin)); - vec_concurrency[i] = ongoing_until.size(); + vec_concurrency[i] = static_cast(ongoing_until.size()); } return col_concurrency; diff --git a/src/Functions/stem.cpp b/src/Functions/stem.cpp index 9c7ce895fce..91c98ec9b82 100644 --- a/src/Functions/stem.cpp +++ b/src/Functions/stem.cpp @@ -51,8 +51,8 @@ struct StemImpl /// Note that accessing -1th element is valid for PaddedPODArray. size_t original_size = offsets[i] - offsets[i - 1]; const sb_symbol * result = sb_stemmer_stem(stemmer, - reinterpret_cast(data.data() + offsets[i - 1]), - original_size - 1); + reinterpret_cast(data.data() + offsets[i - 1]), + static_cast(original_size - 1)); size_t new_size = sb_stemmer_length(stemmer) + 1; memcpy(res_data.data() + data_size, result, new_size); diff --git a/src/Functions/tests/gtest_has_all.cpp b/src/Functions/tests/gtest_has_all.cpp index ca7bc80b4aa..1776a461580 100644 --- a/src/Functions/tests/gtest_has_all.cpp +++ b/src/Functions/tests/gtest_has_all.cpp @@ -18,9 +18,9 @@ void arrayInit(T* elements_to_have, size_t nb_elements_to_have, T* array_element { for (size_t i = 0; i < array_size; ++i) { - array_elements[i] = i; + array_elements[i] = static_cast(i); } - auto [dist, gen] = uni_int_dist(0, array_size - 1); + auto [dist, gen] = uni_int_dist(0, static_cast(array_size - 1)); for (size_t i = 0; i < nb_elements_to_have; ++i) { elements_to_have[i] = array_elements[dist(gen)]; @@ -28,14 +28,14 @@ void arrayInit(T* elements_to_have, size_t nb_elements_to_have, T* array_element if (!all_elements_present) { /// make one element to be searched for missing from the target array - elements_to_have[nb_elements_to_have - 1] = array_size + 1; + elements_to_have[nb_elements_to_have - 1] = static_cast(array_size + 1); } } void nullMapInit(UInt8 * null_map, size_t null_map_size, size_t nb_null_elements) { /// -2 to keep the last element of the array non-null - auto [dist, gen] = uni_int_dist(0, null_map_size - 2); + auto [dist, gen] = uni_int_dist(0, static_cast(null_map_size - 2)); for (size_t i = 0; i < null_map_size; ++i) { null_map[i] = 0; diff --git a/src/Functions/timeSlots.cpp b/src/Functions/timeSlots.cpp index 949ca7bc0e4..72d6059e0a1 100644 --- a/src/Functions/timeSlots.cpp +++ b/src/Functions/timeSlots.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include @@ -19,6 +20,7 @@ namespace ErrorCodes extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; extern const int ILLEGAL_TYPE_OF_ARGUMENT; extern const int ILLEGAL_COLUMN; + extern const int BAD_ARGUMENTS; } namespace @@ -41,6 +43,9 @@ struct TimeSlotsImpl const PaddedPODArray & starts, const PaddedPODArray & durations, UInt32 time_slot_size, PaddedPODArray & result_values, ColumnArray::Offsets & result_offsets) { + if (time_slot_size == 0) + throw 
Exception("Time slot size cannot be zero", ErrorCodes::BAD_ARGUMENTS); + size_t size = starts.size(); result_offsets.resize(size); @@ -63,6 +68,9 @@ struct TimeSlotsImpl const PaddedPODArray & starts, UInt32 duration, UInt32 time_slot_size, PaddedPODArray & result_values, ColumnArray::Offsets & result_offsets) { + if (time_slot_size == 0) + throw Exception("Time slot size cannot be zero", ErrorCodes::BAD_ARGUMENTS); + size_t size = starts.size(); result_offsets.resize(size); @@ -85,6 +93,9 @@ struct TimeSlotsImpl UInt32 start, const PaddedPODArray & durations, UInt32 time_slot_size, PaddedPODArray & result_values, ColumnArray::Offsets & result_offsets) { + if (time_slot_size == 0) + throw Exception("Time slot size cannot be zero", ErrorCodes::BAD_ARGUMENTS); + size_t size = durations.size(); result_offsets.resize(size); @@ -125,6 +136,9 @@ struct TimeSlotsImpl ColumnArray::Offset current_offset = 0; time_slot_size = time_slot_size.value * ts_multiplier; + if (time_slot_size == 0) + throw Exception("Time slot size cannot be zero", ErrorCodes::BAD_ARGUMENTS); + for (size_t i = 0; i < size; ++i) { for (DateTime64 value = (starts[i] * dt_multiplier) / time_slot_size, end = (starts[i] * dt_multiplier + durations[i] * dur_multiplier) / time_slot_size; value <= end; value += 1) @@ -155,6 +169,9 @@ struct TimeSlotsImpl ColumnArray::Offset current_offset = 0; duration = duration * dur_multiplier; time_slot_size = time_slot_size.value * ts_multiplier; + if (time_slot_size == 0) + throw Exception("Time slot size cannot be zero", ErrorCodes::BAD_ARGUMENTS); + for (size_t i = 0; i < size; ++i) { for (DateTime64 value = (starts[i] * dt_multiplier) / time_slot_size, end = (starts[i] * dt_multiplier + duration) / time_slot_size; value <= end; value += 1) @@ -185,6 +202,9 @@ struct TimeSlotsImpl ColumnArray::Offset current_offset = 0; start = dt_multiplier * start; time_slot_size = time_slot_size.value * ts_multiplier; + if (time_slot_size == 0) + throw Exception("Time slot size cannot be zero", ErrorCodes::BAD_ARGUMENTS); + for (size_t i = 0; i < size; ++i) { for (DateTime64 value = start / time_slot_size, end = (start + durations[i] * dur_multiplier) / time_slot_size; value <= end; value += 1) @@ -281,11 +301,11 @@ public: throw Exception("Third argument for function " + getName() + " must be greater than zero", ErrorCodes::ILLEGAL_COLUMN); } - const auto * dt_starts = checkAndGetColumn(arguments[0].column.get()); - const auto * dt_const_starts = checkAndGetColumnConst(arguments[0].column.get()); + const auto * dt_starts = checkAndGetColumn(arguments[0].column.get()); + const auto * dt_const_starts = checkAndGetColumnConst(arguments[0].column.get()); - const auto * durations = checkAndGetColumn(arguments[1].column.get()); - const auto * const_durations = checkAndGetColumnConst(arguments[1].column.get()); + const auto * durations = checkAndGetColumn(arguments[1].column.get()); + const auto * const_durations = checkAndGetColumnConst(arguments[1].column.get()); auto res = ColumnArray::create(ColumnUInt32::create()); ColumnUInt32::Container & res_values = typeid_cast(res->getData()).getData(); @@ -322,8 +342,8 @@ public: time_slot_scale = assert_cast(arguments[2].type.get())->getScale(); } - const auto * starts = checkAndGetColumn(arguments[0].column.get()); - const auto * const_starts = checkAndGetColumnConst(arguments[0].column.get()); + const auto * starts = checkAndGetColumn(arguments[0].column.get()); + const auto * const_starts = checkAndGetColumnConst(arguments[0].column.get()); const auto * durations 
= checkAndGetColumn>(arguments[1].column.get()); const auto * const_durations = checkAndGetColumnConst>(arguments[1].column.get()); diff --git a/src/Functions/toRelativeDayNum.cpp b/src/Functions/toRelativeDayNum.cpp index 241104493cd..db3eb119dcf 100644 --- a/src/Functions/toRelativeDayNum.cpp +++ b/src/Functions/toRelativeDayNum.cpp @@ -7,7 +7,7 @@ namespace DB { -using FunctionToRelativeDayNum = FunctionDateOrDateTimeToSomething; +using FunctionToRelativeDayNum = FunctionDateOrDateTimeToSomething>; REGISTER_FUNCTION(ToRelativeDayNum) { diff --git a/src/Functions/toRelativeHourNum.cpp b/src/Functions/toRelativeHourNum.cpp index 2404d73c450..838b1bb1ca1 100644 --- a/src/Functions/toRelativeHourNum.cpp +++ b/src/Functions/toRelativeHourNum.cpp @@ -7,7 +7,7 @@ namespace DB { -using FunctionToRelativeHourNum = FunctionDateOrDateTimeToSomething; +using FunctionToRelativeHourNum = FunctionDateOrDateTimeToSomething>; REGISTER_FUNCTION(ToRelativeHourNum) { diff --git a/src/Functions/toRelativeMinuteNum.cpp b/src/Functions/toRelativeMinuteNum.cpp index a5ecada1e92..e9318517119 100644 --- a/src/Functions/toRelativeMinuteNum.cpp +++ b/src/Functions/toRelativeMinuteNum.cpp @@ -7,7 +7,7 @@ namespace DB { -using FunctionToRelativeMinuteNum = FunctionDateOrDateTimeToSomething; +using FunctionToRelativeMinuteNum = FunctionDateOrDateTimeToSomething>; REGISTER_FUNCTION(ToRelativeMinuteNum) { diff --git a/src/Functions/toRelativeMonthNum.cpp b/src/Functions/toRelativeMonthNum.cpp index 8f46e04e483..7b058c3ba12 100644 --- a/src/Functions/toRelativeMonthNum.cpp +++ b/src/Functions/toRelativeMonthNum.cpp @@ -7,7 +7,7 @@ namespace DB { -using FunctionToRelativeMonthNum = FunctionDateOrDateTimeToSomething; +using FunctionToRelativeMonthNum = FunctionDateOrDateTimeToSomething>; REGISTER_FUNCTION(ToRelativeMonthNum) { diff --git a/src/Functions/toRelativeQuarterNum.cpp b/src/Functions/toRelativeQuarterNum.cpp index 8ea0c42ef09..c7702d47f42 100644 --- a/src/Functions/toRelativeQuarterNum.cpp +++ b/src/Functions/toRelativeQuarterNum.cpp @@ -7,7 +7,7 @@ namespace DB { -using FunctionToRelativeQuarterNum = FunctionDateOrDateTimeToSomething; +using FunctionToRelativeQuarterNum = FunctionDateOrDateTimeToSomething>; REGISTER_FUNCTION(ToRelativeQuarterNum) { diff --git a/src/Functions/toRelativeSecondNum.cpp b/src/Functions/toRelativeSecondNum.cpp index 7af41ab8334..db80f721fbd 100644 --- a/src/Functions/toRelativeSecondNum.cpp +++ b/src/Functions/toRelativeSecondNum.cpp @@ -7,7 +7,7 @@ namespace DB { -using FunctionToRelativeSecondNum = FunctionDateOrDateTimeToSomething; +using FunctionToRelativeSecondNum = FunctionDateOrDateTimeToSomething>; REGISTER_FUNCTION(ToRelativeSecondNum) { diff --git a/src/Functions/toRelativeWeekNum.cpp b/src/Functions/toRelativeWeekNum.cpp index fe7aec3fd9a..beca00d8cc4 100644 --- a/src/Functions/toRelativeWeekNum.cpp +++ b/src/Functions/toRelativeWeekNum.cpp @@ -7,7 +7,7 @@ namespace DB { -using FunctionToRelativeWeekNum = FunctionDateOrDateTimeToSomething; +using FunctionToRelativeWeekNum = FunctionDateOrDateTimeToSomething>; REGISTER_FUNCTION(ToRelativeWeekNum) { diff --git a/src/Functions/toRelativeYearNum.cpp b/src/Functions/toRelativeYearNum.cpp index 4574d8513e0..b4fe3318129 100644 --- a/src/Functions/toRelativeYearNum.cpp +++ b/src/Functions/toRelativeYearNum.cpp @@ -7,7 +7,7 @@ namespace DB { -using FunctionToRelativeYearNum = FunctionDateOrDateTimeToSomething; +using FunctionToRelativeYearNum = FunctionDateOrDateTimeToSomething>; REGISTER_FUNCTION(ToRelativeYearNum) { diff --git 
a/src/Functions/toStartOfInterval.cpp b/src/Functions/toStartOfInterval.cpp index ac648b87448..3054cf280d9 100644 --- a/src/Functions/toStartOfInterval.cpp +++ b/src/Functions/toStartOfInterval.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include #include @@ -133,17 +134,17 @@ namespace { static UInt32 execute(UInt16 d, Int64 days, const DateLUTImpl & time_zone, Int64) { - return time_zone.toStartOfDayInterval(ExtendedDayNum(d), days); + return static_cast(time_zone.toStartOfDayInterval(ExtendedDayNum(d), days)); } static UInt32 execute(Int32 d, Int64 days, const DateLUTImpl & time_zone, Int64) { - return time_zone.toStartOfDayInterval(ExtendedDayNum(d), days); + return static_cast(time_zone.toStartOfDayInterval(ExtendedDayNum(d), days)); } static UInt32 execute(UInt32 t, Int64 days, const DateLUTImpl & time_zone, Int64) { - return time_zone.toStartOfDayInterval(time_zone.toDayNum(t), days); + return static_cast(time_zone.toStartOfDayInterval(time_zone.toDayNum(t), days)); } static Int64 execute(Int64 t, Int64 days, const DateLUTImpl & time_zone, Int64 scale_multiplier) @@ -437,7 +438,7 @@ private: if (which_type.isDateTime64()) { - const auto * time_column_vec = checkAndGetColumn(time_column.column.get()); + const auto * time_column_vec = checkAndGetColumn(time_column.column.get()); auto scale = assert_cast(from_datatype).getScale(); if (time_column_vec) @@ -445,19 +446,19 @@ private: } if (which_type.isDateTime()) { - const auto * time_column_vec = checkAndGetColumn(time_column.column.get()); + const auto * time_column_vec = checkAndGetColumn(time_column.column.get()); if (time_column_vec) return dispatchForIntervalColumn(assert_cast(from_datatype), *time_column_vec, interval_column, result_type, time_zone); } if (which_type.isDate()) { - const auto * time_column_vec = checkAndGetColumn(time_column.column.get()); + const auto * time_column_vec = checkAndGetColumn(time_column.column.get()); if (time_column_vec) return dispatchForIntervalColumn(assert_cast(from_datatype), *time_column_vec, interval_column, result_type, time_zone); } if (which_type.isDate32()) { - const auto * time_column_vec = checkAndGetColumn(time_column.column.get()); + const auto * time_column_vec = checkAndGetColumn(time_column.column.get()); if (time_column_vec) return dispatchForIntervalColumn(assert_cast(from_datatype), *time_column_vec, interval_column, result_type, time_zone); } @@ -517,6 +518,7 @@ private: ColumnPtr execute(const FromDataType &, const ColumnType & time_column_type, Int64 num_units, const DataTypePtr & result_type, const DateLUTImpl & time_zone, const UInt16 scale) const { using ToColumnType = typename ToDataType::ColumnType; + using ToFieldType = typename ToDataType::FieldType; const auto & time_data = time_column_type.getData(); size_t size = time_data.size(); @@ -529,7 +531,8 @@ private: Int64 scale_multiplier = DecimalUtils::scaleMultiplier(scale); for (size_t i = 0; i != size; ++i) - result_data[i] = Transform::execute(time_data[i], num_units, time_zone, scale_multiplier); + result_data[i] = static_cast( + Transform::execute(time_data[i], num_units, time_zone, scale_multiplier)); return result_col; } diff --git a/src/Functions/toValidUTF8.cpp b/src/Functions/toValidUTF8.cpp index 9874e39baa4..4b79bc0bbda 100644 --- a/src/Functions/toValidUTF8.cpp +++ b/src/Functions/toValidUTF8.cpp @@ -106,7 +106,7 @@ struct ToValidUTF8Impl /// Sequence was not fully written to this buffer. 
break; } - else if (Poco::UTF8Encoding::isLegal(reinterpret_cast(p), len)) + else if (Poco::UTF8Encoding::isLegal(reinterpret_cast(p), static_cast(len))) { /// Valid sequence. p += len; diff --git a/src/Functions/tupleElement.cpp b/src/Functions/tupleElement.cpp index 4f7ddda6b0b..6ac36dc80ed 100644 --- a/src/Functions/tupleElement.cpp +++ b/src/Functions/tupleElement.cpp @@ -82,7 +82,10 @@ public: const DataTypeTuple * tuple = checkAndGetDataType(tuple_col); if (!tuple) - throw Exception("First argument for function " + getName() + " must be tuple or array of tuple.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "First argument for function {} must be tuple or array of tuple. Actual {}", + getName(), + arguments[0].type->getName()); auto index = getElementNum(arguments[1].column, *tuple, number_of_arguments); if (index.has_value()) @@ -137,7 +140,10 @@ public: const DataTypeTuple * tuple_type_concrete = checkAndGetDataType(tuple_type); const ColumnTuple * tuple_col_concrete = checkAndGetColumn(tuple_col); if (!tuple_type_concrete || !tuple_col_concrete) - throw Exception("First argument for function " + getName() + " must be tuple or array of tuple.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "First argument for function {} must be tuple or array of tuple. Actual {}", + getName(), + first_arg.type->getName()); auto index = getElementNum(arguments[1].column, *tuple_type_concrete, arguments.size()); @@ -221,20 +227,18 @@ private: std::optional getElementNum(const ColumnPtr & index_column, const DataTypeTuple & tuple, const size_t argument_size) const { - if ( - checkAndGetColumnConst(index_column.get()) - || checkAndGetColumnConst(index_column.get()) - || checkAndGetColumnConst(index_column.get()) - || checkAndGetColumnConst(index_column.get()) - ) + if (checkAndGetColumnConst(index_column.get()) + || checkAndGetColumnConst(index_column.get()) + || checkAndGetColumnConst(index_column.get()) + || checkAndGetColumnConst(index_column.get())) { size_t index = index_column->getUInt(0); if (index == 0) - throw Exception("Indices in tuples are 1-based.", ErrorCodes::ILLEGAL_INDEX); + throw Exception(ErrorCodes::ILLEGAL_INDEX, "Indices in tuples are 1-based."); if (index > tuple.getElements().size()) - throw Exception("Index for tuple element is out of range.", ErrorCodes::ILLEGAL_INDEX); + throw Exception(ErrorCodes::ILLEGAL_INDEX, "Index for tuple element is out of range."); return std::optional(index - 1); } @@ -253,7 +257,9 @@ private: return std::nullopt; } else - throw Exception("Second argument to " + getName() + " must be a constant UInt or String", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Second argument to {} must be a constant UInt or String", + getName()); } }; diff --git a/src/IO/AIO.cpp b/src/IO/AIO.cpp index fb762271e4d..494ed3bae38 100644 --- a/src/IO/AIO.cpp +++ b/src/IO/AIO.cpp @@ -23,22 +23,22 @@ namespace DB int io_setup(unsigned nr, aio_context_t * ctxp) { - return syscall(__NR_io_setup, nr, ctxp); + return static_cast(syscall(__NR_io_setup, nr, ctxp)); } int io_destroy(aio_context_t ctx) { - return syscall(__NR_io_destroy, ctx); + return static_cast(syscall(__NR_io_destroy, ctx)); } int io_submit(aio_context_t ctx, long nr, struct iocb * iocbpp[]) // NOLINT { - return syscall(__NR_io_submit, ctx, nr, iocbpp); + return static_cast(syscall(__NR_io_submit, ctx, nr, iocbpp)); } int io_getevents(aio_context_t ctx, long 
min_nr, long max_nr, io_event * events, struct timespec * timeout) // NOLINT { - return syscall(__NR_io_getevents, ctx, min_nr, max_nr, events, timeout); + return static_cast(syscall(__NR_io_getevents, ctx, min_nr, max_nr, events, timeout)); } diff --git a/src/IO/Archives/ZipArchiveReader.cpp b/src/IO/Archives/ZipArchiveReader.cpp index 3127f299f5c..a7c72c7b575 100644 --- a/src/IO/Archives/ZipArchiveReader.cpp +++ b/src/IO/Archives/ZipArchiveReader.cpp @@ -281,7 +281,7 @@ private: bool nextImpl() override { RawHandle raw_handle = handle.getRawHandle(); - auto bytes_read = unzReadCurrentFile(raw_handle, internal_buffer.begin(), internal_buffer.size()); + auto bytes_read = unzReadCurrentFile(raw_handle, internal_buffer.begin(), static_cast(internal_buffer.size())); if (bytes_read < 0) checkResult(bytes_read); diff --git a/src/IO/Archives/ZipArchiveWriter.cpp b/src/IO/Archives/ZipArchiveWriter.cpp index 817e8132b64..d413783356d 100644 --- a/src/IO/Archives/ZipArchiveWriter.cpp +++ b/src/IO/Archives/ZipArchiveWriter.cpp @@ -134,7 +134,8 @@ private: if (!offset()) return; RawHandle raw_handle = handle.getRawHandle(); - checkResult(zipWriteInFileInZip(raw_handle, working_buffer.begin(), offset())); + int code = zipWriteInFileInZip(raw_handle, working_buffer.begin(), static_cast(offset())); + checkResult(code); } void checkResult(int code) const { handle.checkResult(code); } diff --git a/src/IO/BufferWithOwnMemory.h b/src/IO/BufferWithOwnMemory.h index 2121747500b..2e451e0032e 100644 --- a/src/IO/BufferWithOwnMemory.h +++ b/src/IO/BufferWithOwnMemory.h @@ -34,8 +34,7 @@ namespace ErrorCodes template > struct Memory : boost::noncopyable, Allocator { - /// Padding is needed to allow usage of 'memcpySmallAllowReadWriteOverflow15' function with this buffer. - static constexpr size_t pad_right = 15; + static constexpr size_t pad_right = PADDING_FOR_SIMD - 1; size_t m_capacity = 0; /// With padding. 
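// Sketch of why Memory keeps PADDING_FOR_SIMD - 1 (15) spare bytes on the right, as in the
// pad_right change above: a 16-byte-wide copy may then read and write up to 15 bytes past
// the logical end of the data without touching unowned memory. Standalone toy version with
// hypothetical names; both source and destination must carry the same right padding.
#include <cstddef>
#include <cstdlib>
#include <cstring>

static constexpr size_t kSimdPadding = 16;

struct PaddedBuffer
{
    char * data = nullptr;
    size_t size = 0;

    explicit PaddedBuffer(size_t size_) : size(size_)
    {
        // Allocate size + 15 bytes so chunked 16-byte copies stay inside the allocation.
        data = static_cast<char *>(std::malloc(size + kSimdPadding - 1));
    }
    ~PaddedBuffer() { std::free(data); }
};

// Copies in fixed 16-byte chunks; may overrun both buffers by up to 15 bytes,
// which the right padding absorbs.
inline void copyBy16(char * dst, const char * src, size_t n)
{
    for (size_t i = 0; i < n; i += 16)
        std::memcpy(dst + i, src + i, 16);
}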
size_t m_size = 0; diff --git a/src/IO/Bzip2ReadBuffer.cpp b/src/IO/Bzip2ReadBuffer.cpp index 9d183393159..9970edcbcf3 100644 --- a/src/IO/Bzip2ReadBuffer.cpp +++ b/src/IO/Bzip2ReadBuffer.cpp @@ -85,11 +85,11 @@ bool Bzip2ReadBuffer::nextImpl() if (!bz->stream.avail_in) { in->nextIfAtEnd(); - bz->stream.avail_in = in->buffer().end() - in->position(); + bz->stream.avail_in = static_cast(in->buffer().end() - in->position()); bz->stream.next_in = in->position(); } - bz->stream.avail_out = internal_buffer.size(); + bz->stream.avail_out = static_cast(internal_buffer.size()); bz->stream.next_out = internal_buffer.begin(); ret = BZ2_bzDecompress(&bz->stream); @@ -99,7 +99,7 @@ bool Bzip2ReadBuffer::nextImpl() if (ret == BZ_STREAM_END && !in->eof()) { bz->reinitialize(); - bz->stream.avail_in = in->buffer().end() - in->position(); + bz->stream.avail_in = static_cast(in->buffer().end() - in->position()); bz->stream.next_in = in->position(); ret = BZ_OK; diff --git a/src/IO/Bzip2WriteBuffer.cpp b/src/IO/Bzip2WriteBuffer.cpp index 10a1803fec8..4b6bed70d35 100644 --- a/src/IO/Bzip2WriteBuffer.cpp +++ b/src/IO/Bzip2WriteBuffer.cpp @@ -58,7 +58,7 @@ void Bzip2WriteBuffer::nextImpl() } bz->stream.next_in = working_buffer.begin(); - bz->stream.avail_in = offset(); + bz->stream.avail_in = static_cast(offset()); try { @@ -66,7 +66,7 @@ void Bzip2WriteBuffer::nextImpl() { out->nextIfAtEnd(); bz->stream.next_out = out->position(); - bz->stream.avail_out = out->buffer().end() - out->position(); + bz->stream.avail_out = static_cast(out->buffer().end() - out->position()); int ret = BZ2_bzCompress(&bz->stream, BZ_RUN); @@ -95,7 +95,7 @@ void Bzip2WriteBuffer::finalizeBefore() out->nextIfAtEnd(); bz->stream.next_out = out->position(); - bz->stream.avail_out = out->buffer().end() - out->position(); + bz->stream.avail_out = static_cast(out->buffer().end() - out->position()); int ret = BZ2_bzCompress(&bz->stream, BZ_FINISH); diff --git a/src/IO/FileEncryptionCommon.cpp b/src/IO/FileEncryptionCommon.cpp index 13d8acb8c7b..5592da8721c 100644 --- a/src/IO/FileEncryptionCommon.cpp +++ b/src/IO/FileEncryptionCommon.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include @@ -84,10 +85,13 @@ namespace while (in_size < size) { out.nextIfAtEnd(); + size_t part_size = std::min(size - in_size, out.available()); + part_size = std::min(part_size, INT_MAX); + uint8_t * ciphertext = reinterpret_cast(out.position()); int ciphertext_size = 0; - if (!EVP_EncryptUpdate(evp_ctx, ciphertext, &ciphertext_size, &in[in_size], part_size)) + if (!EVP_EncryptUpdate(evp_ctx, ciphertext, &ciphertext_size, &in[in_size], static_cast(part_size))) throw Exception("Failed to encrypt", ErrorCodes::DATA_ENCRYPTION_ERROR); in_size += part_size; @@ -110,7 +114,7 @@ namespace uint8_t ciphertext[kBlockSize]; int ciphertext_size = 0; - if (!EVP_EncryptUpdate(evp_ctx, ciphertext, &ciphertext_size, padded_data, padded_data_size)) + if (!EVP_EncryptUpdate(evp_ctx, ciphertext, &ciphertext_size, padded_data, safe_cast(padded_data_size))) throw Exception("Failed to encrypt", ErrorCodes::DATA_ENCRYPTION_ERROR); if (!ciphertext_size) @@ -142,7 +146,7 @@ namespace const uint8_t * in = reinterpret_cast(data); uint8_t * plaintext = reinterpret_cast(out); int plaintext_size = 0; - if (!EVP_DecryptUpdate(evp_ctx, plaintext, &plaintext_size, in, size)) + if (!EVP_DecryptUpdate(evp_ctx, plaintext, &plaintext_size, in, safe_cast(size))) throw Exception("Failed to decrypt", ErrorCodes::DATA_ENCRYPTION_ERROR); return plaintext_size; } @@ -153,10 +157,9 @@ 
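// Sketch of the clamping pattern used in the FileEncryptionCommon hunk above: APIs such as
// EVP_EncryptUpdate take the input length as int, so a size_t amount is fed through in
// chunks no larger than INT_MAX (and no larger than the space left in the output buffer).
// processChunk is a hypothetical stand-in for the int-sized API call.
#include <algorithm>
#include <climits>
#include <cstddef>

void processAll(const char * in, size_t size, size_t out_available,
                void (*processChunk)(const char * chunk, int chunk_size))
{
    size_t done = 0;
    while (done < size)
    {
        size_t part = std::min(size - done, out_available);
        part = std::min<size_t>(part, INT_MAX);          // keep the narrowing below lossless
        processChunk(in + done, static_cast<int>(part)); // explicit, now-safe narrowing
        done += part;
    }
}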
namespace uint8_t padded_data[kBlockSize] = {}; memcpy(&padded_data[pad_left], data, size); size_t padded_data_size = pad_left + size; - uint8_t plaintext[kBlockSize]; int plaintext_size = 0; - if (!EVP_DecryptUpdate(evp_ctx, plaintext, &plaintext_size, padded_data, padded_data_size)) + if (!EVP_DecryptUpdate(evp_ctx, plaintext, &plaintext_size, padded_data, safe_cast(padded_data_size))) throw Exception("Failed to decrypt", ErrorCodes::DATA_ENCRYPTION_ERROR); if (!plaintext_size) diff --git a/src/IO/HTTPCommon.cpp b/src/IO/HTTPCommon.cpp index 9fd48914f64..f33b2399492 100644 --- a/src/IO/HTTPCommon.cpp +++ b/src/IO/HTTPCommon.cpp @@ -142,7 +142,7 @@ namespace bool proxy_https_, size_t max_pool_size_, bool resolve_host_ = true) - : Base(max_pool_size_, &Poco::Logger::get("HTTPSessionPool")) + : Base(static_cast(max_pool_size_), &Poco::Logger::get("HTTPSessionPool")) , host(host_) , port(port_) , https(https_) @@ -271,7 +271,7 @@ namespace }; } -void setResponseDefaultHeaders(HTTPServerResponse & response, unsigned keep_alive_timeout) +void setResponseDefaultHeaders(HTTPServerResponse & response, size_t keep_alive_timeout) { if (!response.getKeepAlive()) return; diff --git a/src/IO/HTTPCommon.h b/src/IO/HTTPCommon.h index 18e83abb83b..51da17d4ca7 100644 --- a/src/IO/HTTPCommon.h +++ b/src/IO/HTTPCommon.h @@ -38,7 +38,7 @@ public: using PooledHTTPSessionPtr = SingleEndpointHTTPSessionPool::Entry; using HTTPSessionPtr = std::shared_ptr; -void setResponseDefaultHeaders(HTTPServerResponse & response, unsigned keep_alive_timeout); +void setResponseDefaultHeaders(HTTPServerResponse & response, size_t keep_alive_timeout); /// Create session object to perform requests and set required parameters. HTTPSessionPtr makeHTTPSession(const Poco::URI & uri, const ConnectionTimeouts & timeouts, bool resolve_host = true); diff --git a/src/IO/MMapReadBufferFromFileDescriptor.cpp b/src/IO/MMapReadBufferFromFileDescriptor.cpp index 5a636971fa0..c0eb73f8638 100644 --- a/src/IO/MMapReadBufferFromFileDescriptor.cpp +++ b/src/IO/MMapReadBufferFromFileDescriptor.cpp @@ -28,7 +28,7 @@ void MMapReadBufferFromFileDescriptor::init() BufferBase::set(mapped.getData(), length, 0); size_t page_size = static_cast(::getPageSize()); - ReadBuffer::padded = (length % page_size) > 0 && (length % page_size) <= (page_size - 15); + ReadBuffer::padded = (length % page_size) > 0 && (length % page_size) <= (page_size - (PADDING_FOR_SIMD - 1)); } diff --git a/src/IO/MMapReadBufferFromFileWithCache.cpp b/src/IO/MMapReadBufferFromFileWithCache.cpp index 0d31c29bdaa..503a58b65b9 100644 --- a/src/IO/MMapReadBufferFromFileWithCache.cpp +++ b/src/IO/MMapReadBufferFromFileWithCache.cpp @@ -17,7 +17,7 @@ void MMapReadBufferFromFileWithCache::init() BufferBase::set(mapped->getData(), length, 0); size_t page_size = static_cast(::getPageSize()); - ReadBuffer::padded = (length % page_size) > 0 && (length % page_size) <= (page_size - 15); + ReadBuffer::padded = (length % page_size) > 0 && (length % page_size) <= (page_size - (PADDING_FOR_SIMD - 1)); } diff --git a/src/IO/PeekableReadBuffer.h b/src/IO/PeekableReadBuffer.h index 15283793755..45763863437 100644 --- a/src/IO/PeekableReadBuffer.h +++ b/src/IO/PeekableReadBuffer.h @@ -99,7 +99,7 @@ private: /// creation (for example if PeekableReadBuffer is often created or if we need to remember small amount of /// data after checkpoint), at the beginning we will use small amount of memory on stack and allocate /// larger buffer only if reserved memory is not enough. 
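// Sketch of the small-buffer trick described in the PeekableReadBuffer comment above: keep
// a tiny inline array (sized like the SIMD padding) and fall back to heap allocation only
// when the checkpointed data outgrows it. Standalone toy; SmallScratch is hypothetical.
#include <cstddef>
#include <cstring>
#include <vector>

class SmallScratch
{
    static constexpr size_t kInline = 16;
    char stack_memory[kInline];
    std::vector<char> heap_memory;   // used only once kInline is exceeded
    size_t size = 0;
    bool use_stack = true;

public:
    void append(const char * data, size_t len)
    {
        if (use_stack && size + len <= kInline)
        {
            std::memcpy(stack_memory + size, data, len);
        }
        else
        {
            if (use_stack)
            {
                heap_memory.assign(stack_memory, stack_memory + size); // migrate once
                use_stack = false;
            }
            heap_memory.insert(heap_memory.end(), data, data + len);
        }
        size += len;
    }
};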
- char stack_memory[16]; + char stack_memory[PADDING_FOR_SIMD]; bool use_stack_memory = true; }; diff --git a/src/IO/ReadBufferFromMemory.h b/src/IO/ReadBufferFromMemory.h index dc5c464604b..ad96e4bfa28 100644 --- a/src/IO/ReadBufferFromMemory.h +++ b/src/IO/ReadBufferFromMemory.h @@ -16,6 +16,8 @@ public: requires (sizeof(CharT) == 1) ReadBufferFromMemory(const CharT * buf, size_t size) : SeekableReadBuffer(const_cast(reinterpret_cast(buf)), size, 0) {} + explicit ReadBufferFromMemory(const std::string_view&& str) + : SeekableReadBuffer(const_cast(str.data()), str.size(), 0) {} off_t seek(off_t off, int whence) override; diff --git a/src/IO/ReadBufferFromPocoSocket.cpp b/src/IO/ReadBufferFromPocoSocket.cpp index 988ad75cdf4..7ba23dd1588 100644 --- a/src/IO/ReadBufferFromPocoSocket.cpp +++ b/src/IO/ReadBufferFromPocoSocket.cpp @@ -29,6 +29,7 @@ namespace ErrorCodes extern const int NETWORK_ERROR; extern const int SOCKET_TIMEOUT; extern const int CANNOT_READ_FROM_SOCKET; + extern const int LOGICAL_ERROR; } @@ -54,7 +55,10 @@ bool ReadBufferFromPocoSocket::nextImpl() while (async_callback && !socket.poll(0, Poco::Net::Socket::SELECT_READ)) async_callback(socket.impl()->sockfd(), socket.getReceiveTimeout(), socket_description); - bytes_read = socket.impl()->receiveBytes(internal_buffer.begin(), internal_buffer.size()); + if (internal_buffer.size() > INT_MAX) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Buffer overflow"); + + bytes_read = socket.impl()->receiveBytes(internal_buffer.begin(), static_cast(internal_buffer.size())); } catch (const Poco::Net::NetException & e) { diff --git a/src/IO/ReadHelpers.h b/src/IO/ReadHelpers.h index d5b0ce4bebe..27a24eef804 100644 --- a/src/IO/ReadHelpers.h +++ b/src/IO/ReadHelpers.h @@ -1095,6 +1095,7 @@ inline void readText(is_floating_point auto & x, ReadBuffer & buf) { readFloatTe inline void readText(String & x, ReadBuffer & buf) { readEscapedString(x, buf); } inline void readText(LocalDate & x, ReadBuffer & buf) { readDateText(x, buf); } +inline void readText(DayNum & x, ReadBuffer & buf) { readDateText(x, buf); } inline void readText(LocalDateTime & x, ReadBuffer & buf) { readDateTimeText(x, buf); } inline void readText(UUID & x, ReadBuffer & buf) { readUUIDText(x, buf); } @@ -1176,6 +1177,7 @@ inline void readCSV(T & x, ReadBuffer & buf) inline void readCSV(String & x, ReadBuffer & buf, const FormatSettings::CSV & settings) { readCSVString(x, buf, settings); } inline void readCSV(LocalDate & x, ReadBuffer & buf) { readCSVSimple(x, buf); } +inline void readCSV(DayNum & x, ReadBuffer & buf) { readCSVSimple(x, buf); } inline void readCSV(LocalDateTime & x, ReadBuffer & buf) { readCSVSimple(x, buf); } inline void readCSV(UUID & x, ReadBuffer & buf) { readCSVSimple(x, buf); } inline void readCSV(UInt128 & x, ReadBuffer & buf) { readCSVSimple(x, buf); } diff --git a/src/IO/S3/PocoHTTPClient.h b/src/IO/S3/PocoHTTPClient.h index 57e4369e565..5649638285d 100644 --- a/src/IO/S3/PocoHTTPClient.h +++ b/src/IO/S3/PocoHTTPClient.h @@ -2,20 +2,22 @@ #include "config.h" +#include +#include + #if USE_AWS_S3 #include #include #include #include -#include +#include #include #include #include #include - namespace Aws::Http::Standard { class StandardHttpResponse; @@ -23,6 +25,7 @@ class StandardHttpResponse; namespace DB { + class Context; } diff --git a/src/IO/S3Common.cpp b/src/IO/S3Common.cpp index df19748b493..859f5ce796b 100644 --- a/src/IO/S3Common.cpp +++ b/src/IO/S3Common.cpp @@ -1,9 +1,11 @@ +#include + +#include +#include #include "config.h" #if USE_AWS_S3 -# 
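// Sketch of the new ReadBufferFromMemory overload above: a read buffer can be constructed
// directly over a std::string_view, since the view already carries a pointer and a length.
// ViewReader is a hypothetical standalone analogue; it takes the view by value, which is
// the conventional spelling for a non-owning view parameter.
#include <string_view>

class ViewReader
{
    const char * pos;
    const char * end;

public:
    explicit ViewReader(std::string_view view) : pos(view.data()), end(view.data() + view.size()) {}

    bool eof() const { return pos == end; }
    char next() { return *pos++; }   // caller checks eof() first
};

// Usage: ViewReader reader("2023-01-01"); while (!reader.eof()) { char c = reader.next(); /* ... */ }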
include - # include # include @@ -780,25 +782,16 @@ namespace S3 boost::to_upper(name); if (name != S3 && name != COS && name != OBS && name != OSS) - { throw Exception(ErrorCodes::BAD_ARGUMENTS, "Object storage system name is unrecognized in virtual hosted style S3 URI: {}", quoteString(name)); - } + if (name == S3) - { storage_name = name; - } else if (name == OBS) - { storage_name = OBS; - } else if (name == OSS) - { storage_name = OSS; - } else - { storage_name = COSN; - } } else if (re2::RE2::PartialMatch(uri.getPath(), path_style_pattern, &bucket, &key)) { @@ -851,8 +844,82 @@ namespace S3 { return getObjectInfo(client_ptr, bucket, key, version_id, throw_on_error, for_disk_s3).size; } + } } #endif + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int INVALID_CONFIG_PARAMETER; +} + +namespace S3 +{ + +AuthSettings AuthSettings::loadFromConfig(const std::string & config_elem, const Poco::Util::AbstractConfiguration & config) +{ + auto access_key_id = config.getString(config_elem + ".access_key_id", ""); + auto secret_access_key = config.getString(config_elem + ".secret_access_key", ""); + auto region = config.getString(config_elem + ".region", ""); + auto server_side_encryption_customer_key_base64 = config.getString(config_elem + ".server_side_encryption_customer_key_base64", ""); + + std::optional use_environment_credentials; + if (config.has(config_elem + ".use_environment_credentials")) + use_environment_credentials = config.getBool(config_elem + ".use_environment_credentials"); + + std::optional use_insecure_imds_request; + if (config.has(config_elem + ".use_insecure_imds_request")) + use_insecure_imds_request = config.getBool(config_elem + ".use_insecure_imds_request"); + + HeaderCollection headers; + Poco::Util::AbstractConfiguration::Keys subconfig_keys; + config.keys(config_elem, subconfig_keys); + for (const std::string & subkey : subconfig_keys) + { + if (subkey.starts_with("header")) + { + auto header_str = config.getString(config_elem + "." + subkey); + auto delimiter = header_str.find(':'); + if (delimiter == std::string::npos) + throw Exception("Malformed s3 header value", ErrorCodes::INVALID_CONFIG_PARAMETER); + headers.emplace_back(HttpHeader{header_str.substr(0, delimiter), header_str.substr(delimiter + 1, String::npos)}); + } + } + + return AuthSettings + { + std::move(access_key_id), std::move(secret_access_key), + std::move(region), + std::move(server_side_encryption_customer_key_base64), + std::move(headers), + use_environment_credentials, + use_insecure_imds_request + }; +} + + +void AuthSettings::updateFrom(const AuthSettings & from) +{ + /// Update with check for emptyness only parameters which + /// can be passed not only from config, but via ast. 
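// Sketch of the header parsing done in AuthSettings::loadFromConfig above: each configured
// header is a single "Name: value" string that is split on the first ':'. Standalone
// version; HttpHeaderPair and parseHeader are hypothetical names.
#include <stdexcept>
#include <string>
#include <utility>

using HttpHeaderPair = std::pair<std::string, std::string>;

inline HttpHeaderPair parseHeader(const std::string & header_str)
{
    auto delimiter = header_str.find(':');
    if (delimiter == std::string::npos)
        throw std::invalid_argument("Malformed s3 header value: " + header_str);
    // Everything after the first ':' is the value, leading whitespace included,
    // matching the substr(delimiter + 1) call in the hunk above.
    return {header_str.substr(0, delimiter), header_str.substr(delimiter + 1)};
}

// parseHeader("x-amz-storage-class: STANDARD") -> {"x-amz-storage-class", " STANDARD"}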
+ + if (!from.access_key_id.empty()) + access_key_id = from.access_key_id; + if (!from.secret_access_key.empty()) + secret_access_key = from.secret_access_key; + + headers = from.headers; + region = from.region; + server_side_encryption_customer_key_base64 = from.server_side_encryption_customer_key_base64; + use_environment_credentials = from.use_environment_credentials; + use_insecure_imds_request = from.use_insecure_imds_request; +} + +} +} diff --git a/src/IO/S3Common.h b/src/IO/S3Common.h index 5c27b32985f..93e5eb78c7f 100644 --- a/src/IO/S3Common.h +++ b/src/IO/S3Common.h @@ -1,5 +1,11 @@ #pragma once +#include +#include + +#include +#include + #include "config.h" #if USE_AWS_S3 @@ -8,7 +14,6 @@ #include #include #include -#include #include #include @@ -27,8 +32,6 @@ namespace ErrorCodes } class RemoteHostFilter; -struct HttpHeader; -using HeaderCollection = std::vector; class S3Exception : public Exception { @@ -130,5 +133,33 @@ S3::ObjectInfo getObjectInfo(std::shared_ptr client_ptr size_t getObjectSize(std::shared_ptr client_ptr, const String & bucket, const String & key, const String & version_id, bool throw_on_error, bool for_disk_s3); } - #endif + +namespace Poco::Util +{ +class AbstractConfiguration; +}; + +namespace DB::S3 +{ + +struct AuthSettings +{ + static AuthSettings loadFromConfig(const std::string & config_elem, const Poco::Util::AbstractConfiguration & config); + + std::string access_key_id; + std::string secret_access_key; + std::string region; + std::string server_side_encryption_customer_key_base64; + + HeaderCollection headers; + + std::optional use_environment_credentials; + std::optional use_insecure_imds_request; + + bool operator==(const AuthSettings & other) const = default; + + void updateFrom(const AuthSettings & from); +}; + +} diff --git a/src/IO/VarInt.h b/src/IO/VarInt.h index 3161ca6d8a8..816aa8fd057 100644 --- a/src/IO/VarInt.h +++ b/src/IO/VarInt.h @@ -83,14 +83,14 @@ inline void readVarUInt(UInt32 & x, ReadBuffer & istr) { UInt64 tmp; readVarUInt(tmp, istr); - x = tmp; + x = static_cast(tmp); } inline void readVarInt(Int32 & x, ReadBuffer & istr) { Int64 tmp; readVarInt(tmp, istr); - x = tmp; + x = static_cast(tmp); } inline void readVarUInt(UInt16 & x, ReadBuffer & istr) diff --git a/src/IO/WriteBufferFromPocoSocket.cpp b/src/IO/WriteBufferFromPocoSocket.cpp index fb4e5df9b59..95d532e9bd4 100644 --- a/src/IO/WriteBufferFromPocoSocket.cpp +++ b/src/IO/WriteBufferFromPocoSocket.cpp @@ -31,6 +31,7 @@ namespace ErrorCodes extern const int NETWORK_ERROR; extern const int SOCKET_TIMEOUT; extern const int CANNOT_WRITE_TO_SOCKET; + extern const int LOGICAL_ERROR; } @@ -55,7 +56,11 @@ void WriteBufferFromPocoSocket::nextImpl() try { CurrentMetrics::Increment metric_increment(CurrentMetrics::NetworkSend); - res = socket.impl()->sendBytes(working_buffer.begin() + bytes_written, offset() - bytes_written); + char * pos = working_buffer.begin() + bytes_written; + size_t size = offset() - bytes_written; + if (size > INT_MAX) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Buffer overflow"); + res = socket.impl()->sendBytes(pos, static_cast(size)); } catch (const Poco::Net::NetException & e) { diff --git a/src/IO/WriteBufferFromS3.cpp b/src/IO/WriteBufferFromS3.cpp index 14118c3c04e..f823015bd7d 100644 --- a/src/IO/WriteBufferFromS3.cpp +++ b/src/IO/WriteBufferFromS3.cpp @@ -305,7 +305,7 @@ void WriteBufferFromS3::writePart() UploadPartTask task; auto & tags = TSA_SUPPRESS_WARNING_FOR_WRITE(part_tags); /// Suppress warning because schedule == false. 
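// Sketch of the merge rule in AuthSettings::updateFrom above: credentials coming from a
// query/AST override the config only when they are non-empty, while the remaining fields
// are always taken from the incoming settings. MiniAuth is a hypothetical standalone struct.
#include <string>

struct MiniAuth
{
    std::string access_key_id;
    std::string secret_access_key;
    std::string region;

    void updateFrom(const MiniAuth & from)
    {
        if (!from.access_key_id.empty())
            access_key_id = from.access_key_id;         // keep the config value if the override is empty
        if (!from.secret_access_key.empty())
            secret_access_key = from.secret_access_key;
        region = from.region;                           // non-credential fields are replaced unconditionally
    }
};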
- fillUploadRequest(task.req, tags.size() + 1); + fillUploadRequest(task.req, static_cast(tags.size() + 1)); processUploadRequest(task); tags.push_back(task.tag); } @@ -362,7 +362,7 @@ void WriteBufferFromS3::completeMultipartUpload() for (size_t i = 0; i < tags.size(); ++i) { Aws::S3::Model::CompletedPart part; - multipart_upload.AddParts(part.WithETag(tags[i]).WithPartNumber(i + 1)); + multipart_upload.AddParts(part.WithETag(tags[i]).WithPartNumber(static_cast(i + 1))); } req.SetMultipartUpload(multipart_upload); diff --git a/src/IO/WriteBufferFromS3.h b/src/IO/WriteBufferFromS3.h index 782e580d8be..28f831856d7 100644 --- a/src/IO/WriteBufferFromS3.h +++ b/src/IO/WriteBufferFromS3.h @@ -110,8 +110,8 @@ private: std::unique_ptr put_object_task; /// Does not need protection by mutex because of the logic around is_finished field. std::list TSA_GUARDED_BY(bg_tasks_mutex) upload_object_tasks; - size_t num_added_bg_tasks TSA_GUARDED_BY(bg_tasks_mutex) = 0; - size_t num_finished_bg_tasks TSA_GUARDED_BY(bg_tasks_mutex) = 0; + int num_added_bg_tasks TSA_GUARDED_BY(bg_tasks_mutex) = 0; + int num_finished_bg_tasks TSA_GUARDED_BY(bg_tasks_mutex) = 0; std::mutex bg_tasks_mutex; std::condition_variable bg_tasks_condvar; diff --git a/src/IO/WriteBufferValidUTF8.cpp b/src/IO/WriteBufferValidUTF8.cpp index 10e86f01343..4c8e172f43c 100644 --- a/src/IO/WriteBufferValidUTF8.cpp +++ b/src/IO/WriteBufferValidUTF8.cpp @@ -102,7 +102,7 @@ void WriteBufferValidUTF8::nextImpl() break; #endif - size_t len = length_of_utf8_sequence[static_cast(*p)]; + UInt8 len = length_of_utf8_sequence[static_cast(*p)]; if (len > 4) { // NOLINT diff --git a/src/IO/WriteHelpers.cpp b/src/IO/WriteHelpers.cpp index cb341e60a8b..a9788505995 100644 --- a/src/IO/WriteHelpers.cpp +++ b/src/IO/WriteHelpers.cpp @@ -18,19 +18,6 @@ void formatHex(IteratorSrc src, IteratorDst dst, size_t num_bytes) } } -void formatUUID(const UInt8 * src16, UInt8 * dst36) -{ - formatHex(&src16[0], &dst36[0], 4); - dst36[8] = '-'; - formatHex(&src16[4], &dst36[9], 2); - dst36[13] = '-'; - formatHex(&src16[6], &dst36[14], 2); - dst36[18] = '-'; - formatHex(&src16[8], &dst36[19], 2); - dst36[23] = '-'; - formatHex(&src16[10], &dst36[24], 6); -} - /** Function used when byte ordering is important when parsing uuid * ex: When we create an UUID type */ diff --git a/src/IO/WriteHelpers.h b/src/IO/WriteHelpers.h index c3e1e59218f..39024b33eb1 100644 --- a/src/IO/WriteHelpers.h +++ b/src/IO/WriteHelpers.h @@ -139,7 +139,7 @@ inline void writeBoolText(bool x, WriteBuffer & buf) template inline size_t writeFloatTextFastPath(T x, char * buffer) { - int result = 0; + Int64 result = 0; if constexpr (std::is_same_v) { @@ -624,9 +624,6 @@ inline void writeXMLStringForTextElement(std::string_view s, WriteBuffer & buf) writeXMLStringForTextElement(s.data(), s.data() + s.size(), buf); } -template -void formatHex(IteratorSrc src, IteratorDst dst, size_t num_bytes); -void formatUUID(const UInt8 * src16, UInt8 * dst36); void formatUUID(std::reverse_iterator src16, UInt8 * dst36); inline void writeUUIDText(const UUID & uuid, WriteBuffer & buf) diff --git a/src/IO/WriteSettings.h b/src/IO/WriteSettings.h index 38a706997cf..a1f5b23fb97 100644 --- a/src/IO/WriteSettings.h +++ b/src/IO/WriteSettings.h @@ -15,6 +15,7 @@ struct WriteSettings bool enable_filesystem_cache_on_write_operations = false; bool enable_filesystem_cache_log = false; bool is_file_cache_persistent = false; + bool s3_allow_parallel_part_upload = true; /// Monitoring bool for_object_storage = false; // to choose which 
profile events should be incremented diff --git a/src/IO/ZlibDeflatingWriteBuffer.cpp b/src/IO/ZlibDeflatingWriteBuffer.cpp index c265791e38a..43014096e2a 100644 --- a/src/IO/ZlibDeflatingWriteBuffer.cpp +++ b/src/IO/ZlibDeflatingWriteBuffer.cpp @@ -49,7 +49,7 @@ void ZlibDeflatingWriteBuffer::nextImpl() return; zstr.next_in = reinterpret_cast(working_buffer.begin()); - zstr.avail_in = offset(); + zstr.avail_in = static_cast(offset()); try { @@ -57,7 +57,7 @@ void ZlibDeflatingWriteBuffer::nextImpl() { out->nextIfAtEnd(); zstr.next_out = reinterpret_cast(out->position()); - zstr.avail_out = out->buffer().end() - out->position(); + zstr.avail_out = static_cast(out->buffer().end() - out->position()); int rc = deflate(&zstr, Z_NO_FLUSH); out->position() = out->buffer().end() - zstr.avail_out; @@ -96,7 +96,7 @@ void ZlibDeflatingWriteBuffer::finalizeBefore() { out->nextIfAtEnd(); zstr.next_out = reinterpret_cast(out->position()); - zstr.avail_out = out->buffer().end() - out->position(); + zstr.avail_out = static_cast(out->buffer().end() - out->position()); int rc = deflate(&zstr, Z_FULL_FLUSH); out->position() = out->buffer().end() - zstr.avail_out; @@ -110,7 +110,7 @@ void ZlibDeflatingWriteBuffer::finalizeBefore() { out->nextIfAtEnd(); zstr.next_out = reinterpret_cast(out->position()); - zstr.avail_out = out->buffer().end() - out->position(); + zstr.avail_out = static_cast(out->buffer().end() - out->position()); int rc = deflate(&zstr, Z_FINISH); out->position() = out->buffer().end() - zstr.avail_out; diff --git a/src/IO/ZlibInflatingReadBuffer.cpp b/src/IO/ZlibInflatingReadBuffer.cpp index 4cb56bef6b1..9c2ee640cbe 100644 --- a/src/IO/ZlibInflatingReadBuffer.cpp +++ b/src/IO/ZlibInflatingReadBuffer.cpp @@ -61,11 +61,11 @@ bool ZlibInflatingReadBuffer::nextImpl() { in->nextIfAtEnd(); zstr.next_in = reinterpret_cast(in->position()); - zstr.avail_in = in->buffer().end() - in->position(); + zstr.avail_in = static_cast(in->buffer().end() - in->position()); } /// init output bytes (place, where decompressed data will be) zstr.next_out = reinterpret_cast(internal_buffer.begin()); - zstr.avail_out = internal_buffer.size(); + zstr.avail_out = static_cast(internal_buffer.size()); int rc = inflate(&zstr, Z_NO_FLUSH); diff --git a/src/IO/ZstdDeflatingAppendableWriteBuffer.cpp b/src/IO/ZstdDeflatingAppendableWriteBuffer.cpp index 459f486af18..79fb4ccead5 100644 --- a/src/IO/ZstdDeflatingAppendableWriteBuffer.cpp +++ b/src/IO/ZstdDeflatingAppendableWriteBuffer.cpp @@ -149,7 +149,7 @@ void ZstdDeflatingAppendableWriteBuffer::finalizeZstd() { try { - int err = ZSTD_freeCCtx(cctx); + size_t err = ZSTD_freeCCtx(cctx); /// This is just in case, since it is impossible to get an error by using this wrapper. if (unlikely(err)) throw Exception(ErrorCodes::ZSTD_ENCODER_FAILED, "ZSTD_freeCCtx failed: error: '{}'; zstd version: {}", ZSTD_getErrorName(err), ZSTD_VERSION_STRING); diff --git a/src/IO/ZstdDeflatingWriteBuffer.cpp b/src/IO/ZstdDeflatingWriteBuffer.cpp index 238645b16df..c7f9b0d718b 100644 --- a/src/IO/ZstdDeflatingWriteBuffer.cpp +++ b/src/IO/ZstdDeflatingWriteBuffer.cpp @@ -100,7 +100,7 @@ void ZstdDeflatingWriteBuffer::finalizeAfter() { try { - int err = ZSTD_freeCCtx(cctx); + size_t err = ZSTD_freeCCtx(cctx); /// This is just in case, since it is impossible to get an error by using this wrapper. 
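// Sketch of the zlib calling convention behind the avail_in/avail_out casts above: z_stream
// uses uInt (typically 32-bit) for those fields, so pointer differences and buffer sizes
// (size_t/ptrdiff_t) are narrowed explicitly. Minimal standalone single-shot compression,
// assuming the input is smaller than 4 GiB.
#include <zlib.h>
#include <vector>

std::vector<unsigned char> deflateAll(const unsigned char * data, size_t size)
{
    z_stream zstr{};
    deflateInit(&zstr, Z_DEFAULT_COMPRESSION);

    std::vector<unsigned char> out(deflateBound(&zstr, static_cast<uLong>(size)));
    zstr.next_in = const_cast<unsigned char *>(data);
    zstr.avail_in = static_cast<uInt>(size);             // explicit size_t -> uInt narrowing
    zstr.next_out = out.data();
    zstr.avail_out = static_cast<uInt>(out.size());

    deflate(&zstr, Z_FINISH);                            // output sized by deflateBound, so one call suffices
    out.resize(zstr.total_out);
    deflateEnd(&zstr);
    return out;
}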
if (unlikely(err)) throw Exception(ErrorCodes::ZSTD_ENCODER_FAILED, "ZSTD_freeCCtx failed: error: '{}'; zstd version: {}", ZSTD_getErrorName(err), ZSTD_VERSION_STRING); diff --git a/src/IO/examples/valid_utf8_perf.cpp b/src/IO/examples/valid_utf8_perf.cpp index b95cdb2c27c..f42251188d9 100644 --- a/src/IO/examples/valid_utf8_perf.cpp +++ b/src/IO/examples/valid_utf8_perf.cpp @@ -10,7 +10,7 @@ int main(int argc, char ** argv) { int repeats = 1; if (argc >= 2) - repeats = std::stol(argv[1]); + repeats = static_cast(std::stol(argv[1])); std::string text((std::istreambuf_iterator(std::cin)), std::istreambuf_iterator()); diff --git a/src/IO/examples/zlib_ng_bug.cpp b/src/IO/examples/zlib_ng_bug.cpp index 9fe3c961913..f7c3d1eeefe 100644 --- a/src/IO/examples/zlib_ng_bug.cpp +++ b/src/IO/examples/zlib_ng_bug.cpp @@ -23,9 +23,9 @@ int main(int, char **) throw std::runtime_error("Cannot deflateInit2"); zstr.next_in = in.data(); - zstr.avail_in = in.size(); + zstr.avail_in = static_cast(in.size()); zstr.next_out = out.data(); - zstr.avail_out = out.size(); + zstr.avail_out = static_cast(out.size()); while (zstr.avail_in > 0) if (Z_OK != deflate(&zstr, Z_NO_FLUSH)) diff --git a/src/IO/readDecimalText.h b/src/IO/readDecimalText.h index 2e06acb2f3e..9d7f8137136 100644 --- a/src/IO/readDecimalText.h +++ b/src/IO/readDecimalText.h @@ -106,7 +106,7 @@ inline bool readDigits(ReadBuffer & buf, T & x, uint32_t & digits, int32_t & exp exponent -= places; // TODO: accurate shift10 for big integers - x *= intExp10OfSize(places); + x *= intExp10OfSize(places); places = 0; x += (byte - '0'); @@ -147,23 +147,32 @@ inline bool readDigits(ReadBuffer & buf, T & x, uint32_t & digits, int32_t & exp return true; } -template -inline void readDecimalText(ReadBuffer & buf, T & x, uint32_t precision, uint32_t & scale, bool digits_only = false) +template +inline ReturnType readDecimalText(ReadBuffer & buf, T & x, uint32_t precision, uint32_t & scale, bool digits_only = false) { + static constexpr bool throw_exception = std::is_same_v; + uint32_t digits = precision; int32_t exponent; - readDigits(buf, x, digits, exponent, digits_only); + auto ok = readDigits(buf, x, digits, exponent, digits_only); + + if (!throw_exception && !ok) + return ReturnType(false); if (static_cast(digits) + exponent > static_cast(precision - scale)) { - static constexpr const char * pattern = - "Decimal value is too big: {} digits were read: {}e{}." - " Expected to read decimal with scale {} and precision {}"; + if constexpr (throw_exception) + { + static constexpr const char * pattern = "Decimal value is too big: {} digits were read: {}e{}." 
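// Sketch of the ReturnType pattern the readDecimalText refactor above adopts: the same
// parsing body serves both the throwing entry point (ReturnType = void) and the
// non-throwing one (ReturnType = bool), selected at compile time, so tryRead* can simply
// delegate. parseDigit/tryParseDigit are hypothetical standalone names.
#include <stdexcept>
#include <type_traits>

template <typename ReturnType = void>
ReturnType parseDigit(char c, int & out)
{
    static constexpr bool throw_exception = std::is_same_v<ReturnType, void>;

    if (c < '0' || c > '9')
    {
        if constexpr (throw_exception)
            throw std::invalid_argument("Not a digit");
        else
            return ReturnType(false);
    }
    out = c - '0';
    return ReturnType(true);   // ReturnType(true) collapses to void(...) for the throwing variant
}

inline bool tryParseDigit(char c, int & out) { return parseDigit<bool>(c, out); }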
+ " Expected to read decimal with scale {} and precision {}"; - if constexpr (is_big_int_v) - throw Exception(fmt::format(pattern, digits, x.value, exponent, scale, precision), ErrorCodes::ARGUMENT_OUT_OF_BOUND); + if constexpr (is_big_int_v) + throw Exception(fmt::format(pattern, digits, x.value, exponent, scale, precision), ErrorCodes::ARGUMENT_OUT_OF_BOUND); + else + throw Exception(fmt::format(pattern, digits, x, exponent, scale, precision), ErrorCodes::ARGUMENT_OUT_OF_BOUND); + } else - throw Exception(fmt::format(pattern, digits, x, exponent, scale, precision), ErrorCodes::ARGUMENT_OUT_OF_BOUND); + return ReturnType(false); } if (static_cast(scale) + exponent < 0) @@ -175,7 +184,7 @@ inline void readDecimalText(ReadBuffer & buf, T & x, uint32_t precision, uint32_ /// Too big negative exponent x.value = 0; scale = 0; - return; + return ReturnType(true); } else { @@ -184,26 +193,18 @@ inline void readDecimalText(ReadBuffer & buf, T & x, uint32_t precision, uint32_ assert(divisor > 0); /// This is for Clang Static Analyzer. It is not smart enough to infer it automatically. x.value /= divisor; scale = 0; - return; + return ReturnType(true); } } scale += exponent; + return ReturnType(true); } template inline bool tryReadDecimalText(ReadBuffer & buf, T & x, uint32_t precision, uint32_t & scale) { - uint32_t digits = precision; - int32_t exponent; - - if (!readDigits(buf, x, digits, exponent, true) || - static_cast(digits) + exponent > static_cast(precision - scale) || - static_cast(scale) + exponent < 0) - return false; - - scale += exponent; - return true; + return readDecimalText(buf, x, precision, scale, true); } template diff --git a/src/IO/readFloatText.h b/src/IO/readFloatText.h index b8d0c1ba3c0..a72ff82008e 100644 --- a/src/IO/readFloatText.h +++ b/src/IO/readFloatText.h @@ -349,11 +349,11 @@ ReturnType readFloatTextFastImpl(T & x, ReadBuffer & in) constexpr int significant_digits = std::numeric_limits::digits10; readUIntTextUpToNSignificantDigits(before_point, in); - int read_digits = in.count() - count_after_sign; + size_t read_digits = in.count() - count_after_sign; if (unlikely(read_digits > significant_digits)) { - int before_point_additional_exponent = read_digits - significant_digits; + int before_point_additional_exponent = static_cast(read_digits) - significant_digits; x = static_cast(shift10(before_point, before_point_additional_exponent)); } else @@ -377,11 +377,11 @@ ReturnType readFloatTextFastImpl(T & x, ReadBuffer & in) ++in.position(); auto after_leading_zeros_count = in.count(); - auto after_point_num_leading_zeros = after_leading_zeros_count - after_point_count; + int after_point_num_leading_zeros = static_cast(after_leading_zeros_count - after_point_count); readUIntTextUpToNSignificantDigits(after_point, in); read_digits = in.count() - after_leading_zeros_count; - after_point_exponent = (read_digits > significant_digits ? -significant_digits : -read_digits) - after_point_num_leading_zeros; + after_point_exponent = (read_digits > significant_digits ? 
-significant_digits : static_cast(-read_digits)) - after_point_num_leading_zeros; } if (checkChar('e', in) || checkChar('E', in)) diff --git a/src/IO/tests/gtest_memory_resize.cpp b/src/IO/tests/gtest_memory_resize.cpp index 8619419a47a..d760a948075 100644 --- a/src/IO/tests/gtest_memory_resize.cpp +++ b/src/IO/tests/gtest_memory_resize.cpp @@ -79,24 +79,24 @@ TEST(MemoryResizeTest, SmallInitAndSmallResize) memory.resize(1); ASSERT_TRUE(memory.m_data); - ASSERT_EQ(memory.m_capacity, 16); + ASSERT_EQ(memory.m_capacity, PADDING_FOR_SIMD); ASSERT_EQ(memory.m_size, 1); } { auto memory = Memory(1); ASSERT_TRUE(memory.m_data); - ASSERT_EQ(memory.m_capacity, 16); + ASSERT_EQ(memory.m_capacity, PADDING_FOR_SIMD); ASSERT_EQ(memory.m_size, 1); memory.resize(0); ASSERT_TRUE(memory.m_data); - ASSERT_EQ(memory.m_capacity, 16); + ASSERT_EQ(memory.m_capacity, PADDING_FOR_SIMD); ASSERT_EQ(memory.m_size, 0); memory.resize(1); ASSERT_TRUE(memory.m_data); - ASSERT_EQ(memory.m_capacity, 16); + ASSERT_EQ(memory.m_capacity, PADDING_FOR_SIMD); ASSERT_EQ(memory.m_size, 1); } } @@ -116,52 +116,52 @@ TEST(MemoryResizeTest, SmallInitAndBigResizeOverflowWhenPadding) memory.resize(1); ASSERT_TRUE(memory.m_data); - ASSERT_EQ(memory.m_capacity, 16); + ASSERT_EQ(memory.m_capacity, PADDING_FOR_SIMD); ASSERT_EQ(memory.m_size, 1); memory.resize(2); ASSERT_TRUE(memory.m_data); - ASSERT_EQ(memory.m_capacity, 17); + ASSERT_EQ(memory.m_capacity, PADDING_FOR_SIMD + 1); ASSERT_EQ(memory.m_size, 2); EXPECT_THROW_ERROR_CODE(memory.resize(std::numeric_limits::max()), Exception, ErrorCodes::ARGUMENT_OUT_OF_BOUND); ASSERT_TRUE(memory.m_data); // state is intact after exception - ASSERT_EQ(memory.m_capacity, 17); + ASSERT_EQ(memory.m_capacity, PADDING_FOR_SIMD + 1); ASSERT_EQ(memory.m_size, 2); - memory.resize(0x8000000000000000ULL-16); + memory.resize(0x8000000000000000ULL - PADDING_FOR_SIMD); ASSERT_TRUE(memory.m_data); ASSERT_EQ(memory.m_capacity, 0x8000000000000000ULL - 1); - ASSERT_EQ(memory.m_size, 0x8000000000000000ULL - 16); + ASSERT_EQ(memory.m_size, 0x8000000000000000ULL - PADDING_FOR_SIMD); #ifndef ABORT_ON_LOGICAL_ERROR - EXPECT_THROW_ERROR_CODE(memory.resize(0x8000000000000000ULL-15), Exception, ErrorCodes::LOGICAL_ERROR); + EXPECT_THROW_ERROR_CODE(memory.resize(0x8000000000000000ULL - (PADDING_FOR_SIMD - 1)), Exception, ErrorCodes::LOGICAL_ERROR); ASSERT_TRUE(memory.m_data); // state is intact after exception ASSERT_EQ(memory.m_capacity, 0x8000000000000000ULL - 1); - ASSERT_EQ(memory.m_size, 0x8000000000000000ULL - 16); + ASSERT_EQ(memory.m_size, 0x8000000000000000ULL - PADDING_FOR_SIMD); #endif } { auto memory = Memory(1); ASSERT_TRUE(memory.m_data); - ASSERT_EQ(memory.m_capacity, 16); + ASSERT_EQ(memory.m_capacity, PADDING_FOR_SIMD); ASSERT_EQ(memory.m_size, 1); EXPECT_THROW_ERROR_CODE(memory.resize(std::numeric_limits::max()), Exception, ErrorCodes::ARGUMENT_OUT_OF_BOUND); ASSERT_TRUE(memory.m_data); // state is intact after exception - ASSERT_EQ(memory.m_capacity, 16); + ASSERT_EQ(memory.m_capacity, PADDING_FOR_SIMD); ASSERT_EQ(memory.m_size, 1); memory.resize(1); ASSERT_TRUE(memory.m_data); - ASSERT_EQ(memory.m_capacity, 16); + ASSERT_EQ(memory.m_capacity, PADDING_FOR_SIMD); ASSERT_EQ(memory.m_size, 1); #ifndef ABORT_ON_LOGICAL_ERROR - EXPECT_THROW_ERROR_CODE(memory.resize(0x8000000000000000ULL-15), Exception, ErrorCodes::LOGICAL_ERROR); + EXPECT_THROW_ERROR_CODE(memory.resize(0x8000000000000000ULL - (PADDING_FOR_SIMD - 1)), Exception, ErrorCodes::LOGICAL_ERROR); ASSERT_TRUE(memory.m_data); // state is intact after 
exception - ASSERT_EQ(memory.m_capacity, 16); + ASSERT_EQ(memory.m_capacity, PADDING_FOR_SIMD); ASSERT_EQ(memory.m_size, 1); #endif } @@ -201,7 +201,7 @@ TEST(MemoryResizeTest, BigInitAndSmallResizeOverflowWhenPadding) { EXPECT_THROW_ERROR_CODE( { - auto memory = Memory(std::numeric_limits::max() - 15); + auto memory = Memory(std::numeric_limits::max() - (PADDING_FOR_SIMD - 1)); } , Exception , ErrorCodes::LOGICAL_ERROR); @@ -210,7 +210,7 @@ TEST(MemoryResizeTest, BigInitAndSmallResizeOverflowWhenPadding) { EXPECT_THROW_ERROR_CODE( { - auto memory = Memory(0x8000000000000000ULL - 15); + auto memory = Memory(0x8000000000000000ULL - (PADDING_FOR_SIMD - 1)); } , Exception , ErrorCodes::LOGICAL_ERROR); @@ -218,10 +218,10 @@ TEST(MemoryResizeTest, BigInitAndSmallResizeOverflowWhenPadding) #endif { - auto memory = Memory(0x8000000000000000ULL - 16); - ASSERT_TRUE(memory.m_data); - ASSERT_EQ(memory.m_capacity, 0x8000000000000000ULL - 1); - ASSERT_EQ(memory.m_size, 0x8000000000000000ULL - 16); + auto memory = Memory(0x8000000000000000ULL - PADDING_FOR_SIMD); + ASSERT_TRUE(memory.m_data); + ASSERT_EQ(memory.m_capacity, 0x8000000000000000ULL - 1); + ASSERT_EQ(memory.m_size, 0x8000000000000000ULL - PADDING_FOR_SIMD); memory.resize(1); ASSERT_TRUE(memory.m_data); @@ -240,32 +240,32 @@ TEST(MemoryResizeTest, AlignmentWithRealAllocator) memory.resize(1); ASSERT_TRUE(memory.m_data); - ASSERT_EQ(memory.m_capacity, 16); + ASSERT_EQ(memory.m_capacity, PADDING_FOR_SIMD); ASSERT_EQ(memory.m_size, 1); memory.resize(2); ASSERT_TRUE(memory.m_data); - ASSERT_EQ(memory.m_capacity, 17); + ASSERT_EQ(memory.m_capacity, PADDING_FOR_SIMD + 1); ASSERT_EQ(memory.m_size, 2); memory.resize(3); ASSERT_TRUE(memory.m_data); - ASSERT_EQ(memory.m_capacity, 18); + ASSERT_EQ(memory.m_capacity, PADDING_FOR_SIMD + 2); ASSERT_EQ(memory.m_size, 3); memory.resize(4); ASSERT_TRUE(memory.m_data); - ASSERT_EQ(memory.m_capacity, 19); + ASSERT_EQ(memory.m_capacity, PADDING_FOR_SIMD + 3); ASSERT_EQ(memory.m_size, 4); memory.resize(0); ASSERT_TRUE(memory.m_data); - ASSERT_EQ(memory.m_capacity, 19); + ASSERT_EQ(memory.m_capacity, PADDING_FOR_SIMD + 3); ASSERT_EQ(memory.m_size, 0); memory.resize(1); ASSERT_TRUE(memory.m_data); - ASSERT_EQ(memory.m_capacity, 19); + ASSERT_EQ(memory.m_capacity, PADDING_FOR_SIMD + 3); ASSERT_EQ(memory.m_size, 1); } @@ -291,12 +291,12 @@ TEST(MemoryResizeTest, AlignmentWithRealAllocator) memory.resize(1); ASSERT_TRUE(memory.m_data); - ASSERT_EQ(memory.m_capacity, 16); + ASSERT_EQ(memory.m_capacity, PADDING_FOR_SIMD); ASSERT_EQ(memory.m_size, 1); memory.resize(32); ASSERT_TRUE(memory.m_data); - ASSERT_EQ(memory.m_capacity, 47); + ASSERT_EQ(memory.m_capacity, PADDING_FOR_SIMD + 31); ASSERT_EQ(memory.m_size, 32); } } @@ -316,13 +316,12 @@ TEST(MemoryResizeTest, SomeAlignmentOverflowWhenAlignment) memory.resize(1); ASSERT_TRUE(memory.m_data); - ASSERT_EQ(memory.m_capacity, 16); + ASSERT_EQ(memory.m_capacity, PADDING_FOR_SIMD); ASSERT_EQ(memory.m_size, 1); EXPECT_THROW_ERROR_CODE(memory.resize(std::numeric_limits::max()), Exception, ErrorCodes::ARGUMENT_OUT_OF_BOUND); ASSERT_TRUE(memory.m_data); // state is intact after exception - ASSERT_EQ(memory.m_capacity, 16); + ASSERT_EQ(memory.m_capacity, PADDING_FOR_SIMD); ASSERT_EQ(memory.m_size, 1); } - } diff --git a/src/Interpreters/ActionsVisitor.h b/src/Interpreters/ActionsVisitor.h index a27745d2cfa..fea013fd075 100644 --- a/src/Interpreters/ActionsVisitor.h +++ b/src/Interpreters/ActionsVisitor.h @@ -140,7 +140,7 @@ public: * when we add lots of column with same prefix. 
One counter for all * prefixes is good enough. */ - int next_unique_suffix; + size_t next_unique_suffix; Data( ContextPtr context_, diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index e9a72ce0156..182f2292b28 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -1638,14 +1638,14 @@ Block Aggregator::convertOneBucketToBlock( Method & method, Arena * arena, bool final, - size_t bucket) const + Int32 bucket) const { // Used in ConvertingAggregatedToChunksSource -> ConvertingAggregatedToChunksTransform (expects single chunk for each bucket_id). constexpr bool return_single_block = true; Block block = convertToBlockImpl( method, method.data.impls[bucket], arena, data_variants.aggregates_pools, final, method.data.impls[bucket].size()); - block.info.bucket_num = bucket; + block.info.bucket_num = static_cast(bucket); return block; } @@ -1653,7 +1653,7 @@ Block Aggregator::mergeAndConvertOneBucketToBlock( ManyAggregatedDataVariants & variants, Arena * arena, bool final, - size_t bucket, + Int32 bucket, std::atomic * is_cancelled) const { auto & merged_data = *variants[0]; @@ -1697,7 +1697,7 @@ void Aggregator::writeToTemporaryFileImpl( max_temporary_block_size_bytes = block_size_bytes; }; - for (size_t bucket = 0; bucket < Method::Data::NUM_BUCKETS; ++bucket) + for (UInt32 bucket = 0; bucket < Method::Data::NUM_BUCKETS; ++bucket) { Block block = convertOneBucketToBlock(data_variants, method, data_variants.aggregates_pool, false, bucket); out.write(block); @@ -3159,19 +3159,19 @@ void NO_INLINE Aggregator::convertBlockToTwoLevelImpl( selector[i] = bucket; } - size_t num_buckets = destinations.size(); + UInt32 num_buckets = static_cast(destinations.size()); for (size_t column_idx = 0; column_idx < columns; ++column_idx) { const ColumnWithTypeAndName & src_col = source.getByPosition(column_idx); MutableColumns scattered_columns = src_col.column->scatter(num_buckets, selector); - for (size_t bucket = 0, size = num_buckets; bucket < size; ++bucket) + for (UInt32 bucket = 0, size = num_buckets; bucket < size; ++bucket) { if (!scattered_columns[bucket]->empty()) { Block & dst = destinations[bucket]; - dst.info.bucket_num = bucket; + dst.info.bucket_num = static_cast(bucket); dst.insert({std::move(scattered_columns[bucket]), src_col.type, src_col.name}); } diff --git a/src/Interpreters/Aggregator.h b/src/Interpreters/Aggregator.h index b8aab7a3343..c81cfa2c0a2 100644 --- a/src/Interpreters/Aggregator.h +++ b/src/Interpreters/Aggregator.h @@ -1301,13 +1301,13 @@ private: Method & method, Arena * arena, bool final, - size_t bucket) const; + Int32 bucket) const; Block mergeAndConvertOneBucketToBlock( ManyAggregatedDataVariants & variants, Arena * arena, bool final, - size_t bucket, + Int32 bucket, std::atomic * is_cancelled = nullptr) const; Block prepareBlockAndFillWithoutKey(AggregatedDataVariants & data_variants, bool final, bool is_overflows) const; diff --git a/src/Interpreters/AsynchronousMetrics.cpp b/src/Interpreters/AsynchronousMetrics.cpp index 338ae1bbbfd..488ac77e956 100644 --- a/src/Interpreters/AsynchronousMetrics.cpp +++ b/src/Interpreters/AsynchronousMetrics.cpp @@ -703,19 +703,26 @@ void AsynchronousMetrics::update(TimePoint update_time) Int64 free_memory_in_allocator_arenas = 0; #if USE_JEMALLOC - /// This is a memory which is kept by allocator. - /// Will subsract it from RSS to decrease memory drift. 
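// Sketch of the two-level scatter pattern touched by the Aggregator hunks above: each row
// is assigned a bucket via a selector, columns are split per bucket, and the bucket id is
// stamped onto the resulting block as a signed Int32 bucket_num. Standalone toy with
// hypothetical names.
#include <cstdint>
#include <vector>

struct MiniBlock
{
    int32_t bucket_num = -1;            // -1 means "single level / no bucket"
    std::vector<uint64_t> values;
};

std::vector<MiniBlock> scatter(const std::vector<uint64_t> & column, uint32_t num_buckets)
{
    std::vector<MiniBlock> destinations(num_buckets);
    for (uint64_t v : column)
    {
        uint32_t bucket = static_cast<uint32_t>(v % num_buckets);   // selector per row
        destinations[bucket].values.push_back(v);
        destinations[bucket].bucket_num = static_cast<int32_t>(bucket);
    }
    return destinations;
}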
+ /// According to jemalloc man, pdirty is: + /// + /// Number of pages within unused extents that are potentially + /// dirty, and for which madvise() or similar has not been called. + /// + /// So they will be subtracted from RSS to make accounting more + /// accurate, since those pages are not really RSS but a memory + /// that can be used at anytime via jemalloc. free_memory_in_allocator_arenas = je_malloc_pdirty * getPageSize(); #endif - Int64 difference = rss - free_memory_in_allocator_arenas - amount; + Int64 difference = rss - amount; /// Log only if difference is high. This is for convenience. The threshold is arbitrary. if (difference >= 1048576 || difference <= -1048576) LOG_TRACE(log, - "MemoryTracking: was {}, peak {}, will set to {} (RSS), difference: {}", + "MemoryTracking: was {}, peak {}, free memory in arenas {}, will set to {} (RSS), difference: {}", ReadableSize(amount), ReadableSize(peak), + ReadableSize(free_memory_in_allocator_arenas), ReadableSize(rss), ReadableSize(difference)); diff --git a/src/Interpreters/Cache/FileCache.cpp b/src/Interpreters/Cache/FileCache.cpp index 20a9f6cce1d..72fa1b3c324 100644 --- a/src/Interpreters/Cache/FileCache.cpp +++ b/src/Interpreters/Cache/FileCache.cpp @@ -32,6 +32,8 @@ FileCache::FileCache( , allow_persistent_files(cache_settings_.do_not_evict_index_and_mark_files) , enable_cache_hits_threshold(cache_settings_.enable_cache_hits_threshold) , enable_filesystem_query_cache_limit(cache_settings_.enable_filesystem_query_cache_limit) + , enable_bypass_cache_with_threashold(cache_settings_.enable_bypass_cache_with_threashold) + , bypass_cache_threashold(cache_settings_.bypass_cache_threashold) , log(&Poco::Logger::get("FileCache")) , main_priority(std::make_unique()) , stash_priority(std::make_unique()) @@ -185,6 +187,20 @@ FileSegments FileCache::getImpl( /// Given range = [left, right] and non-overlapping ordered set of file segments, /// find list [segment1, ..., segmentN] of segments which intersect with given range. + FileSegments result; + + if (enable_bypass_cache_with_threashold && (range.size() > bypass_cache_threashold)) + { + auto file_segment = std::make_shared( + range.left, range.size(), key, this, FileSegment::State::SKIP_CACHE, CreateFileSegmentSettings{}); + { + std::unique_lock segment_lock(file_segment->mutex); + file_segment->detachAssumeStateFinalized(segment_lock); + } + result.emplace_back(file_segment); + return result; + } + auto it = files.find(key); if (it == files.end()) return {}; @@ -197,7 +213,6 @@ FileSegments FileCache::getImpl( return {}; } - FileSegments result; auto segment_it = file_segments.lower_bound(range.left); if (segment_it == file_segments.end()) { @@ -392,7 +407,6 @@ FileSegmentsHolder FileCache::getOrSet(const Key & key, size_t offset, size_t si #endif FileSegment::Range range(offset, offset + size - 1); - /// Get all segments which intersect with the given range. 
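// Sketch of the arena accounting described in the jemalloc comment above: pdirty counts
// pages that are unused but not yet returned to the OS, so their byte size
// (pdirty * page size) is reported next to RSS when the memory tracker is corrected.
// je_malloc_pdirty and the page size argument stand in for the real counters.
#include <cstdint>

int64_t dirtyArenaBytes(uint64_t je_malloc_pdirty, int64_t page_size)
{
    // Pages kept dirty by the allocator: reusable by future allocations without new RSS growth.
    return static_cast<int64_t>(je_malloc_pdirty) * page_size;
}

// Example: 25600 dirty pages * 4096-byte pages = 100 MiB held by jemalloc arenas.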
auto file_segments = getImpl(key, range, cache_lock); @@ -404,7 +418,6 @@ FileSegmentsHolder FileCache::getOrSet(const Key & key, size_t offset, size_t si { fillHolesWithEmptyFileSegments(file_segments, key, range, /* fill_with_detached */false, settings, cache_lock); } - assert(!file_segments.empty()); return FileSegmentsHolder(std::move(file_segments)); } diff --git a/src/Interpreters/Cache/FileCache.h b/src/Interpreters/Cache/FileCache.h index 07aea230803..706762b6915 100644 --- a/src/Interpreters/Cache/FileCache.h +++ b/src/Interpreters/Cache/FileCache.h @@ -140,6 +140,9 @@ private: const size_t enable_cache_hits_threshold; const bool enable_filesystem_query_cache_limit; + const bool enable_bypass_cache_with_threashold; + const size_t bypass_cache_threashold; + mutable std::mutex mutex; Poco::Logger * log; diff --git a/src/Interpreters/Cache/FileCacheSettings.cpp b/src/Interpreters/Cache/FileCacheSettings.cpp index 4b8d806bb53..b13cdd2ed04 100644 --- a/src/Interpreters/Cache/FileCacheSettings.cpp +++ b/src/Interpreters/Cache/FileCacheSettings.cpp @@ -35,6 +35,13 @@ void FileCacheSettings::loadFromConfig(const Poco::Util::AbstractConfiguration & enable_filesystem_query_cache_limit = config.getUInt64(config_prefix + ".enable_filesystem_query_cache_limit", false); enable_cache_hits_threshold = config.getUInt64(config_prefix + ".enable_cache_hits_threshold", REMOTE_FS_OBJECTS_CACHE_ENABLE_HITS_THRESHOLD); + enable_bypass_cache_with_threashold = config.getUInt64(config_prefix + ".enable_bypass_cache_with_threashold", false); + + if (config.has(config_prefix + ".bypass_cache_threashold")) + bypass_cache_threashold = parseWithSizeSuffix(config.getString(config_prefix + ".bypass_cache_threashold")); + else + bypass_cache_threashold = REMOTE_FS_OBJECTS_CACHE_BYPASS_THRESHOLD; + do_not_evict_index_and_mark_files = config.getUInt64(config_prefix + ".do_not_evict_index_and_mark_files", false); } diff --git a/src/Interpreters/Cache/FileCacheSettings.h b/src/Interpreters/Cache/FileCacheSettings.h index c6155edad85..80f7b5fa93f 100644 --- a/src/Interpreters/Cache/FileCacheSettings.h +++ b/src/Interpreters/Cache/FileCacheSettings.h @@ -20,6 +20,9 @@ struct FileCacheSettings bool do_not_evict_index_and_mark_files = true; + bool enable_bypass_cache_with_threashold = false; + size_t bypass_cache_threashold = REMOTE_FS_OBJECTS_CACHE_BYPASS_THRESHOLD; + void loadFromConfig(const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix); }; diff --git a/src/Interpreters/Cache/FileCache_fwd.h b/src/Interpreters/Cache/FileCache_fwd.h index 25c16b4e840..72dc1144fb9 100644 --- a/src/Interpreters/Cache/FileCache_fwd.h +++ b/src/Interpreters/Cache/FileCache_fwd.h @@ -7,6 +7,7 @@ namespace DB static constexpr int REMOTE_FS_OBJECTS_CACHE_DEFAULT_MAX_FILE_SEGMENT_SIZE = 100 * 1024 * 1024; static constexpr int REMOTE_FS_OBJECTS_CACHE_DEFAULT_MAX_ELEMENTS = 1024 * 1024; static constexpr int REMOTE_FS_OBJECTS_CACHE_ENABLE_HITS_THRESHOLD = 0; +static constexpr size_t REMOTE_FS_OBJECTS_CACHE_BYPASS_THRESHOLD = 256 * 1024 * 1024;; class FileCache; using FileCachePtr = std::shared_ptr; diff --git a/src/Interpreters/Cache/FileSegment.cpp b/src/Interpreters/Cache/FileSegment.cpp index cf48c5cd976..418bcee05d9 100644 --- a/src/Interpreters/Cache/FileSegment.cpp +++ b/src/Interpreters/Cache/FileSegment.cpp @@ -66,7 +66,7 @@ FileSegment::FileSegment( { throw Exception( ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, - "Can create cell with either EMPTY, DOWNLOADED, DOWNLOADING state"); + "Can only create cell 
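// Sketch of the bypass decision added to FileCache::getImpl above: when a single read
// covers more than the configured threshold (256 MiB by default), the cache is skipped and
// the range is served directly from remote storage as a detached SKIP_CACHE segment.
// shouldBypassCache is a hypothetical helper mirroring that check.
#include <cstddef>

static constexpr size_t kDefaultBypassThreshold = 256 * 1024 * 1024;

inline bool shouldBypassCache(bool bypass_enabled, size_t range_size,
                              size_t threshold = kDefaultBypassThreshold)
{
    // Huge sequential reads would only churn the cache; hand them straight to the remote FS.
    return bypass_enabled && range_size > threshold;
}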
with either EMPTY, DOWNLOADED or SKIP_CACHE state"); } } } diff --git a/src/Interpreters/Cache/FileSegment.h b/src/Interpreters/Cache/FileSegment.h index 617e7173c2f..8f9c0097d77 100644 --- a/src/Interpreters/Cache/FileSegment.h +++ b/src/Interpreters/Cache/FileSegment.h @@ -66,10 +66,10 @@ public: */ DOWNLOADING, /** - * Space reservation for a file segment is incremental, i.e. downaloder reads buffer_size bytes + * Space reservation for a file segment is incremental, i.e. downloader reads buffer_size bytes * from remote fs -> tries to reserve buffer_size bytes to put them to cache -> writes to cache * on successful reservation and stops cache write otherwise. Those, who waited for the same file - * file segment, will read downloaded part from cache and remaining part directly from remote fs. + * segment, will read downloaded part from cache and remaining part directly from remote fs. */ PARTIALLY_DOWNLOADED_NO_CONTINUATION, /** diff --git a/src/Interpreters/Cluster.cpp b/src/Interpreters/Cluster.cpp index 6877c0ece06..b76434b23e7 100644 --- a/src/Interpreters/Cluster.cpp +++ b/src/Interpreters/Cluster.cpp @@ -423,7 +423,7 @@ Cluster::Cluster(const Poco::Util::AbstractConfiguration & config, info.all_addresses.push_back(address); auto pool = ConnectionPoolFactory::instance().get( - settings.distributed_connections_pool_size, + static_cast(settings.distributed_connections_pool_size), address.host_name, address.port, address.default_database, address.user, address.password, address.quota_key, address.cluster, address.cluster_secret, @@ -497,7 +497,7 @@ Cluster::Cluster(const Poco::Util::AbstractConfiguration & config, for (const auto & replica : replica_addresses) { auto replica_pool = ConnectionPoolFactory::instance().get( - settings.distributed_connections_pool_size, + static_cast(settings.distributed_connections_pool_size), replica.host_name, replica.port, replica.default_database, replica.user, replica.password, replica.quota_key, replica.cluster, replica.cluster_secret, @@ -585,11 +585,11 @@ Cluster::Cluster( for (const auto & replica : current) { auto replica_pool = ConnectionPoolFactory::instance().get( - settings.distributed_connections_pool_size, - replica.host_name, replica.port, - replica.default_database, replica.user, replica.password, replica.quota_key, - replica.cluster, replica.cluster_secret, - "server", replica.compression, replica.secure, replica.priority); + static_cast(settings.distributed_connections_pool_size), + replica.host_name, replica.port, + replica.default_database, replica.user, replica.password, replica.quota_key, + replica.cluster, replica.cluster_secret, + "server", replica.compression, replica.secure, replica.priority); all_replicas.emplace_back(replica_pool); if (replica.is_local && !treat_local_as_remote) shard_local_addresses.push_back(replica); @@ -693,7 +693,7 @@ Cluster::Cluster(Cluster::ReplicasAsShardsTag, const Cluster & from, const Setti info.all_addresses.push_back(address); auto pool = ConnectionPoolFactory::instance().get( - settings.distributed_connections_pool_size, + static_cast(settings.distributed_connections_pool_size), address.host_name, address.port, address.default_database, diff --git a/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp b/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp index fce2e9b2f08..4653491aac9 100644 --- a/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp +++ b/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp @@ -69,7 +69,7 @@ void SelectStreamFactory::createForShard( query_ast, header, 
context, processed_stage, shard_info.shard_num, shard_count, /*replica_num=*/0, /*replica_count=*/0, /*coordinator=*/nullptr)); }; - auto emplace_remote_stream = [&](bool lazy = false, UInt32 local_delay = 0) + auto emplace_remote_stream = [&](bool lazy = false, time_t local_delay = 0) { remote_shards.emplace_back(Shard{ .query = query_ast, @@ -131,7 +131,7 @@ void SelectStreamFactory::createForShard( return; } - UInt32 local_delay = replicated_storage->getAbsoluteDelay(); + UInt64 local_delay = replicated_storage->getAbsoluteDelay(); if (local_delay < max_allowed_delay) { @@ -205,7 +205,7 @@ SelectStreamFactory::ShardPlans SelectStreamFactory::createForShardWithParallelR if (!max_allowed_delay) return false; - UInt32 local_delay = replicated_storage->getAbsoluteDelay(); + UInt64 local_delay = replicated_storage->getAbsoluteDelay(); return local_delay >= max_allowed_delay; }; diff --git a/src/Interpreters/ClusterProxy/SelectStreamFactory.h b/src/Interpreters/ClusterProxy/SelectStreamFactory.h index 440017a8e80..8ebddea4988 100644 --- a/src/Interpreters/ClusterProxy/SelectStreamFactory.h +++ b/src/Interpreters/ClusterProxy/SelectStreamFactory.h @@ -46,7 +46,7 @@ public: /// If we connect to replicas lazily. /// (When there is a local replica with big delay). bool lazy = false; - UInt32 local_delay = 0; + time_t local_delay = 0; }; using Shards = std::vector; diff --git a/src/Interpreters/ClusterProxy/executeQuery.cpp b/src/Interpreters/ClusterProxy/executeQuery.cpp index d974721627e..e9ec38f3806 100644 --- a/src/Interpreters/ClusterProxy/executeQuery.cpp +++ b/src/Interpreters/ClusterProxy/executeQuery.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -26,7 +27,7 @@ namespace ErrorCodes namespace ClusterProxy { -ContextMutablePtr updateSettingsForCluster(const Cluster & cluster, ContextPtr context, const Settings & settings, Poco::Logger * log) +ContextMutablePtr updateSettingsForCluster(const Cluster & cluster, ContextPtr context, const Settings & settings, const StorageID & main_table, const SelectQueryInfo * query_info, Poco::Logger * log) { Settings new_settings = settings; new_settings.queue_max_wait_ms = Cluster::saturate(new_settings.queue_max_wait_ms, settings.max_execution_time); @@ -96,6 +97,20 @@ ContextMutablePtr updateSettingsForCluster(const Cluster & cluster, ContextPtr c new_settings.limit.changed = false; } + /// Setting additional_table_filters may be applied to Distributed table. + /// In case if query is executed up to WithMergableState on remote shard, it is impossible to filter on initiator. + /// We need to propagate the setting, but change the table name from distributed to source. + /// + /// Here we don't try to analyze setting again. In case if query_info->additional_filter_ast is not empty, some filter was applied. + /// It's just easier to add this filter for a source table. 
+ if (query_info && query_info->additional_filter_ast) + { + Tuple tuple; + tuple.push_back(main_table.getShortName()); + tuple.push_back(queryToString(query_info->additional_filter_ast)); + new_settings.additional_table_filters.value.push_back(std::move(tuple)); + } + auto new_context = Context::createCopy(context); new_context->setSettings(new_settings); return new_context; @@ -121,12 +136,12 @@ void executeQuery( std::vector plans; SelectStreamFactory::Shards remote_shards; - auto new_context = updateSettingsForCluster(*query_info.getCluster(), context, settings, log); + auto new_context = updateSettingsForCluster(*query_info.getCluster(), context, settings, main_table, &query_info, log); new_context->getClientInfo().distributed_depth += 1; ThrottlerPtr user_level_throttler; - if (auto * process_list_element = context->getProcessListElement()) + if (auto process_list_element = context->getProcessListElement()) user_level_throttler = process_list_element->getUserNetworkThrottler(); /// Network bandwidth limit, if needed. @@ -165,7 +180,7 @@ void executeQuery( stream_factory.createForShard(shard_info, query_ast_for_shard, main_table, table_func_ptr, - new_context, plans, remote_shards, shards); + new_context, plans, remote_shards, static_cast(shards)); } if (!remote_shards.empty()) @@ -228,7 +243,7 @@ void executeQueryWithParallelReplicas( const Settings & settings = context->getSettingsRef(); ThrottlerPtr user_level_throttler; - if (auto * process_list_element = context->getProcessListElement()) + if (auto process_list_element = context->getProcessListElement()) user_level_throttler = process_list_element->getUserNetworkThrottler(); /// Network bandwidth limit, if needed. @@ -269,7 +284,8 @@ void executeQueryWithParallelReplicas( query_ast_for_shard = query_ast; auto shard_plans = stream_factory.createForShardWithParallelReplicas(shard_info, - query_ast_for_shard, main_table, table_func_ptr, throttler, context, shards, query_info.storage_limits); + query_ast_for_shard, main_table, table_func_ptr, throttler, context, + static_cast(shards), query_info.storage_limits); if (!shard_plans.local_plan && !shard_plans.remote_plan) throw Exception(ErrorCodes::LOGICAL_ERROR, "No plans were generated for reading from shard. This is a bug"); diff --git a/src/Interpreters/ClusterProxy/executeQuery.h b/src/Interpreters/ClusterProxy/executeQuery.h index 1a5035015a7..ac88752ce74 100644 --- a/src/Interpreters/ClusterProxy/executeQuery.h +++ b/src/Interpreters/ClusterProxy/executeQuery.h @@ -35,7 +35,7 @@ class SelectStreamFactory; /// /// @return new Context with adjusted settings ContextMutablePtr updateSettingsForCluster( - const Cluster & cluster, ContextPtr context, const Settings & settings, Poco::Logger * log = nullptr); + const Cluster & cluster, ContextPtr context, const Settings & settings, const StorageID & main_table, const SelectQueryInfo * query_info = nullptr, Poco::Logger * log = nullptr); /// Execute a distributed query, creating a query plan, from which the query pipeline can be built. 
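// Sketch of the setting propagation explained in the comment above: when a query over a
// Distributed table carries an additional filter, the shard-side setting is rebuilt as a
// (local table name, filter text) pair so that remote servers apply it to the source table.
// FilterEntry and propagateFilter are hypothetical standalone names.
#include <string>
#include <utility>
#include <vector>

using FilterEntry = std::pair<std::string, std::string>;   // {table, filter expression}

void propagateFilter(std::vector<FilterEntry> & shard_settings,
                     const std::string & local_table_short_name,
                     const std::string & filter_ast_text)
{
    if (filter_ast_text.empty())
        return;                                             // nothing was applied on the initiator
    shard_settings.emplace_back(local_table_short_name, filter_ast_text);
}

// propagateFilter(settings, "hits_local", "CounterID = 34");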
/// `stream_factory` object encapsulates the logic of creating plans for a different type of query diff --git a/src/Interpreters/ConcurrentHashJoin.cpp b/src/Interpreters/ConcurrentHashJoin.cpp index b7ad56dca91..cc79a71245b 100644 --- a/src/Interpreters/ConcurrentHashJoin.cpp +++ b/src/Interpreters/ConcurrentHashJoin.cpp @@ -38,7 +38,7 @@ static UInt32 toPowerOfTwo(UInt32 x) ConcurrentHashJoin::ConcurrentHashJoin(ContextPtr context_, std::shared_ptr table_join_, size_t slots_, const Block & right_sample_block, bool any_take_last_row_) : context(context_) , table_join(table_join_) - , slots(toPowerOfTwo(std::min(slots_, 256))) + , slots(toPowerOfTwo(std::min(static_cast(slots_), 256))) { for (size_t i = 0; i < slots; ++i) { diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index e82e821beac..b1e44564c5d 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -465,6 +465,18 @@ struct ContextSharedPart : boost::noncopyable std::unique_ptr delete_ddl_worker; std::unique_ptr delete_access_control; + /// Delete DDLWorker before zookeeper. + /// Cause it can call Context::getZooKeeper and resurrect it. + + { + auto lock = std::lock_guard(mutex); + delete_ddl_worker = std::move(ddl_worker); + } + + /// DDLWorker should be deleted without lock, cause its internal thread can + /// take it as well, which will cause deadlock. + delete_ddl_worker.reset(); + { auto lock = std::lock_guard(mutex); @@ -501,7 +513,6 @@ struct ContextSharedPart : boost::noncopyable delete_schedule_pool = std::move(schedule_pool); delete_distributed_schedule_pool = std::move(distributed_schedule_pool); delete_message_broker_schedule_pool = std::move(message_broker_schedule_pool); - delete_ddl_worker = std::move(ddl_worker); delete_access_control = std::move(access_control); /// Stop trace collector if any @@ -530,7 +541,6 @@ struct ContextSharedPart : boost::noncopyable delete_schedule_pool.reset(); delete_distributed_schedule_pool.reset(); delete_message_broker_schedule_pool.reset(); - delete_ddl_worker.reset(); delete_access_control.reset(); total_memory_tracker.resetOvercommitTracker(); @@ -1511,10 +1521,8 @@ void Context::setCurrentQueryId(const String & query_id) void Context::killCurrentQuery() { - if (process_list_elem) - { - process_list_elem->cancelQuery(true); - } + if (auto elem = process_list_elem.lock()) + elem->cancelQuery(true); } String Context::getDefaultFormat() const @@ -1764,15 +1772,15 @@ ProgressCallback Context::getProgressCallback() const } -void Context::setProcessListElement(ProcessList::Element * elem) +void Context::setProcessListElement(QueryStatusPtr elem) { /// Set to a session or query. In the session, only one query is processed at a time. Therefore, the lock is not needed. 
process_list_elem = elem; } -ProcessList::Element * Context::getProcessListElement() const +QueryStatusPtr Context::getProcessListElement() const { - return process_list_elem; + return process_list_elem.lock(); } @@ -2120,7 +2128,12 @@ zkutil::ZooKeeperPtr Context::getZooKeeper() const if (!shared->zookeeper) shared->zookeeper = std::make_shared(config, "zookeeper", getZooKeeperLog()); else if (shared->zookeeper->expired()) + { + Stopwatch watch; + LOG_DEBUG(shared->log, "Trying to establish a new connection with ZooKeeper"); shared->zookeeper = shared->zookeeper->startNewSession(); + LOG_DEBUG(shared->log, "Establishing a new connection with ZooKeeper took {} ms", watch.elapsedMilliseconds()); + } return shared->zookeeper; } @@ -3691,6 +3704,7 @@ WriteSettings Context::getWriteSettings() const res.enable_filesystem_cache_on_write_operations = settings.enable_filesystem_cache_on_write_operations; res.enable_filesystem_cache_log = settings.enable_filesystem_cache_log; + res.s3_allow_parallel_part_upload = settings.s3_allow_parallel_part_upload; res.remote_throttler = getRemoteWriteThrottler(); diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h index 1c06aca49cb..174fa0c3433 100644 --- a/src/Interpreters/Context.h +++ b/src/Interpreters/Context.h @@ -69,6 +69,7 @@ class MMappedFileCache; class UncompressedCache; class ProcessList; class QueryStatus; +using QueryStatusPtr = std::shared_ptr; class Macros; struct Progress; struct FileProgress; @@ -231,7 +232,7 @@ private: using FileProgressCallback = std::function; FileProgressCallback file_progress_callback; /// Callback for tracking progress of file loading. - QueryStatus * process_list_elem = nullptr; /// For tracking total resource usage for query. + std::weak_ptr process_list_elem; /// For tracking total resource usage for query. StorageID insertion_table = StorageID::createEmpty(); /// Saved insertion table in query context bool is_distributed = false; /// Whether the current context it used for distributed query @@ -759,9 +760,9 @@ public: /** Set in executeQuery and InterpreterSelectQuery. Then it is used in QueryPipeline, * to update and monitor information about the total number of resources spent for the query. */ - void setProcessListElement(QueryStatus * elem); + void setProcessListElement(QueryStatusPtr elem); /// Can return nullptr if the query was not inserted into the ProcessList. - QueryStatus * getProcessListElement() const; + QueryStatusPtr getProcessListElement() const; /// List all queries. 
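A minimal sketch of the new accessor contract, with a hypothetical caller: process_list_elem is now a weak_ptr, so consumers must lock() it and tolerate a query that has already left the process list instead of dereferencing a possibly dangling raw pointer.

    if (QueryStatusPtr status = context->getProcessListElement())
        status->cancelQuery(/* kill = */ true);   /// the shared_ptr keeps the status alive while it is used
    /// else: the query has already finished and was removed from the process list; nothing to do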
ProcessList & getProcessList(); diff --git a/src/Interpreters/DDLWorker.cpp b/src/Interpreters/DDLWorker.cpp index 8873d851de1..6bfa9ecd591 100644 --- a/src/Interpreters/DDLWorker.cpp +++ b/src/Interpreters/DDLWorker.cpp @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -114,7 +115,7 @@ DDLWorker::DDLWorker( void DDLWorker::startup() { [[maybe_unused]] bool prev_stop_flag = stop_flag.exchange(false); - chassert(true); + chassert(prev_stop_flag); main_thread = ThreadFromGlobalPool(&DDLWorker::runMainThread, this); cleanup_thread = ThreadFromGlobalPool(&DDLWorker::runCleanupThread, this); } @@ -498,7 +499,7 @@ bool DDLWorker::tryExecuteQuery(const String & query, DDLTaskBase & task, const void DDLWorker::updateMaxDDLEntryID(const String & entry_name) { - UInt64 id = DDLTaskBase::getLogEntryNumber(entry_name); + UInt32 id = DDLTaskBase::getLogEntryNumber(entry_name); auto prev_id = max_id.load(std::memory_order_relaxed); while (prev_id < id) { @@ -532,7 +533,8 @@ void DDLWorker::processTask(DDLTaskBase & task, const ZooKeeperPtr & zookeeper) auto active_node = zkutil::EphemeralNodeHolder::existing(active_node_path, *zookeeper); /// Try fast path - auto create_active_res = zookeeper->tryCreate(active_node_path, {}, zkutil::CreateMode::Ephemeral); + const String canary_value = Field(ServerUUID::get()).dump(); + auto create_active_res = zookeeper->tryCreate(active_node_path, canary_value, zkutil::CreateMode::Ephemeral); if (create_active_res != Coordination::Error::ZOK) { if (create_active_res != Coordination::Error::ZNONODE && create_active_res != Coordination::Error::ZNODEEXISTS) @@ -563,10 +565,10 @@ void DDLWorker::processTask(DDLTaskBase & task, const ZooKeeperPtr & zookeeper) { /// Connection has been lost and now we are retrying, /// but our previous ephemeral node still exists. 
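The purpose of the canary value can be illustrated with a small sketch, assuming the usual zkutil::ZooKeeper::tryGet semantics: storing the server UUID in the ephemeral node lets a retrying worker tell its own stale node apart from a node owned by another replica.

    String existing_value;
    bool node_exists = zookeeper->tryGet(active_node_path, existing_value);
    if (node_exists && existing_value == canary_value)
    {
        /// Our previous session created this node; wait for it to expire and then recreate it.
    }
    else if (node_exists)
    {
        /// Another server owns the node, so the task is already being processed elsewhere.
    }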
- zookeeper->waitForEphemeralToDisappearIfAny(active_node_path); + zookeeper->handleEphemeralNodeExistence(active_node_path, canary_value); } - zookeeper->create(active_node_path, {}, zkutil::CreateMode::Ephemeral); + zookeeper->create(active_node_path, canary_value, zkutil::CreateMode::Ephemeral); } /// We must hold the lock until task execution status is committed to ZooKeeper, diff --git a/src/Interpreters/DDLWorker.h b/src/Interpreters/DDLWorker.h index e3c1fa4c271..5aea460ad2e 100644 --- a/src/Interpreters/DDLWorker.h +++ b/src/Interpreters/DDLWorker.h @@ -159,7 +159,7 @@ protected: /// How many tasks could be in the queue size_t max_tasks_in_queue = 1000; - std::atomic max_id = 0; + std::atomic max_id = 0; const CurrentMetrics::Metric * max_entry_metric; const CurrentMetrics::Metric * max_pushed_entry_metric; }; diff --git a/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp index ab6f088ac69..7ceb0bf3a00 100644 --- a/src/Interpreters/DatabaseCatalog.cpp +++ b/src/Interpreters/DatabaseCatalog.cpp @@ -144,9 +144,9 @@ StoragePtr TemporaryTableHolder::getTable() const void DatabaseCatalog::initializeAndLoadTemporaryDatabase() { drop_delay_sec = getContext()->getConfigRef().getInt("database_atomic_delay_before_drop_table_sec", default_drop_delay_sec); - unused_dir_hide_timeout_sec = getContext()->getConfigRef().getInt("database_catalog_unused_dir_hide_timeout_sec", unused_dir_hide_timeout_sec); - unused_dir_rm_timeout_sec = getContext()->getConfigRef().getInt("database_catalog_unused_dir_rm_timeout_sec", unused_dir_rm_timeout_sec); - unused_dir_cleanup_period_sec = getContext()->getConfigRef().getInt("database_catalog_unused_dir_cleanup_period_sec", unused_dir_cleanup_period_sec); + unused_dir_hide_timeout_sec = getContext()->getConfigRef().getInt64("database_catalog_unused_dir_hide_timeout_sec", unused_dir_hide_timeout_sec); + unused_dir_rm_timeout_sec = getContext()->getConfigRef().getInt64("database_catalog_unused_dir_rm_timeout_sec", unused_dir_rm_timeout_sec); + unused_dir_cleanup_period_sec = getContext()->getConfigRef().getInt64("database_catalog_unused_dir_cleanup_period_sec", unused_dir_cleanup_period_sec); auto db_for_temporary_and_external_tables = std::make_shared(TEMPORARY_DATABASE, getContext()); attachDatabase(TEMPORARY_DATABASE, db_for_temporary_and_external_tables); diff --git a/src/Interpreters/DirectJoin.cpp b/src/Interpreters/DirectJoin.cpp index 02b3854a47b..e148db1d8e6 100644 --- a/src/Interpreters/DirectJoin.cpp +++ b/src/Interpreters/DirectJoin.cpp @@ -93,6 +93,16 @@ DirectKeyValueJoin::DirectKeyValueJoin(std::shared_ptr table_join_, LOG_TRACE(log, "Using direct join"); } +DirectKeyValueJoin::DirectKeyValueJoin( + std::shared_ptr table_join_, + const Block & right_sample_block_, + std::shared_ptr storage_, + const Block & right_sample_block_with_storage_column_names_) + : DirectKeyValueJoin(table_join_, right_sample_block_, storage_) +{ + right_sample_block_with_storage_column_names = right_sample_block_with_storage_column_names_; +} + bool DirectKeyValueJoin::addJoinedBlock(const Block &, bool) { throw DB::Exception(ErrorCodes::LOGICAL_ERROR, "Unreachable code reached"); @@ -114,14 +124,15 @@ void DirectKeyValueJoin::joinBlock(Block & block, std::shared_ptr &) return; Block original_right_block = originalRightBlock(right_sample_block, *table_join); - const Names & attribute_names = original_right_block.getNames(); + Block right_block_to_use = right_sample_block_with_storage_column_names ? 
right_sample_block_with_storage_column_names : original_right_block; + const Names & attribute_names = right_block_to_use.getNames(); NullMap null_map; Chunk joined_chunk = storage->getByKeys({key_col}, null_map, attribute_names); /// Expected right block may differ from structure in storage, because of `join_use_nulls` or we just select not all joined attributes Block sample_storage_block = storage->getSampleBlock(attribute_names); - MutableColumns result_columns = convertBlockStructure(sample_storage_block, original_right_block, joined_chunk.mutateColumns(), null_map); + MutableColumns result_columns = convertBlockStructure(sample_storage_block, right_block_to_use, joined_chunk.mutateColumns(), null_map); for (size_t i = 0; i < result_columns.size(); ++i) { diff --git a/src/Interpreters/DirectJoin.h b/src/Interpreters/DirectJoin.h index 8e82b59da02..6a6f4505474 100644 --- a/src/Interpreters/DirectJoin.h +++ b/src/Interpreters/DirectJoin.h @@ -25,6 +25,12 @@ public: const Block & right_sample_block_, std::shared_ptr storage_); + DirectKeyValueJoin( + std::shared_ptr table_join_, + const Block & right_sample_block_, + std::shared_ptr storage_, + const Block & right_sample_block_with_storage_column_names_); + virtual const TableJoin & getTableJoin() const override { return *table_join; } virtual bool addJoinedBlock(const Block &, bool) override; @@ -52,6 +58,7 @@ private: std::shared_ptr table_join; std::shared_ptr storage; Block right_sample_block; + Block right_sample_block_with_storage_column_names; Block sample_block_with_columns_to_add; Poco::Logger * log; diff --git a/src/Interpreters/ExpressionActions.cpp b/src/Interpreters/ExpressionActions.cpp index b27df0f1c35..9b38072b5af 100644 --- a/src/Interpreters/ExpressionActions.cpp +++ b/src/Interpreters/ExpressionActions.cpp @@ -1073,8 +1073,8 @@ void ExpressionActionsChain::JoinStep::finalize(const NameSet & required_output_ } /// Result will also contain joined columns. - for (const auto & column_name : analyzed_join->columnsAddedByJoin()) - required_names.emplace(column_name); + for (const auto & column : analyzed_join->columnsAddedByJoin()) + required_names.emplace(column.name); for (const auto & column : result_columns) { diff --git a/src/Interpreters/ExternalLoader.cpp b/src/Interpreters/ExternalLoader.cpp index 704dff325b7..ea2b9045120 100644 --- a/src/Interpreters/ExternalLoader.cpp +++ b/src/Interpreters/ExternalLoader.cpp @@ -714,7 +714,10 @@ public: /// Object was never loaded successfully and should be reloaded. 
startLoading(info); } - LOG_TRACE(log, "Object '{}' is neither loaded nor failed, so it will not be reloaded as outdated.", info.name); + else + { + LOG_TRACE(log, "Object '{}' is neither loaded nor failed, so it will not be reloaded as outdated.", info.name); + } } } } diff --git a/src/Interpreters/HashJoin.cpp b/src/Interpreters/HashJoin.cpp index 26b9b843567..41c7c28a6fa 100644 --- a/src/Interpreters/HashJoin.cpp +++ b/src/Interpreters/HashJoin.cpp @@ -658,7 +658,9 @@ void HashJoin::initRightBlockStructure(Block & saved_block_sample) /// Save non key columns for (auto & column : sample_block_with_columns_to_add) { - if (!saved_block_sample.findByName(column.name)) + if (auto * col = saved_block_sample.findByName(column.name)) + *col = column; + else saved_block_sample.insert(column); } } diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index e89aa2244fe..4a6ce63eb84 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -12,17 +12,14 @@ #include #include -#include #include #include #include -#include #include #include #include -#include #include #include #include @@ -37,7 +34,6 @@ #include #include #include -#include #include #include #include @@ -59,7 +55,6 @@ #include #include -#include #include #include #include @@ -484,9 +479,8 @@ ColumnsDescription InterpreterCreateQuery::getColumnsDescription( { column_type = DataTypeFactory::instance().get(col_decl.type); - const auto * aggregate_function_type = typeid_cast(column_type.get()); - if (attach && aggregate_function_type && aggregate_function_type->isVersioned()) - aggregate_function_type->setVersion(0, /* if_empty */true); + if (attach) + setVersionToAggregateFunctions(column_type, true); if (col_decl.null_modifier) { @@ -835,7 +829,7 @@ void InterpreterCreateQuery::validateTableStructure(const ASTCreateQuery & creat { for (const auto & [name, type] : properties.columns.getAllPhysical()) { - if (isObject(type)) + if (type->hasDynamicSubcolumns()) { throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Cannot create table with column '{}' which type is '{}' " @@ -1404,7 +1398,7 @@ bool InterpreterCreateQuery::doCreateTable(ASTCreateQuery & create, /// we can safely destroy the object without a call to "shutdown", because there is guarantee /// that no background threads/similar resources remain after exception from "startup". 
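These checks now ask the data type itself whether it exposes dynamic subcolumns rather than special-casing the Object type; a minimal sketch, assuming the Object('json') type name:

    DataTypePtr type = DataTypeFactory::instance().get("Object('json')");
    bool dynamic = type->hasDynamicSubcolumns();   /// true: such columns get subcolumns that are only known at insert time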
- if (!res->supportsDynamicSubcolumns() && hasObjectColumns(res->getInMemoryMetadataPtr()->getColumns())) + if (!res->supportsDynamicSubcolumns() && hasDynamicSubcolumns(res->getInMemoryMetadataPtr()->getColumns())) { throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Cannot create table with column of type Object, " @@ -1704,8 +1698,12 @@ void InterpreterCreateQuery::addColumnsDescriptionToCreateQueryIfNecessary(ASTCr return; auto ast_storage = std::make_shared(); - auto query_from_storage = DB::getCreateQueryFromStorage(storage, ast_storage, false, - getContext()->getSettingsRef().max_parser_depth, true); + unsigned max_parser_depth = static_cast(getContext()->getSettingsRef().max_parser_depth); + auto query_from_storage = DB::getCreateQueryFromStorage(storage, + ast_storage, + false, + max_parser_depth, + true); auto & create_query_from_storage = query_from_storage->as(); if (!create.columns_list) diff --git a/src/Interpreters/InterpreterDescribeQuery.cpp b/src/Interpreters/InterpreterDescribeQuery.cpp index 0524feea1f6..512f9be6fa1 100644 --- a/src/Interpreters/InterpreterDescribeQuery.cpp +++ b/src/Interpreters/InterpreterDescribeQuery.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -17,7 +18,6 @@ #include #include - namespace DB { @@ -60,10 +60,9 @@ Block InterpreterDescribeQuery::getSampleBlock(bool include_subcolumns) return block; } - BlockIO InterpreterDescribeQuery::execute() { - ColumnsDescription columns; + std::vector columns; StorageSnapshotPtr storage_snapshot; const auto & ast = query_ptr->as(); @@ -72,14 +71,34 @@ BlockIO InterpreterDescribeQuery::execute() if (table_expression.subquery) { - auto names_and_types = InterpreterSelectWithUnionQuery::getSampleBlock( - table_expression.subquery->children.at(0), getContext()).getNamesAndTypesList(); - columns = ColumnsDescription(std::move(names_and_types)); + NamesAndTypesList names_and_types; + auto select_query = table_expression.subquery->children.at(0); + auto current_context = getContext(); + + if (settings.allow_experimental_analyzer) + { + SelectQueryOptions select_query_options; + names_and_types = InterpreterSelectQueryAnalyzer(select_query, select_query_options, current_context).getSampleBlock().getNamesAndTypesList(); + } + else + { + names_and_types = InterpreterSelectWithUnionQuery::getSampleBlock(select_query, current_context).getNamesAndTypesList(); + } + + for (auto && [name, type] : names_and_types) + { + ColumnDescription description; + description.name = std::move(name); + description.type = std::move(type); + columns.emplace_back(std::move(description)); + } } else if (table_expression.table_function) { TableFunctionPtr table_function_ptr = TableFunctionFactory::instance().get(table_expression.table_function, getContext()); - columns = table_function_ptr->getActualTableStructure(getContext()); + auto table_function_column_descriptions = table_function_ptr->getActualTableStructure(getContext()); + for (const auto & table_function_column_description : table_function_column_descriptions) + columns.emplace_back(table_function_column_description); } else { @@ -90,7 +109,9 @@ BlockIO InterpreterDescribeQuery::execute() auto metadata_snapshot = table->getInMemoryMetadataPtr(); storage_snapshot = table->getStorageSnapshot(metadata_snapshot, getContext()); - columns = metadata_snapshot->getColumns(); + auto metadata_column_descriptions = metadata_snapshot->getColumns(); + for (const auto & metadata_column_description : metadata_column_descriptions) + 
columns.emplace_back(metadata_column_description); } bool extend_object_types = settings.describe_extend_object_types && storage_snapshot; diff --git a/src/Interpreters/InterpreterDropQuery.cpp b/src/Interpreters/InterpreterDropQuery.cpp index 8be3dce7bf1..28f8e43ee9b 100644 --- a/src/Interpreters/InterpreterDropQuery.cpp +++ b/src/Interpreters/InterpreterDropQuery.cpp @@ -213,7 +213,7 @@ BlockIO InterpreterDropQuery::executeToTableImpl(ContextPtr context_, ASTDropQue { /// And for simple MergeTree we can stop merges before acquiring the lock auto merges_blocker = table->getActionLock(ActionLocks::PartsMerge); - auto table_lock = table->lockExclusively(context_->getCurrentQueryId(), context_->getSettingsRef().lock_acquire_timeout); + table_lock = table->lockExclusively(context_->getCurrentQueryId(), context_->getSettingsRef().lock_acquire_timeout); } auto metadata_snapshot = table->getInMemoryMetadataPtr(); diff --git a/src/Interpreters/InterpreterExplainQuery.cpp b/src/Interpreters/InterpreterExplainQuery.cpp index 4799970b6a1..fb8d3c6049f 100644 --- a/src/Interpreters/InterpreterExplainQuery.cpp +++ b/src/Interpreters/InterpreterExplainQuery.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -28,6 +29,9 @@ #include +#include +#include + namespace DB { @@ -155,6 +159,30 @@ struct QueryASTSettings {"graph", graph}, {"optimize", optimize} }; + + std::unordered_map> integer_settings; +}; + +struct QueryTreeSettings +{ + bool run_passes = false; + bool dump_passes = false; + bool dump_ast = false; + Int64 passes = -1; + + constexpr static char name[] = "QUERY TREE"; + + std::unordered_map> boolean_settings = + { + {"run_passes", run_passes}, + {"dump_passes", dump_passes}, + {"dump_ast", dump_ast} + }; + + std::unordered_map> integer_settings = + { + {"passes", passes} + }; }; struct QueryPlanSettings @@ -177,6 +205,8 @@ struct QueryPlanSettings {"json", json}, {"sorting", query_plan_options.sorting}, }; + + std::unordered_map> integer_settings; }; struct QueryPipelineSettings @@ -193,18 +223,31 @@ struct QueryPipelineSettings {"graph", graph}, {"compact", compact}, }; + + std::unordered_map> integer_settings; }; template struct ExplainSettings : public Settings { using Settings::boolean_settings; + using Settings::integer_settings; bool has(const std::string & name_) const + { + return hasBooleanSetting(name_) || hasIntegerSetting(name_); + } + + bool hasBooleanSetting(const std::string & name_) const { return boolean_settings.count(name_) > 0; } + bool hasIntegerSetting(const std::string & name_) const + { + return integer_settings.count(name_) > 0; + } + void setBooleanSetting(const std::string & name_, bool value) { auto it = boolean_settings.find(name_); @@ -214,6 +257,15 @@ struct ExplainSettings : public Settings it->second.get() = value; } + void setIntegerSetting(const std::string & name_, Int64 value) + { + auto it = integer_settings.find(name_); + if (it == integer_settings.end()) + throw Exception("Unknown setting for ExplainSettings: " + name_, ErrorCodes::LOGICAL_ERROR); + + it->second.get() = value; + } + std::string getSettingsList() const { std::string res; @@ -224,6 +276,13 @@ struct ExplainSettings : public Settings res += setting.first; } + for (const auto & setting : integer_settings) + { + if (!res.empty()) + res += ", "; + + res += setting.first; + } return res; } @@ -246,15 +305,23 @@ ExplainSettings checkAndGetSettings(const ASTPtr & ast_settings) if (change.value.getType() != Field::Types::UInt64) throw 
Exception(ErrorCodes::INVALID_SETTING_VALUE, - "Invalid type {} for setting \"{}\" only boolean settings are supported", + "Invalid type {} for setting \"{}\" only integer settings are supported", change.value.getTypeName(), change.name); - auto value = change.value.get(); - if (value > 1) - throw Exception("Invalid value " + std::to_string(value) + " for setting \"" + change.name + - "\". Only boolean settings are supported", ErrorCodes::INVALID_SETTING_VALUE); + if (settings.hasBooleanSetting(change.name)) + { + auto value = change.value.get(); + if (value > 1) + throw Exception("Invalid value " + std::to_string(value) + " for setting \"" + change.name + + "\". Expected boolean type", ErrorCodes::INVALID_SETTING_VALUE); - settings.setBooleanSetting(change.name, value); + settings.setBooleanSetting(change.name, value); + } + else + { + auto value = change.value.get(); + settings.setIntegerSetting(change.name, value); + } } return settings; @@ -304,6 +371,46 @@ QueryPipeline InterpreterExplainQuery::executeImpl() ast.getExplainedQuery()->format(IAST::FormatSettings(buf, false)); break; } + case ASTExplainQuery::QueryTree: + { + if (ast.getExplainedQuery()->as() == nullptr) + throw Exception(ErrorCodes::INCORRECT_QUERY, "Only SELECT is supported for EXPLAIN QUERY TREE query"); + + auto settings = checkAndGetSettings(ast.getSettings()); + auto query_tree = buildQueryTree(ast.getExplainedQuery(), getContext()); + + if (settings.run_passes) + { + auto query_tree_pass_manager = QueryTreePassManager(getContext()); + addQueryTreePasses(query_tree_pass_manager); + + size_t pass_index = settings.passes < 0 ? query_tree_pass_manager.getPasses().size() : static_cast(settings.passes); + + if (settings.dump_passes) + { + query_tree_pass_manager.dump(buf, pass_index); + if (pass_index > 0) + buf << '\n'; + } + + query_tree_pass_manager.run(query_tree, pass_index); + + query_tree->dumpTree(buf); + } + else + { + query_tree->dumpTree(buf); + } + + if (settings.dump_ast) + { + buf << '\n'; + buf << '\n'; + query_tree->toAST()->format(IAST::FormatSettings(buf, false)); + } + + break; + } case ASTExplainQuery::QueryPlan: { if (!dynamic_cast(ast.getExplainedQuery().get())) @@ -312,8 +419,16 @@ QueryPipeline InterpreterExplainQuery::executeImpl() auto settings = checkAndGetSettings(ast.getSettings()); QueryPlan plan; - InterpreterSelectWithUnionQuery interpreter(ast.getExplainedQuery(), getContext(), options); - interpreter.buildQueryPlan(plan); + if (getContext()->getSettingsRef().allow_experimental_analyzer) + { + InterpreterSelectQueryAnalyzer interpreter(ast.getExplainedQuery(), options, getContext()); + plan = std::move(interpreter).extractQueryPlan(); + } + else + { + InterpreterSelectWithUnionQuery interpreter(ast.getExplainedQuery(), getContext(), options); + interpreter.buildQueryPlan(plan); + } if (settings.optimize) plan.optimize(QueryPlanOptimizationSettings::fromContext(getContext())); @@ -347,8 +462,17 @@ QueryPipeline InterpreterExplainQuery::executeImpl() auto settings = checkAndGetSettings(ast.getSettings()); QueryPlan plan; - InterpreterSelectWithUnionQuery interpreter(ast.getExplainedQuery(), getContext(), options); - interpreter.buildQueryPlan(plan); + if (getContext()->getSettingsRef().allow_experimental_analyzer) + { + InterpreterSelectQueryAnalyzer interpreter(ast.getExplainedQuery(), options, getContext()); + plan = std::move(interpreter).extractQueryPlan(); + } + else + { + InterpreterSelectWithUnionQuery interpreter(ast.getExplainedQuery(), getContext(), options); + 
interpreter.buildQueryPlan(plan); + } + auto pipeline = plan.buildQueryPipeline( QueryPlanOptimizationSettings::fromContext(getContext()), BuildQueryPipelineSettings::fromContext(getContext())); diff --git a/src/Interpreters/InterpreterFactory.cpp b/src/Interpreters/InterpreterFactory.cpp index ca0a59c0c1a..06d5746af59 100644 --- a/src/Interpreters/InterpreterFactory.cpp +++ b/src/Interpreters/InterpreterFactory.cpp @@ -63,6 +63,7 @@ #include #include #include +#include #include #include #include @@ -118,6 +119,9 @@ std::unique_ptr InterpreterFactory::get(ASTPtr & query, ContextMut if (query->as()) { + if (context->getSettingsRef().allow_experimental_analyzer) + return std::make_unique(query, options, context); + /// This is internal part of ASTSelectWithUnionQuery. /// Even if there is SELECT without union, it is represented by ASTSelectWithUnionQuery with single ASTSelectQuery as a child. return std::make_unique(query, context, options); @@ -125,6 +129,10 @@ std::unique_ptr InterpreterFactory::get(ASTPtr & query, ContextMut else if (query->as()) { ProfileEvents::increment(ProfileEvents::SelectQuery); + + if (context->getSettingsRef().allow_experimental_analyzer) + return std::make_unique(query, options, context); + return std::make_unique(query, context, options); } else if (query->as()) diff --git a/src/Interpreters/InterpreterInsertQuery.cpp b/src/Interpreters/InterpreterInsertQuery.cpp index 51a3dde261a..107740c3b96 100644 --- a/src/Interpreters/InterpreterInsertQuery.cpp +++ b/src/Interpreters/InterpreterInsertQuery.cpp @@ -292,7 +292,7 @@ Chain InterpreterInsertQuery::buildChainImpl( out.addSource(std::make_shared( out.getInputHeader(), table_prefers_large_blocks ? settings.min_insert_block_size_rows : settings.max_block_size, - table_prefers_large_blocks ? settings.min_insert_block_size_bytes : 0)); + table_prefers_large_blocks ? 
settings.min_insert_block_size_bytes : 0ULL)); } auto counting = std::make_shared(out.getInputHeader(), thread_status, getContext()->getQuota()); diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index 79deb38317c..d8ac263e3d1 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -117,7 +117,8 @@ FilterDAGInfoPtr generateFilterActions( const StoragePtr & storage, const StorageSnapshotPtr & storage_snapshot, const StorageMetadataPtr & metadata_snapshot, - Names & prerequisite_columns) + Names & prerequisite_columns, + PreparedSetsPtr prepared_sets) { auto filter_info = std::make_shared(); @@ -155,7 +156,7 @@ FilterDAGInfoPtr generateFilterActions( /// Using separate expression analyzer to prevent any possible alias injection auto syntax_result = TreeRewriter(context).analyzeSelect(query_ast, TreeRewriterResult({}, storage, storage_snapshot)); - SelectQueryExpressionAnalyzer analyzer(query_ast, syntax_result, context, metadata_snapshot); + SelectQueryExpressionAnalyzer analyzer(query_ast, syntax_result, context, metadata_snapshot, {}, false, {}, prepared_sets); filter_info->actions = analyzer.simpleSelectActions(); filter_info->column_name = expr_list->children.at(0)->getColumnName(); @@ -615,7 +616,8 @@ InterpreterSelectQuery::InterpreterSelectQuery( if (row_policy_filter) { filter_info = generateFilterActions( - table_id, row_policy_filter, context, storage, storage_snapshot, metadata_snapshot, required_columns); + table_id, row_policy_filter, context, storage, storage_snapshot, metadata_snapshot, required_columns, + prepared_sets); query_info.filter_asts.push_back(row_policy_filter); } @@ -623,7 +625,8 @@ InterpreterSelectQuery::InterpreterSelectQuery( if (query_info.additional_filter_ast) { additional_filter_info = generateFilterActions( - table_id, query_info.additional_filter_ast, context, storage, storage_snapshot, metadata_snapshot, required_columns); + table_id, query_info.additional_filter_ast, context, storage, storage_snapshot, metadata_snapshot, required_columns, + prepared_sets); additional_filter_info->do_remove_column = true; @@ -2143,6 +2146,8 @@ void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum proc auto [limit_length, limit_offset] = getLimitLengthAndOffset(query, context); + auto local_limits = getStorageLimits(*context, options); + /** Optimization - if not specified DISTINCT, WHERE, GROUP, HAVING, ORDER, JOIN, LIMIT BY, WITH TIES * but LIMIT is specified, and limit + offset < max_block_size, * then as the block size we will use limit + offset (not to read more from the table than requested), @@ -2161,17 +2166,22 @@ void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum proc && !query_analyzer->hasAggregation() && !query_analyzer->hasWindow() && query.limitLength() - && limit_length <= std::numeric_limits::max() - limit_offset - && limit_length + limit_offset < max_block_size) + && limit_length <= std::numeric_limits::max() - limit_offset) { - max_block_size = std::max(1, limit_length + limit_offset); - max_threads_execute_query = max_streams = 1; + if (limit_length + limit_offset < max_block_size) + { + max_block_size = std::max(1, limit_length + limit_offset); + max_threads_execute_query = max_streams = 1; + } + if (limit_length + limit_offset < local_limits.local_limits.size_limits.max_rows) + { + query_info.limit = limit_length + limit_offset; + } } if (!max_block_size) throw Exception("Setting 'max_block_size' 
cannot be zero", ErrorCodes::PARAMETER_OUT_OF_BOUND); - auto local_limits = getStorageLimits(*context, options); storage_limits.emplace_back(local_limits); /// Initialize the initial data streams to which the query transforms are superimposed. Table or subquery or prepared input? diff --git a/src/Interpreters/InterpreterSelectQueryAnalyzer.cpp b/src/Interpreters/InterpreterSelectQueryAnalyzer.cpp new file mode 100644 index 00000000000..61ec5932b7d --- /dev/null +++ b/src/Interpreters/InterpreterSelectQueryAnalyzer.cpp @@ -0,0 +1,120 @@ +#include + +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int UNSUPPORTED_METHOD; +} + +namespace +{ + +ASTPtr normalizeAndValidateQuery(const ASTPtr & query) +{ + if (query->as() || query->as()) + { + return query; + } + else if (auto * subquery = query->as()) + { + return subquery->children[0]; + } + else + { + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, + "Expected ASTSelectWithUnionQuery or ASTSelectQuery. Actual {}", + query->formatForErrorMessage()); + } +} + +QueryTreeNodePtr buildQueryTreeAndRunPasses(const ASTPtr & query, const ContextPtr & context) +{ + auto query_tree = buildQueryTree(query, context); + + QueryTreePassManager query_tree_pass_manager(context); + addQueryTreePasses(query_tree_pass_manager); + query_tree_pass_manager.run(query_tree); + + return query_tree; +} + +} + +InterpreterSelectQueryAnalyzer::InterpreterSelectQueryAnalyzer( + const ASTPtr & query_, + const SelectQueryOptions & select_query_options_, + ContextPtr context_) + : WithContext(context_) + , query(normalizeAndValidateQuery(query_)) + , query_tree(buildQueryTreeAndRunPasses(query, context_)) + , select_query_options(select_query_options_) + , planner(query_tree, select_query_options, context_) +{ +} + +InterpreterSelectQueryAnalyzer::InterpreterSelectQueryAnalyzer( + const QueryTreeNodePtr & query_tree_, + const SelectQueryOptions & select_query_options_, + ContextPtr context_) + : WithContext(context_) + , query(query_tree_->toAST()) + , query_tree(query_tree_) + , select_query_options(select_query_options_) + , planner(query_tree, select_query_options, context_) +{ +} + +Block InterpreterSelectQueryAnalyzer::getSampleBlock() +{ + planner.buildQueryPlanIfNeeded(); + return planner.getQueryPlan().getCurrentDataStream().header; +} + +BlockIO InterpreterSelectQueryAnalyzer::execute() +{ + planner.buildQueryPlanIfNeeded(); + auto & query_plan = planner.getQueryPlan(); + + QueryPlanOptimizationSettings optimization_settings; + BuildQueryPipelineSettings build_pipeline_settings; + auto pipeline_builder = query_plan.buildQueryPipeline(optimization_settings, build_pipeline_settings); + + BlockIO result; + result.pipeline = QueryPipelineBuilder::getPipeline(std::move(*pipeline_builder)); + + if (!select_query_options.ignore_quota && (select_query_options.to_stage == QueryProcessingStage::Complete)) + result.pipeline.setQuota(getContext()->getQuota()); + + return result; +} + +QueryPlan && InterpreterSelectQueryAnalyzer::extractQueryPlan() && +{ + planner.buildQueryPlanIfNeeded(); + return std::move(planner).extractQueryPlan(); +} + +void InterpreterSelectQueryAnalyzer::extendQueryLogElemImpl(QueryLogElement & elem, const ASTPtr &, ContextPtr) const +{ + elem.query_kind = "Select"; +} + +} diff --git a/src/Interpreters/InterpreterSelectQueryAnalyzer.h b/src/Interpreters/InterpreterSelectQueryAnalyzer.h new file mode 100644 index 
00000000000..e9884567ab0 --- /dev/null +++ b/src/Interpreters/InterpreterSelectQueryAnalyzer.h @@ -0,0 +1,49 @@ +#pragma once + +#include +#include + +#include +#include +#include + +#include + +namespace DB +{ + +class InterpreterSelectQueryAnalyzer : public IInterpreter, public WithContext +{ +public: + /// Initialize interpreter with query AST + InterpreterSelectQueryAnalyzer(const ASTPtr & query_, + const SelectQueryOptions & select_query_options_, + ContextPtr context_); + + /// Initialize interpreter with query tree + InterpreterSelectQueryAnalyzer(const QueryTreeNodePtr & query_tree_, + const SelectQueryOptions & select_query_options_, + ContextPtr context_); + + Block getSampleBlock(); + + BlockIO execute() override; + + QueryPlan && extractQueryPlan() &&; + + bool supportsTransactions() const override { return true; } + + bool ignoreLimits() const override { return select_query_options.ignore_limits; } + + bool ignoreQuota() const override { return select_query_options.ignore_quota; } + + void extendQueryLogElemImpl(QueryLogElement & elem, const ASTPtr &, ContextPtr) const override; + +private: + ASTPtr query; + QueryTreeNodePtr query_tree; + SelectQueryOptions select_query_options; + Planner planner; +}; + +} diff --git a/src/Interpreters/JIT/compileFunction.cpp b/src/Interpreters/JIT/compileFunction.cpp index d3a7eb0cfaa..e12b4894eb0 100644 --- a/src/Interpreters/JIT/compileFunction.cpp +++ b/src/Interpreters/JIT/compileFunction.cpp @@ -614,7 +614,7 @@ static void compileSortDescription(llvm::Module & module, /** Join results from all comparator steps. * Result of columns comparison equals to first compare block where lhs is not equal to lhs or last compare block. */ - auto * compare_result_phi = b.CreatePHI(b.getInt8Ty(), comparator_steps_and_results.size()); + auto * compare_result_phi = b.CreatePHI(b.getInt8Ty(), static_cast(comparator_steps_and_results.size())); for (const auto & [block, result_value] : comparator_steps_and_results) compare_result_phi->addIncoming(result_value, block); diff --git a/src/Interpreters/NormalizeSelectWithUnionQueryVisitor.cpp b/src/Interpreters/NormalizeSelectWithUnionQueryVisitor.cpp index 40c42f7728e..b3c2063c6f6 100644 --- a/src/Interpreters/NormalizeSelectWithUnionQueryVisitor.cpp +++ b/src/Interpreters/NormalizeSelectWithUnionQueryVisitor.cpp @@ -45,8 +45,7 @@ void NormalizeSelectWithUnionQueryMatcher::visit(ASTSelectWithUnionQuery & ast, SelectUnionModesSet current_set_of_modes; bool distinct_found = false; - int i; - for (i = union_modes.size() - 1; i >= 0; --i) + for (Int64 i = union_modes.size() - 1; i >= 0; --i) { current_set_of_modes.insert(union_modes[i]); if (const auto * union_ast = typeid_cast(select_list[i + 1].get())) diff --git a/src/Interpreters/PartLog.cpp b/src/Interpreters/PartLog.cpp index 75e6d02d6e1..b35ee50b98e 100644 --- a/src/Interpreters/PartLog.cpp +++ b/src/Interpreters/PartLog.cpp @@ -207,8 +207,8 @@ bool PartLog::addNewParts( elem.table_name = table_id.table_name; elem.partition_id = part->info.partition_id; elem.part_name = part->name; - elem.disk_name = part->data_part_storage->getDiskName(); - elem.path_on_disk = part->data_part_storage->getFullPath(); + elem.disk_name = part->getDataPartStorage().getDiskName(); + elem.path_on_disk = part->getDataPartStorage().getFullPath(); elem.part_type = part->getType(); elem.bytes_compressed_on_disk = part->getBytesOnDisk(); diff --git a/src/Interpreters/PreparedSets.h b/src/Interpreters/PreparedSets.h index 06600c49f13..a50e390ee5a 100644 --- 
a/src/Interpreters/PreparedSets.h +++ b/src/Interpreters/PreparedSets.h @@ -39,7 +39,6 @@ public: /// This is a temporary table for transferring to remote servers for distributed query processing. StoragePtr table; -private: /// The source is obtained using the InterpreterSelectQuery subquery. std::unique_ptr source; }; diff --git a/src/Interpreters/ProcessList.cpp b/src/Interpreters/ProcessList.cpp index d5194a02513..551d20f835a 100644 --- a/src/Interpreters/ProcessList.cpp +++ b/src/Interpreters/ProcessList.cpp @@ -242,16 +242,21 @@ ProcessList::EntryPtr ProcessList::insert(const String & query_, const IAST * as /// since allocation and deallocation could happen in different threads } - auto process_it = processes.emplace(processes.end(), - query_context, query_, client_info, priorities.insert(settings.priority), std::move(thread_group), query_kind); + auto process_it = processes.emplace(processes.end(), std::make_shared( + query_context, + query_, + client_info, + priorities.insert(static_cast(settings.priority)), + std::move(thread_group), + query_kind)); increaseQueryKindAmount(query_kind); res = std::make_shared(*this, process_it); - process_it->setUserProcessList(&user_process_list); + (*process_it)->setUserProcessList(&user_process_list); - user_process_list.queries.emplace(client_info.current_query_id, &res->get()); + user_process_list.queries.emplace(client_info.current_query_id, res->getQueryStatus()); /// Track memory usage for all simultaneously running queries from single user. user_process_list.user_memory_tracker.setOrRaiseHardLimit(settings.max_memory_usage_for_user); @@ -280,11 +285,11 @@ ProcessListEntry::~ProcessListEntry() { auto lock = parent.safeLock(); - String user = it->getClientInfo().current_user; - String query_id = it->getClientInfo().current_query_id; - IAST::QueryKind query_kind = it->query_kind; + String user = (*it)->getClientInfo().current_user; + String query_id = (*it)->getClientInfo().current_query_id; + IAST::QueryKind query_kind = (*it)->query_kind; - const QueryStatus * process_list_element_ptr = &*it; + const QueryStatusPtr process_list_element_ptr = *it; auto user_process_list_it = parent.user_to_queries.find(user); if (user_process_list_it == parent.user_to_queries.end()) @@ -307,7 +312,7 @@ ProcessListEntry::~ProcessListEntry() } /// Wait for the query if it is in the cancellation right now. - parent.cancelled_cv.wait(lock.lock, [&]() { return it->is_cancelling == false; }); + parent.cancelled_cv.wait(lock.lock, [&]() { return process_list_element_ptr->is_cancelling == false; }); /// This removes the memory_tracker of one request. 
parent.processes.erase(it); @@ -344,6 +349,7 @@ QueryStatus::QueryStatus( , client_info(client_info_) , thread_group(std::move(thread_group_)) , priority_handle(std::move(priority_handle_)) + , global_overcommit_tracker(context_->getGlobalOvercommitTracker()) , query_kind(query_kind_) , num_queries_increment(CurrentMetrics::Query) { @@ -360,8 +366,8 @@ QueryStatus::~QueryStatus() { if (user_process_list) user_process_list->user_overcommit_tracker.onQueryStop(memory_tracker); - if (auto shared_context = getContext()) - shared_context->getGlobalOvercommitTracker()->onQueryStop(memory_tracker); + if (global_overcommit_tracker) + global_overcommit_tracker->onQueryStop(memory_tracker); } } @@ -430,7 +436,7 @@ ThrottlerPtr QueryStatus::getUserNetworkThrottler() } -QueryStatus * ProcessList::tryGetProcessListElement(const String & current_query_id, const String & current_user) +QueryStatusPtr ProcessList::tryGetProcessListElement(const String & current_query_id, const String & current_user) { auto user_it = user_to_queries.find(current_user); if (user_it != user_to_queries.end()) @@ -442,13 +448,13 @@ QueryStatus * ProcessList::tryGetProcessListElement(const String & current_query return query_it->second; } - return nullptr; + return {}; } CancellationCode ProcessList::sendCancelToQuery(const String & current_query_id, const String & current_user, bool kill) { - QueryStatus * elem; + QueryStatusPtr elem; /// Cancelling the query should be done without the lock. /// @@ -484,7 +490,7 @@ CancellationCode ProcessList::sendCancelToQuery(const String & current_query_id, void ProcessList::killAllQueries() { - std::vector cancelled_processes; + std::vector cancelled_processes; SCOPE_EXIT({ auto lock = safeLock(); @@ -498,8 +504,8 @@ void ProcessList::killAllQueries() cancelled_processes.reserve(processes.size()); for (auto & process : processes) { - cancelled_processes.push_back(&process); - process.is_cancelling = true; + cancelled_processes.push_back(process); + process->is_cancelling = true; } } @@ -558,7 +564,7 @@ ProcessList::Info ProcessList::getInfo(bool get_thread_list, bool get_profile_ev per_query_infos.reserve(processes.size()); for (const auto & process : processes) - per_query_infos.emplace_back(process.getInfo(get_thread_list, get_profile_events, get_settings)); + per_query_infos.emplace_back(process->getInfo(get_thread_list, get_profile_events, get_settings)); return per_query_infos; } diff --git a/src/Interpreters/ProcessList.h b/src/Interpreters/ProcessList.h index 6943c7cfcd8..5fbdce358f9 100644 --- a/src/Interpreters/ProcessList.h +++ b/src/Interpreters/ProcessList.h @@ -133,6 +133,8 @@ protected: ProcessListForUser * user_process_list = nullptr; + OvercommitTracker * global_overcommit_tracker = nullptr; + IAST::QueryKind query_kind; /// This field is unused in this class, but it @@ -221,6 +223,8 @@ public: [[nodiscard]] bool checkTimeLimitSoft(); }; +using QueryStatusPtr = std::shared_ptr; + /// Information of process list for user. struct ProcessListForUserInfo @@ -241,7 +245,7 @@ struct ProcessListForUser ProcessListForUser(ContextPtr global_context, ProcessList * global_process_list); /// query_id -> ProcessListElement(s). There can be multiple queries with the same query_id as long as all queries except one are cancelled. 
- using QueryToElement = std::unordered_map; + using QueryToElement = std::unordered_map; QueryToElement queries; ProfileEvents::Counters user_performance_counters{VariableContext::User, &ProfileEvents::global_counters}; @@ -278,7 +282,7 @@ class ProcessList; class ProcessListEntry { private: - using Container = std::list; + using Container = std::list; ProcessList & parent; Container::iterator it; @@ -289,11 +293,8 @@ public: ~ProcessListEntry(); - QueryStatus * operator->() { return &*it; } - const QueryStatus * operator->() const { return &*it; } - - QueryStatus & get() { return *it; } - const QueryStatus & get() const { return *it; } + QueryStatusPtr getQueryStatus() { return *it; } + const QueryStatusPtr getQueryStatus() const { return *it; } }; @@ -319,7 +320,7 @@ protected: class ProcessList : public ProcessListBase { public: - using Element = QueryStatus; + using Element = QueryStatusPtr; using Entry = ProcessListEntry; using QueryAmount = UInt64; @@ -358,7 +359,7 @@ protected: ThrottlerPtr total_network_throttler; /// Call under lock. Finds process with specified current_user and current_query_id. - QueryStatus * tryGetProcessListElement(const String & current_query_id, const String & current_user); + QueryStatusPtr tryGetProcessListElement(const String & current_query_id, const String & current_user); /// limit for insert. 0 means no limit. Otherwise, when limit exceeded, an exception is thrown. size_t max_insert_queries_amount = 0; diff --git a/src/Interpreters/RowRefs.cpp b/src/Interpreters/RowRefs.cpp index 68076e1fec2..0e553ef145e 100644 --- a/src/Interpreters/RowRefs.cpp +++ b/src/Interpreters/RowRefs.cpp @@ -89,7 +89,7 @@ public: assert(!sorted.load(std::memory_order_acquire)); - entries.emplace_back(key, row_refs.size()); + entries.emplace_back(key, static_cast(row_refs.size())); row_refs.emplace_back(RowRef(block, row_num)); } diff --git a/src/Interpreters/RowRefs.h b/src/Interpreters/RowRefs.h index 2c9f2062a82..294da1da571 100644 --- a/src/Interpreters/RowRefs.h +++ b/src/Interpreters/RowRefs.h @@ -29,7 +29,10 @@ struct RowRef SizeT row_num = 0; RowRef() = default; - RowRef(const Block * block_, size_t row_num_) : block(block_), row_num(row_num_) {} + RowRef(const Block * block_, size_t row_num_) + : block(block_) + , row_num(static_cast(row_num_)) + {} }; /// Single linked list of references to rows. 
Used for ALL JOINs (non-unique JOINs) diff --git a/src/Interpreters/Set.cpp b/src/Interpreters/Set.cpp index ded8b04a589..e75232aa0f5 100644 --- a/src/Interpreters/Set.cpp +++ b/src/Interpreters/Set.cpp @@ -131,6 +131,7 @@ void Set::setHeader(const ColumnsWithTypeAndName & header) if (const auto * low_cardinality_type = typeid_cast(data_types.back().get())) { data_types.back() = low_cardinality_type->getDictionaryType(); + set_elements_types.back() = low_cardinality_type->getDictionaryType(); materialized_columns.emplace_back(key_columns.back()->convertToFullColumnIfLowCardinality()); key_columns.back() = materialized_columns.back().get(); } diff --git a/src/Interpreters/TableJoin.h b/src/Interpreters/TableJoin.h index 3835ef77deb..316beccae80 100644 --- a/src/Interpreters/TableJoin.h +++ b/src/Interpreters/TableJoin.h @@ -55,15 +55,26 @@ public: ASTPtr on_filter_condition_left; ASTPtr on_filter_condition_right; + std::string analyzer_left_filter_condition_column_name; + std::string analyzer_right_filter_condition_column_name; + JoinOnClause() = default; std::pair condColumnNames() const { std::pair res; + + if (!analyzer_left_filter_condition_column_name.empty()) + res.first = analyzer_left_filter_condition_column_name; + + if (!analyzer_right_filter_condition_column_name.empty()) + res.second = analyzer_right_filter_condition_column_name; + if (on_filter_condition_left) res.first = on_filter_condition_left->getColumnName(); if (on_filter_condition_right) res.second = on_filter_condition_right->getColumnName(); + return res; } @@ -111,9 +122,6 @@ private: * to the subquery will be added expression `expr(t2 columns)`. * It's possible to use name `expr(t2 columns)`. */ - - friend class TreeRewriter; - SizeLimits size_limits; const size_t default_max_bytes = 0; const bool join_use_nulls = false; @@ -124,9 +132,6 @@ private: const size_t max_files_to_merge = 0; const String temporary_files_codec = "LZ4"; - /// the limit has no technical reasons, it supposed to improve safety - const size_t MAX_DISJUNCTS = 16; /// NOLINT - ASTs key_asts_left; ASTs key_asts_right; @@ -160,6 +165,8 @@ private: std::string right_storage_name; + bool is_join_with_constant = false; + Names requiredJoinedNames() const; /// Create converting actions and change key column names if required @@ -178,6 +185,8 @@ private: NamesAndTypesList correctedColumnsAddedByJoin() const; + void deduplicateAndQualifyColumnNames(const NameSet & left_table_columns, const String & right_table_prefix); + public: TableJoin() = default; @@ -217,8 +226,8 @@ public: bool allowParallelHashJoin() const; bool joinUseNulls() const { return join_use_nulls; } - bool forceNullableRight() const { return join_use_nulls && isLeftOrFull(table_join.kind); } - bool forceNullableLeft() const { return join_use_nulls && isRightOrFull(table_join.kind); } + bool forceNullableRight() const { return join_use_nulls && isLeftOrFull(kind()); } + bool forceNullableLeft() const { return join_use_nulls && isRightOrFull(kind()); } size_t defaultMaxBytes() const { return default_max_bytes; } size_t maxJoinedBlockRows() const { return max_joined_block_rows; } size_t maxRowsInRightBlock() const { return partial_merge_join_rows_in_right_blocks; } @@ -229,6 +238,9 @@ public: bool oneDisjunct() const; + ASTTableJoin & getTableJoin() { return table_join; } + const ASTTableJoin & getTableJoin() const { return table_join; } + JoinOnClause & getOnlyClause() { assertHasOneOnExpr(); return clauses[0]; } const JoinOnClause & getOnlyClause() const { assertHasOneOnExpr(); return 
clauses[0]; } @@ -266,13 +278,26 @@ public: NamesWithAliases getNamesWithAliases(const NameSet & required_columns) const; NamesWithAliases getRequiredColumns(const Block & sample, const Names & action_required_columns) const; - void deduplicateAndQualifyColumnNames(const NameSet & left_table_columns, const String & right_table_prefix); size_t rightKeyInclusion(const String & name) const; NameSet requiredRightKeys() const; + bool isJoinWithConstant() const + { + return is_join_with_constant; + } + + void setIsJoinWithConstant(bool is_join_with_constant_value) + { + is_join_with_constant = is_join_with_constant_value; + } + bool leftBecomeNullable(const DataTypePtr & column_type) const; bool rightBecomeNullable(const DataTypePtr & column_type) const; void addJoinedColumn(const NameAndTypePair & joined_column); + void setColumnsAddedByJoin(const NamesAndTypesList & columns_added_by_join_value) + { + columns_added_by_join = columns_added_by_join_value; + } template void addJoinedColumnsAndCorrectTypesImpl(TColumns & left_columns, bool correct_nullability); @@ -294,15 +319,13 @@ public: ASTPtr leftKeysList() const; ASTPtr rightKeysList() const; /// For ON syntax only - const NamesAndTypesList & columnsFromJoinedTable() const { return columns_from_joined_table; } - - Names columnsAddedByJoin() const + void setColumnsFromJoinedTable(NamesAndTypesList columns_from_joined_table_value, const NameSet & left_table_columns, const String & right_table_prefix) { - Names res; - for (const auto & col : columns_added_by_join) - res.push_back(col.name); - return res; + columns_from_joined_table = std::move(columns_from_joined_table_value); + deduplicateAndQualifyColumnNames(left_table_columns, right_table_prefix); } + const NamesAndTypesList & columnsFromJoinedTable() const { return columns_from_joined_table; } + const NamesAndTypesList & columnsAddedByJoin() const { return columns_added_by_join; } /// StorageJoin overrides key names (cause of different names qualification) void setRightKeys(const Names & keys) { getOnlyClause().key_names_right = keys; } diff --git a/src/Interpreters/TemporaryDataOnDisk.cpp b/src/Interpreters/TemporaryDataOnDisk.cpp index 3eb93f1f20e..c5ae6f6c885 100644 --- a/src/Interpreters/TemporaryDataOnDisk.cpp +++ b/src/Interpreters/TemporaryDataOnDisk.cpp @@ -20,7 +20,7 @@ namespace ErrorCodes extern const int NOT_ENOUGH_SPACE; } -void TemporaryDataOnDiskScope::deltaAllocAndCheck(int compressed_delta, int uncompressed_delta) +void TemporaryDataOnDiskScope::deltaAllocAndCheck(ssize_t compressed_delta, ssize_t uncompressed_delta) { if (parent) parent->deltaAllocAndCheck(compressed_delta, uncompressed_delta); diff --git a/src/Interpreters/TemporaryDataOnDisk.h b/src/Interpreters/TemporaryDataOnDisk.h index 81bd2067650..11edc8700d2 100644 --- a/src/Interpreters/TemporaryDataOnDisk.h +++ b/src/Interpreters/TemporaryDataOnDisk.h @@ -53,7 +53,7 @@ public: VolumePtr getVolume() const { return volume; } protected: - void deltaAllocAndCheck(int compressed_delta, int uncompressed_delta); + void deltaAllocAndCheck(ssize_t compressed_delta, ssize_t uncompressed_delta); TemporaryDataOnDiskScopePtr parent = nullptr; VolumePtr volume; diff --git a/src/Interpreters/ThreadStatusExt.cpp b/src/Interpreters/ThreadStatusExt.cpp index 4810174e395..ee126f2da11 100644 --- a/src/Interpreters/ThreadStatusExt.cpp +++ b/src/Interpreters/ThreadStatusExt.cpp @@ -55,12 +55,12 @@ void ThreadStatus::applyQuerySettings() #if defined(OS_LINUX) /// Set "nice" value if required. 
- Int32 new_os_thread_priority = settings.os_thread_priority; + Int32 new_os_thread_priority = static_cast(settings.os_thread_priority); if (new_os_thread_priority && hasLinuxCapability(CAP_SYS_NICE)) { LOG_TRACE(log, "Setting nice to {}", new_os_thread_priority); - if (0 != setpriority(PRIO_PROCESS, thread_id, new_os_thread_priority)) + if (0 != setpriority(PRIO_PROCESS, static_cast(thread_id), new_os_thread_priority)) throwFromErrno("Cannot 'setpriority'", ErrorCodes::CANNOT_SET_THREAD_PRIORITY); os_thread_priority = new_os_thread_priority; @@ -349,7 +349,7 @@ void ThreadStatus::detachQuery(bool exit_if_already_detached, bool thread_exits) { LOG_TRACE(log, "Resetting nice"); - if (0 != setpriority(PRIO_PROCESS, thread_id, 0)) + if (0 != setpriority(PRIO_PROCESS, static_cast(thread_id), 0)) LOG_ERROR(log, "Cannot 'setpriority' back to zero: {}", errnoToString()); os_thread_priority = 0; diff --git a/src/Interpreters/TreeRewriter.cpp b/src/Interpreters/TreeRewriter.cpp index ac49d79c6ba..da12dccd8d8 100644 --- a/src/Interpreters/TreeRewriter.cpp +++ b/src/Interpreters/TreeRewriter.cpp @@ -611,7 +611,7 @@ void getArrayJoinedColumns(ASTPtr & query, TreeRewriterResult & result, const AS } } -void setJoinStrictness(ASTSelectQuery & select_query, JoinStrictness join_default_strictness, bool old_any, ASTTableJoin & out_table_join) +void setJoinStrictness(ASTSelectQuery & select_query, JoinStrictness join_default_strictness, bool old_any, std::shared_ptr & analyzed_join) { const ASTTablesInSelectQueryElement * node = select_query.join(); if (!node) @@ -649,7 +649,7 @@ void setJoinStrictness(ASTSelectQuery & select_query, JoinStrictness join_defaul throw Exception("ANY FULL JOINs are not implemented", ErrorCodes::NOT_IMPLEMENTED); } - out_table_join = table_join; + analyzed_join->getTableJoin() = table_join; } /// Evaluate expression and return boolean value if it can be interpreted as bool. 
@@ -1236,14 +1236,11 @@ TreeRewriterResultPtr TreeRewriter::analyzeSelect( if (tables_with_columns.size() > 1) { const auto & right_table = tables_with_columns[1]; - auto & cols_from_joined = result.analyzed_join->columns_from_joined_table; - cols_from_joined = right_table.columns; + auto columns_from_joined_table = right_table.columns; /// query can use materialized or aliased columns from right joined table, /// we want to request it for right table - cols_from_joined.insert(cols_from_joined.end(), right_table.hidden_columns.begin(), right_table.hidden_columns.end()); - - result.analyzed_join->deduplicateAndQualifyColumnNames( - source_columns_set, right_table.table.getQualifiedNamePrefix()); + columns_from_joined_table.insert(columns_from_joined_table.end(), right_table.hidden_columns.begin(), right_table.hidden_columns.end()); + result.analyzed_join->setColumnsFromJoinedTable(std::move(columns_from_joined_table), source_columns_set, right_table.table.getQualifiedNamePrefix()); } translateQualifiedNames(query, *select_query, source_columns_set, tables_with_columns); @@ -1254,7 +1251,7 @@ TreeRewriterResultPtr TreeRewriter::analyzeSelect( NameSet all_source_columns_set = source_columns_set; if (table_join) { - for (const auto & [name, _] : table_join->columns_from_joined_table) + for (const auto & [name, _] : table_join->columnsFromJoinedTable()) all_source_columns_set.insert(name); } @@ -1304,7 +1301,7 @@ TreeRewriterResultPtr TreeRewriter::analyzeSelect( getArrayJoinedColumns(query, result, select_query, result.source_columns, source_columns_set); setJoinStrictness( - *select_query, settings.join_default_strictness, settings.any_join_distinct_right_table_keys, result.analyzed_join->table_join); + *select_query, settings.join_default_strictness, settings.any_join_distinct_right_table_keys, result.analyzed_join); auto * table_join_ast = select_query->join() ? select_query->join()->table_join->as() : nullptr; if (table_join_ast && tables_with_columns.size() >= 2) diff --git a/src/Interpreters/WindowDescription.cpp b/src/Interpreters/WindowDescription.cpp index 335610b2be9..7ed7788cf1d 100644 --- a/src/Interpreters/WindowDescription.cpp +++ b/src/Interpreters/WindowDescription.cpp @@ -20,7 +20,8 @@ std::string WindowFunctionDescription::dump() const WriteBufferFromOwnString ss; ss << "window function '" << column_name << "\n"; - ss << "function node " << function_node->dumpTree() << "\n"; + if (function_node) + ss << "function node " << function_node->dumpTree() << "\n"; ss << "aggregate function '" << aggregate_function->getName() << "'\n"; if (!function_parameters.empty()) { diff --git a/src/Interpreters/WindowDescription.h b/src/Interpreters/WindowDescription.h index e7bc0473c26..3b9af6575e8 100644 --- a/src/Interpreters/WindowDescription.h +++ b/src/Interpreters/WindowDescription.h @@ -99,7 +99,6 @@ struct WindowDescription // The window functions that are calculated for this window. 
std::vector window_functions; - std::string dump() const; void checkValid() const; diff --git a/src/Interpreters/convertFieldToType.cpp b/src/Interpreters/convertFieldToType.cpp index 55156cde7be..e57016d969a 100644 --- a/src/Interpreters/convertFieldToType.cpp +++ b/src/Interpreters/convertFieldToType.cpp @@ -88,7 +88,7 @@ Field convertIntToDecimalType(const Field & from, const DataTypeDecimal & typ if (!type.canStoreWhole(value)) throw Exception("Number is too big to place in " + type.getName(), ErrorCodes::ARGUMENT_OUT_OF_BOUND); - T scaled_value = type.getScaleMultiplier() * static_cast(value); + T scaled_value = type.getScaleMultiplier() * T(static_cast(value)); return DecimalField(scaled_value, type.getScale()); } @@ -236,10 +236,11 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID } if (which_type.isDateTime64() - && (which_from_type.isNativeInt() || which_from_type.isNativeUInt() || which_from_type.isDate() || which_from_type.isDate32() || which_from_type.isDateTime() || which_from_type.isDateTime64())) + && (src.getType() == Field::Types::UInt64 || src.getType() == Field::Types::Int64 || src.getType() == Field::Types::Decimal64)) { const auto scale = static_cast(type).getScale(); - const auto decimal_value = DecimalUtils::decimalFromComponents(applyVisitor(FieldVisitorConvertToNumber(), src), 0, scale); + const auto decimal_value + = DecimalUtils::decimalFromComponents(applyVisitor(FieldVisitorConvertToNumber(), src), 0, scale); return Field(DecimalField(decimal_value, scale)); } } @@ -386,6 +387,9 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID } else if (isObject(type)) { + if (src.getType() == Field::Types::Object) + return src; /// Already in needed type. + const auto * from_type_tuple = typeid_cast(from_type_hint); if (src.getType() == Field::Types::Tuple && from_type_tuple && from_type_tuple->haveExplicitNames()) { diff --git a/src/Interpreters/createBlockSelector.cpp b/src/Interpreters/createBlockSelector.cpp index b1a9a4e9e35..fce9833ddfb 100644 --- a/src/Interpreters/createBlockSelector.cpp +++ b/src/Interpreters/createBlockSelector.cpp @@ -50,7 +50,7 @@ IColumn::Selector createBlockSelector( /// libdivide support only UInt32 and UInt64. using TUInt32Or64 = std::conditional_t; - libdivide::divider divider(total_weight); + libdivide::divider divider(static_cast(total_weight)); const auto & data = typeid_cast &>(column).getData(); diff --git a/src/Interpreters/examples/hash_map_string_small.cpp b/src/Interpreters/examples/hash_map_string_small.cpp index 4a96f717bf7..b58cdfbacd0 100644 --- a/src/Interpreters/examples/hash_map_string_small.cpp +++ b/src/Interpreters/examples/hash_map_string_small.cpp @@ -23,7 +23,7 @@ struct SmallStringRef { - UInt32 size = 0; + size_t size = 0; union { diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index 729088f38b7..7c038bf4f4f 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -538,7 +538,7 @@ static std::tuple executeQueryImpl( { /// processlist also has query masked now, to avoid secrets leaks though SHOW PROCESSLIST by other users. 
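The createBlockSelector change above constructs the libdivide divider from an explicitly cast weight. The usual libdivide pattern is to build the divider once from the divisor, in the same unsigned type that is later divided by it, and then reuse it for many rows. A small self-contained sketch of that pattern, assuming only the public libdivide API; the real selector additionally maps each remainder onto a shard slot, and the function name here is illustrative:

#include <libdivide.h>
#include <cstdint>
#include <cstddef>
#include <vector>

// Sketch: precompute a divider for total_weight and reuse it to take
// hash % total_weight for every row without a hardware division per row.
std::vector<uint64_t> remainderByWeight(const std::vector<uint64_t> & hashes, size_t total_weight)
{
    const auto weight = static_cast<uint64_t>(total_weight);
    libdivide::divider<uint64_t> divider(weight);

    std::vector<uint64_t> result;
    result.reserve(hashes.size());
    for (uint64_t hash : hashes)
        result.push_back(hash - (hash / divider) * weight);  // equivalent to hash % weight
    return result;
}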
process_list_entry = context->getProcessList().insert(query_for_logging, ast.get(), context); - context->setProcessListElement(&process_list_entry->get()); + context->setProcessListElement(process_list_entry->getQueryStatus()); } /// Load external tables if they were provided @@ -714,9 +714,9 @@ static std::tuple executeQueryImpl( if (process_list_entry) { /// Query was killed before execution - if ((*process_list_entry)->isKilled()) - throw Exception("Query '" + (*process_list_entry)->getInfo().client_info.current_query_id + "' is killed in pending state", - ErrorCodes::QUERY_WAS_CANCELLED); + if (process_list_entry->getQueryStatus()->isKilled()) + throw Exception(ErrorCodes::QUERY_WAS_CANCELLED, + "Query '{}' is killed in pending state", process_list_entry->getQueryStatus()->getInfo().client_info.current_query_id); } /// Hold element of process list till end of query execution. @@ -860,7 +860,7 @@ static std::tuple executeQueryImpl( pulling_pipeline = pipeline.pulling(), query_span](QueryPipeline & query_pipeline) mutable { - QueryStatus * process_list_elem = context->getProcessListElement(); + QueryStatusPtr process_list_elem = context->getProcessListElement(); if (process_list_elem) { @@ -945,9 +945,10 @@ static std::tuple executeQueryImpl( processor_elem.processor_name = processor->getName(); - processor_elem.elapsed_us = processor->getElapsedUs(); - processor_elem.input_wait_elapsed_us = processor->getInputWaitElapsedUs(); - processor_elem.output_wait_elapsed_us = processor->getOutputWaitElapsedUs(); + /// NOTE: convert this to UInt64 + processor_elem.elapsed_us = static_cast(processor->getElapsedUs()); + processor_elem.input_wait_elapsed_us = static_cast(processor->getInputWaitElapsedUs()); + processor_elem.output_wait_elapsed_us = static_cast(processor->getOutputWaitElapsedUs()); auto stats = processor->getProcessorDataStats(); processor_elem.input_rows = stats.input_rows; @@ -1026,7 +1027,7 @@ static std::tuple executeQueryImpl( elem.exception_code = getCurrentExceptionCode(); elem.exception = getCurrentExceptionMessage(false); - QueryStatus * process_list_elem = context->getProcessListElement(); + QueryStatusPtr process_list_elem = context->getProcessListElement(); const Settings & current_settings = context->getSettingsRef(); /// Update performance counters before logging to query_log diff --git a/src/Interpreters/getHeaderForProcessingStage.cpp b/src/Interpreters/getHeaderForProcessingStage.cpp index e16647091ba..48acfb5512a 100644 --- a/src/Interpreters/getHeaderForProcessingStage.cpp +++ b/src/Interpreters/getHeaderForProcessingStage.cpp @@ -14,6 +14,7 @@ namespace DB namespace ErrorCodes { extern const int LOGICAL_ERROR; + extern const int UNSUPPORTED_METHOD; } bool hasJoin(const ASTSelectQuery & select) @@ -118,6 +119,10 @@ Block getHeaderForProcessingStage( case QueryProcessingStage::WithMergeableStateAfterAggregationAndLimit: case QueryProcessingStage::MAX: { + /// TODO: Analyzer syntax analyzer result + if (!query_info.syntax_analyzer_result) + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "getHeaderForProcessingStage is unsupported"); + auto query = query_info.query->clone(); TreeRewriterResult new_rewriter_result = *query_info.syntax_analyzer_result; removeJoin(*query->as(), new_rewriter_result, context); diff --git a/src/Parsers/ASTColumnsMatcher.cpp b/src/Parsers/ASTColumnsMatcher.cpp index 8f167f99b37..0fc6847de68 100644 --- a/src/Parsers/ASTColumnsMatcher.cpp +++ b/src/Parsers/ASTColumnsMatcher.cpp @@ -60,6 +60,11 @@ void 
ASTColumnsRegexpMatcher::setPattern(String pattern) DB::ErrorCodes::CANNOT_COMPILE_REGEXP); } +const std::shared_ptr & ASTColumnsRegexpMatcher::getMatcher() const +{ + return column_matcher; +} + bool ASTColumnsRegexpMatcher::isColumnMatching(const String & column_name) const { return RE2::PartialMatch(column_name, *column_matcher); @@ -114,4 +119,128 @@ void ASTColumnsListMatcher::formatImpl(const FormatSettings & settings, FormatSt } } +ASTPtr ASTQualifiedColumnsRegexpMatcher::clone() const +{ + auto clone = std::make_shared(*this); + clone->cloneChildren(); + return clone; +} + +void ASTQualifiedColumnsRegexpMatcher::appendColumnName(WriteBuffer & ostr) const +{ + const auto & qualifier = children.at(0); + qualifier->appendColumnName(ostr); + writeCString(".COLUMNS(", ostr); + writeQuotedString(original_pattern, ostr); + writeChar(')', ostr); +} + +void ASTQualifiedColumnsRegexpMatcher::setPattern(String pattern) +{ + original_pattern = std::move(pattern); + column_matcher = std::make_shared(original_pattern, RE2::Quiet); + if (!column_matcher->ok()) + throw DB::Exception( + "COLUMNS pattern " + original_pattern + " cannot be compiled: " + column_matcher->error(), + DB::ErrorCodes::CANNOT_COMPILE_REGEXP); +} + +void ASTQualifiedColumnsRegexpMatcher::setMatcher(std::shared_ptr matcher) +{ + column_matcher = std::move(matcher); +} + +const std::shared_ptr & ASTQualifiedColumnsRegexpMatcher::getMatcher() const +{ + return column_matcher; +} + +void ASTQualifiedColumnsRegexpMatcher::updateTreeHashImpl(SipHash & hash_state) const +{ + hash_state.update(original_pattern.size()); + hash_state.update(original_pattern); + IAST::updateTreeHashImpl(hash_state); +} + +void ASTQualifiedColumnsRegexpMatcher::formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const +{ + settings.ostr << (settings.hilite ? hilite_keyword : ""); + + const auto & qualifier = children.at(0); + qualifier->formatImpl(settings, state, frame); + + settings.ostr << ".COLUMNS" << (settings.hilite ? hilite_none : "") << "("; + settings.ostr << quoteString(original_pattern); + settings.ostr << ")"; + + /// Format column transformers + size_t children_size = children.size(); + + for (size_t i = 1; i < children_size; ++i) + { + const auto & child = children[i]; + settings.ostr << ' '; + child->formatImpl(settings, state, frame); + } +} + +ASTPtr ASTQualifiedColumnsListMatcher::clone() const +{ + auto clone = std::make_shared(*this); + clone->column_list = column_list->clone(); + clone->cloneChildren(); + return clone; +} + +void ASTQualifiedColumnsListMatcher::appendColumnName(WriteBuffer & ostr) const +{ + const auto & qualifier = children.at(0); + qualifier->appendColumnName(ostr); + writeCString(".COLUMNS(", ostr); + + for (auto it = column_list->children.begin(); it != column_list->children.end(); ++it) + { + if (it != column_list->children.begin()) + writeCString(", ", ostr); + + (*it)->appendColumnName(ostr); + } + writeChar(')', ostr); +} + +void ASTQualifiedColumnsListMatcher::updateTreeHashImpl(SipHash & hash_state) const +{ + column_list->updateTreeHash(hash_state); + IAST::updateTreeHashImpl(hash_state); +} + +void ASTQualifiedColumnsListMatcher::formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const +{ + settings.ostr << (settings.hilite ? hilite_keyword : ""); + + const auto & qualifier = children.at(0); + qualifier->formatImpl(settings, state, frame); + + settings.ostr << ".COLUMNS" << (settings.hilite ? 
hilite_none : "") << "("; + + for (ASTs::const_iterator it = column_list->children.begin(); it != column_list->children.end(); ++it) + { + if (it != column_list->children.begin()) + settings.ostr << ", "; + + (*it)->formatImpl(settings, state, frame); + } + settings.ostr << ")"; + + /// Format column transformers + size_t children_size = children.size(); + + for (size_t i = 1; i < children_size; ++i) + { + const auto & child = children[i]; + settings.ostr << ' '; + child->formatImpl(settings, state, frame); + } +} + } diff --git a/src/Parsers/ASTColumnsMatcher.h b/src/Parsers/ASTColumnsMatcher.h index 5aaf3cbe30d..7ce246608b9 100644 --- a/src/Parsers/ASTColumnsMatcher.h +++ b/src/Parsers/ASTColumnsMatcher.h @@ -24,6 +24,7 @@ public: void appendColumnName(WriteBuffer & ostr) const override; void setPattern(String pattern); + const std::shared_ptr & getMatcher() const; bool isColumnMatching(const String & column_name) const; void updateTreeHashImpl(SipHash & hash_state) const override; @@ -49,5 +50,39 @@ protected: void formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override; }; +/// Same as ASTColumnsRegexpMatcher. Qualified identifier is first child. +class ASTQualifiedColumnsRegexpMatcher : public IAST +{ +public: + String getID(char) const override { return "QualifiedColumnsRegexpMatcher"; } + ASTPtr clone() const override; + + void appendColumnName(WriteBuffer & ostr) const override; + const std::shared_ptr & getMatcher() const; + void setPattern(String pattern); + void setMatcher(std::shared_ptr matcher); + void updateTreeHashImpl(SipHash & hash_state) const override; + +protected: + void formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override; + +private: + std::shared_ptr column_matcher; + String original_pattern; +}; + +/// Same as ASTColumnsListMatcher. Qualified identifier is first child. 
+class ASTQualifiedColumnsListMatcher : public IAST +{ +public: + String getID(char) const override { return "QualifiedColumnsListMatcher"; } + ASTPtr clone() const override; + void appendColumnName(WriteBuffer & ostr) const override; + void updateTreeHashImpl(SipHash & hash_state) const override; + + ASTPtr column_list; +protected: + void formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override; +}; } diff --git a/src/Parsers/ASTColumnsTransformers.cpp b/src/Parsers/ASTColumnsTransformers.cpp index 71207724a89..118c22b463f 100644 --- a/src/Parsers/ASTColumnsTransformers.cpp +++ b/src/Parsers/ASTColumnsTransformers.cpp @@ -270,6 +270,11 @@ void ASTColumnsExceptTransformer::setPattern(String pattern) DB::ErrorCodes::CANNOT_COMPILE_REGEXP); } +const std::shared_ptr & ASTColumnsExceptTransformer::getMatcher() const +{ + return column_matcher; +} + bool ASTColumnsExceptTransformer::isColumnMatching(const String & column_name) const { return RE2::PartialMatch(column_name, *column_matcher); diff --git a/src/Parsers/ASTColumnsTransformers.h b/src/Parsers/ASTColumnsTransformers.h index 0f16f6b93e7..5179726e8cb 100644 --- a/src/Parsers/ASTColumnsTransformers.h +++ b/src/Parsers/ASTColumnsTransformers.h @@ -60,6 +60,7 @@ public: } void transform(ASTs & nodes) const override; void setPattern(String pattern); + const std::shared_ptr & getMatcher() const; bool isColumnMatching(const String & column_name) const; void appendColumnName(WriteBuffer & ostr) const override; void updateTreeHashImpl(SipHash & hash_state) const override; diff --git a/src/Parsers/ASTExplainQuery.h b/src/Parsers/ASTExplainQuery.h index ea9ccf5a4f4..156ffdeacb9 100644 --- a/src/Parsers/ASTExplainQuery.h +++ b/src/Parsers/ASTExplainQuery.h @@ -15,6 +15,7 @@ public: { ParsedAST, /// 'EXPLAIN AST SELECT ...' AnalyzedSyntax, /// 'EXPLAIN SYNTAX SELECT ...' + QueryTree, /// 'EXPLAIN QUERY TREE SELECT ...' QueryPlan, /// 'EXPLAIN SELECT ...' QueryPipeline, /// 'EXPLAIN PIPELINE ...' QueryEstimates, /// 'EXPLAIN ESTIMATE ...' @@ -109,6 +110,7 @@ private: { case ParsedAST: return "EXPLAIN AST"; case AnalyzedSyntax: return "EXPLAIN SYNTAX"; + case QueryTree: return "EXPLAIN QUERY TREE"; case QueryPlan: return "EXPLAIN"; case QueryPipeline: return "EXPLAIN PIPELINE"; case QueryEstimates: return "EXPLAIN ESTIMATE"; diff --git a/src/Parsers/ASTFunction.h b/src/Parsers/ASTFunction.h index 6d5089f802e..5756fb9ba86 100644 --- a/src/Parsers/ASTFunction.h +++ b/src/Parsers/ASTFunction.h @@ -24,6 +24,8 @@ public: bool compute_after_window_functions = false; + bool is_lambda_function = false; + // We have to make these fields ASTPtr because this is what the visitors // expect. Some of them take const ASTPtr & (makes no sense), and some // take ASTPtr & and modify it. I don't understand how the latter is diff --git a/src/Parsers/ASTIdentifier.h b/src/Parsers/ASTIdentifier.h index 14e2fcef39d..c9712d578e0 100644 --- a/src/Parsers/ASTIdentifier.h +++ b/src/Parsers/ASTIdentifier.h @@ -49,9 +49,10 @@ public: void restoreTable(); // TODO(ilezhankin): get rid of this std::shared_ptr createTable() const; // returns |nullptr| if identifier is not table. 
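The new qualified matchers keep the qualifier identifier as the first child and the pattern or column list separately, which is how appendColumnName() and formatImpl() above read them. A hedged sketch of building and printing such a node, assuming the ClickHouse headers named below and only the accessors shown in this diff; the function name is illustrative:

#include <Parsers/ASTColumnsMatcher.h>
#include <Parsers/ASTIdentifier.h>
#include <IO/WriteBufferFromString.h>
#include <memory>

// Hypothetical illustration: build t1.COLUMNS('^val') as an AST node. The
// qualifier is pushed as the first child, as the new implementations expect.
DB::ASTPtr makeQualifiedColumnsMatcher()
{
    auto matcher = std::make_shared<DB::ASTQualifiedColumnsRegexpMatcher>();
    matcher->children.push_back(std::make_shared<DB::ASTTableIdentifier>("t1"));
    matcher->setPattern("^val");  // compiles the RE2 matcher, throws on a bad pattern

    DB::WriteBufferFromOwnString out;
    matcher->appendColumnName(out);  // expected to print: t1.COLUMNS('^val')
    return matcher;
}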
-protected: String full_name; std::vector name_parts; + +protected: std::shared_ptr semantic; /// pimpl void formatImplWithoutAlias(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override; diff --git a/src/Parsers/ASTSampleRatio.h b/src/Parsers/ASTSampleRatio.h index a3e70b7dab7..220f938335b 100644 --- a/src/Parsers/ASTSampleRatio.h +++ b/src/Parsers/ASTSampleRatio.h @@ -34,4 +34,14 @@ public: void formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override; }; +inline bool operator==(const ASTSampleRatio::Rational & lhs, const ASTSampleRatio::Rational & rhs) +{ + return lhs.numerator == rhs.numerator && lhs.denominator == rhs.denominator; +} + +inline bool operator!=(const ASTSampleRatio::Rational & lhs, const ASTSampleRatio::Rational & rhs) +{ + return !(lhs == rhs); +} + } diff --git a/src/Parsers/ASTTTLElement.cpp b/src/Parsers/ASTTTLElement.cpp index 90278e27c0c..86dd85e0eb8 100644 --- a/src/Parsers/ASTTTLElement.cpp +++ b/src/Parsers/ASTTTLElement.cpp @@ -93,7 +93,7 @@ void ASTTTLElement::setExpression(int & pos, ASTPtr && ast) { if (pos == -1) { - pos = children.size(); + pos = static_cast(children.size()); children.emplace_back(ast); } else diff --git a/src/Parsers/Access/ParserCreateUserQuery.cpp b/src/Parsers/Access/ParserCreateUserQuery.cpp index 9e32b3c4618..ed6ecb62667 100644 --- a/src/Parsers/Access/ParserCreateUserQuery.cpp +++ b/src/Parsers/Access/ParserCreateUserQuery.cpp @@ -295,11 +295,11 @@ namespace } - bool parseHosts(IParserBase::Pos & pos, Expected & expected, const String & prefix, AllowedClientHosts & hosts) + bool parseHosts(IParserBase::Pos & pos, Expected & expected, std::string_view prefix, AllowedClientHosts & hosts) { return IParserBase::wrapParseImpl(pos, [&] { - if (!prefix.empty() && !ParserKeyword{prefix.c_str()}.ignore(pos, expected)) + if (!prefix.empty() && !ParserKeyword{prefix}.ignore(pos, expected)) return false; if (!ParserKeyword{"HOST"}.ignore(pos, expected)) @@ -492,7 +492,6 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec if (alter) { - String maybe_new_name; if (!new_name && (names->size() == 1) && parseRenameTo(pos, expected, new_name)) continue; diff --git a/src/Parsers/ExpressionElementParsers.cpp b/src/Parsers/ExpressionElementParsers.cpp index 88784329ece..c4e07ea2e15 100644 --- a/src/Parsers/ExpressionElementParsers.cpp +++ b/src/Parsers/ExpressionElementParsers.cpp @@ -1192,54 +1192,6 @@ bool ParserAlias::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) return true; } - -bool ParserColumnsMatcher::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) -{ - ParserKeyword columns("COLUMNS"); - ParserList columns_p(std::make_unique(false, true), std::make_unique(TokenType::Comma), false); - ParserStringLiteral regex; - - if (!columns.ignore(pos, expected)) - return false; - - if (pos->type != TokenType::OpeningRoundBracket) - return false; - ++pos; - - ASTPtr column_list; - ASTPtr regex_node; - if (!columns_p.parse(pos, column_list, expected) && !regex.parse(pos, regex_node, expected)) - return false; - - if (pos->type != TokenType::ClosingRoundBracket) - return false; - ++pos; - - ASTPtr res; - if (column_list) - { - auto list_matcher = std::make_shared(); - list_matcher->column_list = column_list; - res = list_matcher; - } - else - { - auto regexp_matcher = std::make_shared(); - regexp_matcher->setPattern(regex_node->as().value.get()); - res = regexp_matcher; - } - - ParserColumnsTransformers 
transformers_p(allowed_transformers); - ASTPtr transformer; - while (transformers_p.parse(pos, transformer, expected)) - { - res->children.push_back(transformer); - } - node = std::move(res); - return true; -} - - bool ParserColumnsTransformers::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { ParserKeyword apply("APPLY"); @@ -1488,6 +1440,122 @@ bool ParserQualifiedAsterisk::parseImpl(Pos & pos, ASTPtr & node, Expected & exp return true; } +/// Parse (columns_list) or ('REGEXP'). +static bool parseColumnsMatcherBody(IParser::Pos & pos, ASTPtr & node, Expected & expected, ParserColumnsTransformers::ColumnTransformers allowed_transformers) +{ + if (pos->type != TokenType::OpeningRoundBracket) + return false; + ++pos; + + ParserList columns_p(std::make_unique(false, true), std::make_unique(TokenType::Comma), false); + ParserStringLiteral regex; + + ASTPtr column_list; + ASTPtr regex_node; + if (!columns_p.parse(pos, column_list, expected) && !regex.parse(pos, regex_node, expected)) + return false; + + if (pos->type != TokenType::ClosingRoundBracket) + return false; + ++pos; + + ASTPtr res; + if (column_list) + { + auto list_matcher = std::make_shared(); + list_matcher->column_list = column_list; + res = list_matcher; + } + else + { + auto regexp_matcher = std::make_shared(); + regexp_matcher->setPattern(regex_node->as().value.get()); + res = regexp_matcher; + } + + ParserColumnsTransformers transformers_p(allowed_transformers); + ASTPtr transformer; + while (transformers_p.parse(pos, transformer, expected)) + { + res->children.push_back(transformer); + } + + node = std::move(res); + return true; +} + +bool ParserColumnsMatcher::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) +{ + ParserKeyword columns("COLUMNS"); + + if (!columns.ignore(pos, expected)) + return false; + + return parseColumnsMatcherBody(pos, node, expected, allowed_transformers); +} + +bool ParserQualifiedColumnsMatcher::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) +{ + if (!ParserCompoundIdentifier(true, true).parse(pos, node, expected)) + return false; + + auto identifier_node = node; + const auto & identifier_node_typed = identifier_node->as(); + + /// ParserCompoundIdentifier parse identifier.COLUMNS + if (identifier_node_typed.name_parts.size() == 1 || identifier_node_typed.name_parts.back() != "COLUMNS") + return false; + + /// TODO: ASTTableIdentifier can contain only 2 parts + + if (identifier_node_typed.name_parts.size() == 2) + { + auto table_name = identifier_node_typed.name_parts[0]; + identifier_node = std::make_shared(table_name); + } + else + { + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Expected identifier to contain no more than 2 parts. 
Actual {}", + identifier_node_typed.full_name); + } + + if (!parseColumnsMatcherBody(pos, node, expected, allowed_transformers)) + return false; + + if (auto * columns_list_matcher = node->as()) + { + auto result = std::make_shared(); + result->column_list = std::move(columns_list_matcher->column_list); + + result->children.reserve(columns_list_matcher->children.size() + 1); + result->children.push_back(std::move(identifier_node)); + + for (auto && child : columns_list_matcher->children) + result->children.push_back(std::move(child)); + + node = result; + } + else if (auto * column_regexp_matcher = node->as()) + { + auto result = std::make_shared(); + result->setMatcher(column_regexp_matcher->getMatcher()); + + result->children.reserve(column_regexp_matcher->children.size() + 1); + result->children.push_back(std::move(identifier_node)); + + for (auto && child : column_regexp_matcher->children) + result->children.push_back(std::move(child)); + + node = result; + } + else + { + throw Exception(ErrorCodes::LOGICAL_ERROR, "Qualified COLUMNS matcher expected to be list or regexp"); + } + + return true; +} bool ParserSubstitution::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { diff --git a/src/Parsers/ExpressionElementParsers.h b/src/Parsers/ExpressionElementParsers.h index f538555f0c1..8a9647dc86f 100644 --- a/src/Parsers/ExpressionElementParsers.h +++ b/src/Parsers/ExpressionElementParsers.h @@ -104,7 +104,7 @@ protected: bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; }; -/** COLUMNS('') +/** COLUMNS(columns_names) or COLUMNS('') */ class ParserColumnsMatcher : public IParserBase { @@ -121,6 +121,23 @@ protected: ColumnTransformers allowed_transformers; }; +/** Qualified columns matcher identifier.COLUMNS(columns_names) or identifier.COLUMNS('') + */ +class ParserQualifiedColumnsMatcher : public IParserBase +{ +public: + using ColumnTransformers = ParserColumnsTransformers::ColumnTransformers; + explicit ParserQualifiedColumnsMatcher(ColumnTransformers allowed_transformers_ = ParserColumnsTransformers::AllTransformers) + : allowed_transformers(allowed_transformers_) + {} + +protected: + const char * getName() const override { return "qualified COLUMNS matcher"; } + bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; + + ColumnTransformers allowed_transformers; +}; + // Allows to make queries like SELECT SUM() FILTER(WHERE ) FROM ... class ParserFilterClause : public IParserBase { diff --git a/src/Parsers/ExpressionListParsers.cpp b/src/Parsers/ExpressionListParsers.cpp index f7a016a59e4..c362340d013 100644 --- a/src/Parsers/ExpressionListParsers.cpp +++ b/src/Parsers/ExpressionListParsers.cpp @@ -478,7 +478,10 @@ struct Operator { Operator() = default; - Operator(const std::string & function_name_, int priority_, int arity_ = 2, OperatorType type_ = OperatorType::None) + Operator(const std::string & function_name_, + int priority_, + int arity_, + OperatorType type_ = OperatorType::None) : type(type_), priority(priority_), arity(arity_), function_name(function_name_) {} OperatorType type; @@ -487,6 +490,14 @@ struct Operator std::string function_name; }; +template +static std::shared_ptr makeASTFunction(Operator & op, Args &&... 
args) +{ + auto ast_function = makeASTFunction(op.function_name, std::forward(args)...); + ast_function->is_lambda_function = op.type == OperatorType::Lambda; + return ast_function; +} + enum class Checkpoint { None, @@ -506,10 +517,8 @@ enum class Checkpoint class Layer { public: - explicit Layer(bool allow_alias_ = true, bool allow_alias_without_as_keyword_ = true) : - allow_alias(allow_alias_), allow_alias_without_as_keyword(allow_alias_without_as_keyword_) - { - } + explicit Layer(bool allow_alias_ = true, bool allow_alias_without_as_keyword_ = false) : + allow_alias(allow_alias_), allow_alias_without_as_keyword(allow_alias_without_as_keyword_) {} virtual ~Layer() = default; @@ -552,13 +561,10 @@ public: virtual bool getResult(ASTPtr & node) { - if (elements.size() == 1) - { - node = std::move(elements[0]); - return true; - } + if (!finished) + return false; - return false; + return getResultImpl(node); } virtual bool parse(IParser::Pos & /*pos*/, Expected & /*expected*/, Action & /*action*/) = 0; @@ -612,13 +618,17 @@ public: /// bool mergeElement(bool push_to_elements = true) { + parsed_alias = false; + Operator cur_op; while (popOperator(cur_op)) { ASTPtr function; - // Special case of ternary operator - if (cur_op.type == OperatorType::StartIf) + // We should not meet the starting part of the operator while finishing an element + if (cur_op.type == OperatorType::StartIf || + cur_op.type == OperatorType::StartBetween || + cur_op.type == OperatorType::StartNotBetween) return false; if (cur_op.type == OperatorType::FinishIf) @@ -628,10 +638,6 @@ public: return false; } - // Special case of a BETWEEN b AND c operator - if (cur_op.type == OperatorType::StartBetween || cur_op.type == OperatorType::StartNotBetween) - return false; - if (cur_op.type == OperatorType::FinishBetween) { Operator tmp_op; @@ -651,7 +657,7 @@ public: } else { - function = makeASTFunction(cur_op.function_name); + function = makeASTFunction(cur_op); if (!popLastNOperands(function->children[0]->children, cur_op.arity)) return false; @@ -727,6 +733,9 @@ public: /// In order to distinguish them we keep a counter of BETWEENs without matching ANDs. int between_counter = 0; + /// Flag we set when we parsed alias to avoid parsing next element as alias + bool parsed_alias = false; + bool allow_alias = true; bool allow_alias_without_as_keyword = true; @@ -734,6 +743,17 @@ public: Checkpoint current_checkpoint = Checkpoint::None; protected: + virtual bool getResultImpl(ASTPtr & node) + { + if (elements.size() == 1) + { + node = std::move(elements[0]); + return true; + } + + return false; + } + std::vector operators; ASTs operands; ASTs elements; @@ -754,17 +774,12 @@ public: bool getResult(ASTPtr & node) override { /// We can exit the main cycle outside the parse() function, - /// so we need to merge the element here + /// so we need to merge the element here. + /// Because of this 'finished' flag can also not be set. if (!mergeElement()) return false; - if (elements.size() == 1) - { - node = std::move(elements[0]); - return true; - } - - return false; + return Layer::getResultImpl(node); } bool parse(IParser::Pos & pos, Expected & /*expected*/, Action & /*action*/) override @@ -776,16 +791,18 @@ public: } }; - /// Basic layer for a function with certain separator and end tokens: /// 1. If we parse a separator we should merge current operands and operators /// into one element and push in to 'elements' vector. /// 2. If we parse an ending token, we should merge everything as in (1) and /// also set 'finished' flag. 
template -class BaseLayer : public Layer +class LayerWithSeparator : public Layer { public: + explicit LayerWithSeparator(bool allow_alias_ = true, bool allow_alias_without_as_keyword_ = false) : + Layer(allow_alias_, allow_alias_without_as_keyword_) {} + bool parse(IParser::Pos & pos, Expected & expected, Action & action) override { if (ParserToken(separator).ignore(pos, expected)) @@ -809,11 +826,11 @@ public: } }; - -class OrdinaryFunctionLayer : public Layer +/// Layer for regular and aggregate functions without syntax sugar +class FunctionLayer : public Layer { public: - explicit OrdinaryFunctionLayer(String function_name_, bool allow_function_parameters_ = true) + explicit FunctionLayer(String function_name_, bool allow_function_parameters_ = true) : function_name(function_name_), allow_function_parameters(allow_function_parameters_){} bool parse(IParser::Pos & pos, Expected & expected, Action & action) override @@ -958,7 +975,7 @@ public: if (parameters) { - function_node->parameters = parameters; + function_node->parameters = std::move(parameters); function_node->children.push_back(function_node->parameters); } @@ -991,7 +1008,7 @@ public: return false; } - elements = {function_node}; + elements = {std::move(function_node)}; finished = true; } @@ -1015,17 +1032,6 @@ private: class RoundBracketsLayer : public Layer { public: - bool getResult(ASTPtr & node) override - { - // Round brackets can mean priority operator as well as function tuple() - if (!is_tuple && elements.size() == 1) - node = std::move(elements[0]); - else - node = makeASTFunction("tuple", std::move(elements)); - - return true; - } - bool parse(IParser::Pos & pos, Expected & expected, Action & action) override { if (ParserToken(TokenType::Comma).ignore(pos, expected)) @@ -1055,41 +1061,57 @@ public: return true; } + +protected: + bool getResultImpl(ASTPtr & node) override + { + // Round brackets can mean priority operator as well as function tuple() + if (!is_tuple && elements.size() == 1) + node = std::move(elements[0]); + else + node = makeASTFunction("tuple", std::move(elements)); + + return true; + } + private: bool is_tuple = false; }; /// Layer for array square brackets operator -class ArrayLayer : public BaseLayer +class ArrayLayer : public LayerWithSeparator { public: - bool getResult(ASTPtr & node) override + bool parse(IParser::Pos & pos, Expected & expected, Action & action) override + { + return LayerWithSeparator::parse(pos, expected, action); + } + +protected: + bool getResultImpl(ASTPtr & node) override { node = makeASTFunction("array", std::move(elements)); return true; } - - bool parse(IParser::Pos & pos, Expected & expected, Action & action) override - { - return BaseLayer::parse(pos, expected, action); - } }; /// Layer for arrayElement square brackets operator /// This layer does not create a function, it is only needed to parse closing token /// and return only one element. 
-class ArrayElementLayer : public BaseLayer +class ArrayElementLayer : public LayerWithSeparator { public: bool parse(IParser::Pos & pos, Expected & expected, Action & action) override { - return BaseLayer::parse(pos, expected, action); + return LayerWithSeparator::parse(pos, expected, action); } }; class CastLayer : public Layer { public: + CastLayer() : Layer(/*allow_alias*/ true, /*allow_alias_without_as_keyword*/ true) {} + bool parse(IParser::Pos & pos, Expected & expected, Action & action) override { /// CAST(x [AS alias1], T [AS alias2]) or CAST(x [AS alias1] AS T) @@ -1185,25 +1207,10 @@ public: } }; -class ExtractLayer : public BaseLayer +class ExtractLayer : public LayerWithSeparator { public: - bool getResult(ASTPtr & node) override - { - if (state == 2) - { - if (elements.empty()) - return false; - - node = makeASTFunction(interval_kind.toNameOfFunctionExtractTimePart(), elements[0]); - } - else - { - node = makeASTFunction("extract", std::move(elements)); - } - - return true; - } + ExtractLayer() : LayerWithSeparator(/*allow_alias*/ true, /*allow_alias_without_as_keyword*/ true) {} bool parse(IParser::Pos & pos, Expected & expected, Action & action) override { @@ -1232,7 +1239,7 @@ public: if (state == 1) { - return BaseLayer::parse(pos, expected, action); + return LayerWithSeparator::parse(pos, expected, action); } if (state == 2) @@ -1250,6 +1257,25 @@ public: return true; } +protected: + bool getResultImpl(ASTPtr & node) override + { + if (state == 2) + { + if (elements.empty()) + return false; + + node = makeASTFunction(interval_kind.toNameOfFunctionExtractTimePart(), elements[0]); + } + else + { + node = makeASTFunction("extract", std::move(elements)); + } + + return true; + } + + private: IntervalKind interval_kind; }; @@ -1257,11 +1283,7 @@ private: class SubstringLayer : public Layer { public: - bool getResult(ASTPtr & node) override - { - node = makeASTFunction("substring", std::move(elements)); - return true; - } + SubstringLayer() : Layer(/*allow_alias*/ true, /*allow_alias_without_as_keyword*/ true) {} bool parse(IParser::Pos & pos, Expected & expected, Action & action) override { @@ -1312,19 +1334,19 @@ public: return true; } + +protected: + bool getResultImpl(ASTPtr & node) override + { + node = makeASTFunction("substring", std::move(elements)); + return true; + } }; class PositionLayer : public Layer { public: - bool getResult(ASTPtr & node) override - { - if (state == 2) - std::swap(elements[1], elements[0]); - - node = makeASTFunction("position", std::move(elements)); - return true; - } + PositionLayer() : Layer(/*allow_alias*/ true, /*allow_alias_without_as_keyword*/ true) {} bool parse(IParser::Pos & pos, Expected & expected, Action & action) override { @@ -1380,12 +1402,23 @@ public: return true; } -}; +protected: + bool getResultImpl(ASTPtr & node) override + { + if (state == 2) + std::swap(elements[1], elements[0]); + + node = makeASTFunction("position", std::move(elements)); + return true; + } +}; class ExistsLayer : public Layer { public: + ExistsLayer() : Layer(/*allow_alias*/ true, /*allow_alias_without_as_keyword*/ true) {} + bool parse(IParser::Pos & pos, Expected & expected, Action & /*action*/) override { ASTPtr node; @@ -1410,15 +1443,8 @@ public: class TrimLayer : public Layer { public: - TrimLayer(bool trim_left_, bool trim_right_) : trim_left(trim_left_), trim_right(trim_right_) - { - } - - bool getResult(ASTPtr & node) override - { - node = makeASTFunction(function_name, std::move(elements)); - return true; - } + TrimLayer(bool trim_left_, 
bool trim_right_) + : Layer(/*allow_alias*/ true, /*allow_alias_without_as_keyword*/ true), trim_left(trim_left_), trim_right(trim_right_) {} bool parse(IParser::Pos & pos, Expected & expected, Action & action) override { @@ -1561,6 +1587,14 @@ public: return true; } + +protected: + bool getResultImpl(ASTPtr & node) override + { + node = makeASTFunction(function_name, std::move(elements)); + return true; + } + private: bool trim_left; bool trim_right; @@ -1570,27 +1604,11 @@ private: String function_name; }; - -class DateAddLayer : public BaseLayer +class DateAddLayer : public LayerWithSeparator { public: - explicit DateAddLayer(const char * function_name_) : function_name(function_name_) - { - } - - bool getResult(ASTPtr & node) override - { - if (parsed_interval_kind) - { - elements[0] = makeASTFunction(interval_kind.toNameOfFunctionToIntervalDataType(), elements[0]); - node = makeASTFunction(function_name, elements[1], elements[0]); - } - else - node = makeASTFunction(function_name, std::move(elements)); - - return true; - } - + explicit DateAddLayer(const char * function_name_) + : LayerWithSeparator(/*allow_alias*/ true, /*allow_alias_without_as_keyword*/ true), function_name(function_name_) {} bool parse(IParser::Pos & pos, Expected & expected, Action & action) override { @@ -1615,39 +1633,39 @@ public: if (state == 1) { - return BaseLayer::parse(pos, expected, action); + return LayerWithSeparator::parse(pos, expected, action); } return true; } +protected: + bool getResultImpl(ASTPtr & node) override + { + if (parsed_interval_kind) + { + if (elements.size() < 2) + return false; + + elements[0] = makeASTFunction(interval_kind.toNameOfFunctionToIntervalDataType(), elements[0]); + node = makeASTFunction(function_name, elements[1], elements[0]); + } + else + node = makeASTFunction(function_name, std::move(elements)); + + return true; + } + private: IntervalKind interval_kind; const char * function_name; bool parsed_interval_kind = false; }; - -class DateDiffLayer : public BaseLayer +class DateDiffLayer : public LayerWithSeparator { public: - bool getResult(ASTPtr & node) override - { - if (parsed_interval_kind) - { - if (elements.size() == 2) - node = makeASTFunction("dateDiff", std::make_shared(interval_kind.toDateDiffUnit()), elements[0], elements[1]); - else if (elements.size() == 3) - node = makeASTFunction("dateDiff", std::make_shared(interval_kind.toDateDiffUnit()), elements[0], elements[1], elements[2]); - else - return false; - } - else - { - node = makeASTFunction("dateDiff", std::move(elements)); - } - return true; - } + DateDiffLayer() : LayerWithSeparator(/*allow_alias*/ true, /*allow_alias_without_as_keyword*/ true) {} bool parse(IParser::Pos & pos, Expected & expected, Action & action) override { @@ -1669,21 +1687,41 @@ public: if (state == 1) { - return BaseLayer::parse(pos, expected, action); + return LayerWithSeparator::parse(pos, expected, action); } return true; } +protected: + bool getResultImpl(ASTPtr & node) override + { + if (parsed_interval_kind) + { + if (elements.size() == 2) + node = makeASTFunction("dateDiff", std::make_shared(interval_kind.toDateDiffUnit()), elements[0], elements[1]); + else if (elements.size() == 3) + node = makeASTFunction("dateDiff", std::make_shared(interval_kind.toDateDiffUnit()), elements[0], elements[1], elements[2]); + else + return false; + } + else + { + node = makeASTFunction("dateDiff", std::move(elements)); + } + return true; + } + private: IntervalKind interval_kind; bool parsed_interval_kind = false; }; - class IntervalLayer : 
public Layer { public: + IntervalLayer() : Layer(/*allow_alias*/ true, /*allow_alias_without_as_keyword*/ true) {} + bool parse(IParser::Pos & pos, Expected & expected, Action & action) override { /// INTERVAL 1 HOUR or INTERVAL expr HOUR @@ -1758,86 +1796,11 @@ private: IntervalKind interval_kind; }; -/// Layer for table function 'view' and 'viewIfPermitted' -class ViewLayer : public Layer -{ -public: - explicit ViewLayer(bool if_permitted_) : if_permitted(if_permitted_) {} - - bool getResult(ASTPtr & node) override - { - if (if_permitted) - node = makeASTFunction("viewIfPermitted", std::move(elements)); - else - node = makeASTFunction("view", std::move(elements)); - - return true; - } - - bool parse(IParser::Pos & pos, Expected & expected, Action & /*action*/) override - { - /// view(SELECT ...) - /// viewIfPermitted(SELECT ... ELSE func(...)) - /// - /// 0. Parse the SELECT query and if 'if_permitted' parse 'ELSE' keyword (-> 1) else (finished) - /// 1. Parse closing token - - if (state == 0) - { - ASTPtr query; - - bool maybe_an_subquery = pos->type == TokenType::OpeningRoundBracket; - - if (!ParserSelectWithUnionQuery().parse(pos, query, expected)) - return false; - - auto & select_ast = query->as(); - if (select_ast.list_of_selects->children.size() == 1 && maybe_an_subquery) - { - // It's an subquery. Bail out. - return false; - } - - pushResult(query); - - if (!if_permitted) - { - if (!ParserToken(TokenType::ClosingRoundBracket).ignore(pos, expected)) - return false; - - finished = true; - return true; - } - - if (!ParserKeyword{"ELSE"}.ignore(pos, expected)) - return false; - - state = 1; - return true; - } - - if (state == 1) - { - if (ParserToken(TokenType::ClosingRoundBracket).ignore(pos, expected)) - { - if (!mergeElement()) - return false; - - finished = true; - } - } - - return true; - } - -private: - bool if_permitted; -}; - - class CaseLayer : public Layer { public: + CaseLayer() : Layer(/*allow_alias*/ true, /*allow_alias_without_as_keyword*/ true) {} + bool parse(IParser::Pos & pos, Expected & expected, Action & action) override { /// CASE [x] WHEN expr THEN expr [WHEN expr THEN expr [...]] [ELSE expr] END @@ -1926,6 +1889,83 @@ private: bool has_case_expr; }; +/// Layer for table function 'view' and 'viewIfPermitted' +class ViewLayer : public Layer +{ +public: + explicit ViewLayer(bool if_permitted_) : if_permitted(if_permitted_) {} + + bool parse(IParser::Pos & pos, Expected & expected, Action & /*action*/) override + { + /// view(SELECT ...) + /// viewIfPermitted(SELECT ... ELSE func(...)) + /// + /// 0. Parse the SELECT query and if 'if_permitted' parse 'ELSE' keyword (-> 1) else (finished) + /// 1. Parse closing token + + if (state == 0) + { + ASTPtr query; + + bool maybe_an_subquery = pos->type == TokenType::OpeningRoundBracket; + + if (!ParserSelectWithUnionQuery().parse(pos, query, expected)) + return false; + + auto & select_ast = query->as(); + if (select_ast.list_of_selects->children.size() == 1 && maybe_an_subquery) + { + // It's an subquery. Bail out. 
+ return false; + } + + pushResult(query); + + if (!if_permitted) + { + if (!ParserToken(TokenType::ClosingRoundBracket).ignore(pos, expected)) + return false; + + finished = true; + return true; + } + + if (!ParserKeyword{"ELSE"}.ignore(pos, expected)) + return false; + + state = 1; + return true; + } + + if (state == 1) + { + if (ParserToken(TokenType::ClosingRoundBracket).ignore(pos, expected)) + { + if (!mergeElement()) + return false; + + finished = true; + } + } + + return true; + } + +protected: + bool getResultImpl(ASTPtr & node) override + { + if (if_permitted) + node = makeASTFunction("viewIfPermitted", std::move(elements)); + else + node = makeASTFunction("view", std::move(elements)); + + return true; + } + +private: + bool if_permitted; +}; + std::unique_ptr getFunctionLayer(ASTPtr identifier, bool is_table_function, bool allow_function_parameters_ = true) { @@ -1990,9 +2030,9 @@ std::unique_ptr getFunctionLayer(ASTPtr identifier, bool is_table_functio || function_name_lowercase == "timestampdiff" || function_name_lowercase == "timestamp_diff") return std::make_unique(); else if (function_name_lowercase == "grouping") - return std::make_unique(function_name_lowercase, allow_function_parameters_); + return std::make_unique(function_name_lowercase, allow_function_parameters_); else - return std::make_unique(function_name, allow_function_parameters_); + return std::make_unique(function_name, allow_function_parameters_); } @@ -2076,6 +2116,7 @@ struct ParserExpressionImpl // Recursion ParserQualifiedAsterisk qualified_asterisk_parser; ParserColumnsMatcher columns_matcher_parser; + ParserQualifiedColumnsMatcher qualified_columns_matcher_parser; ParserSubquery subquery_parser; bool parse(std::unique_ptr start, IParser::Pos & pos, ASTPtr & node, Expected & expected); @@ -2141,22 +2182,22 @@ std::vector> ParserExpressionImpl::operators_t {"<", Operator("less", 9, 2, OperatorType::Comparison)}, {">", Operator("greater", 9, 2, OperatorType::Comparison)}, {"=", Operator("equals", 9, 2, OperatorType::Comparison)}, - {"LIKE", Operator("like", 9)}, - {"ILIKE", Operator("ilike", 9)}, - {"NOT LIKE", Operator("notLike", 9)}, - {"NOT ILIKE", Operator("notILike", 9)}, - {"IN", Operator("in", 9)}, - {"NOT IN", Operator("notIn", 9)}, - {"GLOBAL IN", Operator("globalIn", 9)}, - {"GLOBAL NOT IN", Operator("globalNotIn", 9)}, + {"LIKE", Operator("like", 9, 2)}, + {"ILIKE", Operator("ilike", 9, 2)}, + {"NOT LIKE", Operator("notLike", 9, 2)}, + {"NOT ILIKE", Operator("notILike", 9, 2)}, + {"IN", Operator("in", 9, 2)}, + {"NOT IN", Operator("notIn", 9, 2)}, + {"GLOBAL IN", Operator("globalIn", 9, 2)}, + {"GLOBAL NOT IN", Operator("globalNotIn", 9, 2)}, {"||", Operator("concat", 10, 2, OperatorType::Mergeable)}, - {"+", Operator("plus", 11)}, - {"-", Operator("minus", 11)}, - {"*", Operator("multiply", 12)}, - {"/", Operator("divide", 12)}, - {"%", Operator("modulo", 12)}, - {"MOD", Operator("modulo", 12)}, - {"DIV", Operator("intDiv", 12)}, + {"+", Operator("plus", 11, 2)}, + {"-", Operator("minus", 11, 2)}, + {"*", Operator("multiply", 12, 2)}, + {"/", Operator("divide", 12, 2)}, + {"%", Operator("modulo", 12, 2)}, + {"MOD", Operator("modulo", 12, 2)}, + {"DIV", Operator("intDiv", 12, 2)}, {".", Operator("tupleElement", 14, 2, OperatorType::TupleElement)}, {"[", Operator("arrayElement", 14, 2, OperatorType::ArrayElement)}, {"::", Operator("CAST", 14, 2, OperatorType::Cast)}, @@ -2304,7 +2345,7 @@ Action ParserExpressionImpl::tryParseOperand(Layers & layers, IParser::Pos & pos if 
(!layers.back()->popOperand(argument)) return Action::NONE; - function = makeASTFunction(prev_op.function_name, argument, tmp); + function = makeASTFunction(prev_op, argument, tmp); if (!modifyAST(function, subquery_function_type)) return Action::NONE; @@ -2353,7 +2394,8 @@ Action ParserExpressionImpl::tryParseOperand(Layers & layers, IParser::Pos & pos literal_parser.parse(pos, tmp, expected) || asterisk_parser.parse(pos, tmp, expected) || qualified_asterisk_parser.parse(pos, tmp, expected) || - columns_matcher_parser.parse(pos, tmp, expected)) + columns_matcher_parser.parse(pos, tmp, expected) || + qualified_columns_matcher_parser.parse(pos, tmp, expected)) { layers.back()->pushOperand(std::move(tmp)); } @@ -2427,11 +2469,15 @@ Action ParserExpressionImpl::tryParseOperator(Layers & layers, IParser::Pos & po if (cur_op == operators_table.end()) { + ParserAlias alias_parser(layers.back()->allow_alias_without_as_keyword); auto old_pos = pos; - if (layers.back()->allow_alias && ParserAlias(layers.back()->allow_alias_without_as_keyword).parse(pos, tmp, expected)) + if (layers.back()->allow_alias && + !layers.back()->parsed_alias && + alias_parser.parse(pos, tmp, expected) && + layers.back()->insertAlias(tmp)) { - if (layers.back()->insertAlias(tmp)) - return Action::OPERATOR; + layers.back()->parsed_alias = true; + return Action::OPERATOR; } pos = old_pos; return Action::NONE; @@ -2488,7 +2534,7 @@ Action ParserExpressionImpl::tryParseOperator(Layers & layers, IParser::Pos & po } else { - function = makeASTFunction(prev_op.function_name); + function = makeASTFunction(prev_op); if (!layers.back()->popLastNOperands(function->children[0]->children, prev_op.arity)) return Action::NONE; diff --git a/src/Parsers/ParserDescribeTableQuery.cpp b/src/Parsers/ParserDescribeTableQuery.cpp index 0f768e22324..ad6d2c5bcc6 100644 --- a/src/Parsers/ParserDescribeTableQuery.cpp +++ b/src/Parsers/ParserDescribeTableQuery.cpp @@ -33,7 +33,8 @@ bool ParserDescribeTableQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & ex if (!ParserTableExpression().parse(pos, table_expression, expected)) return false; - query->table_expression = table_expression; + query->children.push_back(std::move(table_expression)); + query->table_expression = query->children.back(); node = query; diff --git a/src/Parsers/ParserExplainQuery.cpp b/src/Parsers/ParserExplainQuery.cpp index d32d4444c36..7fc997f9548 100644 --- a/src/Parsers/ParserExplainQuery.cpp +++ b/src/Parsers/ParserExplainQuery.cpp @@ -19,6 +19,7 @@ bool ParserExplainQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected ParserKeyword s_ast("AST"); ParserKeyword s_explain("EXPLAIN"); ParserKeyword s_syntax("SYNTAX"); + ParserKeyword s_query_tree("QUERY TREE"); ParserKeyword s_pipeline("PIPELINE"); ParserKeyword s_plan("PLAN"); ParserKeyword s_estimates("ESTIMATE"); @@ -33,6 +34,8 @@ bool ParserExplainQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected kind = ASTExplainQuery::ExplainKind::ParsedAST; else if (s_syntax.ignore(pos, expected)) kind = ASTExplainQuery::ExplainKind::AnalyzedSyntax; + else if (s_query_tree.ignore(pos, expected)) + kind = ASTExplainQuery::ExplainKind::QueryTree; else if (s_pipeline.ignore(pos, expected)) kind = ASTExplainQuery::ExplainKind::QueryPipeline; else if (s_plan.ignore(pos, expected)) @@ -84,6 +87,13 @@ bool ParserExplainQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected explain_query->setTableFunction(table_function); explain_query->setTableOverride(table_override); } + else if (kind == 
ASTExplainQuery::ExplainKind::QueryTree) + { + if (select_p.parse(pos, query, expected)) + explain_query->setExplainedQuery(std::move(query)); + else + return false; + } else if (kind == ASTExplainQuery::ExplainKind::CurrentTransaction) { /// Nothing to parse @@ -103,7 +113,9 @@ bool ParserExplainQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected explain_query->setExplainedQuery(std::move(query)); } else + { return false; + } node = std::move(explain_query); return true; diff --git a/src/Parsers/ParserSampleRatio.cpp b/src/Parsers/ParserSampleRatio.cpp index 2f444bcf9e8..b6be04cbcc0 100644 --- a/src/Parsers/ParserSampleRatio.cpp +++ b/src/Parsers/ParserSampleRatio.cpp @@ -14,7 +14,7 @@ static bool parseDecimal(const char * pos, const char * end, ASTSampleRatio::Rat { UInt64 num_before = 0; UInt64 num_after = 0; - Int64 exponent = 0; + Int32 exponent = 0; const char * pos_after_first_num = tryReadIntText(num_before, pos, end); @@ -28,12 +28,12 @@ static bool parseDecimal(const char * pos, const char * end, ASTSampleRatio::Rat if (!has_num_before_point && !has_point) return false; - size_t number_of_digits_after_point = 0; + int number_of_digits_after_point = 0; if (has_point) { const char * pos_after_second_num = tryReadIntText(num_after, pos, end); - number_of_digits_after_point = pos_after_second_num - pos; + number_of_digits_after_point = static_cast(pos_after_second_num - pos); pos = pos_after_second_num; } diff --git a/src/Parsers/SelectUnionMode.cpp b/src/Parsers/SelectUnionMode.cpp new file mode 100644 index 00000000000..6d56a2b219f --- /dev/null +++ b/src/Parsers/SelectUnionMode.cpp @@ -0,0 +1,32 @@ +#include + + +namespace DB +{ + +const char * toString(SelectUnionMode mode) +{ + switch (mode) + { + case SelectUnionMode::UNION_DEFAULT: + return "UNION_DEFAULT"; + case SelectUnionMode::UNION_ALL: + return "UNION_ALL"; + case SelectUnionMode::UNION_DISTINCT: + return "UNION_DISTINCT"; + case SelectUnionMode::EXCEPT_DEFAULT: + return "EXCEPT_DEFAULT"; + case SelectUnionMode::EXCEPT_ALL: + return "EXCEPT_ALL"; + case SelectUnionMode::EXCEPT_DISTINCT: + return "EXCEPT_DISTINCT"; + case SelectUnionMode::INTERSECT_DEFAULT: + return "INTERSECT_DEFAULT"; + case SelectUnionMode::INTERSECT_ALL: + return "INTERSECT_ALL"; + case SelectUnionMode::INTERSECT_DISTINCT: + return "INTERSECT_DEFAULT"; + } +} + +} diff --git a/src/Parsers/SelectUnionMode.h b/src/Parsers/SelectUnionMode.h index ca3637612aa..5c72ce65eb2 100644 --- a/src/Parsers/SelectUnionMode.h +++ b/src/Parsers/SelectUnionMode.h @@ -18,6 +18,8 @@ enum class SelectUnionMode INTERSECT_DISTINCT }; +const char * toString(SelectUnionMode mode); + using SelectUnionModes = std::vector; using SelectUnionModesSet = std::unordered_set; diff --git a/src/Parsers/parseQuery.cpp b/src/Parsers/parseQuery.cpp index af8c9dc58a6..4a0c60da48d 100644 --- a/src/Parsers/parseQuery.cpp +++ b/src/Parsers/parseQuery.cpp @@ -236,7 +236,8 @@ ASTPtr tryParseQuery( { const char * query_begin = _out_query_end; Tokens tokens(query_begin, all_queries_end, max_query_size); - IParser::Pos token_iterator(tokens, max_parser_depth); + /// NOTE: consider use UInt32 for max_parser_depth setting. 
+ IParser::Pos token_iterator(tokens, static_cast(max_parser_depth)); if (token_iterator->isEnd() || token_iterator->type == TokenType::Semicolon) diff --git a/src/Planner/ActionsChain.cpp b/src/Planner/ActionsChain.cpp new file mode 100644 index 00000000000..594d26a679c --- /dev/null +++ b/src/Planner/ActionsChain.cpp @@ -0,0 +1,170 @@ +#include + +#include +#include + +#include +#include +#include +#include + +namespace DB +{ + +ActionsChainStep::ActionsChainStep(ActionsDAGPtr actions_, AvailableOutputColumnsStrategy available_output_columns_stategy_) + : actions(std::move(actions_)) + , available_output_columns_strategy(available_output_columns_stategy_) +{ + initialize(); +} + +ActionsChainStep::ActionsChainStep(ActionsDAGPtr actions_, + AvailableOutputColumnsStrategy available_output_columns_stategy_, + ColumnsWithTypeAndName additional_output_columns_) + : actions(std::move(actions_)) + , available_output_columns_strategy(available_output_columns_stategy_) + , additional_output_columns(std::move(additional_output_columns_)) +{ + initialize(); +} + + +void ActionsChainStep::finalizeInputAndOutputColumns(const NameSet & child_input_columns) +{ + child_required_output_columns_names.clear(); + + auto child_input_columns_copy = child_input_columns; + + std::unordered_set output_nodes_names; + output_nodes_names.reserve(actions->getOutputs().size()); + + for (auto & output_node : actions->getOutputs()) + output_nodes_names.insert(output_node->result_name); + + for (const auto & node : actions->getNodes()) + { + auto it = child_input_columns_copy.find(node.result_name); + if (it == child_input_columns_copy.end()) + continue; + + child_input_columns_copy.erase(it); + child_required_output_columns_names.insert(node.result_name); + + if (output_nodes_names.contains(node.result_name)) + continue; + + actions->getOutputs().push_back(&node); + output_nodes_names.insert(node.result_name); + } + + actions->removeUnusedActions(); + /// TODO: Analyzer fix ActionsDAG input and constant nodes with same name + actions->projectInput(); + initialize(); +} + +void ActionsChainStep::dump(WriteBuffer & buffer) const +{ + buffer << "DAG" << '\n'; + buffer << actions->dumpDAG(); + + if (!additional_output_columns.empty()) + { + buffer << "Additional output columns " << additional_output_columns.size() << '\n'; + for (const auto & column : additional_output_columns) + buffer << "Name " << column.name << " type " << column.type->getName() << '\n'; + } + + if (!child_required_output_columns_names.empty()) + { + buffer << "Child required output columns " << boost::join(child_required_output_columns_names, ", "); + buffer << '\n'; + } +} + +String ActionsChainStep::dump() const +{ + WriteBufferFromOwnString buffer; + dump(buffer); + + return buffer.str(); +} + +void ActionsChainStep::initialize() +{ + auto required_columns_names = actions->getRequiredColumnsNames(); + input_columns_names = NameSet(required_columns_names.begin(), required_columns_names.end()); + + available_output_columns.clear(); + + /// TODO: Analyzer fix ActionsDAG input and constant nodes with same name + std::unordered_set available_output_columns_names; + + if (available_output_columns_strategy == AvailableOutputColumnsStrategy::ALL_NODES) + { + for (const auto & node : actions->getNodes()) + { + if (available_output_columns_names.contains(node.result_name)) + continue; + + available_output_columns.emplace_back(node.column, node.result_type, node.result_name); + available_output_columns_names.insert(node.result_name); + } + } + else if 
(available_output_columns_strategy == AvailableOutputColumnsStrategy::OUTPUT_NODES) + { + for (const auto & node : actions->getOutputs()) + { + if (available_output_columns_names.contains(node->result_name)) + continue; + + available_output_columns.emplace_back(node->column, node->result_type, node->result_name); + available_output_columns_names.insert(node->result_name); + } + } + + available_output_columns.insert(available_output_columns.end(), additional_output_columns.begin(), additional_output_columns.end()); +} + +void ActionsChain::finalize() +{ + if (steps.empty()) + return; + + /// For last chain step there are no columns required in child nodes + NameSet empty_child_input_columns; + steps.back().get()->finalizeInputAndOutputColumns(empty_child_input_columns); + + Int64 steps_last_index = steps.size() - 1; + for (Int64 i = steps_last_index; i >= 1; --i) + { + auto & current_step = steps[i]; + auto & previous_step = steps[i - 1]; + + previous_step->finalizeInputAndOutputColumns(current_step->getInputColumnNames()); + } +} + +void ActionsChain::dump(WriteBuffer & buffer) const +{ + size_t steps_size = steps.size(); + + for (size_t i = 0; i < steps_size; ++i) + { + const auto & step = steps[i]; + buffer << "Step " << i << '\n'; + step->dump(buffer); + + buffer << '\n'; + } +} + +String ActionsChain::dump() const +{ + WriteBufferFromOwnString buffer; + dump(buffer); + + return buffer.str(); +} + +} diff --git a/src/Planner/ActionsChain.h b/src/Planner/ActionsChain.h new file mode 100644 index 00000000000..e2791ab7e35 --- /dev/null +++ b/src/Planner/ActionsChain.h @@ -0,0 +1,239 @@ +#pragma once + +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +/** Chain of query actions steps. This class is needed to eliminate unnecessary actions calculations. + * Each step is represented by actions DAG. + * + * Consider such example query: + * SELECT expr(id) FROM test_table WHERE expr(id) > 0. + * + * We want to reuse expr(id) from previous expressions step, and not recalculate it in projection. + * To do this we build a chain of all query action steps. + * For example: + * 1. Before where. + * 2. Before order by. + * 3. Projection. + * + * Initially root of chain is initialized with join tree query plan header. + * Each next chain step, must be initialized with previous step available output columns. + * That way we forward all available output columns (functions, columns, aliases) from first step of the chain to the + * last step. After chain is build we can finalize it. + * + * Each step has input columns (some of them are not necessary) and output columns. Before chain finalize output columns + * contain only necessary actions for step output calculation. + * For each step starting from last (i), we add columns that are necessary for this step to previous step (i - 1), + * and remove unused input columns of previous step(i - 1). + * That way we reuse already calculated expressions from first step to last step. + */ + +class ActionsChainStep; +using ActionsChainStepPtr = std::unique_ptr; +using ActionsChainSteps = std::vector; + +/// Actions chain step represent single step in actions chain. +class ActionsChainStep +{ +public: + /// Available output columns strategy for actions chain step + enum class AvailableOutputColumnsStrategy + { + ALL_NODES, + OUTPUT_NODES + }; + + /** Initialize actions step with actions dag. + * Input column names initialized using actions dag nodes with INPUT type. 
+ * + * If available output columns strategy is ALL_NODES, then available output columns initialized using actions dag nodes. + * If available output columns strategy is OUTPUT_NODES, then available output columns initialized using actions dag output nodes. + */ + explicit ActionsChainStep(ActionsDAGPtr actions_, AvailableOutputColumnsStrategy available_output_columns_stategy_ = AvailableOutputColumnsStrategy::ALL_NODES); + + explicit ActionsChainStep(ActionsDAGPtr actions_, + AvailableOutputColumnsStrategy available_output_columns_stategy_, + ColumnsWithTypeAndName additional_output_columns_); + + /// Get actions + ActionsDAGPtr & getActions() + { + return actions; + } + + /// Get actions + const ActionsDAGPtr & getActions() const + { + return actions; + } + + /// Get available output columns + const ColumnsWithTypeAndName & getAvailableOutputColumns() const + { + return available_output_columns; + } + + /// Get input column names + const NameSet & getInputColumnNames() const + { + return input_columns_names; + } + + /** Get child required output columns names. + * Initialized during finalizeOutputColumns method call. + */ + const NameSet & getChildRequiredOutputColumnsNames() const + { + return child_required_output_columns_names; + } + + /** Finalize step output columns and remove unnecessary input columns. + * If actions dag node has same name as child input column, it is added to actions output nodes. + */ + void finalizeInputAndOutputColumns(const NameSet & child_input_columns); + + /// Dump step into buffer + void dump(WriteBuffer & buffer) const; + + /// Dump step + String dump() const; + +private: + void initialize(); + + ActionsDAGPtr actions; + + AvailableOutputColumnsStrategy available_output_columns_strategy; + + NameSet input_columns_names; + + NameSet child_required_output_columns_names; + + ColumnsWithTypeAndName available_output_columns; + + ColumnsWithTypeAndName additional_output_columns; +}; + +/// Query actions chain +class ActionsChain +{ +public: + /// Add step into actions chain + void addStep(ActionsChainStepPtr step) + { + steps.emplace_back(std::move(step)); + } + + /// Get steps + const ActionsChainSteps & getSteps() const + { + return steps; + } + + /// Get steps size + size_t getStepsSize() const + { + return steps.size(); + } + + const ActionsChainStepPtr & at(size_t index) const + { + if (index >= steps.size()) + throw std::out_of_range("actions chain access is out of range"); + + return steps[index]; + } + + ActionsChainStepPtr & at(size_t index) + { + if (index >= steps.size()) + throw std::out_of_range("actions chain access is out of range"); + + return steps[index]; + } + + ActionsChainStepPtr & operator[](size_t index) + { + return steps[index]; + } + + const ActionsChainStepPtr & operator[](size_t index) const + { + return steps[index]; + } + + /// Get last step + ActionsChainStep * getLastStep() + { + return steps.back().get(); + } + + /// Get last step or throw exception if chain is empty + ActionsChainStep * getLastStepOrThrow() + { + if (steps.empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "ActionsChain is empty"); + + return steps.back().get(); + } + + /// Get last step index + size_t getLastStepIndex() + { + return steps.size() - 1; + } + + /// Get last step index or throw exception if chain is empty + size_t getLastStepIndexOrThrow() + { + if (steps.empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "ActionsChain is empty"); + + return steps.size() - 1; + } + + /// Get last step available output columns + const ColumnsWithTypeAndName & 
getLastStepAvailableOutputColumns() const + { + return steps.back()->getAvailableOutputColumns(); + } + + /// Get last step available output columns or throw exception if chain is empty + const ColumnsWithTypeAndName & getLastStepAvailableOutputColumnsOrThrow() const + { + if (steps.empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "ActionsChain is empty"); + + return steps.back()->getAvailableOutputColumns(); + } + + /// Get last step available output columns or null if chain is empty + const ColumnsWithTypeAndName * getLastStepAvailableOutputColumnsOrNull() const + { + if (steps.empty()) + return nullptr; + + return &steps.back()->getAvailableOutputColumns(); + } + + /// Finalize chain + void finalize(); + + /// Dump chain into buffer + void dump(WriteBuffer & buffer) const; + + /// Dump chain + String dump() const; + +private: + ActionsChainSteps steps; +}; + +} diff --git a/src/Planner/CMakeLists.txt b/src/Planner/CMakeLists.txt new file mode 100644 index 00000000000..766767b5c13 --- /dev/null +++ b/src/Planner/CMakeLists.txt @@ -0,0 +1,7 @@ +if (ENABLE_TESTS) + add_subdirectory(tests) +endif() + +if (ENABLE_EXAMPLES) + add_subdirectory(examples) +endif() diff --git a/src/Planner/CollectSets.cpp b/src/Planner/CollectSets.cpp new file mode 100644 index 00000000000..aa7014aba48 --- /dev/null +++ b/src/Planner/CollectSets.cpp @@ -0,0 +1,101 @@ +#include + +#include + +#include + +#include +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int UNSUPPORTED_METHOD; +} + +namespace +{ + +class CollectSetsVisitor : public ConstInDepthQueryTreeVisitor +{ +public: + explicit CollectSetsVisitor(PlannerContext & planner_context_) + : planner_context(planner_context_) + {} + + void visitImpl(const QueryTreeNodePtr & node) + { + auto * function_node = node->as(); + if (!function_node || !isNameOfInFunction(function_node->getFunctionName())) + return; + + auto in_first_argument = function_node->getArguments().getNodes().at(0); + auto in_second_argument = function_node->getArguments().getNodes().at(1); + auto in_second_argument_node_type = in_second_argument->getNodeType(); + + const auto & settings = planner_context.getQueryContext()->getSettingsRef(); + + String set_key = planner_context.createSetKey(in_second_argument); + + if (planner_context.hasSet(set_key)) + return; + + /// Tables and table functions are replaced with subquery at Analysis stage, except special Set table. + auto * second_argument_table = in_second_argument->as(); + StorageSet * storage_set = second_argument_table != nullptr ? 
dynamic_cast(second_argument_table->getStorage().get()) : nullptr; + + if (storage_set) + { + planner_context.registerSet(set_key, PlannerSet(storage_set->getSet())); + } + else if (auto constant_value = in_second_argument->getConstantValueOrNull()) + { + auto set = makeSetForConstantValue( + in_first_argument->getResultType(), + constant_value->getValue(), + constant_value->getType(), + settings); + + planner_context.registerSet(set_key, PlannerSet(std::move(set))); + } + else if (in_second_argument_node_type == QueryTreeNodeType::QUERY || + in_second_argument_node_type == QueryTreeNodeType::UNION) + { + SizeLimits size_limits_for_set = {settings.max_rows_in_set, settings.max_bytes_in_set, settings.set_overflow_mode}; + bool tranform_null_in = settings.transform_null_in; + auto set = std::make_shared(size_limits_for_set, false /*fill_set_elements*/, tranform_null_in); + + planner_context.registerSet(set_key, PlannerSet(std::move(set), in_second_argument)); + } + else + { + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, + "Function '{}' is supported only if second argument is constant or table expression", + function_node->getFunctionName()); + } + } + + static bool needChildVisit(const QueryTreeNodePtr &, const QueryTreeNodePtr & child_node) + { + return !(child_node->getNodeType() == QueryTreeNodeType::QUERY || child_node->getNodeType() == QueryTreeNodeType::UNION); + } + +private: + PlannerContext & planner_context; +}; + +} + +void collectSets(const QueryTreeNodePtr & node, PlannerContext & planner_context) +{ + CollectSetsVisitor visitor(planner_context); + visitor.visit(node); +} + +} diff --git a/src/Planner/CollectSets.h b/src/Planner/CollectSets.h new file mode 100644 index 00000000000..94f792e877b --- /dev/null +++ b/src/Planner/CollectSets.h @@ -0,0 +1,15 @@ +#pragma once + +#include + +#include + +namespace DB +{ + +/** Collect prepared sets and sets for subqueries that are necessary to execute IN function and its variations. + * Collected sets are registered in planner context. 
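+  * For example, for `x IN (1, 2, 3)` the set is created immediately from the constant value,
+  * while for `x IN (SELECT id FROM t)` an empty set is registered together with the subquery
+  * node and is filled later, when the subquery plan is built for the CreatingSets step.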
+ */ +void collectSets(const QueryTreeNodePtr & node, PlannerContext & planner_context); + +} diff --git a/src/Planner/CollectTableExpressionData.cpp b/src/Planner/CollectTableExpressionData.cpp new file mode 100644 index 00000000000..30ccc541507 --- /dev/null +++ b/src/Planner/CollectTableExpressionData.cpp @@ -0,0 +1,116 @@ +#include + +#include + +#include +#include +#include +#include +#include +#include + +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; + extern const int UNSUPPORTED_METHOD; +} + +namespace +{ + +class CollectSourceColumnsVisitor : public InDepthQueryTreeVisitor +{ +public: + explicit CollectSourceColumnsVisitor(PlannerContext & planner_context_) + : planner_context(planner_context_) + {} + + void visitImpl(QueryTreeNodePtr & node) + { + auto * column_node = node->as(); + if (!column_node) + return; + + auto column_source_node = column_node->getColumnSource(); + auto column_source_node_type = column_source_node->getNodeType(); + + if (column_source_node_type == QueryTreeNodeType::ARRAY_JOIN || + column_source_node_type == QueryTreeNodeType::LAMBDA) + return; + + /// JOIN using expression + if (column_node->hasExpression() && column_source_node->getNodeType() == QueryTreeNodeType::JOIN) + return; + + auto & table_expression_data = planner_context.getOrCreateTableExpressionData(column_source_node); + + if (column_node->hasExpression()) + { + /// Replace ALIAS column with expression + table_expression_data.addAliasColumnName(column_node->getColumnName()); + node = column_node->getExpression(); + visitImpl(node); + return; + } + + if (column_source_node_type != QueryTreeNodeType::TABLE && + column_source_node_type != QueryTreeNodeType::TABLE_FUNCTION && + column_source_node_type != QueryTreeNodeType::QUERY && + column_source_node_type != QueryTreeNodeType::UNION) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Expected table, table function, query or union column source. 
Actual {}", + column_source_node->formatASTForErrorMessage()); + + bool column_already_exists = table_expression_data.hasColumn(column_node->getColumnName()); + if (column_already_exists) + return; + + auto column_identifier = planner_context.getGlobalPlannerContext()->createColumnIdentifier(node); + table_expression_data.addColumn(column_node->getColumn(), column_identifier); + } + + static bool needChildVisit(const QueryTreeNodePtr &, const QueryTreeNodePtr & child_node) + { + return !(child_node->getNodeType() == QueryTreeNodeType::QUERY || child_node->getNodeType() == QueryTreeNodeType::UNION); + } + +private: + PlannerContext & planner_context; +}; + +} + +void collectTableExpressionData(QueryTreeNodePtr & query_node, PlannerContext & planner_context) +{ + auto & query_node_typed = query_node->as(); + auto table_expressions_nodes = extractTableExpressions(query_node_typed.getJoinTree()); + + for (auto & table_expression_node : table_expressions_nodes) + { + auto & table_expression_data = planner_context.getOrCreateTableExpressionData(table_expression_node); + + if (auto * table_node = table_expression_node->as()) + { + bool storage_is_remote = table_node->getStorage()->isRemote(); + table_expression_data.setIsRemote(storage_is_remote); + } + else if (auto * table_function_node = table_expression_node->as()) + { + bool storage_is_remote = table_function_node->getStorage()->isRemote(); + table_expression_data.setIsRemote(storage_is_remote); + } + + if (table_expression_data.isRemote()) + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "Remote storages are not supported"); + } + + CollectSourceColumnsVisitor collect_source_columns_visitor(planner_context); + collect_source_columns_visitor.visit(query_node); +} + +} diff --git a/src/Planner/CollectTableExpressionData.h b/src/Planner/CollectTableExpressionData.h new file mode 100644 index 00000000000..f4e2d579dca --- /dev/null +++ b/src/Planner/CollectTableExpressionData.h @@ -0,0 +1,17 @@ +#pragma once + +#include + +#include + +namespace DB +{ + +/** Collect table expression data for query node. + * Collected table expression data is registered in planner context. + * + * ALIAS table column nodes are registered in table expression data and replaced in query tree with inner alias expression. + */ +void collectTableExpressionData(QueryTreeNodePtr & query_node, PlannerContext & planner_context); + +} diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp new file mode 100644 index 00000000000..97f82d06463 --- /dev/null +++ b/src/Planner/Planner.cpp @@ -0,0 +1,873 @@ +#include + +#include + +#include + +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int UNSUPPORTED_METHOD; + extern const int LOGICAL_ERROR; + extern const int BAD_ARGUMENTS; + extern const int TOO_DEEP_SUBQUERIES; + extern const int NOT_IMPLEMENTED; +} + +/** ClickHouse query planner. + * + * TODO: Support JOIN with JOIN engine. + * TODO: Support VIEWs. 
+ * TODO: JOIN drop unnecessary columns after ON, USING section + * TODO: Support RBAC. Support RBAC for ALIAS columns + * TODO: Support distributed query processing + * TODO: Support PREWHERE + * TODO: Support DISTINCT + * TODO: Support trivial count optimization + * TODO: Support projections + * TODO: Support read in order optimization + * TODO: UNION storage limits + * TODO: Support max streams + * TODO: Support ORDER BY read in order optimization + * TODO: Support GROUP BY read in order optimization + * TODO: Support Key Condition. Support indexes for IN function. + * TODO: Better support for quota and limits. + */ + +namespace +{ + +/** Check that table and table function table expressions from planner context support transactions. + * + * There is precondition that table expression data for table expression nodes is collected in planner context. + */ +void checkStoragesSupportTransactions(const PlannerContextPtr & planner_context) +{ + const auto & query_context = planner_context->getQueryContext(); + if (query_context->getSettingsRef().throw_on_unsupported_query_inside_transaction) + return; + + if (!query_context->getCurrentTransaction()) + return; + + for (const auto & [table_expression, _] : planner_context->getTableExpressionNodeToData()) + { + StoragePtr storage; + if (auto * table_node = table_expression->as()) + storage = table_node->getStorage(); + else if (auto * table_function_node = table_expression->as()) + storage = table_function_node->getStorage(); + + if (storage->supportsTransactions()) + continue; + + throw Exception(ErrorCodes::NOT_IMPLEMENTED, + "Storage {} (table {}) does not support transactions", + storage->getName(), + storage->getStorageID().getNameForLogs()); + } +} + +void addBuildSubqueriesForSetsStepIfNeeded(QueryPlan & query_plan, const SelectQueryOptions & select_query_options, const PlannerContextPtr & planner_context) +{ + PreparedSets::SubqueriesForSets subqueries_for_sets; + const auto & set_key_to_planner_set = planner_context->getRegisteredSets(); + + for (const auto & [key, planner_set] : set_key_to_planner_set) + { + const auto subquery_node = planner_set.getSubqueryNode(); + if (!subquery_node) + continue; + + auto subquery_context = buildSubqueryContext(planner_context->getQueryContext()); + auto subquery_options = select_query_options.subquery(); + + Planner subquery_planner( + subquery_node, + subquery_options, + std::move(subquery_context), + planner_context->getGlobalPlannerContext()); + subquery_planner.buildQueryPlanIfNeeded(); + + SubqueryForSet subquery_for_set; + subquery_for_set.set = planner_set.getSet(); + subquery_for_set.source = std::make_unique(std::move(subquery_planner).extractQueryPlan()); + + subqueries_for_sets.emplace(key, std::move(subquery_for_set)); + } + + addCreatingSetsStep(query_plan, std::move(subqueries_for_sets), planner_context->getQueryContext()); +} + +/// Extend lifetime of query context, storages, and table locks +void extendQueryContextAndStoragesLifetime(QueryPlan & query_plan, const PlannerContextPtr & planner_context) +{ + query_plan.addInterpreterContext(planner_context->getQueryContext()); + + for (const auto & [table_expression, _] : planner_context->getTableExpressionNodeToData()) + { + if (auto * table_node = table_expression->as()) + { + query_plan.addStorageHolder(table_node->getStorage()); + query_plan.addTableLock(table_node->getStorageLock()); + } + else if (auto * table_function_node = table_expression->as()) + { + query_plan.addStorageHolder(table_function_node->getStorage()); + } + } +} + 
+} + +Planner::Planner(const QueryTreeNodePtr & query_tree_, + const SelectQueryOptions & select_query_options_, + ContextPtr context_) + : query_tree(query_tree_) + , select_query_options(select_query_options_) + , planner_context(std::make_shared(std::move(context_), std::make_shared())) +{ + initialize(); +} + +Planner::Planner(const QueryTreeNodePtr & query_tree_, + const SelectQueryOptions & select_query_options_, + ContextPtr context_, + GlobalPlannerContextPtr global_planner_context_) + : query_tree(query_tree_) + , select_query_options(select_query_options_) + , planner_context(std::make_shared(std::move(context_), std::move(global_planner_context_))) +{ + initialize(); +} + +void Planner::initialize() +{ + checkStackSize(); + + if (query_tree->getNodeType() != QueryTreeNodeType::QUERY && + query_tree->getNodeType() != QueryTreeNodeType::UNION) + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, + "Expected QUERY or UNION node. Actual {}", + query_tree->formatASTForErrorMessage()); + + auto & query_context = planner_context->getQueryContext(); + + size_t max_subquery_depth = query_context->getSettingsRef().max_subquery_depth; + if (max_subquery_depth && select_query_options.subquery_depth > max_subquery_depth) + throw Exception(ErrorCodes::TOO_DEEP_SUBQUERIES, + "Too deep subqueries. Maximum: {}", + max_subquery_depth); + + auto * query_node = query_tree->as(); + if (!query_node) + return; + + bool need_apply_query_settings = query_node->hasSettingsChanges(); + + const auto & client_info = query_context->getClientInfo(); + auto min_major = static_cast(DBMS_MIN_MAJOR_VERSION_WITH_CURRENT_AGGREGATION_VARIANT_SELECTION_METHOD); + auto min_minor = static_cast(DBMS_MIN_MINOR_VERSION_WITH_CURRENT_AGGREGATION_VARIANT_SELECTION_METHOD); + + bool need_to_disable_two_level_aggregation = client_info.query_kind == ClientInfo::QueryKind::SECONDARY_QUERY && + client_info.connection_client_version_major < min_major && + client_info.connection_client_version_minor < min_minor; + + if (need_apply_query_settings || need_to_disable_two_level_aggregation) + { + auto updated_context = Context::createCopy(query_context); + + if (need_apply_query_settings) + updated_context->applySettingsChanges(query_node->getSettingsChanges()); + + /// Disable two-level aggregation due to version incompatibility + if (need_to_disable_two_level_aggregation) + { + updated_context->setSetting("group_by_two_level_threshold", Field(0)); + updated_context->setSetting("group_by_two_level_threshold_bytes", Field(0)); + } + + query_context = std::move(updated_context); + } +} + +void Planner::buildQueryPlanIfNeeded() +{ + if (query_plan.isInitialized()) + return; + + auto query_context = planner_context->getQueryContext(); + + if (auto * union_query_tree = query_tree->as()) + { + auto union_mode = union_query_tree->getUnionMode(); + if (union_mode == SelectUnionMode::UNION_DEFAULT || + union_mode == SelectUnionMode::EXCEPT_DEFAULT || + union_mode == SelectUnionMode::INTERSECT_DEFAULT) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "UNION mode must be initialized"); + + size_t queries_size = union_query_tree->getQueries().getNodes().size(); + + std::vector> query_plans; + query_plans.reserve(queries_size); + + Blocks query_plans_headers; + query_plans_headers.reserve(queries_size); + + for (auto & query_node : union_query_tree->getQueries().getNodes()) + { + Planner query_planner(query_node, select_query_options, query_context); + query_planner.buildQueryPlanIfNeeded(); + auto query_node_plan = 
std::make_unique(std::move(query_planner).extractQueryPlan()); + query_plans_headers.push_back(query_node_plan->getCurrentDataStream().header); + query_plans.push_back(std::move(query_node_plan)); + } + + Block union_common_header = buildCommonHeaderForUnion(query_plans_headers); + DataStreams query_plans_streams; + query_plans_streams.reserve(query_plans.size()); + + for (auto & query_node_plan : query_plans) + { + if (blocksHaveEqualStructure(query_node_plan->getCurrentDataStream().header, union_common_header)) + { + query_plans_streams.push_back(query_node_plan->getCurrentDataStream()); + continue; + } + + auto actions_dag = ActionsDAG::makeConvertingActions( + query_node_plan->getCurrentDataStream().header.getColumnsWithTypeAndName(), + union_common_header.getColumnsWithTypeAndName(), + ActionsDAG::MatchColumnsMode::Position); + auto converting_step = std::make_unique(query_node_plan->getCurrentDataStream(), std::move(actions_dag)); + converting_step->setStepDescription("Conversion before UNION"); + query_node_plan->addStep(std::move(converting_step)); + + query_plans_streams.push_back(query_node_plan->getCurrentDataStream()); + } + + const auto & settings = query_context->getSettingsRef(); + auto max_threads = settings.max_threads; + + bool is_distinct = union_mode == SelectUnionMode::UNION_DISTINCT || union_mode == SelectUnionMode::INTERSECT_DISTINCT || + union_mode == SelectUnionMode::EXCEPT_DISTINCT; + + if (union_mode == SelectUnionMode::UNION_ALL || union_mode == SelectUnionMode::UNION_DISTINCT) + { + auto union_step = std::make_unique(std::move(query_plans_streams), max_threads); + query_plan.unitePlans(std::move(union_step), std::move(query_plans)); + } + else if (union_mode == SelectUnionMode::INTERSECT_ALL || union_mode == SelectUnionMode::INTERSECT_DISTINCT || + union_mode == SelectUnionMode::EXCEPT_ALL || union_mode == SelectUnionMode::EXCEPT_DISTINCT) + { + IntersectOrExceptStep::Operator intersect_or_except_operator = IntersectOrExceptStep::Operator::UNKNOWN; + + if (union_mode == SelectUnionMode::INTERSECT_ALL) + intersect_or_except_operator = IntersectOrExceptStep::Operator::INTERSECT_ALL; + else if (union_mode == SelectUnionMode::INTERSECT_DISTINCT) + intersect_or_except_operator = IntersectOrExceptStep::Operator::INTERSECT_DISTINCT; + else if (union_mode == SelectUnionMode::EXCEPT_ALL) + intersect_or_except_operator = IntersectOrExceptStep::Operator::EXCEPT_ALL; + else if (union_mode == SelectUnionMode::EXCEPT_DISTINCT) + intersect_or_except_operator = IntersectOrExceptStep::Operator::EXCEPT_DISTINCT; + + auto union_step = std::make_unique(std::move(query_plans_streams), intersect_or_except_operator, max_threads); + query_plan.unitePlans(std::move(union_step), std::move(query_plans)); + } + + if (is_distinct) + { + /// Add distinct transform + SizeLimits limits(settings.max_rows_in_distinct, settings.max_bytes_in_distinct, settings.distinct_overflow_mode); + + auto distinct_step = std::make_unique( + query_plan.getCurrentDataStream(), + limits, + 0 /*limit hint*/, + query_plan.getCurrentDataStream().header.getNames(), + false /*pre distinct*/, + settings.optimize_distinct_in_order); + + query_plan.addStep(std::move(distinct_step)); + } + + return; + } + + auto & query_node = query_tree->as(); + + if (query_node.hasPrewhere()) + { + if (query_node.hasWhere()) + { + auto function_node = std::make_shared("and"); + auto and_function = FunctionFactory::instance().get("and", query_context); + function_node->resolveAsFunction(std::move(and_function), std::make_shared()); + 
function_node->getArguments().getNodes() = {query_node.getPrewhere(), query_node.getWhere()}; + query_node.getWhere() = std::move(function_node); + query_node.getPrewhere() = {}; + } + else + { + query_node.getWhere() = query_node.getPrewhere(); + } + } + + SelectQueryInfo select_query_info; + select_query_info.original_query = queryNodeToSelectQuery(query_tree); + select_query_info.query = select_query_info.original_query; + select_query_info.planner_context = planner_context; + + StorageLimitsList storage_limits; + storage_limits.push_back(buildStorageLimits(*query_context, select_query_options)); + select_query_info.storage_limits = std::make_shared(storage_limits); + + collectTableExpressionData(query_tree, *planner_context); + checkStoragesSupportTransactions(planner_context); + + collectSets(query_tree, *planner_context); + + query_plan = buildQueryPlanForJoinTreeNode(query_node.getJoinTree(), select_query_info, select_query_options, planner_context); + auto expression_analysis_result = buildExpressionAnalysisResult(query_tree, query_plan.getCurrentDataStream().header.getColumnsWithTypeAndName(), planner_context); + + if (expression_analysis_result.hasWhere()) + { + const auto & where_analysis_result = expression_analysis_result.getWhere(); + auto where_step = std::make_unique(query_plan.getCurrentDataStream(), + where_analysis_result.filter_actions, + where_analysis_result.filter_column_name, + where_analysis_result.remove_filter_column); + where_step->setStepDescription("WHERE"); + query_plan.addStep(std::move(where_step)); + } + + bool having_executed = false; + + if (expression_analysis_result.hasAggregation()) + { + const auto & aggregation_analysis_result = expression_analysis_result.getAggregation(); + + if (aggregation_analysis_result.before_aggregation_actions) + { + auto expression_before_aggregation = std::make_unique(query_plan.getCurrentDataStream(), aggregation_analysis_result.before_aggregation_actions); + expression_before_aggregation->setStepDescription("Before GROUP BY"); + query_plan.addStep(std::move(expression_before_aggregation)); + } + + const Settings & settings = planner_context->getQueryContext()->getSettingsRef(); + + const auto stats_collecting_params = Aggregator::Params::StatsCollectingParams( + select_query_info.query, + settings.collect_hash_table_stats_during_aggregation, + settings.max_entries_for_hash_table_stats, + settings.max_size_to_preallocate_for_aggregation); + + bool aggregate_overflow_row = + query_node.isGroupByWithTotals() && + settings.max_rows_to_group_by && + settings.group_by_overflow_mode == OverflowMode::ANY && + settings.totals_mode != TotalsMode::AFTER_HAVING_EXCLUSIVE; + + Aggregator::Params aggregator_params = Aggregator::Params( + aggregation_analysis_result.aggregation_keys, + aggregation_analysis_result.aggregate_descriptions, + aggregate_overflow_row, + settings.max_rows_to_group_by, + settings.group_by_overflow_mode, + settings.group_by_two_level_threshold, + settings.group_by_two_level_threshold_bytes, + settings.max_bytes_before_external_group_by, + settings.empty_result_for_aggregation_by_empty_set + || (settings.empty_result_for_aggregation_by_constant_keys_on_empty_set && aggregation_analysis_result.aggregation_keys.empty() + && aggregation_analysis_result.group_by_with_constant_keys), + planner_context->getQueryContext()->getTempDataOnDisk(), + settings.max_threads, + settings.min_free_disk_space_for_temporary_data, + settings.compile_aggregate_expressions, + settings.min_count_to_compile_aggregate_expression, + 
settings.max_block_size, + settings.enable_software_prefetch_in_aggregation, + /* only_merge */ false, + stats_collecting_params + ); + + SortDescription group_by_sort_description; + + auto merge_threads = settings.max_threads; + auto temporary_data_merge_threads = settings.aggregation_memory_efficient_merge_threads + ? static_cast(settings.aggregation_memory_efficient_merge_threads) + : static_cast(settings.max_threads); + + bool storage_has_evenly_distributed_read = false; + const auto & table_expression_node_to_data = planner_context->getTableExpressionNodeToData(); + + if (table_expression_node_to_data.size() == 1) + { + auto it = table_expression_node_to_data.begin(); + const auto & table_expression_node = it->first; + if (const auto * table_node = table_expression_node->as()) + storage_has_evenly_distributed_read = table_node->getStorage()->hasEvenlyDistributedRead(); + else if (const auto * table_function_node = table_expression_node->as()) + storage_has_evenly_distributed_read = table_function_node->getStorageOrThrow()->hasEvenlyDistributedRead(); + } + + const bool should_produce_results_in_order_of_bucket_number + = select_query_options.to_stage == QueryProcessingStage::WithMergeableState && settings.distributed_aggregation_memory_efficient; + + InputOrderInfoPtr input_order_info; + bool aggregate_final = + select_query_options.to_stage > QueryProcessingStage::WithMergeableState && + !query_node.isGroupByWithTotals() && !query_node.isGroupByWithRollup() && !query_node.isGroupByWithCube(); + + auto aggregating_step = std::make_unique( + query_plan.getCurrentDataStream(), + aggregator_params, + aggregation_analysis_result.grouping_sets_parameters_list, + aggregate_final, + settings.max_block_size, + settings.aggregation_in_order_max_block_bytes, + merge_threads, + temporary_data_merge_threads, + storage_has_evenly_distributed_read, + settings.group_by_use_nulls, + std::move(input_order_info), + std::move(group_by_sort_description), + should_produce_results_in_order_of_bucket_number); + query_plan.addStep(std::move(aggregating_step)); + + if (query_node.isGroupByWithRollup()) + { + auto rollup_step = std::make_unique(query_plan.getCurrentDataStream(), std::move(aggregator_params), true /*final*/, settings.group_by_use_nulls); + query_plan.addStep(std::move(rollup_step)); + } + else if (query_node.isGroupByWithCube()) + { + auto cube_step = std::make_unique(query_plan.getCurrentDataStream(), std::move(aggregator_params), true /*final*/, settings.group_by_use_nulls); + query_plan.addStep(std::move(cube_step)); + } + + if (query_node.isGroupByWithTotals()) + { + const auto & having_analysis_result = expression_analysis_result.getHaving(); + bool final = !query_node.isGroupByWithRollup() && !query_node.isGroupByWithCube(); + having_executed = true; + + auto totals_having_step = std::make_unique( + query_plan.getCurrentDataStream(), + aggregation_analysis_result.aggregate_descriptions, + aggregate_overflow_row, + having_analysis_result.filter_actions, + having_analysis_result.filter_column_name, + having_analysis_result.remove_filter_column, + settings.totals_mode, + settings.totals_auto_threshold, + final); + + query_plan.addStep(std::move(totals_having_step)); + } + } + + if (!having_executed && expression_analysis_result.hasHaving()) + { + const auto & having_analysis_result = expression_analysis_result.getHaving(); + + auto having_step = std::make_unique(query_plan.getCurrentDataStream(), + having_analysis_result.filter_actions, + having_analysis_result.filter_column_name, + 
having_analysis_result.remove_filter_column); + having_step->setStepDescription("HAVING"); + query_plan.addStep(std::move(having_step)); + } + + if (expression_analysis_result.hasWindow()) + { + const auto & window_analysis_result = expression_analysis_result.getWindow(); + + if (window_analysis_result.before_window_actions) + { + auto expression_step_before_window = std::make_unique(query_plan.getCurrentDataStream(), window_analysis_result.before_window_actions); + expression_step_before_window->setStepDescription("Before WINDOW"); + query_plan.addStep(std::move(expression_step_before_window)); + } + + auto window_descriptions = window_analysis_result.window_descriptions; + sortWindowDescriptions(window_descriptions); + + size_t window_descriptions_size = window_descriptions.size(); + + const auto & settings = query_context->getSettingsRef(); + for (size_t i = 0; i < window_descriptions_size; ++i) + { + const auto & window_description = window_descriptions[i]; + + /** We don't need to sort again if the input from previous window already + * has suitable sorting. Also don't create sort steps when there are no + * columns to sort by, because the sort nodes are confused by this. It + * happens in case of `over ()`. + */ + if (!window_description.full_sort_description.empty() && + (i == 0 || !sortDescriptionIsPrefix(window_description.full_sort_description, window_descriptions[i - 1].full_sort_description))) + { + auto sorting_step = std::make_unique( + query_plan.getCurrentDataStream(), + window_description.full_sort_description, + settings.max_block_size, + 0 /*limit*/, + SizeLimits(settings.max_rows_to_sort, settings.max_bytes_to_sort, settings.sort_overflow_mode), + settings.max_bytes_before_remerge_sort, + settings.remerge_sort_lowered_memory_bytes_ratio, + settings.max_bytes_before_external_sort, + query_context->getTempDataOnDisk(), + settings.min_free_disk_space_for_temporary_data, + settings.optimize_sorting_by_input_stream_properties); + + sorting_step->setStepDescription("Sorting for window '" + window_description.window_name + "'"); + query_plan.addStep(std::move(sorting_step)); + } + + auto window_step = std::make_unique(query_plan.getCurrentDataStream(), window_description, window_description.window_functions); + window_step->setStepDescription("Window step for window '" + window_description.window_name + "'"); + query_plan.addStep(std::move(window_step)); + } + } + + const auto & projection_analysis_result = expression_analysis_result.getProjection(); + auto expression_step_projection = std::make_unique(query_plan.getCurrentDataStream(), projection_analysis_result.projection_actions); + expression_step_projection->setStepDescription("Projection"); + query_plan.addStep(std::move(expression_step_projection)); + + UInt64 limit_offset = 0; + if (query_node.hasOffset()) + { + /// Constness of offset is validated during query analysis stage + limit_offset = query_node.getOffset()->getConstantValue().getValue().safeGet(); + } + + UInt64 limit_length = 0; + + if (query_node.hasLimit()) + { + /// Constness of limit is validated during query analysis stage + limit_length = query_node.getLimit()->getConstantValue().getValue().safeGet(); + } + + if (query_node.isDistinct()) + { + const Settings & settings = planner_context->getQueryContext()->getSettingsRef(); + UInt64 limit_hint_for_distinct = 0; + bool pre_distinct = true; + + SizeLimits limits(settings.max_rows_in_distinct, settings.max_bytes_in_distinct, settings.distinct_overflow_mode); + bool no_order_by = !query_node.hasOrderBy(); + 
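+        /// Example of the rule described below: `SELECT DISTINCT x FROM t LIMIT 10 OFFSET 5`
+        /// without ORDER BY can produce at most 10 + 5 = 15 distinct rows, so 15 can be used
+        /// as the limit hint for the DISTINCT step.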
+ /** If after this stage of DISTINCT ORDER BY is not executed, + * then you can get no more than limit_length + limit_offset of different rows. + */ + if (no_order_by && limit_length <= std::numeric_limits::max() - limit_offset) + limit_hint_for_distinct = limit_length + limit_offset; + + auto distinct_step = std::make_unique( + query_plan.getCurrentDataStream(), + limits, + limit_hint_for_distinct, + projection_analysis_result.projection_column_names, + pre_distinct, + settings.optimize_distinct_in_order); + + if (pre_distinct) + distinct_step->setStepDescription("Preliminary DISTINCT"); + else + distinct_step->setStepDescription("DISTINCT"); + + query_plan.addStep(std::move(distinct_step)); + } + + if (expression_analysis_result.hasSort()) + { + const auto & sort_analysis_result = expression_analysis_result.getSort(); + auto expression_step_before_order_by = std::make_unique(query_plan.getCurrentDataStream(), sort_analysis_result.before_order_by_actions); + expression_step_before_order_by->setStepDescription("Before ORDER BY"); + query_plan.addStep(std::move(expression_step_before_order_by)); + } + + QueryPlanStepPtr filling_step; + SortDescription sort_description; + + if (query_node.hasOrderBy()) + { + sort_description = extractSortDescription(query_node.getOrderByNode(), *planner_context); + + bool query_has_array_join_in_join_tree = queryHasArrayJoinInJoinTree(query_tree); + + UInt64 partial_sorting_limit = 0; + + /// Partial sort can be done if there is LIMIT, but no DISTINCT, LIMIT WITH TIES, LIMIT BY, ARRAY JOIN + if (limit_length != 0 && !query_node.isDistinct() && !query_node.hasLimitBy() && !query_node.isLimitWithTies() && + !query_has_array_join_in_join_tree && limit_length <= std::numeric_limits::max() - limit_offset) + { + partial_sorting_limit = limit_length + limit_offset; + } + + const Settings & settings = query_context->getSettingsRef(); + + /// Merge the sorted blocks + auto sorting_step = std::make_unique( + query_plan.getCurrentDataStream(), + sort_description, + settings.max_block_size, + partial_sorting_limit, + SizeLimits(settings.max_rows_to_sort, settings.max_bytes_to_sort, settings.sort_overflow_mode), + settings.max_bytes_before_remerge_sort, + settings.remerge_sort_lowered_memory_bytes_ratio, + settings.max_bytes_before_external_sort, + query_context->getTempDataOnDisk(), + settings.min_free_disk_space_for_temporary_data, + settings.optimize_sorting_by_input_stream_properties); + + sorting_step->setStepDescription("Sorting for ORDER BY"); + query_plan.addStep(std::move(sorting_step)); + + NameSet column_names_with_fill; + SortDescription fill_description; + for (auto & description : sort_description) + { + if (description.with_fill) + { + fill_description.push_back(description); + column_names_with_fill.insert(description.column_name); + } + } + + if (!fill_description.empty()) + { + InterpolateDescriptionPtr interpolate_description; + + if (query_node.hasInterpolate()) + { + auto interpolate_actions_dag = std::make_shared(); + + auto & interpolate_list_node = query_node.getInterpolate()->as(); + auto & interpolate_list_nodes = interpolate_list_node.getNodes(); + + if (interpolate_list_nodes.empty()) + { + auto query_plan_columns = query_plan.getCurrentDataStream().header.getColumnsWithTypeAndName(); + for (auto & query_plan_column : query_plan_columns) + { + if (column_names_with_fill.contains(query_plan_column.name)) + continue; + + const auto * input_action_node = &interpolate_actions_dag->addInput(query_plan_column); + 
interpolate_actions_dag->getOutputs().push_back(input_action_node); + } + } + else + { + for (auto & interpolate_node : interpolate_list_nodes) + { + auto & interpolate_node_typed = interpolate_node->as(); + + PlannerActionsVisitor planner_actions_visitor(planner_context); + auto expression_to_interpolate_expression_nodes = planner_actions_visitor.visit(interpolate_actions_dag, interpolate_node_typed.getExpression()); + auto interpolate_expression_nodes = planner_actions_visitor.visit(interpolate_actions_dag, interpolate_node_typed.getInterpolateExpression()); + + if (expression_to_interpolate_expression_nodes.size() != 1) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expression to interpolate expected to have single action node"); + + if (interpolate_expression_nodes.size() != 1) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Interpolate expression expected to have single action node"); + + const auto * expression_to_interpolate = expression_to_interpolate_expression_nodes[0]; + const auto & expression_to_interpolate_name = expression_to_interpolate->result_name; + + const auto * interpolate_expression = interpolate_expression_nodes[0]; + if (!interpolate_expression->result_type->equals(*expression_to_interpolate->result_type)) + { + auto cast_type_name = expression_to_interpolate->result_type->getName(); + Field cast_type_constant_value(cast_type_name); + + ColumnWithTypeAndName column; + column.name = calculateConstantActionNodeName(cast_type_name); + column.column = DataTypeString().createColumnConst(0, cast_type_constant_value); + column.type = std::make_shared(); + + const auto * cast_type_constant_node = &interpolate_actions_dag->addColumn(std::move(column)); + + FunctionCastBase::Diagnostic diagnostic = {interpolate_expression->result_name, interpolate_expression->result_name}; + FunctionOverloadResolverPtr func_builder_cast + = CastInternalOverloadResolver::createImpl(std::move(diagnostic)); + + ActionsDAG::NodeRawConstPtrs children = {interpolate_expression, cast_type_constant_node}; + interpolate_expression = &interpolate_actions_dag->addFunction(func_builder_cast, std::move(children), interpolate_expression->result_name); + } + + const auto * alias_node = &interpolate_actions_dag->addAlias(*interpolate_expression, expression_to_interpolate_name); + interpolate_actions_dag->getOutputs().push_back(alias_node); + } + + interpolate_actions_dag->removeUnusedActions(); + } + + Aliases empty_aliases; + interpolate_description = std::make_shared(std::move(interpolate_actions_dag), empty_aliases); + } + + filling_step = std::make_unique(query_plan.getCurrentDataStream(), std::move(fill_description), interpolate_description); + } + } + + if (expression_analysis_result.hasLimitBy()) + { + const auto & limit_by_analysis_result = expression_analysis_result.getLimitBy(); + auto expression_step_before_limit_by = std::make_unique(query_plan.getCurrentDataStream(), limit_by_analysis_result.before_limit_by_actions); + expression_step_before_limit_by->setStepDescription("Before LIMIT BY"); + query_plan.addStep(std::move(expression_step_before_limit_by)); + + /// Constness of LIMIT BY limit is validated during query analysis stage + UInt64 limit_by_limit = query_node.getLimitByLimit()->getConstantValue().getValue().safeGet(); + UInt64 limit_by_offset = 0; + + if (query_node.hasLimitByOffset()) + { + /// Constness of LIMIT BY offset is validated during query analysis stage + limit_by_offset = query_node.getLimitByOffset()->getConstantValue().getValue().safeGet(); + } + + auto limit_by_step = 
std::make_unique(query_plan.getCurrentDataStream(), + limit_by_limit, + limit_by_offset, + limit_by_analysis_result.limit_by_column_names); + query_plan.addStep(std::move(limit_by_step)); + } + + if (filling_step) + query_plan.addStep(std::move(filling_step)); + + if (query_context->getSettingsRef().extremes) + { + auto extremes_step = std::make_unique(query_plan.getCurrentDataStream()); + query_plan.addStep(std::move(extremes_step)); + } + + if (query_node.hasLimit()) + { + const Settings & settings = query_context->getSettingsRef(); + bool always_read_till_end = settings.exact_rows_before_limit; + bool limit_with_ties = query_node.isLimitWithTies(); + + /** Special cases: + * + * 1. If there is WITH TOTALS and there is no ORDER BY, then read the data to the end, + * otherwise TOTALS is counted according to incomplete data. + * + * 2. If there is no WITH TOTALS and there is a subquery in FROM, and there is WITH TOTALS on one of the levels, + * then when using LIMIT, you should read the data to the end, rather than cancel the query earlier, + * because if you cancel the query, we will not get `totals` data from the remote server. + */ + if (query_node.isGroupByWithTotals() && !query_node.hasOrderBy()) + always_read_till_end = true; + + if (!query_node.isGroupByWithTotals() && queryHasWithTotalsInAnySubqueryInJoinTree(query_tree)) + always_read_till_end = true; + + SortDescription limit_with_ties_sort_description; + + if (query_node.isLimitWithTies()) + { + /// Validated during parser stage + if (!query_node.hasOrderBy()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "LIMIT WITH TIES without ORDER BY"); + + limit_with_ties_sort_description = sort_description; + } + + auto limit = std::make_unique(query_plan.getCurrentDataStream(), + limit_length, + limit_offset, + always_read_till_end, + limit_with_ties, + limit_with_ties_sort_description); + + if (limit_with_ties) + limit->setStepDescription("LIMIT WITH TIES"); + + query_plan.addStep(std::move(limit)); + } + else if (query_node.hasOffset()) + { + auto offsets_step = std::make_unique(query_plan.getCurrentDataStream(), limit_offset); + query_plan.addStep(std::move(offsets_step)); + } + + auto projection_step = std::make_unique(query_plan.getCurrentDataStream(), projection_analysis_result.project_names_actions); + projection_step->setStepDescription("Project names"); + query_plan.addStep(std::move(projection_step)); + + addBuildSubqueriesForSetsStepIfNeeded(query_plan, select_query_options, planner_context); + extendQueryContextAndStoragesLifetime(query_plan, planner_context); +} + +} diff --git a/src/Planner/Planner.h b/src/Planner/Planner.h new file mode 100644 index 00000000000..03f8e19df56 --- /dev/null +++ b/src/Planner/Planner.h @@ -0,0 +1,59 @@ +#pragma once + +#include +#include + +#include +#include +#include + +namespace DB +{ + +class GlobalPlannerContext; +using GlobalPlannerContextPtr = std::shared_ptr; + +class PlannerContext; +using PlannerContextPtr = std::shared_ptr; + +class Planner +{ +public: + /// Initialize planner with query tree after analysis phase + Planner(const QueryTreeNodePtr & query_tree_, + const SelectQueryOptions & select_query_options_, + ContextPtr context_); + + /// Initialize planner with query tree after query analysis phase and global planner context + Planner(const QueryTreeNodePtr & query_tree_, + const SelectQueryOptions & select_query_options_, + ContextPtr context_, + GlobalPlannerContextPtr global_planner_context_); + + const QueryPlan & getQueryPlan() const + { + return query_plan; + } + + 
QueryPlan & getQueryPlan() + { + return query_plan; + } + + void buildQueryPlanIfNeeded(); + + QueryPlan && extractQueryPlan() && + { + return std::move(query_plan); + } + +private: + void initialize(); + + QueryTreeNodePtr query_tree; + QueryPlan query_plan; + SelectQueryOptions select_query_options; + PlannerContextPtr planner_context; +}; + +} diff --git a/src/Planner/PlannerActionsVisitor.cpp b/src/Planner/PlannerActionsVisitor.cpp new file mode 100644 index 00000000000..a6f1a74f251 --- /dev/null +++ b/src/Planner/PlannerActionsVisitor.cpp @@ -0,0 +1,765 @@ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include +#include + +#include +#include + +#include +#include + +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int UNSUPPORTED_METHOD; + extern const int LOGICAL_ERROR; + extern const int BAD_ARGUMENTS; +} + +namespace +{ + +class ActionsScopeNode +{ +public: + explicit ActionsScopeNode(ActionsDAGPtr actions_dag_, QueryTreeNodePtr scope_node_) + : actions_dag(std::move(actions_dag_)) + , scope_node(std::move(scope_node_)) + { + for (const auto & node : actions_dag->getNodes()) + node_name_to_node[node.result_name] = &node; + } + + const QueryTreeNodePtr & getScopeNode() const + { + return scope_node; + } + + [[maybe_unused]] bool containsNode(const std::string & node_name) + { + return node_name_to_node.find(node_name) != node_name_to_node.end(); + } + + [[maybe_unused]] const ActionsDAG::Node * tryGetNode(const std::string & node_name) + { + auto it = node_name_to_node.find(node_name); + if (it == node_name_to_node.end()) + return {}; + + return it->second; + } + + const ActionsDAG::Node * getNodeOrThrow(const std::string & node_name) + { + auto it = node_name_to_node.find(node_name); + if (it == node_name_to_node.end()) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "No node with name {}. 
There are only nodes {}", + node_name, + actions_dag->dumpNames()); + + return it->second; + } + + const ActionsDAG::Node * addInputColumnIfNecessary(const std::string & node_name, const DataTypePtr & column_type) + { + auto it = node_name_to_node.find(node_name); + if (it != node_name_to_node.end()) + return it->second; + + const auto * node = &actions_dag->addInput(node_name, column_type); + node_name_to_node[node->result_name] = node; + + return node; + } + + const ActionsDAG::Node * addInputConstantColumnIfNecessary(const std::string & node_name, const ColumnWithTypeAndName & column) + { + auto it = node_name_to_node.find(node_name); + if (it != node_name_to_node.end()) + return it->second; + + const auto * node = &actions_dag->addInput(column); + node_name_to_node[node->result_name] = node; + + return node; + } + + const ActionsDAG::Node * addConstantIfNecessary(const std::string & node_name, const ColumnWithTypeAndName & column) + { + auto it = node_name_to_node.find(node_name); + if (it != node_name_to_node.end()) + return it->second; + + const auto * node = &actions_dag->addColumn(column); + node_name_to_node[node->result_name] = node; + + return node; + } + + const ActionsDAG::Node * addFunctionIfNecessary(const std::string & node_name, ActionsDAG::NodeRawConstPtrs children, FunctionOverloadResolverPtr function) + { + auto it = node_name_to_node.find(node_name); + if (it != node_name_to_node.end()) + return it->second; + + const auto * node = &actions_dag->addFunction(function, children, node_name); + node_name_to_node[node->result_name] = node; + + return node; + } + + const ActionsDAG::Node * addArrayJoinIfNecessary(const std::string & node_name, const ActionsDAG::Node * child) + { + auto it = node_name_to_node.find(node_name); + if (it != node_name_to_node.end()) + return it->second; + + const auto * node = &actions_dag->addArrayJoin(*child, node_name); + node_name_to_node[node->result_name] = node; + + return node; + } + +private: + std::unordered_map node_name_to_node; + ActionsDAGPtr actions_dag; + QueryTreeNodePtr scope_node; +}; + +class PlannerActionsVisitorImpl +{ +public: + PlannerActionsVisitorImpl(ActionsDAGPtr actions_dag, const PlannerContextPtr & planner_context_); + + ActionsDAG::NodeRawConstPtrs visit(QueryTreeNodePtr expression_node); + +private: + using NodeNameAndNodeMinLevel = std::pair; + + NodeNameAndNodeMinLevel visitImpl(QueryTreeNodePtr node); + + NodeNameAndNodeMinLevel visitColumn(const QueryTreeNodePtr & node); + + NodeNameAndNodeMinLevel visitConstantValue(const Field & constant_literal, const DataTypePtr & constant_type); + + NodeNameAndNodeMinLevel visitConstant(const QueryTreeNodePtr & node); + + NodeNameAndNodeMinLevel visitLambda(const QueryTreeNodePtr & node); + + NodeNameAndNodeMinLevel makeSetForInFunction(const QueryTreeNodePtr & node); + + NodeNameAndNodeMinLevel visitFunction(const QueryTreeNodePtr & node); + + NodeNameAndNodeMinLevel visitQueryOrUnion(const QueryTreeNodePtr & node); + + std::vector actions_stack; + std::unordered_map node_to_node_name; + const PlannerContextPtr planner_context; +}; + +PlannerActionsVisitorImpl::PlannerActionsVisitorImpl(ActionsDAGPtr actions_dag, const PlannerContextPtr & planner_context_) + : planner_context(planner_context_) +{ + actions_stack.emplace_back(std::move(actions_dag), nullptr); +} + +ActionsDAG::NodeRawConstPtrs PlannerActionsVisitorImpl::visit(QueryTreeNodePtr expression_node) +{ + ActionsDAG::NodeRawConstPtrs result; + + if (auto * expression_list_node = expression_node->as()) + { + for 
(auto & node : expression_list_node->getNodes()) + { + auto [node_name, _] = visitImpl(node); + result.push_back(actions_stack.front().getNodeOrThrow(node_name)); + } + } + else + { + auto [node_name, _] = visitImpl(expression_node); + result.push_back(actions_stack.front().getNodeOrThrow(node_name)); + } + + return result; +} + +PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::visitImpl(QueryTreeNodePtr node) +{ + auto node_type = node->getNodeType(); + + if (node_type == QueryTreeNodeType::COLUMN) + return visitColumn(node); + else if (node_type == QueryTreeNodeType::CONSTANT) + return visitConstant(node); + else if (node_type == QueryTreeNodeType::FUNCTION) + return visitFunction(node); + else if (node_type == QueryTreeNodeType::QUERY || node_type == QueryTreeNodeType::UNION) + return visitQueryOrUnion(node); + + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, + "Expected column, constant, function, query or union node. Actual {}", + node->formatASTForErrorMessage()); +} + +PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::visitColumn(const QueryTreeNodePtr & node) +{ + auto column_node_name = calculateActionNodeName(node, *planner_context, node_to_node_name); + const auto & column_node = node->as(); + + Int64 actions_stack_size = static_cast(actions_stack.size() - 1); + for (Int64 i = actions_stack_size; i >= 0; --i) + { + actions_stack[i].addInputColumnIfNecessary(column_node_name, column_node.getColumnType()); + + auto column_source = column_node.getColumnSourceOrNull(); + if (column_source && + column_source->getNodeType() == QueryTreeNodeType::LAMBDA && + actions_stack[i].getScopeNode().get() == column_source.get()) + { + return {column_node_name, i}; + } + } + + return {column_node_name, 0}; +} + +PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::visitConstantValue(const Field & constant_literal, const DataTypePtr & constant_type) +{ + auto constant_node_name = calculateConstantActionNodeName(constant_literal, constant_type); + + ColumnWithTypeAndName column; + column.name = constant_node_name; + column.type = constant_type; + column.column = column.type->createColumnConst(1, constant_literal); + + actions_stack[0].addConstantIfNecessary(constant_node_name, column); + + size_t actions_stack_size = actions_stack.size(); + for (size_t i = 1; i < actions_stack_size; ++i) + { + auto & actions_stack_node = actions_stack[i]; + actions_stack_node.addInputConstantColumnIfNecessary(constant_node_name, column); + } + + return {constant_node_name, 0}; +} + +PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::visitConstant(const QueryTreeNodePtr & node) +{ + const auto & constant_node = node->as(); + return visitConstantValue(constant_node.getValue(), constant_node.getResultType()); +} + +PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::visitLambda(const QueryTreeNodePtr & node) +{ + auto & lambda_node = node->as(); + auto result_type = lambda_node.getResultType(); + if (!result_type) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Lambda {} is not resolved during query analysis", + lambda_node.formatASTForErrorMessage()); + + auto & lambda_arguments_nodes = lambda_node.getArguments().getNodes(); + size_t lambda_arguments_nodes_size = lambda_arguments_nodes.size(); + + NamesAndTypesList lambda_arguments_names_and_types; + + for (size_t i = 0; i < lambda_arguments_nodes_size; ++i) + { + const auto & lambda_argument_name = 
lambda_node.getArgumentNames().at(i); + auto lambda_argument_type = lambda_arguments_nodes[i]->getResultType(); + lambda_arguments_names_and_types.emplace_back(lambda_argument_name, std::move(lambda_argument_type)); + } + + auto lambda_actions_dag = std::make_shared(); + actions_stack.emplace_back(lambda_actions_dag, node); + + auto [lambda_expression_node_name, level] = visitImpl(lambda_node.getExpression()); + lambda_actions_dag->getOutputs().push_back(actions_stack.back().getNodeOrThrow(lambda_expression_node_name)); + lambda_actions_dag->removeUnusedActions(Names(1, lambda_expression_node_name)); + + auto expression_actions_settings = ExpressionActionsSettings::fromContext(planner_context->getQueryContext(), CompileExpressions::yes); + auto lambda_actions = std::make_shared(lambda_actions_dag, expression_actions_settings); + + Names captured_column_names; + ActionsDAG::NodeRawConstPtrs lambda_children; + Names required_column_names = lambda_actions->getRequiredColumns(); + + if (level == actions_stack.size() - 1) + --level; + + const auto & lambda_argument_names = lambda_node.getArgumentNames(); + + for (const auto & required_column_name : required_column_names) + { + auto it = std::find(lambda_argument_names.begin(), lambda_argument_names.end(), required_column_name); + + if (it == lambda_argument_names.end()) + { + lambda_children.push_back(actions_stack[level].getNodeOrThrow(required_column_name)); + captured_column_names.push_back(required_column_name); + } + } + + auto lambda_node_name = calculateActionNodeName(node, *planner_context); + auto function_capture = std::make_shared( + lambda_actions, captured_column_names, lambda_arguments_names_and_types, result_type, lambda_expression_node_name); + actions_stack.pop_back(); + + actions_stack[level].addFunctionIfNecessary(lambda_node_name, std::move(lambda_children), std::move(function_capture)); + + size_t actions_stack_size = actions_stack.size(); + for (size_t i = level + 1; i < actions_stack_size; ++i) + { + auto & actions_stack_node = actions_stack[i]; + actions_stack_node.addInputColumnIfNecessary(lambda_node_name, result_type); + } + + return {lambda_node_name, level}; +} + +PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::makeSetForInFunction(const QueryTreeNodePtr & node) +{ + const auto & function_node = node->as(); + auto in_second_argument = function_node.getArguments().getNodes().at(1); + + auto set_key = planner_context->createSetKey(in_second_argument); + const auto & planner_set = planner_context->getSetOrThrow(set_key); + + ColumnWithTypeAndName column; + column.name = set_key; + column.type = std::make_shared(); + + bool set_is_created = planner_set.getSet()->isCreated(); + auto column_set = ColumnSet::create(1, planner_set.getSet()); + + if (set_is_created) + column.column = ColumnConst::create(std::move(column_set), 1); + else + column.column = std::move(column_set); + + actions_stack[0].addConstantIfNecessary(set_key, column); + + size_t actions_stack_size = actions_stack.size(); + for (size_t i = 1; i < actions_stack_size; ++i) + { + auto & actions_stack_node = actions_stack[i]; + actions_stack_node.addInputConstantColumnIfNecessary(set_key, column); + } + + node_to_node_name.emplace(in_second_argument, set_key); + + return {set_key, 0}; +} + +PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::visitFunction(const QueryTreeNodePtr & node) +{ + const auto & function_node = node->as(); + if (const auto constant_value_or_null = 
function_node.getConstantValueOrNull()) + return visitConstantValue(constant_value_or_null->getValue(), constant_value_or_null->getType()); + + std::optional in_function_second_argument_node_name_with_level; + + if (isNameOfInFunction(function_node.getFunctionName())) + in_function_second_argument_node_name_with_level = makeSetForInFunction(node); + + const auto & function_arguments = function_node.getArguments().getNodes(); + size_t function_arguments_size = function_arguments.size(); + + Names function_arguments_node_names; + function_arguments_node_names.reserve(function_arguments_size); + + size_t level = 0; + for (size_t function_argument_index = 0; function_argument_index < function_arguments_size; ++function_argument_index) + { + if (in_function_second_argument_node_name_with_level && function_argument_index == 1) + { + auto & [node_name, node_min_level] = *in_function_second_argument_node_name_with_level; + function_arguments_node_names.push_back(std::move(node_name)); + level = std::max(level, node_min_level); + continue; + } + + const auto & argument = function_arguments[function_argument_index]; + + if (argument->getNodeType() == QueryTreeNodeType::LAMBDA) + { + auto [node_name, node_min_level] = visitLambda(argument); + function_arguments_node_names.push_back(std::move(node_name)); + level = std::max(level, node_min_level); + continue; + } + + auto [node_name, node_min_level] = visitImpl(argument); + function_arguments_node_names.push_back(std::move(node_name)); + level = std::max(level, node_min_level); + } + + auto function_node_name = calculateActionNodeName(node, *planner_context, node_to_node_name); + + if (function_node.isAggregateFunction() || function_node.isWindowFunction()) + { + size_t actions_stack_size = actions_stack.size(); + + for (size_t i = 0; i < actions_stack_size; ++i) + { + auto & actions_stack_node = actions_stack[i]; + actions_stack_node.addInputColumnIfNecessary(function_node_name, function_node.getResultType()); + } + + return {function_node_name, 0}; + } + + ActionsDAG::NodeRawConstPtrs children; + children.reserve(function_arguments_size); + + for (auto & function_argument_node_name : function_arguments_node_names) + children.push_back(actions_stack[level].getNodeOrThrow(function_argument_node_name)); + + if (function_node.getFunctionName() == "arrayJoin") + { + if (level != 0) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Expression in arrayJoin cannot depend on lambda argument: {} ", + function_arguments_node_names.at(0)); + + actions_stack[level].addArrayJoinIfNecessary(function_node_name, children.at(0)); + } + else + { + actions_stack[level].addFunctionIfNecessary(function_node_name, children, function_node.getFunction()); + } + + size_t actions_stack_size = actions_stack.size(); + for (size_t i = level + 1; i < actions_stack_size; ++i) + { + auto & actions_stack_node = actions_stack[i]; + actions_stack_node.addInputColumnIfNecessary(function_node_name, function_node.getResultType()); + } + + return {function_node_name, level}; +} + +PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::visitQueryOrUnion(const QueryTreeNodePtr & node) +{ + const auto constant_value = node->getConstantValueOrNull(); + if (!constant_value) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Scalar subqueries must be evaluated as constants"); + + return visitConstantValue(constant_value->getValue(), constant_value->getType()); +} + +} + +PlannerActionsVisitor::PlannerActionsVisitor(const PlannerContextPtr & planner_context_) + : 
planner_context(planner_context_) +{} + +ActionsDAG::NodeRawConstPtrs PlannerActionsVisitor::visit(ActionsDAGPtr actions_dag, QueryTreeNodePtr expression_node) +{ + PlannerActionsVisitorImpl actions_visitor_impl(actions_dag, planner_context); + return actions_visitor_impl.visit(expression_node); +} + +String calculateActionNodeName(const QueryTreeNodePtr & node, const PlannerContext & planner_context, QueryTreeNodeToName & node_to_name) +{ + auto it = node_to_name.find(node); + if (it != node_to_name.end()) + return it->second; + + String result; + auto node_type = node->getNodeType(); + + switch (node_type) + { + case QueryTreeNodeType::COLUMN: + { + const auto * column_identifier = planner_context.getColumnNodeIdentifierOrNull(node); + + if (column_identifier) + { + result = *column_identifier; + } + else + { + const auto & column_node = node->as(); + result = column_node.getColumnName(); + } + + break; + } + case QueryTreeNodeType::CONSTANT: + { + const auto & constant_node = node->as(); + result = calculateConstantActionNodeName(constant_node.getValue(), constant_node.getResultType()); + break; + } + case QueryTreeNodeType::FUNCTION: + { + if (auto node_constant_value = node->getConstantValueOrNull()) + { + result = calculateConstantActionNodeName(node_constant_value->getValue(), node_constant_value->getType()); + } + else + { + const auto & function_node = node->as(); + String in_function_second_argument_node_name; + + if (isNameOfInFunction(function_node.getFunctionName())) + { + const auto & in_second_argument_node = function_node.getArguments().getNodes().at(1); + in_function_second_argument_node_name = planner_context.createSetKey(in_second_argument_node); + } + + WriteBufferFromOwnString buffer; + buffer << function_node.getFunctionName(); + + const auto & function_parameters_nodes = function_node.getParameters().getNodes(); + + if (!function_parameters_nodes.empty()) + { + buffer << '('; + + size_t function_parameters_nodes_size = function_parameters_nodes.size(); + for (size_t i = 0; i < function_parameters_nodes_size; ++i) + { + const auto & function_parameter_node = function_parameters_nodes[i]; + buffer << calculateActionNodeName(function_parameter_node, planner_context, node_to_name); + + if (i + 1 != function_parameters_nodes_size) + buffer << ", "; + } + + buffer << ')'; + } + + const auto & function_arguments_nodes = function_node.getArguments().getNodes(); + String function_argument_name; + + buffer << '('; + + size_t function_arguments_nodes_size = function_arguments_nodes.size(); + for (size_t i = 0; i < function_arguments_nodes_size; ++i) + { + if (i == 1 && !in_function_second_argument_node_name.empty()) + { + function_argument_name = in_function_second_argument_node_name; + } + else + { + const auto & function_argument_node = function_arguments_nodes[i]; + function_argument_name = calculateActionNodeName(function_argument_node, planner_context, node_to_name); + } + + buffer << function_argument_name; + + if (i + 1 != function_arguments_nodes_size) + buffer << ", "; + } + + buffer << ')'; + + if (function_node.isWindowFunction()) + { + buffer << " OVER ("; + buffer << calculateWindowNodeActionName(function_node.getWindowNode(), planner_context, node_to_name); + buffer << ')'; + } + + result = buffer.str(); + } + break; + } + case QueryTreeNodeType::UNION: + [[fallthrough]]; + case QueryTreeNodeType::QUERY: + { + if (auto node_constant_value = node->getConstantValueOrNull()) + { + result = calculateConstantActionNodeName(node_constant_value->getValue(), 
node_constant_value->getType()); + } + else + { + auto query_hash = node->getTreeHash(); + result = "__subquery_" + std::to_string(query_hash.first) + '_' + std::to_string(query_hash.second); + } + break; + } + case QueryTreeNodeType::LAMBDA: + { + auto lambda_hash = node->getTreeHash(); + + result = "__lambda_" + toString(lambda_hash.first) + '_' + toString(lambda_hash.second); + break; + } + default: + { + throw Exception(ErrorCodes::LOGICAL_ERROR, "Invalid action query tree node {}", node->formatASTForErrorMessage()); + } + } + + node_to_name.emplace(node, result); + + return result; +} + +String calculateActionNodeName(const QueryTreeNodePtr & node, const PlannerContext & planner_context) +{ + QueryTreeNodeToName empty_map; + return calculateActionNodeName(node, planner_context, empty_map); +} + +String calculateConstantActionNodeName(const Field & constant_literal, const DataTypePtr & constant_type) +{ + auto constant_name = applyVisitor(FieldVisitorToString(), constant_literal); + return constant_name + "_" + constant_type->getName(); +} + +String calculateConstantActionNodeName(const Field & constant_literal) +{ + return calculateConstantActionNodeName(constant_literal, applyVisitor(FieldToDataType(), constant_literal)); +} + +String calculateWindowNodeActionName(const QueryTreeNodePtr & node, const PlannerContext & planner_context, QueryTreeNodeToName & node_to_name) +{ + auto & window_node = node->as(); + WriteBufferFromOwnString buffer; + + if (window_node.hasPartitionBy()) + { + buffer << "PARTITION BY "; + + auto & partition_by_nodes = window_node.getPartitionBy().getNodes(); + size_t partition_by_nodes_size = partition_by_nodes.size(); + + for (size_t i = 0; i < partition_by_nodes_size; ++i) + { + auto & partition_by_node = partition_by_nodes[i]; + buffer << calculateActionNodeName(partition_by_node, planner_context, node_to_name); + if (i + 1 != partition_by_nodes_size) + buffer << ", "; + } + } + + if (window_node.hasOrderBy()) + { + if (window_node.hasPartitionBy()) + buffer << ' '; + + buffer << "ORDER BY "; + + auto & order_by_nodes = window_node.getOrderBy().getNodes(); + size_t order_by_nodes_size = order_by_nodes.size(); + + for (size_t i = 0; i < order_by_nodes_size; ++i) + { + auto & sort_node = order_by_nodes[i]->as(); + buffer << calculateActionNodeName(sort_node.getExpression(), planner_context, node_to_name); + + auto sort_direction = sort_node.getSortDirection(); + buffer << (sort_direction == SortDirection::ASCENDING ? " ASC" : " DESC"); + + auto nulls_sort_direction = sort_node.getNullsSortDirection(); + + if (nulls_sort_direction) + buffer << " NULLS " << (nulls_sort_direction == sort_direction ? 
"LAST" : "FIRST"); + + if (auto collator = sort_node.getCollator()) + buffer << " COLLATE " << collator->getLocale(); + + if (sort_node.withFill()) + { + buffer << " WITH FILL"; + + if (sort_node.hasFillFrom()) + buffer << " FROM " << calculateActionNodeName(sort_node.getFillFrom(), planner_context, node_to_name); + + if (sort_node.hasFillTo()) + buffer << " TO " << calculateActionNodeName(sort_node.getFillTo(), planner_context, node_to_name); + + if (sort_node.hasFillStep()) + buffer << " STEP " << calculateActionNodeName(sort_node.getFillStep(), planner_context, node_to_name); + } + + if (i + 1 != order_by_nodes_size) + buffer << ", "; + } + } + + auto & window_frame = window_node.getWindowFrame(); + if (!window_frame.is_default) + { + if (window_node.hasPartitionBy() || window_node.hasOrderBy()) + buffer << ' '; + + buffer << window_frame.type << " BETWEEN "; + if (window_frame.begin_type == WindowFrame::BoundaryType::Current) + { + buffer << "CURRENT ROW"; + } + else if (window_frame.begin_type == WindowFrame::BoundaryType::Unbounded) + { + buffer << "UNBOUNDED"; + buffer << " " << (window_frame.begin_preceding ? "PRECEDING" : "FOLLOWING"); + } + else + { + buffer << calculateActionNodeName(window_node.getFrameBeginOffsetNode(), planner_context, node_to_name); + buffer << " " << (window_frame.begin_preceding ? "PRECEDING" : "FOLLOWING"); + } + + buffer << " AND "; + + if (window_frame.end_type == WindowFrame::BoundaryType::Current) + { + buffer << "CURRENT ROW"; + } + else if (window_frame.end_type == WindowFrame::BoundaryType::Unbounded) + { + buffer << "UNBOUNDED"; + buffer << " " << (window_frame.end_preceding ? "PRECEDING" : "FOLLOWING"); + } + else + { + buffer << calculateActionNodeName(window_node.getFrameEndOffsetNode(), planner_context, node_to_name); + buffer << " " << (window_frame.end_preceding ? "PRECEDING" : "FOLLOWING"); + } + } + + return buffer.str(); +} + +String calculateWindowNodeActionName(const QueryTreeNodePtr & node, const PlannerContext & planner_context) +{ + QueryTreeNodeToName empty_map; + return calculateWindowNodeActionName(node, planner_context, empty_map); +} + +} diff --git a/src/Planner/PlannerActionsVisitor.h b/src/Planner/PlannerActionsVisitor.h new file mode 100644 index 00000000000..405031daa40 --- /dev/null +++ b/src/Planner/PlannerActionsVisitor.h @@ -0,0 +1,78 @@ +#pragma once + +#include + +#include +#include + +#include + +#include + +#include + +namespace DB +{ + +class PlannerContext; +using PlannerContextPtr = std::shared_ptr; + +/** Planner actions visitor is responsible for adding necessary actions to calculate query tree expression node + * into actions dag. + * + * Preconditions: + * 1. Table expression data for table expression nodes is collected in planner context. + * For column node, that has column table expression source, identifier for column name in table expression data + * is used as action dag node name. + * 2. Sets for IN functions are already collected in planner context. + * + * During actions build, there is special handling for following functions: + * 1. Aggregate functions are added in actions dag as INPUT nodes. Aggregate functions arguments are not added. + * 2. For function `in` and its variants, already collected sets from planner context are used. + */ +class PlannerActionsVisitor +{ +public: + explicit PlannerActionsVisitor(const PlannerContextPtr & planner_context_); + + /** Add actions necessary to calculate expression node into expression dag. + * Necessary actions are not added in actions dag output. 
+ * Returns query tree expression node actions dag nodes. + */ + ActionsDAG::NodeRawConstPtrs visit(ActionsDAGPtr actions_dag, QueryTreeNodePtr expression_node); + +private: + const PlannerContextPtr planner_context; +}; + +/** Calculate query tree expression node action dag name and add them into node to name map. + * If node exists in map, name from map is used. + * + * For column node column node identifier from planner context is used. + */ +using QueryTreeNodeToName = std::unordered_map; +String calculateActionNodeName(const QueryTreeNodePtr & node, const PlannerContext & planner_context, QueryTreeNodeToName & node_to_name); + +/** Calculate query tree expression node action dag name. + * + * For column node column node identifier from planner context is used. + */ +String calculateActionNodeName(const QueryTreeNodePtr & node, const PlannerContext & planner_context); + +/// Calculate action node name for constant +String calculateConstantActionNodeName(const Field & constant_literal, const DataTypePtr & constant_type); + +/// Calculate action node name for constant, data type will be derived from constant literal value +String calculateConstantActionNodeName(const Field & constant_literal); + +/** Calculate action node name for window node. + * Window node action name can only be part of window function action name. + */ +String calculateWindowNodeActionName(const QueryTreeNodePtr & node, const PlannerContext & planner_context, QueryTreeNodeToName & node_to_name); + +/** Calculate action node name for window node. + * Window node action name can only be part of window function action name. + */ +String calculateWindowNodeActionName(const QueryTreeNodePtr & node, const PlannerContext & planner_context); + +} diff --git a/src/Planner/PlannerAggregation.cpp b/src/Planner/PlannerAggregation.cpp new file mode 100644 index 00000000000..3322ef9364f --- /dev/null +++ b/src/Planner/PlannerAggregation.cpp @@ -0,0 +1,225 @@ +#include + +#include + +#include +#include +#include +#include +#include + +#include + +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; + extern const int BAD_ARGUMENTS; +} + +namespace +{ + +enum class GroupByKind +{ + ORDINARY, + ROLLUP, + CUBE, + GROUPING_SETS +}; + +class GroupingFunctionResolveVisitor : public InDepthQueryTreeVisitor +{ +public: + GroupingFunctionResolveVisitor(GroupByKind group_by_kind_, + const Names & aggregation_keys_, + const GroupingSetsParamsList & grouping_sets_parameters_list_, + const PlannerContext & planner_context_) + : group_by_kind(group_by_kind_) + , planner_context(planner_context_) + { + size_t aggregation_keys_size = aggregation_keys_.size(); + for (size_t i = 0; i < aggregation_keys_size; ++i) + aggegation_key_to_index.emplace(aggregation_keys_[i], i); + + for (const auto & grouping_sets_parameter : grouping_sets_parameters_list_) + { + grouping_sets_keys_indices.emplace_back(); + auto & grouping_set_keys_indices = grouping_sets_keys_indices.back(); + + for (const auto & used_key : grouping_sets_parameter.used_keys) + { + auto aggregation_key_index_it = aggegation_key_to_index.find(used_key); + if (aggregation_key_index_it == aggegation_key_to_index.end()) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Aggregation key {} in GROUPING SETS is not found in GROUP BY keys"); + + grouping_set_keys_indices.push_back(aggregation_key_index_it->second); + } + } + } + + void visitImpl(const QueryTreeNodePtr & node) + { + auto * function_node = node->as(); + if (!function_node || 
function_node->getFunctionName() != "grouping") + return; + + size_t aggregation_keys_size = aggegation_key_to_index.size(); + + ColumnNumbers arguments_indexes; + + for (const auto & argument : function_node->getArguments().getNodes()) + { + String action_node_name = calculateActionNodeName(argument, planner_context); + + auto it = aggegation_key_to_index.find(action_node_name); + if (it == aggegation_key_to_index.end()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Argument of GROUPING function {} is not a part of GROUP BY clause", + argument->formatASTForErrorMessage()); + + arguments_indexes.push_back(it->second); + } + + QueryTreeNodeWeakPtr column_source; + auto grouping_set_argument_column = std::make_shared(NameAndTypePair{"__grouping_set", std::make_shared()}, column_source); + function_node->getArguments().getNodes().clear(); + + bool force_grouping_standard_compatibility = planner_context.getQueryContext()->getSettingsRef().force_grouping_standard_compatibility; + + switch (group_by_kind) + { + case GroupByKind::ORDINARY: + { + auto grouping_ordinary_function = std::make_shared(arguments_indexes, force_grouping_standard_compatibility); + auto grouping_ordinary_function_adaptor = std::make_shared(std::move(grouping_ordinary_function)); + function_node->resolveAsFunction(std::move(grouping_ordinary_function_adaptor), std::make_shared()); + break; + } + case GroupByKind::ROLLUP: + { + auto grouping_rollup_function = std::make_shared(arguments_indexes, aggregation_keys_size, force_grouping_standard_compatibility); + auto grouping_rollup_function_adaptor = std::make_shared(std::move(grouping_rollup_function)); + function_node->resolveAsFunction(std::move(grouping_rollup_function_adaptor), std::make_shared()); + function_node->getArguments().getNodes().push_back(std::move(grouping_set_argument_column)); + break; + } + case GroupByKind::CUBE: + { + auto grouping_cube_function = std::make_shared(arguments_indexes, aggregation_keys_size, force_grouping_standard_compatibility); + auto grouping_cube_function_adaptor = std::make_shared(std::move(grouping_cube_function)); + function_node->resolveAsFunction(std::move(grouping_cube_function_adaptor), std::make_shared()); + function_node->getArguments().getNodes().push_back(std::move(grouping_set_argument_column)); + break; + } + case GroupByKind::GROUPING_SETS: + { + auto grouping_grouping_sets_function = std::make_shared(arguments_indexes, grouping_sets_keys_indices, force_grouping_standard_compatibility); + auto grouping_grouping_sets_function_adaptor = std::make_shared(std::move(grouping_grouping_sets_function)); + function_node->resolveAsFunction(std::move(grouping_grouping_sets_function_adaptor), std::make_shared()); + function_node->getArguments().getNodes().push_back(std::move(grouping_set_argument_column)); + break; + } + } + } + + static bool needChildVisit(const QueryTreeNodePtr &, const QueryTreeNodePtr & child_node) + { + return !(child_node->getNodeType() == QueryTreeNodeType::QUERY || child_node->getNodeType() == QueryTreeNodeType::UNION); + } + +private: + GroupByKind group_by_kind; + std::unordered_map aggegation_key_to_index; + // Indexes of aggregation keys used in each grouping set (only for GROUP BY GROUPING SETS) + ColumnNumbersList grouping_sets_keys_indices; + const PlannerContext & planner_context; +}; + +void resolveGroupingFunctions(QueryTreeNodePtr & node, + GroupByKind group_by_kind, + const Names & aggregation_keys, + const GroupingSetsParamsList & grouping_sets_parameters_list, + const PlannerContext & 
planner_context) +{ + auto & query_node_typed = node->as(); + + GroupingFunctionResolveVisitor visitor(group_by_kind, aggregation_keys, grouping_sets_parameters_list, planner_context); + + if (query_node_typed.hasHaving()) + visitor.visit(query_node_typed.getHaving()); + + if (query_node_typed.hasOrderBy()) + visitor.visit(query_node_typed.getOrderByNode()); + + visitor.visit(query_node_typed.getProjectionNode()); +} + +} + +void resolveGroupingFunctions(QueryTreeNodePtr & query_node, + const Names & aggregation_keys, + const GroupingSetsParamsList & grouping_sets_parameters_list, + const PlannerContext & planner_context) +{ + auto & query_node_typed = query_node->as(); + + GroupByKind group_by_kind = GroupByKind::ORDINARY; + if (query_node_typed.isGroupByWithRollup()) + group_by_kind = GroupByKind::ROLLUP; + else if (query_node_typed.isGroupByWithCube()) + group_by_kind = GroupByKind::CUBE; + else if (query_node_typed.isGroupByWithGroupingSets()) + group_by_kind = GroupByKind::GROUPING_SETS; + + resolveGroupingFunctions(query_node, group_by_kind, aggregation_keys, grouping_sets_parameters_list, planner_context); +} + +AggregateDescriptions extractAggregateDescriptions(const QueryTreeNodes & aggregate_function_nodes, const PlannerContext & planner_context) +{ + QueryTreeNodeToName node_to_name; + NameSet unique_aggregate_action_node_names; + AggregateDescriptions aggregate_descriptions; + + for (const auto & aggregate_function_node : aggregate_function_nodes) + { + const auto & aggregate_function_node_typed = aggregate_function_node->as(); + String node_name = calculateActionNodeName(aggregate_function_node, planner_context, node_to_name); + auto [_, inserted] = unique_aggregate_action_node_names.emplace(node_name); + if (!inserted) + continue; + + AggregateDescription aggregate_description; + aggregate_description.function = aggregate_function_node_typed.getAggregateFunction(); + + const auto & parameters_nodes = aggregate_function_node_typed.getParameters().getNodes(); + aggregate_description.parameters.reserve(parameters_nodes.size()); + + for (const auto & parameter_node : parameters_nodes) + { + /// Function parameters constness validated during analysis stage + aggregate_description.parameters.push_back(parameter_node->getConstantValue().getValue()); + } + + const auto & arguments_nodes = aggregate_function_node_typed.getArguments().getNodes(); + aggregate_description.argument_names.reserve(arguments_nodes.size()); + + for (const auto & argument_node : arguments_nodes) + { + String argument_node_name = calculateActionNodeName(argument_node, planner_context, node_to_name); + aggregate_description.argument_names.emplace_back(std::move(argument_node_name)); + } + + aggregate_description.column_name = std::move(node_name); + aggregate_descriptions.push_back(std::move(aggregate_description)); + } + + return aggregate_descriptions; +} + +} diff --git a/src/Planner/PlannerAggregation.h b/src/Planner/PlannerAggregation.h new file mode 100644 index 00000000000..6dfd7faca22 --- /dev/null +++ b/src/Planner/PlannerAggregation.h @@ -0,0 +1,26 @@ +#pragma once + +#include +#include + +#include +#include + +#include + +namespace DB +{ + +/** Resolve GROUPING functions in query node. + * GROUPING function is replaced with specialized GROUPING function based on GROUP BY modifiers. + * For ROLLUP, CUBE, GROUPING SETS specialized GROUPING function take special __grouping_set column as argument. 
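The header only states that GROUPING is replaced by a specialized implementation, so a reminder of what the function computes may help: it returns a bitmask over its arguments, one bit per listed key, set when that key is aggregated away in the current grouping set. A minimal standalone sketch of the standard-SQL semantics (the behaviour selected by `force_grouping_standard_compatibility`); this is an illustration, not the PR's implementation:

```cpp
#include <cstdint>
#include <unordered_set>
#include <vector>

/// Illustrative sketch of standard-SQL GROUPING semantics (not the PR's code).
/// `argument_indexes` are the GROUP BY key indexes passed to GROUPING(...),
/// `current_set_keys` are the key indexes present in the current grouping set.
uint64_t groupingMask(const std::vector<size_t> & argument_indexes,
                      const std::unordered_set<size_t> & current_set_keys)
{
    uint64_t mask = 0;
    for (size_t key_index : argument_indexes)
    {
        mask <<= 1;
        if (!current_set_keys.contains(key_index)) /// key is aggregated away in this set
            mask |= 1;
    }
    return mask;
}
```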
+ */ +void resolveGroupingFunctions(QueryTreeNodePtr & query_node, + const Names & aggregation_keys, + const GroupingSetsParamsList & grouping_sets_parameters_list, + const PlannerContext & planner_context); + +/// Extract aggregate descriptions from aggregate function nodes +AggregateDescriptions extractAggregateDescriptions(const QueryTreeNodes & aggregate_function_nodes, const PlannerContext & planner_context); + +} diff --git a/src/Planner/PlannerContext.cpp b/src/Planner/PlannerContext.cpp new file mode 100644 index 00000000000..9f4a489bf5f --- /dev/null +++ b/src/Planner/PlannerContext.cpp @@ -0,0 +1,174 @@ +#include + +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +const ColumnIdentifier & GlobalPlannerContext::createColumnIdentifier(const QueryTreeNodePtr & column_node) +{ + const auto & column_node_typed = column_node->as(); + auto column_source_node = column_node_typed.getColumnSource(); + + return createColumnIdentifier(column_node_typed.getColumn(), column_source_node); +} + +const ColumnIdentifier & GlobalPlannerContext::createColumnIdentifier(const NameAndTypePair & column, const QueryTreeNodePtr & column_source_node) +{ + std::string column_identifier; + + if (column_source_node->hasAlias()) + column_identifier += column_source_node->getAlias(); + else if (const auto * table_source_node = column_source_node->as()) + column_identifier += table_source_node->getStorageID().getFullNameNotQuoted(); + + if (!column_identifier.empty()) + column_identifier += '.'; + + column_identifier += column.name; + column_identifier += '_' + std::to_string(column_identifiers.size()); + + auto [it, inserted] = column_identifiers.emplace(column_identifier); + assert(inserted); + + return *it; +} + +bool GlobalPlannerContext::hasColumnIdentifier(const ColumnIdentifier & column_identifier) +{ + return column_identifiers.contains(column_identifier); +} + +PlannerContext::PlannerContext(ContextPtr query_context_, GlobalPlannerContextPtr global_planner_context_) + : query_context(std::move(query_context_)) + , global_planner_context(std::move(global_planner_context_)) +{} + +TableExpressionData & PlannerContext::getOrCreateTableExpressionData(const QueryTreeNodePtr & table_expression_node) +{ + auto [it, _] = table_expression_node_to_data.emplace(table_expression_node, TableExpressionData()); + return it->second; +} + +const TableExpressionData & PlannerContext::getTableExpressionDataOrThrow(const QueryTreeNodePtr & table_expression_node) const +{ + auto table_expression_data_it = table_expression_node_to_data.find(table_expression_node); + if (table_expression_data_it == table_expression_node_to_data.end()) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Table expression {} is not registered in planner context", + table_expression_node->formatASTForErrorMessage()); + + return table_expression_data_it->second; +} + +TableExpressionData & PlannerContext::getTableExpressionDataOrThrow(const QueryTreeNodePtr & table_expression_node) +{ + auto table_expression_data_it = table_expression_node_to_data.find(table_expression_node); + if (table_expression_data_it == table_expression_node_to_data.end()) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Table expression {} is not registered in planner context", + table_expression_node->formatASTForErrorMessage()); + + return table_expression_data_it->second; +} + +const TableExpressionData * PlannerContext::getTableExpressionDataOrNull(const QueryTreeNodePtr & table_expression_node) const +{ + auto 
table_expression_data_it = table_expression_node_to_data.find(table_expression_node); + if (table_expression_data_it == table_expression_node_to_data.end()) + return nullptr; + + return &table_expression_data_it->second; +} + +TableExpressionData * PlannerContext::getTableExpressionDataOrNull(const QueryTreeNodePtr & table_expression_node) +{ + auto table_expression_data_it = table_expression_node_to_data.find(table_expression_node); + if (table_expression_data_it == table_expression_node_to_data.end()) + return nullptr; + + return &table_expression_data_it->second; +} + +const ColumnIdentifier & PlannerContext::getColumnNodeIdentifierOrThrow(const QueryTreeNodePtr & column_node) const +{ + auto & column_node_typed = column_node->as(); + const auto & column_name = column_node_typed.getColumnName(); + auto column_source = column_node_typed.getColumnSource(); + const auto & table_expression_data = getTableExpressionDataOrThrow(column_source); + return table_expression_data.getColumnIdentifierOrThrow(column_name); +} + +const ColumnIdentifier * PlannerContext::getColumnNodeIdentifierOrNull(const QueryTreeNodePtr & column_node) const +{ + auto & column_node_typed = column_node->as(); + const auto & column_name = column_node_typed.getColumnName(); + auto column_source = column_node_typed.getColumnSourceOrNull(); + if (!column_source) + return nullptr; + + const auto * table_expression_data = getTableExpressionDataOrNull(column_source); + if (!table_expression_data) + return nullptr; + + return table_expression_data->getColumnIdentifierOrNull(column_name); +} + +PlannerContext::SetKey PlannerContext::createSetKey(const QueryTreeNodePtr & set_source_node) +{ + auto set_source_hash = set_source_node->getTreeHash(); + return "__set_" + toString(set_source_hash.first) + '_' + toString(set_source_hash.second); +} + +void PlannerContext::registerSet(const SetKey & key, PlannerSet planner_set) +{ + if (!planner_set.getSet()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Set must be initialized"); + + const auto & subquery_node = planner_set.getSubqueryNode(); + if (subquery_node) + { + auto node_type = subquery_node->getNodeType(); + + if (node_type != QueryTreeNodeType::QUERY && + node_type != QueryTreeNodeType::UNION) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Invalid node for set table expression. Expected query or union. Actual {}", + subquery_node->formatASTForErrorMessage()); + } + + set_key_to_set.emplace(key, std::move(planner_set)); +} + +bool PlannerContext::hasSet(const SetKey & key) const +{ + return set_key_to_set.contains(key); +} + +const PlannerSet & PlannerContext::getSetOrThrow(const SetKey & key) const +{ + auto it = set_key_to_set.find(key); + if (it == set_key_to_set.end()) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "No set is registered for key {}", + key); + + return it->second; +} + +const PlannerSet * PlannerContext::getSetOrNull(const SetKey & key) const +{ + auto it = set_key_to_set.find(key); + if (it == set_key_to_set.end()) + return nullptr; + + return &it->second; +} + +} diff --git a/src/Planner/PlannerContext.h b/src/Planner/PlannerContext.h new file mode 100644 index 00000000000..63874bf7ab9 --- /dev/null +++ b/src/Planner/PlannerContext.h @@ -0,0 +1,205 @@ +#pragma once + +#include + +#include +#include + +#include +#include + +#include + +#include + +namespace DB +{ + +/** Global planner context contains common objects that are shared between each planner context. + * + * 1. Column identifiers. 
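The identifier format produced by `createColumnIdentifier` (defined in PlannerContext.cpp above) is easier to see from a concrete value than from the concatenation code. A standalone sketch of the naming rule, using an illustrative table and column that are not taken from the PR:

```cpp
#include <iostream>
#include <string>

/// Sketch of the identifier shape built by GlobalPlannerContext::createColumnIdentifier:
/// "<source alias or database.table>.<column>_<running counter>".
std::string makeColumnIdentifier(const std::string & source_name, const std::string & column_name, size_t counter)
{
    std::string identifier;
    if (!source_name.empty())
        identifier = source_name + '.';
    identifier += column_name;
    identifier += '_' + std::to_string(counter);
    return identifier;
}

int main()
{
    /// Hypothetical example: the third identifier created, for column `id` of table `default.hits`.
    std::cout << makeColumnIdentifier("default.hits", "id", 2) << '\n'; /// default.hits.id_2
}
```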
+ */ +class GlobalPlannerContext +{ +public: + GlobalPlannerContext() = default; + + /** Create column identifier for column node. + * + * Result column identifier is added into context. + */ + const ColumnIdentifier & createColumnIdentifier(const QueryTreeNodePtr & column_node); + + /** Create column identifier for column and column source. + * + * Result column identifier is added into context. + */ + const ColumnIdentifier & createColumnIdentifier(const NameAndTypePair & column, const QueryTreeNodePtr & column_source_node); + + /// Check if context has column identifier + bool hasColumnIdentifier(const ColumnIdentifier & column_identifier); + +private: + std::unordered_set column_identifiers; +}; + +using GlobalPlannerContextPtr = std::shared_ptr; + +/** PlannerSet is wrapper around Set that is used during query planning. + * + * If subquery node is null, such set is already prepared for execution. + * + * If subquery node is not null, then set must be build from the result of the subquery. + * If subquery node is not null, it must have QUERY or UNION type. + */ +class PlannerSet +{ +public: + /// Construct planner set that is ready for execution + explicit PlannerSet(SetPtr set_) + : set(std::move(set_)) + {} + + /// Construct planner set with set and subquery node + explicit PlannerSet(SetPtr set_, QueryTreeNodePtr subquery_node_) + : set(std::move(set_)) + , subquery_node(std::move(subquery_node_)) + {} + + /// Get set + const SetPtr & getSet() const + { + return set; + } + + /// Get subquery node + const QueryTreeNodePtr & getSubqueryNode() const + { + return subquery_node; + } + +private: + SetPtr set; + + QueryTreeNodePtr subquery_node; +}; + +class PlannerContext +{ +public: + /// Create planner context with query context and global planner context + PlannerContext(ContextPtr query_context_, GlobalPlannerContextPtr global_planner_context_); + + /// Get planner context query context + const ContextPtr & getQueryContext() const + { + return query_context; + } + + /// Get planner context query context + ContextPtr & getQueryContext() + { + return query_context; + } + + /// Get global planner context + const GlobalPlannerContextPtr & getGlobalPlannerContext() const + { + return global_planner_context; + } + + /// Get global planner context + GlobalPlannerContextPtr & getGlobalPlannerContext() + { + return global_planner_context; + } + + /// Get or create table expression data for table expression node. + TableExpressionData & getOrCreateTableExpressionData(const QueryTreeNodePtr & table_expression_node); + + /** Get table expression data. + * Exception is thrown if there are no table expression data for table expression node. + */ + const TableExpressionData & getTableExpressionDataOrThrow(const QueryTreeNodePtr & table_expression_node) const; + + /** Get table expression data. + * Exception is thrown if there are no table expression data for table expression node. + */ + TableExpressionData & getTableExpressionDataOrThrow(const QueryTreeNodePtr & table_expression_node); + + /** Get table expression data. + * Null is returned if there are no table expression data for table expression node. + */ + const TableExpressionData * getTableExpressionDataOrNull(const QueryTreeNodePtr & table_expression_node) const; + + /** Get table expression data. + * Null is returned if there are no table expression data for table expression node. 
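The set registry declared below is what `makeSetForInFunction` in PlannerActionsVisitor.cpp consumes. A rough sketch of the intended round trip (not part of the PR; `planner_context`, a prepared `SetPtr set`, and `in_second_argument`, the right-hand side of an IN expression from the query tree, are assumed to exist):

```cpp
// Rough sketch of the set registry round trip (not part of the PR).
auto set_key = planner_context->createSetKey(in_second_argument); /// "__set_<tree hash>"

if (!planner_context->hasSet(set_key))
    planner_context->registerSet(set_key, PlannerSet(set)); /// or PlannerSet(set, subquery_node) for a subquery that still has to run

const auto & planner_set = planner_context->getSetOrThrow(set_key);
bool is_ready = planner_set.getSet()->isCreated(); /// false while the subquery result is still pending
```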
+ */ + TableExpressionData * getTableExpressionDataOrNull(const QueryTreeNodePtr & table_expression_node); + + /// Get table expression node to data read only map + const std::unordered_map & getTableExpressionNodeToData() const + { + return table_expression_node_to_data; + } + + /** Get column node identifier. + * For column node source check if table expression data is registered. + * If table expression data is not registered exception is thrown. + * In table expression data get column node identifier using column name. + */ + const ColumnIdentifier & getColumnNodeIdentifierOrThrow(const QueryTreeNodePtr & column_node) const; + + /** Get column node identifier. + * For column node source check if table expression data is registered. + * If table expression data is not registered null is returned. + * In table expression data get column node identifier or null using column name. + */ + const ColumnIdentifier * getColumnNodeIdentifierOrNull(const QueryTreeNodePtr & column_node) const; + + using SetKey = std::string; + + using SetKeyToSet = std::unordered_map; + + /// Create set key for set source node + static SetKey createSetKey(const QueryTreeNodePtr & set_source_node); + + /// Register set for set key + void registerSet(const SetKey & key, PlannerSet planner_set); + + /// Returns true if set is registered for key, false otherwise + bool hasSet(const SetKey & key) const; + + /// Get set for key, if no set is registered logical exception is thrown + const PlannerSet & getSetOrThrow(const SetKey & key) const; + + /// Get set for key, if no set is registered null is returned + const PlannerSet * getSetOrNull(const SetKey & key) const; + + /// Get registered sets + const SetKeyToSet & getRegisteredSets() const + { + return set_key_to_set; + } + +private: + /// Query context + ContextPtr query_context; + + /// Global planner context + GlobalPlannerContextPtr global_planner_context; + + /// Column node to column identifier + std::unordered_map column_node_to_column_identifier; + + /// Table expression node to data + std::unordered_map table_expression_node_to_data; + + /// Set key to set + SetKeyToSet set_key_to_set; + +}; + +using PlannerContextPtr = std::shared_ptr; + +} diff --git a/src/Planner/PlannerExpressionAnalysis.cpp b/src/Planner/PlannerExpressionAnalysis.cpp new file mode 100644 index 00000000000..b034edf97d8 --- /dev/null +++ b/src/Planner/PlannerExpressionAnalysis.cpp @@ -0,0 +1,508 @@ +#include + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +namespace +{ + +/** Construct filter analysis result for filter expression node + * Actions before filter are added into into actions chain. + * It is client responsibility to update filter analysis result if filter column must be removed after chain is finalized. + */ +FilterAnalysisResult analyzeFilter(const QueryTreeNodePtr & filter_expression_node, + const ColumnsWithTypeAndName & join_tree_input_columns, + const PlannerContextPtr & planner_context, + ActionsChain & actions_chain) +{ + const auto * chain_available_output_columns = actions_chain.getLastStepAvailableOutputColumnsOrNull(); + const auto & filter_input = chain_available_output_columns ? 
*chain_available_output_columns : join_tree_input_columns; + + FilterAnalysisResult result; + + result.filter_actions = buildActionsDAGFromExpressionNode(filter_expression_node, filter_input, planner_context); + result.filter_column_name = result.filter_actions->getOutputs().at(0)->result_name; + actions_chain.addStep(std::make_unique(result.filter_actions)); + + return result; +} + +/** Construct aggregation analysis result if query tree has GROUP BY or aggregates. + * Actions before aggregation are added into actions chain, if result is not null optional. + */ +std::optional analyzeAggregation(QueryTreeNodePtr & query_tree, + const ColumnsWithTypeAndName & join_tree_input_columns, + const PlannerContextPtr & planner_context, + ActionsChain & actions_chain) +{ + auto & query_node = query_tree->as(); + + auto aggregate_function_nodes = collectAggregateFunctionNodes(query_tree); + auto aggregates_descriptions = extractAggregateDescriptions(aggregate_function_nodes, *planner_context); + + ColumnsWithTypeAndName aggregates_columns; + aggregates_columns.reserve(aggregates_descriptions.size()); + for (auto & aggregate_description : aggregates_descriptions) + aggregates_columns.emplace_back(nullptr, aggregate_description.function->getReturnType(), aggregate_description.column_name); + + Names aggregation_keys; + + const auto * chain_available_output_columns = actions_chain.getLastStepAvailableOutputColumnsOrNull(); + const auto & group_by_input = chain_available_output_columns ? *chain_available_output_columns : join_tree_input_columns; + + ActionsDAGPtr before_aggregation_actions = std::make_shared(group_by_input); + before_aggregation_actions->getOutputs().clear(); + + std::unordered_set before_aggregation_actions_output_node_names; + + GroupingSetsParamsList grouping_sets_parameters_list; + bool group_by_with_constant_keys = false; + bool disable_grouping_sets = false; + + PlannerActionsVisitor actions_visitor(planner_context); + + /// Add expressions from GROUP BY + + if (query_node.hasGroupBy()) + { + if (query_node.isGroupByWithGroupingSets()) + { + for (auto & grouping_set_keys_list_node : query_node.getGroupBy().getNodes()) + { + auto & grouping_set_keys_list_node_typed = grouping_set_keys_list_node->as(); + grouping_sets_parameters_list.emplace_back(); + auto & grouping_sets_parameters = grouping_sets_parameters_list.back(); + + for (auto & grouping_set_key_node : grouping_set_keys_list_node_typed.getNodes()) + { + group_by_with_constant_keys |= grouping_set_key_node->hasConstantValue(); + + auto expression_dag_nodes = actions_visitor.visit(before_aggregation_actions, grouping_set_key_node); + aggregation_keys.reserve(expression_dag_nodes.size()); + + for (auto & expression_dag_node : expression_dag_nodes) + { + grouping_sets_parameters.used_keys.push_back(expression_dag_node->result_name); + if (before_aggregation_actions_output_node_names.contains(expression_dag_node->result_name)) + continue; + + aggregation_keys.push_back(expression_dag_node->result_name); + before_aggregation_actions->getOutputs().push_back(expression_dag_node); + before_aggregation_actions_output_node_names.insert(expression_dag_node->result_name); + } + } + } + + for (auto & grouping_sets_parameter : grouping_sets_parameters_list) + { + NameSet grouping_sets_used_keys; + Names grouping_sets_keys; + + for (auto & key : grouping_sets_parameter.used_keys) + { + auto [_, inserted] = grouping_sets_used_keys.insert(key); + if (inserted) + grouping_sets_keys.push_back(key); + } + + for (auto & key : aggregation_keys) + 
{ + if (grouping_sets_used_keys.contains(key)) + continue; + + grouping_sets_parameter.missing_keys.push_back(key); + } + + grouping_sets_parameter.used_keys = std::move(grouping_sets_keys); + } + + /// It is expected by execution layer that if there are only 1 grouping sets it will be removed + if (grouping_sets_parameters_list.size() == 1) + { + disable_grouping_sets = true; + grouping_sets_parameters_list.clear(); + } + } + else + { + for (auto & group_by_key_node : query_node.getGroupBy().getNodes()) + group_by_with_constant_keys |= group_by_key_node->hasConstantValue(); + + auto expression_dag_nodes = actions_visitor.visit(before_aggregation_actions, query_node.getGroupByNode()); + aggregation_keys.reserve(expression_dag_nodes.size()); + + for (auto & expression_dag_node : expression_dag_nodes) + { + if (before_aggregation_actions_output_node_names.contains(expression_dag_node->result_name)) + continue; + + aggregation_keys.push_back(expression_dag_node->result_name); + before_aggregation_actions->getOutputs().push_back(expression_dag_node); + before_aggregation_actions_output_node_names.insert(expression_dag_node->result_name); + } + } + } + + /// Add expressions from aggregate functions arguments + + for (auto & aggregate_function_node : aggregate_function_nodes) + { + auto & aggregate_function_node_typed = aggregate_function_node->as(); + for (const auto & aggregate_function_node_argument : aggregate_function_node_typed.getArguments().getNodes()) + { + auto expression_dag_nodes = actions_visitor.visit(before_aggregation_actions, aggregate_function_node_argument); + for (auto & expression_dag_node : expression_dag_nodes) + { + if (before_aggregation_actions_output_node_names.contains(expression_dag_node->result_name)) + continue; + + before_aggregation_actions->getOutputs().push_back(expression_dag_node); + before_aggregation_actions_output_node_names.insert(expression_dag_node->result_name); + } + } + } + + if (aggregation_keys.empty() && aggregates_descriptions.empty()) + return {}; + + /** For non ordinary GROUP BY we add virtual __grouping_set column + * With set number, which is used as an additional key at the stage of merging aggregating data. + */ + if (query_node.isGroupByWithRollup() || query_node.isGroupByWithCube() || (query_node.isGroupByWithGroupingSets() && !disable_grouping_sets)) + aggregates_columns.emplace_back(nullptr, std::make_shared(), "__grouping_set"); + + resolveGroupingFunctions(query_tree, aggregation_keys, grouping_sets_parameters_list, *planner_context); + + /// Only aggregation keys and aggregates are available for next steps after GROUP BY step + auto aggregate_step = std::make_unique(before_aggregation_actions, ActionsChainStep::AvailableOutputColumnsStrategy::OUTPUT_NODES, aggregates_columns); + actions_chain.addStep(std::move(aggregate_step)); + + AggregationAnalysisResult aggregation_analysis_result; + aggregation_analysis_result.before_aggregation_actions = before_aggregation_actions; + aggregation_analysis_result.aggregation_keys = std::move(aggregation_keys); + aggregation_analysis_result.aggregate_descriptions = std::move(aggregates_descriptions); + aggregation_analysis_result.grouping_sets_parameters_list = std::move(grouping_sets_parameters_list); + aggregation_analysis_result.group_by_with_constant_keys = group_by_with_constant_keys; + + return aggregation_analysis_result; +} + +/** Construct window analysis result if query tree has window functions. 
+ * Actions before window functions are added into actions chain, if result is not null optional. + */ +std::optional analyzeWindow(QueryTreeNodePtr & query_tree, + const ColumnsWithTypeAndName & join_tree_input_columns, + const PlannerContextPtr & planner_context, + ActionsChain & actions_chain) +{ + auto window_function_nodes = collectWindowFunctionNodes(query_tree); + if (window_function_nodes.empty()) + return {}; + + auto window_descriptions = extractWindowDescriptions(window_function_nodes, *planner_context); + + const auto * chain_available_output_columns = actions_chain.getLastStepAvailableOutputColumnsOrNull(); + const auto & window_input = chain_available_output_columns ? *chain_available_output_columns : join_tree_input_columns; + + PlannerActionsVisitor actions_visitor(planner_context); + + ActionsDAGPtr before_window_actions = std::make_shared(window_input); + before_window_actions->getOutputs().clear(); + + std::unordered_set before_window_actions_output_node_names; + + for (auto & window_function_node : window_function_nodes) + { + auto & window_function_node_typed = window_function_node->as(); + auto & window_node = window_function_node_typed.getWindowNode()->as(); + + auto expression_dag_nodes = actions_visitor.visit(before_window_actions, window_function_node_typed.getArgumentsNode()); + + for (auto & expression_dag_node : expression_dag_nodes) + { + if (before_window_actions_output_node_names.contains(expression_dag_node->result_name)) + continue; + + before_window_actions->getOutputs().push_back(expression_dag_node); + before_window_actions_output_node_names.insert(expression_dag_node->result_name); + } + + expression_dag_nodes = actions_visitor.visit(before_window_actions, window_node.getPartitionByNode()); + + for (auto & expression_dag_node : expression_dag_nodes) + { + if (before_window_actions_output_node_names.contains(expression_dag_node->result_name)) + continue; + + before_window_actions->getOutputs().push_back(expression_dag_node); + before_window_actions_output_node_names.insert(expression_dag_node->result_name); + } + + /** We add only sort column sort expression in before WINDOW actions DAG. + * WITH fill expressions must be constant nodes. 
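Every analyze* helper in this file, including analyzeWindow here, repeats the same chaining idiom. A condensed sketch of that idiom (not a verbatim excerpt from the PR; the `<ActionsDAG>` and `<ActionsChainStep>` template arguments are written out as assumptions):

```cpp
// Condensed sketch of the chaining idiom used by every analyze*() helper
// in PlannerExpressionAnalysis.cpp (not a verbatim excerpt from the PR).
const auto * previous_outputs = actions_chain.getLastStepAvailableOutputColumnsOrNull();
const auto & step_input = previous_outputs ? *previous_outputs : join_tree_input_columns;

auto step_actions = std::make_shared<ActionsDAG>(step_input);
/// ... populate step_actions for this clause via PlannerActionsVisitor ...

actions_chain.addStep(std::make_unique<ActionsChainStep>(step_actions));
```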
+ */ + auto & order_by_node_list = window_node.getOrderBy(); + for (auto & sort_node : order_by_node_list.getNodes()) + { + auto & sort_node_typed = sort_node->as(); + expression_dag_nodes = actions_visitor.visit(before_window_actions, sort_node_typed.getExpression()); + + for (auto & expression_dag_node : expression_dag_nodes) + { + if (before_window_actions_output_node_names.contains(expression_dag_node->result_name)) + continue; + + before_window_actions->getOutputs().push_back(expression_dag_node); + before_window_actions_output_node_names.insert(expression_dag_node->result_name); + } + } + } + + ColumnsWithTypeAndName window_functions_additional_columns; + + for (auto & window_description : window_descriptions) + for (auto & window_function : window_description.window_functions) + window_functions_additional_columns.emplace_back(nullptr, window_function.aggregate_function->getReturnType(), window_function.column_name); + + auto before_window_step = std::make_unique(before_window_actions, + ActionsChainStep::AvailableOutputColumnsStrategy::ALL_NODES, + window_functions_additional_columns); + actions_chain.addStep(std::move(before_window_step)); + + WindowAnalysisResult result; + result.before_window_actions = std::move(before_window_actions); + result.window_descriptions = std::move(window_descriptions); + + return result; +} + +/** Construct projection analysis result. + * Projection actions are added into actions chain. + * It is client responsibility to update projection analysis result with project names actions after chain is finalized. + */ +ProjectionAnalysisResult analyzeProjection(const QueryNode & query_node, + const ColumnsWithTypeAndName & join_tree_input_columns, + const PlannerContextPtr & planner_context, + ActionsChain & actions_chain) +{ + const auto * chain_available_output_columns = actions_chain.getLastStepAvailableOutputColumnsOrNull(); + const auto & projection_input = chain_available_output_columns ? *chain_available_output_columns : join_tree_input_columns; + auto projection_actions = buildActionsDAGFromExpressionNode(query_node.getProjectionNode(), projection_input, planner_context); + + auto projection_columns = query_node.getProjectionColumns(); + size_t projection_columns_size = projection_columns.size(); + + Names projection_column_names; + NamesWithAliases projection_column_names_with_display_aliases; + projection_column_names_with_display_aliases.reserve(projection_columns_size); + + auto & projection_actions_outputs = projection_actions->getOutputs(); + size_t projection_outputs_size = projection_actions_outputs.size(); + + if (projection_columns_size != projection_outputs_size) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "QueryTree projection nodes size mismatch. Expected {}. 
Actual {}", + projection_columns_size, + projection_outputs_size); + + for (size_t i = 0; i < projection_outputs_size; ++i) + { + auto & projection_column = projection_columns[i]; + const auto * projection_node = projection_actions_outputs[i]; + const auto & projection_node_name = projection_node->result_name; + + projection_column_names.push_back(projection_node_name); + projection_column_names_with_display_aliases.push_back({projection_node_name, projection_column.name}); + } + + auto projection_actions_step = std::make_unique(projection_actions); + actions_chain.addStep(std::move(projection_actions_step)); + + ProjectionAnalysisResult result; + result.projection_actions = std::move(projection_actions); + result.projection_column_names = std::move(projection_column_names); + result.projection_column_names_with_display_aliases = std::move(projection_column_names_with_display_aliases); + + return result; +} + +/** Construct sort analysis result. + * Actions before sort are added into actions chain. + */ +SortAnalysisResult analyzeSort(const QueryNode & query_node, + const ColumnsWithTypeAndName & join_tree_input_columns, + const PlannerContextPtr & planner_context, + ActionsChain & actions_chain) +{ + const auto *chain_available_output_columns = actions_chain.getLastStepAvailableOutputColumnsOrNull(); + const auto & order_by_input = chain_available_output_columns ? *chain_available_output_columns : join_tree_input_columns; + + ActionsDAGPtr before_sort_actions = std::make_shared(order_by_input); + auto & before_sort_actions_outputs = before_sort_actions->getOutputs(); + before_sort_actions_outputs.clear(); + + PlannerActionsVisitor actions_visitor(planner_context); + + std::unordered_set before_sort_actions_dag_output_node_names; + + /** We add only sort node sort expression in before ORDER BY actions DAG. + * WITH fill expressions must be constant nodes. + */ + const auto & order_by_node_list = query_node.getOrderBy(); + for (const auto & sort_node : order_by_node_list.getNodes()) + { + auto & sort_node_typed = sort_node->as(); + auto expression_dag_nodes = actions_visitor.visit(before_sort_actions, sort_node_typed.getExpression()); + + for (auto & action_dag_node : expression_dag_nodes) + { + if (before_sort_actions_dag_output_node_names.contains(action_dag_node->result_name)) + continue; + + before_sort_actions_outputs.push_back(action_dag_node); + before_sort_actions_dag_output_node_names.insert(action_dag_node->result_name); + } + } + + auto actions_step_before_sort = std::make_unique(before_sort_actions); + actions_chain.addStep(std::move(actions_step_before_sort)); + + return SortAnalysisResult{std::move(before_sort_actions)}; +} + +/** Construct limit by analysis result. + * Actions before limit by are added into actions chain. + */ +LimitByAnalysisResult analyzeLimitBy(const QueryNode & query_node, + const ColumnsWithTypeAndName & join_tree_input_columns, + const PlannerContextPtr & planner_context, + ActionsChain & actions_chain) +{ + const auto * chain_available_output_columns = actions_chain.getLastStepAvailableOutputColumnsOrNull(); + const auto & limit_by_input = chain_available_output_columns ? 
*chain_available_output_columns : join_tree_input_columns; + auto before_limit_by_actions = buildActionsDAGFromExpressionNode(query_node.getLimitByNode(), limit_by_input, planner_context); + + Names limit_by_column_names; + limit_by_column_names.reserve(before_limit_by_actions->getOutputs().size()); + for (auto & output_node : before_limit_by_actions->getOutputs()) + limit_by_column_names.push_back(output_node->result_name); + + auto actions_step_before_limit_by = std::make_unique(before_limit_by_actions); + actions_chain.addStep(std::move(actions_step_before_limit_by)); + + return LimitByAnalysisResult{std::move(before_limit_by_actions), std::move(limit_by_column_names)}; +} + +} + +PlannerExpressionsAnalysisResult buildExpressionAnalysisResult(QueryTreeNodePtr query_tree, + const ColumnsWithTypeAndName & join_tree_input_columns, + const PlannerContextPtr & planner_context) +{ + auto & query_node = query_tree->as(); + + ActionsChain actions_chain; + + std::optional where_analysis_result_optional; + std::optional where_action_step_index_optional; + + if (query_node.hasWhere()) + { + where_analysis_result_optional = analyzeFilter(query_node.getWhere(), join_tree_input_columns, planner_context, actions_chain); + where_action_step_index_optional = actions_chain.getLastStepIndex(); + } + + auto aggregation_analysis_result_optional = analyzeAggregation(query_tree, join_tree_input_columns, planner_context, actions_chain); + + std::optional having_analysis_result_optional; + std::optional having_action_step_index_optional; + + if (query_node.hasHaving()) + { + having_analysis_result_optional = analyzeFilter(query_node.getHaving(), join_tree_input_columns, planner_context, actions_chain); + having_action_step_index_optional = actions_chain.getLastStepIndex(); + } + + auto window_analysis_result_optional = analyzeWindow(query_tree, join_tree_input_columns, planner_context, actions_chain); + auto projection_analysis_result = analyzeProjection(query_node, join_tree_input_columns, planner_context, actions_chain); + + std::optional sort_analysis_result_optional; + if (query_node.hasOrderBy()) + sort_analysis_result_optional = analyzeSort(query_node, join_tree_input_columns, planner_context, actions_chain); + + std::optional limit_by_analysis_result_optional; + + if (query_node.hasLimitBy()) + limit_by_analysis_result_optional = analyzeLimitBy(query_node, join_tree_input_columns, planner_context, actions_chain); + + const auto * chain_available_output_columns = actions_chain.getLastStepAvailableOutputColumnsOrNull(); + const auto & project_names_input = chain_available_output_columns ? 
*chain_available_output_columns : join_tree_input_columns; + auto project_names_actions = std::make_shared(project_names_input); + project_names_actions->project(projection_analysis_result.projection_column_names_with_display_aliases); + actions_chain.addStep(std::make_unique(project_names_actions)); + + // std::cout << "Chain dump before finalize" << std::endl; + // std::cout << actions_chain.dump() << std::endl; + + actions_chain.finalize(); + + // std::cout << "Chain dump after finalize" << std::endl; + // std::cout << actions_chain.dump() << std::endl; + + projection_analysis_result.project_names_actions = std::move(project_names_actions); + + PlannerExpressionsAnalysisResult expressions_analysis_result(std::move(projection_analysis_result)); + + if (where_action_step_index_optional && where_analysis_result_optional) + { + auto & where_analysis_result = *where_analysis_result_optional; + auto & where_actions_chain_node = actions_chain.at(*where_action_step_index_optional); + where_analysis_result.remove_filter_column = !where_actions_chain_node->getChildRequiredOutputColumnsNames().contains(where_analysis_result.filter_column_name); + expressions_analysis_result.addWhere(std::move(where_analysis_result)); + } + + if (aggregation_analysis_result_optional) + expressions_analysis_result.addAggregation(std::move(*aggregation_analysis_result_optional)); + + if (having_action_step_index_optional && having_analysis_result_optional) + { + auto & having_analysis_result = *having_analysis_result_optional; + auto & having_actions_chain_node = actions_chain.at(*having_action_step_index_optional); + having_analysis_result.remove_filter_column = !having_actions_chain_node->getChildRequiredOutputColumnsNames().contains(having_analysis_result.filter_column_name); + expressions_analysis_result.addHaving(std::move(having_analysis_result)); + } + + if (window_analysis_result_optional) + expressions_analysis_result.addWindow(std::move(*window_analysis_result_optional)); + + if (sort_analysis_result_optional) + expressions_analysis_result.addSort(std::move(*sort_analysis_result_optional)); + + if (limit_by_analysis_result_optional) + expressions_analysis_result.addLimitBy(std::move(*limit_by_analysis_result_optional)); + + return expressions_analysis_result; +} + +} diff --git a/src/Planner/PlannerExpressionAnalysis.h b/src/Planner/PlannerExpressionAnalysis.h new file mode 100644 index 00000000000..aefb3c369d0 --- /dev/null +++ b/src/Planner/PlannerExpressionAnalysis.h @@ -0,0 +1,175 @@ +#pragma once + +#include +#include + +#include + +#include + +#include +#include +#include + +namespace DB +{ + +struct ProjectionAnalysisResult +{ + ActionsDAGPtr projection_actions; + Names projection_column_names; + NamesWithAliases projection_column_names_with_display_aliases; + ActionsDAGPtr project_names_actions; +}; + +struct FilterAnalysisResult +{ + ActionsDAGPtr filter_actions; + std::string filter_column_name; + bool remove_filter_column = false; +}; + +struct AggregationAnalysisResult +{ + ActionsDAGPtr before_aggregation_actions; + Names aggregation_keys; + AggregateDescriptions aggregate_descriptions; + GroupingSetsParamsList grouping_sets_parameters_list; + bool group_by_with_constant_keys = false; +}; + +struct WindowAnalysisResult +{ + ActionsDAGPtr before_window_actions; + std::vector window_descriptions; +}; + +struct SortAnalysisResult +{ + ActionsDAGPtr before_order_by_actions; +}; + +struct LimitByAnalysisResult +{ + ActionsDAGPtr before_limit_by_actions; + Names limit_by_column_names; +}; + +class 
PlannerExpressionsAnalysisResult +{ +public: + explicit PlannerExpressionsAnalysisResult(ProjectionAnalysisResult projection_analysis_result_) + : projection_analysis_result(std::move(projection_analysis_result_)) + {} + + const ProjectionAnalysisResult & getProjection() const + { + return projection_analysis_result; + } + + bool hasWhere() const + { + return where_analysis_result.filter_actions != nullptr; + } + + const FilterAnalysisResult & getWhere() const + { + return where_analysis_result; + } + + void addWhere(FilterAnalysisResult where_analysis_result_) + { + where_analysis_result = std::move(where_analysis_result_); + } + + bool hasAggregation() const + { + return !aggregation_analysis_result.aggregation_keys.empty() || !aggregation_analysis_result.aggregate_descriptions.empty(); + } + + const AggregationAnalysisResult & getAggregation() const + { + return aggregation_analysis_result; + } + + void addAggregation(AggregationAnalysisResult aggregation_analysis_result_) + { + aggregation_analysis_result = std::move(aggregation_analysis_result_); + } + + bool hasHaving() const + { + return having_analysis_result.filter_actions != nullptr; + } + + const FilterAnalysisResult & getHaving() const + { + return having_analysis_result; + } + + void addHaving(FilterAnalysisResult having_analysis_result_) + { + having_analysis_result = std::move(having_analysis_result_); + } + + bool hasWindow() const + { + return !window_analysis_result.window_descriptions.empty(); + } + + const WindowAnalysisResult & getWindow() const + { + return window_analysis_result; + } + + void addWindow(WindowAnalysisResult window_analysis_result_) + { + window_analysis_result = std::move(window_analysis_result_); + } + + bool hasSort() const + { + return sort_analysis_result.before_order_by_actions != nullptr; + } + + const SortAnalysisResult & getSort() const + { + return sort_analysis_result; + } + + void addSort(SortAnalysisResult sort_analysis_result_) + { + sort_analysis_result = std::move(sort_analysis_result_); + } + + bool hasLimitBy() const + { + return limit_by_analysis_result.before_limit_by_actions != nullptr; + } + + const LimitByAnalysisResult & getLimitBy() const + { + return limit_by_analysis_result; + } + + void addLimitBy(LimitByAnalysisResult limit_by_analysis_result_) + { + limit_by_analysis_result = std::move(limit_by_analysis_result_); + } + +private: + ProjectionAnalysisResult projection_analysis_result; + FilterAnalysisResult where_analysis_result; + AggregationAnalysisResult aggregation_analysis_result; + FilterAnalysisResult having_analysis_result; + WindowAnalysisResult window_analysis_result; + SortAnalysisResult sort_analysis_result; + LimitByAnalysisResult limit_by_analysis_result; +}; + +/// Build expression analysis result for query tree, join tree input columns and planner context +PlannerExpressionsAnalysisResult buildExpressionAnalysisResult(QueryTreeNodePtr query_tree, + const ColumnsWithTypeAndName & join_tree_input_columns, + const PlannerContextPtr & planner_context); + +} diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp new file mode 100644 index 00000000000..4cb446a65a0 --- /dev/null +++ b/src/Planner/PlannerJoinTree.cpp @@ -0,0 +1,708 @@ +#include + +#include + +#include +#include + +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include 
+#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int INVALID_JOIN_ON_EXPRESSION; + extern const int LOGICAL_ERROR; + extern const int NOT_IMPLEMENTED; + extern const int SYNTAX_ERROR; + extern const int ACCESS_DENIED; +} + +namespace +{ + +/// Check if current user has privileges to SELECT columns from table +void checkAccessRights(const TableNode & table_node, const Names & column_names, const ContextPtr & query_context) +{ + const auto & storage_id = table_node.getStorageID(); + const auto & storage_snapshot = table_node.getStorageSnapshot(); + + if (column_names.empty()) + { + /** For a trivial queries like "SELECT count() FROM table", "SELECT 1 FROM table" access is granted if at least + * one table column is accessible. + */ + auto access = query_context->getAccess(); + + for (const auto & column : storage_snapshot->metadata->getColumns()) + { + if (access->isGranted(AccessType::SELECT, storage_id.database_name, storage_id.table_name, column.name)) + return; + } + + throw Exception(ErrorCodes::ACCESS_DENIED, + "{}: Not enough privileges. To execute this query it's necessary to have grant SELECT for at least one column on {}", + query_context->getUserName(), + storage_id.getFullTableName()); + } + + query_context->checkAccess(AccessType::SELECT, storage_id, column_names); +} + +QueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expression, + SelectQueryInfo & select_query_info, + const SelectQueryOptions & select_query_options, + PlannerContextPtr & planner_context) +{ + auto * table_node = table_expression->as(); + auto * table_function_node = table_expression->as(); + auto * query_node = table_expression->as(); + auto * union_node = table_expression->as(); + + QueryPlan query_plan; + + auto & table_expression_data = planner_context->getTableExpressionDataOrThrow(table_expression); + + if (table_node || table_function_node) + { + const auto & storage = table_node ? table_node->getStorage() : table_function_node->getStorage(); + const auto & storage_snapshot = table_node ? table_node->getStorageSnapshot() : table_function_node->getStorageSnapshot(); + + auto table_expression_query_info = select_query_info; + table_expression_query_info.table_expression = table_expression; + + if (table_node) + table_expression_query_info.table_expression_modifiers = table_node->getTableExpressionModifiers(); + else + table_expression_query_info.table_expression_modifiers = table_function_node->getTableExpressionModifiers(); + + auto & query_context = planner_context->getQueryContext(); + + auto from_stage = storage->getQueryProcessingStage(query_context, select_query_options.to_stage, storage_snapshot, table_expression_query_info); + const auto & columns_names_set = table_expression_data.getColumnsNames(); + Names columns_names(columns_names_set.begin(), columns_names_set.end()); + + /** The current user must have the SELECT privilege. + * We do not check access rights for table functions because they have been already checked in ITableFunction::execute(). 
+ */ + if (table_node) + { + auto column_names_with_aliases = columns_names; + const auto & alias_columns_names = table_expression_data.getAliasColumnsNames(); + column_names_with_aliases.insert(column_names_with_aliases.end(), alias_columns_names.begin(), alias_columns_names.end()); + checkAccessRights(*table_node, column_names_with_aliases, planner_context->getQueryContext()); + } + + if (columns_names.empty()) + { + auto column_names_and_types = storage_snapshot->getColumns(GetColumnsOptions(GetColumnsOptions::All).withSubcolumns()); + auto additional_column_to_read = column_names_and_types.front(); + + const auto & column_identifier = planner_context->getGlobalPlannerContext()->createColumnIdentifier(additional_column_to_read, table_expression); + columns_names.push_back(additional_column_to_read.name); + table_expression_data.addColumn(additional_column_to_read, column_identifier); + } + + size_t max_block_size = query_context->getSettingsRef().max_block_size; + size_t max_streams = query_context->getSettingsRef().max_threads; + + bool need_rewrite_query_with_final = storage->needRewriteQueryWithFinal(columns_names); + if (need_rewrite_query_with_final) + { + if (table_expression_query_info.table_expression_modifiers) + { + const auto & table_expression_modifiers = table_expression_query_info.table_expression_modifiers; + auto sample_size_ratio = table_expression_modifiers->getSampleSizeRatio(); + auto sample_offset_ratio = table_expression_modifiers->getSampleOffsetRatio(); + + table_expression_query_info.table_expression_modifiers = TableExpressionModifiers(true /*has_final*/, + sample_size_ratio, + sample_offset_ratio); + } + else + { + table_expression_query_info.table_expression_modifiers = TableExpressionModifiers(true /*has_final*/, + {} /*sample_size_ratio*/, + {} /*sample_offset_ratio*/); + } + } + + storage->read(query_plan, columns_names, storage_snapshot, table_expression_query_info, query_context, from_stage, max_block_size, max_streams); + + /// Create step which reads from empty source if storage has no data. + if (!query_plan.isInitialized()) + { + auto source_header = storage_snapshot->getSampleBlockForColumns(columns_names); + Pipe pipe(std::make_shared(source_header)); + auto read_from_pipe = std::make_unique(std::move(pipe)); + read_from_pipe->setStepDescription("Read from NullSource"); + query_plan.addStep(std::move(read_from_pipe)); + } + } + else if (query_node || union_node) + { + auto subquery_options = select_query_options.subquery(); + auto subquery_context = buildSubqueryContext(planner_context->getQueryContext()); + Planner subquery_planner(table_expression, subquery_options, std::move(subquery_context), planner_context->getGlobalPlannerContext()); + subquery_planner.buildQueryPlanIfNeeded(); + query_plan = std::move(subquery_planner).extractQueryPlan(); + } + else + { + throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected table, table function, query or union. 
Actual {}", table_expression->formatASTForErrorMessage()); + } + + auto rename_actions_dag = std::make_shared(query_plan.getCurrentDataStream().header.getColumnsWithTypeAndName()); + + for (auto & output_node : rename_actions_dag->getOutputs()) + { + const auto * column_identifier = table_expression_data.getColumnIdentifierOrNull(output_node->result_name); + + if (!column_identifier) + continue; + + const auto * node_to_rename = output_node; + output_node = &rename_actions_dag->addAlias(*node_to_rename, *column_identifier); + } + + auto rename_step = std::make_unique(query_plan.getCurrentDataStream(), rename_actions_dag); + rename_step->setStepDescription("Change column names to column identifiers"); + query_plan.addStep(std::move(rename_step)); + + return query_plan; +} + +QueryPlan buildQueryPlanForJoinNode(QueryTreeNodePtr join_tree_node, + SelectQueryInfo & select_query_info, + const SelectQueryOptions & select_query_options, + PlannerContextPtr & planner_context) +{ + auto & join_node = join_tree_node->as(); + + auto left_plan = buildQueryPlanForJoinTreeNode(join_node.getLeftTableExpression(), + select_query_info, + select_query_options, + planner_context); + auto left_plan_output_columns = left_plan.getCurrentDataStream().header.getColumnsWithTypeAndName(); + + auto right_plan = buildQueryPlanForJoinTreeNode(join_node.getRightTableExpression(), + select_query_info, + select_query_options, + planner_context); + auto right_plan_output_columns = right_plan.getCurrentDataStream().header.getColumnsWithTypeAndName(); + + JoinClausesAndActions join_clauses_and_actions; + JoinKind join_kind = join_node.getKind(); + + auto join_constant = tryExtractConstantFromJoinNode(join_tree_node); + if (join_constant) + { + /** If there is JOIN with always true constant, we transform it to cross. + * If there is JOIN with always false constant, we do not process JOIN keys. + * It is expected by join algorithm to handle such case. 
+ * + * Example: SELECT * FROM test_table AS t1 INNER JOIN test_table AS t2 ON 1; + */ + if (*join_constant) + join_kind = JoinKind::Cross; + } + else if (join_node.isOnJoinExpression()) + { + join_clauses_and_actions = buildJoinClausesAndActions(left_plan_output_columns, + right_plan_output_columns, + join_tree_node, + planner_context); + + join_clauses_and_actions.left_join_expressions_actions->projectInput(); + auto left_join_expressions_actions_step = std::make_unique(left_plan.getCurrentDataStream(), join_clauses_and_actions.left_join_expressions_actions); + left_join_expressions_actions_step->setStepDescription("JOIN actions"); + left_plan.addStep(std::move(left_join_expressions_actions_step)); + + join_clauses_and_actions.right_join_expressions_actions->projectInput(); + auto right_join_expressions_actions_step = std::make_unique(right_plan.getCurrentDataStream(), join_clauses_and_actions.right_join_expressions_actions); + right_join_expressions_actions_step->setStepDescription("JOIN actions"); + right_plan.addStep(std::move(right_join_expressions_actions_step)); + } + + std::unordered_map left_plan_column_name_to_cast_type; + std::unordered_map right_plan_column_name_to_cast_type; + + if (join_node.isUsingJoinExpression()) + { + auto & join_node_using_columns_list = join_node.getJoinExpression()->as(); + for (auto & join_node_using_node : join_node_using_columns_list.getNodes()) + { + auto & join_node_using_column_node = join_node_using_node->as(); + auto & inner_columns_list = join_node_using_column_node.getExpressionOrThrow()->as(); + + auto & left_inner_column_node = inner_columns_list.getNodes().at(0); + auto & left_inner_column = left_inner_column_node->as(); + + auto & right_inner_column_node = inner_columns_list.getNodes().at(1); + auto & right_inner_column = right_inner_column_node->as(); + + const auto & join_node_using_column_node_type = join_node_using_column_node.getColumnType(); + if (!left_inner_column.getColumnType()->equals(*join_node_using_column_node_type)) + { + const auto & left_inner_column_identifier = planner_context->getColumnNodeIdentifierOrThrow(left_inner_column_node); + left_plan_column_name_to_cast_type.emplace(left_inner_column_identifier, join_node_using_column_node_type); + } + + if (!right_inner_column.getColumnType()->equals(*join_node_using_column_node_type)) + { + const auto & right_inner_column_identifier = planner_context->getColumnNodeIdentifierOrThrow(right_inner_column_node); + right_plan_column_name_to_cast_type.emplace(right_inner_column_identifier, join_node_using_column_node_type); + } + } + } + + auto join_cast_plan_output_nodes = [&](QueryPlan & plan_to_add_cast, std::unordered_map & plan_column_name_to_cast_type) + { + auto cast_actions_dag = std::make_shared(plan_to_add_cast.getCurrentDataStream().header.getColumnsWithTypeAndName()); + + for (auto & output_node : cast_actions_dag->getOutputs()) + { + auto it = plan_column_name_to_cast_type.find(output_node->result_name); + if (it == plan_column_name_to_cast_type.end()) + continue; + + const auto & cast_type = it->second; + auto cast_type_name = cast_type->getName(); + Field cast_type_constant_value(cast_type_name); + + ColumnWithTypeAndName column; + column.name = calculateConstantActionNodeName(cast_type_constant_value); + column.column = DataTypeString().createColumnConst(0, cast_type_constant_value); + column.type = std::make_shared(); + + const auto * cast_type_constant_node = &cast_actions_dag->addColumn(std::move(column)); + + FunctionCastBase::Diagnostic diagnostic = 
{output_node->result_name, output_node->result_name}; + FunctionOverloadResolverPtr func_builder_cast + = CastInternalOverloadResolver::createImpl(std::move(diagnostic)); + + ActionsDAG::NodeRawConstPtrs children = {output_node, cast_type_constant_node}; + output_node = &cast_actions_dag->addFunction(func_builder_cast, std::move(children), output_node->result_name); + } + + cast_actions_dag->projectInput(); + auto cast_join_columns_step + = std::make_unique(plan_to_add_cast.getCurrentDataStream(), std::move(cast_actions_dag)); + cast_join_columns_step->setStepDescription("Cast JOIN USING columns"); + plan_to_add_cast.addStep(std::move(cast_join_columns_step)); + }; + + if (!left_plan_column_name_to_cast_type.empty()) + join_cast_plan_output_nodes(left_plan, left_plan_column_name_to_cast_type); + + if (!right_plan_column_name_to_cast_type.empty()) + join_cast_plan_output_nodes(right_plan, right_plan_column_name_to_cast_type); + + const auto & query_context = planner_context->getQueryContext(); + const auto & settings = query_context->getSettingsRef(); + + bool join_use_nulls = settings.join_use_nulls; + auto to_nullable_function = FunctionFactory::instance().get("toNullable", query_context); + + auto join_cast_plan_columns_to_nullable = [&](QueryPlan & plan_to_add_cast) + { + auto cast_actions_dag = std::make_shared(plan_to_add_cast.getCurrentDataStream().header.getColumnsWithTypeAndName()); + + for (auto & output_node : cast_actions_dag->getOutputs()) + { + if (planner_context->getGlobalPlannerContext()->hasColumnIdentifier(output_node->result_name)) + output_node = &cast_actions_dag->addFunction(to_nullable_function, {output_node}, output_node->result_name); + } + + cast_actions_dag->projectInput(); + auto cast_join_columns_step = std::make_unique(plan_to_add_cast.getCurrentDataStream(), std::move(cast_actions_dag)); + cast_join_columns_step->setStepDescription("Cast JOIN columns to Nullable"); + plan_to_add_cast.addStep(std::move(cast_join_columns_step)); + }; + + if (join_use_nulls) + { + if (isFull(join_kind)) + { + join_cast_plan_columns_to_nullable(left_plan); + join_cast_plan_columns_to_nullable(right_plan); + } + else if (isLeft(join_kind)) + { + join_cast_plan_columns_to_nullable(right_plan); + } + else if (isRight(join_kind)) + { + join_cast_plan_columns_to_nullable(left_plan); + } + } + + auto table_join = std::make_shared(settings, query_context->getTemporaryVolume()); + table_join->getTableJoin() = join_node.toASTTableJoin()->as(); + table_join->getTableJoin().kind = join_kind; + + if (join_kind == JoinKind::Comma) + { + join_kind = JoinKind::Cross; + table_join->getTableJoin().kind = JoinKind::Cross; + } + + table_join->setIsJoinWithConstant(join_constant != std::nullopt); + + if (join_node.isOnJoinExpression()) + { + const auto & join_clauses = join_clauses_and_actions.join_clauses; + bool is_asof = table_join->strictness() == JoinStrictness::Asof; + + if (join_clauses.size() > 1) + { + if (is_asof) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, + "ASOF join {} doesn't support multiple ORs for keys in JOIN ON section", + join_node.formatASTForErrorMessage()); + } + + auto & table_join_clauses = table_join->getClauses(); + + for (const auto & join_clause : join_clauses) + { + table_join_clauses.emplace_back(); + auto & table_join_clause = table_join_clauses.back(); + + const auto & join_clause_left_key_nodes = join_clause.getLeftKeyNodes(); + const auto & join_clause_right_key_nodes = join_clause.getRightKeyNodes(); + + size_t join_clause_key_nodes_size = 
join_clause_left_key_nodes.size(); + assert(join_clause_key_nodes_size == join_clause_right_key_nodes.size()); + + for (size_t i = 0; i < join_clause_key_nodes_size; ++i) + { + table_join_clause.key_names_left.push_back(join_clause_left_key_nodes[i]->result_name); + table_join_clause.key_names_right.push_back(join_clause_right_key_nodes[i]->result_name); + } + + const auto & join_clause_get_left_filter_condition_nodes = join_clause.getLeftFilterConditionNodes(); + if (!join_clause_get_left_filter_condition_nodes.empty()) + { + if (join_clause_get_left_filter_condition_nodes.size() != 1) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "JOIN {} left filter conditions size must be 1. Actual {}", + join_node.formatASTForErrorMessage(), + join_clause_get_left_filter_condition_nodes.size()); + + const auto & join_clause_left_filter_condition_name = join_clause_get_left_filter_condition_nodes[0]->result_name; + table_join_clause.analyzer_left_filter_condition_column_name = join_clause_left_filter_condition_name; + } + + const auto & join_clause_get_right_filter_condition_nodes = join_clause.getRightFilterConditionNodes(); + if (!join_clause_get_right_filter_condition_nodes.empty()) + { + if (join_clause_get_right_filter_condition_nodes.size() != 1) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "JOIN {} right filter conditions size must be 1. Actual {}", + join_node.formatASTForErrorMessage(), + join_clause_get_right_filter_condition_nodes.size()); + + const auto & join_clause_right_filter_condition_name = join_clause_get_right_filter_condition_nodes[0]->result_name; + table_join_clause.analyzer_right_filter_condition_column_name = join_clause_right_filter_condition_name; + } + + if (is_asof) + { + if (!join_clause.hasASOF()) + throw Exception(ErrorCodes::INVALID_JOIN_ON_EXPRESSION, + "JOIN {} no inequality in ASOF JOIN ON section.", + join_node.formatASTForErrorMessage()); + + if (table_join_clause.key_names_left.size() <= 1) + throw Exception(ErrorCodes::SYNTAX_ERROR, + "JOIN {} ASOF join needs at least one equi-join column", + join_node.formatASTForErrorMessage()); + } + + if (join_clause.hasASOF()) + { + const auto & asof_conditions = join_clause.getASOFConditions(); + assert(asof_conditions.size() == 1); + + const auto & asof_condition = asof_conditions[0]; + table_join->setAsofInequality(asof_condition.asof_inequality); + + /// Execution layer of JOIN algorithms expects that ASOF keys are last JOIN keys + std::swap(table_join_clause.key_names_left.at(asof_condition.key_index), table_join_clause.key_names_left.back()); + std::swap(table_join_clause.key_names_right.at(asof_condition.key_index), table_join_clause.key_names_right.back()); + } + } + } + else if (join_node.isUsingJoinExpression()) + { + auto & table_join_clauses = table_join->getClauses(); + table_join_clauses.emplace_back(); + auto & table_join_clause = table_join_clauses.back(); + + auto & using_list = join_node.getJoinExpression()->as(); + + for (auto & join_using_node : using_list.getNodes()) + { + auto & join_using_column_node = join_using_node->as(); + auto & using_join_columns_list = join_using_column_node.getExpressionOrThrow()->as(); + auto & using_join_left_join_column_node = using_join_columns_list.getNodes().at(0); + auto & using_join_right_join_column_node = using_join_columns_list.getNodes().at(1); + + const auto & left_column_identifier = planner_context->getColumnNodeIdentifierOrThrow(using_join_left_join_column_node); + const auto & right_column_identifier = 
planner_context->getColumnNodeIdentifierOrThrow(using_join_right_join_column_node); + + table_join_clause.key_names_left.push_back(left_column_identifier); + table_join_clause.key_names_right.push_back(right_column_identifier); + } + } + + auto left_table_names = left_plan.getCurrentDataStream().header.getNames(); + NameSet left_table_names_set(left_table_names.begin(), left_table_names.end()); + + auto columns_from_joined_table = right_plan.getCurrentDataStream().header.getNamesAndTypesList(); + table_join->setColumnsFromJoinedTable(columns_from_joined_table, left_table_names_set, ""); + + for (auto & column_from_joined_table : columns_from_joined_table) + { + if (planner_context->getGlobalPlannerContext()->hasColumnIdentifier(column_from_joined_table.name)) + table_join->addJoinedColumn(column_from_joined_table); + } + + auto join_algorithm = chooseJoinAlgorithm(table_join, join_node.getRightTableExpression(), right_plan.getCurrentDataStream().header, planner_context); + + auto result_plan = QueryPlan(); + + if (join_algorithm->isFilled()) + { + size_t max_block_size = query_context->getSettingsRef().max_block_size; + + auto filled_join_step = std::make_unique( + left_plan.getCurrentDataStream(), + join_algorithm, + max_block_size); + + filled_join_step->setStepDescription("Filled JOIN"); + left_plan.addStep(std::move(filled_join_step)); + + result_plan = std::move(left_plan); + } + else + { + auto add_sorting = [&] (QueryPlan & plan, const Names & key_names, JoinTableSide join_table_side) + { + SortDescription sort_description; + sort_description.reserve(key_names.size()); + for (const auto & key_name : key_names) + sort_description.emplace_back(key_name); + + auto sorting_step = std::make_unique( + plan.getCurrentDataStream(), + std::move(sort_description), + settings.max_block_size, + 0 /*limit*/, + SizeLimits(settings.max_rows_to_sort, settings.max_bytes_to_sort, settings.sort_overflow_mode), + settings.max_bytes_before_remerge_sort, + settings.remerge_sort_lowered_memory_bytes_ratio, + settings.max_bytes_before_external_sort, + query_context->getTempDataOnDisk(), + settings.min_free_disk_space_for_temporary_data, + settings.optimize_sorting_by_input_stream_properties); + sorting_step->setStepDescription(fmt::format("Sort {} before JOIN", join_table_side)); + plan.addStep(std::move(sorting_step)); + }; + + auto crosswise_connection = CreateSetAndFilterOnTheFlyStep::createCrossConnection(); + auto add_create_set = [&settings, crosswise_connection](QueryPlan & plan, const Names & key_names, JoinTableSide join_table_side) + { + auto creating_set_step = std::make_unique( + plan.getCurrentDataStream(), + key_names, + settings.max_rows_in_set_to_optimize_join, + crosswise_connection, + join_table_side); + creating_set_step->setStepDescription(fmt::format("Create set and filter {} joined stream", join_table_side)); + + auto * step_raw_ptr = creating_set_step.get(); + plan.addStep(std::move(creating_set_step)); + return step_raw_ptr; + }; + + if (join_algorithm->pipelineType() == JoinPipelineType::YShaped) + { + const auto & join_clause = table_join->getOnlyClause(); + + bool kind_allows_filtering = isInner(join_kind) || isLeft(join_kind) || isRight(join_kind); + if (settings.max_rows_in_set_to_optimize_join > 0 && kind_allows_filtering) + { + auto * left_set = add_create_set(left_plan, join_clause.key_names_left, JoinTableSide::Left); + auto * right_set = add_create_set(right_plan, join_clause.key_names_right, JoinTableSide::Right); + + if (isInnerOrLeft(join_kind)) + 
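/// For INNER and LEFT joins the right stream can be filtered on the fly by the set of JOIN key values collected from the left stream. +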
right_set->setFiltering(left_set->getSet()); + + if (isInnerOrRight(join_kind)) + left_set->setFiltering(right_set->getSet()); + } + + add_sorting(left_plan, join_clause.key_names_left, JoinTableSide::Left); + add_sorting(right_plan, join_clause.key_names_right, JoinTableSide::Right); + } + + size_t max_block_size = query_context->getSettingsRef().max_block_size; + size_t max_streams = query_context->getSettingsRef().max_threads; + + auto join_step = std::make_unique( + left_plan.getCurrentDataStream(), + right_plan.getCurrentDataStream(), + std::move(join_algorithm), + max_block_size, + max_streams, + false /*optimize_read_in_order*/); + + join_step->setStepDescription(fmt::format("JOIN {}", JoinPipelineType::FillRightFirst)); + + std::vector plans; + plans.emplace_back(std::make_unique(std::move(left_plan))); + plans.emplace_back(std::make_unique(std::move(right_plan))); + + result_plan.unitePlans(std::move(join_step), {std::move(plans)}); + } + + auto drop_unused_columns_after_join_actions_dag = std::make_shared(result_plan.getCurrentDataStream().header.getColumnsWithTypeAndName()); + ActionsDAG::NodeRawConstPtrs updated_outputs; + std::unordered_set updated_outputs_names; + + for (auto & output : drop_unused_columns_after_join_actions_dag->getOutputs()) + { + if (updated_outputs_names.contains(output->result_name) || !planner_context->getGlobalPlannerContext()->hasColumnIdentifier(output->result_name)) + continue; + + updated_outputs.push_back(output); + updated_outputs_names.insert(output->result_name); + } + + drop_unused_columns_after_join_actions_dag->getOutputs() = std::move(updated_outputs); + + auto drop_unused_columns_after_join_transform_step = std::make_unique(result_plan.getCurrentDataStream(), std::move(drop_unused_columns_after_join_actions_dag)); + drop_unused_columns_after_join_transform_step->setStepDescription("DROP unused columns after JOIN"); + result_plan.addStep(std::move(drop_unused_columns_after_join_transform_step)); + + return result_plan; +} + +QueryPlan buildQueryPlanForArrayJoinNode(QueryTreeNodePtr table_expression, + SelectQueryInfo & select_query_info, + const SelectQueryOptions & select_query_options, + PlannerContextPtr & planner_context) +{ + auto & array_join_node = table_expression->as(); + + auto plan = buildQueryPlanForJoinTreeNode(array_join_node.getTableExpression(), + select_query_info, + select_query_options, + planner_context); + auto plan_output_columns = plan.getCurrentDataStream().header.getColumnsWithTypeAndName(); + + ActionsDAGPtr array_join_action_dag = std::make_shared(plan_output_columns); + PlannerActionsVisitor actions_visitor(planner_context); + + NameSet array_join_columns; + for (auto & array_join_expression : array_join_node.getJoinExpressions().getNodes()) + { + auto & array_join_expression_column = array_join_expression->as(); + const auto & array_join_column_name = array_join_expression_column.getColumnName(); + array_join_columns.insert(array_join_column_name); + + auto expression_dag_index_nodes = actions_visitor.visit(array_join_action_dag, array_join_expression_column.getExpressionOrThrow()); + for (auto & expression_dag_index_node : expression_dag_index_nodes) + { + const auto * array_join_column_node = &array_join_action_dag->addAlias(*expression_dag_index_node, array_join_column_name); + array_join_action_dag->getOutputs().push_back(array_join_column_node); + } + } + + array_join_action_dag->projectInput(); + auto array_join_actions = std::make_unique(plan.getCurrentDataStream(), array_join_action_dag); + 
array_join_actions->setStepDescription("ARRAY JOIN actions"); + plan.addStep(std::move(array_join_actions)); + + auto array_join_action = std::make_shared(array_join_columns, array_join_node.isLeft(), planner_context->getQueryContext()); + auto array_join_step = std::make_unique(plan.getCurrentDataStream(), std::move(array_join_action)); + array_join_step->setStepDescription("ARRAY JOIN"); + plan.addStep(std::move(array_join_step)); + + return plan; +} + +} + +QueryPlan buildQueryPlanForJoinTreeNode(QueryTreeNodePtr join_tree_node, + SelectQueryInfo & select_query_info, + const SelectQueryOptions & select_query_options, + PlannerContextPtr & planner_context) +{ + auto join_tree_node_type = join_tree_node->getNodeType(); + + switch (join_tree_node_type) + { + case QueryTreeNodeType::TABLE: + [[fallthrough]]; + case QueryTreeNodeType::TABLE_FUNCTION: + [[fallthrough]]; + case QueryTreeNodeType::QUERY: + [[fallthrough]]; + case QueryTreeNodeType::UNION: + { + return buildQueryPlanForTableExpression(join_tree_node, select_query_info, select_query_options, planner_context); + } + case QueryTreeNodeType::JOIN: + { + return buildQueryPlanForJoinNode(join_tree_node, select_query_info, select_query_options, planner_context); + } + case QueryTreeNodeType::ARRAY_JOIN: + { + return buildQueryPlanForArrayJoinNode(join_tree_node, select_query_info, select_query_options, planner_context); + } + default: + { + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Expected table, table function, query, union, join or array join query node. Actual {}", + join_tree_node->formatASTForErrorMessage()); + } + } +} + +} diff --git a/src/Planner/PlannerJoinTree.h b/src/Planner/PlannerJoinTree.h new file mode 100644 index 00000000000..c93b71e0df1 --- /dev/null +++ b/src/Planner/PlannerJoinTree.h @@ -0,0 +1,20 @@ +#pragma once + +#include + +#include + +#include + +#include + +namespace DB +{ + +/// Build query plan for query JOIN TREE node +QueryPlan buildQueryPlanForJoinTreeNode(QueryTreeNodePtr join_tree_node, + SelectQueryInfo & select_query_info, + const SelectQueryOptions & select_query_options, + PlannerContextPtr & planner_context); + +} diff --git a/src/Planner/PlannerJoins.cpp b/src/Planner/PlannerJoins.cpp new file mode 100644 index 00000000000..f62517eaaad --- /dev/null +++ b/src/Planner/PlannerJoins.cpp @@ -0,0 +1,695 @@ +#include + +#include +#include + +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; + extern const int INVALID_JOIN_ON_EXPRESSION; + extern const int NOT_IMPLEMENTED; +} + +void JoinClause::dump(WriteBuffer & buffer) const +{ + auto dump_dag_nodes = [&](const ActionsDAG::NodeRawConstPtrs & dag_nodes) + { + String dag_nodes_dump; + + if (!dag_nodes.empty()) + { + for (const auto & dag_node : dag_nodes) + { + dag_nodes_dump += dag_node->result_name; + dag_nodes_dump += ", "; + } + + dag_nodes_dump.pop_back(); + dag_nodes_dump.pop_back(); + } + + return dag_nodes_dump; + }; + + buffer << "left_key_nodes: " << dump_dag_nodes(left_key_nodes); + buffer << " right_key_nodes: " << dump_dag_nodes(right_key_nodes); + + if (!left_filter_condition_nodes.empty()) + buffer << " left_condition_nodes: " + dump_dag_nodes(left_filter_condition_nodes); + + if (!right_filter_condition_nodes.empty()) + buffer 
<< " right_condition_nodes: " + dump_dag_nodes(right_filter_condition_nodes); +} + +String JoinClause::dump() const +{ + WriteBufferFromOwnString buffer; + dump(buffer); + + return buffer.str(); +} + +namespace +{ + +std::optional extractJoinTableSideFromExpression(const ActionsDAG::Node * expression_root_node, + const std::unordered_set & join_expression_dag_input_nodes, + const NameSet & left_table_expression_columns_names, + const NameSet & right_table_expression_columns_names, + const JoinNode & join_node) +{ + std::optional table_side; + std::vector nodes_to_process; + nodes_to_process.push_back(expression_root_node); + + while (!nodes_to_process.empty()) + { + const auto * node_to_process = nodes_to_process.back(); + nodes_to_process.pop_back(); + + for (const auto & child : node_to_process->children) + nodes_to_process.push_back(child); + + if (!join_expression_dag_input_nodes.contains(node_to_process)) + continue; + + const auto & input_name = node_to_process->result_name; + + bool left_table_expression_contains_input = left_table_expression_columns_names.contains(input_name); + bool right_table_expression_contains_input = right_table_expression_columns_names.contains(input_name); + + if (!left_table_expression_contains_input && !right_table_expression_contains_input) + throw Exception(ErrorCodes::INVALID_JOIN_ON_EXPRESSION, + "JOIN {} actions has column {} that do not exist in left {} or right {} table expression columns", + join_node.formatASTForErrorMessage(), + input_name, + boost::join(left_table_expression_columns_names, ", "), + boost::join(right_table_expression_columns_names, ", ")); + + auto input_table_side = left_table_expression_contains_input ? JoinTableSide::Left : JoinTableSide::Right; + if (table_side && (*table_side) != input_table_side) + throw Exception(ErrorCodes::INVALID_JOIN_ON_EXPRESSION, + "JOIN {} join expression contains column from left and right table", + join_node.formatASTForErrorMessage()); + + table_side = input_table_side; + } + + return table_side; +} + +void buildJoinClause(ActionsDAGPtr join_expression_dag, + const std::unordered_set & join_expression_dag_input_nodes, + const ActionsDAG::Node * join_expressions_actions_node, + const NameSet & left_table_expression_columns_names, + const NameSet & right_table_expression_columns_names, + const JoinNode & join_node, + JoinClause & join_clause) +{ + std::string function_name; + + if (join_expressions_actions_node->function) + function_name = join_expressions_actions_node->function->getName(); + + /// For 'and' function go into children + if (function_name == "and") + { + for (const auto & child : join_expressions_actions_node->children) + { + buildJoinClause(join_expression_dag, + join_expression_dag_input_nodes, + child, + left_table_expression_columns_names, + right_table_expression_columns_names, + join_node, + join_clause); + } + + return; + } + + auto asof_inequality = getASOFJoinInequality(function_name); + bool is_asof_join_inequality = join_node.getStrictness() == JoinStrictness::Asof && asof_inequality != ASOFJoinInequality::None; + + if (function_name == "equals" || is_asof_join_inequality) + { + const auto * left_child = join_expressions_actions_node->children.at(0); + const auto * right_child = join_expressions_actions_node->children.at(1); + + auto left_expression_side_optional = extractJoinTableSideFromExpression(left_child, + join_expression_dag_input_nodes, + left_table_expression_columns_names, + right_table_expression_columns_names, + join_node); + + auto 
right_expression_side_optional = extractJoinTableSideFromExpression(right_child, + join_expression_dag_input_nodes, + left_table_expression_columns_names, + right_table_expression_columns_names, + join_node); + + if (!left_expression_side_optional && !right_expression_side_optional) + { + throw Exception(ErrorCodes::INVALID_JOIN_ON_EXPRESSION, + "JOIN {} ON expression {} with constants is not supported", + join_node.formatASTForErrorMessage(), + join_expressions_actions_node->result_name); + } + else if (left_expression_side_optional && !right_expression_side_optional) + { + join_clause.addCondition(*left_expression_side_optional, join_expressions_actions_node); + } + else if (!left_expression_side_optional && right_expression_side_optional) + { + join_clause.addCondition(*right_expression_side_optional, join_expressions_actions_node); + } + else + { + auto left_expression_side = *left_expression_side_optional; + auto right_expression_side = *right_expression_side_optional; + + if (left_expression_side != right_expression_side) + { + const ActionsDAG::Node * left_key = left_child; + const ActionsDAG::Node * right_key = right_child; + + if (left_expression_side == JoinTableSide::Right) + { + left_key = right_child; + right_key = left_child; + asof_inequality = reverseASOFJoinInequality(asof_inequality); + } + + if (is_asof_join_inequality) + { + if (join_clause.hasASOF()) + { + throw Exception(ErrorCodes::INVALID_JOIN_ON_EXPRESSION, + "JOIN {} ASOF JOIN expects exactly one inequality in ON section", + join_node.formatASTForErrorMessage()); + } + + join_clause.addASOFKey(left_key, right_key, asof_inequality); + } + else + { + join_clause.addKey(left_key, right_key); + } + } + else + { + join_clause.addCondition(left_expression_side, join_expressions_actions_node); + } + } + + return; + } + + auto expression_side_optional = extractJoinTableSideFromExpression(join_expressions_actions_node, + join_expression_dag_input_nodes, + left_table_expression_columns_names, + right_table_expression_columns_names, + join_node); + + if (!expression_side_optional) + throw Exception(ErrorCodes::INVALID_JOIN_ON_EXPRESSION, + "JOIN {} with constants is not supported", + join_node.formatASTForErrorMessage()); + + auto expression_side = *expression_side_optional; + join_clause.addCondition(expression_side, join_expressions_actions_node); +} + +JoinClausesAndActions buildJoinClausesAndActions(const ColumnsWithTypeAndName & join_expression_input_columns, + const ColumnsWithTypeAndName & left_table_expression_columns, + const ColumnsWithTypeAndName & right_table_expression_columns, + const JoinNode & join_node, + const PlannerContextPtr & planner_context) +{ + ActionsDAGPtr join_expression_actions = std::make_shared(join_expression_input_columns); + + /** In ActionsDAG if input node has constant representation additional constant column is added. + * That way we cannot simply check that node has INPUT type during resolution of expression join table side. + * Put all nodes after actions dag initialization in set. + * To check if actions dag node is input column, we check if set contains it. 
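+      * Nodes added to the DAG later by PlannerActionsVisitor (functions and constants) are not in this set, so they are never treated as input columns.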
+ */ + const auto & join_expression_actions_nodes = join_expression_actions->getNodes(); + + std::unordered_set join_expression_dag_input_nodes; + join_expression_dag_input_nodes.reserve(join_expression_actions_nodes.size()); + for (const auto & node : join_expression_actions_nodes) + join_expression_dag_input_nodes.insert(&node); + + PlannerActionsVisitor join_expression_visitor(planner_context); + auto join_expression_dag_node_raw_pointers = join_expression_visitor.visit(join_expression_actions, join_node.getJoinExpression()); + if (join_expression_dag_node_raw_pointers.size() != 1) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "JOIN {} ON clause contains multiple expressions", + join_node.formatASTForErrorMessage()); + + const auto * join_expressions_actions_root_node = join_expression_dag_node_raw_pointers[0]; + if (!join_expressions_actions_root_node->function) + throw Exception(ErrorCodes::INVALID_JOIN_ON_EXPRESSION, + "JOIN {} join expression expected function", + join_node.formatASTForErrorMessage()); + + size_t left_table_expression_columns_size = left_table_expression_columns.size(); + + Names join_left_actions_names; + join_left_actions_names.reserve(left_table_expression_columns_size); + + NameSet join_left_actions_names_set; + join_left_actions_names_set.reserve(left_table_expression_columns_size); + + for (const auto & left_table_expression_column : left_table_expression_columns) + { + join_left_actions_names.push_back(left_table_expression_column.name); + join_left_actions_names_set.insert(left_table_expression_column.name); + } + + size_t right_table_expression_columns_size = right_table_expression_columns.size(); + + Names join_right_actions_names; + join_right_actions_names.reserve(right_table_expression_columns_size); + + NameSet join_right_actions_names_set; + join_right_actions_names_set.reserve(right_table_expression_columns_size); + + for (const auto & right_table_expression_column : right_table_expression_columns) + { + join_right_actions_names.push_back(right_table_expression_column.name); + join_right_actions_names_set.insert(right_table_expression_column.name); + } + + JoinClausesAndActions result; + result.join_expression_actions = join_expression_actions; + + const auto & function_name = join_expressions_actions_root_node->function->getName(); + if (function_name == "or") + { + for (const auto & child : join_expressions_actions_root_node->children) + { + result.join_clauses.emplace_back(); + + buildJoinClause(join_expression_actions, + join_expression_dag_input_nodes, + child, + join_left_actions_names_set, + join_right_actions_names_set, + join_node, + result.join_clauses.back()); + } + } + else + { + result.join_clauses.emplace_back(); + + buildJoinClause(join_expression_actions, + join_expression_dag_input_nodes, + join_expressions_actions_root_node, + join_left_actions_names_set, + join_right_actions_names_set, + join_node, + result.join_clauses.back()); + } + + auto and_function = FunctionFactory::instance().get("and", planner_context->getQueryContext()); + + auto add_necessary_name_if_needed = [&](JoinTableSide join_table_side, const String & name) + { + auto & necessary_names = join_table_side == JoinTableSide::Left ? join_left_actions_names : join_right_actions_names; + auto & necessary_names_set = join_table_side == JoinTableSide::Left ? 
join_left_actions_names_set : join_right_actions_names_set; + + auto [_, inserted] = necessary_names_set.emplace(name); + if (inserted) + necessary_names.push_back(name); + }; + + for (auto & join_clause : result.join_clauses) + { + const auto & left_filter_condition_nodes = join_clause.getLeftFilterConditionNodes(); + if (!left_filter_condition_nodes.empty()) + { + const ActionsDAG::Node * dag_filter_condition_node = nullptr; + + if (left_filter_condition_nodes.size() > 1) + dag_filter_condition_node = &join_expression_actions->addFunction(and_function, left_filter_condition_nodes, {}); + else + dag_filter_condition_node = left_filter_condition_nodes[0]; + + join_clause.getLeftFilterConditionNodes() = {dag_filter_condition_node}; + join_expression_actions->addOrReplaceInOutputs(*dag_filter_condition_node); + + add_necessary_name_if_needed(JoinTableSide::Left, dag_filter_condition_node->result_name); + } + + const auto & right_filter_condition_nodes = join_clause.getRightFilterConditionNodes(); + if (!right_filter_condition_nodes.empty()) + { + const ActionsDAG::Node * dag_filter_condition_node = nullptr; + + if (right_filter_condition_nodes.size() > 1) + dag_filter_condition_node = &join_expression_actions->addFunction(and_function, right_filter_condition_nodes, {}); + else + dag_filter_condition_node = right_filter_condition_nodes[0]; + + join_clause.getRightFilterConditionNodes() = {dag_filter_condition_node}; + join_expression_actions->addOrReplaceInOutputs(*dag_filter_condition_node); + + add_necessary_name_if_needed(JoinTableSide::Right, dag_filter_condition_node->result_name); + } + + assert(join_clause.getLeftKeyNodes().size() == join_clause.getRightKeyNodes().size()); + size_t join_clause_key_nodes_size = join_clause.getLeftKeyNodes().size(); + + if (join_clause_key_nodes_size == 0) + throw Exception(ErrorCodes::INVALID_JOIN_ON_EXPRESSION, "JOIN {} cannot get JOIN keys", + join_node.formatASTForErrorMessage()); + + for (size_t i = 0; i < join_clause_key_nodes_size; ++i) + { + auto & left_key_node = join_clause.getLeftKeyNodes()[i]; + auto & right_key_node = join_clause.getRightKeyNodes()[i]; + + if (!left_key_node->result_type->equals(*right_key_node->result_type)) + { + DataTypePtr common_type; + + try + { + common_type = getLeastSupertype(DataTypes{left_key_node->result_type, right_key_node->result_type}); + } + catch (Exception & ex) + { + ex.addMessage("JOIN {} cannot infer common type in ON section for keys. Left key {} type {}. 
Right key {} type {}", + join_node.formatASTForErrorMessage(), + left_key_node->result_name, + left_key_node->result_type->getName(), + right_key_node->result_name, + right_key_node->result_type->getName()); + throw; + } + + auto cast_type_name = common_type->getName(); + Field cast_type_constant_value(cast_type_name); + + ColumnWithTypeAndName cast_column; + cast_column.name = calculateConstantActionNodeName(cast_type_constant_value); + cast_column.column = DataTypeString().createColumnConst(0, cast_type_constant_value); + cast_column.type = std::make_shared(); + + const ActionsDAG::Node * cast_type_constant_node = nullptr; + + if (!left_key_node->result_type->equals(*common_type)) + { + cast_type_constant_node = &join_expression_actions->addColumn(cast_column); + + FunctionCastBase::Diagnostic diagnostic = {left_key_node->result_name, left_key_node->result_name}; + FunctionOverloadResolverPtr func_builder_cast + = CastInternalOverloadResolver::createImpl(diagnostic); + + ActionsDAG::NodeRawConstPtrs children = {left_key_node, cast_type_constant_node}; + left_key_node = &join_expression_actions->addFunction(func_builder_cast, std::move(children), {}); + } + + if (!right_key_node->result_type->equals(*common_type)) + { + if (!cast_type_constant_node) + cast_type_constant_node = &join_expression_actions->addColumn(cast_column); + + FunctionCastBase::Diagnostic diagnostic = {right_key_node->result_name, right_key_node->result_name}; + FunctionOverloadResolverPtr func_builder_cast + = CastInternalOverloadResolver::createImpl(std::move(diagnostic)); + + ActionsDAG::NodeRawConstPtrs children = {right_key_node, cast_type_constant_node}; + right_key_node = &join_expression_actions->addFunction(func_builder_cast, std::move(children), {}); + } + } + + join_expression_actions->addOrReplaceInOutputs(*left_key_node); + join_expression_actions->addOrReplaceInOutputs(*right_key_node); + + add_necessary_name_if_needed(JoinTableSide::Left, left_key_node->result_name); + add_necessary_name_if_needed(JoinTableSide::Right, right_key_node->result_name); + } + } + + result.left_join_expressions_actions = join_expression_actions->clone(); + result.left_join_expressions_actions->removeUnusedActions(join_left_actions_names); + + result.right_join_expressions_actions = join_expression_actions->clone(); + result.right_join_expressions_actions->removeUnusedActions(join_right_actions_names); + + return result; +} + +} + +JoinClausesAndActions buildJoinClausesAndActions( + const ColumnsWithTypeAndName & left_table_expression_columns, + const ColumnsWithTypeAndName & right_table_expression_columns, + const QueryTreeNodePtr & join_node, + const PlannerContextPtr & planner_context) +{ + auto & join_node_typed = join_node->as(); + if (!join_node_typed.isOnJoinExpression()) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "JOIN {} join does not have ON section", + join_node_typed.formatASTForErrorMessage()); + + auto join_expression_input_columns = left_table_expression_columns; + join_expression_input_columns.insert(join_expression_input_columns.end(), right_table_expression_columns.begin(), right_table_expression_columns.end()); + + return buildJoinClausesAndActions(join_expression_input_columns, left_table_expression_columns, right_table_expression_columns, join_node_typed, planner_context); +} + +std::optional tryExtractConstantFromJoinNode(const QueryTreeNodePtr & join_node) +{ + auto & join_node_typed = join_node->as(); + if (!join_node_typed.getJoinExpression()) + return {}; + + auto constant_value = 
join_node_typed.getJoinExpression()->getConstantValueOrNull(); + if (!constant_value) + return {}; + + const auto & value = constant_value->getValue(); + auto constant_type = constant_value->getType(); + constant_type = removeNullable(removeLowCardinality(constant_type)); + + auto which_constant_type = WhichDataType(constant_type); + if (!which_constant_type.isUInt8() && !which_constant_type.isNothing()) + return {}; + + if (value.isNull()) + return false; + + UInt8 predicate_value = value.safeGet(); + return predicate_value > 0; +} + +namespace +{ + +void trySetStorageInTableJoin(const QueryTreeNodePtr & table_expression, std::shared_ptr & table_join) +{ + StoragePtr storage; + + if (auto * table_node = table_expression->as()) + storage = table_node->getStorage(); + else if (auto * table_function = table_expression->as()) + storage = table_function->getStorage(); + + auto storage_join = std::dynamic_pointer_cast(storage); + if (storage_join) + { + table_join->setStorageJoin(storage_join); + return; + } + + if (!table_join->isEnabledAlgorithm(JoinAlgorithm::DIRECT)) + return; + + if (auto storage_dictionary = std::dynamic_pointer_cast(storage); storage_dictionary) + table_join->setStorageJoin(std::dynamic_pointer_cast(storage_dictionary->getDictionary())); + else if (auto storage_key_value = std::dynamic_pointer_cast(storage); storage_key_value) + table_join->setStorageJoin(storage_key_value); +} + +std::shared_ptr tryDirectJoin(const std::shared_ptr & table_join, + const QueryTreeNodePtr & right_table_expression, + const Block & right_table_expression_header, + const PlannerContextPtr & planner_context) +{ + if (!table_join->isEnabledAlgorithm(JoinAlgorithm::DIRECT)) + return {}; + + auto storage = table_join->getStorageKeyValue(); + if (!storage) + return {}; + + bool allowed_inner = isInner(table_join->kind()) && table_join->strictness() == JoinStrictness::All; + bool allowed_left = isLeft(table_join->kind()) && (table_join->strictness() == JoinStrictness::Any || + table_join->strictness() == JoinStrictness::All || + table_join->strictness() == JoinStrictness::Semi || + table_join->strictness() == JoinStrictness::Anti); + if (!allowed_inner && !allowed_left) + return {}; + + const auto & clauses = table_join->getClauses(); + bool only_one_key = clauses.size() == 1 && + clauses[0].key_names_left.size() == 1 && + clauses[0].key_names_right.size() == 1 && + !clauses[0].on_filter_condition_left && + !clauses[0].on_filter_condition_right && + clauses[0].analyzer_left_filter_condition_column_name.empty() && + clauses[0].analyzer_right_filter_condition_column_name.empty(); + + if (!only_one_key) + return {}; + + const String & key_name = clauses[0].key_names_right[0]; + + auto & right_table_expression_data = planner_context->getTableExpressionDataOrThrow(right_table_expression); + const auto * table_column_name = right_table_expression_data.getColumnNameOrNull(key_name); + if (!table_column_name) + return {}; + + const auto & storage_primary_key = storage->getPrimaryKey(); + if (storage_primary_key.size() != 1 || storage_primary_key[0] != *table_column_name) + return {}; + + /** For right table expression during execution columns have unique name. + * Direct key value join implementation during storage querying must use storage column names. 
+ * + * Example: + * CREATE DICTIONARY test_dictionary (id UInt64, value String) PRIMARY KEY id SOURCE(CLICKHOUSE(TABLE 'test_dictionary_table')) LIFETIME(0); + * SELECT t1.id FROM test_table AS t1 INNER JOIN test_dictionary AS t2 ON t1.id = t2.id; + * + * Unique execution name for `id` column from right table expression `test_dictionary AS t2` for example can be `t2.id_0`. + * Storage column name is `id`. + * + * Here we create header for right table expression with original storage column names. + */ + Block right_table_expression_header_with_storage_column_names; + + for (const auto & right_table_expression_column : right_table_expression_header) + { + const auto * table_column_name = right_table_expression_data.getColumnNameOrNull(right_table_expression_column.name); + if (!table_column_name) + return {}; + + auto right_table_expression_column_with_storage_column_name = right_table_expression_column; + right_table_expression_column_with_storage_column_name.name = *table_column_name; + right_table_expression_header_with_storage_column_names.insert(right_table_expression_column_with_storage_column_name); + } + + return std::make_shared(table_join, right_table_expression_header, storage, right_table_expression_header_with_storage_column_names); +} + +} + +std::shared_ptr chooseJoinAlgorithm(std::shared_ptr & table_join, + const QueryTreeNodePtr & right_table_expression, + const Block & right_table_expression_header, + const PlannerContextPtr & planner_context) +{ + trySetStorageInTableJoin(right_table_expression, table_join); + + /// JOIN with JOIN engine. + if (auto storage = table_join->getStorageJoin()) + return storage->getJoinLocked(table_join, planner_context->getQueryContext()); + + /** JOIN with constant. + * Example: SELECT * FROM test_table AS t1 INNER JOIN test_table AS t2 ON 1; + */ + if (table_join->isJoinWithConstant()) + { + if (!table_join->isEnabledAlgorithm(JoinAlgorithm::HASH)) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "JOIN with constant supported only with join algorithm 'hash'"); + + return std::make_shared(table_join, right_table_expression_header); + } + + if (!table_join->oneDisjunct() && !table_join->isEnabledAlgorithm(JoinAlgorithm::HASH)) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Only `hash` join supports multiple ORs for keys in JOIN ON section"); + + /// Direct JOIN with special storages that support key value access. 
For example, JOIN with a Dictionary.
+    if (table_join->isEnabledAlgorithm(JoinAlgorithm::DIRECT))
+    {
+        JoinPtr direct_join = tryDirectJoin(table_join, right_table_expression, right_table_expression_header, planner_context);
+        if (direct_join)
+            return direct_join;
+    }
+
+    if (table_join->isEnabledAlgorithm(JoinAlgorithm::PARTIAL_MERGE) ||
+        table_join->isEnabledAlgorithm(JoinAlgorithm::PREFER_PARTIAL_MERGE))
+    {
+        if (MergeJoin::isSupported(table_join))
+            return std::make_shared(table_join, right_table_expression_header);
+    }
+
+    if (table_join->isEnabledAlgorithm(JoinAlgorithm::HASH) ||
+        /// partial_merge is preferred, but can't be used for the specified kind of join, fall back to hash
+        table_join->isEnabledAlgorithm(JoinAlgorithm::PREFER_PARTIAL_MERGE) ||
+        table_join->isEnabledAlgorithm(JoinAlgorithm::PARALLEL_HASH))
+    {
+        if (table_join->allowParallelHashJoin())
+        {
+            auto query_context = planner_context->getQueryContext();
+            return std::make_shared(query_context, table_join, query_context->getSettings().max_threads, right_table_expression_header);
+        }
+
+        return std::make_shared(table_join, right_table_expression_header);
+    }
+
+    if (table_join->isEnabledAlgorithm(JoinAlgorithm::FULL_SORTING_MERGE))
+    {
+        if (FullSortingMergeJoin::isSupported(table_join))
+            return std::make_shared(table_join, right_table_expression_header);
+    }
+
+    if (table_join->isEnabledAlgorithm(JoinAlgorithm::AUTO))
+        return std::make_shared(table_join, right_table_expression_header);
+
+    throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Can't execute any of specified algorithms for specified strictness/kind and right storage type");
+}
+
+}
diff --git a/src/Planner/PlannerJoins.h b/src/Planner/PlannerJoins.h
new file mode 100644
index 00000000000..d305249e789
--- /dev/null
+++ b/src/Planner/PlannerJoins.h
@@ -0,0 +1,196 @@
+#pragma once
+
+#include
+#include
+
+#include
+#include
+#include
+
+#include
+
+namespace DB
+{
+
+/** Join clause represents a single JOIN ON section clause.
+  * A join clause consists of JOIN keys and conditions.
+  *
+  * A JOIN can contain multiple clauses in its JOIN ON section.
+  * Example: SELECT * FROM test_table_1 AS t1 INNER JOIN test_table_2 AS t2 ON t1.id = t2.id OR t1.value = t2.value;
+  * t1.id = t2.id is the first clause.
+  * t1.value = t2.value is the second clause.
+  *
+  * A JOIN ON section can also contain conditions inside a clause.
+  * Example: SELECT * FROM test_table_1 AS t1 INNER JOIN test_table_2 AS t2 ON t1.id = t2.id AND t1.id > 0 AND t2.id > 0;
+  * t1.id = t2.id AND t1.id > 0 AND t2.id > 0 is the first clause.
+  * t1.id = t2.id is the JOIN keys section.
+  * t1.id > 0 is a left table condition.
+  * t2.id > 0 is a right table condition.
+  *
+  * Additionally, not only conditions but also JOIN keys can be represented as expressions.
+  * Example: SELECT * FROM test_table_1 AS t1 INNER JOIN test_table_2 AS t2 ON toString(t1.id) = toString(t2.id).
+  * toString(t1.id) = toString(t2.id) is the JOIN keys section, where toString(t1.id) is the left key and toString(t2.id) is the right key.
+  *
+  * During query planning the JOIN ON section is represented using the join clause structure. It is important to split
+  * keys from conditions and, for each action, to detect from which stream it can be computed.
+  *
+  * We have two streams, the left stream and the right stream.
+  * We split the JOIN ON section expression actions into two parts: left join expression actions and right join expression actions.
+  * Left join expression actions must be used to calculate the necessary actions for the left stream.
+ * Right join expression actions must be used to calculate necessary actions for right stream. + */ +class PlannerContext; +using PlannerContextPtr = std::shared_ptr; + +struct ASOFCondition +{ + size_t key_index; + ASOFJoinInequality asof_inequality; +}; + +/// Single JOIN ON section clause representation +class JoinClause +{ +public: + /// Add keys + void addKey(const ActionsDAG::Node * left_key_node, const ActionsDAG::Node * right_key_node) + { + left_key_nodes.emplace_back(left_key_node); + right_key_nodes.emplace_back(right_key_node); + } + + void addASOFKey(const ActionsDAG::Node * left_key_node, const ActionsDAG::Node * right_key_node, ASOFJoinInequality asof_inequality) + { + left_key_nodes.emplace_back(left_key_node); + right_key_nodes.emplace_back(right_key_node); + asof_conditions.push_back(ASOFCondition{left_key_nodes.size() - 1, asof_inequality}); + } + + /// Add condition for table side + void addCondition(JoinTableSide table_side, const ActionsDAG::Node * condition_node) + { + auto & filter_condition_nodes = table_side == JoinTableSide::Left ? left_filter_condition_nodes : right_filter_condition_nodes; + filter_condition_nodes.push_back(condition_node); + } + + /// Get left key nodes + const ActionsDAG::NodeRawConstPtrs & getLeftKeyNodes() const + { + return left_key_nodes; + } + + /// Get left key nodes + ActionsDAG::NodeRawConstPtrs & getLeftKeyNodes() + { + return left_key_nodes; + } + + /// Get right key nodes + const ActionsDAG::NodeRawConstPtrs & getRightKeyNodes() const + { + return right_key_nodes; + } + + /// Get right key nodes + ActionsDAG::NodeRawConstPtrs & getRightKeyNodes() + { + return right_key_nodes; + } + + /// Returns true if JOIN clause has ASOF conditions, false otherwise + bool hasASOF() const + { + return !asof_conditions.empty(); + } + + /// Get ASOF conditions + const std::vector & getASOFConditions() const + { + return asof_conditions; + } + + /// Get left filter condition nodes + const ActionsDAG::NodeRawConstPtrs & getLeftFilterConditionNodes() const + { + return left_filter_condition_nodes; + } + + /// Get left filter condition nodes + ActionsDAG::NodeRawConstPtrs & getLeftFilterConditionNodes() + { + return left_filter_condition_nodes; + } + + /// Get right filter condition nodes + const ActionsDAG::NodeRawConstPtrs & getRightFilterConditionNodes() const + { + return right_filter_condition_nodes; + } + + /// Get right filter condition nodes + ActionsDAG::NodeRawConstPtrs & getRightFilterConditionNodes() + { + return right_filter_condition_nodes; + } + + /// Dump clause into buffer + void dump(WriteBuffer & buffer) const; + + /// Dump clause + String dump() const; + +private: + ActionsDAG::NodeRawConstPtrs left_key_nodes; + ActionsDAG::NodeRawConstPtrs right_key_nodes; + + std::vector asof_conditions; + + ActionsDAG::NodeRawConstPtrs left_filter_condition_nodes; + ActionsDAG::NodeRawConstPtrs right_filter_condition_nodes; +}; + +using JoinClauses = std::vector; + +struct JoinClausesAndActions +{ + /// Join clauses. Actions dag nodes point into join_expression_actions. + JoinClauses join_clauses; + /// Whole JOIN ON section expressions + ActionsDAGPtr join_expression_actions; + /// Left join expressions actions + ActionsDAGPtr left_join_expressions_actions; + /// Right join expressions actions + ActionsDAGPtr right_join_expressions_actions; +}; + +/** Calculate join clauses and actions for JOIN ON section. + * + * left_table_expression_columns - columns from left join stream. + * right_table_expression_columns - columns from right join stream. 
+ * join_node - join query tree node. + * planner_context - planner context. + */ +JoinClausesAndActions buildJoinClausesAndActions( + const ColumnsWithTypeAndName & left_table_expression_columns, + const ColumnsWithTypeAndName & right_table_expression_columns, + const QueryTreeNodePtr & join_node, + const PlannerContextPtr & planner_context); + +/** Try extract boolean constant from JOIN expression. + * Example: SELECT * FROM test_table AS t1 INNER JOIN test_table AS t2 ON 1; + * Example: SELECT * FROM test_table AS t1 INNER JOIN test_table AS t2 ON 1 != 1; + * + * join_node - join query tree node. + */ +std::optional tryExtractConstantFromJoinNode(const QueryTreeNodePtr & join_node); + +/** Choose JOIN algorithm for table join, right table expression, right table expression header and planner context. + * Table join structure can be modified during JOIN algorithm choosing for special JOIN algorithms. + * For example JOIN with Dictionary engine, or JOIN with JOIN engine. + */ +std::shared_ptr chooseJoinAlgorithm(std::shared_ptr & table_join, + const QueryTreeNodePtr & right_table_expression, + const Block & right_table_expression_header, + const PlannerContextPtr & planner_context); + +} diff --git a/src/Planner/PlannerSorting.cpp b/src/Planner/PlannerSorting.cpp new file mode 100644 index 00000000000..5ae8bd1e21b --- /dev/null +++ b/src/Planner/PlannerSorting.cpp @@ -0,0 +1,157 @@ +#include + +#include + +#include + +#include + +#include +#include + +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int INVALID_WITH_FILL_EXPRESSION; +} + +namespace +{ + +std::pair extractWithFillValue(const QueryTreeNodePtr & node) +{ + const auto & constant_value = node->getConstantValue(); + + std::pair result; + result.first = constant_value.getValue(); + result.second = constant_value.getType(); + + if (!isColumnedAsNumber(result.second)) + throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, "WITH FILL expression must be constant with numeric type"); + + return result; +} + +std::pair> extractWithFillStepValue(const QueryTreeNodePtr & node) +{ + const auto & constant_value = node->getConstantValue(); + + const auto & constant_node_result_type = constant_value.getType(); + if (const auto * type_interval = typeid_cast(constant_node_result_type.get())) + return std::make_pair(constant_value.getValue(), type_interval->getKind()); + + if (!isColumnedAsNumber(constant_node_result_type)) + throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, "WITH FILL expression must be constant with numeric type"); + + return {constant_value.getValue(), {}}; +} + +FillColumnDescription extractWithFillDescription(const SortNode & sort_node) +{ + FillColumnDescription fill_column_description; + + if (sort_node.hasFillFrom()) + { + auto extract_result = extractWithFillValue(sort_node.getFillFrom()); + fill_column_description.fill_from = std::move(extract_result.first); + fill_column_description.fill_from_type = std::move(extract_result.second); + } + + if (sort_node.hasFillTo()) + { + auto extract_result = extractWithFillValue(sort_node.getFillTo()); + fill_column_description.fill_to = std::move(extract_result.first); + fill_column_description.fill_to_type = std::move(extract_result.second); + } + + if (sort_node.hasFillStep()) + { + auto extract_result = extractWithFillStepValue(sort_node.getFillStep()); + fill_column_description.fill_step = std::move(extract_result.first); + fill_column_description.step_kind = std::move(extract_result.second); + } + else + { + auto direction_value = 
sort_node.getSortDirection() == SortDirection::ASCENDING ? static_cast(1) : static_cast(-1); + fill_column_description.fill_step = Field(direction_value); + } + + if (applyVisitor(FieldVisitorAccurateEquals(), fill_column_description.fill_step, Field{0})) + throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, + "WITH FILL STEP value cannot be zero"); + + if (sort_node.getSortDirection() == SortDirection::ASCENDING) + { + if (applyVisitor(FieldVisitorAccurateLess(), fill_column_description.fill_step, Field{0})) + throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, + "WITH FILL STEP value cannot be negative for sorting in ascending direction"); + + if (!fill_column_description.fill_from.isNull() && !fill_column_description.fill_to.isNull() && + applyVisitor(FieldVisitorAccurateLess(), fill_column_description.fill_to, fill_column_description.fill_from)) + { + throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, + "WITH FILL TO value cannot be less than FROM value for sorting in ascending direction"); + } + } + else + { + if (applyVisitor(FieldVisitorAccurateLess(), Field{0}, fill_column_description.fill_step)) + throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, + "WITH FILL STEP value cannot be positive for sorting in descending direction"); + + if (!fill_column_description.fill_from.isNull() && !fill_column_description.fill_to.isNull() && + applyVisitor(FieldVisitorAccurateLess(), fill_column_description.fill_from, fill_column_description.fill_to)) + { + throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, + "WITH FILL FROM value cannot be less than TO value for sorting in descending direction"); + } + } + + return fill_column_description; +} + +} + +SortDescription extractSortDescription(const QueryTreeNodePtr & order_by_node, const PlannerContext & planner_context) +{ + auto & order_by_list_node = order_by_node->as(); + + SortDescription sort_column_description; + sort_column_description.reserve(order_by_list_node.getNodes().size()); + + for (const auto & sort_node : order_by_list_node.getNodes()) + { + auto & sort_node_typed = sort_node->as(); + + auto column_name = calculateActionNodeName(sort_node_typed.getExpression(), planner_context); + std::shared_ptr collator = sort_node_typed.getCollator(); + int direction = sort_node_typed.getSortDirection() == SortDirection::ASCENDING ? 1 : -1; + int nulls_direction = direction; + + auto nulls_sort_direction = sort_node_typed.getNullsSortDirection(); + if (nulls_sort_direction) + nulls_direction = *nulls_sort_direction == SortDirection::ASCENDING ? 
1 : -1; + + if (sort_node_typed.withFill()) + { + FillColumnDescription fill_description = extractWithFillDescription(sort_node_typed); + sort_column_description.emplace_back(column_name, direction, nulls_direction, collator, true /*with_fill*/, fill_description); + } + else + { + sort_column_description.emplace_back(column_name, direction, nulls_direction, collator); + } + } + + const auto & settings = planner_context.getQueryContext()->getSettingsRef(); + sort_column_description.compile_sort_description = settings.compile_sort_description; + sort_column_description.min_count_to_compile_sort_description = settings.min_count_to_compile_sort_description; + + return sort_column_description; +} + +} diff --git a/src/Planner/PlannerSorting.h b/src/Planner/PlannerSorting.h new file mode 100644 index 00000000000..c4e4e634973 --- /dev/null +++ b/src/Planner/PlannerSorting.h @@ -0,0 +1,14 @@ +#pragma once + +#include + +#include + +namespace DB +{ + +/// Extract sort description from order by node +SortDescription extractSortDescription(const QueryTreeNodePtr & order_by_node, const PlannerContext & planner_context); + +} + diff --git a/src/Planner/PlannerWindowFunctions.cpp b/src/Planner/PlannerWindowFunctions.cpp new file mode 100644 index 00000000000..4fe60a18099 --- /dev/null +++ b/src/Planner/PlannerWindowFunctions.cpp @@ -0,0 +1,146 @@ +#include + +#include +#include + +#include + +#include +#include + +namespace DB +{ + +namespace +{ + +WindowDescription extractWindowDescriptionFromWindowNode(const QueryTreeNodePtr & node, const PlannerContext & planner_context) +{ + auto & window_node = node->as(); + + WindowDescription window_description; + window_description.window_name = calculateWindowNodeActionName(node, planner_context); + + for (const auto & partition_by_node : window_node.getPartitionBy().getNodes()) + { + auto partition_by_node_action_name = calculateActionNodeName(partition_by_node, planner_context); + auto partition_by_sort_column_description = SortColumnDescription(partition_by_node_action_name, 1 /* direction */, 1 /* nulls_direction */); + window_description.partition_by.push_back(std::move(partition_by_sort_column_description)); + } + + window_description.order_by = extractSortDescription(window_node.getOrderByNode(), planner_context); + + window_description.full_sort_description = window_description.partition_by; + window_description.full_sort_description.insert(window_description.full_sort_description.end(), window_description.order_by.begin(), window_description.order_by.end()); + + /// WINDOW frame is validated during query analysis stage + window_description.frame = window_node.getWindowFrame(); + + const auto & query_context = planner_context.getQueryContext(); + const auto & query_context_settings = query_context->getSettingsRef(); + + bool compile_sort_description = query_context_settings.compile_sort_description; + size_t min_count_to_compile_sort_description = query_context_settings.min_count_to_compile_sort_description; + + window_description.partition_by.compile_sort_description = compile_sort_description; + window_description.partition_by.min_count_to_compile_sort_description = min_count_to_compile_sort_description; + + window_description.order_by.compile_sort_description = compile_sort_description; + window_description.order_by.min_count_to_compile_sort_description = min_count_to_compile_sort_description; + + window_description.full_sort_description.compile_sort_description = compile_sort_description; + 
window_description.full_sort_description.min_count_to_compile_sort_description = min_count_to_compile_sort_description; + + return window_description; +} + +} + +std::vector extractWindowDescriptions(const QueryTreeNodes & window_function_nodes, const PlannerContext & planner_context) +{ + std::unordered_map window_name_to_description; + + for (const auto & window_function_node : window_function_nodes) + { + auto & window_function_node_typed = window_function_node->as(); + + auto function_window_description = extractWindowDescriptionFromWindowNode(window_function_node_typed.getWindowNode(), planner_context); + auto window_name = function_window_description.window_name; + + auto [it, _] = window_name_to_description.emplace(window_name, std::move(function_window_description)); + auto & window_description = it->second; + + WindowFunctionDescription window_function; + window_function.function_node = nullptr; + window_function.column_name = calculateActionNodeName(window_function_node, planner_context); + window_function.aggregate_function = window_function_node_typed.getAggregateFunction(); + + const auto & parameters_nodes = window_function_node_typed.getParameters().getNodes(); + window_function.function_parameters.reserve(parameters_nodes.size()); + + for (const auto & parameter_node : parameters_nodes) + { + /// Function parameters constness validated during analysis stage + window_function.function_parameters.push_back(parameter_node->getConstantValue().getValue()); + } + + const auto & arguments_nodes = window_function_node_typed.getArguments().getNodes(); + size_t arguments_nodes_size = arguments_nodes.size(); + + window_function.argument_names.reserve(arguments_nodes_size); + window_function.argument_types.reserve(arguments_nodes_size); + + for (const auto & argument_node : arguments_nodes) + { + String argument_node_name = calculateActionNodeName(argument_node, planner_context); + window_function.argument_names.emplace_back(std::move(argument_node_name)); + window_function.argument_types.emplace_back(argument_node->getResultType()); + } + + window_description.window_functions.push_back(window_function); + } + + std::vector result; + result.reserve(window_name_to_description.size()); + + for (auto && [_, window_description] : window_name_to_description) + result.push_back(std::move(window_description)); + + return result; +} + +void sortWindowDescriptions(std::vector & window_descriptions) +{ + auto window_description_comparator = [](const WindowDescription & lhs, const WindowDescription & rhs) + { + const auto & left = lhs.full_sort_description; + const auto & right = rhs.full_sort_description; + + for (size_t i = 0; i < std::min(left.size(), right.size()); ++i) + { + if (left[i].column_name < right[i].column_name) + return true; + else if (left[i].column_name > right[i].column_name) + return false; + else if (left[i].direction < right[i].direction) + return true; + else if (left[i].direction > right[i].direction) + return false; + else if (left[i].nulls_direction < right[i].nulls_direction) + return true; + else if (left[i].nulls_direction > right[i].nulls_direction) + return false; + + assert(left[i] == right[i]); + } + + /** Note that we check the length last, because we want to put together the + * sort orders that have common prefix but different length. 
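+ * For example, sort descriptions (a, b, c) and (a, b) should end up adjacent, with the longer one first, so that the shorter one can reuse the prefix sort.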
+ */ + return left.size() > right.size(); + }; + + ::sort(window_descriptions.begin(), window_descriptions.end(), window_description_comparator); +} + +} diff --git a/src/Planner/PlannerWindowFunctions.h b/src/Planner/PlannerWindowFunctions.h new file mode 100644 index 00000000000..1552ef5a71f --- /dev/null +++ b/src/Planner/PlannerWindowFunctions.h @@ -0,0 +1,20 @@ +#pragma once + +#include + +#include + +#include + +namespace DB +{ + +/// Extract window descriptions from window function nodes +std::vector extractWindowDescriptions(const QueryTreeNodes & window_function_nodes, const PlannerContext & planner_context); + +/** Try to sort window descriptions in such an order that the window with the longest + * sort description goes first, and all windows that use its prefixes follow. + */ +void sortWindowDescriptions(std::vector & window_descriptions); + +} diff --git a/src/Planner/TableExpressionData.h b/src/Planner/TableExpressionData.h new file mode 100644 index 00000000000..0918c35a8ef --- /dev/null +++ b/src/Planner/TableExpressionData.h @@ -0,0 +1,186 @@ +#pragma once + +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +using ColumnIdentifier = std::string; + +/** Table expression data is created for each table expression that takes part in the query. + * Table expression data has information about columns that participate in the query, their name to identifier mapping, + * and additional table expression properties. + */ +class TableExpressionData +{ +public: + using ColumnNameToColumnIdentifier = std::unordered_map; + + using ColumnIdentifierToColumnName = std::unordered_map; + + /// Return true if column with name exists, false otherwise + bool hasColumn(const std::string & column_name) const + { + return alias_columns_names.contains(column_name) || columns_names.contains(column_name); + } + + /** Add column to table expression data. + * Column identifier must be created using global planner context. + * + * Logical error exception is thrown if column already exists. + */ + void addColumn(const NameAndTypePair & column, const ColumnIdentifier & column_identifier) + { + if (hasColumn(column.name)) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Column with name {} already exists", column.name); + + columns_names.insert(column.name); + columns.push_back(column); + column_name_to_column_identifier.emplace(column.name, column_identifier); + column_identifier_to_column_name.emplace(column_identifier, column.name); + } + + /** Add column if it does not exist in table expression data. + * Column identifier must be created using global planner context.
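+ * Unlike addColumn, no exception is thrown if the column already exists; in that case the call is a no-op.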
+ */ + void addColumnIfNotExists(const NameAndTypePair & column, const ColumnIdentifier & column_identifier) + { + if (hasColumn(column.name)) + return; + + columns_names.insert(column.name); + columns.push_back(column); + column_name_to_column_identifier.emplace(column.name, column_identifier); + column_identifier_to_column_name.emplace(column_identifier, column.name); + } + + /// Add alias column name + void addAliasColumnName(const std::string & column_name) + { + alias_columns_names.insert(column_name); + } + + /// Get alias columns names + const NameSet & getAliasColumnsNames() const + { + return alias_columns_names; + } + + /// Get columns names + const NameSet & getColumnsNames() const + { + return columns_names; + } + + /// Get columns + const NamesAndTypesList & getColumns() const + { + return columns; + } + + /// Get column name to column identifier map + const ColumnNameToColumnIdentifier & getColumnNameToIdentifier() const + { + return column_name_to_column_identifier; + } + + /// Get column identifier to column name map + const ColumnIdentifierToColumnName & getColumnIdentifierToColumnName() const + { + return column_identifier_to_column_name; + } + + /** Get column identifier for column name. + * An exception is thrown if there is no column identifier for the column name. + */ + const ColumnIdentifier & getColumnIdentifierOrThrow(const std::string & column_name) const + { + auto it = column_name_to_column_identifier.find(column_name); + if (it == column_name_to_column_identifier.end()) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Column identifier for name {} does not exist", + column_name); + + return it->second; + } + + /** Get column identifier for column name. + * nullptr is returned if there is no column identifier for the column name. + */ + const ColumnIdentifier * getColumnIdentifierOrNull(const std::string & column_name) const + { + auto it = column_name_to_column_identifier.find(column_name); + if (it == column_name_to_column_identifier.end()) + return nullptr; + + return &it->second; + } + + /** Get column name for column identifier. + * An exception is thrown if there is no column name for the column identifier. + */ + const std::string & getColumnNameOrThrow(const ColumnIdentifier & column_identifier) const + { + auto it = column_identifier_to_column_name.find(column_identifier); + if (it == column_identifier_to_column_name.end()) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Column name for identifier {} does not exist", + column_identifier); + + return it->second; + } + + /** Get column name for column identifier. + * nullptr is returned if there is no column name for the column identifier. + */ + const std::string * getColumnNameOrNull(const ColumnIdentifier & column_identifier) const + { + auto it = column_identifier_to_column_name.find(column_identifier); + if (it == column_identifier_to_column_name.end()) + return nullptr; + + return &it->second; + } + + /** Returns true if storage is remote, false otherwise. + * + * Valid only for table and table function node.
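+ * Remote here is assumed to mean that the underlying storage reports isRemote(), e.g. tables using the Distributed engine.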
+ */ + bool isRemote() const + { + return is_remote; + } + + /// Set is storage remote value + void setIsRemote(bool is_remote_value) + { + is_remote = is_remote_value; + } + +private: + /// Valid for table, table function, query, union table expression nodes + NamesAndTypesList columns; + + /// Valid for table, table function, query, union table expression nodes + NameSet columns_names; + + /// Valid only for table table expression node + NameSet alias_columns_names; + + /// Valid for table, table function, query, union table expression nodes + ColumnNameToColumnIdentifier column_name_to_column_identifier; + + /// Valid for table, table function, query, union table expression nodes + ColumnIdentifierToColumnName column_identifier_to_column_name; + + /// Is storage remote + bool is_remote = false; +}; + +} diff --git a/src/Planner/Utils.cpp b/src/Planner/Utils.cpp new file mode 100644 index 00000000000..74918285453 --- /dev/null +++ b/src/Planner/Utils.cpp @@ -0,0 +1,314 @@ +#include + +#include +#include +#include + +#include + +#include + +#include + +#include +#include +#include +#include +#include +#include + +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int TYPE_MISMATCH; + extern const int LOGICAL_ERROR; +} + +String dumpQueryPlan(QueryPlan & query_plan) +{ + WriteBufferFromOwnString query_plan_buffer; + query_plan.explainPlan(query_plan_buffer, QueryPlan::ExplainPlanOptions{true, true, true, true}); + + return query_plan_buffer.str(); +} + +String dumpQueryPipeline(QueryPlan & query_plan) +{ + QueryPlan::ExplainPipelineOptions explain_pipeline; + WriteBufferFromOwnString query_pipeline_buffer; + query_plan.explainPipeline(query_pipeline_buffer, explain_pipeline); + + return query_pipeline_buffer.str(); +} + +Block buildCommonHeaderForUnion(const Blocks & queries_headers) +{ + size_t num_selects = queries_headers.size(); + Block common_header = queries_headers.front(); + size_t columns_size = common_header.columns(); + + for (size_t query_number = 1; query_number < num_selects; ++query_number) + { + if (queries_headers.at(query_number).columns() != columns_size) + throw Exception(ErrorCodes::TYPE_MISMATCH, + "Different number of columns in UNION elements: {} and {}", + common_header.dumpNames(), + queries_headers[query_number].dumpNames()); + } + + std::vector columns(num_selects); + + for (size_t column_number = 0; column_number < columns_size; ++column_number) + { + for (size_t i = 0; i < num_selects; ++i) + columns[i] = &queries_headers[i].getByPosition(column_number); + + ColumnWithTypeAndName & result_element = common_header.getByPosition(column_number); + result_element = getLeastSuperColumn(columns); + } + + return common_header; +} + +ASTPtr queryNodeToSelectQuery(const QueryTreeNodePtr & query_node) +{ + auto & query_node_typed = query_node->as(); + auto result_ast = query_node_typed.toAST(); + + while (true) + { + if (auto * select_query = result_ast->as()) + break; + else if (auto * select_with_union = result_ast->as()) + result_ast = select_with_union->list_of_selects->children.at(0); + else if (auto * subquery = result_ast->as()) + result_ast = subquery->children.at(0); + else + throw Exception(ErrorCodes::LOGICAL_ERROR, "Query node invalid conversion to select query"); + } + + if (result_ast == nullptr) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Query node invalid conversion to select query"); + + return result_ast; +} + +/** There are no limits on the maximum size of the result for the subquery. 
+ * Since the result of the query is not the result of the entire query. + */ +ContextPtr buildSubqueryContext(const ContextPtr & context) +{ + /** The subquery in the IN / JOIN section does not have any restrictions on the maximum size of the result. + * Because the result of this query is not the result of the entire query. + * Constraints work instead + * max_rows_in_set, max_bytes_in_set, set_overflow_mode, + * max_rows_in_join, max_bytes_in_join, join_overflow_mode, + * which are checked separately (in the Set, Join objects). + */ + auto subquery_context = Context::createCopy(context); + Settings subquery_settings = context->getSettings(); + subquery_settings.max_result_rows = 0; + subquery_settings.max_result_bytes = 0; + /// The calculation of extremes does not make sense and is not necessary (if you do it, then the extremes of the subquery can be taken for whole query). + subquery_settings.extremes = false; + subquery_context->setSettings(subquery_settings); + + return subquery_context; +} + +namespace +{ + +StreamLocalLimits getLimitsForStorage(const Settings & settings, const SelectQueryOptions & options) +{ + StreamLocalLimits limits; + limits.mode = LimitsMode::LIMITS_TOTAL; + limits.size_limits = SizeLimits(settings.max_rows_to_read, settings.max_bytes_to_read, settings.read_overflow_mode); + limits.speed_limits.max_execution_time = settings.max_execution_time; + limits.timeout_overflow_mode = settings.timeout_overflow_mode; + + /** Quota and minimal speed restrictions are checked on the initiating server of the request, and not on remote servers, + * because the initiating server has a summary of the execution of the request on all servers. + * + * But limits on data size to read and maximum execution time are reasonable to check both on initiator and + * additionally on each remote server, because these limits are checked per block of data processed, + * and remote servers may process way more blocks of data than are received by initiator. + * + * The limits to throttle maximum execution speed is also checked on all servers. + */ + if (options.to_stage == QueryProcessingStage::Complete) + { + limits.speed_limits.min_execution_rps = settings.min_execution_speed; + limits.speed_limits.min_execution_bps = settings.min_execution_speed_bytes; + } + + limits.speed_limits.max_execution_rps = settings.max_execution_speed; + limits.speed_limits.max_execution_bps = settings.max_execution_speed_bytes; + limits.speed_limits.timeout_before_checking_execution_speed = settings.timeout_before_checking_execution_speed; + + return limits; +} + +} + +StorageLimits buildStorageLimits(const Context & context, const SelectQueryOptions & options) +{ + const auto & settings = context.getSettingsRef(); + + StreamLocalLimits limits; + SizeLimits leaf_limits; + + /// Set the limits and quota for reading data, the speed and time of the query. 
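+ /// Leaf limits (max_rows_to_read_leaf, max_bytes_to_read_leaf, read_overflow_mode_leaf) additionally restrict reading on the leaf nodes of a distributed query.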
+ if (!options.ignore_limits) + { + limits = getLimitsForStorage(settings, options); + leaf_limits = SizeLimits(settings.max_rows_to_read_leaf, settings.max_bytes_to_read_leaf, settings.read_overflow_mode_leaf); + } + + return {limits, leaf_limits}; +} + +ActionsDAGPtr buildActionsDAGFromExpressionNode(const QueryTreeNodePtr & expression_node, const ColumnsWithTypeAndName & input_columns, const PlannerContextPtr & planner_context) +{ + ActionsDAGPtr action_dag = std::make_shared(input_columns); + PlannerActionsVisitor actions_visitor(planner_context); + auto expression_dag_index_nodes = actions_visitor.visit(action_dag, expression_node); + action_dag->getOutputs() = std::move(expression_dag_index_nodes); + + return action_dag; +} + +bool sortDescriptionIsPrefix(const SortDescription & prefix, const SortDescription & full) +{ + size_t prefix_size = prefix.size(); + if (prefix_size > full.size()) + return false; + + for (size_t i = 0; i < prefix_size; ++i) + { + if (full[i] != prefix[i]) + return false; + } + + return true; +} + +bool queryHasArrayJoinInJoinTree(const QueryTreeNodePtr & query_node) +{ + const auto & query_node_typed = query_node->as(); + + std::vector join_tree_nodes_to_process; + join_tree_nodes_to_process.push_back(query_node_typed.getJoinTree()); + + while (!join_tree_nodes_to_process.empty()) + { + auto join_tree_node_to_process = join_tree_nodes_to_process.back(); + join_tree_nodes_to_process.pop_back(); + + auto join_tree_node_type = join_tree_node_to_process->getNodeType(); + + switch (join_tree_node_type) + { + case QueryTreeNodeType::TABLE: + [[fallthrough]]; + case QueryTreeNodeType::QUERY: + [[fallthrough]]; + case QueryTreeNodeType::UNION: + [[fallthrough]]; + case QueryTreeNodeType::TABLE_FUNCTION: + { + break; + } + case QueryTreeNodeType::ARRAY_JOIN: + { + return true; + } + case QueryTreeNodeType::JOIN: + { + auto & join_node = join_tree_node_to_process->as(); + join_tree_nodes_to_process.push_back(join_node.getLeftTableExpression()); + join_tree_nodes_to_process.push_back(join_node.getRightTableExpression()); + break; + } + default: + { + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Unexpected node type for table expression. Expected table, table function, query, union, join or array join. 
Actual {}", + join_tree_node_to_process->getNodeTypeName()); + } + } + } + + return false; +} + +bool queryHasWithTotalsInAnySubqueryInJoinTree(const QueryTreeNodePtr & query_node) +{ + const auto & query_node_typed = query_node->as(); + + std::vector join_tree_nodes_to_process; + join_tree_nodes_to_process.push_back(query_node_typed.getJoinTree()); + + while (!join_tree_nodes_to_process.empty()) + { + auto join_tree_node_to_process = join_tree_nodes_to_process.back(); + join_tree_nodes_to_process.pop_back(); + + auto join_tree_node_type = join_tree_node_to_process->getNodeType(); + + switch (join_tree_node_type) + { + case QueryTreeNodeType::TABLE: + [[fallthrough]]; + case QueryTreeNodeType::TABLE_FUNCTION: + { + break; + } + case QueryTreeNodeType::QUERY: + { + auto & query_node_to_process = join_tree_node_to_process->as(); + if (query_node_to_process.isGroupByWithTotals()) + return true; + + join_tree_nodes_to_process.push_back(query_node_to_process.getJoinTree()); + break; + } + case QueryTreeNodeType::UNION: + { + auto & union_node = join_tree_node_to_process->as(); + auto & union_queries = union_node.getQueries().getNodes(); + + for (auto & union_query : union_queries) + join_tree_nodes_to_process.push_back(union_query); + break; + } + case QueryTreeNodeType::ARRAY_JOIN: + { + auto & array_join_node = join_tree_node_to_process->as(); + join_tree_nodes_to_process.push_back(array_join_node.getTableExpression()); + break; + } + case QueryTreeNodeType::JOIN: + { + auto & join_node = join_tree_node_to_process->as(); + join_tree_nodes_to_process.push_back(join_node.getLeftTableExpression()); + join_tree_nodes_to_process.push_back(join_node.getRightTableExpression()); + break; + } + default: + { + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Unexpected node type for table expression. Expected table, table function, query, union, join or array join. Actual {}", + join_tree_node_to_process->getNodeTypeName()); + } + } + } + + return false; +} + +} diff --git a/src/Planner/Utils.h b/src/Planner/Utils.h new file mode 100644 index 00000000000..909cea3bf8f --- /dev/null +++ b/src/Planner/Utils.h @@ -0,0 +1,59 @@ +#pragma once + +#include +#include + +#include + +#include +#include + +#include + +#include + +#include + +#include + +namespace DB +{ + +/// Dump query plan +String dumpQueryPlan(QueryPlan & query_plan); + +/// Dump query plan result pipeline +String dumpQueryPipeline(QueryPlan & query_plan); + +/// Build common header for UNION query +Block buildCommonHeaderForUnion(const Blocks & queries_headers); + +/// Convert query node to ASTSelectQuery +ASTPtr queryNodeToSelectQuery(const QueryTreeNodePtr & query_node); + +/// Build context for subquery execution +ContextPtr buildSubqueryContext(const ContextPtr & context); + +/// Build limits for storage +StorageLimits buildStorageLimits(const Context & context, const SelectQueryOptions & options); + +/** Convert query tree expression node into actions dag. + * Inputs are not used for actions dag outputs. + * Only root query tree expression node is used as actions dag output. 
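+ * + * For example, building a DAG for the expression `id + 1` over input columns (id, value) is expected to produce a single output node for `plus(id, 1)`; the unused `value` input remains in the DAG but is not listed as an output.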
+ */ +ActionsDAGPtr buildActionsDAGFromExpressionNode(const QueryTreeNodePtr & expression_node, + const ColumnsWithTypeAndName & input_columns, + const PlannerContextPtr & planner_context); + +/// Returns true if prefix sort description is prefix of full sort descriptor, false otherwise +bool sortDescriptionIsPrefix(const SortDescription & prefix, const SortDescription & full); + +/// Returns true if query node JOIN TREE contains ARRAY JOIN node, false otherwise +bool queryHasArrayJoinInJoinTree(const QueryTreeNodePtr & query_node); + +/** Returns true if query node JOIN TREE contains QUERY node with WITH TOTALS, false otherwise. + * Function is applied recursively to subqueries in JOIN TREE. + */ +bool queryHasWithTotalsInAnySubqueryInJoinTree(const QueryTreeNodePtr & query_node); + +} diff --git a/src/Planner/examples/CMakeLists.txt b/src/Planner/examples/CMakeLists.txt new file mode 100644 index 00000000000..e69de29bb2d diff --git a/src/Planner/tests/CMakeLists.txt b/src/Planner/tests/CMakeLists.txt new file mode 100644 index 00000000000..e69de29bb2d diff --git a/src/Processors/Executors/CompletedPipelineExecutor.cpp b/src/Processors/Executors/CompletedPipelineExecutor.cpp index 9e5ea3916bc..a4c7fe2f687 100644 --- a/src/Processors/Executors/CompletedPipelineExecutor.cpp +++ b/src/Processors/Executors/CompletedPipelineExecutor.cpp @@ -72,9 +72,9 @@ void CompletedPipelineExecutor::execute() data->executor = std::make_shared(pipeline.processors, pipeline.process_list_element); data->executor->setReadProgressCallback(pipeline.getReadProgressCallback()); - /// Avoid passing this to labmda, copy ptr to data instead. + /// Avoid passing this to lambda, copy ptr to data instead. /// Destructor of unique_ptr copy raw ptr into local variable first, only then calls object destructor. - auto func = [data_ptr = data.get(), num_threads = pipeline.getNumThreads(), thread_group = CurrentThread::getGroup()]() + auto func = [data_ptr = data.get(), num_threads = pipeline.getNumThreads(), thread_group = CurrentThread::getGroup()] { threadFunction(*data_ptr, thread_group, num_threads); }; diff --git a/src/Processors/Executors/CompletedPipelineExecutor.h b/src/Processors/Executors/CompletedPipelineExecutor.h index e616cd6a2b7..65fab6035b1 100644 --- a/src/Processors/Executors/CompletedPipelineExecutor.h +++ b/src/Processors/Executors/CompletedPipelineExecutor.h @@ -1,7 +1,9 @@ #pragma once + #include #include + namespace DB { diff --git a/src/Processors/Executors/ExecutingGraph.cpp b/src/Processors/Executors/ExecutingGraph.cpp index 651ede10cfd..4ab2c5b3802 100644 --- a/src/Processors/Executors/ExecutingGraph.cpp +++ b/src/Processors/Executors/ExecutingGraph.cpp @@ -10,17 +10,17 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -ExecutingGraph::ExecutingGraph(Processors & processors_, bool profile_processors_) - : processors(processors_) +ExecutingGraph::ExecutingGraph(std::shared_ptr processors_, bool profile_processors_) + : processors(std::move(processors_)) , profile_processors(profile_processors_) { - uint64_t num_processors = processors.size(); + uint64_t num_processors = processors->size(); nodes.reserve(num_processors); /// Create nodes. for (uint64_t node = 0; node < num_processors; ++node) { - IProcessor * proc = processors[node].get(); + IProcessor * proc = processors->at(node).get(); processors_map[proc] = node; nodes.emplace_back(std::make_unique(proc, node)); } @@ -71,7 +71,7 @@ bool ExecutingGraph::addEdges(uint64_t node) } } - /// Add direct edges form output ports. 
+ /// Add direct edges from output ports. auto & outputs = from->getOutputs(); auto from_output = nodes[node]->direct_edges.size(); @@ -109,10 +109,10 @@ bool ExecutingGraph::expandPipeline(std::stack & stack, uint64_t pid) { std::lock_guard guard(processors_mutex); - processors.insert(processors.end(), new_processors.begin(), new_processors.end()); + processors->insert(processors->end(), new_processors.begin(), new_processors.end()); } - uint64_t num_processors = processors.size(); + uint64_t num_processors = processors->size(); std::vector back_edges_sizes(num_processors, 0); std::vector direct_edge_sizes(num_processors, 0); @@ -126,7 +126,7 @@ bool ExecutingGraph::expandPipeline(std::stack & stack, uint64_t pid) while (nodes.size() < num_processors) { - auto * processor = processors[nodes.size()].get(); + auto * processor = processors->at(nodes.size()).get(); if (processors_map.contains(processor)) throw Exception(ErrorCodes::LOGICAL_ERROR, "Processor {} was already added to pipeline", processor->getName()); @@ -386,7 +386,7 @@ bool ExecutingGraph::updateNode(uint64_t pid, Queue & queue, Queue & async_queue void ExecutingGraph::cancel() { std::lock_guard guard(processors_mutex); - for (auto & processor : processors) + for (auto & processor : *processors) processor->cancel(); } diff --git a/src/Processors/Executors/ExecutingGraph.h b/src/Processors/Executors/ExecutingGraph.h index 587a2561ae0..b374f968122 100644 --- a/src/Processors/Executors/ExecutingGraph.h +++ b/src/Processors/Executors/ExecutingGraph.h @@ -1,4 +1,5 @@ #pragma once + #include #include #include @@ -6,6 +7,7 @@ #include #include + namespace DB { @@ -123,9 +125,9 @@ public: using ProcessorsMap = std::unordered_map; ProcessorsMap processors_map; - explicit ExecutingGraph(Processors & processors_, bool profile_processors_); + explicit ExecutingGraph(std::shared_ptr processors_, bool profile_processors_); - const Processors & getProcessors() const { return processors; } + const Processors & getProcessors() const { return *processors; } /// Traverse graph the first time to update all the childless nodes. void initializeExecution(Queue & queue); @@ -149,7 +151,7 @@ private: /// All new nodes and nodes with updated ports are pushed into stack. bool expandPipeline(std::stack & stack, uint64_t pid); - Processors & processors; + std::shared_ptr processors; std::mutex processors_mutex; UpgradableMutex nodes_mutex; diff --git a/src/Processors/Executors/PipelineExecutor.cpp b/src/Processors/Executors/PipelineExecutor.cpp index ae20d97604b..3772381de04 100644 --- a/src/Processors/Executors/PipelineExecutor.cpp +++ b/src/Processors/Executors/PipelineExecutor.cpp @@ -15,6 +15,7 @@ #include #endif + namespace DB { @@ -24,8 +25,8 @@ namespace ErrorCodes } -PipelineExecutor::PipelineExecutor(Processors & processors, QueryStatus * elem) - : process_list_element(elem) +PipelineExecutor::PipelineExecutor(std::shared_ptr & processors, QueryStatusPtr elem) + : process_list_element(std::move(elem)) { if (process_list_element) { @@ -41,7 +42,7 @@ PipelineExecutor::PipelineExecutor(Processors & processors, QueryStatus * elem) /// If exception was thrown while pipeline initialization, it means that query pipeline was not build correctly. /// It is logical error, and we need more information about pipeline. 
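/// Dump the pipeline graph into the exception message to make the failure easier to debug.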
WriteBufferFromOwnString buf; - printPipeline(processors, buf); + printPipeline(*processors, buf); buf.finalize(); exception.addMessage("Query pipeline:\n" + buf.str()); diff --git a/src/Processors/Executors/PipelineExecutor.h b/src/Processors/Executors/PipelineExecutor.h index cea64d309fa..21bde312cbc 100644 --- a/src/Processors/Executors/PipelineExecutor.h +++ b/src/Processors/Executors/PipelineExecutor.h @@ -10,16 +10,19 @@ #include #include + namespace DB { class QueryStatus; +using QueryStatusPtr = std::shared_ptr; class ExecutingGraph; using ExecutingGraphPtr = std::unique_ptr; class ReadProgressCallback; using ReadProgressCallbackPtr = std::unique_ptr; + /// Executes query pipeline. class PipelineExecutor { @@ -30,7 +33,7 @@ public: /// During pipeline execution new processors can appear. They will be added to existing set. /// /// Explicit graph representation is built in constructor. Throws if graph is not correct. - explicit PipelineExecutor(Processors & processors, QueryStatus * elem); + explicit PipelineExecutor(std::shared_ptr & processors, QueryStatusPtr elem); ~PipelineExecutor(); /// Execute pipeline in multiple threads. Must be called once. @@ -79,7 +82,7 @@ private: Poco::Logger * log = &Poco::Logger::get("PipelineExecutor"); /// Now it's used to check if query was killed. - QueryStatus * const process_list_element = nullptr; + QueryStatusPtr process_list_element; ReadProgressCallbackPtr read_progress_callback; diff --git a/src/Processors/Executors/PushingAsyncPipelineExecutor.cpp b/src/Processors/Executors/PushingAsyncPipelineExecutor.cpp index 7a55d26f16c..ee8e94b6f28 100644 --- a/src/Processors/Executors/PushingAsyncPipelineExecutor.cpp +++ b/src/Processors/Executors/PushingAsyncPipelineExecutor.cpp @@ -129,7 +129,7 @@ PushingAsyncPipelineExecutor::PushingAsyncPipelineExecutor(QueryPipeline & pipel pushing_source = std::make_shared(pipeline.input->getHeader()); connect(pushing_source->getPort(), *pipeline.input); - pipeline.processors.emplace_back(pushing_source); + pipeline.processors->emplace_back(pushing_source); } PushingAsyncPipelineExecutor::~PushingAsyncPipelineExecutor() diff --git a/src/Processors/Executors/PushingPipelineExecutor.cpp b/src/Processors/Executors/PushingPipelineExecutor.cpp index bf43cd327fe..d9a14704cd0 100644 --- a/src/Processors/Executors/PushingPipelineExecutor.cpp +++ b/src/Processors/Executors/PushingPipelineExecutor.cpp @@ -58,7 +58,7 @@ PushingPipelineExecutor::PushingPipelineExecutor(QueryPipeline & pipeline_) : pi pushing_source = std::make_shared(pipeline.input->getHeader(), input_wait_flag); connect(pushing_source->getPort(), *pipeline.input); - pipeline.processors.emplace_back(pushing_source); + pipeline.processors->emplace_back(pushing_source); } PushingPipelineExecutor::~PushingPipelineExecutor() diff --git a/src/Processors/Formats/IRowInputFormat.cpp b/src/Processors/Formats/IRowInputFormat.cpp index 52395338279..6f153019df5 100644 --- a/src/Processors/Formats/IRowInputFormat.cpp +++ b/src/Processors/Formats/IRowInputFormat.cpp @@ -188,7 +188,7 @@ Chunk IRowInputFormat::generate() } e.setFileName(getFileNameFromReadBuffer(getReadBuffer())); - e.setLineNumber(total_rows); + e.setLineNumber(static_cast(total_rows)); e.addMessage(verbose_diagnostic); throw; } @@ -232,7 +232,9 @@ Chunk IRowInputFormat::generate() return {}; } - finalizeObjectColumns(columns); + for (const auto & column : columns) + column->finalize(); + Chunk chunk(std::move(columns), num_rows); return chunk; } diff --git a/src/Processors/Formats/ISchemaReader.cpp 
b/src/Processors/Formats/ISchemaReader.cpp index 0e4d3f091b2..a26ed6b0b40 100644 --- a/src/Processors/Formats/ISchemaReader.cpp +++ b/src/Processors/Formats/ISchemaReader.cpp @@ -132,6 +132,16 @@ NamesAndTypesList IRowSchemaReader::readSchema() ErrorCodes::INCORRECT_DATA, "The number of column names {} differs with the number of types {}", column_names.size(), data_types.size()); } + else + { + std::unordered_set names_set; + for (const auto & name : column_names) + { + if (names_set.contains(name)) + throw Exception(ErrorCodes::INCORRECT_DATA, "Duplicate column name found while schema inference: \"{}\"", name); + names_set.insert(name); + } + } for (size_t i = 0; i != column_names.size(); ++i) { @@ -224,6 +234,9 @@ NamesAndTypesList IRowWithNamesSchemaReader::readSchema() names_order.reserve(names_and_types.size()); for (const auto & [name, type] : names_and_types) { + if (names_to_types.contains(name)) + throw Exception(ErrorCodes::INCORRECT_DATA, "Duplicate column name found while schema inference: \"{}\"", name); + auto hint_it = hints.find(name); if (hint_it != hints.end()) names_to_types[name] = hint_it->second; @@ -240,8 +253,13 @@ NamesAndTypesList IRowWithNamesSchemaReader::readSchema() /// We reached eof. break; + std::unordered_set names_set; /// We should check for duplicate column names in current row for (auto & [name, new_type] : new_names_and_types) { + if (names_set.contains(name)) + throw Exception(ErrorCodes::INCORRECT_DATA, "Duplicate column name found while schema inference: \"{}\"", name); + names_set.insert(name); + auto it = names_to_types.find(name); /// If we didn't see this column before, just add it. if (it == names_to_types.end()) diff --git a/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp b/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp index 70510a165e6..e9b01ec7dda 100644 --- a/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp +++ b/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp @@ -82,7 +82,7 @@ static ColumnWithTypeAndName readColumnWithNumericData(std::shared_ptr(*internal_column).getData(); column_data.reserve(arrow_column->length()); - for (size_t chunk_i = 0, num_chunks = static_cast(arrow_column->num_chunks()); chunk_i < num_chunks; ++chunk_i) + for (int chunk_i = 0, num_chunks = arrow_column->num_chunks(); chunk_i < num_chunks; ++chunk_i) { std::shared_ptr chunk = arrow_column->chunk(chunk_i); if (chunk->length() == 0) @@ -108,7 +108,7 @@ static ColumnWithTypeAndName readColumnWithStringData(std::shared_ptr & column_offsets = assert_cast(*internal_column).getOffsets(); size_t chars_t_size = 0; - for (size_t chunk_i = 0, num_chunks = static_cast(arrow_column->num_chunks()); chunk_i < num_chunks; ++chunk_i) + for (int chunk_i = 0, num_chunks = arrow_column->num_chunks(); chunk_i < num_chunks; ++chunk_i) { ArrowArray & chunk = dynamic_cast(*(arrow_column->chunk(chunk_i))); const size_t chunk_length = chunk.length(); @@ -123,7 +123,7 @@ static ColumnWithTypeAndName readColumnWithStringData(std::shared_ptrlength()); - for (size_t chunk_i = 0, num_chunks = static_cast(arrow_column->num_chunks()); chunk_i < num_chunks; ++chunk_i) + for (int chunk_i = 0, num_chunks = arrow_column->num_chunks(); chunk_i < num_chunks; ++chunk_i) { ArrowArray & chunk = dynamic_cast(*(arrow_column->chunk(chunk_i))); std::shared_ptr buffer = chunk.value_data(); @@ -151,7 +151,7 @@ static ColumnWithTypeAndName readColumnWithBooleanData(std::shared_ptr &>(*internal_column).getData(); column_data.reserve(arrow_column->length()); - for (size_t chunk_i = 0, 
num_chunks = static_cast(arrow_column->num_chunks()); chunk_i < num_chunks; ++chunk_i) + for (int chunk_i = 0, num_chunks = arrow_column->num_chunks(); chunk_i < num_chunks; ++chunk_i) { arrow::BooleanArray & chunk = dynamic_cast(*(arrow_column->chunk(chunk_i))); if (chunk.length() == 0) @@ -173,7 +173,7 @@ static ColumnWithTypeAndName readColumnWithDate32Data(std::shared_ptr & column_data = assert_cast &>(*internal_column).getData(); column_data.reserve(arrow_column->length()); - for (size_t chunk_i = 0, num_chunks = static_cast(arrow_column->num_chunks()); chunk_i < num_chunks; ++chunk_i) + for (int chunk_i = 0, num_chunks = arrow_column->num_chunks(); chunk_i < num_chunks; ++chunk_i) { arrow::Date32Array & chunk = dynamic_cast(*(arrow_column->chunk(chunk_i))); @@ -198,7 +198,7 @@ static ColumnWithTypeAndName readColumnWithDate64Data(std::shared_ptr &>(*internal_column).getData(); column_data.reserve(arrow_column->length()); - for (size_t chunk_i = 0, num_chunks = static_cast(arrow_column->num_chunks()); chunk_i < num_chunks; ++chunk_i) + for (int chunk_i = 0, num_chunks = arrow_column->num_chunks(); chunk_i < num_chunks; ++chunk_i) { auto & chunk = dynamic_cast(*(arrow_column->chunk(chunk_i))); for (size_t value_i = 0, length = static_cast(chunk.length()); value_i < length; ++value_i) @@ -219,7 +219,7 @@ static ColumnWithTypeAndName readColumnWithTimestampData(std::shared_ptr &>(*internal_column).getData(); column_data.reserve(arrow_column->length()); - for (size_t chunk_i = 0, num_chunks = static_cast(arrow_column->num_chunks()); chunk_i < num_chunks; ++chunk_i) + for (int chunk_i = 0, num_chunks = arrow_column->num_chunks(); chunk_i < num_chunks; ++chunk_i) { const auto & chunk = dynamic_cast(*(arrow_column->chunk(chunk_i))); for (size_t value_i = 0, length = static_cast(chunk.length()); value_i < length; ++value_i) @@ -239,7 +239,7 @@ static ColumnWithTypeAndName readColumnWithTimeData(std::shared_ptrcreateColumn(); internal_column->reserve(arrow_column->length()); - for (size_t chunk_i = 0, num_chunks = static_cast(arrow_column->num_chunks()); chunk_i < num_chunks; ++chunk_i) + for (int chunk_i = 0, num_chunks = arrow_column->num_chunks(); chunk_i < num_chunks; ++chunk_i) { auto & chunk = dynamic_cast(*(arrow_column->chunk(chunk_i))); if (chunk.length() == 0) @@ -272,7 +272,7 @@ static ColumnWithTypeAndName readColumnWithDecimalDataImpl(std::shared_ptrlength()); - for (size_t chunk_i = 0, num_chunks = static_cast(arrow_column->num_chunks()); chunk_i < num_chunks; ++chunk_i) + for (int chunk_i = 0, num_chunks = arrow_column->num_chunks(); chunk_i < num_chunks; ++chunk_i) { auto & chunk = dynamic_cast(*(arrow_column->chunk(chunk_i))); for (size_t value_i = 0, length = static_cast(chunk.length()); value_i < length; ++value_i) @@ -308,7 +308,7 @@ static ColumnPtr readByteMapFromArrowColumn(std::shared_ptr PaddedPODArray & bytemap_data = assert_cast &>(*nullmap_column).getData(); bytemap_data.reserve(arrow_column->length()); - for (size_t chunk_i = 0; chunk_i != static_cast(arrow_column->num_chunks()); ++chunk_i) + for (int chunk_i = 0; chunk_i != arrow_column->num_chunks(); ++chunk_i) { std::shared_ptr chunk = arrow_column->chunk(chunk_i); @@ -324,7 +324,7 @@ static ColumnPtr readOffsetsFromArrowListColumn(std::shared_ptr &>(*offsets_column).getData(); offsets_data.reserve(arrow_column->length()); - for (size_t chunk_i = 0, num_chunks = static_cast(arrow_column->num_chunks()); chunk_i < num_chunks; ++chunk_i) + for (int chunk_i = 0, num_chunks = arrow_column->num_chunks(); chunk_i < 
num_chunks; ++chunk_i) { arrow::ListArray & list_chunk = dynamic_cast(*(arrow_column->chunk(chunk_i))); auto arrow_offsets_array = list_chunk.offsets(); @@ -356,7 +356,7 @@ static ColumnWithTypeAndName readColumnWithIndexesDataImpl(std::shared_ptrlength()); NumericType shift = is_nullable ? 2 : 1; - for (size_t chunk_i = 0, num_chunks = static_cast(arrow_column->num_chunks()); chunk_i < num_chunks; ++chunk_i) + for (int chunk_i = 0, num_chunks = arrow_column->num_chunks(); chunk_i < num_chunks; ++chunk_i) { std::shared_ptr chunk = arrow_column->chunk(chunk_i); if (chunk->length() == 0) @@ -450,7 +450,8 @@ static ColumnPtr readColumnWithIndexesData(std::shared_ptr # define DISPATCH(ARROW_NUMERIC_TYPE, CPP_NUMERIC_TYPE) \ case ARROW_NUMERIC_TYPE: \ { \ - return readColumnWithIndexesDataImpl(arrow_column, "", default_value_index, dict_size, is_nullable).column; \ + return readColumnWithIndexesDataImpl(\ + arrow_column, "", default_value_index, static_cast(dict_size), is_nullable).column; \ } FOR_ARROW_INDEXES_TYPES(DISPATCH) # undef DISPATCH @@ -463,7 +464,7 @@ static std::shared_ptr getNestedArrowColumn(std::shared_ptr { arrow::ArrayVector array_vector; array_vector.reserve(arrow_column->num_chunks()); - for (size_t chunk_i = 0, num_chunks = static_cast(arrow_column->num_chunks()); chunk_i < num_chunks; ++chunk_i) + for (int chunk_i = 0, num_chunks = arrow_column->num_chunks(); chunk_i < num_chunks; ++chunk_i) { arrow::ListArray & list_chunk = dynamic_cast(*(arrow_column->chunk(chunk_i))); std::shared_ptr chunk = list_chunk.values(); @@ -582,7 +583,7 @@ static ColumnWithTypeAndName readColumnFromArrowColumn( auto arrow_type = arrow_column->type(); auto * arrow_struct_type = assert_cast(arrow_type.get()); std::vector nested_arrow_columns(arrow_struct_type->num_fields()); - for (size_t chunk_i = 0, num_chunks = static_cast(arrow_column->num_chunks()); chunk_i < num_chunks; ++chunk_i) + for (int chunk_i = 0, num_chunks = arrow_column->num_chunks(); chunk_i < num_chunks; ++chunk_i) { arrow::StructArray & struct_chunk = dynamic_cast(*(arrow_column->chunk(chunk_i))); for (int i = 0; i < arrow_struct_type->num_fields(); ++i) @@ -631,7 +632,7 @@ static ColumnWithTypeAndName readColumnFromArrowColumn( if (!dict_info.values) { arrow::ArrayVector dict_array; - for (size_t chunk_i = 0, num_chunks = static_cast(arrow_column->num_chunks()); chunk_i < num_chunks; ++chunk_i) + for (int chunk_i = 0, num_chunks = arrow_column->num_chunks(); chunk_i < num_chunks; ++chunk_i) { arrow::DictionaryArray & dict_chunk = dynamic_cast(*(arrow_column->chunk(chunk_i))); dict_array.emplace_back(dict_chunk.dictionary()); @@ -656,7 +657,7 @@ static ColumnWithTypeAndName readColumnFromArrowColumn( } arrow::ArrayVector indexes_array; - for (size_t chunk_i = 0, num_chunks = static_cast(arrow_column->num_chunks()); chunk_i < num_chunks; ++chunk_i) + for (int chunk_i = 0, num_chunks = arrow_column->num_chunks(); chunk_i < num_chunks; ++chunk_i) { arrow::DictionaryArray & dict_chunk = dynamic_cast(*(arrow_column->chunk(chunk_i))); indexes_array.emplace_back(dict_chunk.indices()); diff --git a/src/Processors/Formats/Impl/AvroRowInputFormat.cpp b/src/Processors/Formats/Impl/AvroRowInputFormat.cpp index 0ec04c61321..80183838277 100644 --- a/src/Processors/Formats/Impl/AvroRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/AvroRowInputFormat.cpp @@ -283,14 +283,15 @@ AvroDeserializer::DeserializeFn AvroDeserializer::createDeserializeFn(avro::Node if (root_node->leaves() == 2 && (root_node->leafAt(0)->type() == avro::AVRO_NULL || 
root_node->leafAt(1)->type() == avro::AVRO_NULL)) { - size_t non_null_union_index = root_node->leafAt(0)->type() == avro::AVRO_NULL ? 1 : 0; + int non_null_union_index = root_node->leafAt(0)->type() == avro::AVRO_NULL ? 1 : 0; if (target.isNullable()) { - auto nested_deserialize = this->createDeserializeFn(root_node->leafAt(non_null_union_index), removeNullable(target_type)); + auto nested_deserialize = this->createDeserializeFn( + root_node->leafAt(non_null_union_index), removeNullable(target_type)); return [non_null_union_index, nested_deserialize](IColumn & column, avro::Decoder & decoder) { ColumnNullable & col = assert_cast(column); - size_t union_index = decoder.decodeUnionIndex(); + int union_index = static_cast(decoder.decodeUnionIndex()); if (union_index == non_null_union_index) { nested_deserialize(col.getNestedColumn(), decoder); @@ -308,7 +309,7 @@ AvroDeserializer::DeserializeFn AvroDeserializer::createDeserializeFn(avro::Node auto nested_deserialize = this->createDeserializeFn(root_node->leafAt(non_null_union_index), target_type); return [non_null_union_index, nested_deserialize](IColumn & column, avro::Decoder & decoder) { - size_t union_index = decoder.decodeUnionIndex(); + int union_index = static_cast(decoder.decodeUnionIndex()); if (union_index == non_null_union_index) nested_deserialize(column, decoder); else @@ -345,7 +346,8 @@ AvroDeserializer::DeserializeFn AvroDeserializer::createDeserializeFn(avro::Node if (target.isString()) { std::vector symbols; - for (size_t i = 0; i < root_node->names(); ++i) + symbols.reserve(root_node->names()); + for (int i = 0; i < static_cast(root_node->names()); ++i) { symbols.push_back(root_node->nameAt(i)); } @@ -360,7 +362,7 @@ AvroDeserializer::DeserializeFn AvroDeserializer::createDeserializeFn(avro::Node { const auto & enum_type = dynamic_cast(*target_type); Row symbol_mapping; - for (size_t i = 0; i < root_node->names(); ++i) + for (int i = 0; i < static_cast(root_node->names()); ++i) { symbol_mapping.push_back(enum_type.castToValue(root_node->nameAt(i))); } @@ -397,7 +399,7 @@ AvroDeserializer::DeserializeFn AvroDeserializer::createDeserializeFn(avro::Node if (root_node->leaves() != nested_types.size()) throw Exception(ErrorCodes::INCORRECT_DATA, "The number of leaves in record doesn't match the number of elements in tuple"); - for (size_t i = 0; i != root_node->leaves(); ++i) + for (int i = 0; i != static_cast(root_node->leaves()); ++i) { const auto & name = root_node->nameAt(i); size_t pos = tuple_type.getPositionByName(name); @@ -505,7 +507,8 @@ AvroDeserializer::SkipFn AvroDeserializer::createSkipFn(avro::NodePtr root_node) case avro::AVRO_UNION: { std::vector union_skip_fns; - for (size_t i = 0; i < root_node->leaves(); ++i) + union_skip_fns.reserve(root_node->leaves()); + for (int i = 0; i < static_cast(root_node->leaves()); ++i) { union_skip_fns.push_back(createSkipFn(root_node->leafAt(i))); } @@ -546,7 +549,8 @@ AvroDeserializer::SkipFn AvroDeserializer::createSkipFn(avro::NodePtr root_node) case avro::AVRO_RECORD: { std::vector field_skip_fns; - for (size_t i = 0; i < root_node->leaves(); ++i) + field_skip_fns.reserve(root_node->leaves()); + for (int i = 0; i < static_cast(root_node->leaves()); ++i) { field_skip_fns.push_back(createSkipFn(root_node->leafAt(i))); } @@ -633,7 +637,7 @@ AvroDeserializer::Action AvroDeserializer::createAction(const Block & header, co const auto & column = header.getByPosition(target_column_idx); try { - AvroDeserializer::Action action(target_column_idx, createDeserializeFn(node, 
column.type)); + AvroDeserializer::Action action(static_cast(target_column_idx), createDeserializeFn(node, column.type)); column_found[target_column_idx] = true; return action; } @@ -646,7 +650,7 @@ AvroDeserializer::Action AvroDeserializer::createAction(const Block & header, co else if (node->type() == avro::AVRO_RECORD) { std::vector field_actions(node->leaves()); - for (size_t i = 0; i < node->leaves(); ++i) + for (int i = 0; i < static_cast(node->leaves()); ++i) { const auto & field_node = node->leafAt(i); const auto & field_name = node->nameAt(i); @@ -657,7 +661,7 @@ AvroDeserializer::Action AvroDeserializer::createAction(const Block & header, co else if (node->type() == avro::AVRO_UNION) { std::vector branch_actions(node->leaves()); - for (size_t i = 0; i < node->leaves(); ++i) + for (int i = 0; i < static_cast(node->leaves()); ++i) { const auto & branch_node = node->leafAt(i); const auto & branch_name = nodeName(branch_node); @@ -687,7 +691,7 @@ AvroDeserializer::Action AvroDeserializer::createAction(const Block & header, co /// Create nested deserializer for each nested column. std::vector nested_deserializers; std::vector nested_indexes; - for (size_t i = 0; i != nested_avro_node->leaves(); ++i) + for (int i = 0; i != static_cast(nested_avro_node->leaves()); ++i) { const auto & name = nested_avro_node->nameAt(i); if (!nested_types.contains(name)) @@ -970,7 +974,7 @@ NamesAndTypesList AvroSchemaReader::readSchema() throw Exception("Root schema must be a record", ErrorCodes::TYPE_MISMATCH); NamesAndTypesList names_and_types; - for (size_t i = 0; i != root_node->leaves(); ++i) + for (int i = 0; i != static_cast(root_node->leaves()); ++i) names_and_types.emplace_back(root_node->nameAt(i), avroNodeToDataType(root_node->leafAt(i))); return names_and_types; @@ -999,14 +1003,14 @@ DataTypePtr AvroSchemaReader::avroNodeToDataType(avro::NodePtr node) if (node->names() < 128) { EnumValues::Values values; - for (size_t i = 0; i != node->names(); ++i) + for (int i = 0; i != static_cast(node->names()); ++i) values.emplace_back(node->nameAt(i), i); return std::make_shared(std::move(values)); } else if (node->names() < 32768) { EnumValues::Values values; - for (size_t i = 0; i != node->names(); ++i) + for (int i = 0; i != static_cast(node->names()); ++i) values.emplace_back(node->nameAt(i), i); return std::make_shared(std::move(values)); } @@ -1022,7 +1026,7 @@ DataTypePtr AvroSchemaReader::avroNodeToDataType(avro::NodePtr node) case avro::Type::AVRO_UNION: if (node->leaves() == 2 && (node->leafAt(0)->type() == avro::Type::AVRO_NULL || node->leafAt(1)->type() == avro::Type::AVRO_NULL)) { - size_t nested_leaf_index = node->leafAt(0)->type() == avro::Type::AVRO_NULL ? 1 : 0; + int nested_leaf_index = node->leafAt(0)->type() == avro::Type::AVRO_NULL ? 1 : 0; auto nested_type = avroNodeToDataType(node->leafAt(nested_leaf_index)); return nested_type->canBeInsideNullable() ? 
makeNullable(nested_type) : nested_type; } @@ -1035,7 +1039,7 @@ DataTypePtr AvroSchemaReader::avroNodeToDataType(avro::NodePtr node) nested_types.reserve(node->leaves()); Names nested_names; nested_names.reserve(node->leaves()); - for (size_t i = 0; i != node->leaves(); ++i) + for (int i = 0; i != static_cast(node->leaves()); ++i) { nested_types.push_back(avroNodeToDataType(node->leafAt(i))); nested_names.push_back(node->nameAt(i)); diff --git a/src/Processors/Formats/Impl/AvroRowOutputFormat.cpp b/src/Processors/Formats/Impl/AvroRowOutputFormat.cpp index b63b1e7b9b1..e3d570d1876 100644 --- a/src/Processors/Formats/Impl/AvroRowOutputFormat.cpp +++ b/src/Processors/Formats/Impl/AvroRowOutputFormat.cpp @@ -193,7 +193,7 @@ AvroSerializer::SchemaWithSerializeFn AvroSerializer::createSchemaWithSerializeF case TypeIndex::FixedString: { auto size = data_type->getSizeOfValueInMemory(); - auto schema = avro::FixedSchema(size, "fixed_" + toString(type_name_increment)); + auto schema = avro::FixedSchema(static_cast(size), "fixed_" + toString(type_name_increment)); return {schema, [](const IColumn & column, size_t row_num, avro::Encoder & encoder) { const std::string_view & s = assert_cast(column).getDataAt(row_num).toView(); diff --git a/src/Processors/Formats/Impl/CHColumnToArrowColumn.cpp b/src/Processors/Formats/Impl/CHColumnToArrowColumn.cpp index 9e423f8a96b..87fff16c107 100644 --- a/src/Processors/Formats/Impl/CHColumnToArrowColumn.cpp +++ b/src/Processors/Formats/Impl/CHColumnToArrowColumn.cpp @@ -224,7 +224,14 @@ namespace DB for (size_t i = 0; i != column_tuple->tupleSize(); ++i) { ColumnPtr nested_column = column_tuple->getColumnPtr(i); - fillArrowArray(column_name + "." + nested_names[i], nested_column, nested_types[i], null_bytemap, builder.field_builder(i), format_name, start, end, output_string_as_string, dictionary_values); + fillArrowArray( + column_name + "." 
+ nested_names[i], + nested_column, nested_types[i], null_bytemap, + builder.field_builder(static_cast(i)), + format_name, + start, end, + output_string_as_string, + dictionary_values); } for (size_t i = start; i != end; ++i) @@ -370,7 +377,7 @@ namespace DB else { std::string_view string_ref = internal_column.getDataAt(string_i).toView(); - status = builder.Append(string_ref.data(), string_ref.size()); + status = builder.Append(string_ref.data(), static_cast(string_ref.size())); } checkStatus(status, write_column->getName(), format_name); } diff --git a/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp b/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp index c6f8742455e..08d2cac743a 100644 --- a/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp @@ -91,7 +91,7 @@ static void insertSignedInteger(IColumn & column, const DataTypePtr & column_typ assert_cast(column).insertValue(value); break; case TypeIndex::Int32: - assert_cast(column).insertValue(value); + assert_cast(column).insertValue(static_cast(value)); break; case TypeIndex::Int64: assert_cast(column).insertValue(value); @@ -117,7 +117,7 @@ static void insertUnsignedInteger(IColumn & column, const DataTypePtr & column_t break; case TypeIndex::DateTime: [[fallthrough]]; case TypeIndex::UInt32: - assert_cast(column).insertValue(value); + assert_cast(column).insertValue(static_cast(value)); break; case TypeIndex::UInt64: assert_cast(column).insertValue(value); diff --git a/src/Processors/Formats/Impl/CapnProtoRowOutputFormat.cpp b/src/Processors/Formats/Impl/CapnProtoRowOutputFormat.cpp index fd33abfb587..654917b6357 100644 --- a/src/Processors/Formats/Impl/CapnProtoRowOutputFormat.cpp +++ b/src/Processors/Formats/Impl/CapnProtoRowOutputFormat.cpp @@ -79,7 +79,7 @@ static capnp::DynamicValue::Builder initStructFieldBuilder(const ColumnPtr & col if (const auto * array_column = checkAndGetColumn(*column)) { size_t size = array_column->getOffsets()[row_num] - array_column->getOffsets()[row_num - 1]; - return struct_builder.init(field, size); + return struct_builder.init(field, static_cast(size)); } if (field.getType().isStruct()) @@ -200,7 +200,7 @@ static std::optional convertToDynamicValue( size_t size = offsets[row_num] - offset; const auto * nested_array_column = checkAndGetColumn(*nested_column); - for (size_t i = 0; i != size; ++i) + for (unsigned i = 0; i != static_cast(size); ++i) { capnp::DynamicValue::Builder value_builder; /// For nested arrays we need to initialize nested list builder. 
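// --- Illustrative sketch (editorial addition, not part of the patch above) ---
// A recurring change in these format hunks is adding an explicit static_cast where a
// size_t value is handed to an API that takes a 32-bit size (Arrow string builders,
// Cap'n Proto list init, msgpack packers). The hypothetical helper below shows the
// intent behind such narrowing casts; the patch itself uses a plain static_cast where
// the value is already known to fit.

#include <cstddef>
#include <limits>
#include <stdexcept>
#include <type_traits>

template <typename Narrow, typename Wide>
Narrow checkedNarrow(Wide value)
{
    static_assert(std::is_unsigned_v<Narrow> && std::is_unsigned_v<Wide>,
                  "sketch only handles unsigned-to-unsigned narrowing");
    // Reject values that would not survive the round trip through the narrower type.
    if (value > static_cast<Wide>(std::numeric_limits<Narrow>::max()))
        throw std::overflow_error("value does not fit into the narrower integer type");
    return static_cast<Narrow>(value);
}

int main()
{
    std::size_t rows = 42;
    unsigned list_size = checkedNarrow<unsigned>(rows);  // e.g. what a list/array init would receive
    return list_size == 42 ? 0 : 1;
}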
@@ -208,7 +208,7 @@ static std::optional convertToDynamicValue( { const auto & nested_offset = nested_array_column->getOffsets(); size_t nested_array_size = nested_offset[offset + i] - nested_offset[offset + i - 1]; - value_builder = list_builder.init(i, nested_array_size); + value_builder = list_builder.init(i, static_cast(nested_array_size)); } else value_builder = list_builder[i]; diff --git a/src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp b/src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp index 739fa8735b2..bc363e5aa98 100644 --- a/src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp +++ b/src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp @@ -502,7 +502,7 @@ bool ConstantExpressionTemplate::parseLiteralAndAssertType(ReadBuffer & istr, co ParserTupleOfLiterals parser_tuple; Tokens tokens_number(istr.position(), istr.buffer().end()); - IParser::Pos iterator(tokens_number, settings.max_parser_depth); + IParser::Pos iterator(tokens_number, static_cast(settings.max_parser_depth)); Expected expected; ASTPtr ast; if (!parser_array.parse(iterator, ast, expected) && !parser_tuple.parse(iterator, ast, expected)) diff --git a/src/Processors/Formats/Impl/CustomSeparatedRowInputFormat.cpp b/src/Processors/Formats/Impl/CustomSeparatedRowInputFormat.cpp index 1c99a5484a2..16df132b9d8 100644 --- a/src/Processors/Formats/Impl/CustomSeparatedRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/CustomSeparatedRowInputFormat.cpp @@ -67,6 +67,19 @@ CustomSeparatedRowInputFormat::CustomSeparatedRowInputFormat( } } +void CustomSeparatedRowInputFormat::readPrefix() +{ + RowInputFormatWithNamesAndTypes::readPrefix(); + + /// Provide better error message for unsupported delimiters + for (const auto & column_index : column_mapping->column_indexes_for_input_fields) + { + if (column_index) + checkSupportedDelimiterAfterField(format_settings.custom.escaping_rule, format_settings.custom.field_delimiter, data_types[*column_index]); + else + checkSupportedDelimiterAfterField(format_settings.custom.escaping_rule, format_settings.custom.field_delimiter, nullptr); + } +} bool CustomSeparatedRowInputFormat::allowSyncAfterError() const { diff --git a/src/Processors/Formats/Impl/CustomSeparatedRowInputFormat.h b/src/Processors/Formats/Impl/CustomSeparatedRowInputFormat.h index c7e332b983f..e7e96ab87b1 100644 --- a/src/Processors/Formats/Impl/CustomSeparatedRowInputFormat.h +++ b/src/Processors/Formats/Impl/CustomSeparatedRowInputFormat.h @@ -30,6 +30,7 @@ private: bool allowSyncAfterError() const override; void syncAfterError() override; + void readPrefix() override; std::unique_ptr buf; bool ignore_spaces; diff --git a/src/Processors/Formats/Impl/LineAsStringRowInputFormat.cpp b/src/Processors/Formats/Impl/LineAsStringRowInputFormat.cpp index 30084804d92..677f8bb28ec 100644 --- a/src/Processors/Formats/Impl/LineAsStringRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/LineAsStringRowInputFormat.cpp @@ -1,5 +1,5 @@ #include -#include +#include #include #include #include @@ -63,6 +63,12 @@ void registerInputFormatLineAsString(FormatFactory & factory) }); } +void registerFileSegmentationEngineLineAsString(FormatFactory & factory) +{ + factory.registerFileSegmentationEngine("LineAsString", &newLineFileSegmentationEngine); +} + + void registerLineAsStringSchemaReader(FormatFactory & factory) { factory.registerExternalSchemaReader("LineAsString", []( diff --git a/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp b/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp index 
931a7587903..80fdda687e2 100644 --- a/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp @@ -128,7 +128,7 @@ static void insertInteger(IColumn & column, DataTypePtr type, UInt64 value) case TypeIndex::DateTime: [[fallthrough]]; case TypeIndex::UInt32: { - assert_cast(column).insertValue(value); + assert_cast(column).insertValue(static_cast(value)); break; } case TypeIndex::UInt64: @@ -148,7 +148,7 @@ static void insertInteger(IColumn & column, DataTypePtr type, UInt64 value) } case TypeIndex::Int32: { - assert_cast(column).insertValue(value); + assert_cast(column).insertValue(static_cast(value)); break; } case TypeIndex::Int64: diff --git a/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp b/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp index a470e193300..da683913d4d 100644 --- a/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp +++ b/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp @@ -99,15 +99,15 @@ void MsgPackRowOutputFormat::serializeField(const IColumn & column, DataTypePtr case TypeIndex::String: { const std::string_view & string = assert_cast(column).getDataAt(row_num).toView(); - packer.pack_bin(string.size()); - packer.pack_bin_body(string.data(), string.size()); + packer.pack_bin(static_cast(string.size())); + packer.pack_bin_body(string.data(), static_cast(string.size())); return; } case TypeIndex::FixedString: { const std::string_view & string = assert_cast(column).getDataAt(row_num).toView(); - packer.pack_bin(string.size()); - packer.pack_bin_body(string.data(), string.size()); + packer.pack_bin(static_cast(string.size())); + packer.pack_bin_body(string.data(), static_cast(string.size())); return; } case TypeIndex::Array: @@ -118,7 +118,7 @@ void MsgPackRowOutputFormat::serializeField(const IColumn & column, DataTypePtr const ColumnArray::Offsets & offsets = column_array.getOffsets(); size_t offset = offsets[row_num - 1]; size_t size = offsets[row_num] - offset; - packer.pack_array(size); + packer.pack_array(static_cast(size)); for (size_t i = 0; i < size; ++i) { serializeField(nested_column, nested_type, offset + i); @@ -152,7 +152,7 @@ void MsgPackRowOutputFormat::serializeField(const IColumn & column, DataTypePtr const auto & offsets = nested_column.getOffsets(); size_t offset = offsets[row_num - 1]; size_t size = offsets[row_num] - offset; - packer.pack_map(size); + packer.pack_map(static_cast(size)); for (size_t i = 0; i < size; ++i) { serializeField(*key_column, map_type.getKeyType(), offset + i); @@ -179,8 +179,8 @@ void MsgPackRowOutputFormat::serializeField(const IColumn & column, DataTypePtr WriteBufferFromOwnString buf; writeBinary(uuid_column.getElement(row_num), buf); std::string_view uuid_bin = buf.stringView(); - packer.pack_bin(uuid_bin.size()); - packer.pack_bin_body(uuid_bin.data(), uuid_bin.size()); + packer.pack_bin(static_cast(uuid_bin.size())); + packer.pack_bin_body(uuid_bin.data(), static_cast(uuid_bin.size())); return; } case FormatSettings::MsgPackUUIDRepresentation::STR: @@ -188,8 +188,8 @@ void MsgPackRowOutputFormat::serializeField(const IColumn & column, DataTypePtr WriteBufferFromOwnString buf; writeText(uuid_column.getElement(row_num), buf); std::string_view uuid_text = buf.stringView(); - packer.pack_str(uuid_text.size()); - packer.pack_bin_body(uuid_text.data(), uuid_text.size()); + packer.pack_str(static_cast(uuid_text.size())); + packer.pack_bin_body(uuid_text.data(), static_cast(uuid_text.size())); return; } case 
FormatSettings::MsgPackUUIDRepresentation::EXT: @@ -200,7 +200,7 @@ void MsgPackRowOutputFormat::serializeField(const IColumn & column, DataTypePtr writeBinaryBigEndian(value.toUnderType().items[1], buf); std::string_view uuid_ext = buf.stringView(); packer.pack_ext(sizeof(UUID), int8_t(MsgPackExtensionTypes::UUIDType)); - packer.pack_ext_body(uuid_ext.data(), uuid_ext.size()); + packer.pack_ext_body(uuid_ext.data(), static_cast(uuid_ext.size())); return; } } diff --git a/src/Processors/Formats/Impl/MySQLOutputFormat.cpp b/src/Processors/Formats/Impl/MySQLOutputFormat.cpp index 344c5c179db..75a03cb6d0e 100644 --- a/src/Processors/Formats/Impl/MySQLOutputFormat.cpp +++ b/src/Processors/Formats/Impl/MySQLOutputFormat.cpp @@ -65,7 +65,7 @@ void MySQLOutputFormat::consume(Chunk chunk) { for (size_t i = 0; i < chunk.getNumRows(); ++i) { - ProtocolText::ResultSetRow row_packet(serializations, chunk.getColumns(), i); + ProtocolText::ResultSetRow row_packet(serializations, chunk.getColumns(), static_cast(i)); packet_endpoint->sendPacket(row_packet); } } @@ -74,7 +74,7 @@ void MySQLOutputFormat::finalizeImpl() { size_t affected_rows = 0; std::string human_readable_info; - if (QueryStatus * process_list_elem = getContext()->getProcessListElement()) + if (QueryStatusPtr process_list_elem = getContext()->getProcessListElement()) { CurrentThread::finalizePerformanceCounters(); QueryStatusInfo info = process_list_elem->getInfo(); diff --git a/src/Processors/Formats/Impl/ORCBlockInputFormat.cpp b/src/Processors/Formats/Impl/ORCBlockInputFormat.cpp index 36126c21bf1..d6dbd69135a 100644 --- a/src/Processors/Formats/Impl/ORCBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/ORCBlockInputFormat.cpp @@ -136,7 +136,7 @@ void ORCBlockInputFormat::prepareReader() if (is_stopped) return; - stripe_total = file_reader->NumberOfStripes(); + stripe_total = static_cast(file_reader->NumberOfStripes()); stripe_current = 0; arrow_column_to_ch_column = std::make_unique( @@ -159,7 +159,7 @@ void ORCBlockInputFormat::prepareReader() { /// LIST type require 2 indices, STRUCT - the number of elements + 1, /// so we should recursively count the number of indices we need for this type. - int indexes_count = countIndicesForType(schema->field(i)->type()); + int indexes_count = static_cast(countIndicesForType(schema->field(i)->type())); const auto & name = schema->field(i)->name(); if (getPort().getHeader().has(name, ignore_case) || nested_table_names.contains(ignore_case ? 
boost::to_lower_copy(name) : name)) { diff --git a/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp b/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp index 2f54cca466c..9172c79c890 100644 --- a/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp +++ b/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp @@ -128,8 +128,9 @@ void ParallelParsingInputFormat::onBackgroundException(size_t offset) background_exception = std::current_exception(); if (ParsingException * e = exception_cast(background_exception)) { + /// NOTE: it is not that safe to use line number hack here (may exceed INT_MAX) if (e->getLineNumber() != -1) - e->setLineNumber(e->getLineNumber() + offset); + e->setLineNumber(static_cast(e->getLineNumber() + offset)); auto file_name = getFileNameFromReadBuffer(getReadBuffer()); if (!file_name.empty()) diff --git a/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp b/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp index 427c159314b..dd2826287b2 100644 --- a/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp @@ -161,7 +161,7 @@ void ParquetBlockInputFormat::prepareReader() /// STRUCT type require the number of indexes equal to the number of /// nested elements, so we should recursively /// count the number of indices we need for this type. - int indexes_count = countIndicesForType(schema->field(i)->type()); + int indexes_count = static_cast(countIndicesForType(schema->field(i)->type())); const auto & name = schema->field(i)->name(); if (getPort().getHeader().has(name, ignore_case) || nested_table_names.contains(ignore_case ? boost::to_lower_copy(name) : name)) diff --git a/src/Processors/Formats/Impl/RegexpRowInputFormat.cpp b/src/Processors/Formats/Impl/RegexpRowInputFormat.cpp index c26b6b39e0d..2ad2ad6f7a3 100644 --- a/src/Processors/Formats/Impl/RegexpRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/RegexpRowInputFormat.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include namespace DB @@ -50,7 +51,11 @@ bool RegexpFieldExtractor::parseRow(PeekableReadBuffer & buf) if (line_size > 0 && buf.position()[line_size - 1] == '\r') --line_to_match; - bool match = re2_st::RE2::FullMatchN(re2_st::StringPiece(buf.position(), line_to_match), regexp, re2_arguments_ptrs.data(), re2_arguments_ptrs.size()); + bool match = re2_st::RE2::FullMatchN( + re2_st::StringPiece(buf.position(), line_to_match), + regexp, + re2_arguments_ptrs.data(), + static_cast(re2_arguments_ptrs.size())); if (!match && !skip_unmatched) throw Exception("Line \"" + std::string(buf.position(), line_to_match) + "\" doesn't match the regexp.", ErrorCodes::INCORRECT_DATA); @@ -174,46 +179,9 @@ void registerInputFormatRegexp(FormatFactory & factory) }); } -static std::pair fileSegmentationEngineRegexpImpl(ReadBuffer & in, DB::Memory<> & memory, size_t min_bytes, size_t max_rows) -{ - char * pos = in.position(); - bool need_more_data = true; - size_t number_of_rows = 0; - - while (loadAtPosition(in, memory, pos) && need_more_data) - { - pos = find_first_symbols<'\r', '\n'>(pos, in.buffer().end()); - if (pos > in.buffer().end()) - throw Exception("Position in buffer is out of bounds. 
There must be a bug.", ErrorCodes::LOGICAL_ERROR); - else if (pos == in.buffer().end()) - continue; - - ++number_of_rows; - if ((memory.size() + static_cast(pos - in.position()) >= min_bytes) || (number_of_rows == max_rows)) - need_more_data = false; - - if (*pos == '\n') - { - ++pos; - if (loadAtPosition(in, memory, pos) && *pos == '\r') - ++pos; - } - else if (*pos == '\r') - { - ++pos; - if (loadAtPosition(in, memory, pos) && *pos == '\n') - ++pos; - } - } - - saveUpToPosition(in, memory, pos); - - return {loadAtPosition(in, memory, pos), number_of_rows}; -} - void registerFileSegmentationEngineRegexp(FormatFactory & factory) { - factory.registerFileSegmentationEngine("Regexp", &fileSegmentationEngineRegexpImpl); + factory.registerFileSegmentationEngine("Regexp", &newLineFileSegmentationEngine); } void registerRegexpSchemaReader(FormatFactory & factory) diff --git a/src/Processors/Formats/Impl/TemplateRowInputFormat.cpp b/src/Processors/Formats/Impl/TemplateRowInputFormat.cpp index 785658c0fa2..76fd0d2a907 100644 --- a/src/Processors/Formats/Impl/TemplateRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/TemplateRowInputFormat.cpp @@ -53,18 +53,25 @@ TemplateRowInputFormat::TemplateRowInputFormat(const Block & header_, std::uniqu std::vector column_in_format(header_.columns(), false); for (size_t i = 0; i < row_format.columnsCount(); ++i) { - if (row_format.format_idx_to_column_idx[i]) + const auto & column_index = row_format.format_idx_to_column_idx[i]; + if (column_index) { - if (header_.columns() <= *row_format.format_idx_to_column_idx[i]) - row_format.throwInvalidFormat("Column index " + std::to_string(*row_format.format_idx_to_column_idx[i]) + + if (header_.columns() <= *column_index) + row_format.throwInvalidFormat("Column index " + std::to_string(*column_index) + " must be less then number of columns (" + std::to_string(header_.columns()) + ")", i); if (row_format.escaping_rules[i] == EscapingRule::None) row_format.throwInvalidFormat("Column is not skipped, but deserialization type is None", i); - size_t col_idx = *row_format.format_idx_to_column_idx[i]; + size_t col_idx = *column_index; if (column_in_format[col_idx]) row_format.throwInvalidFormat("Duplicate column", i); column_in_format[col_idx] = true; + + checkSupportedDelimiterAfterField(row_format.escaping_rules[i], row_format.delimiters[i + 1], data_types[*column_index]); + } + else + { + checkSupportedDelimiterAfterField(row_format.escaping_rules[i], row_format.delimiters[i + 1], nullptr); } } diff --git a/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp b/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp index aff4557a4b7..108b4203e3e 100644 --- a/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp @@ -101,7 +101,9 @@ Chunk ValuesBlockInputFormat::generate() return {}; } - finalizeObjectColumns(columns); + for (const auto & column : columns) + column->finalize(); + size_t rows_in_block = columns[0]->size(); return Chunk{std::move(columns), rows_in_block}; } @@ -350,7 +352,7 @@ bool ValuesBlockInputFormat::parseExpression(IColumn & column, size_t column_idx Expected expected; Tokens tokens(buf->position(), buf->buffer().end()); - IParser::Pos token_iterator(tokens, settings.max_parser_depth); + IParser::Pos token_iterator(tokens, static_cast(settings.max_parser_depth)); ASTPtr ast; bool parsed = parser.parse(token_iterator, ast, expected); diff --git a/src/Processors/Formats/RowInputFormatWithNamesAndTypes.h 
b/src/Processors/Formats/RowInputFormatWithNamesAndTypes.h index d2dd28eb15a..9d0734f4567 100644 --- a/src/Processors/Formats/RowInputFormatWithNamesAndTypes.h +++ b/src/Processors/Formats/RowInputFormatWithNamesAndTypes.h @@ -41,6 +41,7 @@ protected: void resetParser() override; bool isGarbageAfterField(size_t index, ReadBuffer::Position pos) override; void setReadBuffer(ReadBuffer & in_) override; + void readPrefix() override; const FormatSettings format_settings; DataTypes data_types; @@ -48,7 +49,6 @@ protected: private: bool readRow(MutableColumns & columns, RowReadExtension & ext) override; - void readPrefix() override; bool parseRowAndPrintDiagnosticInfo(MutableColumns & columns, WriteBuffer & out) override; void tryDeserializeField(const DataTypePtr & type, IColumn & column, size_t file_column) override; diff --git a/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.cpp index ebc1b37074b..db08f3ffbd3 100644 --- a/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.cpp @@ -1,7 +1,5 @@ #include -#include -#include #include #include #include @@ -18,70 +16,6 @@ AggregatingSortedAlgorithm::ColumnsDefinition::ColumnsDefinition() = default; AggregatingSortedAlgorithm::ColumnsDefinition::ColumnsDefinition(ColumnsDefinition &&) noexcept = default; AggregatingSortedAlgorithm::ColumnsDefinition::~ColumnsDefinition() = default; -/// Stores information for aggregation of AggregateFunction columns -struct AggregatingSortedAlgorithm::AggregateDescription -{ - ColumnAggregateFunction * column = nullptr; - const size_t column_number = 0; /// Position in header. - - AggregateDescription() = default; - explicit AggregateDescription(size_t col_number) : column_number(col_number) {} -}; - -/// Stores information for aggregation of SimpleAggregateFunction columns -struct AggregatingSortedAlgorithm::SimpleAggregateDescription -{ - /// An aggregate function 'anyLast', 'sum'... - AggregateFunctionPtr function; - IAggregateFunction::AddFunc add_function = nullptr; - - size_t column_number = 0; - IColumn * column = nullptr; - - /// For LowCardinality, convert is converted to nested type. nested_type is nullptr if no conversion needed. - const DataTypePtr nested_type; /// Nested type for LowCardinality, if it is. - const DataTypePtr real_type; /// Type in header. 
- - AlignedBuffer state; - bool created = false; - - SimpleAggregateDescription( - AggregateFunctionPtr function_, const size_t column_number_, - DataTypePtr nested_type_, DataTypePtr real_type_) - : function(std::move(function_)), column_number(column_number_) - , nested_type(std::move(nested_type_)), real_type(std::move(real_type_)) - { - add_function = function->getAddressOfAddFunction(); - state.reset(function->sizeOfData(), function->alignOfData()); - } - - void createState() - { - if (created) - return; - function->create(state.data()); - created = true; - } - - void destroyState() - { - if (!created) - return; - function->destroy(state.data()); - created = false; - } - - /// Explicitly destroy aggregation state if the stream is terminated - ~SimpleAggregateDescription() - { - destroyState(); - } - - SimpleAggregateDescription() = default; - SimpleAggregateDescription(SimpleAggregateDescription &&) = default; - SimpleAggregateDescription(const SimpleAggregateDescription &) = delete; -}; - static AggregatingSortedAlgorithm::ColumnsDefinition defineColumns( const Block & header, const SortDescription & description) { @@ -191,6 +125,39 @@ static void postprocessChunk(Chunk & chunk, const AggregatingSortedAlgorithm::Co } +AggregatingSortedAlgorithm::SimpleAggregateDescription::SimpleAggregateDescription( + AggregateFunctionPtr function_, const size_t column_number_, + DataTypePtr nested_type_, DataTypePtr real_type_) + : function(std::move(function_)), column_number(column_number_) + , nested_type(std::move(nested_type_)), real_type(std::move(real_type_)) +{ + add_function = function->getAddressOfAddFunction(); + state.reset(function->sizeOfData(), function->alignOfData()); +} + +void AggregatingSortedAlgorithm::SimpleAggregateDescription::createState() +{ + if (created) + return; + function->create(state.data()); + created = true; +} + +void AggregatingSortedAlgorithm::SimpleAggregateDescription::destroyState() +{ + if (!created) + return; + function->destroy(state.data()); + created = false; +} + +/// Explicitly destroy aggregation state if the stream is terminated +AggregatingSortedAlgorithm::SimpleAggregateDescription::~SimpleAggregateDescription() +{ + destroyState(); +} + + AggregatingSortedAlgorithm::AggregatingMergedData::AggregatingMergedData( MutableColumns columns_, UInt64 max_block_size_, ColumnsDefinition & def_) : MergedData(std::move(columns_), false, max_block_size_), def(def_) diff --git a/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.h index e572ed7d526..d670242ed81 100644 --- a/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.h @@ -1,5 +1,7 @@ #pragma once +#include +#include #include #include @@ -23,8 +25,48 @@ public: void consume(Input & input, size_t source_num) override; Status merge() override; - struct SimpleAggregateDescription; - struct AggregateDescription; + /// Stores information for aggregation of SimpleAggregateFunction columns + struct SimpleAggregateDescription + { + /// An aggregate function 'anyLast', 'sum'... + AggregateFunctionPtr function; + IAggregateFunction::AddFunc add_function = nullptr; + + size_t column_number = 0; + IColumn * column = nullptr; + + /// For LowCardinality, convert is converted to nested type. nested_type is nullptr if no conversion needed. + const DataTypePtr nested_type; /// Nested type for LowCardinality, if it is. + const DataTypePtr real_type; /// Type in header. 
+ + AlignedBuffer state; + bool created = false; + + SimpleAggregateDescription( + AggregateFunctionPtr function_, const size_t column_number_, + DataTypePtr nested_type_, DataTypePtr real_type_); + + void createState(); + + void destroyState(); + + /// Explicitly destroy aggregation state if the stream is terminated + ~SimpleAggregateDescription(); + + SimpleAggregateDescription() = default; + SimpleAggregateDescription(SimpleAggregateDescription &&) = default; + SimpleAggregateDescription(const SimpleAggregateDescription &) = delete; + }; + + /// Stores information for aggregation of AggregateFunction columns + struct AggregateDescription + { + ColumnAggregateFunction * column = nullptr; + const size_t column_number = 0; /// Position in header. + + AggregateDescription() = default; + explicit AggregateDescription(size_t col_number) : column_number(col_number) {} + }; /// This structure define columns into one of three types: /// * columns which are not aggregate functions and not needed to be aggregated diff --git a/src/Processors/Merges/Algorithms/SummingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/SummingSortedAlgorithm.cpp index 8636813132d..c79c667a988 100644 --- a/src/Processors/Merges/Algorithms/SummingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/SummingSortedAlgorithm.cpp @@ -23,10 +23,6 @@ namespace ErrorCodes extern const int CORRUPTED_DATA; } -SummingSortedAlgorithm::ColumnsDefinition::ColumnsDefinition() = default; -SummingSortedAlgorithm::ColumnsDefinition::ColumnsDefinition(ColumnsDefinition &&) noexcept = default; -SummingSortedAlgorithm::ColumnsDefinition::~ColumnsDefinition() = default; - /// Stores numbers of key-columns and value-columns. struct SummingSortedAlgorithm::MapDescription { @@ -777,4 +773,8 @@ IMergingAlgorithm::Status SummingSortedAlgorithm::merge() return Status(merged_data.pull(), true); } +SummingSortedAlgorithm::ColumnsDefinition::ColumnsDefinition() = default; +SummingSortedAlgorithm::ColumnsDefinition::ColumnsDefinition(ColumnsDefinition &&) noexcept = default; +SummingSortedAlgorithm::ColumnsDefinition::~ColumnsDefinition() = default; + } diff --git a/src/Processors/QueryPlan/BuildQueryPipelineSettings.h b/src/Processors/QueryPlan/BuildQueryPipelineSettings.h index fadbd061fbd..3b5e4e06953 100644 --- a/src/Processors/QueryPlan/BuildQueryPipelineSettings.h +++ b/src/Processors/QueryPlan/BuildQueryPipelineSettings.h @@ -5,16 +5,18 @@ #include + namespace DB { struct Settings; class QueryStatus; +using QueryStatusPtr = std::shared_ptr; struct BuildQueryPipelineSettings { ExpressionActionsSettings actions_settings; - QueryStatus * process_list_element = nullptr; + QueryStatusPtr process_list_element; ProgressCallback progress_callback = nullptr; const ExpressionActionsSettings & getActionsSettings() const { return actions_settings; } diff --git a/src/Processors/QueryPlan/CreatingSetsStep.cpp b/src/Processors/QueryPlan/CreatingSetsStep.cpp index bd079c0b8a9..b52d86aa725 100644 --- a/src/Processors/QueryPlan/CreatingSetsStep.cpp +++ b/src/Processors/QueryPlan/CreatingSetsStep.cpp @@ -122,11 +122,8 @@ void CreatingSetsStep::describePipeline(FormatSettings & settings) const IQueryPlanStep::describePipeline(processors, settings); } -void addCreatingSetsStep(QueryPlan & query_plan, PreparedSetsPtr prepared_sets, ContextPtr context) +void addCreatingSetsStep(QueryPlan & query_plan, PreparedSets::SubqueriesForSets subqueries_for_sets, ContextPtr context) { - if (!prepared_sets || prepared_sets->empty()) - return; - DataStreams 
input_streams; input_streams.emplace_back(query_plan.getCurrentDataStream()); @@ -134,7 +131,7 @@ void addCreatingSetsStep(QueryPlan & query_plan, PreparedSetsPtr prepared_sets, plans.emplace_back(std::make_unique(std::move(query_plan))); query_plan = QueryPlan(); - for (auto & [description, subquery_for_set] : prepared_sets->detachSubqueries()) + for (auto & [description, subquery_for_set] : subqueries_for_sets) { if (!subquery_for_set.hasSource()) continue; @@ -166,4 +163,12 @@ void addCreatingSetsStep(QueryPlan & query_plan, PreparedSetsPtr prepared_sets, query_plan.unitePlans(std::move(creating_sets), std::move(plans)); } +void addCreatingSetsStep(QueryPlan & query_plan, PreparedSetsPtr prepared_sets, ContextPtr context) +{ + if (!prepared_sets || prepared_sets->empty()) + return; + + addCreatingSetsStep(query_plan, prepared_sets->detachSubqueries(), context); +} + } diff --git a/src/Processors/QueryPlan/CreatingSetsStep.h b/src/Processors/QueryPlan/CreatingSetsStep.h index 9c61eb2012c..9995af7bca7 100644 --- a/src/Processors/QueryPlan/CreatingSetsStep.h +++ b/src/Processors/QueryPlan/CreatingSetsStep.h @@ -49,6 +49,8 @@ private: Processors processors; }; +void addCreatingSetsStep(QueryPlan & query_plan, PreparedSets::SubqueriesForSets subqueries_for_sets, ContextPtr context); + void addCreatingSetsStep(QueryPlan & query_plan, PreparedSetsPtr prepared_sets, ContextPtr context); } diff --git a/src/Processors/QueryPlan/IntersectOrExceptStep.h b/src/Processors/QueryPlan/IntersectOrExceptStep.h index b2738cb297f..d7eab574431 100644 --- a/src/Processors/QueryPlan/IntersectOrExceptStep.h +++ b/src/Processors/QueryPlan/IntersectOrExceptStep.h @@ -8,9 +8,9 @@ namespace DB class IntersectOrExceptStep : public IQueryPlanStep { -using Operator = ASTSelectIntersectExceptQuery::Operator; - public: + using Operator = ASTSelectIntersectExceptQuery::Operator; + /// max_threads is used to limit the number of threads for result pipeline. IntersectOrExceptStep(DataStreams input_streams_, Operator operator_, size_t max_threads_ = 0); diff --git a/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyCondition.cpp b/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyCondition.cpp index 7d682c408e5..984c76701ba 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyCondition.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyCondition.cpp @@ -17,7 +17,7 @@ void optimizePrimaryKeyCondition(QueryPlan::Node & root) size_t next_child = 0; }; - std::deque stack; + std::vector stack; stack.push_back({.node = &root}); while (!stack.empty()) @@ -27,29 +27,29 @@ void optimizePrimaryKeyCondition(QueryPlan::Node & root) /// Traverse all children first. 
if (frame.next_child < frame.node->children.size()) { - stack.push_back({.node = frame.node->children[frame.next_child]}); - + auto next_frame = Frame{.node = frame.node->children[frame.next_child]}; ++frame.next_child; + stack.push_back(next_frame); continue; } - auto add_filter = [&](auto & storage) + auto add_read_from_storage_filter = [&](auto & storage) { - for (auto iter=stack.rbegin() + 1; iter!=stack.rend(); ++iter) + for (auto iter = stack.rbegin() + 1; iter != stack.rend(); ++iter) { if (auto * filter_step = typeid_cast(iter->node->step.get())) storage.addFilter(filter_step->getExpression(), filter_step->getFilterColumnName()); else if (typeid_cast(iter->node->step.get())) - ; + continue; else break; } }; if (auto * read_from_merge_tree = typeid_cast(frame.node->step.get())) - add_filter(*read_from_merge_tree); + add_read_from_storage_filter(*read_from_merge_tree); else if (auto * read_from_merge = typeid_cast(frame.node->step.get())) - add_filter(*read_from_merge); + add_read_from_storage_filter(*read_from_merge); stack.pop_back(); } diff --git a/src/Processors/QueryPlan/Optimizations/reuseStorageOrderingForWindowFunctions.cpp b/src/Processors/QueryPlan/Optimizations/reuseStorageOrderingForWindowFunctions.cpp index a8431d38a78..c74b5ed915b 100644 --- a/src/Processors/QueryPlan/Optimizations/reuseStorageOrderingForWindowFunctions.cpp +++ b/src/Processors/QueryPlan/Optimizations/reuseStorageOrderingForWindowFunctions.cpp @@ -62,7 +62,7 @@ size_t tryReuseStorageOrderingForWindowFunctions(QueryPlan::Node * parent_node, } auto context = read_from_merge_tree->getContext(); - if (!context->getSettings().optimize_read_in_window_order) + if (!context->getSettings().optimize_read_in_window_order || context->getSettingsRef().allow_experimental_analyzer) { return 0; } @@ -70,6 +70,10 @@ size_t tryReuseStorageOrderingForWindowFunctions(QueryPlan::Node * parent_node, const auto & query_info = read_from_merge_tree->getQueryInfo(); const auto * select_query = query_info.query->as(); + /// TODO: Analyzer syntax analyzer result + if (!query_info.syntax_analyzer_result) + return 0; + ManyExpressionActions order_by_elements_actions; const auto & window_desc = window->getWindowDescription(); diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index b340073e73d..b268e7deff0 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -173,6 +173,9 @@ Pipe ReadFromMergeTree::readFromPool( total_rows += part.getRowsCount(); } + if (query_info.limit > 0 && query_info.limit < total_rows) + total_rows = query_info.limit; + const auto & settings = context->getSettingsRef(); const auto & client_info = context->getClientInfo(); MergeTreeReadPool::BackoffSettings backoff_settings(settings); @@ -246,10 +249,26 @@ ProcessorPtr ReadFromMergeTree::createSource( }; } - return std::make_shared( + auto total_rows = part.getRowsCount(); + if (query_info.limit > 0 && query_info.limit < total_rows) + total_rows = query_info.limit; + + /// Actually it means that parallel reading from replicas enabled + /// and we have to collaborate with initiator. + /// In this case we won't set approximate rows, because it will be accounted multiple times. + /// Also do not count amount of read rows if we read in order of sorting key, + /// because we don't know actual amount of read rows in case when limit is set. 
+ bool set_rows_approx = !extension.has_value() && !reader_settings.read_in_order; + + auto source = std::make_shared( data, storage_snapshot, part.data_part, max_block_size, preferred_block_size_bytes, preferred_max_column_in_block_size_bytes, required_columns, part.ranges, use_uncompressed_cache, prewhere_info, actions_settings, reader_settings, virt_column_names, part.part_index_in_query, has_limit_below_one_block, std::move(extension)); + + if (set_rows_approx) + source -> addTotalRowsApprox(total_rows); + + return source; } Pipe ReadFromMergeTree::readInOrder( @@ -853,7 +872,7 @@ MergeTreeDataSelectAnalysisResultPtr ReadFromMergeTree::selectRangesToRead( const StorageMetadataPtr & metadata_snapshot, const SelectQueryInfo & query_info, ContextPtr context, - unsigned num_streams, + size_t num_streams, std::shared_ptr max_block_numbers_to_read, const MergeTreeData & data, const Names & real_column_names, @@ -906,8 +925,15 @@ MergeTreeDataSelectAnalysisResultPtr ReadFromMergeTree::selectRangesToRead( for (const auto & node : added_filter_nodes.nodes) nodes.nodes.push_back(node); - key_condition.emplace( - std::move(nodes), query_info.syntax_analyzer_result, query_info.prepared_sets, context, primary_key_columns, primary_key.expression); + NameSet array_join_name_set; + if (query_info.syntax_analyzer_result) + array_join_name_set = query_info.syntax_analyzer_result->getArrayJoinSourceNameSet(); + + key_condition.emplace(std::move(nodes), + context, + primary_key_columns, + primary_key.expression, + array_join_name_set); } else { @@ -945,7 +971,7 @@ MergeTreeDataSelectAnalysisResultPtr ReadFromMergeTree::selectRangesToRead( result.index_stats); result.sampling = MergeTreeDataSelectExecutor::getSampling( - select, + query_info, metadata_snapshot->getColumns().getAllPhysical(), parts, *key_condition, @@ -965,7 +991,13 @@ MergeTreeDataSelectAnalysisResultPtr ReadFromMergeTree::selectRangesToRead( auto reader_settings = getMergeTreeReaderSettings(context, query_info); bool use_skip_indexes = settings.use_skip_indexes; - if (select.final() && !settings.use_skip_indexes_if_final) + bool final = false; + if (query_info.table_expression_modifiers) + final = query_info.table_expression_modifiers->hasFinal(); + else + final = select.final(); + + if (final && !settings.use_skip_indexes_if_final) use_skip_indexes = false; result.parts_with_ranges = MergeTreeDataSelectExecutor::filterPartsByPrimaryKeyAndSkipIndexes( @@ -1097,7 +1129,13 @@ void ReadFromMergeTree::initializePipeline(QueryPipelineBuilder & pipeline, cons Names column_names_to_read = std::move(result.column_names_to_read); const auto & select = query_info.query->as(); - if (!select.final() && result.sampling.use_sampling) + bool final = false; + if (query_info.table_expression_modifiers) + final = query_info.table_expression_modifiers->hasFinal(); + else + final = select.final(); + + if (!final && result.sampling.use_sampling) { /// Add columns needed for `sample_by_ast` to `column_names_to_read`. /// Skip this if final was used, because such columns were already added from PK. @@ -1112,7 +1150,7 @@ void ReadFromMergeTree::initializePipeline(QueryPipelineBuilder & pipeline, cons const auto & input_order_info = query_info.getInputOrderInfo(); - if (select.final()) + if (final) { /// Add columns needed to calculate the sorting expression and the sign. 
std::vector add_columns = metadata_for_reading->getColumnsRequiredForSortingKey(); diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.h b/src/Processors/QueryPlan/ReadFromMergeTree.h index 0a013748e91..15258eb6c40 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.h +++ b/src/Processors/QueryPlan/ReadFromMergeTree.h @@ -140,7 +140,7 @@ public: const StorageMetadataPtr & metadata_snapshot, const SelectQueryInfo & query_info, ContextPtr context, - unsigned num_streams, + size_t num_streams, std::shared_ptr max_block_numbers_to_read, const MergeTreeData & data, const Names & real_column_names, diff --git a/src/Processors/Sources/MySQLSource.cpp b/src/Processors/Sources/MySQLSource.cpp index e868182f49b..ecc80bef40b 100644 --- a/src/Processors/Sources/MySQLSource.cpp +++ b/src/Processors/Sources/MySQLSource.cpp @@ -141,7 +141,7 @@ namespace read_bytes_size += 2; break; case ValueType::vtUInt32: - assert_cast(column).insertValue(value.getUInt()); + assert_cast(column).insertValue(static_cast(value.getUInt())); read_bytes_size += 4; break; case ValueType::vtUInt64: @@ -171,7 +171,7 @@ namespace read_bytes_size += 2; break; case ValueType::vtInt32: - assert_cast(column).insertValue(value.getInt()); + assert_cast(column).insertValue(static_cast(value.getInt())); read_bytes_size += 4; break; case ValueType::vtInt64: @@ -236,7 +236,7 @@ namespace readDateTimeText(time, in, assert_cast(data_type).getTimeZone()); if (time < 0) time = 0; - assert_cast(column).insertValue(time); + assert_cast(column).insertValue(static_cast(time)); read_bytes_size += 4; break; } diff --git a/src/Processors/Sources/SQLiteSource.cpp b/src/Processors/Sources/SQLiteSource.cpp index d2c6f2ebb23..79c4be7f692 100644 --- a/src/Processors/Sources/SQLiteSource.cpp +++ b/src/Processors/Sources/SQLiteSource.cpp @@ -35,7 +35,11 @@ SQLiteSource::SQLiteSource( description.init(sample_block); sqlite3_stmt * compiled_stmt = nullptr; - int status = sqlite3_prepare_v2(sqlite_db.get(), query_str.c_str(), query_str.size() + 1, &compiled_stmt, nullptr); + int status = sqlite3_prepare_v2( + sqlite_db.get(), + query_str.c_str(), + static_cast(query_str.size() + 1), + &compiled_stmt, nullptr); if (status != SQLITE_OK) throw Exception(ErrorCodes::SQLITE_ENGINE_ERROR, @@ -109,7 +113,7 @@ Chunk SQLiteSource::generate() return Chunk(std::move(columns), num_rows); } -void SQLiteSource::insertValue(IColumn & column, ExternalResultDescription::ValueType type, size_t idx) +void SQLiteSource::insertValue(IColumn & column, ExternalResultDescription::ValueType type, int idx) { switch (type) { @@ -120,7 +124,7 @@ void SQLiteSource::insertValue(IColumn & column, ExternalResultDescription::Valu assert_cast(column).insertValue(sqlite3_column_int(compiled_statement.get(), idx)); break; case ValueType::vtUInt32: - assert_cast(column).insertValue(sqlite3_column_int64(compiled_statement.get(), idx)); + assert_cast(column).insertValue(static_cast(sqlite3_column_int64(compiled_statement.get(), idx))); break; case ValueType::vtUInt64: /// There is no uint64 in sqlite3, only int and int64 diff --git a/src/Processors/Sources/SQLiteSource.h b/src/Processors/Sources/SQLiteSource.h index d792483c70f..c1bae4d8a67 100644 --- a/src/Processors/Sources/SQLiteSource.h +++ b/src/Processors/Sources/SQLiteSource.h @@ -33,7 +33,7 @@ private: Chunk generate() override; - void insertValue(IColumn & column, ExternalResultDescription::ValueType type, size_t idx); + void insertValue(IColumn & column, ExternalResultDescription::ValueType type, int idx); String query_str; 
UInt64 max_block_size; diff --git a/src/Processors/Sources/ShellCommandSource.cpp b/src/Processors/Sources/ShellCommandSource.cpp index 8598b0197fc..3f70abaea6d 100644 --- a/src/Processors/Sources/ShellCommandSource.cpp +++ b/src/Processors/Sources/ShellCommandSource.cpp @@ -77,7 +77,7 @@ static bool pollFd(int fd, size_t timeout_milliseconds, int events) while (true) { - res = poll(&pfd, 1, timeout_milliseconds); + res = poll(&pfd, 1, static_cast(timeout_milliseconds)); if (res < 0) { @@ -527,7 +527,7 @@ Pipe ShellCommandSourceCoordinator::createPipe( } else { - auto descriptor = i + 2; + int descriptor = static_cast(i) + 2; auto it = process->write_fds.find(descriptor); if (it == process->write_fds.end()) throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "Process does not contain descriptor to write {}", descriptor); diff --git a/src/Processors/TTL/ITTLAlgorithm.cpp b/src/Processors/TTL/ITTLAlgorithm.cpp index 489941950b5..c71ad740719 100644 --- a/src/Processors/TTL/ITTLAlgorithm.cpp +++ b/src/Processors/TTL/ITTLAlgorithm.cpp @@ -48,13 +48,13 @@ ColumnPtr ITTLAlgorithm::executeExpressionAndGetColumn( UInt32 ITTLAlgorithm::getTimestampByIndex(const IColumn * column, size_t index) const { if (const ColumnUInt16 * column_date = typeid_cast(column)) - return date_lut.fromDayNum(DayNum(column_date->getData()[index])); + return static_cast(date_lut.fromDayNum(DayNum(column_date->getData()[index]))); else if (const ColumnUInt32 * column_date_time = typeid_cast(column)) return column_date_time->getData()[index]; else if (const ColumnConst * column_const = typeid_cast(column)) { if (typeid_cast(&column_const->getDataColumn())) - return date_lut.fromDayNum(DayNum(column_const->getValue())); + return static_cast(date_lut.fromDayNum(DayNum(column_const->getValue()))); else if (typeid_cast(&column_const->getDataColumn())) return column_const->getValue(); } diff --git a/src/Processors/Transforms/CountingTransform.h b/src/Processors/Transforms/CountingTransform.h index bd2ec58a27f..05d8e2aeac8 100644 --- a/src/Processors/Transforms/CountingTransform.h +++ b/src/Processors/Transforms/CountingTransform.h @@ -9,6 +9,7 @@ namespace DB { class QueryStatus; +using QueryStatusPtr = std::shared_ptr; class ThreadStatus; /// Proxy class which counts number of written block, rows, bytes @@ -29,7 +30,7 @@ public: progress_callback = callback; } - void setProcessListElement(QueryStatus * elem) + void setProcessListElement(QueryStatusPtr elem) { process_elem = elem; } @@ -50,7 +51,7 @@ public: protected: Progress progress; ProgressCallback progress_callback; - QueryStatus * process_elem = nullptr; + QueryStatusPtr process_elem; ThreadStatus * thread_status = nullptr; /// Quota is used to limit amount of written bytes. 
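// --- Illustrative sketch (editorial addition, not part of the patch above) ---
// Several hunks in this patch replace raw `QueryStatus *` members with a QueryStatusPtr
// (a shared_ptr), so the process-list entry stays alive for everyone who still holds it.
// Below is a minimal, hypothetical model of that ownership change; the names
// `QueryStatusSketch` and `CountingSketch` are invented for illustration only.

#include <cstddef>
#include <memory>
#include <string>

struct QueryStatusSketch
{
    std::string query;
    std::size_t processed_rows = 0;
    void updateProgress(std::size_t rows) { processed_rows += rows; }
};
using QueryStatusSketchPtr = std::shared_ptr<QueryStatusSketch>;

class CountingSketch
{
public:
    // Holding a shared_ptr (instead of a raw pointer) keeps the status object valid
    // even if the owning process-list entry is released first.
    void setProcessListElement(QueryStatusSketchPtr elem) { process_elem = std::move(elem); }

    void consume(std::size_t rows)
    {
        if (process_elem)
            process_elem->updateProgress(rows);
    }

private:
    QueryStatusSketchPtr process_elem;
};

int main()
{
    auto status = std::make_shared<QueryStatusSketch>();
    status->query = "SELECT 1";

    CountingSketch counter;
    counter.setProcessListElement(status);
    counter.consume(100);
    return status->processed_rows == 100 ? 0 : 1;
}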
diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index 16abb72cbd4..78ae6b8771f 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -40,7 +40,9 @@ static FillColumnDescription::StepFunction getStepFunction( { #define DECLARE_CASE(NAME) \ case IntervalKind::NAME: \ - return [step, scale, &date_lut](Field & field) { field = Add##NAME##sImpl::execute(static_cast(field.get()), step, date_lut, scale); }; + return [step, scale, &date_lut](Field & field) { \ + field = Add##NAME##sImpl::execute(static_cast(\ + field.get()), static_cast(step), date_lut, scale); }; FOR_EACH_INTERVAL_KIND(DECLARE_CASE) #undef DECLARE_CASE diff --git a/src/Processors/Transforms/MongoDBSource.cpp b/src/Processors/Transforms/MongoDBSource.cpp index b548e8c4184..9eef17cf40d 100644 --- a/src/Processors/Transforms/MongoDBSource.cpp +++ b/src/Processors/Transforms/MongoDBSource.cpp @@ -184,7 +184,7 @@ namespace break; case Poco::MongoDB::ElementTraits::TypeId: assert_cast &>(column).getData().push_back( - static_cast &>(value).value()); + static_cast(static_cast &>(value).value())); break; case Poco::MongoDB::ElementTraits::TypeId: assert_cast &>(column).getData().push_back(static_cast( @@ -282,7 +282,7 @@ namespace ErrorCodes::TYPE_MISMATCH}; assert_cast(column).getData().push_back( - static_cast &>(value).value().epochTime()); + static_cast(static_cast &>(value).value().epochTime())); break; } case ValueType::vtUUID: diff --git a/src/Processors/Transforms/WindowTransform.cpp b/src/Processors/Transforms/WindowTransform.cpp index 07bfb274a86..83b0b202d74 100644 --- a/src/Processors/Transforms/WindowTransform.cpp +++ b/src/Processors/Transforms/WindowTransform.cpp @@ -28,7 +28,6 @@ namespace ErrorCodes { extern const int BAD_ARGUMENTS; extern const int NOT_IMPLEMENTED; - extern const int ILLEGAL_COLUMN; extern const int ILLEGAL_TYPE_OF_ARGUMENT; } @@ -63,20 +62,21 @@ static int compareValuesWithOffset(const IColumn * _compared_column, _compared_column); const auto * reference_column = assert_cast( _reference_column); + + using ValueType = typename ColumnType::ValueType; // Note that the storage type of offset returned by get<> is different, so // we need to specify the type explicitly. - const typename ColumnType::ValueType offset - = _offset.get(); + const ValueType offset = static_cast(_offset.get()); assert(offset >= 0); const auto compared_value_data = compared_column->getDataAt(compared_row); - assert(compared_value_data.size == sizeof(typename ColumnType::ValueType)); - auto compared_value = unalignedLoad( + assert(compared_value_data.size == sizeof(ValueType)); + auto compared_value = unalignedLoad( compared_value_data.data); const auto reference_value_data = reference_column->getDataAt(reference_row); - assert(reference_value_data.size == sizeof(typename ColumnType::ValueType)); - auto reference_value = unalignedLoad( + assert(reference_value_data.size == sizeof(ValueType)); + auto reference_value = unalignedLoad( reference_value_data.data); bool is_overflow; @@ -85,15 +85,6 @@ static int compareValuesWithOffset(const IColumn * _compared_column, else is_overflow = common::addOverflow(reference_value, offset, reference_value); -// fmt::print(stderr, -// "compared [{}] = {}, old ref {}, shifted ref [{}] = {}, offset {} preceding {} overflow {} to negative {}\n", -// compared_row, toString(compared_value), -// // fmt doesn't like char8_t. 
-// static_cast(unalignedLoad(reference_value_data.data)), -// reference_row, toString(reference_value), -// toString(offset), offset_is_preceding, -// is_overflow, offset_is_preceding); - if (is_overflow) { if (offset_is_preceding) @@ -984,22 +975,9 @@ void WindowTransform::writeOutCurrentRow() // FIXME does it also allocate the result on the arena? // We'll have to pass it out with blocks then... - if (a->isState()) - { - /// AggregateFunction's states should be inserted into column using specific way - auto * res_col_aggregate_function = typeid_cast(result_column); - if (!res_col_aggregate_function) - { - throw Exception("State function " + a->getName() + " inserts results into non-state column ", - ErrorCodes::ILLEGAL_COLUMN); - } - res_col_aggregate_function->insertFrom(buf); - } - else - { - a->insertResultInto(buf, *result_column, arena.get()); - } - + /// We should use insertMergeResultInto to insert result into ColumnAggregateFunction + /// correctly if result contains AggregateFunction's states + a->insertMergeResultInto(buf, *result_column, arena.get()); } } diff --git a/src/Processors/Transforms/buildPushingToViewsChain.cpp b/src/Processors/Transforms/buildPushingToViewsChain.cpp index 174aaf67ec5..830f400faf2 100644 --- a/src/Processors/Transforms/buildPushingToViewsChain.cpp +++ b/src/Processors/Transforms/buildPushingToViewsChain.cpp @@ -620,9 +620,10 @@ void PushingToLiveViewSink::consume(Chunk chunk) { Progress local_progress(chunk.getNumRows(), chunk.bytes(), 0); StorageLiveView::writeIntoLiveView(live_view, getHeader().cloneWithColumns(chunk.detachColumns()), context); - auto * process = context->getProcessListElement(); - if (process) + + if (auto process = context->getProcessListElement()) process->updateProgressIn(local_progress); + ProfileEvents::increment(ProfileEvents::SelectedRows, local_progress.read_rows); ProfileEvents::increment(ProfileEvents::SelectedBytes, local_progress.read_bytes); } @@ -643,9 +644,10 @@ void PushingToWindowViewSink::consume(Chunk chunk) Progress local_progress(chunk.getNumRows(), chunk.bytes(), 0); StorageWindowView::writeIntoWindowView( window_view, getHeader().cloneWithColumns(chunk.detachColumns()), context); - auto * process = context->getProcessListElement(); - if (process) + + if (auto process = context->getProcessListElement()) process->updateProgressIn(local_progress); + ProfileEvents::increment(ProfileEvents::SelectedRows, local_progress.read_rows); ProfileEvents::increment(ProfileEvents::SelectedBytes, local_progress.read_bytes); } diff --git a/src/Processors/tests/gtest_exception_on_incorrect_pipeline.cpp b/src/Processors/tests/gtest_exception_on_incorrect_pipeline.cpp index b137eaf0f47..40718bd968a 100644 --- a/src/Processors/tests/gtest_exception_on_incorrect_pipeline.cpp +++ b/src/Processors/tests/gtest_exception_on_incorrect_pipeline.cpp @@ -23,11 +23,11 @@ TEST(Processors, PortsConnected) connect(source->getPort(), sink->getPort()); - Processors processors; - processors.emplace_back(std::move(source)); - processors.emplace_back(std::move(sink)); + auto processors = std::make_shared(); + processors->emplace_back(std::move(source)); + processors->emplace_back(std::move(sink)); - QueryStatus * element = nullptr; + QueryStatusPtr element; PipelineExecutor executor(processors, element); executor.execute(1); } @@ -46,14 +46,14 @@ TEST(Processors, PortsNotConnected) /// connect(source->getPort(), sink->getPort()); - Processors processors; - processors.emplace_back(std::move(source)); - processors.emplace_back(std::move(sink)); + 
auto processors = std::make_shared(); + processors->emplace_back(std::move(source)); + processors->emplace_back(std::move(sink)); #ifndef ABORT_ON_LOGICAL_ERROR try { - QueryStatus * element = nullptr; + QueryStatusPtr element; PipelineExecutor executor(processors, element); executor.execute(1); ASSERT_TRUE(false) << "Should have thrown."; diff --git a/src/QueryPipeline/BlockIO.cpp b/src/QueryPipeline/BlockIO.cpp index 35463ca6be9..9e42e06c722 100644 --- a/src/QueryPipeline/BlockIO.cpp +++ b/src/QueryPipeline/BlockIO.cpp @@ -53,9 +53,8 @@ void BlockIO::setAllDataSent() const /// - internal /// - SHOW PROCESSLIST if (process_list_entry) - (*process_list_entry)->setAllDataSent(); + process_list_entry->getQueryStatus()->setAllDataSent(); } } - diff --git a/src/QueryPipeline/BlockIO.h b/src/QueryPipeline/BlockIO.h index 1f2a8f6f033..b69f86ac684 100644 --- a/src/QueryPipeline/BlockIO.h +++ b/src/QueryPipeline/BlockIO.h @@ -34,9 +34,8 @@ struct BlockIO void onFinish() { if (finish_callback) - { finish_callback(pipeline); - } + pipeline.reset(); } diff --git a/src/QueryPipeline/Pipe.cpp b/src/QueryPipeline/Pipe.cpp index 291739079a2..62a928d814c 100644 --- a/src/QueryPipeline/Pipe.cpp +++ b/src/QueryPipeline/Pipe.cpp @@ -102,7 +102,12 @@ static OutputPort * uniteTotals(const OutputPortRawPtrs & ports, const Block & h return totals_port; } +Pipe::Pipe() : processors(std::make_shared()) +{ +} + Pipe::Pipe(ProcessorPtr source, OutputPort * output, OutputPort * totals, OutputPort * extremes) + : processors(std::make_shared()) { if (!source->getInputs().empty()) throw Exception( @@ -155,11 +160,12 @@ Pipe::Pipe(ProcessorPtr source, OutputPort * output, OutputPort * totals, Output totals_port = totals; extremes_port = extremes; output_ports.push_back(output); - processors.emplace_back(std::move(source)); + processors->emplace_back(std::move(source)); max_parallel_streams = 1; } Pipe::Pipe(ProcessorPtr source) + : processors(std::make_shared()) { checkSource(*source); @@ -168,18 +174,18 @@ Pipe::Pipe(ProcessorPtr source) output_ports.push_back(&source->getOutputs().front()); header = output_ports.front()->getHeader(); - processors.emplace_back(std::move(source)); + processors->emplace_back(std::move(source)); max_parallel_streams = 1; } -Pipe::Pipe(Processors processors_) : processors(std::move(processors_)) +Pipe::Pipe(std::shared_ptr processors_) : processors(std::move(processors_)) { /// Create hash table with processors. 
std::unordered_set set; - for (const auto & processor : processors) + for (const auto & processor : *processors) set.emplace(processor.get()); - for (auto & processor : processors) + for (auto & processor : *processors) { for (const auto & port : processor->getInputs()) { @@ -225,7 +231,7 @@ Pipe::Pipe(Processors processors_) : processors(std::move(processors_)) max_parallel_streams = output_ports.size(); if (collected_processors) - for (const auto & processor : processors) + for (const auto & processor : *processors) collected_processors->emplace_back(processor); } @@ -311,7 +317,7 @@ Pipe Pipe::unitePipes(Pipes pipes, Processors * collected_processors, bool allow if (!allow_empty_header || pipe.header) assertCompatibleHeader(pipe.header, res.header, "Pipe::unitePipes"); - res.processors.insert(res.processors.end(), pipe.processors.begin(), pipe.processors.end()); + res.processors->insert(res.processors->end(), pipe.processors->begin(), pipe.processors->end()); res.output_ports.insert(res.output_ports.end(), pipe.output_ports.begin(), pipe.output_ports.end()); res.max_parallel_streams += pipe.max_parallel_streams; @@ -323,15 +329,15 @@ Pipe Pipe::unitePipes(Pipes pipes, Processors * collected_processors, bool allow extremes.emplace_back(pipe.extremes_port); } - size_t num_processors = res.processors.size(); + size_t num_processors = res.processors->size(); - res.totals_port = uniteTotals(totals, res.header, res.processors); - res.extremes_port = uniteExtremes(extremes, res.header, res.processors); + res.totals_port = uniteTotals(totals, res.header, *res.processors); + res.extremes_port = uniteExtremes(extremes, res.header, *res.processors); if (res.collected_processors) { - for (; num_processors < res.processors.size(); ++num_processors) - res.collected_processors->emplace_back(res.processors[num_processors]); + for (; num_processors < res.processors->size(); ++num_processors) + res.collected_processors->emplace_back(res.processors->at(num_processors)); } return res; @@ -351,7 +357,7 @@ void Pipe::addSource(ProcessorPtr source) collected_processors->emplace_back(source); output_ports.push_back(&source->getOutputs().front()); - processors.emplace_back(std::move(source)); + processors->emplace_back(std::move(source)); max_parallel_streams = std::max(max_parallel_streams, output_ports.size()); } @@ -373,7 +379,7 @@ void Pipe::addTotalsSource(ProcessorPtr source) collected_processors->emplace_back(source); totals_port = &source->getOutputs().front(); - processors.emplace_back(std::move(source)); + processors->emplace_back(std::move(source)); } void Pipe::addExtremesSource(ProcessorPtr source) @@ -393,7 +399,7 @@ void Pipe::addExtremesSource(ProcessorPtr source) collected_processors->emplace_back(source); extremes_port = &source->getOutputs().front(); - processors.emplace_back(std::move(source)); + processors->emplace_back(std::move(source)); } static void dropPort(OutputPort *& port, Processors & processors, Processors * collected_processors) @@ -413,12 +419,12 @@ static void dropPort(OutputPort *& port, Processors & processors, Processors * c void Pipe::dropTotals() { - dropPort(totals_port, processors, collected_processors); + dropPort(totals_port, *processors, collected_processors); } void Pipe::dropExtremes() { - dropPort(extremes_port, processors, collected_processors); + dropPort(extremes_port, *processors, collected_processors); } void Pipe::addTransform(ProcessorPtr transform) @@ -504,7 +510,7 @@ void Pipe::addTransform(ProcessorPtr transform, OutputPort * totals, OutputPort if 
(collected_processors) collected_processors->emplace_back(transform); - processors.emplace_back(std::move(transform)); + processors->emplace_back(std::move(transform)); max_parallel_streams = std::max(max_parallel_streams, output_ports.size()); } @@ -595,7 +601,7 @@ void Pipe::addTransform(ProcessorPtr transform, InputPort * totals, InputPort * if (collected_processors) collected_processors->emplace_back(transform); - processors.emplace_back(std::move(transform)); + processors->emplace_back(std::move(transform)); max_parallel_streams = std::max(max_parallel_streams, output_ports.size()); } @@ -647,7 +653,7 @@ void Pipe::addSimpleTransform(const ProcessorGetterWithStreamKind & getter) if (collected_processors) collected_processors->emplace_back(transform); - processors.emplace_back(std::move(transform)); + processors->emplace_back(std::move(transform)); } }; @@ -698,7 +704,7 @@ void Pipe::addChains(std::vector chains) if (collected_processors) collected_processors->emplace_back(transform); - processors.emplace_back(std::move(transform)); + processors->emplace_back(std::move(transform)); } } @@ -757,7 +763,7 @@ void Pipe::setSinks(const Pipe::ProcessorGetterWithStreamKind & getter) transform = std::make_shared(stream->getHeader()); connect(*stream, transform->getInputs().front()); - processors.emplace_back(std::move(transform)); + processors->emplace_back(std::move(transform)); }; for (auto & port : output_ports) @@ -858,7 +864,7 @@ void Pipe::transform(const Transformer & transformer, bool check_ports) collected_processors->emplace_back(processor); } - processors.insert(processors.end(), new_processors.begin(), new_processors.end()); + processors->insert(processors->end(), new_processors.begin(), new_processors.end()); max_parallel_streams = std::max(max_parallel_streams, output_ports.size()); } diff --git a/src/QueryPipeline/Pipe.h b/src/QueryPipeline/Pipe.h index 79d19a18193..7e30d9c990e 100644 --- a/src/QueryPipeline/Pipe.h +++ b/src/QueryPipeline/Pipe.h @@ -5,6 +5,7 @@ #include #include + namespace DB { @@ -27,13 +28,13 @@ class Pipe public: /// Default constructor creates empty pipe. Generally, you cannot do anything with it except to check it is empty(). /// You cannot get empty pipe in any other way. All transforms check that result pipe is not empty. - Pipe() = default; + Pipe(); /// Create from source. Source must have no input ports and single output. explicit Pipe(ProcessorPtr source); /// Create from source with specified totals end extremes (may be nullptr). Ports should be owned by source. explicit Pipe(ProcessorPtr source, OutputPort * output, OutputPort * totals, OutputPort * extremes); /// Create from processors. Use all not-connected output ports as output_ports. Check invariants. - explicit Pipe(Processors processors_); + explicit Pipe(std::shared_ptr processors_); Pipe(const Pipe & other) = delete; Pipe(Pipe && other) = default; @@ -41,7 +42,7 @@ public: Pipe & operator=(Pipe && other) = default; const Block & getHeader() const { return header; } - bool empty() const { return processors.empty(); } + bool empty() const { return processors->empty(); } size_t numOutputPorts() const { return output_ports.size(); } size_t maxParallelStreams() const { return max_parallel_streams; } OutputPort * getOutputPort(size_t pos) const { return output_ports[pos]; } @@ -96,15 +97,15 @@ public: /// Unite several pipes together. They should have same header. static Pipe unitePipes(Pipes pipes); - /// Get processors from Pipe. 
Use it with cautious, it is easy to loss totals and extremes ports. - static Processors detachProcessors(Pipe pipe) { return std::move(pipe.processors); } + /// Get processors from Pipe. Use it with caution, it is easy to lose totals and extremes ports. + static Processors detachProcessors(Pipe pipe) { return *std::move(pipe.processors); } /// Get processors from Pipe without destroying pipe (used for EXPLAIN to keep QueryPlan). - const Processors & getProcessors() const { return processors; } + const Processors & getProcessors() const { return *processors; } private: /// Header is common for all output below. Block header; - Processors processors; + std::shared_ptr processors; /// Output ports. Totals and extremes are allowed to be empty. OutputPortRawPtrs output_ports; diff --git a/src/QueryPipeline/PipelineResourcesHolder.h b/src/QueryPipeline/PipelineResourcesHolder.h index 46b1024f384..ed9eb68b7ba 100644 --- a/src/QueryPipeline/PipelineResourcesHolder.h +++ b/src/QueryPipeline/PipelineResourcesHolder.h @@ -19,8 +19,9 @@ struct QueryPlanResourceHolder QueryPlanResourceHolder(); QueryPlanResourceHolder(QueryPlanResourceHolder &&) noexcept; ~QueryPlanResourceHolder(); + /// Custom move assignment does not destroy data from lhs. It appends data from rhs to lhs. - QueryPlanResourceHolder& operator=(QueryPlanResourceHolder &&) noexcept; + QueryPlanResourceHolder & operator=(QueryPlanResourceHolder &&) noexcept; /// Some processors may implicitly use Context or temporary Storage created by Interpreter. /// But lifetime of Streams is not nested in lifetime of Interpreters, so we have to store it here, diff --git a/src/QueryPipeline/QueryPipeline.cpp b/src/QueryPipeline/QueryPipeline.cpp index 31b18c7f7f0..e0da4c4f0eb 100644 --- a/src/QueryPipeline/QueryPipeline.cpp +++ b/src/QueryPipeline/QueryPipeline.cpp @@ -21,6 +21,7 @@ #include #include + namespace DB { @@ -29,7 +30,11 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -QueryPipeline::QueryPipeline() = default; +QueryPipeline::QueryPipeline() + : processors(std::make_shared()) +{ +} + QueryPipeline::QueryPipeline(QueryPipeline &&) noexcept = default; QueryPipeline & QueryPipeline::operator=(QueryPipeline &&) noexcept = default; QueryPipeline::~QueryPipeline() = default; @@ -210,16 +215,16 @@ static void initRowsBeforeLimit(IOutputFormat * output_format) QueryPipeline::QueryPipeline( QueryPlanResourceHolder resources_, - Processors processors_) + std::shared_ptr processors_) : resources(std::move(resources_)) , processors(std::move(processors_)) { - checkCompleted(processors); + checkCompleted(*processors); } QueryPipeline::QueryPipeline( QueryPlanResourceHolder resources_, - Processors processors_, + std::shared_ptr processors_, InputPort * input_) : resources(std::move(resources_)) , processors(std::move(processors_)) @@ -231,7 +236,7 @@ QueryPipeline::QueryPipeline( "Cannot create pushing QueryPipeline because its input port is connected or null"); bool found_input = false; - for (const auto & processor : processors) + for (const auto & processor : *processors) { for (const auto & in : processor->getInputs()) { @@ -255,7 +260,7 @@ QueryPipeline::QueryPipeline(std::shared_ptr source) : QueryPipeline(Pi QueryPipeline::QueryPipeline( QueryPlanResourceHolder resources_, - Processors processors_, + std::shared_ptr processors_, OutputPort * output_, OutputPort * totals_, OutputPort * extremes_) @@ -265,7 +270,7 @@ QueryPipeline::QueryPipeline( , totals(totals_) , extremes(extremes_) { - checkPulling(processors, output, totals, 
extremes); + checkPulling(*processors, output, totals, extremes); } QueryPipeline::QueryPipeline(Pipe pipe) @@ -278,32 +283,34 @@ QueryPipeline::QueryPipeline(Pipe pipe) extremes = pipe.getExtremesPort(); processors = std::move(pipe.processors); - checkPulling(processors, output, totals, extremes); + checkPulling(*processors, output, totals, extremes); } else { processors = std::move(pipe.processors); - checkCompleted(processors); + checkCompleted(*processors); } } QueryPipeline::QueryPipeline(Chain chain) : resources(chain.detachResources()) + , processors(std::make_shared()) , input(&chain.getInputPort()) , num_threads(chain.getNumThreads()) { - processors.reserve(chain.getProcessors().size() + 1); + processors->reserve(chain.getProcessors().size() + 1); for (auto processor : chain.getProcessors()) - processors.emplace_back(std::move(processor)); + processors->emplace_back(std::move(processor)); auto sink = std::make_shared(chain.getOutputPort().getHeader()); connect(chain.getOutputPort(), sink->getPort()); - processors.emplace_back(std::move(sink)); + processors->emplace_back(std::move(sink)); input = &chain.getInputPort(); } QueryPipeline::QueryPipeline(std::shared_ptr format) + : processors(std::make_shared()) { auto & format_main = format->getPort(IOutputFormat::PortKind::Main); auto & format_totals = format->getPort(IOutputFormat::PortKind::Totals); @@ -313,14 +320,14 @@ QueryPipeline::QueryPipeline(std::shared_ptr format) { auto source = std::make_shared(format_totals.getHeader()); totals = &source->getPort(); - processors.emplace_back(std::move(source)); + processors->emplace_back(std::move(source)); } if (!extremes) { auto source = std::make_shared(format_extremes.getHeader()); extremes = &source->getPort(); - processors.emplace_back(std::move(source)); + processors->emplace_back(std::move(source)); } connect(*totals, format_totals); @@ -332,7 +339,7 @@ QueryPipeline::QueryPipeline(std::shared_ptr format) output_format = format.get(); - processors.emplace_back(std::move(format)); + processors->emplace_back(std::move(format)); } static void drop(OutputPort *& port, Processors & processors) @@ -354,11 +361,11 @@ void QueryPipeline::complete(std::shared_ptr sink) if (!pulling()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Pipeline must be pulling to be completed with sink"); - drop(totals, processors); - drop(extremes, processors); + drop(totals, *processors); + drop(extremes, *processors); connect(*output, sink->getPort()); - processors.emplace_back(std::move(sink)); + processors->emplace_back(std::move(sink)); output = nullptr; } @@ -369,17 +376,17 @@ void QueryPipeline::complete(Chain chain) resources = chain.detachResources(); - drop(totals, processors); - drop(extremes, processors); + drop(totals, *processors); + drop(extremes, *processors); - processors.reserve(processors.size() + chain.getProcessors().size() + 1); + processors->reserve(processors->size() + chain.getProcessors().size() + 1); for (auto processor : chain.getProcessors()) - processors.emplace_back(std::move(processor)); + processors->emplace_back(std::move(processor)); auto sink = std::make_shared(chain.getOutputPort().getHeader()); connect(*output, chain.getInputPort()); connect(chain.getOutputPort(), sink->getPort()); - processors.emplace_back(std::move(sink)); + processors->emplace_back(std::move(sink)); output = nullptr; } @@ -400,7 +407,7 @@ void QueryPipeline::complete(Pipe pipe) input = nullptr; auto pipe_processors = Pipe::detachProcessors(std::move(pipe)); - processors.insert(processors.end(), 
pipe_processors.begin(), pipe_processors.end()); + processors->insert(processors->end(), pipe_processors.begin(), pipe_processors.end()); } static void addMaterializing(OutputPort *& output, Processors & processors) @@ -421,9 +428,9 @@ void QueryPipeline::complete(std::shared_ptr format) if (format->expectMaterializedColumns()) { - addMaterializing(output, processors); - addMaterializing(totals, processors); - addMaterializing(extremes, processors); + addMaterializing(output, *processors); + addMaterializing(totals, *processors); + addMaterializing(extremes, *processors); } auto & format_main = format->getPort(IOutputFormat::PortKind::Main); @@ -434,14 +441,14 @@ void QueryPipeline::complete(std::shared_ptr format) { auto source = std::make_shared(format_totals.getHeader()); totals = &source->getPort(); - processors.emplace_back(std::move(source)); + processors->emplace_back(std::move(source)); } if (!extremes) { auto source = std::make_shared(format_extremes.getHeader()); extremes = &source->getPort(); - processors.emplace_back(std::move(source)); + processors->emplace_back(std::move(source)); } connect(*output, format_main); @@ -455,7 +462,7 @@ void QueryPipeline::complete(std::shared_ptr format) initRowsBeforeLimit(format.get()); output_format = format.get(); - processors.emplace_back(std::move(format)); + processors->emplace_back(std::move(format)); } Block QueryPipeline::getHeader() const @@ -475,7 +482,7 @@ void QueryPipeline::setProgressCallback(const ProgressCallback & callback) progress_callback = callback; } -void QueryPipeline::setProcessListElement(QueryStatus * elem) +void QueryPipeline::setProcessListElement(QueryStatusPtr elem) { process_list_element = elem; @@ -504,7 +511,7 @@ void QueryPipeline::setLimitsAndQuota(const StreamLocalLimits & limits, std::sha transform->setQuota(quota_); connect(*output, transform->getInputPort()); output = &transform->getOutputPort(); - processors.emplace_back(std::move(transform)); + processors->emplace_back(std::move(transform)); } @@ -529,7 +536,7 @@ void QueryPipeline::addCompletedPipeline(QueryPipeline other) throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot add not completed pipeline"); resources = std::move(other.resources); - processors.insert(processors.end(), other.processors.begin(), other.processors.end()); + processors->insert(processors->end(), other.processors->begin(), other.processors->end()); } void QueryPipeline::reset() @@ -560,9 +567,9 @@ void QueryPipeline::convertStructureTo(const ColumnsWithTypeAndName & columns) ActionsDAG::MatchColumnsMode::Position); auto actions = std::make_shared(std::move(converting)); - addExpression(output, actions, processors); - addExpression(totals, actions, processors); - addExpression(extremes, actions, processors); + addExpression(output, actions, *processors); + addExpression(totals, actions, *processors); + addExpression(extremes, actions, *processors); } std::unique_ptr QueryPipeline::getReadProgressCallback() const diff --git a/src/QueryPipeline/QueryPipeline.h b/src/QueryPipeline/QueryPipeline.h index 1b88ede3349..63f444e6ec1 100644 --- a/src/QueryPipeline/QueryPipeline.h +++ b/src/QueryPipeline/QueryPipeline.h @@ -4,6 +4,7 @@ #include #include + namespace DB { @@ -15,6 +16,7 @@ using ProcessorPtr = std::shared_ptr; using Processors = std::vector; class QueryStatus; +using QueryStatusPtr = std::shared_ptr; struct Progress; using ProgressCallback = std::function; @@ -34,6 +36,7 @@ class ReadProgressCallback; struct ColumnWithTypeAndName; using ColumnsWithTypeAndName = std::vector; + 
class QueryPipeline { public: @@ -58,23 +61,23 @@ public: /// completed QueryPipeline( QueryPlanResourceHolder resources_, - Processors processors_); + std::shared_ptr processors_); /// pushing QueryPipeline( QueryPlanResourceHolder resources_, - Processors processors_, + std::shared_ptr processors_, InputPort * input_); /// pulling QueryPipeline( QueryPlanResourceHolder resources_, - Processors processors_, + std::shared_ptr processors_, OutputPort * output_, OutputPort * totals_ = nullptr, OutputPort * extremes_ = nullptr); - bool initialized() const { return !processors.empty(); } + bool initialized() const { return !processors->empty(); } /// When initialized, exactly one of the following is true. /// Use PullingPipelineExecutor or PullingAsyncPipelineExecutor. bool pulling() const { return output != nullptr; } @@ -97,7 +100,7 @@ public: size_t getNumThreads() const { return num_threads; } void setNumThreads(size_t num_threads_) { num_threads = num_threads_; } - void setProcessListElement(QueryStatus * elem); + void setProcessListElement(QueryStatusPtr elem); void setProgressCallback(const ProgressCallback & callback); void setLimitsAndQuota(const StreamLocalLimits & limits, std::shared_ptr quota_); bool tryGetResultRowsAndBytes(UInt64 & result_rows, UInt64 & result_bytes) const; @@ -119,7 +122,7 @@ public: /// Add processors and resources from other pipeline. Other pipeline should be completed. void addCompletedPipeline(QueryPipeline other); - const Processors & getProcessors() const { return processors; } + const Processors & getProcessors() const { return *processors; } /// For pulling pipeline, convert structure to expected. /// Trash, need to remove later. @@ -134,7 +137,7 @@ private: std::shared_ptr quota; bool update_profile_events = true; - Processors processors; + std::shared_ptr processors; InputPort * input = nullptr; @@ -142,7 +145,7 @@ private: OutputPort * totals = nullptr; OutputPort * extremes = nullptr; - QueryStatus * process_list_element = nullptr; + QueryStatusPtr process_list_element; IOutputFormat * output_format = nullptr; diff --git a/src/QueryPipeline/QueryPipelineBuilder.cpp b/src/QueryPipeline/QueryPipelineBuilder.cpp index 440f123e876..812bd155b42 100644 --- a/src/QueryPipeline/QueryPipelineBuilder.cpp +++ b/src/QueryPipeline/QueryPipelineBuilder.cpp @@ -327,9 +327,9 @@ QueryPipelineBuilderPtr QueryPipelineBuilder::mergePipelines( collected_processors->emplace_back(transform); left->pipe.output_ports.front() = &transform->getOutputs().front(); - left->pipe.processors.emplace_back(transform); + left->pipe.processors->emplace_back(transform); - left->pipe.processors.insert(left->pipe.processors.end(), right->pipe.processors.begin(), right->pipe.processors.end()); + left->pipe.processors->insert(left->pipe.processors->end(), right->pipe.processors->begin(), right->pipe.processors->end()); left->pipe.header = left->pipe.output_ports.front()->getHeader(); left->pipe.max_parallel_streams = std::max(left->pipe.max_parallel_streams, right->pipe.max_parallel_streams); return left; @@ -383,7 +383,7 @@ std::unique_ptr QueryPipelineBuilder::joinPipelinesRightLe /// Collect the NEW processors for the right pipeline. QueryPipelineProcessorsCollector collector(*right); /// Remember the last step of the right pipeline. 
- ExpressionStep* step = typeid_cast(right->pipe.processors.back()->getQueryPlanStep()); + ExpressionStep* step = typeid_cast(right->pipe.processors->back()->getQueryPlanStep()); if (!step) { throw Exception(ErrorCodes::LOGICAL_ERROR, "The top step of the right pipeline should be ExpressionStep"); @@ -467,7 +467,7 @@ std::unique_ptr QueryPipelineBuilder::joinPipelinesRightLe if (collected_processors) collected_processors->emplace_back(joining); - left->pipe.processors.emplace_back(std::move(joining)); + left->pipe.processors->emplace_back(std::move(joining)); } if (left->hasTotals()) @@ -482,14 +482,14 @@ std::unique_ptr QueryPipelineBuilder::joinPipelinesRightLe if (collected_processors) collected_processors->emplace_back(joining); - left->pipe.processors.emplace_back(std::move(joining)); + left->pipe.processors->emplace_back(std::move(joining)); } /// Move the collected processors to the last step in the right pipeline. Processors processors = collector.detachProcessors(); step->appendExtraProcessors(processors); - left->pipe.processors.insert(left->pipe.processors.end(), right->pipe.processors.begin(), right->pipe.processors.end()); + left->pipe.processors->insert(left->pipe.processors->end(), right->pipe.processors->begin(), right->pipe.processors->end()); left->resources = std::move(right->resources); left->pipe.header = left->pipe.output_ports.front()->getHeader(); left->pipe.max_parallel_streams = std::max(left->pipe.max_parallel_streams, right->pipe.max_parallel_streams); @@ -537,7 +537,7 @@ void QueryPipelineBuilder::addPipelineBefore(QueryPipelineBuilder pipeline) addTransform(std::move(processor)); } -void QueryPipelineBuilder::setProcessListElement(QueryStatus * elem) +void QueryPipelineBuilder::setProcessListElement(QueryStatusPtr elem) { process_list_element = elem; } diff --git a/src/QueryPipeline/QueryPipelineBuilder.h b/src/QueryPipeline/QueryPipelineBuilder.h index 13b4d681b7d..5a0694100eb 100644 --- a/src/QueryPipeline/QueryPipelineBuilder.h +++ b/src/QueryPipeline/QueryPipelineBuilder.h @@ -148,7 +148,7 @@ public: const Block & getHeader() const { return pipe.getHeader(); } - void setProcessListElement(QueryStatus * elem); + void setProcessListElement(QueryStatusPtr elem); void setProgressCallback(ProgressCallback callback); /// Recommend number of threads for pipeline execution. @@ -189,7 +189,7 @@ private: /// Sometimes, more streams are created then the number of threads for more optimal execution. 
size_t max_threads = 0; - QueryStatus * process_list_element = nullptr; + QueryStatusPtr process_list_element; ProgressCallback progress_callback = nullptr; void checkInitialized(); diff --git a/src/QueryPipeline/ReadProgressCallback.cpp b/src/QueryPipeline/ReadProgressCallback.cpp index bbdabb8e8d8..6692b0f96bd 100644 --- a/src/QueryPipeline/ReadProgressCallback.cpp +++ b/src/QueryPipeline/ReadProgressCallback.cpp @@ -2,6 +2,7 @@ #include #include + namespace ProfileEvents { extern const Event SelectedRows; @@ -17,7 +18,7 @@ namespace ErrorCodes extern const int TOO_MANY_BYTES; } -void ReadProgressCallback::setProcessListElement(QueryStatus * elem) +void ReadProgressCallback::setProcessListElement(QueryStatusPtr elem) { process_list_elem = elem; if (!elem) diff --git a/src/QueryPipeline/ReadProgressCallback.h b/src/QueryPipeline/ReadProgressCallback.h index f64123ef39d..c8f0d4cf537 100644 --- a/src/QueryPipeline/ReadProgressCallback.h +++ b/src/QueryPipeline/ReadProgressCallback.h @@ -4,20 +4,23 @@ #include #include + namespace DB { class QueryStatus; +using QueryStatusPtr = std::shared_ptr; class EnabledQuota; struct StorageLimits; using StorageLimitsList = std::list; + class ReadProgressCallback { public: void setQuota(const std::shared_ptr & quota_) { quota = quota_; } - void setProcessListElement(QueryStatus * elem); + void setProcessListElement(QueryStatusPtr elem); void setProgressCallback(const ProgressCallback & callback) { progress_callback = callback; } void addTotalRowsApprox(size_t value) { total_rows_approx += value; } @@ -30,7 +33,7 @@ public: private: std::shared_ptr quota; ProgressCallback progress_callback; - QueryStatus * process_list_elem = nullptr; + QueryStatusPtr process_list_elem; /// The approximate total number of rows to read. For progress bar. 
std::atomic_size_t total_rows_approx = 0; diff --git a/src/QueryPipeline/RemoteQueryExecutorReadContext.cpp b/src/QueryPipeline/RemoteQueryExecutorReadContext.cpp index 43bb5fc7083..4596bbb8961 100644 --- a/src/QueryPipeline/RemoteQueryExecutorReadContext.cpp +++ b/src/QueryPipeline/RemoteQueryExecutorReadContext.cpp @@ -126,12 +126,12 @@ bool RemoteQueryExecutorReadContext::checkTimeoutImpl(bool blocking) epoll_event events[3]; events[0].data.fd = events[1].data.fd = events[2].data.fd = -1; - int num_events = epoll.getManyReady(3, events, blocking); + size_t num_events = epoll.getManyReady(3, events, blocking); bool is_socket_ready = false; bool is_pipe_alarmed = false; - for (int i = 0; i < num_events; ++i) + for (size_t i = 0; i < num_events; ++i) { if (events[i].data.fd == connection_fd) is_socket_ready = true; diff --git a/src/Server/GRPCServer.cpp b/src/Server/GRPCServer.cpp index a2a2db75d68..a9373555af7 100644 --- a/src/Server/GRPCServer.cpp +++ b/src/Server/GRPCServer.cpp @@ -1082,7 +1082,8 @@ namespace NamesAndTypesList columns; for (size_t column_idx : collections::range(external_table.columns_size())) { - const auto & name_and_type = external_table.columns(column_idx); + /// TODO: consider changing protocol + const auto & name_and_type = external_table.columns(static_cast(column_idx)); NameAndTypePair column; column.name = name_and_type.name(); if (column.name.empty()) diff --git a/src/Server/HTTP/WriteBufferFromHTTPServerResponse.cpp b/src/Server/HTTP/WriteBufferFromHTTPServerResponse.cpp index c8ae9c6e07c..c8015cfd185 100644 --- a/src/Server/HTTP/WriteBufferFromHTTPServerResponse.cpp +++ b/src/Server/HTTP/WriteBufferFromHTTPServerResponse.cpp @@ -136,7 +136,7 @@ void WriteBufferFromHTTPServerResponse::nextImpl() WriteBufferFromHTTPServerResponse::WriteBufferFromHTTPServerResponse( HTTPServerResponse & response_, bool is_http_method_head_, - unsigned keep_alive_timeout_, + size_t keep_alive_timeout_, bool compress_, CompressionMethod compression_method_) : BufferWithOwnMemory(DBMS_DEFAULT_BUFFER_SIZE) diff --git a/src/Server/HTTP/WriteBufferFromHTTPServerResponse.h b/src/Server/HTTP/WriteBufferFromHTTPServerResponse.h index 6905d5df8b5..ce677616755 100644 --- a/src/Server/HTTP/WriteBufferFromHTTPServerResponse.h +++ b/src/Server/HTTP/WriteBufferFromHTTPServerResponse.h @@ -36,7 +36,7 @@ public: WriteBufferFromHTTPServerResponse( HTTPServerResponse & response_, bool is_http_method_head_, - unsigned keep_alive_timeout_, + size_t keep_alive_timeout_, bool compress_ = false, /// If true - set Content-Encoding header and compress the result. CompressionMethod compression_method_ = CompressionMethod::None); @@ -105,7 +105,7 @@ private: bool is_http_method_head; bool add_cors_header = false; - unsigned keep_alive_timeout = 0; + size_t keep_alive_timeout = 0; bool compress = false; CompressionMethod compression_method; int compression_level = 1; diff --git a/src/Server/HTTPHandler.cpp b/src/Server/HTTPHandler.cpp index d02da92c613..2b63524fb79 100644 --- a/src/Server/HTTPHandler.cpp +++ b/src/Server/HTTPHandler.cpp @@ -622,8 +622,10 @@ void HTTPHandler::processQuery( /// Request body can be compressed using algorithm specified in the Content-Encoding header. 
String http_request_compression_method_str = request.get("Content-Encoding", ""); + int zstd_window_log_max = static_cast(context->getSettingsRef().zstd_window_log_max); auto in_post = wrapReadBufferWithCompressionMethod( - wrapReadBufferReference(request.getStream()), chooseCompressionMethod({}, http_request_compression_method_str), context->getSettingsRef().zstd_window_log_max); + wrapReadBufferReference(request.getStream()), + chooseCompressionMethod({}, http_request_compression_method_str), zstd_window_log_max); /// The data can also be compressed using incompatible internal algorithm. This is indicated by /// 'decompress' query parameter. @@ -749,7 +751,7 @@ void HTTPHandler::processQuery( /// (using Accept-Encoding header) and 'enable_http_compression' setting is turned on. used_output.out->setCompression(client_supports_http_compression && settings.enable_http_compression); if (client_supports_http_compression) - used_output.out->setCompressionLevel(settings.http_zlib_compression_level); + used_output.out->setCompressionLevel(static_cast(settings.http_zlib_compression_level)); used_output.out->setSendProgress(settings.send_progress_in_http_headers); used_output.out->setSendProgressInterval(settings.http_headers_progress_interval_ms); diff --git a/src/Server/KeeperTCPHandler.cpp b/src/Server/KeeperTCPHandler.cpp index 94e3597f88e..38a10926036 100644 --- a/src/Server/KeeperTCPHandler.cpp +++ b/src/Server/KeeperTCPHandler.cpp @@ -126,7 +126,8 @@ struct SocketInterruptablePollWrapper do { Poco::Timestamp start; - rc = epoll_wait(epollfd, evout, 2, remaining_time.totalMilliseconds()); + /// TODO: use epoll_pwait() for more precise timers + rc = epoll_wait(epollfd, evout, 2, static_cast(remaining_time.totalMilliseconds())); if (rc < 0 && errno == EINTR) { Poco::Timestamp end; @@ -156,7 +157,7 @@ struct SocketInterruptablePollWrapper do { Poco::Timestamp start; - rc = ::poll(poll_buf, 2, remaining_time.totalMilliseconds()); + rc = ::poll(poll_buf, 2, static_cast(remaining_time.totalMilliseconds())); if (rc < 0 && errno == POCO_EINTR) { Poco::Timestamp end; @@ -325,6 +326,7 @@ void KeeperTCPHandler::runImpl() int32_t four_letter_cmd = header; if (!isHandShake(four_letter_cmd)) { + connected.store(true, std::memory_order_relaxed); tryExecuteFourLetterWordCmd(four_letter_cmd); return; } @@ -380,7 +382,7 @@ void KeeperTCPHandler::runImpl() response->zxid); UInt8 single_byte = 1; - [[maybe_unused]] int result = write(response_fd, &single_byte, sizeof(single_byte)); + [[maybe_unused]] ssize_t result = write(response_fd, &single_byte, sizeof(single_byte)); }; keeper_dispatcher->registerSession(session_id, response_callback); @@ -395,6 +397,7 @@ void KeeperTCPHandler::runImpl() }; session_stopwatch.start(); + connected.store(true, std::memory_order_release); bool close_received = false; try @@ -584,6 +587,9 @@ KeeperConnectionStats & KeeperTCPHandler::getConnectionStats() void KeeperTCPHandler::dumpStats(WriteBufferFromOwnString & buf, bool brief) { + if (!connected.load(std::memory_order_acquire)) + return; + auto & stats = getConnectionStats(); writeText(' ', buf); diff --git a/src/Server/KeeperTCPHandler.h b/src/Server/KeeperTCPHandler.h index e9bd211628f..ffdd50b805a 100644 --- a/src/Server/KeeperTCPHandler.h +++ b/src/Server/KeeperTCPHandler.h @@ -81,6 +81,8 @@ private: std::shared_ptr in; std::shared_ptr out; + std::atomic connected{false}; + void runImpl(); void sendHandshake(bool has_leader); diff --git a/src/Server/MySQLHandler.cpp b/src/Server/MySQLHandler.cpp index 
8e701956d29..8e2d99e2909 100644 --- a/src/Server/MySQLHandler.cpp +++ b/src/Server/MySQLHandler.cpp @@ -63,8 +63,11 @@ static String showTableStatusReplacementQuery(const String & query); static String killConnectionIdReplacementQuery(const String & query); static String selectLimitReplacementQuery(const String & query); -MySQLHandler::MySQLHandler(IServer & server_, TCPServer & tcp_server_, const Poco::Net::StreamSocket & socket_, - bool ssl_enabled, size_t connection_id_) +MySQLHandler::MySQLHandler( + IServer & server_, + TCPServer & tcp_server_, + const Poco::Net::StreamSocket & socket_, + bool ssl_enabled, uint32_t connection_id_) : Poco::Net::TCPServerConnection(socket_) , server(server_) , tcp_server(tcp_server_) @@ -215,7 +218,7 @@ void MySQLHandler::finishHandshake(MySQLProtocol::ConnectionPhase::HandshakeResp auto read_bytes = [this, &buf, &pos, &packet_size](size_t count) -> void { while (pos < count) { - int ret = socket().receiveBytes(buf + pos, packet_size - pos); + int ret = socket().receiveBytes(buf + pos, static_cast(packet_size - pos)); if (ret == 0) { throw Exception("Cannot read all data. Bytes read: " + std::to_string(pos) + ". Bytes expected: 3", ErrorCodes::CANNOT_READ_ALL_DATA); @@ -376,7 +379,14 @@ void MySQLHandler::finishHandshakeSSL( } #if USE_SSL -MySQLHandlerSSL::MySQLHandlerSSL(IServer & server_, TCPServer & tcp_server_, const Poco::Net::StreamSocket & socket_, bool ssl_enabled, size_t connection_id_, RSA & public_key_, RSA & private_key_) +MySQLHandlerSSL::MySQLHandlerSSL( + IServer & server_, + TCPServer & tcp_server_, + const Poco::Net::StreamSocket & socket_, + bool ssl_enabled, + uint32_t connection_id_, + RSA & public_key_, + RSA & private_key_) : MySQLHandler(server_, tcp_server_, socket_, ssl_enabled, connection_id_) , public_key(public_key_) , private_key(private_key_) diff --git a/src/Server/MySQLHandler.h b/src/Server/MySQLHandler.h index 2f43d471c40..3366e8792c9 100644 --- a/src/Server/MySQLHandler.h +++ b/src/Server/MySQLHandler.h @@ -31,7 +31,12 @@ class TCPServer; class MySQLHandler : public Poco::Net::TCPServerConnection { public: - MySQLHandler(IServer & server_, TCPServer & tcp_server_, const Poco::Net::StreamSocket & socket_, bool ssl_enabled, size_t connection_id_); + MySQLHandler( + IServer & server_, + TCPServer & tcp_server_, + const Poco::Net::StreamSocket & socket_, + bool ssl_enabled, + uint32_t connection_id_); void run() final; @@ -57,7 +62,7 @@ protected: IServer & server; TCPServer & tcp_server; Poco::Logger * log; - UInt64 connection_id = 0; + uint32_t connection_id = 0; uint32_t server_capabilities = 0; uint32_t client_capabilities = 0; @@ -81,7 +86,14 @@ protected: class MySQLHandlerSSL : public MySQLHandler { public: - MySQLHandlerSSL(IServer & server_, TCPServer & tcp_server_, const Poco::Net::StreamSocket & socket_, bool ssl_enabled, size_t connection_id_, RSA & public_key_, RSA & private_key_); + MySQLHandlerSSL( + IServer & server_, + TCPServer & tcp_server_, + const Poco::Net::StreamSocket & socket_, + bool ssl_enabled, + uint32_t connection_id_, + RSA & public_key_, + RSA & private_key_); private: void authPluginSSL() override; diff --git a/src/Server/MySQLHandlerFactory.cpp b/src/Server/MySQLHandlerFactory.cpp index c02a3015945..cbcddbb444a 100644 --- a/src/Server/MySQLHandlerFactory.cpp +++ b/src/Server/MySQLHandlerFactory.cpp @@ -127,7 +127,7 @@ void MySQLHandlerFactory::generateRSAKeys() Poco::Net::TCPServerConnection * MySQLHandlerFactory::createConnection(const Poco::Net::StreamSocket & socket, TCPServer & 
tcp_server) { - size_t connection_id = last_connection_id++; + uint32_t connection_id = last_connection_id++; LOG_TRACE(log, "MySQL connection. Id: {}. Address: {}", connection_id, socket.peerAddress().toString()); #if USE_SSL return new MySQLHandlerSSL(server, tcp_server, socket, ssl_enabled, connection_id, *public_key, *private_key); diff --git a/src/Server/MySQLHandlerFactory.h b/src/Server/MySQLHandlerFactory.h index 38caae922ee..fa4ce93f765 100644 --- a/src/Server/MySQLHandlerFactory.h +++ b/src/Server/MySQLHandlerFactory.h @@ -36,7 +36,7 @@ private: bool ssl_enabled = false; #endif - std::atomic last_connection_id = 0; + std::atomic last_connection_id = 0; public: explicit MySQLHandlerFactory(IServer & server_); diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index 25a832ab7e3..73b91b29f31 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -377,8 +377,8 @@ void TCPHandler::runImpl() after_send_progress.restart(); if (state.io.pipeline.pushing()) - /// FIXME: check explicitly that insert query suggests to receive data via native protocol, { + /// FIXME: check explicitly that insert query suggests to receive data via native protocol, state.need_receive_data_for_insert = true; processInsertQuery(); state.io.onFinish(); @@ -390,27 +390,30 @@ void TCPHandler::runImpl() } else if (state.io.pipeline.completed()) { - CompletedPipelineExecutor executor(state.io.pipeline); - /// Should not check for cancel in case of input. - if (!state.need_receive_data_for_input) { - auto callback = [this]() + CompletedPipelineExecutor executor(state.io.pipeline); + + /// Should not check for cancel in case of input. + if (!state.need_receive_data_for_input) { - std::lock_guard lock(fatal_error_mutex); + auto callback = [this]() + { + std::lock_guard lock(fatal_error_mutex); - if (isQueryCancelled()) - return true; + if (isQueryCancelled()) + return true; - sendProgress(); - sendSelectProfileEvents(); - sendLogs(); + sendProgress(); + sendSelectProfileEvents(); + sendLogs(); - return false; - }; + return false; + }; - executor.setCancelCallback(callback, interactive_delay / 1000); + executor.setCancelCallback(callback, interactive_delay / 1000); + } + executor.execute(); } - executor.execute(); state.io.onFinish(); /// Send final progress after calling onFinish(), since it will update the progress. @@ -841,7 +844,7 @@ void TCPHandler::processTablesStatusRequest() if (auto * replicated_table = dynamic_cast(table.get())) { status.is_replicated = true; - status.absolute_delay = replicated_table->getAbsoluteDelay(); + status.absolute_delay = static_cast(replicated_table->getAbsoluteDelay()); } else status.is_replicated = false; //-V1048 diff --git a/src/Server/TCPHandler.h b/src/Server/TCPHandler.h index c36ce1e9378..9c8d3ca60f3 100644 --- a/src/Server/TCPHandler.h +++ b/src/Server/TCPHandler.h @@ -159,7 +159,7 @@ private: UInt64 client_version_major = 0; UInt64 client_version_minor = 0; UInt64 client_version_patch = 0; - UInt64 client_tcp_protocol_version = 0; + UInt32 client_tcp_protocol_version = 0; String quota_key; /// Connection settings, which are extracted from a context. 
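The connection id handed out above is narrowed from size_t to uint32_t to match the 4-byte connection id field of the MySQL handshake packet, while uniqueness across threads still comes from post-incrementing an atomic counter. A minimal sketch of that allocation pattern, with hypothetical names and assuming only the standard <atomic> header:

#include <atomic>
#include <cstdint>

/// Hypothetical illustration of the pattern used by MySQLHandlerFactory:
/// one shared atomic counter, post-incremented per connection, sized to the
/// protocol's 4-byte id (it simply wraps after 2^32 connections).
class ConnectionIdAllocator
{
public:
    uint32_t allocate() { return last_connection_id++; }

private:
    std::atomic<uint32_t> last_connection_id{0};
};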
diff --git a/src/Storages/AlterCommands.cpp b/src/Storages/AlterCommands.cpp index dcd7abae68a..c1e7cefd19e 100644 --- a/src/Storages/AlterCommands.cpp +++ b/src/Storages/AlterCommands.cpp @@ -755,9 +755,10 @@ bool isMetadataOnlyConversion(const IDataType * from, const IDataType * to) const auto * nullable_from = typeid_cast(from); const auto * nullable_to = typeid_cast(to); - if (nullable_from && nullable_to) + if (nullable_to) { - from = nullable_from->getNestedType().get(); + /// Here we allow a conversion X -> Nullable(X) to make a metadata-only conversion. + from = nullable_from ? nullable_from->getNestedType().get() : from; to = nullable_to->getNestedType().get(); continue; } diff --git a/src/Storages/Distributed/DirectoryMonitor.cpp b/src/Storages/Distributed/DirectoryMonitor.cpp index 9dc3d773e01..4b9667aa95d 100644 --- a/src/Storages/Distributed/DirectoryMonitor.cpp +++ b/src/Storages/Distributed/DirectoryMonitor.cpp @@ -106,7 +106,7 @@ namespace for (size_t replica_index = 1; replica_index <= replicas; ++replica_index) { - address.replica_index = replica_index; + address.replica_index = static_cast(replica_index); make_connection(address); } } diff --git a/src/Storages/ExternalDataSourceConfiguration.h b/src/Storages/ExternalDataSourceConfiguration.h index 0890247eb45..5736336983a 100644 --- a/src/Storages/ExternalDataSourceConfiguration.h +++ b/src/Storages/ExternalDataSourceConfiguration.h @@ -117,7 +117,7 @@ struct URLBasedDataSourceConfiguration struct StorageS3Configuration : URLBasedDataSourceConfiguration { - S3Settings::AuthSettings auth_settings; + S3::AuthSettings auth_settings; S3Settings::ReadWriteSettings rw_settings; }; diff --git a/src/Storages/FileLog/DirectoryWatcherBase.cpp b/src/Storages/FileLog/DirectoryWatcherBase.cpp index 005e1e5fd1b..efcd70d6742 100644 --- a/src/Storages/FileLog/DirectoryWatcherBase.cpp +++ b/src/Storages/FileLog/DirectoryWatcherBase.cpp @@ -70,10 +70,10 @@ void DirectoryWatcherBase::watchFunc() while (!stopped) { const auto & settings = owner.storage.getFileLogSettings(); - if (poll(&pfd, 1, milliseconds_to_wait) > 0 && pfd.revents & POLLIN) + if (poll(&pfd, 1, static_cast(milliseconds_to_wait)) > 0 && pfd.revents & POLLIN) { milliseconds_to_wait = settings->poll_directory_watch_events_backoff_init.totalMilliseconds(); - int n = read(fd, buffer.data(), buffer.size()); + ssize_t n = read(fd, buffer.data(), buffer.size()); int i = 0; if (n > 0) { diff --git a/src/Storages/FileLog/StorageFileLog.cpp b/src/Storages/FileLog/StorageFileLog.cpp index 7848b75deec..722843a7ab6 100644 --- a/src/Storages/FileLog/StorageFileLog.cpp +++ b/src/Storages/FileLog/StorageFileLog.cpp @@ -315,7 +315,7 @@ Pipe StorageFileLog::read( ContextPtr local_context, QueryProcessingStage::Enum /* processed_stage */, size_t /* max_block_size */, - unsigned /* num_streams */) + size_t /* num_streams */) { /// If there are MVs depended on this table, we just forbid reading if (!local_context->getSettingsRef().stream_like_engine_allow_direct_select) diff --git a/src/Storages/FileLog/StorageFileLog.h b/src/Storages/FileLog/StorageFileLog.h index 4295a8a764a..56f2d40ef5a 100644 --- a/src/Storages/FileLog/StorageFileLog.h +++ b/src/Storages/FileLog/StorageFileLog.h @@ -54,7 +54,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; void drop() override; diff --git a/src/Storages/Freeze.cpp b/src/Storages/Freeze.cpp index a2f0395b001..74adf3de0ae 100644 --- 
a/src/Storages/Freeze.cpp +++ b/src/Storages/Freeze.cpp @@ -194,7 +194,7 @@ bool Unfreezer::removeFreezedPart(DiskPtr disk, const String & path, const Strin if (meta.load(disk, path)) { FreezeMetaData::clean(disk, path); - return StorageReplicatedMergeTree::removeSharedDetachedPart(disk, path, part_name, meta.table_shared_id, meta.zookeeper_name, meta.replica_name, "", local_context, zookeeper); + return StorageReplicatedMergeTree::removeSharedDetachedPart(disk, path, part_name, meta.table_shared_id, meta.replica_name, "", local_context, zookeeper); } } diff --git a/src/Storages/HDFS/AsynchronousReadBufferFromHDFS.h b/src/Storages/HDFS/AsynchronousReadBufferFromHDFS.h index dd77fc70358..3726d3aae96 100644 --- a/src/Storages/HDFS/AsynchronousReadBufferFromHDFS.h +++ b/src/Storages/HDFS/AsynchronousReadBufferFromHDFS.h @@ -52,7 +52,7 @@ private: std::future asyncReadInto(char * data, size_t size); IAsynchronousReader & reader; - Int32 priority; + size_t priority; std::shared_ptr impl; std::future prefetch_future; Memory<> prefetch_buffer; diff --git a/src/Storages/HDFS/ReadBufferFromHDFS.cpp b/src/Storages/HDFS/ReadBufferFromHDFS.cpp index 4aebcd6f6ab..3f5c81dc01b 100644 --- a/src/Storages/HDFS/ReadBufferFromHDFS.cpp +++ b/src/Storages/HDFS/ReadBufferFromHDFS.cpp @@ -3,6 +3,7 @@ #if USE_HDFS #include #include +#include #include #include @@ -90,7 +91,7 @@ struct ReadBufferFromHDFS::ReadBufferFromHDFSImpl : public BufferWithOwnMemory(num_bytes_to_read)); if (bytes_read < 0) throw Exception(ErrorCodes::NETWORK_ERROR, "Fail to read from HDFS: {}, file path: {}. Error: {}", diff --git a/src/Storages/HDFS/StorageHDFS.cpp b/src/Storages/HDFS/StorageHDFS.cpp index 138c92ea62d..bbabd523c45 100644 --- a/src/Storages/HDFS/StorageHDFS.cpp +++ b/src/Storages/HDFS/StorageHDFS.cpp @@ -214,8 +214,8 @@ ColumnsDescription StorageHDFS::getTableStructureFromData( return nullptr; auto compression = chooseCompressionMethod(*it, compression_method); auto impl = std::make_unique(uri_without_path, *it++, ctx->getGlobalContext()->getConfigRef(), ctx->getReadSettings()); - const auto zstd_window_log_max = ctx->getSettingsRef().zstd_window_log_max; - return wrapReadBufferWithCompressionMethod(std::move(impl), compression, zstd_window_log_max); + const Int64 zstd_window_log_max = ctx->getSettingsRef().zstd_window_log_max; + return wrapReadBufferWithCompressionMethod(std::move(impl), compression, static_cast(zstd_window_log_max)); }; ColumnsDescription columns; @@ -356,8 +356,8 @@ bool HDFSSource::initialize() auto compression = chooseCompressionMethod(path_from_uri, storage->compression_method); auto impl = std::make_unique( uri_without_path, path_from_uri, getContext()->getGlobalContext()->getConfigRef(), getContext()->getReadSettings()); - const auto zstd_window_log_max = getContext()->getSettingsRef().zstd_window_log_max; - read_buf = wrapReadBufferWithCompressionMethod(std::move(impl), compression, zstd_window_log_max); + const Int64 zstd_window_log_max = getContext()->getSettingsRef().zstd_window_log_max; + read_buf = wrapReadBufferWithCompressionMethod(std::move(impl), compression, static_cast(zstd_window_log_max)); auto input_format = getContext()->getInputFormat(storage->format_name, *read_buf, block_for_format, max_block_size); @@ -550,7 +550,7 @@ Pipe StorageHDFS::read( ContextPtr context_, QueryProcessingStage::Enum /*processed_stage*/, size_t max_block_size, - unsigned num_streams) + size_t num_streams) { std::shared_ptr iterator_wrapper{nullptr}; if (distributed_processing) diff --git 
a/src/Storages/HDFS/StorageHDFS.h b/src/Storages/HDFS/StorageHDFS.h index 90a42d0c692..b641f5bfb43 100644 --- a/src/Storages/HDFS/StorageHDFS.h +++ b/src/Storages/HDFS/StorageHDFS.h @@ -40,7 +40,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; SinkToStoragePtr write(const ASTPtr & query, const StorageMetadataPtr & metadata_snapshot, ContextPtr context) override; diff --git a/src/Storages/HDFS/StorageHDFSCluster.cpp b/src/Storages/HDFS/StorageHDFSCluster.cpp index 467203c58f6..5f9d5ea3d6d 100644 --- a/src/Storages/HDFS/StorageHDFSCluster.cpp +++ b/src/Storages/HDFS/StorageHDFSCluster.cpp @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include @@ -25,6 +24,8 @@ #include #include #include +#include +#include #include @@ -56,6 +57,7 @@ StorageHDFSCluster::StorageHDFSCluster( { auto columns = StorageHDFS::getTableStructureFromData(format_name, uri_, compression_method, context_); storage_metadata.setColumns(columns); + add_columns_structure_to_query = true; } else storage_metadata.setColumns(columns_); @@ -72,7 +74,7 @@ Pipe StorageHDFSCluster::read( ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t /*max_block_size*/, - unsigned /*num_streams*/) + size_t /*num_streams*/) { auto cluster = context->getCluster(cluster_name)->getClusterWithReplicasAsShards(context->getSettingsRef()); @@ -92,6 +94,11 @@ Pipe StorageHDFSCluster::read( const bool add_agg_info = processed_stage == QueryProcessingStage::WithMergeableState; + auto query_to_send = query_info.original_query->clone(); + if (add_columns_structure_to_query) + addColumnsStructureToQueryWithClusterEngine( + query_to_send, StorageDictionary::generateNamesAndTypesDescription(storage_snapshot->metadata->getColumns().getAll()), 3, getName()); + for (const auto & replicas : cluster->getShardsAddresses()) { /// There will be only one replica, because we consider each replica as a shard @@ -110,7 +117,7 @@ Pipe StorageHDFSCluster::read( /// So, task_identifier is passed as constructor argument. It is more obvious. 
auto remote_query_executor = std::make_shared( connection, - queryToString(query_info.original_query), + queryToString(query_to_send), header, context, /*throttler=*/nullptr, diff --git a/src/Storages/HDFS/StorageHDFSCluster.h b/src/Storages/HDFS/StorageHDFSCluster.h index 3239a1e4076..adcc3f5db6e 100644 --- a/src/Storages/HDFS/StorageHDFSCluster.h +++ b/src/Storages/HDFS/StorageHDFSCluster.h @@ -32,7 +32,7 @@ public: std::string getName() const override { return "HDFSCluster"; } Pipe read(const Names &, const StorageSnapshotPtr &, SelectQueryInfo &, - ContextPtr, QueryProcessingStage::Enum, size_t /*max_block_size*/, unsigned /*num_streams*/) override; + ContextPtr, QueryProcessingStage::Enum, size_t /*max_block_size*/, size_t /*num_streams*/) override; QueryProcessingStage::Enum getQueryProcessingStage(ContextPtr, QueryProcessingStage::Enum, const StorageSnapshotPtr &, SelectQueryInfo &) const override; @@ -44,6 +44,7 @@ private: String uri; String format_name; String compression_method; + bool add_columns_structure_to_query = false; }; diff --git a/src/Storages/HDFS/WriteBufferFromHDFS.cpp b/src/Storages/HDFS/WriteBufferFromHDFS.cpp index a179f484652..1f952ec2bd9 100644 --- a/src/Storages/HDFS/WriteBufferFromHDFS.cpp +++ b/src/Storages/HDFS/WriteBufferFromHDFS.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include namespace DB @@ -57,7 +58,7 @@ struct WriteBufferFromHDFS::WriteBufferFromHDFSImpl int write(const char * start, size_t size) const { - int bytes_written = hdfsWrite(fs.get(), fout, start, size); + int bytes_written = hdfsWrite(fs.get(), fout, start, safe_cast(size)); if (write_settings.remote_throttler) write_settings.remote_throttler->add(bytes_written); diff --git a/src/Storages/Hive/HiveFile.cpp b/src/Storages/Hive/HiveFile.cpp index fc08c046f93..8f5b1b5f5fd 100644 --- a/src/Storages/Hive/HiveFile.cpp +++ b/src/Storages/Hive/HiveFile.cpp @@ -210,7 +210,7 @@ std::unique_ptr HiveORCFile::buildMinMaxIndex(c { size_t pos = it->second; /// Attention: column statistics start from 1. 0 has special purpose. 
- const orc::ColumnStatistics * col_stats = statistics->getColumnStatistics(pos + 1); + const orc::ColumnStatistics * col_stats = statistics->getColumnStatistics(static_cast(pos + 1)); idx->hyperrectangle[i] = buildRange(col_stats); } ++i; @@ -297,7 +297,7 @@ void HiveParquetFile::loadSplitMinMaxIndexesImpl() const auto * schema = meta->schema(); for (size_t pos = 0; pos < num_cols; ++pos) { - String column{schema->Column(pos)->name()}; + String column{schema->Column(static_cast(pos))->name()}; boost::to_lower(column); parquet_column_positions[column] = pos; } @@ -306,7 +306,7 @@ void HiveParquetFile::loadSplitMinMaxIndexesImpl() split_minmax_idxes.resize(num_row_groups); for (size_t i = 0; i < num_row_groups; ++i) { - auto row_group_meta = meta->RowGroup(i); + auto row_group_meta = meta->RowGroup(static_cast(i)); split_minmax_idxes[i] = std::make_shared(); split_minmax_idxes[i]->hyperrectangle.resize(num_cols); @@ -321,7 +321,7 @@ void HiveParquetFile::loadSplitMinMaxIndexesImpl() continue; size_t pos = mit->second; - auto col_chunk = row_group_meta->ColumnChunk(pos); + auto col_chunk = row_group_meta->ColumnChunk(static_cast(pos)); if (!col_chunk->is_stats_set()) continue; diff --git a/src/Storages/Hive/StorageHive.cpp b/src/Storages/Hive/StorageHive.cpp index 01ee5a8c3c5..47d7382f7ca 100644 --- a/src/Storages/Hive/StorageHive.cpp +++ b/src/Storages/Hive/StorageHive.cpp @@ -727,7 +727,7 @@ HiveFilePtr StorageHive::getHiveFileIfNeeded( hive_file->getPath(), hive_file->describeMinMaxIndex(sub_minmax_idxes[i])); - skip_splits.insert(i); + skip_splits.insert(static_cast(i)); } } hive_file->setSkipSplits(skip_splits); @@ -749,7 +749,7 @@ Pipe StorageHive::read( ContextPtr context_, QueryProcessingStage::Enum /* processed_stage */, size_t max_block_size, - unsigned num_streams) + size_t num_streams) { lazyInitialize(); @@ -829,7 +829,7 @@ Pipe StorageHive::read( } HiveFiles StorageHive::collectHiveFiles( - unsigned max_threads, + size_t max_threads, const SelectQueryInfo & query_info, const HiveTableMetadataPtr & hive_table_metadata, const HDFSFSPtr & fs, @@ -937,7 +937,13 @@ StorageHive::totalRowsImpl(const Settings & settings, const SelectQueryInfo & qu auto hive_table_metadata = hive_metastore_client->getTableMetadata(hive_database, hive_table); HDFSBuilderWrapper builder = createHDFSBuilder(hdfs_namenode_url, getContext()->getGlobalContext()->getConfigRef()); HDFSFSPtr fs = createHDFSFS(builder.get()); - HiveFiles hive_files = collectHiveFiles(settings.max_threads, query_info, hive_table_metadata, fs, context_, prune_level); + HiveFiles hive_files = collectHiveFiles( + settings.max_threads, + query_info, + hive_table_metadata, + fs, + context_, + prune_level); UInt64 total_rows = 0; for (const auto & hive_file : hive_files) diff --git a/src/Storages/Hive/StorageHive.h b/src/Storages/Hive/StorageHive.h index 9c02d228f97..363042621c7 100644 --- a/src/Storages/Hive/StorageHive.h +++ b/src/Storages/Hive/StorageHive.h @@ -60,7 +60,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; SinkToStoragePtr write(const ASTPtr & /*query*/, const StorageMetadataPtr & metadata_snapshot, ContextPtr /*context*/) override; @@ -98,7 +98,7 @@ private: void initMinMaxIndexExpression(); HiveFiles collectHiveFiles( - unsigned max_threads, + size_t max_threads, const SelectQueryInfo & query_info, const HiveTableMetadataPtr & hive_table_metadata, const HDFSFSPtr & fs, diff --git 
a/src/Storages/IStorage.cpp b/src/Storages/IStorage.cpp index 5b12b720f1c..7a704a17f4d 100644 --- a/src/Storages/IStorage.cpp +++ b/src/Storages/IStorage.cpp @@ -108,7 +108,7 @@ Pipe IStorage::watch( ContextPtr /*context*/, QueryProcessingStage::Enum & /*processed_stage*/, size_t /*max_block_size*/, - unsigned /*num_streams*/) + size_t /*num_streams*/) { throw Exception("Method watch is not supported by storage " + getName(), ErrorCodes::NOT_IMPLEMENTED); } @@ -120,7 +120,7 @@ Pipe IStorage::read( ContextPtr /*context*/, QueryProcessingStage::Enum /*processed_stage*/, size_t /*max_block_size*/, - unsigned /*num_streams*/) + size_t /*num_streams*/) { throw Exception("Method read is not supported by storage " + getName(), ErrorCodes::NOT_IMPLEMENTED); } @@ -133,7 +133,7 @@ void IStorage::read( ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) + size_t num_streams) { auto pipe = read(column_names, storage_snapshot, query_info, context, processed_stage, max_block_size, num_streams); readFromPipe(query_plan, std::move(pipe), column_names, storage_snapshot, query_info, context, getName()); diff --git a/src/Storages/IStorage.h b/src/Storages/IStorage.h index 242f17d6f20..fd48d22b12b 100644 --- a/src/Storages/IStorage.h +++ b/src/Storages/IStorage.h @@ -323,7 +323,7 @@ public: ContextPtr /*context*/, QueryProcessingStage::Enum & /*processed_stage*/, size_t /*max_block_size*/, - unsigned /*num_streams*/); + size_t /*num_streams*/); /// Returns true if FINAL modifier must be added to SELECT query depending on required columns. /// It's needed for ReplacingMergeTree wrappers such as MaterializedMySQL and MaterializedPostrgeSQL @@ -357,7 +357,7 @@ private: ContextPtr /*context*/, QueryProcessingStage::Enum /*processed_stage*/, size_t /*max_block_size*/, - unsigned /*num_streams*/); + size_t /*num_streams*/); public: /// Other version of read which adds reading step to query plan. @@ -370,7 +370,7 @@ public: ContextPtr /*context*/, QueryProcessingStage::Enum /*processed_stage*/, size_t /*max_block_size*/, - unsigned /*num_streams*/); + size_t /*num_streams*/); /** Writes the data to a table. * Receives a description of the query, which can contain information about the data write method. 
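Changing the num_streams parameter of IStorage::read and IStorage::watch from unsigned to size_t forces every storage engine in the following hunks to update its override in lockstep; because the derived methods are marked override, any storage missed by the refactor stops compiling instead of silently hiding the base method. A minimal sketch of that effect, with hypothetical class names:

struct StorageBase
{
    virtual ~StorageBase() = default;
    virtual void read(size_t num_streams) = 0;        /// signature after the refactor
};

struct MyStorage : StorageBase
{
    void read(size_t num_streams) override {}         /// matches the base, compiles
    /// void read(unsigned num_streams) override {}   /// would fail: does not override any base class method
};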
diff --git a/src/Storages/Kafka/StorageKafka.cpp b/src/Storages/Kafka/StorageKafka.cpp index fa52850fb39..8e4dd78379e 100644 --- a/src/Storages/Kafka/StorageKafka.cpp +++ b/src/Storages/Kafka/StorageKafka.cpp @@ -212,7 +212,7 @@ StorageKafka::StorageKafka( , schema_name(getContext()->getMacros()->expand(kafka_settings->kafka_schema.value)) , num_consumers(kafka_settings->kafka_num_consumers.value) , log(&Poco::Logger::get("StorageKafka (" + table_id_.table_name + ")")) - , semaphore(0, num_consumers) + , semaphore(0, static_cast(num_consumers)) , intermediate_commit(kafka_settings->kafka_commit_every_batch.value) , settings_adjustments(createSettingsAdjustments()) , thread_per_consumer(kafka_settings->kafka_thread_per_consumer.value) @@ -291,7 +291,7 @@ Pipe StorageKafka::read( ContextPtr local_context, QueryProcessingStage::Enum /* processed_stage */, size_t /* max_block_size */, - unsigned /* num_streams */) + size_t /* num_streams */) { if (num_created_consumers == 0) return {}; diff --git a/src/Storages/Kafka/StorageKafka.h b/src/Storages/Kafka/StorageKafka.h index 77bad6e17a9..c1c67b19c51 100644 --- a/src/Storages/Kafka/StorageKafka.h +++ b/src/Storages/Kafka/StorageKafka.h @@ -53,7 +53,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; SinkToStoragePtr write( const ASTPtr & query, diff --git a/src/Storages/LiveView/StorageBlocks.h b/src/Storages/LiveView/StorageBlocks.h index bc860a1fa3c..a732ada1da2 100644 --- a/src/Storages/LiveView/StorageBlocks.h +++ b/src/Storages/LiveView/StorageBlocks.h @@ -46,7 +46,7 @@ public: ContextPtr /*context*/, QueryProcessingStage::Enum /*processed_stage*/, size_t /*max_block_size*/, - unsigned /*num_streams*/) override + size_t /*num_streams*/) override { return Pipe::unitePipes(std::move(pipes)); } diff --git a/src/Storages/LiveView/StorageLiveView.cpp b/src/Storages/LiveView/StorageLiveView.cpp index e3d19d0a433..3d27205d638 100644 --- a/src/Storages/LiveView/StorageLiveView.cpp +++ b/src/Storages/LiveView/StorageLiveView.cpp @@ -531,7 +531,7 @@ Pipe StorageLiveView::read( ContextPtr /*context*/, QueryProcessingStage::Enum /*processed_stage*/, const size_t /*max_block_size*/, - const unsigned /*num_streams*/) + const size_t /*num_streams*/) { std::lock_guard lock(mutex); @@ -556,7 +556,7 @@ Pipe StorageLiveView::watch( ContextPtr local_context, QueryProcessingStage::Enum & processed_stage, size_t /*max_block_size*/, - const unsigned /*num_streams*/) + const size_t /*num_streams*/) { ASTWatchQuery & query = typeid_cast(*query_info.query); diff --git a/src/Storages/LiveView/StorageLiveView.h b/src/Storages/LiveView/StorageLiveView.h index c6a0379e2ab..31b1c425709 100644 --- a/src/Storages/LiveView/StorageLiveView.h +++ b/src/Storages/LiveView/StorageLiveView.h @@ -143,7 +143,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; Pipe watch( const Names & column_names, @@ -151,7 +151,7 @@ public: ContextPtr context, QueryProcessingStage::Enum & processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; std::shared_ptr getBlocksPtr() { return blocks_ptr; } MergeableBlocksPtr getMergeableBlocks() { return mergeable_blocks; } diff --git a/src/Storages/MeiliSearch/SourceMeiliSearch.cpp b/src/Storages/MeiliSearch/SourceMeiliSearch.cpp index 8e37e469e96..b516ad8d0cf 100644 --- 
a/src/Storages/MeiliSearch/SourceMeiliSearch.cpp +++ b/src/Storages/MeiliSearch/SourceMeiliSearch.cpp @@ -174,7 +174,7 @@ size_t MeiliSearchSource::parseJSON(MutableColumns & columns, const JSON & jres) { ++cnt_fields; const auto & name = kv_pair.getName(); - int pos = description.sample_block.getPositionByName(name); + size_t pos = description.sample_block.getPositionByName(name); MutableColumnPtr & col = columns[pos]; DataTypePtr type_ptr = description.sample_block.getByPosition(pos).type; insertWithTypeId(col, kv_pair.getValue(), type_ptr); diff --git a/src/Storages/MeiliSearch/StorageMeiliSearch.cpp b/src/Storages/MeiliSearch/StorageMeiliSearch.cpp index c5966d9e322..30d49edbb10 100644 --- a/src/Storages/MeiliSearch/StorageMeiliSearch.cpp +++ b/src/Storages/MeiliSearch/StorageMeiliSearch.cpp @@ -80,7 +80,7 @@ Pipe StorageMeiliSearch::read( ContextPtr /*context*/, QueryProcessingStage::Enum /*processed_stage*/, size_t max_block_size, - unsigned) + size_t /*num_streams*/) { storage_snapshot->check(column_names); diff --git a/src/Storages/MeiliSearch/StorageMeiliSearch.h b/src/Storages/MeiliSearch/StorageMeiliSearch.h index d7a2697730c..5fa7ac2c0e3 100644 --- a/src/Storages/MeiliSearch/StorageMeiliSearch.h +++ b/src/Storages/MeiliSearch/StorageMeiliSearch.h @@ -25,7 +25,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; SinkToStoragePtr write(const ASTPtr & query, const StorageMetadataPtr & metadata_snapshot, ContextPtr local_context) override; diff --git a/src/Storages/MergeTree/DataPartStorageOnDisk.cpp b/src/Storages/MergeTree/DataPartStorageOnDisk.cpp index e2a2f3f793f..7b36a9873e4 100644 --- a/src/Storages/MergeTree/DataPartStorageOnDisk.cpp +++ b/src/Storages/MergeTree/DataPartStorageOnDisk.cpp @@ -6,12 +6,12 @@ #include #include #include -#include #include #include #include #include #include +#include namespace DB { @@ -29,6 +29,16 @@ DataPartStorageOnDisk::DataPartStorageOnDisk(VolumePtr volume_, std::string root { } +DataPartStorageOnDisk::DataPartStorageOnDisk( + VolumePtr volume_, std::string root_path_, std::string part_dir_, DiskTransactionPtr transaction_) + : volume(std::move(volume_)) + , root_path(std::move(root_path_)) + , part_dir(std::move(part_dir_)) + , transaction(std::move(transaction_)) + , has_shared_transaction(transaction != nullptr) +{ +} + std::string DataPartStorageOnDisk::getFullPath() const { return fs::path(volume->getDisk()->getPath()) / root_path / part_dir / ""; @@ -49,6 +59,11 @@ std::string DataPartStorageOnDisk::getFullRootPath() const return fs::path(volume->getDisk()->getPath()) / root_path / ""; } +MutableDataPartStoragePtr DataPartStorageOnDisk::getProjection(const std::string & name) +{ + return std::shared_ptr(new DataPartStorageOnDisk(volume, std::string(fs::path(root_path) / part_dir), name, transaction)); +} + DataPartStoragePtr DataPartStorageOnDisk::getProjection(const std::string & name) const { return std::make_shared(volume, std::string(fs::path(root_path) / part_dir), name); @@ -113,6 +128,7 @@ static UInt64 calculateTotalSizeOnDiskImpl(const DiskPtr & disk, const String & { if (disk->isFile(from)) return disk->getFileSize(from); + std::vector files; disk->listFiles(from, files); UInt64 res = 0; @@ -135,75 +151,11 @@ std::unique_ptr DataPartStorageOnDisk::readFile( return volume->getDisk()->readFile(fs::path(root_path) / part_dir / name, settings, read_hint, file_size); } -static std::unique_ptr openForReading(const 
DiskPtr & disk, const String & path) -{ - size_t file_size = disk->getFileSize(path); - return disk->readFile(path, ReadSettings().adjustBufferSize(file_size), file_size); -} - -void DataPartStorageOnDisk::loadVersionMetadata(VersionMetadata & version, Poco::Logger * log) const -{ - std::string version_file_name = fs::path(root_path) / part_dir / "txn_version.txt"; - String tmp_version_file_name = version_file_name + ".tmp"; - DiskPtr disk = volume->getDisk(); - - auto remove_tmp_file = [&]() - { - auto last_modified = disk->getLastModified(tmp_version_file_name); - auto buf = openForReading(disk, tmp_version_file_name); - String content; - readStringUntilEOF(content, *buf); - LOG_WARNING(log, "Found file {} that was last modified on {}, has size {} and the following content: {}", - tmp_version_file_name, last_modified.epochTime(), content.size(), content); - disk->removeFile(tmp_version_file_name); - }; - - if (disk->exists(version_file_name)) - { - auto buf = openForReading(disk, version_file_name); - version.read(*buf); - if (disk->exists(tmp_version_file_name)) - remove_tmp_file(); - return; - } - - /// Four (?) cases are possible: - /// 1. Part was created without transactions. - /// 2. Version metadata file was not renamed from *.tmp on part creation. - /// 3. Version metadata were written to *.tmp file, but hard restart happened before fsync. - /// 4. Fsyncs in storeVersionMetadata() work incorrectly. - - if (!disk->exists(tmp_version_file_name)) - { - /// Case 1. - /// We do not have version metadata and transactions history for old parts, - /// so let's consider that such parts were created by some ancient transaction - /// and were committed with some prehistoric CSN. - /// NOTE It might be Case 3, but version metadata file is written on part creation before other files, - /// so it's not Case 3 if part is not broken. - version.setCreationTID(Tx::PrehistoricTID, nullptr); - version.creation_csn = Tx::PrehistoricCSN; - return; - } - - /// Case 2. - /// Content of *.tmp file may be broken, just use fake TID. - /// Transaction was not committed if *.tmp file was not renamed, so we should complete rollback by removing part. - version.setCreationTID(Tx::DummyTID, nullptr); - version.creation_csn = Tx::RolledBackCSN; - remove_tmp_file(); -} - void DataPartStorageOnDisk::checkConsistency(const MergeTreeDataPartChecksums & checksums) const { checksums.checkSizes(volume->getDisk(), getRelativePath()); } -DataPartStorageBuilderPtr DataPartStorageOnDisk::getBuilder() const -{ - return std::make_shared(volume, root_path, part_dir); -} - void DataPartStorageOnDisk::remove( CanRemoveCallback && can_remove_callback, const MergeTreeDataPartChecksums & checksums, @@ -273,7 +225,7 @@ void DataPartStorageOnDisk::remove( try { disk->moveDirectory(from, to); - onRename(root_path, part_dir_without_slash); + part_dir = part_dir_without_slash; } catch (const Exception & e) { @@ -406,14 +358,18 @@ void DataPartStorageOnDisk::clearDirectory( } } -std::string DataPartStorageOnDisk::getRelativePathForPrefix(Poco::Logger * log, const String & prefix, bool detached) const +std::optional DataPartStorageOnDisk::getRelativePathForPrefix(Poco::Logger * log, const String & prefix, bool detached, bool broken) const { + assert(!broken || detached); String res; auto full_relative_path = fs::path(root_path); if (detached) full_relative_path /= "detached"; + std::optional original_checksums_content; + std::optional original_files_list; + for (int try_no = 0; try_no < 10; ++try_no) { res = (prefix.empty() ? 
"" : prefix + "_") + part_dir + (try_no ? "_try" + DB::toString(try_no) : ""); @@ -421,15 +377,67 @@ std::string DataPartStorageOnDisk::getRelativePathForPrefix(Poco::Logger * log, if (!volume->getDisk()->exists(full_relative_path / res)) return res; + if (broken && looksLikeBrokenDetachedPartHasTheSameContent(res, original_checksums_content, original_files_list)) + { + LOG_WARNING(log, "Directory {} (to detach to) already exists, " + "but its content looks similar to content of the broken part which we are going to detach. " + "Assuming it was already cloned to detached, will not do it again to avoid redundant copies of broken part.", res); + return {}; + } + LOG_WARNING(log, "Directory {} (to detach to) already exists. Will detach to directory with '_tryN' suffix.", res); } return res; } -void DataPartStorageBuilderOnDisk::setRelativePath(const std::string & path) +bool DataPartStorageOnDisk::looksLikeBrokenDetachedPartHasTheSameContent(const String & detached_part_path, + std::optional & original_checksums_content, + std::optional & original_files_list) const { - part_dir = path; + /// We cannot know for sure that content of detached part is the same, + /// but in most cases it's enough to compare checksums.txt and list of files. + + if (!exists("checksums.txt")) + return false; + + auto detached_full_path = fs::path(root_path) / "detached" / detached_part_path; + auto disk = volume->getDisk(); + if (!disk->exists(detached_full_path / "checksums.txt")) + return false; + + if (!original_checksums_content) + { + auto in = disk->readFile(detached_full_path / "checksums.txt", /* settings */ {}, /* read_hint */ {}, /* file_size */ {}); + original_checksums_content.emplace(); + readStringUntilEOF(*original_checksums_content, *in); + } + + if (original_checksums_content->empty()) + return false; + + auto part_full_path = fs::path(root_path) / part_dir; + String detached_checksums_content; + { + auto in = readFile("checksums.txt", /* settings */ {}, /* read_hint */ {}, /* file_size */ {}); + readStringUntilEOF(detached_checksums_content, *in); + } + + if (original_checksums_content != detached_checksums_content) + return false; + + if (!original_files_list) + { + original_files_list.emplace(); + disk->listFiles(part_full_path, *original_files_list); + std::sort(original_files_list->begin(), original_files_list->end()); + } + + Strings detached_files_list; + disk->listFiles(detached_full_path, detached_files_list); + std::sort(detached_files_list.begin(), detached_files_list.end()); + + return original_files_list == detached_files_list; } std::string DataPartStorageOnDisk::getDiskName() const @@ -462,7 +470,7 @@ bool DataPartStorageOnDisk::isBroken() const return volume->getDisk()->isBroken(); } -void DataPartStorageOnDisk::syncRevision(UInt64 revision) +void DataPartStorageOnDisk::syncRevision(UInt64 revision) const { volume->getDisk()->syncRevision(revision); } @@ -482,11 +490,6 @@ std::string DataPartStorageOnDisk::getDiskPath() const return volume->getDisk()->getPath(); } -DataPartStorageOnDisk::DisksSet::const_iterator DataPartStorageOnDisk::isStoredOnDisk(const DisksSet & disks) const -{ - return disks.find(volume->getDisk()); -} - ReservationPtr DataPartStorageOnDisk::reserve(UInt64 bytes) const { auto res = volume->reserve(bytes); @@ -501,159 +504,6 @@ ReservationPtr DataPartStorageOnDisk::tryReserve(UInt64 bytes) const return volume->reserve(bytes); } -size_t DataPartStorageOnDisk::getVolumeIndex(const IStoragePolicy & storage_policy) const -{ - return 
storage_policy.getVolumeIndexByDisk(volume->getDisk()); -} - -void DataPartStorageOnDisk::writeChecksums(const MergeTreeDataPartChecksums & checksums, const WriteSettings & settings) const -{ - std::string path = fs::path(root_path) / part_dir / "checksums.txt"; - - try - { - { - auto out = volume->getDisk()->writeFile(path + ".tmp", 4096, WriteMode::Rewrite, settings); - checksums.write(*out); - } - - volume->getDisk()->moveFile(path + ".tmp", path); - } - catch (...) - { - try - { - if (volume->getDisk()->exists(path + ".tmp")) - volume->getDisk()->removeFile(path + ".tmp"); - } - catch (...) - { - tryLogCurrentException("DataPartStorageOnDisk"); - } - - throw; - } -} - -void DataPartStorageOnDisk::writeColumns(const NamesAndTypesList & columns, const WriteSettings & settings) const -{ - std::string path = fs::path(root_path) / part_dir / "columns.txt"; - - try - { - auto buf = volume->getDisk()->writeFile(path + ".tmp", 4096, WriteMode::Rewrite, settings); - columns.writeText(*buf); - buf->finalize(); - - volume->getDisk()->moveFile(path + ".tmp", path); - } - catch (...) - { - try - { - if (volume->getDisk()->exists(path + ".tmp")) - volume->getDisk()->removeFile(path + ".tmp"); - } - catch (...) - { - tryLogCurrentException("DataPartStorageOnDisk"); - } - - throw; - } -} - -void DataPartStorageOnDisk::writeVersionMetadata(const VersionMetadata & version, bool fsync_part_dir) const -{ - std::string path = fs::path(root_path) / part_dir / "txn_version.txt"; - try - { - { - /// TODO IDisk interface does not allow to open file with O_EXCL flag (for DiskLocal), - /// so we create empty file at first (expecting that createFile throws if file already exists) - /// and then overwrite it. - volume->getDisk()->createFile(path + ".tmp"); - auto buf = volume->getDisk()->writeFile(path + ".tmp", 256); - version.write(*buf); - buf->finalize(); - buf->sync(); - } - - SyncGuardPtr sync_guard; - if (fsync_part_dir) - sync_guard = volume->getDisk()->getDirectorySyncGuard(getRelativePath()); - volume->getDisk()->replaceFile(path + ".tmp", path); - - } - catch (...) - { - try - { - if (volume->getDisk()->exists(path + ".tmp")) - volume->getDisk()->removeFile(path + ".tmp"); - } - catch (...) - { - tryLogCurrentException("DataPartStorageOnDisk"); - } - - throw; - } -} - -void DataPartStorageOnDisk::appendCSNToVersionMetadata(const VersionMetadata & version, VersionMetadata::WhichCSN which_csn) const -{ - /// Small enough appends to file are usually atomic, - /// so we append new metadata instead of rewriting file to reduce number of fsyncs. - /// We don't need to do fsync when writing CSN, because in case of hard restart - /// we will be able to restore CSN from transaction log in Keeper. 
- - std::string version_file_name = fs::path(root_path) / part_dir / "txn_version.txt"; - DiskPtr disk = volume->getDisk(); - auto out = disk->writeFile(version_file_name, 256, WriteMode::Append); - version.writeCSN(*out, which_csn); - out->finalize(); -} - -void DataPartStorageOnDisk::appendRemovalTIDToVersionMetadata(const VersionMetadata & version, bool clear) const -{ - String version_file_name = fs::path(root_path) / part_dir / "txn_version.txt"; - DiskPtr disk = volume->getDisk(); - auto out = disk->writeFile(version_file_name, 256, WriteMode::Append); - version.writeRemovalTID(*out, clear); - out->finalize(); - - /// fsync is not required when we clearing removal TID, because after hard restart we will fix metadata - if (!clear) - out->sync(); -} - -void DataPartStorageOnDisk::writeDeleteOnDestroyMarker(Poco::Logger * log) const -{ - String marker_path = fs::path(root_path) / part_dir / "delete-on-destroy.txt"; - auto disk = volume->getDisk(); - try - { - volume->getDisk()->createFile(marker_path); - } - catch (Poco::Exception & e) - { - LOG_ERROR(log, "{} (while creating DeleteOnDestroy marker: {})", e.what(), backQuote(fullPath(disk, marker_path))); - } -} - -void DataPartStorageOnDisk::removeDeleteOnDestroyMarker() const -{ - std::string delete_on_destroy_file_name = fs::path(root_path) / part_dir / "delete-on-destroy.txt"; - volume->getDisk()->removeFileIfExists(delete_on_destroy_file_name); -} - -void DataPartStorageOnDisk::removeVersionMetadata() const -{ - std::string version_file_name = fs::path(root_path) / part_dir / "txn_version.txt"; - volume->getDisk()->removeFileIfExists(version_file_name); -} - String DataPartStorageOnDisk::getUniqueId() const { auto disk = volume->getDisk(); @@ -663,16 +513,6 @@ String DataPartStorageOnDisk::getUniqueId() const return disk->getUniqueId(fs::path(getRelativePath()) / "checksums.txt"); } -bool DataPartStorageOnDisk::shallParticipateInMerges(const IStoragePolicy & storage_policy) const -{ - /// `IMergeTreeDataPart::volume` describes space where current part belongs, and holds - /// `SingleDiskVolume` object which does not contain up-to-date settings of corresponding volume. - /// Therefore we shall obtain volume from storage policy. - auto volume_ptr = storage_policy.getVolume(storage_policy.getVolumeIndexByDisk(volume->getDisk())); - - return !volume_ptr->areMergesAvoided(); -} - void DataPartStorageOnDisk::backup( const MergeTreeDataPartChecksums & checksums, const NameSet & files_without_checksums, @@ -737,7 +577,7 @@ void DataPartStorageOnDisk::backup( } } -DataPartStoragePtr DataPartStorageOnDisk::freeze( +MutableDataPartStoragePtr DataPartStorageOnDisk::freeze( const std::string & to, const std::string & dir_path, bool make_source_readonly, @@ -761,7 +601,7 @@ DataPartStoragePtr DataPartStorageOnDisk::freeze( return std::make_shared(single_disk_volume, to, dir_path); } -DataPartStoragePtr DataPartStorageOnDisk::clone( +MutableDataPartStoragePtr DataPartStorageOnDisk::clonePart( const std::string & to, const std::string & dir_path, const DiskPtr & disk, @@ -774,6 +614,7 @@ DataPartStoragePtr DataPartStorageOnDisk::clone( LOG_WARNING(log, "Path {} already exists. 
Will remove it and clone again.", fullPath(disk, path_to_clone)); disk->removeRecursive(path_to_clone); } + disk->createDirectories(to); volume->getDisk()->copy(getRelativePath(), disk, to); volume->getDisk()->removeFileIfExists(fs::path(path_to_clone) / "delete-on-destroy.txt"); @@ -782,13 +623,7 @@ DataPartStoragePtr DataPartStorageOnDisk::clone( return std::make_shared(single_disk_volume, to, dir_path); } -void DataPartStorageOnDisk::onRename(const std::string & new_root_path, const std::string & new_part_dir) -{ - part_dir = new_part_dir; - root_path = new_root_path; -} - -void DataPartStorageBuilderOnDisk::rename( +void DataPartStorageOnDisk::rename( const std::string & new_root_path, const std::string & new_part_dir, Poco::Logger * log, @@ -809,7 +644,7 @@ void DataPartStorageBuilderOnDisk::rename( "Part directory {} already exists and contains {} files. Removing it.", fullPath(volume->getDisk(), to), files.size()); - transaction->removeRecursive(to); + executeOperation([&](auto & disk) { disk.removeRecursive(to); }); } else { @@ -823,8 +658,12 @@ void DataPartStorageBuilderOnDisk::rename( String from = getRelativePath(); /// Why? - transaction->setLastModified(from, Poco::Timestamp::fromEpochTime(time(nullptr))); - transaction->moveDirectory(from, to); + executeOperation([&](auto & disk) + { + disk.setLastModified(from, Poco::Timestamp::fromEpochTime(time(nullptr))); + disk.moveDirectory(from, to); + }); + part_dir = new_part_dir; root_path = new_root_path; @@ -846,7 +685,7 @@ void DataPartStorageOnDisk::changeRootPath(const std::string & from_root, const --prefix_size; if (prefix_size > root_path.size() - || std::string_view(from_root).substr(0, prefix_size) != std::string_view(root_path).substr(0, prefix_size)) + || std::string_view(from_root).substr(0, prefix_size) != std::string_view(root_path).substr(0, prefix_size)) throw Exception( ErrorCodes::LOGICAL_ERROR, "Cannot change part root to {} because it is not a prefix of current root {}", @@ -859,51 +698,80 @@ void DataPartStorageOnDisk::changeRootPath(const std::string & from_root, const root_path = to_root.substr(0, dst_size) + root_path.substr(prefix_size); } -DataPartStorageBuilderOnDisk::DataPartStorageBuilderOnDisk( - VolumePtr volume_, - std::string root_path_, - std::string part_dir_) - : volume(std::move(volume_)) - , root_path(std::move(root_path_)) - , part_dir(std::move(part_dir_)) - , transaction(volume->getDisk()->createTransaction()) -{ -} - -std::unique_ptr DataPartStorageBuilderOnDisk::writeFile( - const String & name, - size_t buf_size, - const WriteSettings & settings) -{ - return transaction->writeFile(fs::path(root_path) / part_dir / name, buf_size, WriteMode::Rewrite, settings, /* autocommit = */ false); -} - -void DataPartStorageBuilderOnDisk::removeFile(const String & name) -{ - transaction->removeFile(fs::path(root_path) / part_dir / name); -} - -void DataPartStorageBuilderOnDisk::removeFileIfExists(const String & name) -{ - transaction->removeFileIfExists(fs::path(root_path) / part_dir / name); -} - -void DataPartStorageBuilderOnDisk::removeRecursive() -{ - transaction->removeRecursive(fs::path(root_path) / part_dir); -} - -void DataPartStorageBuilderOnDisk::removeSharedRecursive(bool keep_in_remote_fs) -{ - transaction->removeSharedRecursive(fs::path(root_path) / part_dir, keep_in_remote_fs, {}); -} - -SyncGuardPtr DataPartStorageBuilderOnDisk::getDirectorySyncGuard() const +SyncGuardPtr DataPartStorageOnDisk::getDirectorySyncGuard() const { return 
volume->getDisk()->getDirectorySyncGuard(fs::path(root_path) / part_dir); } -void DataPartStorageBuilderOnDisk::createHardLinkFrom(const IDataPartStorage & source, const std::string & from, const std::string & to) const +template +void DataPartStorageOnDisk::executeOperation(Op && op) +{ + if (transaction) + op(*transaction); + else + op(*volume->getDisk()); +} + +std::unique_ptr DataPartStorageOnDisk::writeFile( + const String & name, + size_t buf_size, + const WriteSettings & settings) +{ + if (transaction) + return transaction->writeFile(fs::path(root_path) / part_dir / name, buf_size, WriteMode::Rewrite, settings, /* autocommit = */ false); + + return volume->getDisk()->writeFile(fs::path(root_path) / part_dir / name, buf_size, WriteMode::Rewrite, settings); +} + +std::unique_ptr DataPartStorageOnDisk::writeTransactionFile(WriteMode mode) const +{ + return volume->getDisk()->writeFile(fs::path(root_path) / part_dir / "txn_version.txt", 256, mode); +} + +void DataPartStorageOnDisk::createFile(const String & name) +{ + executeOperation([&](auto & disk) { disk.createFile(fs::path(root_path) / part_dir / name); }); +} + +void DataPartStorageOnDisk::moveFile(const String & from_name, const String & to_name) +{ + executeOperation([&](auto & disk) + { + auto relative_path = fs::path(root_path) / part_dir; + disk.moveFile(relative_path / from_name, relative_path / to_name); + }); +} + +void DataPartStorageOnDisk::replaceFile(const String & from_name, const String & to_name) +{ + executeOperation([&](auto & disk) + { + auto relative_path = fs::path(root_path) / part_dir; + disk.replaceFile(relative_path / from_name, relative_path / to_name); + }); +} + +void DataPartStorageOnDisk::removeFile(const String & name) +{ + executeOperation([&](auto & disk) { disk.removeFile(fs::path(root_path) / part_dir / name); }); +} + +void DataPartStorageOnDisk::removeFileIfExists(const String & name) +{ + executeOperation([&](auto & disk) { disk.removeFileIfExists(fs::path(root_path) / part_dir / name); }); +} + +void DataPartStorageOnDisk::removeRecursive() +{ + executeOperation([&](auto & disk) { disk.removeRecursive(fs::path(root_path) / part_dir); }); +} + +void DataPartStorageOnDisk::removeSharedRecursive(bool keep_in_remote_fs) +{ + executeOperation([&](auto & disk) { disk.removeSharedRecursive(fs::path(root_path) / part_dir, keep_in_remote_fs, {}); }); +} + +void DataPartStorageOnDisk::createHardLinkFrom(const IDataPartStorage & source, const std::string & from, const std::string & to) { const auto * source_on_disk = typeid_cast(&source); if (!source_on_disk) @@ -912,58 +780,43 @@ void DataPartStorageBuilderOnDisk::createHardLinkFrom(const IDataPartStorage & s "Cannot create hardlink from different storage. 
Expected DataPartStorageOnDisk, got {}", typeid(source).name()); - transaction->createHardLink( - fs::path(source_on_disk->getRelativePath()) / from, - fs::path(root_path) / part_dir / to); + executeOperation([&](auto & disk) + { + disk.createHardLink( + fs::path(source_on_disk->getRelativePath()) / from, + fs::path(root_path) / part_dir / to); + }); } -bool DataPartStorageBuilderOnDisk::exists() const +void DataPartStorageOnDisk::createDirectories() { - return volume->getDisk()->exists(fs::path(root_path) / part_dir); + executeOperation([&](auto & disk) { disk.createDirectories(fs::path(root_path) / part_dir); }); } -std::string DataPartStorageBuilderOnDisk::getFullPath() const +void DataPartStorageOnDisk::createProjection(const std::string & name) { - return fs::path(volume->getDisk()->getPath()) / root_path / part_dir; + executeOperation([&](auto & disk) { disk.createDirectory(fs::path(root_path) / part_dir / name); }); } -std::string DataPartStorageBuilderOnDisk::getRelativePath() const +void DataPartStorageOnDisk::beginTransaction() { - return fs::path(root_path) / part_dir; + if (transaction) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Uncommitted {}transaction already exists", has_shared_transaction ? "shared " : ""); + + transaction = volume->getDisk()->createTransaction(); } -void DataPartStorageBuilderOnDisk::createDirectories() +void DataPartStorageOnDisk::commitTransaction() { - transaction->createDirectories(fs::path(root_path) / part_dir); -} + if (!transaction) + throw Exception(ErrorCodes::LOGICAL_ERROR, "There is no uncommitted transaction"); -void DataPartStorageBuilderOnDisk::createProjection(const std::string & name) -{ - transaction->createDirectory(fs::path(root_path) / part_dir / name); -} + if (has_shared_transaction) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot commit shared transaction"); -ReservationPtr DataPartStorageBuilderOnDisk::reserve(UInt64 bytes) -{ - auto res = volume->reserve(bytes); - if (!res) - throw Exception(ErrorCodes::NOT_ENOUGH_SPACE, "Cannot reserve {}, not enough space", ReadableSize(bytes)); - - return res; -} - -DataPartStorageBuilderPtr DataPartStorageBuilderOnDisk::getProjection(const std::string & name) const -{ - return std::make_shared(volume, std::string(fs::path(root_path) / part_dir), name); -} - -DataPartStoragePtr DataPartStorageBuilderOnDisk::getStorage() const -{ - return std::make_shared(volume, root_path, part_dir); -} - -void DataPartStorageBuilderOnDisk::commit() -{ transaction->commit(); + transaction.reset(); } } diff --git a/src/Storages/MergeTree/DataPartStorageOnDisk.h b/src/Storages/MergeTree/DataPartStorageOnDisk.h index adf1b78cdfb..bea1596e1f7 100644 --- a/src/Storages/MergeTree/DataPartStorageOnDisk.h +++ b/src/Storages/MergeTree/DataPartStorageOnDisk.h @@ -21,6 +21,7 @@ public: std::string getPartDirectory() const override { return part_dir; } std::string getFullRootPath() const override; + MutableDataPartStoragePtr getProjection(const std::string & name) override; DataPartStoragePtr getProjection(const std::string & name) const override; bool exists() const override; @@ -41,7 +42,6 @@ public: std::optional read_hint, std::optional file_size) const override; - void loadVersionMetadata(VersionMetadata & version, Poco::Logger * log) const override; void checkConsistency(const MergeTreeDataPartChecksums & checksums) const override; void remove( @@ -52,10 +52,14 @@ public: MergeTreeDataPartState state, Poco::Logger * log) override; - std::string getRelativePathForPrefix(Poco::Logger * log, const String & 
prefix, bool detached) const override; + /// Returns path to place detached part in or nullopt if we don't need to detach part (if it already exists and has the same content) + std::optional getRelativePathForPrefix(Poco::Logger * log, const String & prefix, bool detached, bool broken) const override; + + /// Returns true if detached part already exists and has the same content (compares checksums.txt and the list of files) + bool looksLikeBrokenDetachedPartHasTheSameContent(const String & detached_part_path, std::optional & original_checksums_content, + std::optional & original_files_list) const; void setRelativePath(const std::string & path) override; - void onRename(const std::string & new_root_path, const std::string & new_part_dir) override; std::string getDiskName() const override; std::string getDiskType() const override; @@ -63,30 +67,14 @@ public: bool supportZeroCopyReplication() const override; bool supportParallelWrite() const override; bool isBroken() const override; - void syncRevision(UInt64 revision) override; + void syncRevision(UInt64 revision) const override; UInt64 getRevision() const override; std::unordered_map getSerializedMetadata(const std::vector & paths) const override; std::string getDiskPath() const override; - - DisksSet::const_iterator isStoredOnDisk(const DisksSet & disks) const override; - ReservationPtr reserve(UInt64 bytes) const override; ReservationPtr tryReserve(UInt64 bytes) const override; - size_t getVolumeIndex(const IStoragePolicy &) const override; - - void writeChecksums(const MergeTreeDataPartChecksums & checksums, const WriteSettings & settings) const override; - void writeColumns(const NamesAndTypesList & columns, const WriteSettings & settings) const override; - void writeVersionMetadata(const VersionMetadata & version, bool fsync_part_dir) const override; - void appendCSNToVersionMetadata(const VersionMetadata & version, VersionMetadata::WhichCSN which_csn) const override; - void appendRemovalTIDToVersionMetadata(const VersionMetadata & version, bool clear) const override; - void writeDeleteOnDestroyMarker(Poco::Logger * log) const override; - void removeDeleteOnDestroyMarker() const override; - void removeVersionMetadata() const override; - String getUniqueId() const override; - bool shallParticipateInMerges(const IStoragePolicy &) const override; - void backup( const MergeTreeDataPartChecksums & checksums, const NameSet & files_without_checksums, @@ -95,7 +83,7 @@ public: bool make_temporary_hard_links, TemporaryFilesOnDisks * temp_dirs) const override; - DataPartStoragePtr freeze( + MutableDataPartStoragePtr freeze( const std::string & to, const std::string & dir_path, bool make_source_readonly, @@ -103,7 +91,7 @@ public: bool copy_instead_of_hardlink, const NameSet & files_to_copy_instead_of_hardlinks) const override; - DataPartStoragePtr clone( + MutableDataPartStoragePtr clonePart( const std::string & to, const std::string & dir_path, const DiskPtr & disk, @@ -111,11 +99,51 @@ public: void changeRootPath(const std::string & from_root, const std::string & to_root) override; - DataPartStorageBuilderPtr getBuilder() const override; + void createDirectories() override; + void createProjection(const std::string & name) override; + + std::unique_ptr writeFile( + const String & name, + size_t buf_size, + const WriteSettings & settings) override; + + std::unique_ptr writeTransactionFile(WriteMode mode) const override; + + void createFile(const String & name) override; + void moveFile(const String & from_name, const String & to_name) override; 
+ void replaceFile(const String & from_name, const String & to_name) override; + + void removeFile(const String & name) override; + void removeFileIfExists(const String & name) override; + void removeRecursive() override; + void removeSharedRecursive(bool keep_in_remote_fs) override; + + SyncGuardPtr getDirectorySyncGuard() const override; + + void createHardLinkFrom(const IDataPartStorage & source, const std::string & from, const std::string & to) override; + + void rename( + const std::string & new_root_path, + const std::string & new_part_dir, + Poco::Logger * log, + bool remove_new_dir_if_exists, + bool fsync_part_dir) override; + + void beginTransaction() override; + void commitTransaction() override; + bool hasActiveTransaction() const override { return transaction != nullptr; } + private: VolumePtr volume; std::string root_path; std::string part_dir; + DiskTransactionPtr transaction; + bool has_shared_transaction = false; + + DataPartStorageOnDisk(VolumePtr volume_, std::string root_path_, std::string part_dir_, DiskTransactionPtr transaction_); + + template + void executeOperation(Op && op); void clearDirectory( const std::string & dir, @@ -129,56 +157,4 @@ private: bool is_projection) const; }; -class DataPartStorageBuilderOnDisk final : public IDataPartStorageBuilder -{ -public: - DataPartStorageBuilderOnDisk(VolumePtr volume_, std::string root_path_, std::string part_dir_); - - void setRelativePath(const std::string & path) override; - - bool exists() const override; - - void createDirectories() override; - void createProjection(const std::string & name) override; - - std::string getPartDirectory() const override { return part_dir; } - std::string getFullPath() const override; - std::string getRelativePath() const override; - - std::unique_ptr writeFile( - const String & name, - size_t buf_size, - const WriteSettings & settings) override; - - void removeFile(const String & name) override; - void removeFileIfExists(const String & name) override; - void removeRecursive() override; - void removeSharedRecursive(bool keep_in_remote_fs) override; - - SyncGuardPtr getDirectorySyncGuard() const override; - - void createHardLinkFrom(const IDataPartStorage & source, const std::string & from, const std::string & to) const override; - - ReservationPtr reserve(UInt64 bytes) override; - - DataPartStorageBuilderPtr getProjection(const std::string & name) const override; - - DataPartStoragePtr getStorage() const override; - - void rename( - const std::string & new_root_path, - const std::string & new_part_dir, - Poco::Logger * log, - bool remove_new_dir_if_exists, - bool fsync_part_dir) override; - - void commit() override; - -private: - VolumePtr volume; - std::string root_path; - std::string part_dir; - DiskTransactionPtr transaction; -}; - } diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index 475461aa0d6..4f9c9ffd596 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -13,9 +13,9 @@ #include #include #include +#include #include #include -#include #include #include #include @@ -147,12 +147,13 @@ void Service::processQuery(const HTMLForm & params, ReadBuffer & /*body*/, Write CurrentMetrics::Increment metric_increment{CurrentMetrics::ReplicatedSend}; - if (part->data_part_storage->isStoredOnRemoteDisk()) + if (part->getDataPartStorage().isStoredOnRemoteDisk()) { UInt64 revision = parse(params.get("disk_revision", "0")); if (revision) - 
part->data_part_storage->syncRevision(revision); - revision = part->data_part_storage->getRevision(); + part->getDataPartStorage().syncRevision(revision); + + revision = part->getDataPartStorage().getRevision(); if (revision) response.addCookie({"disk_revision", toString(revision)}); } @@ -179,43 +180,32 @@ void Service::processQuery(const HTMLForm & params, ReadBuffer & /*body*/, Write std::sregex_token_iterator(remote_fs_metadata.begin(), remote_fs_metadata.end(), re, -1), std::sregex_token_iterator()); + bool send_projections = client_protocol_version >= REPLICATION_PROTOCOL_VERSION_WITH_PARTS_PROJECTION; + if (send_projections) + { + const auto & projections = part->getProjectionParts(); + writeBinary(projections.size(), out); + } + if (data_settings->allow_remote_fs_zero_copy_replication && /// In memory data part does not have metadata yet. !isInMemoryPart(part) && client_protocol_version >= REPLICATION_PROTOCOL_VERSION_WITH_PARTS_ZERO_COPY) { - auto disk_type = part->data_part_storage->getDiskType(); - if (part->data_part_storage->supportZeroCopyReplication() && std::find(capability.begin(), capability.end(), disk_type) != capability.end()) + auto disk_type = part->getDataPartStorage().getDiskType(); + if (part->getDataPartStorage().supportZeroCopyReplication() && std::find(capability.begin(), capability.end(), disk_type) != capability.end()) { /// Send metadata if the receiver's capability covers the source disk type. response.addCookie({"remote_fs_metadata", disk_type}); - if (client_protocol_version >= REPLICATION_PROTOCOL_VERSION_WITH_PARTS_PROJECTION) - { - const auto & projections = part->getProjectionParts(); - writeBinary(projections.size(), out); - } - - sendPartFromDiskRemoteMeta(part, out, true, part->getProjectionParts()); + sendPartFromDiskRemoteMeta(part, out, true, send_projections); return; } } - if (client_protocol_version >= REPLICATION_PROTOCOL_VERSION_WITH_PARTS_PROJECTION) - { - const auto & projections = part->getProjectionParts(); - writeBinary(projections.size(), out); - if (isInMemoryPart(part)) - sendPartFromMemory(part, out, projections); - else - sendPartFromDisk(part, out, client_protocol_version, projections); - } + if (isInMemoryPart(part)) + sendPartFromMemory(part, out, send_projections); else - { - if (isInMemoryPart(part)) - sendPartFromMemory(part, out); - else - sendPartFromDisk(part, out, client_protocol_version); - } + sendPartFromDisk(part, out, client_protocol_version, send_projections); } catch (const NetException &) { @@ -237,20 +227,23 @@ void Service::processQuery(const HTMLForm & params, ReadBuffer & /*body*/, Write } void Service::sendPartFromMemory( - const MergeTreeData::DataPartPtr & part, WriteBuffer & out, const std::map> & projections) + const MergeTreeData::DataPartPtr & part, WriteBuffer & out, bool send_projections) { auto metadata_snapshot = data.getInMemoryMetadataPtr(); - for (const auto & [name, projection] : projections) + if (send_projections) { - auto projection_sample_block = metadata_snapshot->projections.get(name).sample_block; - auto part_in_memory = asInMemoryPart(projection); - if (!part_in_memory) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Projection {} of part {} is not stored in memory", name, part->name); + for (const auto & [name, projection] : part->getProjectionParts()) + { + auto projection_sample_block = metadata_snapshot->projections.get(name).sample_block; + auto part_in_memory = asInMemoryPart(projection); + if (!part_in_memory) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Projection {} of part {} 
is not stored in memory", name, part->name); - writeStringBinary(name, out); - projection->checksums.write(out); - NativeWriter block_out(out, 0, projection_sample_block); - block_out.write(part_in_memory->block); + writeStringBinary(name, out); + projection->checksums.write(out); + NativeWriter block_out(out, 0, projection_sample_block); + block_out.write(part_in_memory->block); + } } auto part_in_memory = asInMemoryPart(part); @@ -268,7 +261,7 @@ MergeTreeData::DataPart::Checksums Service::sendPartFromDisk( const MergeTreeData::DataPartPtr & part, WriteBuffer & out, int client_protocol_version, - const std::map> & projections) + bool send_projections) { /// We'll take a list of files from the list of checksums. MergeTreeData::DataPart::Checksums checksums = part->checksums; @@ -276,7 +269,8 @@ MergeTreeData::DataPart::Checksums Service::sendPartFromDisk( auto file_names_without_checksums = part->getFileNamesWithoutChecksums(); for (const auto & file_name : file_names_without_checksums) { - if (client_protocol_version < REPLICATION_PROTOCOL_VERSION_WITH_PARTS_DEFAULT_COMPRESSION && file_name == IMergeTreeDataPart::DEFAULT_COMPRESSION_CODEC_FILE_NAME) + if (client_protocol_version < REPLICATION_PROTOCOL_VERSION_WITH_PARTS_DEFAULT_COMPRESSION + && file_name == IMergeTreeDataPart::DEFAULT_COMPRESSION_CODEC_FILE_NAME) continue; checksums.files[file_name] = {}; @@ -287,11 +281,10 @@ MergeTreeData::DataPart::Checksums Service::sendPartFromDisk( { // Get rid of projection files checksums.files.erase(name + ".proj"); - auto it = projections.find(name); - if (it != projections.end()) + if (send_projections) { writeStringBinary(name, out); - MergeTreeData::DataPart::Checksums projection_checksum = sendPartFromDisk(it->second, out, client_protocol_version); + MergeTreeData::DataPart::Checksums projection_checksum = sendPartFromDisk(projection, out, client_protocol_version, false); data_checksums.addFile(name + ".proj", projection_checksum.getTotalSizeOnDisk(), projection_checksum.getTotalChecksumUInt128()); } else if (part->checksums.has(name + ".proj")) @@ -307,12 +300,12 @@ MergeTreeData::DataPart::Checksums Service::sendPartFromDisk( { String file_name = it.first; - UInt64 size = part->data_part_storage->getFileSize(file_name); + UInt64 size = part->getDataPartStorage().getFileSize(file_name); writeStringBinary(it.first, out); writeBinary(size, out); - auto file_in = part->data_part_storage->readFile(file_name, {}, std::nullopt, std::nullopt); + auto file_in = part->getDataPartStorage().readFile(file_name, {}, std::nullopt, std::nullopt); HashingWriteBuffer hashing_out(out); copyDataWithThrottler(*file_in, hashing_out, blocker.getCounter(), data.getSendsThrottler()); @@ -323,7 +316,7 @@ MergeTreeData::DataPart::Checksums Service::sendPartFromDisk( throw Exception( ErrorCodes::BAD_SIZE_OF_FILE_IN_DATA_PART, "Unexpected size of file {}, expected {} got {}", - std::string(fs::path(part->data_part_storage->getRelativePath()) / file_name), + std::string(fs::path(part->getDataPartStorage().getRelativePath()) / file_name), hashing_out.count(), size); writePODBinary(hashing_out.getHash(), out); @@ -336,18 +329,15 @@ MergeTreeData::DataPart::Checksums Service::sendPartFromDisk( return data_checksums; } -MergeTreeData::DataPart::Checksums Service::sendPartFromDiskRemoteMeta( +void Service::sendPartFromDiskRemoteMeta( const MergeTreeData::DataPartPtr & part, WriteBuffer & out, bool send_part_id, - const std::map> & projections) + bool send_projections) { - const auto * data_part_storage_on_disk = 
dynamic_cast(part->data_part_storage.get()); - if (!data_part_storage_on_disk) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Storage '{}' doesn't support zero-copy replication", part->data_part_storage->getDiskName()); - - if (!data_part_storage_on_disk->supportZeroCopyReplication()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Disk '{}' doesn't support zero-copy replication", data_part_storage_on_disk->getDiskName()); + auto data_part_storage = part->getDataPartStoragePtr(); + if (!data_part_storage->supportZeroCopyReplication()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Disk '{}' doesn't support zero-copy replication", data_part_storage->getDiskName()); /// We'll take a list of files from the list of checksums. MergeTreeData::DataPart::Checksums checksums = part->checksums; @@ -365,33 +355,23 @@ MergeTreeData::DataPart::Checksums Service::sendPartFromDiskRemoteMeta( std::vector paths; paths.reserve(checksums.files.size()); for (const auto & it : checksums.files) - paths.push_back(fs::path(part->data_part_storage->getRelativePath()) / it.first); + paths.push_back(fs::path(part->getDataPartStorage().getRelativePath()) / it.first); /// Serialized metadatadatas with zero ref counts. - auto metadatas = data_part_storage_on_disk->getSerializedMetadata(paths); + auto metadatas = data_part_storage->getSerializedMetadata(paths); if (send_part_id) { - String part_id = data_part_storage_on_disk->getUniqueId(); + String part_id = data_part_storage->getUniqueId(); writeStringBinary(part_id, out); } - MergeTreeData::DataPart::Checksums data_checksums; - for (const auto & [name, projection] : part->getProjectionParts()) + if (send_projections) { - auto it = projections.find(name); - if (it != projections.end()) + for (const auto & [name, projection] : part->getProjectionParts()) { - writeStringBinary(name, out); - MergeTreeData::DataPart::Checksums projection_checksum = sendPartFromDiskRemoteMeta(it->second, out, false); - data_checksums.addFile(name + ".proj", projection_checksum.getTotalSizeOnDisk(), projection_checksum.getTotalChecksumUInt128()); - } - else if (part->checksums.has(name + ".proj")) - { - // We don't send this projection, just add out checksum to bypass the following check - const auto & our_checksum = part->checksums.files.find(name + ".proj")->second; - data_checksums.addFile(name + ".proj", our_checksum.file_size, our_checksum.file_hash); + sendPartFromDiskRemoteMeta(projection, out, false, false); } } @@ -399,10 +379,10 @@ MergeTreeData::DataPart::Checksums Service::sendPartFromDiskRemoteMeta( for (const auto & it : checksums.files) { const String & file_name = it.first; - String file_path_prefix = fs::path(part->data_part_storage->getRelativePath()) / file_name; + String file_path_prefix = fs::path(part->getDataPartStorage().getRelativePath()) / file_name; /// Just some additional checks - String metadata_file_path = fs::path(data_part_storage_on_disk->getDiskPath()) / file_path_prefix; + String metadata_file_path = fs::path(data_part_storage->getDiskPath()) / file_path_prefix; fs::path metadata(metadata_file_path); if (!fs::exists(metadata)) throw Exception(ErrorCodes::CORRUPTED_DATA, "Remote metadata '{}' is not exists", file_name); @@ -426,12 +406,7 @@ MergeTreeData::DataPart::Checksums Service::sendPartFromDiskRemoteMeta( throw Exception(ErrorCodes::BAD_SIZE_OF_FILE_IN_DATA_PART, "Unexpected size of file {}", metadata_file_path); writePODBinary(hashing_out.getHash(), out); - - if (!file_names_without_checksums.contains(file_name)) - data_checksums.addFile(file_name, 
hashing_out.count(), hashing_out.getHash()); } - - return data_checksums; } MergeTreeData::DataPartPtr Service::findPart(const String & name) @@ -706,74 +681,54 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchSelectedPart( in->setNextCallback(ReplicatedFetchReadCallback(*entry)); - return part_type == "InMemory" - ? downloadPartToMemory(part_name, part_uuid, metadata_snapshot, context, disk, *in, projections, throttler) - : downloadPartToDisk(part_name, replica_path, to_detached, tmp_prefix, sync, disk, *in, projections, checksums, throttler); + if (part_type == "InMemory") + { + auto volume = std::make_shared("volume_" + part_name, disk, 0); + + auto data_part_storage = std::make_shared( + volume, + data.getRelativeDataPath(), + part_name); + + return downloadPartToMemory( + data_part_storage, part_name, + MergeTreePartInfo::fromPartName(part_name, data.format_version), + part_uuid, metadata_snapshot, context, *in, + projections, false, throttler); + } + + return downloadPartToDisk( + part_name, replica_path, to_detached, tmp_prefix, + sync, disk, *in, projections, checksums, throttler); } MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToMemory( + MutableDataPartStoragePtr data_part_storage, const String & part_name, + const MergeTreePartInfo & part_info, const UUID & part_uuid, const StorageMetadataPtr & metadata_snapshot, ContextPtr context, - DiskPtr disk, PooledReadWriteBufferFromHTTP & in, size_t projections, + bool is_projection, ThrottlerPtr throttler) { - auto volume = std::make_shared("volume_" + part_name, disk, 0); + auto new_data_part = std::make_shared(data, part_name, part_info, data_part_storage); - auto data_part_storage = std::make_shared( - volume, - data.getRelativeDataPath(), - part_name); - - auto data_part_storage_builder = std::make_shared( - volume, - data.getRelativeDataPath(), - part_name); - - MergeTreeData::MutableDataPartPtr new_data_part = - std::make_shared(data, part_name, data_part_storage); - new_data_part->version.setCreationTID(Tx::PrehistoricTID, nullptr); - - for (auto i = 0ul; i < projections; ++i) + for (size_t i = 0; i < projections; ++i) { String projection_name; readStringBinary(projection_name, in); - MergeTreeData::DataPart::Checksums checksums; - if (!checksums.read(in)) - throw Exception("Cannot deserialize checksums", ErrorCodes::CORRUPTED_DATA); - - NativeReader block_in(in, 0); - auto block = block_in.read(); - throttler->add(block.bytes()); - - auto projection_part_storage = data_part_storage->getProjection(projection_name + ".proj"); - auto projection_part_storage_builder = data_part_storage_builder->getProjection(projection_name + ".proj"); MergeTreePartInfo new_part_info("all", 0, 0, 0); - MergeTreeData::MutableDataPartPtr new_projection_part = - std::make_shared(data, projection_name, new_part_info, projection_part_storage, new_data_part.get()); + auto projection_part_storage = data_part_storage->getProjection(projection_name + ".proj"); - new_projection_part->is_temp = false; - new_projection_part->setColumns(block.getNamesAndTypesList(), {}); - MergeTreePartition partition{}; - new_projection_part->partition = std::move(partition); - new_projection_part->minmax_idx = std::make_shared(); + auto new_projection_part = downloadPartToMemory( + projection_part_storage, projection_name, + new_part_info, part_uuid, metadata_snapshot, + context, in, 0, true, throttler); - MergedBlockOutputStream part_out( - new_projection_part, - projection_part_storage_builder, - metadata_snapshot->projections.get(projection_name).metadata, - 
block.getNamesAndTypesList(), - {}, - CompressionCodecFactory::instance().get("NONE", {}), - NO_TRANSACTION_PTR); - - part_out.write(block); - part_out.finalizePart(new_projection_part, false); - new_projection_part->checksums.checkEqual(checksums, /* have_uncompressed = */ true); new_data_part->addProjectionPart(projection_name, std::move(new_projection_part)); } @@ -785,14 +740,19 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToMemory( auto block = block_in.read(); throttler->add(block.bytes()); - new_data_part->uuid = part_uuid; - new_data_part->is_temp = true; new_data_part->setColumns(block.getNamesAndTypesList(), {}); - new_data_part->minmax_idx->update(block, data.getMinMaxColumnsNames(metadata_snapshot->getPartitionKey())); - new_data_part->partition.create(metadata_snapshot, block, 0, context); + + if (!is_projection) + { + new_data_part->version.setCreationTID(Tx::PrehistoricTID, nullptr); + new_data_part->uuid = part_uuid; + new_data_part->is_temp = true; + new_data_part->minmax_idx->update(block, data.getMinMaxColumnsNames(metadata_snapshot->getPartitionKey())); + new_data_part->partition.create(metadata_snapshot, block, 0, context); + } MergedBlockOutputStream part_out( - new_data_part, data_part_storage_builder, metadata_snapshot, block.getNamesAndTypesList(), {}, + new_data_part, metadata_snapshot, block.getNamesAndTypesList(), {}, CompressionCodecFactory::instance().get("NONE", {}), NO_TRANSACTION_PTR); part_out.write(block); @@ -804,7 +764,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToMemory( void Fetcher::downloadBasePartOrProjectionPartToDiskRemoteMeta( const String & replica_path, - DataPartStorageBuilderPtr & data_part_storage_builder, + const MutableDataPartStoragePtr & data_part_storage, PooledReadWriteBufferFromHTTP & in, MergeTreeData::DataPart::Checksums & checksums, ThrottlerPtr throttler) const @@ -820,7 +780,7 @@ void Fetcher::downloadBasePartOrProjectionPartToDiskRemoteMeta( readStringBinary(file_name, in); readBinary(file_size, in); - String metadata_file = fs::path(data_part_storage_builder->getFullPath()) / file_name; + String metadata_file = fs::path(data_part_storage->getFullPath()) / file_name; { auto file_out = std::make_unique(metadata_file, DBMS_DEFAULT_BUFFER_SIZE, -1, 0666, nullptr, 0); @@ -834,8 +794,8 @@ void Fetcher::downloadBasePartOrProjectionPartToDiskRemoteMeta( /// NOTE The is_cancelled flag also makes sense to check every time you read over the network, /// performing a poll with a not very large timeout. /// And now we check it only between read chunks (in the `copyData` function). - data_part_storage_builder->removeSharedRecursive(true); - data_part_storage_builder->commit(); + data_part_storage->removeSharedRecursive(true); + data_part_storage->commitTransaction(); throw Exception("Fetching of part was cancelled", ErrorCodes::ABORTED); } @@ -855,13 +815,12 @@ void Fetcher::downloadBasePartOrProjectionPartToDiskRemoteMeta( checksums.addFile(file_name, file_size, expected_hash); } } - } void Fetcher::downloadBaseOrProjectionPartToDisk( const String & replica_path, - DataPartStorageBuilderPtr & data_part_storage_builder, + const MutableDataPartStoragePtr & data_part_storage, bool sync, PooledReadWriteBufferFromHTTP & in, MergeTreeData::DataPart::Checksums & checksums, @@ -880,14 +839,14 @@ void Fetcher::downloadBaseOrProjectionPartToDisk( /// File must be inside "absolute_part_path" directory. /// Otherwise malicious ClickHouse replica may force us to write to arbitrary path. 
- String absolute_file_path = fs::weakly_canonical(fs::path(data_part_storage_builder->getRelativePath()) / file_name); - if (!startsWith(absolute_file_path, fs::weakly_canonical(data_part_storage_builder->getRelativePath()).string())) + String absolute_file_path = fs::weakly_canonical(fs::path(data_part_storage->getRelativePath()) / file_name); + if (!startsWith(absolute_file_path, fs::weakly_canonical(data_part_storage->getRelativePath()).string())) throw Exception(ErrorCodes::INSECURE_PATH, "File path ({}) doesn't appear to be inside part path ({}). " "This may happen if we are trying to download part from malicious replica or logical error.", - absolute_file_path, data_part_storage_builder->getRelativePath()); + absolute_file_path, data_part_storage->getRelativePath()); - auto file_out = data_part_storage_builder->writeFile(file_name, std::min(file_size, DBMS_DEFAULT_BUFFER_SIZE), {}); + auto file_out = data_part_storage->writeFile(file_name, std::min(file_size, DBMS_DEFAULT_BUFFER_SIZE), {}); HashingWriteBuffer hashing_out(*file_out); copyDataWithThrottler(in, hashing_out, file_size, blocker.getCounter(), throttler); @@ -896,7 +855,7 @@ void Fetcher::downloadBaseOrProjectionPartToDisk( /// NOTE The is_cancelled flag also makes sense to check every time you read over the network, /// performing a poll with a not very large timeout. /// And now we check it only between read chunks (in the `copyData` function). - data_part_storage_builder->removeRecursive(); + data_part_storage->removeRecursive(); throw Exception("Fetching of part was cancelled", ErrorCodes::ABORTED); } @@ -906,7 +865,7 @@ void Fetcher::downloadBaseOrProjectionPartToDisk( if (expected_hash != hashing_out.getHash()) throw Exception(ErrorCodes::CHECKSUM_DOESNT_MATCH, "Checksum mismatch for file {} transferred from {}", - (fs::path(data_part_storage_builder->getFullPath()) / file_name).string(), + (fs::path(data_part_storage->getFullPath()) / file_name).string(), replica_path); if (file_name != "checksums.txt" && @@ -951,15 +910,12 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToDisk( part_relative_path, part_dir); - DataPartStorageBuilderPtr data_part_storage_builder = std::make_shared( - volume, - part_relative_path, - part_dir); + data_part_storage->beginTransaction(); - if (data_part_storage_builder->exists()) + if (data_part_storage->exists()) { LOG_WARNING(log, "Directory {} already exists, probably result of a failed fetch. Will remove it before fetching part.", - data_part_storage_builder->getFullPath()); + data_part_storage->getFullPath()); /// Even if it's a temporary part it could be downloaded with zero copy replication and this function /// is executed as a callback. @@ -967,37 +923,36 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToDisk( /// We don't control the amount of refs for temporary parts so we cannot decide can we remove blobs /// or not. 
So we are not doing it bool keep_shared = disk->supportZeroCopyReplication() && data_settings->allow_remote_fs_zero_copy_replication; - data_part_storage_builder->removeSharedRecursive(keep_shared); + data_part_storage->removeSharedRecursive(keep_shared); } - data_part_storage_builder->createDirectories(); + data_part_storage->createDirectories(); SyncGuardPtr sync_guard; if (data.getSettings()->fsync_part_directory) - sync_guard = disk->getDirectorySyncGuard(data_part_storage->getRelativePath()); + sync_guard = data_part_storage->getDirectorySyncGuard(); CurrentMetrics::Increment metric_increment{CurrentMetrics::ReplicatedFetch}; - for (auto i = 0ul; i < projections; ++i) + for (size_t i = 0; i < projections; ++i) { String projection_name; readStringBinary(projection_name, in); MergeTreeData::DataPart::Checksums projection_checksum; auto projection_part_storage = data_part_storage->getProjection(projection_name + ".proj"); - auto projection_part_storage_builder = data_part_storage_builder->getProjection(projection_name + ".proj"); - - projection_part_storage_builder->createDirectories(); + projection_part_storage->createDirectories(); downloadBaseOrProjectionPartToDisk( - replica_path, projection_part_storage_builder, sync, in, projection_checksum, throttler); + replica_path, projection_part_storage, sync, in, projection_checksum, throttler); checksums.addFile( projection_name + ".proj", projection_checksum.getTotalSizeOnDisk(), projection_checksum.getTotalChecksumUInt128()); } // Download the base part - downloadBaseOrProjectionPartToDisk(replica_path, data_part_storage_builder, sync, in, checksums, throttler); + downloadBaseOrProjectionPartToDisk(replica_path, data_part_storage, sync, in, checksums, throttler); assertEOF(in); + data_part_storage->commitTransaction(); MergeTreeData::MutableDataPartPtr new_data_part = data.createPart(part_name, data_part_storage); new_data_part->version.setCreationTID(Tx::PrehistoricTID, nullptr); new_data_part->is_temp = true; @@ -1043,49 +998,43 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToDiskRemoteMeta( part_relative_path, part_dir); - DataPartStorageBuilderPtr data_part_storage_builder = std::make_shared( - volume, - part_relative_path, - part_dir); + data_part_storage->beginTransaction(); if (data_part_storage->exists()) throw Exception(ErrorCodes::DIRECTORY_ALREADY_EXISTS, "Directory {} already exists.", data_part_storage->getFullPath()); CurrentMetrics::Increment metric_increment{CurrentMetrics::ReplicatedFetch}; - volume->getDisk()->createDirectories(data_part_storage->getFullPath()); + data_part_storage->createDirectories(); - for (auto i = 0ul; i < projections; ++i) + for (size_t i = 0; i < projections; ++i) { String projection_name; readStringBinary(projection_name, in); MergeTreeData::DataPart::Checksums projection_checksum; auto projection_part_storage = data_part_storage->getProjection(projection_name + ".proj"); - auto projection_part_storage_builder = data_part_storage_builder->getProjection(projection_name + ".proj"); - - projection_part_storage_builder->createDirectories(); + projection_part_storage->createDirectories(); downloadBasePartOrProjectionPartToDiskRemoteMeta( - replica_path, projection_part_storage_builder, in, projection_checksum, throttler); + replica_path, projection_part_storage, in, projection_checksum, throttler); checksums.addFile( projection_name + ".proj", projection_checksum.getTotalSizeOnDisk(), projection_checksum.getTotalChecksumUInt128()); } downloadBasePartOrProjectionPartToDiskRemoteMeta( - 
replica_path, data_part_storage_builder, in, checksums, throttler); + replica_path, data_part_storage, in, checksums, throttler); assertEOF(in); MergeTreeData::MutableDataPartPtr new_data_part; try { - data_part_storage_builder->commit(); + data_part_storage->commitTransaction(); new_data_part = data.createPart(part_name, data_part_storage); new_data_part->version.setCreationTID(Tx::PrehistoricTID, nullptr); new_data_part->is_temp = true; new_data_part->modification_time = time(nullptr); - new_data_part->loadColumnsChecksumsIndexes(true, false); } #if USE_AWS_S3 diff --git a/src/Storages/MergeTree/DataPartsExchange.h b/src/Storages/MergeTree/DataPartsExchange.h index 9e453ffb422..6c92fad4092 100644 --- a/src/Storages/MergeTree/DataPartsExchange.h +++ b/src/Storages/MergeTree/DataPartsExchange.h @@ -1,5 +1,6 @@ #pragma once +#include "Storages/MergeTree/MergeTreePartInfo.h" #include #include #include @@ -42,19 +43,19 @@ private: void sendPartFromMemory( const MergeTreeData::DataPartPtr & part, WriteBuffer & out, - const std::map> & projections = {}); + bool send_projections); MergeTreeData::DataPart::Checksums sendPartFromDisk( const MergeTreeData::DataPartPtr & part, WriteBuffer & out, int client_protocol_version, - const std::map> & projections = {}); + bool send_projections); - MergeTreeData::DataPart::Checksums sendPartFromDiskRemoteMeta( + void sendPartFromDiskRemoteMeta( const MergeTreeData::DataPartPtr & part, WriteBuffer & out, bool send_part_id, - const std::map> & projections = {}); + bool send_projections); /// StorageReplicatedMergeTree::shutdown() waits for all parts exchange handlers to finish, /// so Service will never access dangling reference to storage @@ -94,7 +95,7 @@ public: private: void downloadBaseOrProjectionPartToDisk( const String & replica_path, - DataPartStorageBuilderPtr & data_part_storage_builder, + const MutableDataPartStoragePtr & data_part_storage, bool sync, PooledReadWriteBufferFromHTTP & in, MergeTreeData::DataPart::Checksums & checksums, @@ -102,12 +103,11 @@ private: void downloadBasePartOrProjectionPartToDiskRemoteMeta( const String & replica_path, - DataPartStorageBuilderPtr & data_part_storage_builder, + const MutableDataPartStoragePtr & data_part_storage, PooledReadWriteBufferFromHTTP & in, MergeTreeData::DataPart::Checksums & checksums, ThrottlerPtr throttler) const; - MergeTreeData::MutableDataPartPtr downloadPartToDisk( const String & part_name, const String & replica_path, @@ -121,13 +121,15 @@ private: ThrottlerPtr throttler); MergeTreeData::MutableDataPartPtr downloadPartToMemory( + MutableDataPartStoragePtr data_part_storage, const String & part_name, + const MergeTreePartInfo & part_info, const UUID & part_uuid, const StorageMetadataPtr & metadata_snapshot, ContextPtr context, - DiskPtr disk, PooledReadWriteBufferFromHTTP & in, size_t projections, + bool is_projection, ThrottlerPtr throttler); MergeTreeData::MutableDataPartPtr downloadPartToDiskRemoteMeta( diff --git a/src/Storages/MergeTree/IDataPartStorage.h b/src/Storages/MergeTree/IDataPartStorage.h index 17af6dd2909..c6669908db4 100644 --- a/src/Storages/MergeTree/IDataPartStorage.h +++ b/src/Storages/MergeTree/IDataPartStorage.h @@ -4,6 +4,9 @@ #include #include #include +#include +#include +#include #include namespace DB @@ -18,6 +21,7 @@ struct CanRemoveDescription NameSet files_not_to_remove; }; + using CanRemoveCallback = std::function; class IDataPartStorageIterator @@ -61,13 +65,10 @@ struct WriteSettings; class TemporaryFileOnDisk; -class IDataPartStorageBuilder; -using 
DataPartStorageBuilderPtr = std::shared_ptr; - /// This is an abstraction of storage for data part files. /// Ideally, it is assumed to contains read-only methods from IDisk. /// It is not fulfilled now, but let's try our best. -class IDataPartStorage +class IDataPartStorage : public boost::noncopyable { public: virtual ~IDataPartStorage() = default; @@ -81,16 +82,19 @@ public: /// virtual std::string getRelativeRootPath() const = 0; /// Get a storage for projection. - virtual std::shared_ptr getProjection(const std::string & name) const = 0; + virtual std::shared_ptr getProjection(const std::string & name) = 0; + virtual std::shared_ptr getProjection(const std::string & name) const = 0; /// Part directory exists. virtual bool exists() const = 0; + /// File inside part directory exists. Specified path is relative to the part path. virtual bool exists(const std::string & name) const = 0; virtual bool isDirectory(const std::string & name) const = 0; /// Modification time for part directory. virtual Poco::Timestamp getLastModified() const = 0; + /// Iterate part directory. Iteration in subdirectory is not needed yet. virtual DataPartStorageIteratorPtr iterate() const = 0; @@ -107,7 +111,6 @@ public: std::optional read_hint, std::optional file_size) const = 0; - virtual void loadVersionMetadata(VersionMetadata & version, Poco::Logger * log) const = 0; virtual void checkConsistency(const MergeTreeDataPartChecksums & checksums) const = 0; struct ProjectionChecksums @@ -129,12 +132,12 @@ public: /// Get a name like 'prefix_partdir_tryN' which does not exist in a root dir. /// TODO: remove it. - virtual std::string getRelativePathForPrefix(Poco::Logger * log, const String & prefix, bool detached) const = 0; + virtual std::optional getRelativePathForPrefix( + Poco::Logger * log, const String & prefix, bool detached, bool broken) const = 0; - /// Reset part directory, used for im-memory parts. + /// Reset part directory, used for in-memory parts. /// TODO: remove it. virtual void setRelativePath(const std::string & path) = 0; - virtual void onRename(const std::string & new_root_path, const std::string & new_part_dir) = 0; /// Some methods from IDisk. Needed to avoid getting internal IDisk interface. virtual std::string getDiskName() const = 0; @@ -143,41 +146,26 @@ public: virtual bool supportZeroCopyReplication() const { return false; } virtual bool supportParallelWrite() const = 0; virtual bool isBroken() const = 0; - virtual void syncRevision(UInt64 revision) = 0; + + /// TODO: remove or at least remove const. + virtual void syncRevision(UInt64 revision) const = 0; virtual UInt64 getRevision() const = 0; + virtual std::unordered_map getSerializedMetadata(const std::vector & paths) const = 0; /// Get a path for internal disk if relevant. It is used mainly for logging. virtual std::string getDiskPath() const = 0; - /// Check if data part is stored on one of the specified disk in set. - using DisksSet = std::unordered_set; - virtual DisksSet::const_iterator isStoredOnDisk(const DisksSet & disks) const { return disks.end(); } - /// Reserve space on the same disk. /// Probably we should try to remove it later. - virtual ReservationPtr reserve(UInt64 /*bytes*/) const { return nullptr; } - virtual ReservationPtr tryReserve(UInt64 /*bytes*/) const { return nullptr; } - virtual size_t getVolumeIndex(const IStoragePolicy &) const { return 0; } - - /// Some methods which change data part internals possibly after creation. - /// Probably we should try to remove it later. 
- virtual void writeChecksums(const MergeTreeDataPartChecksums & checksums, const WriteSettings & settings) const = 0; - virtual void writeColumns(const NamesAndTypesList & columns, const WriteSettings & settings) const = 0; - virtual void writeVersionMetadata(const VersionMetadata & version, bool fsync_part_dir) const = 0; - virtual void appendCSNToVersionMetadata(const VersionMetadata & version, VersionMetadata::WhichCSN which_csn) const = 0; - virtual void appendRemovalTIDToVersionMetadata(const VersionMetadata & version, bool clear) const = 0; - virtual void writeDeleteOnDestroyMarker(Poco::Logger * log) const = 0; - virtual void removeDeleteOnDestroyMarker() const = 0; - virtual void removeVersionMetadata() const = 0; + /// TODO: remove constness + virtual ReservationPtr reserve(UInt64 /*bytes*/) const { return nullptr; } + virtual ReservationPtr tryReserve(UInt64 /*bytes*/) const { return nullptr; } /// A leak of abstraction. /// Return some uniq string for file. /// Required for distinguish different copies of the same part on remote FS. virtual String getUniqueId() const = 0; - /// A leak of abstraction - virtual bool shallParticipateInMerges(const IStoragePolicy &) const { return true; } - /// Create a backup of a data part. /// This method adds a new entry to backup_entries. /// Also creates a new tmp_dir for internal disk (if disk is mentioned the first time). @@ -205,7 +193,7 @@ public: const NameSet & files_to_copy_instead_of_hardlinks) const = 0; /// Make a full copy of a data part into 'to/dir_path' (possibly to a different disk). - virtual std::shared_ptr clone( + virtual std::shared_ptr clonePart( const std::string & to, const std::string & dir_path, const DiskPtr & disk, @@ -215,33 +203,22 @@ public: /// Right now, this is needed for rename table query. virtual void changeRootPath(const std::string & from_root, const std::string & to_root) = 0; - /// Leak of abstraction as well. We should use builder as one-time object which allow - /// us to build parts, while storage should be read-only method to access part properties - /// related to disk. However our code is really tricky and sometimes we need ad-hoc builders. - virtual DataPartStorageBuilderPtr getBuilder() const = 0; -}; - -using DataPartStoragePtr = std::shared_ptr; - -/// This interface is needed to write data part. -class IDataPartStorageBuilder -{ -public: - virtual ~IDataPartStorageBuilder() = default; - - /// Reset part directory, used for im-memory parts - virtual void setRelativePath(const std::string & path) = 0; - - virtual std::string getPartDirectory() const = 0; - virtual std::string getFullPath() const = 0; - virtual std::string getRelativePath() const = 0; - - virtual bool exists() const = 0; - virtual void createDirectories() = 0; virtual void createProjection(const std::string & name) = 0; - virtual std::unique_ptr writeFile(const String & name, size_t buf_size, const WriteSettings & settings) = 0; + virtual std::unique_ptr writeFile( + const String & name, + size_t buf_size, + const WriteSettings & settings) = 0; + + /// A special const method to write transaction file. + /// It's const, because file with transaction metadata + /// can be modified after part creation. 
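The comment above explains why writeTransactionFile stays usable through a const storage: transaction metadata legitimately changes after the part is created, and later in this patch CSN and removal-TID records are appended to it rather than rewriting the whole file. A minimal, self-contained sketch of that append-only idea, with a plain std::ofstream standing in for the WriteBuffer the real method returns (file name and record format are simplified stand-ins, not the actual txn_version.txt layout):

    #include <fstream>
    #include <string>

    // Illustrative only: append one small record per state change instead of
    // rewriting the metadata file, so a crash mid-write cannot damage records
    // that were already written.
    void appendTransactionRecord(const std::string & path, const std::string & record)
    {
        std::ofstream out(path, std::ios::app | std::ios::binary); // analogous to WriteMode::Append
        out << record << '\n';
        // An explicit fsync would be added only where durability is required;
        // e.g. this patch skips it for CSNs that can be recovered from the Keeper log.
    }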
+ virtual std::unique_ptr writeTransactionFile(WriteMode mode) const = 0; + + virtual void createFile(const String & name) = 0; + virtual void moveFile(const String & from_name, const String & to_name) = 0; + virtual void replaceFile(const String & from_name, const String & to_name) = 0; virtual void removeFile(const String & name) = 0; virtual void removeFileIfExists(const String & name) = 0; @@ -250,20 +227,12 @@ public: virtual SyncGuardPtr getDirectorySyncGuard() const { return nullptr; } - virtual void createHardLinkFrom(const IDataPartStorage & source, const std::string & from, const std::string & to) const = 0; - - virtual ReservationPtr reserve(UInt64 /*bytes*/) { return nullptr; } - - virtual std::shared_ptr getProjection(const std::string & name) const = 0; - - virtual DataPartStoragePtr getStorage() const = 0; + virtual void createHardLinkFrom(const IDataPartStorage & source, const std::string & from, const std::string & to) = 0; /// Rename part. /// Ideally, new_root_path should be the same as current root (but it is not true). /// Examples are: 'all_1_2_1' -> 'detached/all_1_2_1' /// 'moving/tmp_all_1_2_1' -> 'all_1_2_1' - /// - /// To notify storage also call onRename for it with first two args virtual void rename( const std::string & new_root_path, const std::string & new_part_dir, @@ -271,7 +240,35 @@ public: bool remove_new_dir_if_exists, bool fsync_part_dir) = 0; - virtual void commit() = 0; + /// Starts a transaction of mutable operations. + virtual void beginTransaction() = 0; + /// Commits a transaction of mutable operations. + virtual void commitTransaction() = 0; + virtual bool hasActiveTransaction() const = 0; +}; + +using DataPartStoragePtr = std::shared_ptr; +using MutableDataPartStoragePtr = std::shared_ptr; + +/// A holder that encapsulates data part storage and +/// gives access to const storage from const methods +/// and to mutable storage from non-const methods. 
+class DataPartStorageHolder : public boost::noncopyable +{ +public: + explicit DataPartStorageHolder(MutableDataPartStoragePtr storage_) + : storage(std::move(storage_)) + { + } + + IDataPartStorage & getDataPartStorage() { return *storage; } + const IDataPartStorage & getDataPartStorage() const { return *storage; } + + MutableDataPartStoragePtr getDataPartStoragePtr() { return storage; } + DataPartStoragePtr getDataPartStoragePtr() const { return storage; } + +private: + MutableDataPartStoragePtr storage; }; } diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 46323f12305..368af55aa15 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -1,4 +1,5 @@ #include "IMergeTreeDataPart.h" +#include "Storages/MergeTree/IDataPartStorage.h" #include #include @@ -101,7 +102,7 @@ void IMergeTreeDataPart::MinMaxIndex::load(const MergeTreeData & data, const Par } IMergeTreeDataPart::MinMaxIndex::WrittenFiles IMergeTreeDataPart::MinMaxIndex::store( - const MergeTreeData & data, const DataPartStorageBuilderPtr & data_part_storage_builder, Checksums & out_checksums) const + const MergeTreeData & data, IDataPartStorage & part_storage, Checksums & out_checksums) const { auto metadata_snapshot = data.getInMemoryMetadataPtr(); const auto & partition_key = metadata_snapshot->getPartitionKey(); @@ -109,20 +110,20 @@ IMergeTreeDataPart::MinMaxIndex::WrittenFiles IMergeTreeDataPart::MinMaxIndex::s auto minmax_column_names = data.getMinMaxColumnsNames(partition_key); auto minmax_column_types = data.getMinMaxColumnsTypes(partition_key); - return store(minmax_column_names, minmax_column_types, data_part_storage_builder, out_checksums); + return store(minmax_column_names, minmax_column_types, part_storage, out_checksums); } IMergeTreeDataPart::MinMaxIndex::WrittenFiles IMergeTreeDataPart::MinMaxIndex::store( const Names & column_names, const DataTypes & data_types, - const DataPartStorageBuilderPtr & data_part_storage_builder, + IDataPartStorage & part_storage, Checksums & out_checksums) const { if (!initialized) throw Exception( ErrorCodes::LOGICAL_ERROR, "Attempt to store uninitialized MinMax index for part {}. This is a bug", - data_part_storage_builder->getFullPath()); + part_storage.getFullPath()); WrittenFiles written_files; @@ -131,7 +132,7 @@ IMergeTreeDataPart::MinMaxIndex::WrittenFiles IMergeTreeDataPart::MinMaxIndex::s String file_name = "minmax_" + escapeForFileName(column_names[i]) + ".idx"; auto serialization = data_types.at(i)->getDefaultSerialization(); - auto out = data_part_storage_builder->writeFile(file_name, DBMS_DEFAULT_BUFFER_SIZE, {}); + auto out = part_storage.writeFile(file_name, DBMS_DEFAULT_BUFFER_SIZE, {}); HashingWriteBuffer out_hashing(*out); serialization->serializeBinary(hyperrectangle[i].left, out_hashing); serialization->serializeBinary(hyperrectangle[i].right, out_hashing); @@ -301,13 +302,13 @@ static void decrementTypeMetric(MergeTreeDataPartType type) IMergeTreeDataPart::IMergeTreeDataPart( const MergeTreeData & storage_, const String & name_, - const DataPartStoragePtr & data_part_storage_, + const MutableDataPartStoragePtr & data_part_storage_, Type part_type_, const IMergeTreeDataPart * parent_part_) - : storage(storage_) + : DataPartStorageHolder(data_part_storage_) + , storage(storage_) , name(name_) , info(MergeTreePartInfo::fromPartName(name_, storage.format_version)) - , data_part_storage(parent_part_ ? 
parent_part_->data_part_storage : data_part_storage_) , index_granularity_info(storage_, part_type_) , part_type(part_type_) , parent_part(parent_part_) @@ -315,6 +316,7 @@ IMergeTreeDataPart::IMergeTreeDataPart( { if (parent_part) state = MergeTreeDataPartState::Active; + incrementStateMetric(state); incrementTypeMetric(part_type); @@ -328,13 +330,13 @@ IMergeTreeDataPart::IMergeTreeDataPart( const MergeTreeData & storage_, const String & name_, const MergeTreePartInfo & info_, - const DataPartStoragePtr & data_part_storage_, + const MutableDataPartStoragePtr & data_part_storage_, Type part_type_, const IMergeTreeDataPart * parent_part_) - : storage(storage_) + : DataPartStorageHolder(data_part_storage_) + , storage(storage_) , name(name_) , info(info_) - , data_part_storage(data_part_storage_) , index_granularity_info(storage_, part_type_) , part_type(part_type_) , parent_part(parent_part_) @@ -342,6 +344,7 @@ IMergeTreeDataPart::IMergeTreeDataPart( { if (parent_part) state = MergeTreeDataPartState::Active; + incrementStateMetric(state); incrementTypeMetric(part_type); @@ -505,17 +508,17 @@ void IMergeTreeDataPart::removeIfNeeded() std::string path; try { - path = data_part_storage->getRelativePath(); + path = getDataPartStorage().getRelativePath(); - if (!data_part_storage->exists()) // path + if (!getDataPartStorage().exists()) // path return; if (is_temp) { - String file_name = fileName(data_part_storage->getPartDirectory()); + String file_name = fileName(getDataPartStorage().getPartDirectory()); if (file_name.empty()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "relative_path {} of part {} is invalid or not set", data_part_storage->getPartDirectory(), name); + throw Exception(ErrorCodes::LOGICAL_ERROR, "relative_path {} of part {} is invalid or not set", getDataPartStorage().getPartDirectory(), name); if (!startsWith(file_name, "tmp") && !endsWith(file_name, ".tmp_proj")) { @@ -620,7 +623,7 @@ String IMergeTreeDataPart::getColumnNameWithMinimumCompressedSize(bool with_subc } if (!minimum_size_column) - throw Exception("Could not find a column of minimum size in MergeTree, part " + data_part_storage->getFullPath(), ErrorCodes::LOGICAL_ERROR); + throw Exception("Could not find a column of minimum size in MergeTree, part " + getDataPartStorage().getFullPath(), ErrorCodes::LOGICAL_ERROR); return *minimum_size_column; } @@ -698,9 +701,9 @@ void IMergeTreeDataPart::loadProjections(bool require_columns_checksums, bool ch for (const auto & projection : metadata_snapshot->projections) { String path = /*getRelativePath() + */ projection.name + ".proj"; - if (data_part_storage->exists(path)) + if (getDataPartStorage().exists(path)) { - auto projection_part_storage = data_part_storage->getProjection(projection.name + ".proj"); + auto projection_part_storage = getDataPartStorage().getProjection(projection.name + ".proj"); auto part = storage.createPart(projection.name, {"all", 0, 0, 0}, projection_part_storage, this); part->loadColumnsChecksumsIndexes(require_columns_checksums, check_consistency); projection_parts.emplace(projection.name, std::move(part)); @@ -741,8 +744,8 @@ void IMergeTreeDataPart::loadIndex() loaded_index[i]->reserve(index_granularity.getMarksCount()); } - String index_name = "primary" + getIndexExtensionFromFilesystem(data_part_storage).value(); - String index_path = fs::path(data_part_storage->getRelativePath()) / index_name; + String index_name = "primary" + getIndexExtensionFromFilesystem(getDataPartStorage()).value(); + String index_path = 
fs::path(getDataPartStorage().getRelativePath()) / index_name; auto index_file = metadata_manager->read(index_name); size_t marks_count = index_granularity.getMarksCount(); @@ -781,7 +784,7 @@ void IMergeTreeDataPart::appendFilesOfIndex(Strings & files) const if (metadata_snapshot->hasPrimaryKey()) { - String index_name = "primary" + getIndexExtensionFromFilesystem(data_part_storage).value(); + String index_name = "primary" + getIndexExtensionFromFilesystem(getDataPartStorage()).value(); files.push_back(index_name); } } @@ -793,10 +796,10 @@ NameSet IMergeTreeDataPart::getFileNamesWithoutChecksums() const NameSet result = {"checksums.txt", "columns.txt"}; - if (data_part_storage->exists(DEFAULT_COMPRESSION_CODEC_FILE_NAME)) + if (getDataPartStorage().exists(DEFAULT_COMPRESSION_CODEC_FILE_NAME)) result.emplace(DEFAULT_COMPRESSION_CODEC_FILE_NAME); - if (data_part_storage->exists(TXN_VERSION_METADATA_FILE_NAME)) + if (getDataPartStorage().exists(TXN_VERSION_METADATA_FILE_NAME)) result.emplace(TXN_VERSION_METADATA_FILE_NAME); return result; @@ -811,7 +814,7 @@ void IMergeTreeDataPart::loadDefaultCompressionCodec() return; } - String path = fs::path(data_part_storage->getRelativePath()) / DEFAULT_COMPRESSION_CODEC_FILE_NAME; + String path = fs::path(getDataPartStorage().getRelativePath()) / DEFAULT_COMPRESSION_CODEC_FILE_NAME; bool exists = metadata_manager->exists(DEFAULT_COMPRESSION_CODEC_FILE_NAME); if (!exists) { @@ -851,6 +854,120 @@ void IMergeTreeDataPart::loadDefaultCompressionCodec() } } +template +void IMergeTreeDataPart::writeMetadata(const String & filename, const WriteSettings & settings, Writer && writer) +{ + auto & data_part_storage = getDataPartStorage(); + auto tmp_filename = filename + ".tmp"; + + try + { + { + auto out = data_part_storage.writeFile(tmp_filename, 4096, settings); + writer(*out); + out->finalize(); + } + + data_part_storage.moveFile(tmp_filename, filename); + } + catch (...) + { + try + { + if (data_part_storage.exists(tmp_filename)) + data_part_storage.removeFile(tmp_filename); + } + catch (...) + { + tryLogCurrentException("DataPartStorageOnDisk"); + } + + throw; + } +} + +void IMergeTreeDataPart::writeChecksums(const MergeTreeDataPartChecksums & checksums_, const WriteSettings & settings) +{ + writeMetadata("checksums.txt", settings, [&checksums_](auto & buffer) + { + checksums_.write(buffer); + }); +} + +void IMergeTreeDataPart::writeColumns(const NamesAndTypesList & columns_, const WriteSettings & settings) +{ + writeMetadata("columns.txt", settings, [&columns_](auto & buffer) + { + columns_.writeText(buffer); + }); +} + +void IMergeTreeDataPart::writeVersionMetadata(const VersionMetadata & version_, bool fsync_part_dir) const +{ + static constexpr auto filename = "txn_version.txt"; + static constexpr auto tmp_filename = "txn_version.txt.tmp"; + auto & data_part_storage = const_cast(getDataPartStorage()); + + try + { + { + /// TODO IDisk interface does not allow to open file with O_EXCL flag (for DiskLocal), + /// so we create empty file at first (expecting that createFile throws if file already exists) + /// and then overwrite it. 
+ data_part_storage.createFile(tmp_filename); + auto write_settings = storage.getContext()->getWriteSettings(); + auto buf = data_part_storage.writeFile(tmp_filename, 256, write_settings); + version_.write(*buf); + buf->finalize(); + buf->sync(); + } + + SyncGuardPtr sync_guard; + if (fsync_part_dir) + sync_guard = data_part_storage.getDirectorySyncGuard(); + data_part_storage.replaceFile(tmp_filename, filename); + } + catch (...) + { + try + { + if (data_part_storage.exists(tmp_filename)) + data_part_storage.removeFile(tmp_filename); + } + catch (...) + { + tryLogCurrentException("DataPartStorageOnDisk"); + } + + throw; + } +} + +void IMergeTreeDataPart::writeDeleteOnDestroyMarker() +{ + static constexpr auto marker_path = "delete-on-destroy.txt"; + + try + { + getDataPartStorage().createFile(marker_path); + } + catch (Poco::Exception & e) + { + LOG_ERROR(storage.log, "{} (while creating DeleteOnDestroy marker: {})", + e.what(), (fs::path(getDataPartStorage().getFullPath()) / marker_path).string()); + } +} + +void IMergeTreeDataPart::removeDeleteOnDestroyMarker() +{ + getDataPartStorage().removeFileIfExists("delete-on-destroy.txt"); +} + +void IMergeTreeDataPart::removeVersionMetadata() +{ + getDataPartStorage().removeFileIfExists("txn_version.txt"); +} + void IMergeTreeDataPart::appendFilesOfDefaultCompressionCodec(Strings & files) { files.push_back(DEFAULT_COMPRESSION_CODEC_FILE_NAME); @@ -880,7 +997,7 @@ CompressionCodecPtr IMergeTreeDataPart::detectDefaultCompressionCodec() const String candidate_path = /*fs::path(getRelativePath()) */ (ISerialization::getFileNameForStream(part_column, substream_path) + ".bin"); /// We can have existing, but empty .bin files. Example: LowCardinality(Nullable(...)) columns and column_name.dict.null.bin file. - if (data_part_storage->exists(candidate_path) && data_part_storage->getFileSize(candidate_path) != 0) + if (getDataPartStorage().exists(candidate_path) && getDataPartStorage().getFileSize(candidate_path) != 0) path_to_data_file = candidate_path; } }); @@ -891,7 +1008,7 @@ CompressionCodecPtr IMergeTreeDataPart::detectDefaultCompressionCodec() const continue; } - result = getCompressionCodecForFile(data_part_storage, path_to_data_file); + result = getCompressionCodecForFile(getDataPartStorage(), path_to_data_file); break; } } @@ -936,7 +1053,7 @@ void IMergeTreeDataPart::loadPartitionAndMinMaxIndex() String calculated_partition_id = partition.getID(metadata_snapshot->getPartitionKey().sample_block); if (calculated_partition_id != info.partition_id) throw Exception( - "While loading part " + data_part_storage->getFullPath() + ": calculated partition ID: " + calculated_partition_id + "While loading part " + getDataPartStorage().getFullPath() + ": calculated partition ID: " + calculated_partition_id + " differs from partition ID in part name: " + info.partition_id, ErrorCodes::CORRUPTED_DATA); } @@ -965,7 +1082,7 @@ void IMergeTreeDataPart::loadChecksums(bool require) bytes_on_disk = checksums.getTotalSizeOnDisk(); } else - bytes_on_disk = data_part_storage->calculateTotalSizeOnDisk(); + bytes_on_disk = getDataPartStorage().calculateTotalSizeOnDisk(); } else { @@ -977,7 +1094,7 @@ void IMergeTreeDataPart::loadChecksums(bool require) LOG_WARNING(storage.log, "Checksums for part {} not found. 
Will calculate them from data on disk.", name); checksums = checkDataPart(shared_from_this(), false); - data_part_storage->writeChecksums(checksums, {}); + writeChecksums(checksums, {}); bytes_on_disk = checksums.getTotalSizeOnDisk(); } @@ -990,8 +1107,6 @@ void IMergeTreeDataPart::appendFilesOfChecksums(Strings & files) void IMergeTreeDataPart::loadRowsCount() { - //String path = fs::path(getRelativePath()) / "count.txt"; - auto read_rows_count = [&]() { auto buf = metadata_manager->read("count.txt"); @@ -1062,7 +1177,7 @@ void IMergeTreeDataPart::loadRowsCount() } else { - if (data_part_storage->exists("count.txt")) + if (getDataPartStorage().exists("count.txt")) { read_rows_count(); return; @@ -1161,7 +1276,7 @@ void IMergeTreeDataPart::appendFilesOfUUID(Strings & files) void IMergeTreeDataPart::loadColumns(bool require) { - String path = fs::path(data_part_storage->getRelativePath()) / "columns.txt"; + String path = fs::path(getDataPartStorage().getRelativePath()) / "columns.txt"; auto metadata_snapshot = storage.getInMemoryMetadataPtr(); if (parent_part) metadata_snapshot = metadata_snapshot->projections.get(name).metadata; @@ -1172,30 +1287,26 @@ void IMergeTreeDataPart::loadColumns(bool require) { /// We can get list of columns only from columns.txt in compact parts. if (require || part_type == Type::Compact) - throw Exception("No columns.txt in part " + name + ", expected path " + path + " on drive " + data_part_storage->getDiskName(), + throw Exception("No columns.txt in part " + name + ", expected path " + path + " on drive " + getDataPartStorage().getDiskName(), ErrorCodes::NO_FILE_IN_DATA_PART); /// If there is no file with a list of columns, write it down. for (const NameAndTypePair & column : metadata_snapshot->getColumns().getAllPhysical()) - if (data_part_storage->exists(getFileNameForColumn(column) + ".bin")) + if (getDataPartStorage().exists(getFileNameForColumn(column) + ".bin")) loaded_columns.push_back(column); if (columns.empty()) throw Exception("No columns in part " + name, ErrorCodes::NO_FILE_IN_DATA_PART); - data_part_storage->writeColumns(loaded_columns, {}); + writeColumns(loaded_columns, {}); } else { auto in = metadata_manager->read("columns.txt"); loaded_columns.readText(*in); - for (const auto & column : loaded_columns) - { - const auto * aggregate_function_data_type = typeid_cast(column.type.get()); - if (aggregate_function_data_type && aggregate_function_data_type->isVersioned()) - aggregate_function_data_type->setVersion(0, /* if_empty */true); - } + for (auto & column : loaded_columns) + setVersionToAggregateFunctions(column.type, true); } SerializationInfo::Settings settings = @@ -1231,7 +1342,7 @@ void IMergeTreeDataPart::assertHasVersionMetadata(MergeTreeTransaction * txn) co name, storage.getStorageID().getNameForLogs(), version.creation_tid, txn ? 
txn->dumpDescription() : ""); assert(!txn || storage.supportsTransactions()); - assert(!txn || data_part_storage->exists(TXN_VERSION_METADATA_FILE_NAME)); + assert(!txn || getDataPartStorage().exists(TXN_VERSION_METADATA_FILE_NAME)); } void IMergeTreeDataPart::storeVersionMetadata(bool force) const @@ -1246,7 +1357,7 @@ void IMergeTreeDataPart::storeVersionMetadata(bool force) const throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Transactions are not supported for in-memory parts (table: {}, part: {})", storage.getStorageID().getNameForLogs(), name); - data_part_storage->writeVersionMetadata(version, storage.getSettings()->fsync_part_directory); + writeVersionMetadata(version, storage.getSettings()->fsync_part_directory); } void IMergeTreeDataPart::appendCSNToVersionMetadata(VersionMetadata::WhichCSN which_csn) const @@ -1258,7 +1369,14 @@ void IMergeTreeDataPart::appendCSNToVersionMetadata(VersionMetadata::WhichCSN wh chassert(!(which_csn == VersionMetadata::WhichCSN::REMOVAL && version.removal_csn == 0)); chassert(isStoredOnDisk()); - data_part_storage->appendCSNToVersionMetadata(version, which_csn); + /// Small enough appends to file are usually atomic, + /// so we append new metadata instead of rewriting file to reduce number of fsyncs. + /// We don't need to do fsync when writing CSN, because in case of hard restart + /// we will be able to restore CSN from transaction log in Keeper. + + auto out = getDataPartStorage().writeTransactionFile(WriteMode::Append); + version.writeCSN(*out, which_csn); + out->finalize(); } void IMergeTreeDataPart::appendRemovalTIDToVersionMetadata(bool clear) const @@ -1281,13 +1399,74 @@ void IMergeTreeDataPart::appendRemovalTIDToVersionMetadata(bool clear) const else LOG_TEST(storage.log, "Appending removal TID for {} (creation: {}, removal {})", name, version.creation_tid, version.removal_tid); - data_part_storage->appendRemovalTIDToVersionMetadata(version, clear); + auto out = getDataPartStorage().writeTransactionFile(WriteMode::Append); + version.writeRemovalTID(*out, clear); + out->finalize(); + + /// fsync is not required when we clearing removal TID, because after hard restart we will fix metadata + if (!clear) + out->sync(); +} + +static std::unique_ptr openForReading(const IDataPartStorage & part_storage, const String & filename) +{ + size_t file_size = part_storage.getFileSize(filename); + return part_storage.readFile(filename, ReadSettings().adjustBufferSize(file_size), file_size, file_size); } void IMergeTreeDataPart::loadVersionMetadata() const try { - data_part_storage->loadVersionMetadata(version, storage.log); + static constexpr auto version_file_name = "txn_version.txt"; + static constexpr auto tmp_version_file_name = "txn_version.txt.tmp"; + auto & data_part_storage = const_cast(getDataPartStorage()); + + auto remove_tmp_file = [&]() + { + auto last_modified = data_part_storage.getLastModified(); + auto buf = openForReading(data_part_storage, tmp_version_file_name); + + String content; + readStringUntilEOF(content, *buf); + LOG_WARNING(storage.log, "Found file {} that was last modified on {}, has size {} and the following content: {}", + tmp_version_file_name, last_modified.epochTime(), content.size(), content); + data_part_storage.removeFile(tmp_version_file_name); + }; + + if (data_part_storage.exists(version_file_name)) + { + auto buf = openForReading(data_part_storage, version_file_name); + version.read(*buf); + if (data_part_storage.exists(tmp_version_file_name)) + remove_tmp_file(); + return; + } + + /// Four (?) 
cases are possible: + /// 1. Part was created without transactions. + /// 2. Version metadata file was not renamed from *.tmp on part creation. + /// 3. Version metadata were written to *.tmp file, but hard restart happened before fsync. + /// 4. Fsyncs in storeVersionMetadata() work incorrectly. + + if (!data_part_storage.exists(tmp_version_file_name)) + { + /// Case 1. + /// We do not have version metadata and transactions history for old parts, + /// so let's consider that such parts were created by some ancient transaction + /// and were committed with some prehistoric CSN. + /// NOTE It might be Case 3, but version metadata file is written on part creation before other files, + /// so it's not Case 3 if part is not broken. + version.setCreationTID(Tx::PrehistoricTID, nullptr); + version.creation_csn = Tx::PrehistoricCSN; + return; + } + + /// Case 2. + /// Content of *.tmp file may be broken, just use fake TID. + /// Transaction was not committed if *.tmp file was not renamed, so we should complete rollback by removing part. + version.setCreationTID(Tx::DummyTID, nullptr); + version.creation_csn = Tx::RolledBackCSN; + remove_tmp_file(); } catch (Exception & e) { @@ -1324,15 +1503,15 @@ bool IMergeTreeDataPart::assertHasValidVersionMetadata() const if (state == MergeTreeDataPartState::Temporary) return true; - if (!data_part_storage->exists()) + if (!getDataPartStorage().exists()) return true; String content; String version_file_name = TXN_VERSION_METADATA_FILE_NAME; try { - size_t file_size = data_part_storage->getFileSize(TXN_VERSION_METADATA_FILE_NAME); - auto buf = data_part_storage->readFile(TXN_VERSION_METADATA_FILE_NAME, ReadSettings().adjustBufferSize(file_size), file_size, std::nullopt); + size_t file_size = getDataPartStorage().getFileSize(TXN_VERSION_METADATA_FILE_NAME); + auto buf = getDataPartStorage().readFile(TXN_VERSION_METADATA_FILE_NAME, ReadSettings().adjustBufferSize(file_size), file_size, std::nullopt); readStringUntilEOF(content, *buf); ReadBufferFromString str_buf{content}; @@ -1366,10 +1545,11 @@ void IMergeTreeDataPart::appendFilesOfColumns(Strings & files) bool IMergeTreeDataPart::shallParticipateInMerges(const StoragePolicyPtr & storage_policy) const { - return data_part_storage->shallParticipateInMerges(*storage_policy); + auto disk_name = getDataPartStorage().getDiskName(); + return !storage_policy->getVolumeByDiskName(disk_name)->areMergesAvoided(); } -void IMergeTreeDataPart::renameTo(const String & new_relative_path, bool remove_new_dir_if_exists, DataPartStorageBuilderPtr builder) const +void IMergeTreeDataPart::renameTo(const String & new_relative_path, bool remove_new_dir_if_exists) try { assertOnDisk(); @@ -1380,22 +1560,21 @@ try if (parent_part) { /// For projections, move is only possible inside parent part dir. 
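To summarize the recovery logic spelled out in the loadVersionMetadata comments earlier in this hunk, here is a hedged, standalone sketch of the same decision table; std::filesystem checks stand in for the real IDataPartStorage calls and the enum is purely illustrative:

    #include <filesystem>

    enum class CreationState { Committed, Prehistoric, RolledBack };

    // Simplified classification of a part's transaction state after a restart:
    // - txn_version.txt exists      -> read it (a stray *.tmp file is only logged and removed)
    // - neither file exists         -> old part, treat as committed by a prehistoric transaction
    // - only txn_version.txt.tmp    -> the transaction never committed, so the part is rolled back
    CreationState classifyVersionMetadata(const std::filesystem::path & part_dir)
    {
        if (std::filesystem::exists(part_dir / "txn_version.txt"))
            return CreationState::Committed;
        if (!std::filesystem::exists(part_dir / "txn_version.txt.tmp"))
            return CreationState::Prehistoric;
        return CreationState::RolledBack;
    }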
- relative_path = parent_part->data_part_storage->getRelativePath(); + relative_path = parent_part->getDataPartStorage().getRelativePath(); } - String from = data_part_storage->getRelativePath(); + auto old_projection_root_path = getDataPartStorage().getRelativePath(); auto to = fs::path(relative_path) / new_relative_path; metadata_manager->deleteAll(true); metadata_manager->assertAllDeleted(true); - builder->rename(to.parent_path(), to.filename(), storage.log, remove_new_dir_if_exists, fsync_dir); - data_part_storage->onRename(to.parent_path(), to.filename()); + getDataPartStorage().rename(to.parent_path(), to.filename(), storage.log, remove_new_dir_if_exists, fsync_dir); metadata_manager->updateAll(true); - for (const auto & [p_name, part] : projection_parts) - { - part->data_part_storage = data_part_storage->getProjection(p_name + ".proj"); - } + auto new_projection_root_path = to.string(); + + for (const auto & [_, part] : projection_parts) + part->getDataPartStorage().changeRootPath(old_projection_root_path, new_projection_root_path); } catch (...) { @@ -1436,14 +1615,14 @@ void IMergeTreeDataPart::initializePartMetadataManager() void IMergeTreeDataPart::initializeIndexGranularityInfo() { - auto mrk_ext = MergeTreeIndexGranularityInfo::getMarksExtensionFromFilesystem(data_part_storage); + auto mrk_ext = MergeTreeIndexGranularityInfo::getMarksExtensionFromFilesystem(getDataPartStorage()); if (mrk_ext) index_granularity_info = MergeTreeIndexGranularityInfo(storage, MarkType{*mrk_ext}); else index_granularity_info = MergeTreeIndexGranularityInfo(storage, part_type); } -void IMergeTreeDataPart::remove() const +void IMergeTreeDataPart::remove() { assert(assertHasValidVersionMetadata()); part_is_probably_removed_from_disk = true; @@ -1460,7 +1639,6 @@ void IMergeTreeDataPart::remove() const return CanRemoveDescription{.can_remove_anything = can_remove, .files_not_to_remove = files_not_to_remove }; }; - if (!isStoredOnDisk()) return; @@ -1479,11 +1657,12 @@ void IMergeTreeDataPart::remove() const projection_checksums.emplace_back(IDataPartStorage::ProjectionChecksums{.name = p_name, .checksums = projection_part->checksums}); } - data_part_storage->remove(std::move(can_remove_callback), checksums, projection_checksums, is_temp, getState(), storage.log); + getDataPartStorage().remove(std::move(can_remove_callback), checksums, projection_checksums, is_temp, getState(), storage.log); } -String IMergeTreeDataPart::getRelativePathForPrefix(const String & prefix, bool detached) const +std::optional IMergeTreeDataPart::getRelativePathForPrefix(const String & prefix, bool detached, bool broken) const { + assert(!broken || detached); String res; /** If you need to detach a part, and directory into which we want to rename it already exists, @@ -1495,22 +1674,26 @@ String IMergeTreeDataPart::getRelativePathForPrefix(const String & prefix, bool if (detached && parent_part) throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot detach projection"); - return data_part_storage->getRelativePathForPrefix(storage.log, prefix, detached); + return getDataPartStorage().getRelativePathForPrefix(storage.log, prefix, detached, broken); } -String IMergeTreeDataPart::getRelativePathForDetachedPart(const String & prefix) const +std::optional IMergeTreeDataPart::getRelativePathForDetachedPart(const String & prefix, bool broken) const { /// Do not allow underscores in the prefix because they are used as separators. 
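The underscore restriction above exists because the detached directory name is assembled from underscore-separated pieces, with a _tryN suffix probed when the target already exists (see the "_tryN" and "10 attempts" concern in the makeCloneInDetached comment below). A simplified sketch of that naming scheme, with a caller-supplied existence check standing in for the real storage lookup:

    #include <functional>
    #include <optional>
    #include <string>

    // Hypothetical helper: build "detached/<prefix>_<part_dir>" and, if that path is
    // taken, probe "_try1".."_try9" suffixes before giving up.
    std::optional<std::string> detachedPathFor(
        const std::string & prefix,
        const std::string & part_dir,
        const std::function<bool(const std::string &)> & exists)
    {
        const std::string base = prefix.empty() ? part_dir : prefix + "_" + part_dir;
        for (int attempt = 0; attempt < 10; ++attempt)
        {
            std::string candidate = "detached/" + base;
            if (attempt > 0)
                candidate += "_try" + std::to_string(attempt);
            if (!exists(candidate))
                return candidate;
        }
        return std::nullopt; // mirrors the "fail after 10 attempts" concern noted below
    }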
assert(prefix.find_first_of('_') == String::npos); assert(prefix.empty() || std::find(DetachedPartInfo::DETACH_REASONS.begin(), DetachedPartInfo::DETACH_REASONS.end(), prefix) != DetachedPartInfo::DETACH_REASONS.end()); - return "detached/" + getRelativePathForPrefix(prefix, /* detached */ true); + if (auto path = getRelativePathForPrefix(prefix, /* detached */ true, broken)) + return "detached/" + *path; + return {}; } -void IMergeTreeDataPart::renameToDetached(const String & prefix, DataPartStorageBuilderPtr builder) const +void IMergeTreeDataPart::renameToDetached(const String & prefix) { - renameTo(getRelativePathForDetachedPart(prefix), true, builder); + auto path_to_detach = getRelativePathForDetachedPart(prefix, /* broken */ false); + assert(path_to_detach); + renameTo(path_to_detach.value(), true); part_is_probably_removed_from_disk = true; } @@ -1522,26 +1705,33 @@ void IMergeTreeDataPart::makeCloneInDetached(const String & prefix, const Storag /// because hardlinks tracking doesn't work for detached parts. bool copy_instead_of_hardlink = isStoredOnRemoteDiskWithZeroCopySupport() && storage.supportsReplication() && storage_settings->allow_remote_fs_zero_copy_replication; - data_part_storage->freeze( + /// Avoid unneeded duplicates of broken parts if we try to detach the same broken part multiple times. + /// Otherwise it may pollute detached/ with dirs with _tryN suffix and we will fail to remove broken part after 10 attempts. + bool broken = !prefix.empty(); + auto maybe_path_in_detached = getRelativePathForDetachedPart(prefix, broken); + if (!maybe_path_in_detached) + return; + + getDataPartStorage().freeze( storage.relative_data_path, - getRelativePathForDetachedPart(prefix), + *maybe_path_in_detached, /*make_source_readonly*/ true, {}, copy_instead_of_hardlink, {}); } -DataPartStoragePtr IMergeTreeDataPart::makeCloneOnDisk(const DiskPtr & disk, const String & directory_name) const +MutableDataPartStoragePtr IMergeTreeDataPart::makeCloneOnDisk(const DiskPtr & disk, const String & directory_name) const { assertOnDisk(); - if (disk->getName() == data_part_storage->getDiskName()) - throw Exception("Can not clone data part " + name + " to same disk " + data_part_storage->getDiskName(), ErrorCodes::LOGICAL_ERROR); + if (disk->getName() == getDataPartStorage().getDiskName()) + throw Exception("Can not clone data part " + name + " to same disk " + getDataPartStorage().getDiskName(), ErrorCodes::LOGICAL_ERROR); if (directory_name.empty()) throw Exception("Can not clone data part " + name + " to empty directory.", ErrorCodes::LOGICAL_ERROR); String path_to_clone = fs::path(storage.relative_data_path) / directory_name / ""; - return data_part_storage->clone(path_to_clone, data_part_storage->getPartDirectory(), disk, storage.log); + return getDataPartStorage().clonePart(path_to_clone, getDataPartStorage().getPartDirectory(), disk, storage.log); } void IMergeTreeDataPart::checkConsistencyBase() const @@ -1582,26 +1772,26 @@ void IMergeTreeDataPart::checkConsistencyBase() const } } - data_part_storage->checkConsistency(checksums); + getDataPartStorage().checkConsistency(checksums); } else { auto check_file_not_empty = [this](const String & file_path) { UInt64 file_size; - if (!data_part_storage->exists(file_path) || (file_size = data_part_storage->getFileSize(file_path)) == 0) + if (!getDataPartStorage().exists(file_path) || (file_size = getDataPartStorage().getFileSize(file_path)) == 0) throw Exception( ErrorCodes::BAD_SIZE_OF_FILE_IN_DATA_PART, "Part {} is broken: {} is empty", - 
data_part_storage->getFullPath(), - std::string(fs::path(data_part_storage->getFullPath()) / file_path)); + getDataPartStorage().getFullPath(), + std::string(fs::path(getDataPartStorage().getFullPath()) / file_path)); return file_size; }; /// Check that the primary key index is not empty. if (!pk.column_names.empty()) { - String index_name = "primary" + getIndexExtensionFromFilesystem(data_part_storage).value(); + String index_name = "primary" + getIndexExtensionFromFilesystem(getDataPartStorage()).value(); check_file_not_empty(index_name); } @@ -1745,7 +1935,7 @@ bool IMergeTreeDataPart::checkAllTTLCalculated(const StorageMetadataPtr & metada String IMergeTreeDataPart::getUniqueId() const { - return data_part_storage->getUniqueId(); + return getDataPartStorage().getUniqueId(); } String IMergeTreeDataPart::getZeroLevelPartBlockID(std::string_view token) const @@ -1784,11 +1974,11 @@ IMergeTreeDataPart::uint128 IMergeTreeDataPart::getActualChecksumByFile(const St return it->second.file_hash; } - if (!data_part_storage->exists(file_name)) + if (!getDataPartStorage().exists(file_name)) { return {}; } - std::unique_ptr in_file = data_part_storage->readFile(file_name, {}, std::nullopt, std::nullopt); + std::unique_ptr in_file = getDataPartStorage().readFile(file_name, {}, std::nullopt, std::nullopt); HashingReadBuffer in_hash(*in_file); String value; @@ -1816,11 +2006,11 @@ bool isInMemoryPart(const MergeTreeDataPartPtr & data_part) return (data_part && data_part->getType() == MergeTreeDataPartType::InMemory); } -std::optional getIndexExtensionFromFilesystem(const DataPartStoragePtr & data_part_storage) +std::optional getIndexExtensionFromFilesystem(const IDataPartStorage & data_part_storage) { - if (data_part_storage->exists()) + if (data_part_storage.exists()) { - for (auto it = data_part_storage->iterate(); it->isValid(); it->next()) + for (auto it = data_part_storage.iterate(); it->isValid(); it->next()) { const auto & extension = fs::path(it->name()).extension(); if (extension == getIndexExtension(false) diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.h b/src/Storages/MergeTree/IMergeTreeDataPart.h index 32afa2a482d..6515eb1a65c 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.h +++ b/src/Storages/MergeTree/IMergeTreeDataPart.h @@ -1,5 +1,6 @@ #pragma once +#include "IO/WriteSettings.h" #include #include #include @@ -46,7 +47,7 @@ class UncompressedCache; class MergeTreeTransaction; /// Description of the data part. 
-class IMergeTreeDataPart : public std::enable_shared_from_this +class IMergeTreeDataPart : public std::enable_shared_from_this, public DataPartStorageHolder { public: static constexpr auto DATA_FILE_EXTENSION = ".bin"; @@ -67,19 +68,18 @@ public: using uint128 = IPartMetadataManager::uint128; - IMergeTreeDataPart( const MergeTreeData & storage_, const String & name_, const MergeTreePartInfo & info_, - const DataPartStoragePtr & data_part_storage_, + const MutableDataPartStoragePtr & data_part_storage_, Type part_type_, const IMergeTreeDataPart * parent_part_); IMergeTreeDataPart( const MergeTreeData & storage_, const String & name_, - const DataPartStoragePtr & data_part_storage_, + const MutableDataPartStoragePtr & data_part_storage_, Type part_type_, const IMergeTreeDataPart * parent_part_); @@ -94,13 +94,12 @@ public: const ReadBufferFromFileBase::ProfileCallback & profile_callback_) const = 0; virtual MergeTreeWriterPtr getWriter( - DataPartStorageBuilderPtr data_part_storage_builder, const NamesAndTypesList & columns_list, const StorageMetadataPtr & metadata_snapshot, const std::vector & indices_to_recalc, const CompressionCodecPtr & default_codec_, const MergeTreeWriterSettings & writer_settings, - const MergeTreeIndexGranularity & computed_index_granularity) const = 0; + const MergeTreeIndexGranularity & computed_index_granularity) = 0; virtual bool isStoredOnDisk() const = 0; @@ -152,7 +151,7 @@ public: /// Throws an exception if part is not stored in on-disk format. void assertOnDisk() const; - void remove() const; + void remove(); /// Initialize columns (from columns.txt if exists, or create from column files if not). /// Load checksums from checksums.txt if exists. Load index if required. @@ -200,10 +199,6 @@ public: /// processed by multiple shards. UUID uuid = UUIDHelpers::Nil; - /// This is an object which encapsulates all the operations with disk. - /// Contains a path to stored data. 
- DataPartStoragePtr data_part_storage; - MergeTreeIndexGranularityInfo index_granularity_info; size_t rows_count = 0; @@ -289,8 +284,8 @@ public: using WrittenFiles = std::vector>; - [[nodiscard]] WrittenFiles store(const MergeTreeData & data, const DataPartStorageBuilderPtr & data_part_storage_builder, Checksums & checksums) const; - [[nodiscard]] WrittenFiles store(const Names & column_names, const DataTypes & data_types, const DataPartStorageBuilderPtr & data_part_storage_builder, Checksums & checksums) const; + [[nodiscard]] WrittenFiles store(const MergeTreeData & data, IDataPartStorage & part_storage, Checksums & checksums) const; + [[nodiscard]] WrittenFiles store(const Names & column_names, const DataTypes & data_types, IDataPartStorage & part_storage, Checksums & checksums) const; void update(const Block & block, const Names & column_names); void merge(const MinMaxIndex & other); @@ -321,17 +316,17 @@ public: size_t getFileSizeOrZero(const String & file_name) const; /// Moves a part to detached/ directory and adds prefix to its name - void renameToDetached(const String & prefix, DataPartStorageBuilderPtr builder) const; + void renameToDetached(const String & prefix); /// Makes checks and move part to new directory /// Changes only relative_dir_name, you need to update other metadata (name, is_temp) explicitly - virtual void renameTo(const String & new_relative_path, bool remove_new_dir_if_exists, DataPartStorageBuilderPtr builder) const; + virtual void renameTo(const String & new_relative_path, bool remove_new_dir_if_exists); /// Makes clone of a part in detached/ directory via hard links virtual void makeCloneInDetached(const String & prefix, const StorageMetadataPtr & metadata_snapshot) const; /// Makes full clone of part in specified subdirectory (relative to storage data directory, e.g. "detached") on another disk - DataPartStoragePtr makeCloneOnDisk(const DiskPtr & disk, const String & directory_name) const; + MutableDataPartStoragePtr makeCloneOnDisk(const DiskPtr & disk, const String & directory_name) const; /// Checks that .bin and .mrk files exist. /// @@ -347,7 +342,7 @@ public: /// Calculate column and secondary indices sizes on disk. void calculateColumnsAndSecondaryIndicesSizesOnDisk(); - String getRelativePathForPrefix(const String & prefix, bool detached = false) const; + std::optional getRelativePathForPrefix(const String & prefix, bool detached = false, bool broken = false) const; bool isProjectionPart() const { return parent_part != nullptr; } @@ -445,6 +440,12 @@ public: /// True if here is lightweight deleted mask file in part. bool hasLightweightDelete() const { return columns.contains(LightweightDeleteDescription::FILTER_COLUMN.name); } + void writeChecksums(const MergeTreeDataPartChecksums & checksums_, const WriteSettings & settings); + + void writeDeleteOnDestroyMarker(); + void removeDeleteOnDestroyMarker(); + void removeVersionMetadata(); + protected: /// Total size of all columns, calculated once in calcuateColumnSizesOnDisk @@ -485,7 +486,7 @@ protected: /// disk using columns and checksums. virtual void calculateEachColumnSizes(ColumnSizeByName & each_columns_size, ColumnSize & total_size) const = 0; - String getRelativePathForDetachedPart(const String & prefix) const; + std::optional getRelativePathForDetachedPart(const String & prefix, bool broken) const; /// Checks that part can be actually removed from disk. /// In ordinary scenario always returns true, but in case of @@ -566,6 +567,12 @@ private: /// any specifial compression. 
void loadDefaultCompressionCodec(); + void writeColumns(const NamesAndTypesList & columns_, const WriteSettings & settings); + void writeVersionMetadata(const VersionMetadata & version_, bool fsync_part_dir) const; + + template + void writeMetadata(const String & filename, const WriteSettings & settings, Writer && writer); + static void appendFilesOfDefaultCompressionCodec(Strings & files); /// Found column without specific compression and return codec @@ -585,7 +592,7 @@ bool isCompactPart(const MergeTreeDataPartPtr & data_part); bool isWidePart(const MergeTreeDataPartPtr & data_part); bool isInMemoryPart(const MergeTreeDataPartPtr & data_part); inline String getIndexExtension(bool is_compressed_primary_key) { return is_compressed_primary_key ? ".cidx" : ".idx"; } -std::optional getIndexExtensionFromFilesystem(const DataPartStoragePtr & data_part_storage); +std::optional getIndexExtensionFromFilesystem(const IDataPartStorage & data_part_storage); bool isCompressedFromIndexExtension(const String & index_extension); } diff --git a/src/Storages/MergeTree/IMergeTreeDataPartInfoForReader.h b/src/Storages/MergeTree/IMergeTreeDataPartInfoForReader.h index 28f834d661d..2e4972c2788 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPartInfoForReader.h +++ b/src/Storages/MergeTree/IMergeTreeDataPartInfoForReader.h @@ -7,7 +7,8 @@ namespace DB { class IDataPartStorage; -using DataPartStoragePtr = std::shared_ptr; +using DataPartStoragePtr = std::shared_ptr; + class MergeTreeIndexGranularity; struct MergeTreeDataPartChecksums; struct MergeTreeIndexGranularityInfo; @@ -36,7 +37,7 @@ public: virtual bool isProjectionPart() const = 0; - virtual const DataPartStoragePtr & getDataPartStorage() const = 0; + virtual DataPartStoragePtr getDataPartStorage() const = 0; virtual const NamesAndTypesList & getColumns() const = 0; diff --git a/src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp b/src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp index 84d0b50ae2f..2488c63e309 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp @@ -38,14 +38,12 @@ Block permuteBlockIfNeeded(const Block & block, const IColumn::Permutation * per } IMergeTreeDataPartWriter::IMergeTreeDataPartWriter( - const MergeTreeData::DataPartPtr & data_part_, - DataPartStorageBuilderPtr data_part_storage_builder_, + const MergeTreeMutableDataPartPtr & data_part_, const NamesAndTypesList & columns_list_, const StorageMetadataPtr & metadata_snapshot_, const MergeTreeWriterSettings & settings_, const MergeTreeIndexGranularity & index_granularity_) : data_part(data_part_) - , data_part_storage_builder(std::move(data_part_storage_builder_)) , storage(data_part_->storage) , metadata_snapshot(metadata_snapshot_) , columns_list(columns_list_) diff --git a/src/Storages/MergeTree/IMergeTreeDataPartWriter.h b/src/Storages/MergeTree/IMergeTreeDataPartWriter.h index 417e2713180..fa3c675f7da 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPartWriter.h +++ b/src/Storages/MergeTree/IMergeTreeDataPartWriter.h @@ -22,8 +22,7 @@ class IMergeTreeDataPartWriter : private boost::noncopyable { public: IMergeTreeDataPartWriter( - const MergeTreeData::DataPartPtr & data_part_, - DataPartStorageBuilderPtr data_part_storage_builder_, + const MergeTreeMutableDataPartPtr & data_part_, const NamesAndTypesList & columns_list_, const StorageMetadataPtr & metadata_snapshot_, const MergeTreeWriterSettings & settings_, @@ -42,8 +41,7 @@ public: protected: - const MergeTreeData::DataPartPtr data_part; - 
DataPartStorageBuilderPtr data_part_storage_builder; + const MergeTreeMutableDataPartPtr data_part; const MergeTreeData & storage; const StorageMetadataPtr metadata_snapshot; const NamesAndTypesList columns_list; diff --git a/src/Storages/MergeTree/IMergedBlockOutputStream.cpp b/src/Storages/MergeTree/IMergedBlockOutputStream.cpp index 5af9bbd3ed8..37da6014d1b 100644 --- a/src/Storages/MergeTree/IMergedBlockOutputStream.cpp +++ b/src/Storages/MergeTree/IMergedBlockOutputStream.cpp @@ -6,14 +6,13 @@ namespace DB { IMergedBlockOutputStream::IMergedBlockOutputStream( - DataPartStorageBuilderPtr data_part_storage_builder_, - const MergeTreeDataPartPtr & data_part, + const MergeTreeMutableDataPartPtr & data_part, const StorageMetadataPtr & metadata_snapshot_, const NamesAndTypesList & columns_list, bool reset_columns_) : storage(data_part->storage) , metadata_snapshot(metadata_snapshot_) - , data_part_storage_builder(std::move(data_part_storage_builder_)) + , data_part_storage(data_part->getDataPartStoragePtr()) , reset_columns(reset_columns_) { if (reset_columns) diff --git a/src/Storages/MergeTree/IMergedBlockOutputStream.h b/src/Storages/MergeTree/IMergedBlockOutputStream.h index dbcca1443b5..ca4e3899b29 100644 --- a/src/Storages/MergeTree/IMergedBlockOutputStream.h +++ b/src/Storages/MergeTree/IMergedBlockOutputStream.h @@ -1,5 +1,6 @@ #pragma once +#include "Storages/MergeTree/IDataPartStorage.h" #include #include #include @@ -12,8 +13,7 @@ class IMergedBlockOutputStream { public: IMergedBlockOutputStream( - DataPartStorageBuilderPtr data_part_storage_builder_, - const MergeTreeDataPartPtr & data_part, + const MergeTreeMutableDataPartPtr & data_part, const StorageMetadataPtr & metadata_snapshot_, const NamesAndTypesList & columns_list, bool reset_columns_); @@ -42,7 +42,7 @@ protected: const MergeTreeData & storage; StorageMetadataPtr metadata_snapshot; - DataPartStorageBuilderPtr data_part_storage_builder; + MutableDataPartStoragePtr data_part_storage; IMergeTreeDataPart::MergeTreeWriterPtr writer; bool reset_columns = false; diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index d7c33c8663b..99c14ede3e2 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -86,6 +87,88 @@ String extractFixedPrefixFromLikePattern(const String & like_pattern) return fixed_prefix; } +/// for "^prefix..." string it returns "prefix" +static String extractFixedPrefixFromRegularExpression(const String & regexp) +{ + if (regexp.size() <= 1 || regexp[0] != '^') + return {}; + + String fixed_prefix; + const char * begin = regexp.data() + 1; + const char * pos = begin; + const char * end = regexp.data() + regexp.size(); + + while (pos != end) + { + switch (*pos) + { + case '\0': + pos = end; + break; + + case '\\': + { + ++pos; + if (pos == end) + break; + + switch (*pos) + { + case '|': + case '(': + case ')': + case '^': + case '$': + case '.': + case '[': + case '?': + case '*': + case '+': + case '{': + fixed_prefix += *pos; + break; + default: + /// all other escape sequences are not supported + pos = end; + break; + } + + ++pos; + break; + } + + /// non-trivial cases + case '|': + fixed_prefix.clear(); + [[fallthrough]]; + case '(': + case '[': + case '^': + case '$': + case '.': + case '+': + pos = end; + break; + + /// Quantifiers that allow a zero number of occurrences. 
+ case '{': + case '?': + case '*': + if (!fixed_prefix.empty()) + fixed_prefix.pop_back(); + + pos = end; + break; + default: + fixed_prefix += *pos; + pos++; + break; + } + } + + return fixed_prefix; +} + /** For a given string, get a minimum string that is strictly greater than all strings with this prefix, * or return an empty string if there are no such strings. @@ -112,289 +195,6 @@ static String firstStringThatIsGreaterThanAllStringsWithPrefix(const String & pr return res; } -static void appendColumnNameWithoutAlias(const ActionsDAG::Node & node, WriteBuffer & out, bool legacy = false) -{ - switch (node.type) - { - case (ActionsDAG::ActionType::INPUT): - writeString(node.result_name, out); - break; - case (ActionsDAG::ActionType::COLUMN): - { - /// If it was created from ASTLiteral, then result_name can be an alias. - /// We need to convert value back to string here. - if (const auto * column_const = typeid_cast(node.column.get())) - writeString(applyVisitor(FieldVisitorToString(), column_const->getField()), out); - /// It may be possible that column is ColumnSet - else - writeString(node.result_name, out); - break; - } - case (ActionsDAG::ActionType::ALIAS): - appendColumnNameWithoutAlias(*node.children.front(), out, legacy); - break; - case (ActionsDAG::ActionType::ARRAY_JOIN): - writeCString("arrayJoin(", out); - appendColumnNameWithoutAlias(*node.children.front(), out, legacy); - writeChar(')', out); - break; - case (ActionsDAG::ActionType::FUNCTION): - { - auto name = node.function_base->getName(); - if (legacy && name == "modulo") - writeCString("moduleLegacy", out); - else - writeString(name, out); - - writeChar('(', out); - bool first = true; - for (const auto * arg : node.children) - { - if (!first) - writeCString(", ", out); - first = false; - - appendColumnNameWithoutAlias(*arg, out, legacy); - } - writeChar(')', out); - } - } -} - -static std::string getColumnNameWithoutAlias(const ActionsDAG::Node & node, bool legacy = false) -{ - WriteBufferFromOwnString out; - appendColumnNameWithoutAlias(node, out, legacy); - return std::move(out.str()); -} - -class KeyCondition::Tree -{ -public: - explicit Tree(const IAST * ast_) : ast(ast_) { assert(ast); } - explicit Tree(const ActionsDAG::Node * dag_) : dag(dag_) { assert(dag); } - - std::string getColumnName() const - { - if (ast) - return ast->getColumnNameWithoutAlias(); - else - return getColumnNameWithoutAlias(*dag); - } - - std::string getColumnNameLegacy() const - { - if (ast) - { - auto adjusted_ast = ast->clone(); - KeyDescription::moduloToModuloLegacyRecursive(adjusted_ast); - return adjusted_ast->getColumnNameWithoutAlias(); - } - else - return getColumnNameWithoutAlias(*dag, true); - } - - bool isFunction() const - { - if (ast) - return typeid_cast(ast); - else - return dag->type == ActionsDAG::ActionType::FUNCTION; - } - - bool isConstant() const - { - if (ast) - return typeid_cast(ast); - else - return dag->column && isColumnConst(*dag->column); - } - - ColumnWithTypeAndName getConstant() const - { - if (!isConstant()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "KeyCondition::Tree node is not a constant"); - - ColumnWithTypeAndName res; - - if (ast) - { - const auto * literal = assert_cast(ast); - res.type = applyVisitor(FieldToDataType(), literal->value); - res.column = res.type->createColumnConst(0, literal->value); - - } - else - { - res.type = dag->result_type; - res.column = dag->column; - } - - return res; - } - - bool tryGetConstant(const Block & block_with_constants, Field & out_value, DataTypePtr & 
out_type) const - { - if (ast) - { - // Constant expr should use alias names if any - String column_name = ast->getColumnName(); - - if (const auto * lit = ast->as()) - { - /// By default block_with_constants has only one column named "_dummy". - /// If block contains only constants it's may not be preprocessed by - // ExpressionAnalyzer, so try to look up in the default column. - if (!block_with_constants.has(column_name)) - column_name = "_dummy"; - - /// Simple literal - out_value = lit->value; - out_type = block_with_constants.getByName(column_name).type; - - /// If constant is not Null, we can assume it's type is not Nullable as well. - if (!out_value.isNull()) - out_type = removeNullable(out_type); - - return true; - } - else if (block_with_constants.has(column_name) && isColumnConst(*block_with_constants.getByName(column_name).column)) - { - /// An expression which is dependent on constants only - const auto & expr_info = block_with_constants.getByName(column_name); - out_value = (*expr_info.column)[0]; - out_type = expr_info.type; - - if (!out_value.isNull()) - out_type = removeNullable(out_type); - - return true; - } - } - else - { - if (dag->column && isColumnConst(*dag->column)) - { - out_value = (*dag->column)[0]; - out_type = dag->result_type; - - if (!out_value.isNull()) - out_type = removeNullable(out_type); - - return true; - } - } - - return false; - } - - ConstSetPtr tryGetPreparedSet( - const PreparedSetsPtr & sets, - const std::vector & indexes_mapping, - const DataTypes & data_types) const - { - if (sets && ast) - { - if (ast->as() || ast->as()) - return sets->get(PreparedSetKey::forSubquery(*ast)); - - /// We have `PreparedSetKey::forLiteral` but it is useless here as we don't have enough information - /// about types in left argument of the IN operator. Instead, we manually iterate through all the sets - /// and find the one for the right arg based on the AST structure (getTreeHash), after that we check - /// that the types it was prepared with are compatible with the types of the primary key. - auto types_match = [&indexes_mapping, &data_types](const SetPtr & candidate_set) - { - assert(indexes_mapping.size() == data_types.size()); - - for (size_t i = 0; i < indexes_mapping.size(); ++i) - { - if (!candidate_set->areTypesEqual(indexes_mapping[i].tuple_index, data_types[i])) - return false; - } - - return true; - }; - - for (const auto & set : sets->getByTreeHash(ast->getTreeHash())) - { - if (types_match(set)) - return set; - } - } - else if (dag->column) - { - const IColumn * col = dag->column.get(); - if (const auto * col_const = typeid_cast(col)) - col = &col_const->getDataColumn(); - - if (const auto * col_set = typeid_cast(col)) - { - auto set = col_set->getData(); - if (set->isCreated()) - return set; - } - } - - return nullptr; - } - - FunctionTree asFunction() const; - -protected: - const IAST * ast = nullptr; - const ActionsDAG::Node * dag = nullptr; -}; - -class KeyCondition::FunctionTree : public KeyCondition::Tree -{ -public: - std::string getFunctionName() const - { - if (ast) - return assert_cast(ast)->name; - else - return dag->function_base->getName(); - } - - size_t numArguments() const - { - if (ast) - { - const auto * func = assert_cast(ast); - return func->arguments ? 
func->arguments->children.size() : 0; - } - else - return dag->children.size(); - } - - Tree getArgumentAt(size_t idx) const - { - if (ast) - return Tree(assert_cast(ast)->arguments->children[idx].get()); - else - return Tree(dag->children[idx]); - } - -private: - using Tree::Tree; - - friend class Tree; -}; - - -KeyCondition::FunctionTree KeyCondition::Tree::asFunction() const -{ - if (!isFunction()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "KeyCondition::Tree node is not a function"); - - if (ast) - return KeyCondition::FunctionTree(ast); - else - return KeyCondition::FunctionTree(dag); -} - - -/// A dictionary containing actions to the corresponding functions to turn them into `RPNElement` const KeyCondition::AtomMap KeyCondition::atom_map { { @@ -581,6 +381,27 @@ const KeyCondition::AtomMap KeyCondition::atom_map return true; } }, + { + "match", + [] (RPNElement & out, const Field & value) + { + if (value.getType() != Field::Types::String) + return false; + + String prefix = extractFixedPrefixFromRegularExpression(value.get()); + if (prefix.empty()) + return false; + + String right_bound = firstStringThatIsGreaterThanAllStringsWithPrefix(prefix); + + out.function = RPNElement::FUNCTION_IN_RANGE; + out.range = !right_bound.empty() + ? Range(prefix, true, right_bound, false) + : Range::createLeftBounded(prefix, true); + + return true; + } + }, { "isNotNull", [] (RPNElement & out, const Field &) @@ -848,9 +669,11 @@ Block KeyCondition::getBlockWithConstants( { DataTypeUInt8().createColumnConstWithDefaultValue(1), std::make_shared(), "_dummy" } }; - const auto expr_for_constant_folding = ExpressionAnalyzer(query, syntax_analyzer_result, context).getConstActions(); - - expr_for_constant_folding->execute(result); + if (syntax_analyzer_result) + { + const auto expr_for_constant_folding = ExpressionAnalyzer(query, syntax_analyzer_result, context).getConstActions(); + expr_for_constant_folding->execute(result); + } return result; } @@ -867,16 +690,17 @@ static NameSet getAllSubexpressionNames(const ExpressionActions & key_expr) KeyCondition::KeyCondition( const ASTPtr & query, const ASTs & additional_filter_asts, - TreeRewriterResultPtr syntax_analyzer_result, - PreparedSetsPtr prepared_sets_, + Block block_with_constants, + PreparedSetsPtr prepared_sets, ContextPtr context, const Names & key_column_names, const ExpressionActionsPtr & key_expr_, + NameSet array_joined_column_names_, bool single_point_, bool strict_) : key_expr(key_expr_) , key_subexpr_names(getAllSubexpressionNames(*key_expr)) - , prepared_sets(prepared_sets_) + , array_joined_column_names(std::move(array_joined_column_names_)) , single_point(single_point_) , strict(strict_) { @@ -887,73 +711,64 @@ KeyCondition::KeyCondition( key_columns[name] = i; } - /** Evaluation of expressions that depend only on constants. - * For the index to be used, if it is written, for example `WHERE Date = toDate(now())`. 
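Stepping back to the new "match" entry added above: a minimal, self-contained sketch of the prefix-to-range reduction it performs. The helper name rightBoundForPrefix and the URL literal are illustrative only; the real right bound comes from firstStringThatIsGreaterThanAllStringsWithPrefix, modelled here (per its documented behaviour) by dropping trailing 0xFF bytes and bumping the last remaining byte.

    #include <cassert>
    #include <string>

    /// Rough standalone model of the right-bound computation: drop trailing 0xFF bytes,
    /// then increment the last byte; an empty result means the range is unbounded above.
    static std::string rightBoundForPrefix(std::string prefix)
    {
        while (!prefix.empty() && static_cast<unsigned char>(prefix.back()) == 0xFF)
            prefix.pop_back();
        if (!prefix.empty())
            ++prefix.back();
        return prefix;
    }

    int main()
    {
        /// match(URL, '^https://clickhouse') is reduced to the half-open range
        /// [ "https://clickhouse", "https://clickhousf" ) on the URL key column,
        /// which is exactly the FUNCTION_IN_RANGE shape built above.
        assert(rightBoundForPrefix("https://clickhouse") == "https://clickhousf");
        return 0;
    }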
- */ - Block block_with_constants = getBlockWithConstants(query, syntax_analyzer_result, context); + auto filter_node = buildFilterNode(query, additional_filter_asts); - for (const auto & [name, _] : syntax_analyzer_result->array_join_result_to_source) - array_joined_columns.insert(name); - - const ASTSelectQuery & select = query->as(); - - ASTs filters; - if (select.where()) - filters.push_back(select.where()); - - if (select.prewhere()) - filters.push_back(select.prewhere()); - - for (const auto & filter_ast : additional_filter_asts) - filters.push_back(filter_ast); - - if (!filters.empty()) - { - ASTPtr filter_query; - if (filters.size() == 1) - { - filter_query = filters.front(); - } - else - { - auto function = std::make_shared(); - - function->name = "and"; - function->arguments = std::make_shared(); - function->children.push_back(function->arguments); - function->arguments->children = std::move(filters); - - filter_query = function; - } - - /** When non-strictly monotonic functions are employed in functional index (e.g. ORDER BY toStartOfHour(dateTime)), - * the use of NOT operator in predicate will result in the indexing algorithm leave out some data. - * This is caused by rewriting in KeyCondition::tryParseAtomFromAST of relational operators to less strict - * when parsing the AST into internal RPN representation. - * To overcome the problem, before parsing the AST we transform it to its semantically equivalent form where all NOT's - * are pushed down and applied (when possible) to leaf nodes. - */ - auto ast = cloneASTWithInversionPushDown(filter_query); - traverseAST(Tree(ast.get()), context, block_with_constants); - } - else + if (!filter_node) { rpn.emplace_back(RPNElement::FUNCTION_UNKNOWN); + return; } + + /** When non-strictly monotonic functions are employed in functional index (e.g. ORDER BY toStartOfHour(dateTime)), + * the use of NOT operator in predicate will result in the indexing algorithm leave out some data. + * This is caused by rewriting in KeyCondition::tryParseAtomFromAST of relational operators to less strict + * when parsing the AST into internal RPN representation. + * To overcome the problem, before parsing the AST we transform it to its semantically equivalent form where all NOT's + * are pushed down and applied (when possible) to leaf nodes. 
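 * For example (illustrative predicate): NOT (key < 10 OR key >= 42) is rewritten into
 * key >= 10 AND key < 42, so every leaf is again a plain comparison that atom_map understands.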
+ */ + auto inverted_filter_node = cloneASTWithInversionPushDown(filter_node); + + RPNBuilder builder( + inverted_filter_node, + std::move(context), + std::move(block_with_constants), + std::move(prepared_sets), + [&](const RPNBuilderTreeNode & node, RPNElement & out) { return extractAtomFromTree(node, out); }); + rpn = std::move(builder).extractRPN(); +} + +KeyCondition::KeyCondition( + const SelectQueryInfo & query_info, + ContextPtr context, + const Names & key_column_names, + const ExpressionActionsPtr & key_expr_, + bool single_point_, + bool strict_) + : KeyCondition( + query_info.query, + query_info.filter_asts, + KeyCondition::getBlockWithConstants(query_info.query, query_info.syntax_analyzer_result, context), + query_info.prepared_sets, + context, + key_column_names, + key_expr_, + query_info.syntax_analyzer_result->getArrayJoinSourceNameSet(), + single_point_, + strict_) +{ } KeyCondition::KeyCondition( ActionDAGNodes dag_nodes, - TreeRewriterResultPtr syntax_analyzer_result, - PreparedSetsPtr prepared_sets_, ContextPtr context, const Names & key_column_names, const ExpressionActionsPtr & key_expr_, + NameSet array_joined_column_names_, bool single_point_, bool strict_) : key_expr(key_expr_) , key_subexpr_names(getAllSubexpressionNames(*key_expr)) - , prepared_sets(prepared_sets_) + , array_joined_column_names(std::move(array_joined_column_names_)) , single_point(single_point_) , strict(strict_) { @@ -964,23 +779,23 @@ KeyCondition::KeyCondition( key_columns[name] = i; } - for (const auto & [name, _] : syntax_analyzer_result->array_join_result_to_source) - array_joined_columns.insert(name); - - if (!dag_nodes.nodes.empty()) - { - auto inverted_dag = cloneASTWithInversionPushDown(std::move(dag_nodes.nodes), context); - - // std::cerr << "========== inverted dag: " << inverted_dag->dumpDAG() << std::endl; - - Block empty; - for (const auto * node : inverted_dag->getOutputs()) - traverseAST(Tree(node), context, empty); - } - else + if (dag_nodes.nodes.empty()) { rpn.emplace_back(RPNElement::FUNCTION_UNKNOWN); + return; } + + auto inverted_dag = cloneASTWithInversionPushDown(std::move(dag_nodes.nodes), context); + assert(inverted_dag->getOutputs().size() == 1); + + const auto * inverted_dag_filter_node = inverted_dag->getOutputs()[0]; + + RPNBuilder builder(inverted_dag_filter_node, context, [&](const RPNBuilderTreeNode & node, RPNElement & out) + { + return extractAtomFromTree(node, out); + }); + + rpn = std::move(builder).extractRPN(); } bool KeyCondition::addCondition(const String & column, const Range & range) @@ -992,12 +807,12 @@ bool KeyCondition::addCondition(const String & column, const Range & range) return true; } -/** Computes value of constant expression and its data type. - * Returns false, if expression isn't constant. 
- */ bool KeyCondition::getConstant(const ASTPtr & expr, Block & block_with_constants, Field & out_value, DataTypePtr & out_type) { - return Tree(expr.get()).tryGetConstant(block_with_constants, out_value, out_type); + RPNBuilderTreeContext tree_context(nullptr, block_with_constants, nullptr); + RPNBuilderTreeNode node(expr.get(), tree_context); + + return node.tryGetConstant(out_value, out_type); } @@ -1081,39 +896,6 @@ static FieldRef applyFunction(const FunctionBasePtr & func, const DataTypePtr & return {field.columns, field.row_idx, result_idx}; } -void KeyCondition::traverseAST(const Tree & node, ContextPtr context, Block & block_with_constants) -{ - RPNElement element; - - if (node.isFunction()) - { - auto func = node.asFunction(); - if (tryParseLogicalOperatorFromAST(func, element)) - { - size_t num_args = func.numArguments(); - for (size_t i = 0; i < num_args; ++i) - { - traverseAST(func.getArgumentAt(i), context, block_with_constants); - - /** The first part of the condition is for the correct support of `and` and `or` functions of arbitrary arity - * - in this case `n - 1` elements are added (where `n` is the number of arguments). - */ - if (i != 0 || element.function == RPNElement::FUNCTION_NOT) - rpn.emplace_back(element); - } - - return; - } - } - - if (!tryParseAtomFromAST(node, context, block_with_constants, element)) - { - element.function = RPNElement::FUNCTION_UNKNOWN; - } - - rpn.emplace_back(std::move(element)); -} - /** The key functional expression constraint may be inferred from a plain column in the expression. * For example, if the key contains `toStartOfHour(Timestamp)` and query contains `WHERE Timestamp >= now()`, * it can be assumed that if `toStartOfHour()` is monotonic on [now(), inf), the `toStartOfHour(Timestamp) >= toStartOfHour(now())` @@ -1180,7 +962,8 @@ bool KeyCondition::transformConstantWithValidFunctions( if (is_valid_chain) { - auto const_type = cur_node->result_type; + out_type = removeLowCardinality(out_type); + auto const_type = removeLowCardinality(cur_node->result_type); auto const_column = out_type->createColumnConst(1, out_value); auto const_value = (*castColumnAccurateOrNull({const_column, out_type, ""}, const_type))[0]; @@ -1234,7 +1017,7 @@ bool KeyCondition::transformConstantWithValidFunctions( } bool KeyCondition::canConstantBeWrappedByMonotonicFunctions( - const Tree & node, + const RPNBuilderTreeNode & node, size_t & out_key_column_num, DataTypePtr & out_key_column_type, Field & out_value, @@ -1242,7 +1025,7 @@ bool KeyCondition::canConstantBeWrappedByMonotonicFunctions( { String expr_name = node.getColumnName(); - if (array_joined_columns.contains(expr_name)) + if (array_joined_column_names.contains(expr_name)) return false; if (!key_subexpr_names.contains(expr_name)) @@ -1269,11 +1052,15 @@ bool KeyCondition::canConstantBeWrappedByMonotonicFunctions( /// Looking for possible transformation of `column = constant` into `partition_expr = function(constant)` bool KeyCondition::canConstantBeWrappedByFunctions( - const Tree & node, size_t & out_key_column_num, DataTypePtr & out_key_column_type, Field & out_value, DataTypePtr & out_type) + const RPNBuilderTreeNode & node, + size_t & out_key_column_num, + DataTypePtr & out_key_column_type, + Field & out_value, + DataTypePtr & out_type) { String expr_name = node.getColumnName(); - if (array_joined_columns.contains(expr_name)) + if (array_joined_column_names.contains(expr_name)) return false; if (!key_subexpr_names.contains(expr_name)) @@ -1287,7 +1074,7 @@ bool 
KeyCondition::canConstantBeWrappedByFunctions( /// The case `f(modulo(...))` for totally monotonic `f ` is considered to be rare. /// /// Note: for negative values, we can filter more partitions then needed. - expr_name = node.getColumnNameLegacy(); + expr_name = node.getColumnNameWithModuloLegacy(); if (!key_subexpr_names.contains(expr_name)) return false; @@ -1304,8 +1091,7 @@ bool KeyCondition::canConstantBeWrappedByFunctions( } bool KeyCondition::tryPrepareSetIndex( - const FunctionTree & func, - ContextPtr context, + const RPNBuilderFunctionTreeNode & func, RPNElement & out, size_t & out_key_column_num) { @@ -1315,13 +1101,12 @@ bool KeyCondition::tryPrepareSetIndex( std::vector indexes_mapping; DataTypes data_types; - auto get_key_tuple_position_mapping = [&](const Tree & node, size_t tuple_index) + auto get_key_tuple_position_mapping = [&](const RPNBuilderTreeNode & node, size_t tuple_index) { MergeTreeSetIndex::KeyTuplePositionMapping index_mapping; index_mapping.tuple_index = tuple_index; DataTypePtr data_type; - if (isKeyPossiblyWrappedByMonotonicFunctions( - node, context, index_mapping.key_index, data_type, index_mapping.functions)) + if (isKeyPossiblyWrappedByMonotonicFunctions(node, index_mapping.key_index, data_type, index_mapping.functions)) { indexes_mapping.push_back(index_mapping); data_types.push_back(data_type); @@ -1335,25 +1120,29 @@ bool KeyCondition::tryPrepareSetIndex( { /// Note: in case of ActionsDAG, tuple may be a constant. /// In this case, there is no keys in tuple. So, we don't have to check it. - auto left_arg_tuple = left_arg.asFunction(); + auto left_arg_tuple = left_arg.toFunctionNode(); if (left_arg_tuple.getFunctionName() == "tuple") { - left_args_count = left_arg_tuple.numArguments(); + left_args_count = left_arg_tuple.getArgumentsSize(); for (size_t i = 0; i < left_args_count; ++i) get_key_tuple_position_mapping(left_arg_tuple.getArgumentAt(i), i); } else + { get_key_tuple_position_mapping(left_arg, 0); + } } else + { get_key_tuple_position_mapping(left_arg, 0); + } if (indexes_mapping.empty()) return false; const auto right_arg = func.getArgumentAt(1); - auto prepared_set = right_arg.tryGetPreparedSet(prepared_sets, indexes_mapping, data_types); + auto prepared_set = right_arg.tryGetPreparedSet(indexes_mapping, data_types); if (!prepared_set) return false; @@ -1407,6 +1196,7 @@ public: ColumnsWithTypeAndName new_arguments; new_arguments.reserve(arguments.size() + 1); new_arguments.push_back(const_arg); + new_arguments.front().column = new_arguments.front().column->cloneResized(input_rows_count); for (const auto & arg : arguments) new_arguments.push_back(arg); return func->prepare(new_arguments)->execute(new_arguments, result_type, input_rows_count, dry_run); @@ -1415,6 +1205,7 @@ public: { auto new_arguments = arguments; new_arguments.push_back(const_arg); + new_arguments.back().column = new_arguments.back().column->cloneResized(input_rows_count); return func->prepare(new_arguments)->execute(new_arguments, result_type, input_rows_count, dry_run); } else @@ -1445,13 +1236,12 @@ private: bool KeyCondition::isKeyPossiblyWrappedByMonotonicFunctions( - const Tree & node, - ContextPtr context, + const RPNBuilderTreeNode & node, size_t & out_key_column_num, DataTypePtr & out_key_res_column_type, MonotonicFunctionsChain & out_functions_chain) { - std::vector chain_not_tested_for_monotonicity; + std::vector chain_not_tested_for_monotonicity; DataTypePtr key_column_type; if (!isKeyPossiblyWrappedByMonotonicFunctionsImpl(node, out_key_column_num, 
key_column_type, chain_not_tested_for_monotonicity)) @@ -1460,17 +1250,17 @@ bool KeyCondition::isKeyPossiblyWrappedByMonotonicFunctions( for (auto it = chain_not_tested_for_monotonicity.rbegin(); it != chain_not_tested_for_monotonicity.rend(); ++it) { auto function = *it; - auto func_builder = FunctionFactory::instance().tryGet(function.getFunctionName(), context); + auto func_builder = FunctionFactory::instance().tryGet(function.getFunctionName(), node.getTreeContext().getQueryContext()); if (!func_builder) return false; ColumnsWithTypeAndName arguments; ColumnWithTypeAndName const_arg; FunctionWithOptionalConstArg::Kind kind = FunctionWithOptionalConstArg::Kind::NO_CONST; - if (function.numArguments() == 2) + if (function.getArgumentsSize() == 2) { if (function.getArgumentAt(0).isConstant()) { - const_arg = function.getArgumentAt(0).getConstant(); + const_arg = function.getArgumentAt(0).getConstantColumn(); arguments.push_back(const_arg); arguments.push_back({ nullptr, key_column_type, "" }); kind = FunctionWithOptionalConstArg::Kind::LEFT_CONST; @@ -1478,7 +1268,7 @@ bool KeyCondition::isKeyPossiblyWrappedByMonotonicFunctions( else if (function.getArgumentAt(1).isConstant()) { arguments.push_back({ nullptr, key_column_type, "" }); - const_arg = function.getArgumentAt(1).getConstant(); + const_arg = function.getArgumentAt(1).getConstantColumn(); arguments.push_back(const_arg); kind = FunctionWithOptionalConstArg::Kind::RIGHT_CONST; } @@ -1504,10 +1294,10 @@ bool KeyCondition::isKeyPossiblyWrappedByMonotonicFunctions( } bool KeyCondition::isKeyPossiblyWrappedByMonotonicFunctionsImpl( - const Tree & node, + const RPNBuilderTreeNode & node, size_t & out_key_column_num, DataTypePtr & out_key_column_type, - std::vector & out_functions_chain) + std::vector & out_functions_chain) { /** By itself, the key column can be a functional expression. for example, `intHash32(UserID)`. * Therefore, use the full name of the expression for search. 
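A hypothetical walk-through of what the collected chain is later used for during range checks (column and literal are illustrative):

    /// key column:       Date
    /// predicate:        toYear(Date) = 2022
    /// collected chain:  [ toYear ]                 (monotonic, so ranges map to ranges)
    /// range check:      a granule with Date in [d1, d2] can only match if 2022 lies
    ///                   within [ toYear(d1), toYear(d2) ]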
@@ -1517,7 +1307,7 @@ bool KeyCondition::isKeyPossiblyWrappedByMonotonicFunctionsImpl( // Key columns should use canonical names for index analysis String name = node.getColumnName(); - if (array_joined_columns.contains(name)) + if (array_joined_column_names.contains(name)) return false; auto it = key_columns.find(name); @@ -1530,37 +1320,39 @@ bool KeyCondition::isKeyPossiblyWrappedByMonotonicFunctionsImpl( if (node.isFunction()) { - auto func = node.asFunction(); + auto function_node = node.toFunctionNode(); - size_t num_args = func.numArguments(); - if (num_args > 2 || num_args == 0) + size_t arguments_size = function_node.getArgumentsSize(); + if (arguments_size > 2 || arguments_size == 0) return false; - out_functions_chain.push_back(func); - bool ret = false; - if (num_args == 2) + out_functions_chain.push_back(function_node); + + bool result = false; + if (arguments_size == 2) { - if (func.getArgumentAt(0).isConstant()) + if (function_node.getArgumentAt(0).isConstant()) { - ret = isKeyPossiblyWrappedByMonotonicFunctionsImpl(func.getArgumentAt(1), out_key_column_num, out_key_column_type, out_functions_chain); + result = isKeyPossiblyWrappedByMonotonicFunctionsImpl(function_node.getArgumentAt(1), out_key_column_num, out_key_column_type, out_functions_chain); } - else if (func.getArgumentAt(1).isConstant()) + else if (function_node.getArgumentAt(1).isConstant()) { - ret = isKeyPossiblyWrappedByMonotonicFunctionsImpl(func.getArgumentAt(0), out_key_column_num, out_key_column_type, out_functions_chain); + result = isKeyPossiblyWrappedByMonotonicFunctionsImpl(function_node.getArgumentAt(0), out_key_column_num, out_key_column_type, out_functions_chain); } } else { - ret = isKeyPossiblyWrappedByMonotonicFunctionsImpl(func.getArgumentAt(0), out_key_column_num, out_key_column_type, out_functions_chain); + result = isKeyPossiblyWrappedByMonotonicFunctionsImpl(function_node.getArgumentAt(0), out_key_column_num, out_key_column_type, out_functions_chain); } - return ret; + + return result; } return false; } -static void castValueToType(const DataTypePtr & desired_type, Field & src_value, const DataTypePtr & src_type, const KeyCondition::Tree & node) +static void castValueToType(const DataTypePtr & desired_type, Field & src_value, const DataTypePtr & src_type, const String & node_column_name) { try { @@ -1570,13 +1362,13 @@ static void castValueToType(const DataTypePtr & desired_type, Field & src_value, { throw Exception("Key expression contains comparison between inconvertible types: " + desired_type->getName() + " and " + src_type->getName() + - " inside " + node.getColumnName(), + " inside " + node_column_name, ErrorCodes::BAD_TYPE_OF_FIELD); } } -bool KeyCondition::tryParseAtomFromAST(const Tree & node, ContextPtr context, Block & block_with_constants, RPNElement & out) +bool KeyCondition::extractAtomFromTree(const RPNBuilderTreeNode & node, RPNElement & out) { /** Functions < > = != <= >= in `notIn` isNull isNotNull, where one argument is a constant, and the other is one of columns of key, * or itself, wrapped in a chain of possibly-monotonic functions, @@ -1586,8 +1378,8 @@ bool KeyCondition::tryParseAtomFromAST(const Tree & node, ContextPtr context, Bl DataTypePtr const_type; if (node.isFunction()) { - auto func = node.asFunction(); - size_t num_args = func.numArguments(); + auto func = node.toFunctionNode(); + size_t num_args = func.getArgumentsSize(); DataTypePtr key_expr_type; /// Type of expression containing key column size_t key_column_num = -1; /// Number of a key column (inside 
key_column_names array) @@ -1599,7 +1391,7 @@ bool KeyCondition::tryParseAtomFromAST(const Tree & node, ContextPtr context, Bl if (num_args == 1) { - if (!(isKeyPossiblyWrappedByMonotonicFunctions(func.getArgumentAt(0), context, key_column_num, key_expr_type, chain))) + if (!(isKeyPossiblyWrappedByMonotonicFunctions(func.getArgumentAt(0), key_column_num, key_expr_type, chain))) return false; if (key_column_num == static_cast(-1)) @@ -1630,7 +1422,7 @@ bool KeyCondition::tryParseAtomFromAST(const Tree & node, ContextPtr context, Bl if (functionIsInOrGlobalInOperator(func_name)) { - if (tryPrepareSetIndex(func, context, out, key_column_num)) + if (tryPrepareSetIndex(func, out, key_column_num)) { key_arg_pos = 0; is_set_const = true; @@ -1638,7 +1430,7 @@ bool KeyCondition::tryParseAtomFromAST(const Tree & node, ContextPtr context, Bl else return false; } - else if (func.getArgumentAt(1).tryGetConstant(block_with_constants, const_value, const_type)) + else if (func.getArgumentAt(1).tryGetConstant(const_value, const_type)) { /// If the const operand is null, the atom will be always false if (const_value.isNull()) @@ -1647,7 +1439,7 @@ bool KeyCondition::tryParseAtomFromAST(const Tree & node, ContextPtr context, Bl return true; } - if (isKeyPossiblyWrappedByMonotonicFunctions(func.getArgumentAt(0), context, key_column_num, key_expr_type, chain)) + if (isKeyPossiblyWrappedByMonotonicFunctions(func.getArgumentAt(0), key_column_num, key_expr_type, chain)) { key_arg_pos = 0; } @@ -1668,7 +1460,7 @@ bool KeyCondition::tryParseAtomFromAST(const Tree & node, ContextPtr context, Bl else return false; } - else if (func.getArgumentAt(0).tryGetConstant(block_with_constants, const_value, const_type)) + else if (func.getArgumentAt(0).tryGetConstant(const_value, const_type)) { /// If the const operand is null, the atom will be always false if (const_value.isNull()) @@ -1677,7 +1469,7 @@ bool KeyCondition::tryParseAtomFromAST(const Tree & node, ContextPtr context, Bl return true; } - if (isKeyPossiblyWrappedByMonotonicFunctions(func.getArgumentAt(1), context, key_column_num, key_expr_type, chain)) + if (isKeyPossiblyWrappedByMonotonicFunctions(func.getArgumentAt(1), key_column_num, key_expr_type, chain)) { key_arg_pos = 1; } @@ -1718,7 +1510,7 @@ bool KeyCondition::tryParseAtomFromAST(const Tree & node, ContextPtr context, Bl else if (func_name == "in" || func_name == "notIn" || func_name == "like" || func_name == "notLike" || func_name == "ilike" || func_name == "notIlike" || - func_name == "startsWith") + func_name == "startsWith" || func_name == "match") { /// "const IN data_column" doesn't make sense (unlike "data_column IN const") return false; @@ -1757,7 +1549,7 @@ bool KeyCondition::tryParseAtomFromAST(const Tree & node, ContextPtr context, Bl if (!const_type->equals(*common_type)) { - castValueToType(common_type, const_value, const_type, node); + castValueToType(common_type, const_value, const_type, node.getColumnName()); // Need to set is_constant_transformed unless we're doing exact conversion if (!key_expr_type_not_null->equals(*common_type)) @@ -1802,7 +1594,7 @@ bool KeyCondition::tryParseAtomFromAST(const Tree & node, ContextPtr context, Bl return atom_it->second(out, const_value); } - else if (node.tryGetConstant(block_with_constants, const_value, const_type)) + else if (node.tryGetConstant(const_value, const_type)) { /// For cases where it says, for example, `WHERE 0 AND something` @@ -1825,32 +1617,6 @@ bool KeyCondition::tryParseAtomFromAST(const Tree & node, ContextPtr context, Bl return 
false; } -bool KeyCondition::tryParseLogicalOperatorFromAST(const FunctionTree & func, RPNElement & out) -{ - /// Functions AND, OR, NOT. - /// Also a special function `indexHint` - works as if instead of calling a function there are just parentheses - /// (or, the same thing - calling the function `and` from one argument). - - if (func.getFunctionName() == "not") - { - if (func.numArguments() != 1) - return false; - - out.function = RPNElement::FUNCTION_NOT; - } - else - { - if (func.getFunctionName() == "and" || func.getFunctionName() == "indexHint") - out.function = RPNElement::FUNCTION_AND; - else if (func.getFunctionName() == "or") - out.function = RPNElement::FUNCTION_OR; - else - return false; - } - - return true; -} - String KeyCondition::toString() const { String res; diff --git a/src/Storages/MergeTree/KeyCondition.h b/src/Storages/MergeTree/KeyCondition.h index d00a25a1077..fe1bffa9305 100644 --- a/src/Storages/MergeTree/KeyCondition.h +++ b/src/Storages/MergeTree/KeyCondition.h @@ -2,11 +2,16 @@ #include -#include #include -#include -#include +#include + +#include +#include +#include + +#include +#include namespace DB { @@ -205,45 +210,37 @@ public: class KeyCondition { public: - /// Does not take into account the SAMPLE section. all_columns - the set of all columns of the table. + /// Construct key condition from AST SELECT query WHERE, PREWHERE and additional filters KeyCondition( const ASTPtr & query, const ASTs & additional_filter_asts, - TreeRewriterResultPtr syntax_analyzer_result, + Block block_with_constants, PreparedSetsPtr prepared_sets_, ContextPtr context, const Names & key_column_names, const ExpressionActionsPtr & key_expr, + NameSet array_joined_column_names, bool single_point_ = false, bool strict_ = false); + /** Construct key condition from AST SELECT query WHERE, PREWHERE and additional filters. + * Select query, additional filters, prepared sets are initialized using query info. + */ KeyCondition( const SelectQueryInfo & query_info, ContextPtr context, const Names & key_column_names, const ExpressionActionsPtr & key_expr_, bool single_point_ = false, - bool strict_ = false) - : KeyCondition( - query_info.query, - query_info.filter_asts, - query_info.syntax_analyzer_result, - query_info.prepared_sets, - context, - key_column_names, - key_expr_, - single_point_, - strict_) - { - } + bool strict_ = false); + /// Construct key condition from ActionsDAG nodes KeyCondition( ActionDAGNodes dag_nodes, - TreeRewriterResultPtr syntax_analyzer_result, - PreparedSetsPtr prepared_sets_, ContextPtr context, const Names & key_column_names, const ExpressionActionsPtr & key_expr, + NameSet array_joined_column_names, bool single_point_ = false, bool strict_ = false); @@ -275,6 +272,7 @@ public: /// Checks that the index can not be used /// FUNCTION_UNKNOWN will be AND'ed (if any). bool alwaysUnknownOrTrue() const; + /// Checks that the index can not be used /// Does not allow any FUNCTION_UNKNOWN (will instantly return true). bool anyUnknownOrAlwaysTrue() const; @@ -313,10 +311,18 @@ public: * Returns false, if expression isn't constant. */ static bool getConstant( - const ASTPtr & expr, Block & block_with_constants, Field & out_value, DataTypePtr & out_type); + const ASTPtr & expr, + Block & block_with_constants, + Field & out_value, + DataTypePtr & out_type); + /** Calculate expressions, that depend only on constants. + * For index to work when something like "WHERE Date = toDate(now())" is written. 
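 * (Illustrative: toDate(now()) is constant-folded here into a plain literal, so the
 *  comparison against the Date key later looks like an ordinary constant atom.)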
+ */ static Block getBlockWithConstants( - const ASTPtr & query, const TreeRewriterResultPtr & syntax_analyzer_result, ContextPtr context); + const ASTPtr & query, + const TreeRewriterResultPtr & syntax_analyzer_result, + ContextPtr context); static std::optional applyMonotonicFunctionsChainToRange( Range key_range, @@ -373,14 +379,11 @@ private: using RPN = std::vector; using ColumnIndices = std::map; - using AtomMap = std::unordered_map; public: + using AtomMap = std::unordered_map; static const AtomMap atom_map; - class Tree; - class FunctionTree; - private: BoolMask checkInRange( size_t used_key_size, @@ -390,9 +393,7 @@ private: bool right_bounded, BoolMask initial_mask) const; - void traverseAST(const Tree & node, ContextPtr context, Block & block_with_constants); - bool tryParseAtomFromAST(const Tree & node, ContextPtr context, Block & block_with_constants, RPNElement & out); - static bool tryParseLogicalOperatorFromAST(const FunctionTree & func, RPNElement & out); + bool extractAtomFromTree(const RPNBuilderTreeNode & node, RPNElement & out); /** Is node the key column * or expression in which column of key is wrapped by chain of functions, @@ -401,17 +402,16 @@ private: * and fills chain of possibly-monotonic functions. */ bool isKeyPossiblyWrappedByMonotonicFunctions( - const Tree & node, - ContextPtr context, + const RPNBuilderTreeNode & node, size_t & out_key_column_num, DataTypePtr & out_key_res_column_type, MonotonicFunctionsChain & out_functions_chain); bool isKeyPossiblyWrappedByMonotonicFunctionsImpl( - const Tree & node, + const RPNBuilderTreeNode & node, size_t & out_key_column_num, DataTypePtr & out_key_column_type, - std::vector & out_functions_chain); + std::vector & out_functions_chain); bool transformConstantWithValidFunctions( const String & expr_name, @@ -422,21 +422,24 @@ private: std::function always_monotonic) const; bool canConstantBeWrappedByMonotonicFunctions( - const Tree & node, + const RPNBuilderTreeNode & node, size_t & out_key_column_num, DataTypePtr & out_key_column_type, Field & out_value, DataTypePtr & out_type); bool canConstantBeWrappedByFunctions( - const Tree & node, size_t & out_key_column_num, DataTypePtr & out_key_column_type, Field & out_value, DataTypePtr & out_type); + const RPNBuilderTreeNode & node, + size_t & out_key_column_num, + DataTypePtr & out_key_column_type, + Field & out_value, + DataTypePtr & out_type); /// If it's possible to make an RPNElement /// that will filter values (possibly tuples) by the content of 'prepared_set', /// do it and return true. bool tryPrepareSetIndex( - const FunctionTree & func, - ContextPtr context, + const RPNBuilderFunctionTreeNode & func, RPNElement & out, size_t & out_key_column_num); @@ -472,11 +475,12 @@ private: /// All intermediate columns are used to calculate key_expr. 
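For orientation, a hypothetical call site for the query_info-based constructor declared above; the primary_key locals and the logger are assumptions for the sketch, not part of this diff:

    KeyCondition key_condition(
        query_info, context,
        primary_key.column_names, primary_key.expression);

    if (key_condition.alwaysUnknownOrTrue())
        LOG_TRACE(log, "Primary key is not used: {}", key_condition.toString());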
const NameSet key_subexpr_names; - NameSet array_joined_columns; - PreparedSetsPtr prepared_sets; + /// Array joined column names + NameSet array_joined_column_names; // If true, always allow key_expr to be wrapped by function bool single_point; + // If true, do not use always_monotonic information to transform constants bool strict; }; diff --git a/src/Storages/MergeTree/LoadedMergeTreeDataPartInfoForReader.h b/src/Storages/MergeTree/LoadedMergeTreeDataPartInfoForReader.h index a16aaa728ae..bc786ec0428 100644 --- a/src/Storages/MergeTree/LoadedMergeTreeDataPartInfoForReader.h +++ b/src/Storages/MergeTree/LoadedMergeTreeDataPartInfoForReader.h @@ -12,7 +12,8 @@ public: explicit LoadedMergeTreeDataPartInfoForReader(MergeTreeData::DataPartPtr data_part_) : IMergeTreeDataPartInfoForReader(data_part_->storage.getContext()) , data_part(data_part_) - {} + { + } bool isCompactPart() const override { return DB::isCompactPart(data_part); } @@ -22,7 +23,7 @@ public: bool isProjectionPart() const override { return data_part->isProjectionPart(); } - const DataPartStoragePtr & getDataPartStorage() const override { return data_part->data_part_storage; } + DataPartStoragePtr getDataPartStorage() const override { return data_part->getDataPartStoragePtr(); } const NamesAndTypesList & getColumns() const override { return data_part->getColumns(); } diff --git a/src/Storages/MergeTree/MergeFromLogEntryTask.cpp b/src/Storages/MergeTree/MergeFromLogEntryTask.cpp index 18982c3bbf4..9a9b8a4a6bb 100644 --- a/src/Storages/MergeTree/MergeFromLogEntryTask.cpp +++ b/src/Storages/MergeTree/MergeFromLogEntryTask.cpp @@ -160,7 +160,9 @@ ReplicatedMergeMutateTaskBase::PrepareResult MergeFromLogEntryTask::prepare() for (auto & part_ptr : parts) { ttl_infos.update(part_ptr->ttl_infos); - max_volume_index = std::max(max_volume_index, part_ptr->data_part_storage->getVolumeIndex(*storage.getStoragePolicy())); + auto disk_name = part_ptr->getDataPartStorage().getDiskName(); + size_t volume_index = storage.getStoragePolicy()->getVolumeIndexByDiskName(disk_name); + max_volume_index = std::max(max_volume_index, volume_index); } /// It will live until the whole task is being destroyed @@ -294,12 +296,10 @@ ReplicatedMergeMutateTaskBase::PrepareResult MergeFromLogEntryTask::prepare() bool MergeFromLogEntryTask::finalize(ReplicatedMergeMutateTaskBase::PartLogWriter write_part_log) { part = merge_task->getFuture().get(); - auto builder = merge_task->getBuilder(); /// Task is not needed merge_task.reset(); - - storage.merger_mutator.renameMergedTemporaryPart(part, parts, NO_TRANSACTION_PTR, *transaction_ptr, builder); + storage.merger_mutator.renameMergedTemporaryPart(part, parts, NO_TRANSACTION_PTR, *transaction_ptr); try { diff --git a/src/Storages/MergeTree/MergeList.cpp b/src/Storages/MergeTree/MergeList.cpp index ebe826531d2..02e61a70eb6 100644 --- a/src/Storages/MergeTree/MergeList.cpp +++ b/src/Storages/MergeTree/MergeList.cpp @@ -65,7 +65,7 @@ MergeListElement::MergeListElement( for (const auto & source_part : future_part->parts) { source_part_names.emplace_back(source_part->name); - source_part_paths.emplace_back(source_part->data_part_storage->getFullPath()); + source_part_paths.emplace_back(source_part->getDataPartStorage().getFullPath()); total_size_bytes_compressed += source_part->getBytesOnDisk(); total_size_marks += source_part->getMarksCount(); diff --git a/src/Storages/MergeTree/MergePlainMergeTreeTask.cpp b/src/Storages/MergeTree/MergePlainMergeTreeTask.cpp index 0dcdd927e7b..cc5e87956a1 100644 --- 
a/src/Storages/MergeTree/MergePlainMergeTreeTask.cpp +++ b/src/Storages/MergeTree/MergePlainMergeTreeTask.cpp @@ -115,10 +115,9 @@ void MergePlainMergeTreeTask::prepare() void MergePlainMergeTreeTask::finish() { new_part = merge_task->getFuture().get(); - auto builder = merge_task->getBuilder(); MergeTreeData::Transaction transaction(storage, txn.get()); - storage.merger_mutator.renameMergedTemporaryPart(new_part, future_part->parts, txn, transaction, builder); + storage.merger_mutator.renameMergedTemporaryPart(new_part, future_part->parts, txn, transaction); transaction.commit(); write_part_log({}); diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index c247d2d2476..0b6fe23e961 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -1,3 +1,4 @@ +#include "Storages/MergeTree/IDataPartStorage.h" #include #include @@ -125,23 +126,26 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() ctx->disk = global_ctx->space_reservation->getDisk(); String local_tmp_part_basename = local_tmp_prefix + global_ctx->future_part->name + local_tmp_suffix; + MutableDataPartStoragePtr data_part_storage; - if (global_ctx->parent_path_storage_builder) + if (global_ctx->parent_part) { - global_ctx->data_part_storage_builder = global_ctx->parent_path_storage_builder->getProjection(local_tmp_part_basename); + data_part_storage = global_ctx->parent_part->getDataPartStorage().getProjection(local_tmp_part_basename); } else { auto local_single_disk_volume = std::make_shared("volume_" + global_ctx->future_part->name, ctx->disk, 0); - global_ctx->data_part_storage_builder = std::make_shared( + data_part_storage = std::make_shared( local_single_disk_volume, global_ctx->data->relative_data_path, local_tmp_part_basename); + + data_part_storage->beginTransaction(); } - if (global_ctx->data_part_storage_builder->exists()) - throw Exception("Directory " + global_ctx->data_part_storage_builder->getFullPath() + " already exists", ErrorCodes::DIRECTORY_ALREADY_EXISTS); + if (data_part_storage->exists()) + throw Exception("Directory " + data_part_storage->getFullPath() + " already exists", ErrorCodes::DIRECTORY_ALREADY_EXISTS); if (!global_ctx->parent_part) global_ctx->temporary_directory_lock = global_ctx->data->getTemporaryPartDirectoryHolder(local_tmp_part_basename); @@ -149,7 +153,7 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() global_ctx->all_column_names = global_ctx->metadata_snapshot->getColumns().getNamesOfPhysical(); global_ctx->storage_columns = global_ctx->metadata_snapshot->getColumns().getAllPhysical(); - auto object_columns = MergeTreeData::getObjectColumns(global_ctx->future_part->parts, global_ctx->metadata_snapshot->getColumns()); + auto object_columns = MergeTreeData::getConcreteObjectColumns(global_ctx->future_part->parts, global_ctx->metadata_snapshot->getColumns()); global_ctx->storage_snapshot = std::make_shared(*global_ctx->data, global_ctx->metadata_snapshot, object_columns); extendObjectColumns(global_ctx->storage_columns, object_columns, false); @@ -163,8 +167,6 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() global_ctx->merging_columns, global_ctx->merging_column_names); - auto data_part_storage = global_ctx->data_part_storage_builder->getStorage(); - global_ctx->new_data_part = global_ctx->data->createPart( global_ctx->future_part->name, global_ctx->future_part->type, @@ -302,7 +304,6 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() global_ctx->to = std::make_shared( 
global_ctx->new_data_part, - global_ctx->data_part_storage_builder, global_ctx->metadata_snapshot, global_ctx->merging_columns, MergeTreeIndexFactory::instance().getMany(global_ctx->metadata_snapshot->getSecondaryIndices()), @@ -501,7 +502,6 @@ void MergeTask::VerticalMergeStage::prepareVerticalMergeForOneColumn() const ctx->executor = std::make_unique(ctx->column_parts_pipeline); ctx->column_to = std::make_unique( - global_ctx->data_part_storage_builder, global_ctx->new_data_part, global_ctx->metadata_snapshot, ctx->executor->getHeader(), @@ -654,7 +654,6 @@ bool MergeTask::MergeProjectionsStage::mergeMinMaxIndexAndPrepareProjections() c global_ctx->deduplicate_by_columns, projection_merging_params, global_ctx->new_data_part.get(), - global_ctx->data_part_storage_builder.get(), ".proj", NO_TRANSACTION_PTR, global_ctx->data, diff --git a/src/Storages/MergeTree/MergeTask.h b/src/Storages/MergeTree/MergeTask.h index 43aba602052..6a29cdbb5ca 100644 --- a/src/Storages/MergeTree/MergeTask.h +++ b/src/Storages/MergeTree/MergeTask.h @@ -59,8 +59,7 @@ public: bool deduplicate_, Names deduplicate_by_columns_, MergeTreeData::MergingParams merging_params_, - const IMergeTreeDataPart * parent_part_, - const IDataPartStorageBuilder * parent_path_storage_builder_, + IMergeTreeDataPart * parent_part_, String suffix_, MergeTreeTransactionPtr txn, MergeTreeData * data_, @@ -82,7 +81,6 @@ public: global_ctx->deduplicate = std::move(deduplicate_); global_ctx->deduplicate_by_columns = std::move(deduplicate_by_columns_); global_ctx->parent_part = std::move(parent_part_); - global_ctx->parent_path_storage_builder = std::move(parent_path_storage_builder_); global_ctx->data = std::move(data_); global_ctx->mutator = std::move(mutator_); global_ctx->merges_blocker = std::move(merges_blocker_); @@ -102,11 +100,6 @@ public: return global_ctx->promise.get_future(); } - DataPartStorageBuilderPtr getBuilder() - { - return global_ctx->data_part_storage_builder; - } - bool execute(); private: @@ -141,8 +134,7 @@ private: StorageMetadataPtr metadata_snapshot{nullptr}; FutureMergedMutatedPartPtr future_part{nullptr}; /// This will be either nullptr or new_data_part, so raw pointer is ok. - const IMergeTreeDataPart * parent_part{nullptr}; - const IDataPartStorageBuilder * parent_path_storage_builder{nullptr}; + IMergeTreeDataPart * parent_part{nullptr}; ContextPtr context{nullptr}; time_t time_of_merge{0}; ReservationSharedPtr space_reservation{nullptr}; @@ -168,7 +160,6 @@ private: std::unique_ptr merging_executor; MergeTreeData::MutableDataPartPtr new_data_part{nullptr}; - DataPartStorageBuilderPtr data_part_storage_builder; /// If lightweight delete mask is present then some input rows are filtered out right after reading. 
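Zooming out from MergeTask's members, the shape of the writer API change in this patch, roughly (argument lists abbreviated):

    /// before: MergedBlockOutputStream(new_data_part, data_part_storage_builder, metadata_snapshot, columns, ...)
    /// after:  MergedBlockOutputStream(new_data_part, metadata_snapshot, columns, ...)
    ///         with the stream taking its mutable storage from new_data_part->getDataPartStoragePtr().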
std::shared_ptr> input_rows_filtered{std::make_shared>(0)}; diff --git a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp index 475407a402b..b63e08b733d 100644 --- a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.cpp @@ -607,7 +607,7 @@ Block MergeTreeBaseSelectProcessor::transformHeader( if (!row_level_column.type->canBeUsedInBooleanContext()) { throw Exception("Invalid type for filter in PREWHERE: " + row_level_column.type->getName(), - ErrorCodes::LOGICAL_ERROR); + ErrorCodes::ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER); } block.erase(prewhere_info->row_level_column_name); @@ -620,7 +620,7 @@ Block MergeTreeBaseSelectProcessor::transformHeader( if (!prewhere_column.type->canBeUsedInBooleanContext()) { throw Exception("Invalid type for filter in PREWHERE: " + prewhere_column.type->getName(), - ErrorCodes::LOGICAL_ERROR); + ErrorCodes::ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER); } if (prewhere_info->remove_prewhere_column) @@ -628,13 +628,13 @@ Block MergeTreeBaseSelectProcessor::transformHeader( else { WhichDataType which(removeNullable(recursiveRemoveLowCardinality(prewhere_column.type))); - if (which.isInt() || which.isUInt()) + if (which.isNativeInt() || which.isNativeUInt()) prewhere_column.column = prewhere_column.type->createColumnConst(block.rows(), 1u)->convertToFullColumnIfConst(); else if (which.isFloat()) prewhere_column.column = prewhere_column.type->createColumnConst(block.rows(), 1.0f)->convertToFullColumnIfConst(); else - throw Exception("Illegal type " + prewhere_column.type->getName() + " of column for filter.", - ErrorCodes::ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER); + throw Exception( + ErrorCodes::ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER, "Illegal type {} of column for filter", prewhere_column.type->getName()); } } diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 8957f134053..83e87a0e462 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -943,8 +943,8 @@ Int64 MergeTreeData::getMaxBlockNumber() const } void MergeTreeData::loadDataPartsFromDisk( - DataPartsVector & broken_parts_to_detach, - DataPartsVector & duplicate_parts_to_remove, + MutableDataPartsVector & broken_parts_to_detach, + MutableDataPartsVector & duplicate_parts_to_remove, ThreadPool & pool, size_t num_parts, std::queue>> & parts_queue, @@ -1082,7 +1082,6 @@ void MergeTreeData::loadDataPartsFromDisk( if (size_of_part.has_value()) part_size_str = formatReadableSizeWithBinarySuffix(*size_of_part); - LOG_ERROR(log, "Detaching broken part {}{} (size: {}). " "If it happened after update, it is likely because of backward incompatibility. 
" @@ -1200,8 +1199,7 @@ void MergeTreeData::loadDataPartsFromDisk( void MergeTreeData::loadDataPartsFromWAL( - DataPartsVector & /* broken_parts_to_detach */, - DataPartsVector & duplicate_parts_to_remove, + MutableDataPartsVector & duplicate_parts_to_remove, MutableDataPartsVector & parts_from_wal) { for (auto & part : parts_from_wal) @@ -1215,7 +1213,7 @@ void MergeTreeData::loadDataPartsFromWAL( { if ((*it)->checksums.getTotalChecksumHex() == part->checksums.getTotalChecksumHex()) { - LOG_ERROR(log, "Remove duplicate part {}", part->data_part_storage->getFullPath()); + LOG_ERROR(log, "Remove duplicate part {}", part->getDataPartStorage().getFullPath()); duplicate_parts_to_remove.push_back(part); } else @@ -1329,8 +1327,8 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks) auto part_lock = lockParts(); data_parts_indexes.clear(); - DataPartsVector broken_parts_to_detach; - DataPartsVector duplicate_parts_to_remove; + MutableDataPartsVector broken_parts_to_detach; + MutableDataPartsVector duplicate_parts_to_remove; if (num_parts > 0) loadDataPartsFromDisk( @@ -1384,7 +1382,7 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks) parts_from_wal.insert( parts_from_wal.end(), std::make_move_iterator(disk_wal_parts.begin()), std::make_move_iterator(disk_wal_parts.end())); - loadDataPartsFromWAL(broken_parts_to_detach, duplicate_parts_to_remove, parts_from_wal); + loadDataPartsFromWAL(duplicate_parts_to_remove, parts_from_wal); num_parts += parts_from_wal.size(); } @@ -1397,11 +1395,7 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks) } for (auto & part : broken_parts_to_detach) - { - auto builder = part->data_part_storage->getBuilder(); - part->renameToDetached("broken-on-start", builder); /// detached parts must not have '_' in prefixes - builder->commit(); - } + part->renameToDetached("broken-on-start"); /// detached parts must not have '_' in prefixes for (auto & part : duplicate_parts_to_remove) part->remove(); @@ -1689,6 +1683,15 @@ scope_guard MergeTreeData::getTemporaryPartDirectoryHolder(const String & part_d return [this, part_dir_name]() { temporary_parts.remove(part_dir_name); }; } +MergeTreeData::MutableDataPartPtr MergeTreeData::preparePartForRemoval(const DataPartPtr & part) +{ + auto state = part->getState(); + if (state != DataPartState::Deleting && state != DataPartState::DeleteOnDestroy) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Cannot remove part {}, because it has state: {}", part->name, magic_enum::enum_name(part->getState())); + + return std::const_pointer_cast(part); +} MergeTreeData::DataPartsVector MergeTreeData::grabOldParts(bool force) { @@ -1864,7 +1867,7 @@ void MergeTreeData::flushAllInMemoryPartsIfNeeded() { if (auto part_in_memory = asInMemoryPart(part)) { - part_in_memory->flushToDisk(part_in_memory->data_part_storage->getPartDirectory(), metadata_snapshot); + part_in_memory->flushToDisk(part_in_memory->getDataPartStorage().getPartDirectory(), metadata_snapshot); } } } @@ -1948,7 +1951,7 @@ void MergeTreeData::clearPartsFromFilesystemImpl(const DataPartsVector & parts_t if (thread_group) CurrentThread::attachToIfDetached(thread_group); - part->remove(); + preparePartForRemoval(part)->remove(); if (part_names_succeed) { std::lock_guard lock(part_names_mutex); @@ -1964,7 +1967,7 @@ void MergeTreeData::clearPartsFromFilesystemImpl(const DataPartsVector & parts_t LOG_DEBUG(log, "Removing {} parts from filesystem: {}", parts_to_remove.size(), fmt::join(parts_to_remove, ", ")); for (const DataPartPtr & part : parts_to_remove) { - 
part->remove(); + preparePartForRemoval(part)->remove(); if (part_names_succeed) part_names_succeed->insert(part->name); } @@ -2144,11 +2147,14 @@ void MergeTreeData::rename(const String & new_table_path, const StorageID & new_ if (!getStorageID().hasUUID()) getContext()->dropCaches(); + /// TODO: remove const_cast for (const auto & part : data_parts_by_info) - part->data_part_storage->changeRootPath(relative_data_path, new_table_path); + { + auto & part_mutable = const_cast(*part); + part_mutable.getDataPartStorage().changeRootPath(relative_data_path, new_table_path); + } relative_data_path = new_table_path; - renameInMemory(new_table_id); } @@ -2166,7 +2172,12 @@ void MergeTreeData::dropAllData() auto lock = lockParts(); - DataPartsVector all_parts(data_parts_by_info.begin(), data_parts_by_info.end()); + DataPartsVector all_parts; + for (auto it = data_parts_by_info.begin(); it != data_parts_by_info.end(); ++it) + { + modifyPartState(it, DataPartState::Deleting); + all_parts.push_back(*it); + } { std::lock_guard wal_lock(write_ahead_log_mutex); @@ -2179,7 +2190,6 @@ void MergeTreeData::dropAllData() if (!getStorageID().hasUUID()) getContext()->dropCaches(); - /// Removing of each data part before recursive removal of directory is to speed-up removal, because there will be less number of syscalls. NameSet part_names_failed; try @@ -2189,6 +2199,7 @@ void MergeTreeData::dropAllData() LOG_TRACE(log, "dropAllData: removing all data parts from memory."); data_parts_indexes.clear(); + all_data_dropped = true; } catch (...) { @@ -2726,7 +2737,7 @@ MergeTreeDataPartType MergeTreeData::choosePartTypeOnDisk(size_t bytes_uncompres MergeTreeData::MutableDataPartPtr MergeTreeData::createPart(const String & name, MergeTreeDataPartType type, const MergeTreePartInfo & part_info, - const DataPartStoragePtr & data_part_storage, const IMergeTreeDataPart * parent_part) const + const MutableDataPartStoragePtr & data_part_storage, const IMergeTreeDataPart * parent_part) const { if (type == MergeTreeDataPartType::Compact) return std::make_shared(*this, name, part_info, data_part_storage, parent_part); @@ -2739,17 +2750,17 @@ MergeTreeData::MutableDataPartPtr MergeTreeData::createPart(const String & name, } MergeTreeData::MutableDataPartPtr MergeTreeData::createPart( - const String & name, const DataPartStoragePtr & data_part_storage, const IMergeTreeDataPart * parent_part) const + const String & name, const MutableDataPartStoragePtr & data_part_storage, const IMergeTreeDataPart * parent_part) const { return createPart(name, MergeTreePartInfo::fromPartName(name, format_version), data_part_storage, parent_part); } MergeTreeData::MutableDataPartPtr MergeTreeData::createPart( const String & name, const MergeTreePartInfo & part_info, - const DataPartStoragePtr & data_part_storage, const IMergeTreeDataPart * parent_part) const + const MutableDataPartStoragePtr & data_part_storage, const IMergeTreeDataPart * parent_part) const { MergeTreeDataPartType type; - auto mrk_ext = MergeTreeIndexGranularityInfo::getMarksExtensionFromFilesystem(data_part_storage); + auto mrk_ext = MergeTreeIndexGranularityInfo::getMarksExtensionFromFilesystem(*data_part_storage); if (mrk_ext) { @@ -2943,12 +2954,11 @@ MergeTreeData::DataPartsVector MergeTreeData::getActivePartsToReplace( bool MergeTreeData::renameTempPartAndAdd( MutableDataPartPtr & part, Transaction & out_transaction, - DataPartStorageBuilderPtr builder, DataPartsLock & lock) { DataPartsVector covered_parts; - if (!renameTempPartAndReplaceImpl(part, out_transaction, lock, 
builder, &covered_parts)) + if (!renameTempPartAndReplaceImpl(part, out_transaction, lock, &covered_parts)) return false; if (!covered_parts.empty()) @@ -2982,32 +2992,31 @@ void MergeTreeData::checkPartCanBeAddedToTable(MutableDataPartPtr & part, DataPa } } -void MergeTreeData::preparePartForCommit(MutableDataPartPtr & part, Transaction & out_transaction, DataPartStorageBuilderPtr builder) +void MergeTreeData::preparePartForCommit(MutableDataPartPtr & part, Transaction & out_transaction) { part->is_temp = false; part->setState(DataPartState::PreActive); assert([&]() { - String dir_name = fs::path(part->data_part_storage->getRelativePath()).filename(); + String dir_name = fs::path(part->getDataPartStorage().getRelativePath()).filename(); bool may_be_cleaned_up = dir_name.starts_with("tmp_") || dir_name.starts_with("tmp-fetch_"); return !may_be_cleaned_up || temporary_parts.contains(dir_name); }()); - part->renameTo(part->name, true, builder); + part->renameTo(part->name, true); data_parts_indexes.insert(part); - out_transaction.addPart(part, builder); + out_transaction.addPart(part); } bool MergeTreeData::renameTempPartAndReplaceImpl( MutableDataPartPtr & part, Transaction & out_transaction, DataPartsLock & lock, - DataPartStorageBuilderPtr builder, DataPartsVector * out_covered_parts) { - LOG_TRACE(log, "Renaming temporary part {} to {}.", part->data_part_storage->getPartDirectory(), part->name); + LOG_TRACE(log, "Renaming temporary part {} to {}.", part->getDataPartStorage().getPartDirectory(), part->name); if (&out_transaction.data != this) throw Exception("MergeTreeData::Transaction for one table cannot be used with another. It is a bug.", @@ -3029,7 +3038,7 @@ bool MergeTreeData::renameTempPartAndReplaceImpl( /// All checks are passed. Now we can rename the part on disk. 
/// So, we maintain invariant: if a non-temporary part in filesystem then it is in data_parts - preparePartForCommit(part, out_transaction, builder); + preparePartForCommit(part, out_transaction); if (out_covered_parts) { @@ -3045,21 +3054,19 @@ bool MergeTreeData::renameTempPartAndReplaceImpl( MergeTreeData::DataPartsVector MergeTreeData::renameTempPartAndReplaceUnlocked( MutableDataPartPtr & part, Transaction & out_transaction, - DataPartStorageBuilderPtr builder, DataPartsLock & lock) { DataPartsVector covered_parts; - renameTempPartAndReplaceImpl(part, out_transaction, lock, builder, &covered_parts); + renameTempPartAndReplaceImpl(part, out_transaction, lock, &covered_parts); return covered_parts; } MergeTreeData::DataPartsVector MergeTreeData::renameTempPartAndReplace( MutableDataPartPtr & part, - Transaction & out_transaction, - DataPartStorageBuilderPtr builder) + Transaction & out_transaction) { auto part_lock = lockParts(); - return renameTempPartAndReplaceUnlocked(part, out_transaction, builder, part_lock); + return renameTempPartAndReplaceUnlocked(part, out_transaction, part_lock); } void MergeTreeData::removePartsFromWorkingSet(MergeTreeTransaction * txn, const MergeTreeData::DataPartsVector & remove, bool clear_without_timeout, DataPartsLock & acquired_lock) @@ -3136,7 +3143,7 @@ void MergeTreeData::removePartsInRangeFromWorkingSet(MergeTreeTransaction * txn, removePartsInRangeFromWorkingSetAndGetPartsToRemoveFromZooKeeper(txn, drop_range, lock); } -MergeTreeData::DataPartsVector MergeTreeData::removePartsInRangeFromWorkingSetAndGetPartsToRemoveFromZooKeeper( +MergeTreeData::PartsToRemoveFromZooKeeper MergeTreeData::removePartsInRangeFromWorkingSetAndGetPartsToRemoveFromZooKeeper( MergeTreeTransaction * txn, const MergeTreePartInfo & drop_range, DataPartsLock & lock) { DataPartsVector parts_to_remove; @@ -3214,15 +3221,20 @@ MergeTreeData::DataPartsVector MergeTreeData::removePartsInRangeFromWorkingSetAn /// FIXME refactor removePartsFromWorkingSet(...), do not remove parts twice removePartsFromWorkingSet(txn, parts_to_remove, clear_without_timeout, lock); + /// Since we can return parts in Deleting state, we have to use a wrapper that restricts access to such parts. 
+ PartsToRemoveFromZooKeeper parts_to_remove_from_zookeeper; + for (auto & part : parts_to_remove) + parts_to_remove_from_zookeeper.emplace_back(std::move(part)); + for (auto & part : inactive_parts_to_remove_immediately) { if (!drop_range.contains(part->info)) continue; part->remove_time.store(0, std::memory_order_relaxed); - parts_to_remove.push_back(std::move(part)); + parts_to_remove_from_zookeeper.emplace_back(std::move(part), /* was_active */ false); } - return parts_to_remove; + return parts_to_remove_from_zookeeper; } void MergeTreeData::restoreAndActivatePart(const DataPartPtr & part, DataPartsLock * acquired_lock) @@ -3240,20 +3252,23 @@ void MergeTreeData::outdateBrokenPartAndCloneToDetached(const DataPartPtr & part { auto metadata_snapshot = getInMemoryMetadataPtr(); if (prefix.empty()) - LOG_INFO(log, "Cloning part {} to {} and making it obsolete.", part_to_detach->data_part_storage->getPartDirectory(), part_to_detach->name); + LOG_INFO(log, "Cloning part {} to {} and making it obsolete.", part_to_detach->getDataPartStorage().getPartDirectory(), part_to_detach->name); else - LOG_INFO(log, "Cloning part {} to {}_{} and making it obsolete.", part_to_detach->data_part_storage->getPartDirectory(), prefix, part_to_detach->name); + LOG_INFO(log, "Cloning part {} to {}_{} and making it obsolete.", part_to_detach->getDataPartStorage().getPartDirectory(), prefix, part_to_detach->name); part_to_detach->makeCloneInDetached(prefix, metadata_snapshot); - removePartsFromWorkingSet(NO_TRANSACTION_RAW, {part_to_detach}, true); + + DataPartsLock lock = lockParts(); + if (part_to_detach->getState() == DataPartState::Active) + removePartsFromWorkingSet(NO_TRANSACTION_RAW, {part_to_detach}, true, &lock); } void MergeTreeData::forcefullyMovePartToDetachedAndRemoveFromMemory(const MergeTreeData::DataPartPtr & part_to_detach, const String & prefix, bool restore_covered) { if (prefix.empty()) - LOG_INFO(log, "Renaming {} to {} and forgetting it.", part_to_detach->data_part_storage->getPartDirectory(), part_to_detach->name); + LOG_INFO(log, "Renaming {} to {} and forgetting it.", part_to_detach->getDataPartStorage().getPartDirectory(), part_to_detach->name); else - LOG_INFO(log, "Renaming {} to {}_{} and forgetting it.", part_to_detach->data_part_storage->getPartDirectory(), prefix, part_to_detach->name); + LOG_INFO(log, "Renaming {} to {}_{} and forgetting it.", part_to_detach->getDataPartStorage().getPartDirectory(), prefix, part_to_detach->name); auto lock = lockParts(); bool removed_active_part = false; @@ -3276,11 +3291,7 @@ void MergeTreeData::forcefullyMovePartToDetachedAndRemoveFromMemory(const MergeT } modifyPartState(it_part, DataPartState::Deleting); - - auto builder = part->data_part_storage->getBuilder(); - part->renameToDetached(prefix, builder); - builder->commit(); - + preparePartForRemoval(part)->renameToDetached(prefix); data_parts_indexes.erase(it_part); if (restore_covered && part->info.level == 0) @@ -3434,7 +3445,7 @@ void MergeTreeData::tryRemovePartImmediately(DataPartPtr && part) try { - part_to_delete->remove(); + preparePartForRemoval(part_to_delete)->remove(); } catch (...) 
{ @@ -3644,9 +3655,9 @@ void MergeTreeData::swapActivePart(MergeTreeData::DataPartPtr part_copy) /// when allow_remote_fs_zero_copy_replication turned on and off again original_active_part->force_keep_shared_data = false; - if (original_active_part->data_part_storage->supportZeroCopyReplication() && - part_copy->data_part_storage->supportZeroCopyReplication() && - original_active_part->data_part_storage->getUniqueId() == part_copy->data_part_storage->getUniqueId()) + if (original_active_part->getDataPartStorage().supportZeroCopyReplication() && + part_copy->getDataPartStorage().supportZeroCopyReplication() && + original_active_part->getDataPartStorage().getUniqueId() == part_copy->getDataPartStorage().getUniqueId()) { /// May be when several volumes use the same S3/HDFS storage original_active_part->force_keep_shared_data = true; @@ -3666,7 +3677,7 @@ void MergeTreeData::swapActivePart(MergeTreeData::DataPartPtr part_copy) /// All other locks are taken in StorageReplicatedMergeTree lockSharedData(*part_copy); - original_active_part->data_part_storage->writeDeleteOnDestroyMarker(log); + preparePartForRemoval(original_active_part)->writeDeleteOnDestroyMarker(); return; } } @@ -3800,9 +3811,9 @@ MergeTreeData::DataPartPtr MergeTreeData::getPartIfExists(const String & part_na static void loadPartAndFixMetadataImpl(MergeTreeData::MutableDataPartPtr part) { part->loadColumnsChecksumsIndexes(false, true); - part->modification_time = part->data_part_storage->getLastModified().epochTime(); - part->data_part_storage->removeDeleteOnDestroyMarker(); - part->data_part_storage->removeVersionMetadata(); + part->modification_time = part->getDataPartStorage().getLastModified().epochTime(); + part->removeDeleteOnDestroyMarker(); + part->removeVersionMetadata(); } void MergeTreeData::calculateColumnAndSecondaryIndexSizesImpl() @@ -3962,7 +3973,7 @@ void MergeTreeData::movePartitionToDisk(const ASTPtr & partition, const String & auto disk = getStoragePolicy()->getDiskByName(name); std::erase_if(parts, [&](auto part_ptr) { - return part_ptr->data_part_storage->getDiskName() == disk->getName(); + return part_ptr->getDataPartStorage().getDiskName() == disk->getName(); }); if (parts.empty()) @@ -4012,7 +4023,7 @@ void MergeTreeData::movePartitionToVolume(const ASTPtr & partition, const String { for (const auto & disk : volume->getDisks()) { - if (part_ptr->data_part_storage->getDiskName() == disk->getName()) + if (part_ptr->getDataPartStorage().getDiskName() == disk->getName()) { return true; } @@ -4209,7 +4220,7 @@ BackupEntries MergeTreeData::backupParts(const DataPartsVector & data_parts, con make_temporary_hard_links = false; hold_storage_and_part_ptrs = true; } - else if (supportsReplication() && part->data_part_storage->supportZeroCopyReplication() && getSettings()->allow_remote_fs_zero_copy_replication) + else if (supportsReplication() && part->getDataPartStorage().supportZeroCopyReplication() && getSettings()->allow_remote_fs_zero_copy_replication) { /// Hard links don't work correctly with zero copy replication. 
make_temporary_hard_links = false; @@ -4221,7 +4232,7 @@ BackupEntries MergeTreeData::backupParts(const DataPartsVector & data_parts, con table_lock = lockForShare(local_context->getCurrentQueryId(), local_context->getSettingsRef().lock_acquire_timeout); BackupEntries backup_entries_from_part; - part->data_part_storage->backup( + part->getDataPartStorage().backup( part->checksums, part->getFileNamesWithoutChecksums(), data_path_in_backup, @@ -4232,7 +4243,7 @@ BackupEntries MergeTreeData::backupParts(const DataPartsVector & data_parts, con auto projection_parts = part->getProjectionParts(); for (const auto & [projection_name, projection_part] : projection_parts) { - projection_part->data_part_storage->backup( + projection_part->getDataPartStorage().backup( projection_part->checksums, projection_part->getFileNamesWithoutChecksums(), fs::path{data_path_in_backup} / part->name, @@ -4908,22 +4919,16 @@ ReservationPtr MergeTreeData::reserveSpace(UInt64 expected_size, SpacePtr space) return checkAndReturnReservation(expected_size, std::move(reservation)); } -ReservationPtr MergeTreeData::reserveSpace(UInt64 expected_size, const DataPartStoragePtr & data_part_storage) +ReservationPtr MergeTreeData::reserveSpace(UInt64 expected_size, const IDataPartStorage & data_part_storage) { expected_size = std::max(RESERVATION_MIN_ESTIMATION_SIZE, expected_size); - return data_part_storage->reserve(expected_size); + return data_part_storage.reserve(expected_size); } -ReservationPtr MergeTreeData::reserveSpace(UInt64 expected_size, const DataPartStorageBuilderPtr & data_part_storage_builder) +ReservationPtr MergeTreeData::tryReserveSpace(UInt64 expected_size, const IDataPartStorage & data_part_storage) { expected_size = std::max(RESERVATION_MIN_ESTIMATION_SIZE, expected_size); - return data_part_storage_builder->reserve(expected_size); -} - -ReservationPtr MergeTreeData::tryReserveSpace(UInt64 expected_size, const DataPartStoragePtr & data_part_storage) -{ - expected_size = std::max(RESERVATION_MIN_ESTIMATION_SIZE, expected_size); - return data_part_storage->tryReserve(expected_size); + return data_part_storage.tryReserve(expected_size); } ReservationPtr MergeTreeData::tryReserveSpace(UInt64 expected_size, SpacePtr space) @@ -5060,7 +5065,7 @@ bool MergeTreeData::shouldPerformTTLMoveOnInsert(const SpacePtr & move_destinati if (move_destination->isDisk()) { auto disk = std::static_pointer_cast(move_destination); - if (auto volume = getStoragePolicy()->tryGetVolumeByDisk(disk)) + if (auto volume = getStoragePolicy()->tryGetVolumeByDiskName(disk->getName())) return volume->perform_ttl_move_on_insert; } return false; @@ -5072,11 +5077,11 @@ bool MergeTreeData::isPartInTTLDestination(const TTLDescription & ttl, const IMe if (ttl.destination_type == DataDestinationType::VOLUME) { for (const auto & disk : policy->getVolumeByName(ttl.destination_name)->getDisks()) - if (disk->getName() == part.data_part_storage->getDiskName()) + if (disk->getName() == part.getDataPartStorage().getDiskName()) return true; } else if (ttl.destination_type == DataDestinationType::DISK) - return policy->getDiskByName(ttl.destination_name)->getName() == part.data_part_storage->getDiskName(); + return policy->getDiskByName(ttl.destination_name)->getName() == part.getDataPartStorage().getDiskName(); return false; } @@ -5148,7 +5153,7 @@ void MergeTreeData::Transaction::rollbackPartsToTemporaryState() WriteBufferFromOwnString buf; buf << " Rollbacking parts state to temporary and removing from working set:"; for (const auto & part : 
precommitted_parts) - buf << " " << part->data_part_storage->getPartDirectory(); + buf << " " << part->getDataPartStorage().getPartDirectory(); buf << "."; LOG_DEBUG(data.log, "Undoing transaction.{}", buf.str()); @@ -5159,12 +5164,11 @@ void MergeTreeData::Transaction::rollbackPartsToTemporaryState() clear(); } -void MergeTreeData::Transaction::addPart(MutableDataPartPtr & part, DataPartStorageBuilderPtr builder) +void MergeTreeData::Transaction::addPart(MutableDataPartPtr & part) { precommitted_parts.insert(part); if (asInMemoryPart(part)) has_in_memory_parts = true; - part_builders.push_back(builder); } void MergeTreeData::Transaction::rollback() @@ -5174,13 +5178,31 @@ void MergeTreeData::Transaction::rollback() WriteBufferFromOwnString buf; buf << " Removing parts:"; for (const auto & part : precommitted_parts) - buf << " " << part->data_part_storage->getPartDirectory(); + buf << " " << part->getDataPartStorage().getPartDirectory(); buf << "."; LOG_DEBUG(data.log, "Undoing transaction.{}", buf.str()); - data.removePartsFromWorkingSet(txn, - DataPartsVector(precommitted_parts.begin(), precommitted_parts.end()), - /* clear_without_timeout = */ true); + auto lock = data.lockParts(); + + if (data.data_parts_indexes.empty()) + { + /// Table was dropped concurrently and all parts (including PreActive parts) were cleared, so there's nothing to rollback + if (!data.all_data_dropped) + { + Strings part_names; + for (const auto & part : precommitted_parts) + part_names.emplace_back(part->name); + throw Exception(ErrorCodes::LOGICAL_ERROR, "There are some PreActive parts ({}) to rollback, " + "but data parts set is empty and table {} was not dropped. It's a bug", + fmt::join(part_names, ", "), data.getStorageID().getNameForLogs()); + } + } + else + { + data.removePartsFromWorkingSet(txn, + DataPartsVector(precommitted_parts.begin(), precommitted_parts.end()), + /* clear_without_timeout = */ true, &lock); + } } clear(); @@ -5202,8 +5224,9 @@ MergeTreeData::DataPartsVector MergeTreeData::Transaction::commit(MergeTreeData: auto parts_lock = acquired_parts_lock ? MergeTreeData::DataPartsLock() : data.lockParts(); auto * owing_parts_lock = acquired_parts_lock ? 
acquired_parts_lock : &parts_lock; - for (auto & builder : part_builders) - builder->commit(); + for (const auto & part : precommitted_parts) + if (part->getDataPartStorage().hasActiveTransaction()) + part->getDataPartStorage().commitTransaction(); bool commit_to_wal = has_in_memory_parts && settings->in_memory_parts_enable_wal; if (txn || commit_to_wal) @@ -5212,7 +5235,7 @@ MergeTreeData::DataPartsVector MergeTreeData::Transaction::commit(MergeTreeData: if (commit_to_wal) wal = data.getWriteAheadLog(); - for (const DataPartPtr & part : precommitted_parts) + for (const auto & part : precommitted_parts) { if (txn) { @@ -5237,7 +5260,7 @@ MergeTreeData::DataPartsVector MergeTreeData::Transaction::commit(MergeTreeData: size_t reduce_rows = 0; size_t reduce_parts = 0; - for (const DataPartPtr & part : precommitted_parts) + for (const auto & part : precommitted_parts) { DataPartPtr covering_part; DataPartsVector covered_parts = data.getActivePartsToReplace(part->info, part->name, covering_part, *owing_parts_lock); @@ -5699,6 +5722,11 @@ std::optional MergeTreeData::getQueryProcessingStageWithAgg { const auto & metadata_snapshot = storage_snapshot->metadata; const auto & settings = query_context->getSettingsRef(); + + /// TODO: Analyzer syntax analyzer result + if (!query_info.syntax_analyzer_result) + return std::nullopt; + if (!settings.allow_experimental_projection_optimization || query_info.ignore_projections || query_info.is_projection_query || settings.aggregate_functions_null_for_empty /* projections don't work correctly with this setting */) return std::nullopt; @@ -6224,7 +6252,7 @@ std::pair MergeTreeData::cloneAn bool does_storage_policy_allow_same_disk = false; for (const DiskPtr & disk : getStoragePolicy()->getDisks()) { - if (disk->getName() == src_part->data_part_storage->getDiskName()) + if (disk->getName() == src_part->getDataPartStorage().getDiskName()) { does_storage_policy_allow_same_disk = true; break; @@ -6234,7 +6262,7 @@ std::pair MergeTreeData::cloneAn throw Exception( ErrorCodes::BAD_ARGUMENTS, "Could not clone and load part {} because disk does not belong to storage policy", - quoteString(src_part->data_part_storage->getFullPath())); + quoteString(src_part->getDataPartStorage().getFullPath())); String dst_part_name = src_part->getNewName(dst_part_info); assert(!tmp_part_prefix.empty()); @@ -6242,15 +6270,14 @@ std::pair MergeTreeData::cloneAn auto temporary_directory_lock = getTemporaryPartDirectoryHolder(tmp_dst_part_name); /// Why it is needed if we only hardlink files? 
- auto reservation = src_part->data_part_storage->reserve(src_part->getBytesOnDisk()); - - auto src_part_storage = src_part->data_part_storage; + auto reservation = src_part->getDataPartStorage().reserve(src_part->getBytesOnDisk()); + auto src_part_storage = src_part->getDataPartStoragePtr(); /// If source part is in memory, flush it to disk and clone it already in on-disk format if (auto src_part_in_memory = asInMemoryPart(src_part)) { auto flushed_part_path = src_part_in_memory->getRelativePathForPrefix(tmp_part_prefix); - src_part_storage = src_part_in_memory->flushToDisk(flushed_part_path, metadata_snapshot); + src_part_storage = src_part_in_memory->flushToDisk(*flushed_part_path, metadata_snapshot); } String with_copy; @@ -6271,7 +6298,7 @@ std::pair MergeTreeData::cloneAn hardlinked_files->source_part_name = src_part->name; hardlinked_files->source_table_shared_id = src_part->storage.getTableSharedID(); - for (auto it = src_part->data_part_storage->iterate(); it->isValid(); it->next()) + for (auto it = src_part->getDataPartStorage().iterate(); it->isValid(); it->next()) { if (!files_to_copy_instead_of_hardlinks.contains(it->name()) && it->name() != IMergeTreeDataPart::DELETE_ON_DESTROY_MARKER_FILE_NAME @@ -6330,14 +6357,14 @@ Strings MergeTreeData::getDataPaths() const void MergeTreeData::reportBrokenPart(MergeTreeData::DataPartPtr & data_part) const { - if (data_part->data_part_storage && data_part->data_part_storage->isBroken()) + if (data_part->getDataPartStorage().isBroken()) { auto parts = getDataPartsForInternalUsage(); - LOG_WARNING(log, "Scanning parts to recover on broken disk {}@{}.", data_part->data_part_storage->getDiskName(), data_part->data_part_storage->getDiskPath()); + LOG_WARNING(log, "Scanning parts to recover on broken disk {}@{}.", data_part->getDataPartStorage().getDiskName(), data_part->getDataPartStorage().getDiskPath()); for (const auto & part : parts) { - if (part->data_part_storage && part->data_part_storage->getDiskName() == data_part->data_part_storage->getDiskName()) + if (part->getDataPartStorage().getDiskName() == data_part->getDataPartStorage().getDiskName()) broken_part_callback(part->name); } } @@ -6428,13 +6455,13 @@ PartitionCommandsResultInfo MergeTreeData::freezePartitionsByMatcher( LOG_DEBUG(log, "Freezing part {} snapshot will be placed at {}", part->name, backup_path); - auto data_part_storage = part->data_part_storage; + auto data_part_storage = part->getDataPartStoragePtr(); String src_part_path = data_part_storage->getRelativePath(); String backup_part_path = fs::path(backup_path) / relative_data_path; if (auto part_in_memory = asInMemoryPart(part)) { auto flushed_part_path = part_in_memory->getRelativePathForPrefix("tmp_freeze"); - data_part_storage = part_in_memory->flushToDisk(flushed_part_path, metadata_snapshot); + data_part_storage = part_in_memory->flushToDisk(*flushed_part_path, metadata_snapshot); } auto callback = [this, &part, &backup_part_path](const DiskPtr & disk) @@ -6442,12 +6469,12 @@ PartitionCommandsResultInfo MergeTreeData::freezePartitionsByMatcher( // Store metadata for replicated table. // Do nothing for non-replicated. 
- createAndStoreFreezeMetadata(disk, part, fs::path(backup_part_path) / part->data_part_storage->getPartDirectory()); + createAndStoreFreezeMetadata(disk, part, fs::path(backup_part_path) / part->getDataPartStorage().getPartDirectory()); }; auto new_storage = data_part_storage->freeze( backup_part_path, - part->data_part_storage->getPartDirectory(), + part->getDataPartStorage().getPartDirectory(), /*make_source_readonly*/ true, callback, /*copy_instead_of_hardlink*/ false, @@ -6569,8 +6596,8 @@ try if (result_part) { - part_log_elem.disk_name = result_part->data_part_storage->getDiskName(); - part_log_elem.path_on_disk = result_part->data_part_storage->getFullPath(); + part_log_elem.disk_name = result_part->getDataPartStorage().getDiskName(); + part_log_elem.path_on_disk = result_part->getDataPartStorage().getFullPath(); part_log_elem.bytes_compressed_on_disk = result_part->getBytesOnDisk(); part_log_elem.rows = result_part->rows_count; part_log_elem.part_type = result_part->getType(); @@ -6726,7 +6753,7 @@ bool MergeTreeData::moveParts(const CurrentlyMovingPartsTaggerPtr & moving_tagge for (const auto & moving_part : moving_tagger->parts_to_move) { Stopwatch stopwatch; - DataPartPtr cloned_part; + MutableDataPartPtr cloned_part; auto write_part_log = [&](const ExecutionStatus & execution_status) { @@ -6989,7 +7016,7 @@ ReservationPtr MergeTreeData::balancedReservation( if (part->isStoredOnDisk() && part->getBytesOnDisk() >= min_bytes_to_rebalance_partition_over_jbod && part_info.partition_id == part->info.partition_id) { - auto name = part->data_part_storage->getDiskName(); + auto name = part->getDataPartStorage().getDiskName(); auto it = disk_occupation.find(name); if (it != disk_occupation.end()) { @@ -7097,18 +7124,18 @@ ReservationPtr MergeTreeData::balancedReservation( return reserved_space; } -ColumnsDescription MergeTreeData::getObjectColumns( +ColumnsDescription MergeTreeData::getConcreteObjectColumns( const DataPartsVector & parts, const ColumnsDescription & storage_columns) { - return DB::getObjectColumns( + return DB::getConcreteObjectColumns( parts.begin(), parts.end(), storage_columns, [](const auto & part) -> const auto & { return part->getColumns(); }); } -ColumnsDescription MergeTreeData::getObjectColumns( +ColumnsDescription MergeTreeData::getConcreteObjectColumns( boost::iterator_range range, const ColumnsDescription & storage_columns) { - return DB::getObjectColumns( + return DB::getConcreteObjectColumns( range.begin(), range.end(), storage_columns, [](const auto & part) -> const auto & { return part->getColumns(); }); } @@ -7117,21 +7144,21 @@ void MergeTreeData::resetObjectColumnsFromActiveParts(const DataPartsLock & /*lo { auto metadata_snapshot = getInMemoryMetadataPtr(); const auto & columns = metadata_snapshot->getColumns(); - if (!hasObjectColumns(columns)) + if (!hasDynamicSubcolumns(columns)) return; auto range = getDataPartsStateRange(DataPartState::Active); - object_columns = getObjectColumns(range, columns); + object_columns = getConcreteObjectColumns(range, columns); } void MergeTreeData::updateObjectColumns(const DataPartPtr & part, const DataPartsLock & /*lock*/) { auto metadata_snapshot = getInMemoryMetadataPtr(); const auto & columns = metadata_snapshot->getColumns(); - if (!hasObjectColumns(columns)) + if (!hasDynamicSubcolumns(columns)) return; - DB::updateObjectColumns(object_columns, part->getColumns()); + DB::updateObjectColumns(object_columns, columns, part->getColumns()); } StorageSnapshotPtr MergeTreeData::getStorageSnapshot(const 
StorageMetadataPtr & metadata_snapshot, ContextPtr query_context) const diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index c4a5d66ccbe..8bd0fc1f280 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -214,6 +214,7 @@ public: }; using DataParts = std::set; + using MutableDataParts = std::set; using DataPartsVector = std::vector; using DataPartsLock = std::unique_lock; @@ -225,15 +226,15 @@ public: /// After this method setColumns must be called MutableDataPartPtr createPart(const String & name, MergeTreeDataPartType type, const MergeTreePartInfo & part_info, - const DataPartStoragePtr & data_part_storage, const IMergeTreeDataPart * parent_part = nullptr) const; + const MutableDataPartStoragePtr & data_part_storage, const IMergeTreeDataPart * parent_part = nullptr) const; /// Create part, that already exists on filesystem. /// After this methods 'loadColumnsChecksumsIndexes' must be called. MutableDataPartPtr createPart(const String & name, - const DataPartStoragePtr & data_part_storage, const IMergeTreeDataPart * parent_part = nullptr) const; + const MutableDataPartStoragePtr & data_part_storage, const IMergeTreeDataPart * parent_part = nullptr) const; MutableDataPartPtr createPart(const String & name, const MergeTreePartInfo & part_info, - const DataPartStoragePtr & data_part_storage, const IMergeTreeDataPart * parent_part = nullptr) const; + const MutableDataPartStoragePtr & data_part_storage, const IMergeTreeDataPart * parent_part = nullptr) const; /// Auxiliary object to add a set of parts into the working set in two steps: /// * First, as PreActive parts (the parts are ready, but not yet in the active set). @@ -247,7 +248,7 @@ public: DataPartsVector commit(MergeTreeData::DataPartsLock * acquired_parts_lock = nullptr); - void addPart(MutableDataPartPtr & part, DataPartStorageBuilderPtr builder); + void addPart(MutableDataPartPtr & part); void rollback(); @@ -275,9 +276,8 @@ public: MergeTreeData & data; MergeTreeTransaction * txn; - DataParts precommitted_parts; - std::vector part_builders; - DataParts locked_parts; + MutableDataParts precommitted_parts; + MutableDataParts locked_parts; bool has_in_memory_parts = false; void clear(); @@ -414,9 +414,8 @@ public: SelectQueryInfo & info) const override; ReservationPtr reserveSpace(UInt64 expected_size, VolumePtr & volume) const; - static ReservationPtr tryReserveSpace(UInt64 expected_size, const DataPartStoragePtr & data_part_storage); - static ReservationPtr reserveSpace(UInt64 expected_size, const DataPartStoragePtr & data_part_storage); - static ReservationPtr reserveSpace(UInt64 expected_size, const DataPartStorageBuilderPtr & data_part_storage_builder); + static ReservationPtr tryReserveSpace(UInt64 expected_size, const IDataPartStorage & data_part_storage); + static ReservationPtr reserveSpace(UInt64 expected_size, const IDataPartStorage & data_part_storage); static bool partsContainSameProjections(const DataPartPtr & left, const DataPartPtr & right); @@ -555,21 +554,18 @@ public: bool renameTempPartAndAdd( MutableDataPartPtr & part, Transaction & transaction, - DataPartStorageBuilderPtr builder, DataPartsLock & lock); /// The same as renameTempPartAndAdd but the block range of the part can contain existing parts. /// Returns all parts covered by the added part (in ascending order). 
DataPartsVector renameTempPartAndReplace( MutableDataPartPtr & part, - Transaction & out_transaction, - DataPartStorageBuilderPtr builder); + Transaction & out_transaction); /// Unlocked version of previous one. Useful when added multiple parts with a single lock. DataPartsVector renameTempPartAndReplaceUnlocked( MutableDataPartPtr & part, Transaction & out_transaction, - DataPartStorageBuilderPtr builder, DataPartsLock & lock); /// Remove parts from working set immediately (without wait for background @@ -588,10 +584,33 @@ public: /// Used in REPLACE PARTITION command. void removePartsInRangeFromWorkingSet(MergeTreeTransaction * txn, const MergeTreePartInfo & drop_range, DataPartsLock & lock); + /// This wrapper is required to restrict access to parts in Deleting state + class PartToRemoveFromZooKeeper + { + DataPartPtr part; + bool was_active; + + public: + explicit PartToRemoveFromZooKeeper(DataPartPtr && part_, bool was_active_ = true) + : part(std::move(part_)), was_active(was_active_) + { + } + + /// It's safe to get name of any part + const String & getPartName() const { return part->name; } + + DataPartPtr getPartIfItWasActive() const + { + return was_active ? part : nullptr; + } + }; + + using PartsToRemoveFromZooKeeper = std::vector; + /// Same as above, but also returns list of parts to remove from ZooKeeper. /// It includes parts that have been just removed by these method /// and Outdated parts covered by drop_range that were removed earlier for any reason. - DataPartsVector removePartsInRangeFromWorkingSetAndGetPartsToRemoveFromZooKeeper( + PartsToRemoveFromZooKeeper removePartsInRangeFromWorkingSetAndGetPartsToRemoveFromZooKeeper( MergeTreeTransaction * txn, const MergeTreePartInfo & drop_range, DataPartsLock & lock); /// Restores Outdated part and adds it to working set @@ -644,6 +663,9 @@ public: /// Deletes the data directory and flushes the uncompressed blocks cache and the marks cache. void dropAllData(); + /// This flag is for hardening and assertions. + bool all_data_dropped = false; + /// Drop data directories if they are empty. It is safe to call this method if table creation was unsuccessful. void dropIfEmpty(); @@ -757,10 +779,10 @@ public: return column_sizes; } - const ColumnsDescription & getObjectColumns() const { return object_columns; } + const ColumnsDescription & getConcreteObjectColumns() const { return object_columns; } /// Creates description of columns of data type Object from the range of data parts. - static ColumnsDescription getObjectColumns( + static ColumnsDescription getConcreteObjectColumns( const DataPartsVector & parts, const ColumnsDescription & storage_columns); IndexSizeByName getSecondaryIndexSizes() const override @@ -979,7 +1001,7 @@ public: /// Fetch part only if some replica has it on shared storage like S3 /// Overridden in StorageReplicatedMergeTree - virtual DataPartStoragePtr tryToFetchIfShared(const IMergeTreeDataPart &, const DiskPtr &, const String &) { return nullptr; } + virtual MutableDataPartStoragePtr tryToFetchIfShared(const IMergeTreeDataPart &, const DiskPtr &, const String &) { return nullptr; } /// Check shared data usage on other replicas for detached/freezed part /// Remove local files and remote files if needed @@ -1129,7 +1151,7 @@ protected: } /// Creates description of columns of data type Object from the range of data parts. 
- static ColumnsDescription getObjectColumns( + static ColumnsDescription getConcreteObjectColumns( boost::iterator_range range, const ColumnsDescription & storage_columns); std::optional totalRowsByPartitionPredicateImpl( @@ -1264,13 +1286,12 @@ protected: static void incrementMergedPartsProfileEvent(MergeTreeDataPartType type); private: - /// Checking that candidate part doesn't break invariants: correct partition and doesn't exist already void checkPartCanBeAddedToTable(MutableDataPartPtr & part, DataPartsLock & lock) const; /// Preparing itself to be committed in memory: fill some fields inside part, add it to data_parts_indexes /// in precommitted state and to transaction - void preparePartForCommit(MutableDataPartPtr & part, Transaction & out_transaction, DataPartStorageBuilderPtr builder); + void preparePartForCommit(MutableDataPartPtr & part, Transaction & out_transaction); /// Low-level method for preparing parts for commit (in-memory). /// FIXME Merge MergeTreeTransaction and Transaction @@ -1278,7 +1299,6 @@ private: MutableDataPartPtr & part, Transaction & out_transaction, DataPartsLock & lock, - DataPartStorageBuilderPtr builder, DataPartsVector * out_covered_parts); /// RAII Wrapper for atomic work with currently moving parts @@ -1334,8 +1354,8 @@ private: virtual std::unique_ptr getDefaultSettings() const = 0; void loadDataPartsFromDisk( - DataPartsVector & broken_parts_to_detach, - DataPartsVector & duplicate_parts_to_remove, + MutableDataPartsVector & broken_parts_to_detach, + MutableDataPartsVector & duplicate_parts_to_remove, ThreadPool & pool, size_t num_parts, std::queue>> & parts_queue, @@ -1343,8 +1363,7 @@ private: const MergeTreeSettingsPtr & settings); void loadDataPartsFromWAL( - DataPartsVector & broken_parts_to_detach, - DataPartsVector & duplicate_parts_to_remove, + MutableDataPartsVector & duplicate_parts_to_remove, MutableDataPartsVector & parts_from_wal); /// Create zero-copy exclusive lock for part and disk. Useful for coordination of @@ -1356,6 +1375,8 @@ private: /// Otherwise, in non-parallel case will break and return. 
void clearPartsFromFilesystemImpl(const DataPartsVector & parts, NameSet * part_names_succeed); + static MutableDataPartPtr preparePartForRemoval(const DataPartPtr & part); + TemporaryParts temporary_parts; }; diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 1a5c94a2e26..0b5c5285d15 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -333,6 +333,7 @@ SelectPartsDecision MergeTreeDataMergerMutator::selectPartsToMerge( SimpleMergeSelector::Settings merge_settings; /// Override value from table settings merge_settings.max_parts_to_merge_at_once = data_settings->max_parts_to_merge_at_once; + merge_settings.min_age_to_force_merge = data_settings->min_age_to_force_merge_seconds; if (aggressive) merge_settings.base = 1; @@ -482,8 +483,7 @@ MergeTaskPtr MergeTreeDataMergerMutator::mergePartsToTemporaryPart( const Names & deduplicate_by_columns, const MergeTreeData::MergingParams & merging_params, const MergeTreeTransactionPtr & txn, - const IMergeTreeDataPart * parent_part, - const IDataPartStorageBuilder * parent_path_storage_builder, + IMergeTreeDataPart * parent_part, const String & suffix) { return std::make_shared( @@ -498,7 +498,6 @@ MergeTaskPtr MergeTreeDataMergerMutator::mergePartsToTemporaryPart( deduplicate_by_columns, merging_params, parent_part, - parent_path_storage_builder, suffix, txn, &data, @@ -540,8 +539,7 @@ MergeTreeData::DataPartPtr MergeTreeDataMergerMutator::renameMergedTemporaryPart MergeTreeData::MutableDataPartPtr & new_data_part, const MergeTreeData::DataPartsVector & parts, const MergeTreeTransactionPtr & txn, - MergeTreeData::Transaction & out_transaction, - DataPartStorageBuilderPtr builder) + MergeTreeData::Transaction & out_transaction) { /// Some of source parts was possibly created in transaction, so non-transactional merge may break isolation. if (data.transactions_enabled.load(std::memory_order_relaxed) && !txn) @@ -549,7 +547,7 @@ MergeTreeData::DataPartPtr MergeTreeDataMergerMutator::renameMergedTemporaryPart "but transactions were enabled for this table"); /// Rename new part, add to the set and remove original parts. - auto replaced_parts = data.renameTempPartAndReplace(new_data_part, out_transaction, builder); + auto replaced_parts = data.renameTempPartAndReplace(new_data_part, out_transaction); /// Let's check that all original parts have been deleted and only them. if (replaced_parts.size() != parts.size()) diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.h b/src/Storages/MergeTree/MergeTreeDataMergerMutator.h index 14eb82c641c..5d98f526325 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.h +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.h @@ -113,8 +113,7 @@ public: const Names & deduplicate_by_columns, const MergeTreeData::MergingParams & merging_params, const MergeTreeTransactionPtr & txn, - const IMergeTreeDataPart * parent_part = nullptr, - const IDataPartStorageBuilder * parent_path_storage_builder = nullptr, + IMergeTreeDataPart * parent_part = nullptr, const String & suffix = ""); /// Mutate a single data part with the specified commands. Will create and return a temporary part. 
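For orientation, a minimal sketch of the commit flow these signature changes imply once the DataPartStorageBuilderPtr parameter is gone: the part's own mutable storage carries the pending filesystem transaction (beginTransaction()/commitTransaction() on IDataPartStorage), renameTempPartAndReplace() only records the part in the MergeTreeData::Transaction, and Transaction::commit() finalizes any active storage transaction. The caller below is hypothetical and its construction is elided; it is assembled from the signatures shown in this patch, not taken from it:

/// Sketch only: `data`, `new_data_part` and `out_transaction` are placeholders.
void commitMergedPart(MergeTreeData & data,
                      MergeTreeData::MutableDataPartPtr & new_data_part,
                      MergeTreeData::Transaction & out_transaction)
{
    /// The temporary part was written through its own IDataPartStorage, which may
    /// still hold an uncommitted storage transaction at this point.
    data.renameTempPartAndReplace(new_data_part, out_transaction);  /// no builder argument any more

    /// commit() walks precommitted_parts and calls
    /// part->getDataPartStorage().commitTransaction() wherever hasActiveTransaction() is true.
    out_transaction.commit();
}

The apparent intent is to keep the on-disk rename and the in-memory part-set update behind a single commit point instead of threading a separate builder object through every call site.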
@@ -133,8 +132,7 @@ public: MergeTreeData::MutableDataPartPtr & new_data_part, const MergeTreeData::DataPartsVector & parts, const MergeTreeTransactionPtr & txn, - MergeTreeData::Transaction & out_transaction, - DataPartStorageBuilderPtr builder); + MergeTreeData::Transaction & out_transaction); /// The approximate amount of disk space needed for merge or mutation. With a surplus. diff --git a/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp b/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp index 9298e841072..a537b44d9ea 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp @@ -22,7 +22,7 @@ namespace ErrorCodes MergeTreeDataPartCompact::MergeTreeDataPartCompact( MergeTreeData & storage_, const String & name_, - const DataPartStoragePtr & data_part_storage_, + const MutableDataPartStoragePtr & data_part_storage_, const IMergeTreeDataPart * parent_part_) : IMergeTreeDataPart(storage_, name_, data_part_storage_, Type::Compact, parent_part_) { @@ -32,7 +32,7 @@ MergeTreeDataPartCompact::MergeTreeDataPartCompact( const MergeTreeData & storage_, const String & name_, const MergeTreePartInfo & info_, - const DataPartStoragePtr & data_part_storage_, + const MutableDataPartStoragePtr & data_part_storage_, const IMergeTreeDataPart * parent_part_) : IMergeTreeDataPart(storage_, name_, info_, data_part_storage_, Type::Compact, parent_part_) { @@ -58,13 +58,12 @@ IMergeTreeDataPart::MergeTreeReaderPtr MergeTreeDataPartCompact::getReader( } IMergeTreeDataPart::MergeTreeWriterPtr MergeTreeDataPartCompact::getWriter( - DataPartStorageBuilderPtr data_part_storage_builder, const NamesAndTypesList & columns_list, const StorageMetadataPtr & metadata_snapshot, const std::vector & indices_to_recalc, const CompressionCodecPtr & default_codec_, const MergeTreeWriterSettings & writer_settings, - const MergeTreeIndexGranularity & computed_index_granularity) const + const MergeTreeIndexGranularity & computed_index_granularity) { NamesAndTypesList ordered_columns_list; std::copy_if(columns_list.begin(), columns_list.end(), std::back_inserter(ordered_columns_list), @@ -75,7 +74,7 @@ IMergeTreeDataPart::MergeTreeWriterPtr MergeTreeDataPartCompact::getWriter( { return *getColumnPosition(lhs.name) < *getColumnPosition(rhs.name); }); return std::make_unique( - shared_from_this(), std::move(data_part_storage_builder), ordered_columns_list, metadata_snapshot, + shared_from_this(), ordered_columns_list, metadata_snapshot, indices_to_recalc, getMarksFileExtension(), default_codec_, writer_settings, computed_index_granularity); } @@ -97,21 +96,21 @@ void MergeTreeDataPartCompact::calculateEachColumnSizes(ColumnSizeByName & /*eac void MergeTreeDataPartCompact::loadIndexGranularityImpl( MergeTreeIndexGranularity & index_granularity_, const MergeTreeIndexGranularityInfo & index_granularity_info_, - size_t columns_count, const DataPartStoragePtr & data_part_storage_) + size_t columns_count, const IDataPartStorage & data_part_storage_) { if (!index_granularity_info_.mark_type.adaptive) throw Exception("MergeTreeDataPartCompact cannot be created with non-adaptive granulary.", ErrorCodes::NOT_IMPLEMENTED); auto marks_file_path = index_granularity_info_.getMarksFilePath("data"); - if (!data_part_storage_->exists(marks_file_path)) + if (!data_part_storage_.exists(marks_file_path)) throw Exception( ErrorCodes::NO_FILE_IN_DATA_PART, "Marks file '{}' doesn't exist", - std::string(fs::path(data_part_storage_->getFullPath()) / marks_file_path)); + 
std::string(fs::path(data_part_storage_.getFullPath()) / marks_file_path)); - size_t marks_file_size = data_part_storage_->getFileSize(marks_file_path); + size_t marks_file_size = data_part_storage_.getFileSize(marks_file_path); - std::unique_ptr buffer = data_part_storage_->readFile( + std::unique_ptr buffer = data_part_storage_.readFile( marks_file_path, ReadSettings().adjustBufferSize(marks_file_size), marks_file_size, std::nullopt); std::unique_ptr marks_reader; @@ -140,7 +139,7 @@ void MergeTreeDataPartCompact::loadIndexGranularity() if (columns.empty()) throw Exception("No columns in part " + name, ErrorCodes::NO_FILE_IN_DATA_PART); - loadIndexGranularityImpl(index_granularity, index_granularity_info, columns.size(), data_part_storage); + loadIndexGranularityImpl(index_granularity, index_granularity_info, columns.size(), getDataPartStorage()); } bool MergeTreeDataPartCompact::hasColumnFiles(const NameAndTypePair & column) const @@ -171,12 +170,12 @@ void MergeTreeDataPartCompact::checkConsistency(bool require_part_metadata) cons throw Exception( ErrorCodes::NO_FILE_IN_DATA_PART, "No marks file checksum for column in part {}", - data_part_storage->getFullPath()); + getDataPartStorage().getFullPath()); if (!checksums.files.contains(DATA_FILE_NAME_WITH_EXTENSION)) throw Exception( ErrorCodes::NO_FILE_IN_DATA_PART, "No data file checksum for in part {}", - data_part_storage->getFullPath()); + getDataPartStorage().getFullPath()); } } else @@ -184,33 +183,33 @@ void MergeTreeDataPartCompact::checkConsistency(bool require_part_metadata) cons { /// count.txt should be present even in non custom-partitioned parts std::string file_path = "count.txt"; - if (!data_part_storage->exists(file_path) || data_part_storage->getFileSize(file_path) == 0) + if (!getDataPartStorage().exists(file_path) || getDataPartStorage().getFileSize(file_path) == 0) throw Exception( ErrorCodes::BAD_SIZE_OF_FILE_IN_DATA_PART, "Part {} is broken: {} is empty", - data_part_storage->getRelativePath(), - std::string(fs::path(data_part_storage->getFullPath()) / file_path)); + getDataPartStorage().getRelativePath(), + std::string(fs::path(getDataPartStorage().getFullPath()) / file_path)); } /// Check that marks are nonempty and have the consistent size with columns number. 
- if (data_part_storage->exists(mrk_file_name)) + if (getDataPartStorage().exists(mrk_file_name)) { - UInt64 file_size = data_part_storage->getFileSize(mrk_file_name); + UInt64 file_size = getDataPartStorage().getFileSize(mrk_file_name); if (!file_size) throw Exception( ErrorCodes::BAD_SIZE_OF_FILE_IN_DATA_PART, "Part {} is broken: {} is empty.", - data_part_storage->getRelativePath(), - std::string(fs::path(data_part_storage->getFullPath()) / mrk_file_name)); + getDataPartStorage().getRelativePath(), + std::string(fs::path(getDataPartStorage().getFullPath()) / mrk_file_name)); UInt64 expected_file_size = index_granularity_info.getMarkSizeInBytes(columns.size()) * index_granularity.getMarksCount(); if (expected_file_size != file_size) throw Exception( ErrorCodes::BAD_SIZE_OF_FILE_IN_DATA_PART, "Part {} is broken: bad size of marks file '{}': {}, must be: {}", - data_part_storage->getRelativePath(), - std::string(fs::path(data_part_storage->getFullPath()) / mrk_file_name), + getDataPartStorage().getRelativePath(), + std::string(fs::path(getDataPartStorage().getFullPath()) / mrk_file_name), std::to_string(file_size), std::to_string(expected_file_size)); } } @@ -218,12 +217,12 @@ void MergeTreeDataPartCompact::checkConsistency(bool require_part_metadata) cons bool MergeTreeDataPartCompact::isStoredOnRemoteDisk() const { - return data_part_storage->isStoredOnRemoteDisk(); + return getDataPartStorage().isStoredOnRemoteDisk(); } bool MergeTreeDataPartCompact::isStoredOnRemoteDiskWithZeroCopySupport() const { - return data_part_storage->supportZeroCopyReplication(); + return getDataPartStorage().supportZeroCopyReplication(); } MergeTreeDataPartCompact::~MergeTreeDataPartCompact() diff --git a/src/Storages/MergeTree/MergeTreeDataPartCompact.h b/src/Storages/MergeTree/MergeTreeDataPartCompact.h index d3ac71cb02a..e275c586cb9 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartCompact.h +++ b/src/Storages/MergeTree/MergeTreeDataPartCompact.h @@ -25,13 +25,13 @@ public: const MergeTreeData & storage_, const String & name_, const MergeTreePartInfo & info_, - const DataPartStoragePtr & data_part_storage_, + const MutableDataPartStoragePtr & data_part_storage_, const IMergeTreeDataPart * parent_part_ = nullptr); MergeTreeDataPartCompact( MergeTreeData & storage_, const String & name_, - const DataPartStoragePtr & data_part_storage_, + const MutableDataPartStoragePtr & data_part_storage_, const IMergeTreeDataPart * parent_part_ = nullptr); MergeTreeReaderPtr getReader( @@ -45,13 +45,12 @@ public: const ReadBufferFromFileBase::ProfileCallback & profile_callback) const override; MergeTreeWriterPtr getWriter( - DataPartStorageBuilderPtr data_part_storage_builder, const NamesAndTypesList & columns_list, const StorageMetadataPtr & metadata_snapshot, const std::vector & indices_to_recalc, const CompressionCodecPtr & default_codec_, const MergeTreeWriterSettings & writer_settings, - const MergeTreeIndexGranularity & computed_index_granularity) const override; + const MergeTreeIndexGranularity & computed_index_granularity) override; bool isStoredOnDisk() const override { return true; } @@ -68,7 +67,7 @@ public: protected: static void loadIndexGranularityImpl( MergeTreeIndexGranularity & index_granularity_, const MergeTreeIndexGranularityInfo & index_granularity_info_, - size_t columns_count, const DataPartStoragePtr & data_part_storage_); + size_t columns_count, const IDataPartStorage & data_part_storage_); private: void checkConsistency(bool require_part_metadata) const override; diff --git 
a/src/Storages/MergeTree/MergeTreeDataPartInMemory.cpp b/src/Storages/MergeTree/MergeTreeDataPartInMemory.cpp index c7c831c23ec..48b1b6bab60 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartInMemory.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartInMemory.cpp @@ -1,10 +1,12 @@ -#include "MergeTreeDataPartInMemory.h" +#include #include #include #include #include #include +#include #include +#include #include #include #include @@ -21,7 +23,7 @@ namespace ErrorCodes MergeTreeDataPartInMemory::MergeTreeDataPartInMemory( MergeTreeData & storage_, const String & name_, - const DataPartStoragePtr & data_part_storage_, + const MutableDataPartStoragePtr & data_part_storage_, const IMergeTreeDataPart * parent_part_) : IMergeTreeDataPart(storage_, name_, data_part_storage_, Type::InMemory, parent_part_) { @@ -32,7 +34,7 @@ MergeTreeDataPartInMemory::MergeTreeDataPartInMemory( const MergeTreeData & storage_, const String & name_, const MergeTreePartInfo & info_, - const DataPartStoragePtr & data_part_storage_, + const MutableDataPartStoragePtr & data_part_storage_, const IMergeTreeDataPart * parent_part_) : IMergeTreeDataPart(storage_, name_, info_, data_part_storage_, Type::InMemory, parent_part_) { @@ -56,27 +58,33 @@ IMergeTreeDataPart::MergeTreeReaderPtr MergeTreeDataPartInMemory::getReader( } IMergeTreeDataPart::MergeTreeWriterPtr MergeTreeDataPartInMemory::getWriter( - DataPartStorageBuilderPtr data_part_storage_builder_, const NamesAndTypesList & columns_list, const StorageMetadataPtr & metadata_snapshot, const std::vector & /* indices_to_recalc */, const CompressionCodecPtr & /* default_codec */, const MergeTreeWriterSettings & writer_settings, - const MergeTreeIndexGranularity & /* computed_index_granularity */) const + const MergeTreeIndexGranularity & /* computed_index_granularity */) { - data_part_storage_builder = data_part_storage_builder_; - auto ptr = std::static_pointer_cast(shared_from_this()); + auto ptr = std::static_pointer_cast(shared_from_this()); return std::make_unique( ptr, columns_list, metadata_snapshot, writer_settings); } -DataPartStoragePtr MergeTreeDataPartInMemory::flushToDisk(const String & new_relative_path, const StorageMetadataPtr & metadata_snapshot) const +MutableDataPartStoragePtr MergeTreeDataPartInMemory::flushToDisk(const String & new_relative_path, const StorageMetadataPtr & metadata_snapshot) const { - auto current_full_path = data_part_storage_builder->getFullPath(); - data_part_storage_builder->setRelativePath(new_relative_path); + auto reservation = storage.reserveSpace(block.bytes(), getDataPartStorage()); + VolumePtr volume = storage.getStoragePolicy()->getVolume(0); + VolumePtr data_part_volume = createVolumeFromReservation(reservation, volume); + auto new_data_part_storage = std::make_shared( + data_part_volume, + storage.getRelativeDataPath(), + new_relative_path); + + new_data_part_storage->beginTransaction(); + + auto current_full_path = getDataPartStorage().getFullPath(); auto new_type = storage.choosePartTypeOnDisk(block.bytes(), rows_count); - auto new_data_part_storage = data_part_storage_builder->getStorage(); auto new_data_part = storage.createPart(name, new_type, info, new_data_part_storage); new_data_part->uuid = uuid; @@ -84,50 +92,50 @@ DataPartStoragePtr MergeTreeDataPartInMemory::flushToDisk(const String & new_rel new_data_part->partition.value = partition.value; new_data_part->minmax_idx = minmax_idx; - if (data_part_storage_builder->exists()) + if (new_data_part_storage->exists()) { throw Exception( 
ErrorCodes::DIRECTORY_ALREADY_EXISTS, "Could not flush part {}. Part in {} already exists", quoteString(current_full_path), - data_part_storage_builder->getFullPath()); + new_data_part_storage->getFullPath()); } - data_part_storage_builder->createDirectories(); + new_data_part_storage->createDirectories(); auto compression_codec = storage.getContext()->chooseCompressionCodec(0, 0); auto indices = MergeTreeIndexFactory::instance().getMany(metadata_snapshot->getSecondaryIndices()); - MergedBlockOutputStream out(new_data_part, data_part_storage_builder, metadata_snapshot, columns, indices, compression_codec, NO_TRANSACTION_PTR); + MergedBlockOutputStream out(new_data_part, metadata_snapshot, columns, indices, compression_codec, NO_TRANSACTION_PTR); out.write(block); const auto & projections = metadata_snapshot->getProjections(); for (const auto & [projection_name, projection] : projection_parts) { if (projections.has(projection_name)) { - auto projection_part_storage_builder = data_part_storage_builder->getProjection(projection_name + ".proj"); - if (projection_part_storage_builder->exists()) + auto projection_part_storage = new_data_part_storage->getProjection(projection_name + ".proj"); + if (projection_part_storage->exists()) { throw Exception( ErrorCodes::DIRECTORY_ALREADY_EXISTS, "Could not flush projection part {}. Projection part in {} already exists", projection_name, - projection_part_storage_builder->getFullPath()); + projection_part_storage->getFullPath()); } auto projection_part = asInMemoryPart(projection); auto projection_type = storage.choosePartTypeOnDisk(projection_part->block.bytes(), rows_count); MergeTreePartInfo projection_info("all", 0, 0, 0); auto projection_data_part - = storage.createPart(projection_name, projection_type, projection_info, projection_part_storage_builder->getStorage(), parent_part); + = storage.createPart(projection_name, projection_type, projection_info, projection_part_storage, parent_part); projection_data_part->is_temp = false; // clean up will be done on parent part projection_data_part->setColumns(projection->getColumns(), {}); - projection_part_storage_builder->createDirectories(); + projection_part_storage->createDirectories(); const auto & desc = projections.get(name); auto projection_compression_codec = storage.getContext()->chooseCompressionCodec(0, 0); auto projection_indices = MergeTreeIndexFactory::instance().getMany(desc.metadata->getSecondaryIndices()); MergedBlockOutputStream projection_out( - projection_data_part, projection_part_storage_builder, desc.metadata, projection_part->columns, projection_indices, + projection_data_part, desc.metadata, projection_part->columns, projection_indices, projection_compression_codec, NO_TRANSACTION_PTR); projection_out.write(projection_part->block); @@ -137,21 +145,19 @@ DataPartStoragePtr MergeTreeDataPartInMemory::flushToDisk(const String & new_rel } out.finalizePart(new_data_part, false); + new_data_part_storage->commitTransaction(); return new_data_part_storage; } void MergeTreeDataPartInMemory::makeCloneInDetached(const String & prefix, const StorageMetadataPtr & metadata_snapshot) const { - String detached_path = getRelativePathForDetachedPart(prefix); + String detached_path = *getRelativePathForDetachedPart(prefix, /* broken */ false); flushToDisk(detached_path, metadata_snapshot); } -void MergeTreeDataPartInMemory::renameTo(const String & new_relative_path, bool /* remove_new_dir_if_exists */, DataPartStorageBuilderPtr) const +void MergeTreeDataPartInMemory::renameTo(const String & 
new_relative_path, bool /* remove_new_dir_if_exists */) { - data_part_storage->setRelativePath(new_relative_path); - - if (data_part_storage_builder) - data_part_storage_builder->setRelativePath(new_relative_path); + getDataPartStorage().setRelativePath(new_relative_path); } void MergeTreeDataPartInMemory::calculateEachColumnSizes(ColumnSizeByName & each_columns_size, ColumnSize & total_size) const diff --git a/src/Storages/MergeTree/MergeTreeDataPartInMemory.h b/src/Storages/MergeTree/MergeTreeDataPartInMemory.h index d985c7f055e..e58701b04a1 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartInMemory.h +++ b/src/Storages/MergeTree/MergeTreeDataPartInMemory.h @@ -14,13 +14,13 @@ public: const MergeTreeData & storage_, const String & name_, const MergeTreePartInfo & info_, - const DataPartStoragePtr & data_part_storage_, + const MutableDataPartStoragePtr & data_part_storage_, const IMergeTreeDataPart * parent_part_ = nullptr); MergeTreeDataPartInMemory( MergeTreeData & storage_, const String & name_, - const DataPartStoragePtr & data_part_storage_, + const MutableDataPartStoragePtr & data_part_storage_, const IMergeTreeDataPart * parent_part_ = nullptr); MergeTreeReaderPtr getReader( @@ -34,29 +34,27 @@ public: const ReadBufferFromFileBase::ProfileCallback & profile_callback) const override; MergeTreeWriterPtr getWriter( - DataPartStorageBuilderPtr data_part_storage_builder_, const NamesAndTypesList & columns_list, const StorageMetadataPtr & metadata_snapshot, const std::vector & indices_to_recalc, const CompressionCodecPtr & default_codec_, const MergeTreeWriterSettings & writer_settings, - const MergeTreeIndexGranularity & computed_index_granularity) const override; + const MergeTreeIndexGranularity & computed_index_granularity) override; bool isStoredOnDisk() const override { return false; } bool isStoredOnRemoteDisk() const override { return false; } bool isStoredOnRemoteDiskWithZeroCopySupport() const override { return false; } bool hasColumnFiles(const NameAndTypePair & column) const override { return !!getColumnPosition(column.getNameInStorage()); } String getFileNameForColumn(const NameAndTypePair & /* column */) const override { return ""; } - void renameTo(const String & new_relative_path, bool remove_new_dir_if_exists, DataPartStorageBuilderPtr) const override; + void renameTo(const String & new_relative_path, bool remove_new_dir_if_exists) override; void makeCloneInDetached(const String & prefix, const StorageMetadataPtr & metadata_snapshot) const override; - DataPartStoragePtr flushToDisk(const String & new_relative_path, const StorageMetadataPtr & metadata_snapshot) const; + MutableDataPartStoragePtr flushToDisk(const String & new_relative_path, const StorageMetadataPtr & metadata_snapshot) const; /// Returns hash of parts's block Checksum calculateBlockChecksum() const; mutable Block block; - mutable DataPartStorageBuilderPtr data_part_storage_builder; private: mutable std::condition_variable is_merged; @@ -66,6 +64,8 @@ private: }; using DataPartInMemoryPtr = std::shared_ptr; +using MutableDataPartInMemoryPtr = std::shared_ptr; + DataPartInMemoryPtr asInMemoryPart(const MergeTreeDataPartPtr & part); } diff --git a/src/Storages/MergeTree/MergeTreeDataPartWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWide.cpp index 170d1b1d703..2418960f992 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWide.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWide.cpp @@ -21,7 +21,7 @@ namespace ErrorCodes MergeTreeDataPartWide::MergeTreeDataPartWide( MergeTreeData & storage_, const 
String & name_, - const DataPartStoragePtr & data_part_storage_, + const MutableDataPartStoragePtr & data_part_storage_, const IMergeTreeDataPart * parent_part_) : IMergeTreeDataPart(storage_, name_, data_part_storage_, Type::Wide, parent_part_) { @@ -31,7 +31,7 @@ MergeTreeDataPartWide::MergeTreeDataPartWide( const MergeTreeData & storage_, const String & name_, const MergeTreePartInfo & info_, - const DataPartStoragePtr & data_part_storage_, + const MutableDataPartStoragePtr & data_part_storage_, const IMergeTreeDataPart * parent_part_) : IMergeTreeDataPart(storage_, name_, info_, data_part_storage_, Type::Wide, parent_part_) { @@ -56,17 +56,16 @@ IMergeTreeDataPart::MergeTreeReaderPtr MergeTreeDataPartWide::getReader( } IMergeTreeDataPart::MergeTreeWriterPtr MergeTreeDataPartWide::getWriter( - DataPartStorageBuilderPtr data_part_storage_builder, const NamesAndTypesList & columns_list, const StorageMetadataPtr & metadata_snapshot, const std::vector & indices_to_recalc, const CompressionCodecPtr & default_codec_, const MergeTreeWriterSettings & writer_settings, - const MergeTreeIndexGranularity & computed_index_granularity) const + const MergeTreeIndexGranularity & computed_index_granularity) { return std::make_unique( - shared_from_this(), data_part_storage_builder, - columns_list, metadata_snapshot, indices_to_recalc, + shared_from_this(), columns_list, + metadata_snapshot, indices_to_recalc, getMarksFileExtension(), default_codec_, writer_settings, computed_index_granularity); } @@ -105,18 +104,18 @@ ColumnSize MergeTreeDataPartWide::getColumnSizeImpl( void MergeTreeDataPartWide::loadIndexGranularityImpl( MergeTreeIndexGranularity & index_granularity_, MergeTreeIndexGranularityInfo & index_granularity_info_, - const DataPartStoragePtr & data_part_storage_, const std::string & any_column_file_name) + const IDataPartStorage & data_part_storage_, const std::string & any_column_file_name) { index_granularity_info_.changeGranularityIfRequired(data_part_storage_); /// We can use any column, it doesn't matter std::string marks_file_path = index_granularity_info_.getMarksFilePath(any_column_file_name); - if (!data_part_storage_->exists(marks_file_path)) + if (!data_part_storage_.exists(marks_file_path)) throw Exception( ErrorCodes::NO_FILE_IN_DATA_PART, "Marks file '{}' doesn't exist", - std::string(fs::path(data_part_storage_->getFullPath()) / marks_file_path)); + std::string(fs::path(data_part_storage_.getFullPath()) / marks_file_path)); - size_t marks_file_size = data_part_storage_->getFileSize(marks_file_path); + size_t marks_file_size = data_part_storage_.getFileSize(marks_file_path); if (!index_granularity_info_.mark_type.adaptive && !index_granularity_info_.mark_type.compressed) { @@ -126,7 +125,7 @@ void MergeTreeDataPartWide::loadIndexGranularityImpl( } else { - auto marks_file = data_part_storage_->readFile(marks_file_path, ReadSettings().adjustBufferSize(marks_file_size), marks_file_size, std::nullopt); + auto marks_file = data_part_storage_.readFile(marks_file_path, ReadSettings().adjustBufferSize(marks_file_size), marks_file_size, std::nullopt); std::unique_ptr marks_reader; if (!index_granularity_info_.mark_type.compressed) @@ -163,18 +162,18 @@ void MergeTreeDataPartWide::loadIndexGranularity() if (columns.empty()) throw Exception("No columns in part " + name, ErrorCodes::NO_FILE_IN_DATA_PART); - loadIndexGranularityImpl(index_granularity, index_granularity_info, data_part_storage, getFileNameForColumn(columns.front())); + loadIndexGranularityImpl(index_granularity, 
index_granularity_info, getDataPartStorage(), getFileNameForColumn(columns.front())); } bool MergeTreeDataPartWide::isStoredOnRemoteDisk() const { - return data_part_storage->isStoredOnRemoteDisk(); + return getDataPartStorage().isStoredOnRemoteDisk(); } bool MergeTreeDataPartWide::isStoredOnRemoteDiskWithZeroCopySupport() const { - return data_part_storage->supportZeroCopyReplication(); + return getDataPartStorage().supportZeroCopyReplication(); } MergeTreeDataPartWide::~MergeTreeDataPartWide() @@ -203,13 +202,13 @@ void MergeTreeDataPartWide::checkConsistency(bool require_part_metadata) const throw Exception( ErrorCodes::NO_FILE_IN_DATA_PART, "No {} file checksum for column {} in part {} ", - mrk_file_name, name_type.name, data_part_storage->getFullPath()); + mrk_file_name, name_type.name, getDataPartStorage().getFullPath()); if (!checksums.files.contains(bin_file_name)) throw Exception( ErrorCodes::NO_FILE_IN_DATA_PART, "No {} file checksum for column {} in part ", - bin_file_name, name_type.name, data_part_storage->getFullPath()); + bin_file_name, name_type.name, getDataPartStorage().getFullPath()); }); } } @@ -225,23 +224,23 @@ void MergeTreeDataPartWide::checkConsistency(bool require_part_metadata) const auto file_path = ISerialization::getFileNameForStream(name_type, substream_path) + marks_file_extension; /// Missing file is Ok for case when new column was added. - if (data_part_storage->exists(file_path)) + if (getDataPartStorage().exists(file_path)) { - UInt64 file_size = data_part_storage->getFileSize(file_path); + UInt64 file_size = getDataPartStorage().getFileSize(file_path); if (!file_size) throw Exception( ErrorCodes::BAD_SIZE_OF_FILE_IN_DATA_PART, "Part {} is broken: {} is empty.", - data_part_storage->getFullPath(), - std::string(fs::path(data_part_storage->getFullPath()) / file_path)); + getDataPartStorage().getFullPath(), + std::string(fs::path(getDataPartStorage().getFullPath()) / file_path)); if (!marks_size) marks_size = file_size; else if (file_size != *marks_size) throw Exception( ErrorCodes::BAD_SIZE_OF_FILE_IN_DATA_PART, - "Part {} is broken: marks have different sizes.", data_part_storage->getFullPath()); + "Part {} is broken: marks have different sizes.", getDataPartStorage().getFullPath()); } }); } diff --git a/src/Storages/MergeTree/MergeTreeDataPartWide.h b/src/Storages/MergeTree/MergeTreeDataPartWide.h index 52afa9e82d4..601bdff51a1 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWide.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWide.h @@ -1,5 +1,6 @@ #pragma once +#include "Storages/MergeTree/IDataPartStorage.h" #include namespace DB @@ -19,13 +20,13 @@ public: const MergeTreeData & storage_, const String & name_, const MergeTreePartInfo & info_, - const DataPartStoragePtr & data_part_storage_, + const MutableDataPartStoragePtr & data_part_storage_, const IMergeTreeDataPart * parent_part_ = nullptr); MergeTreeDataPartWide( MergeTreeData & storage_, const String & name_, - const DataPartStoragePtr & data_part_storage_, + const MutableDataPartStoragePtr & data_part_storage_, const IMergeTreeDataPart * parent_part_ = nullptr); MergeTreeReaderPtr getReader( @@ -39,13 +40,12 @@ public: const ReadBufferFromFileBase::ProfileCallback & profile_callback) const override; MergeTreeWriterPtr getWriter( - DataPartStorageBuilderPtr data_part_storage_builder, const NamesAndTypesList & columns_list, const StorageMetadataPtr & metadata_snapshot, const std::vector & indices_to_recalc, const CompressionCodecPtr & default_codec_, const MergeTreeWriterSettings & 
writer_settings, - const MergeTreeIndexGranularity & computed_index_granularity) const override; + const MergeTreeIndexGranularity & computed_index_granularity) override; bool isStoredOnDisk() const override { return true; } @@ -64,7 +64,7 @@ public: protected: static void loadIndexGranularityImpl( MergeTreeIndexGranularity & index_granularity_, MergeTreeIndexGranularityInfo & index_granularity_info_, - const DataPartStoragePtr & data_part_storage_, const std::string & any_column_file_name); + const IDataPartStorage & data_part_storage_, const std::string & any_column_file_name); private: void checkConsistency(bool require_part_metadata) const override; diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp index 457aad55023..020121e59d7 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp @@ -10,8 +10,7 @@ namespace ErrorCodes } MergeTreeDataPartWriterCompact::MergeTreeDataPartWriterCompact( - const MergeTreeData::DataPartPtr & data_part_, - DataPartStorageBuilderPtr data_part_storage_builder_, + const MergeTreeMutableDataPartPtr & data_part_, const NamesAndTypesList & columns_list_, const StorageMetadataPtr & metadata_snapshot_, const std::vector & indices_to_recalc_, @@ -19,16 +18,16 @@ MergeTreeDataPartWriterCompact::MergeTreeDataPartWriterCompact( const CompressionCodecPtr & default_codec_, const MergeTreeWriterSettings & settings_, const MergeTreeIndexGranularity & index_granularity_) - : MergeTreeDataPartWriterOnDisk(data_part_, std::move(data_part_storage_builder_), columns_list_, metadata_snapshot_, + : MergeTreeDataPartWriterOnDisk(data_part_, columns_list_, metadata_snapshot_, indices_to_recalc_, marks_file_extension_, default_codec_, settings_, index_granularity_) - , plain_file(data_part_storage_builder->writeFile( + , plain_file(data_part_->getDataPartStorage().writeFile( MergeTreeDataPartCompact::DATA_FILE_NAME_WITH_EXTENSION, settings.max_compress_block_size, settings_.query_write_settings)) , plain_hashing(*plain_file) { - marks_file = data_part_storage_builder->writeFile( + marks_file = data_part_->getDataPartStorage().writeFile( MergeTreeDataPartCompact::DATA_FILE_NAME + marks_file_extension_, 4096, settings_.query_write_settings); @@ -132,7 +131,7 @@ void writeColumnSingleGranule( serialize_settings.position_independent_encoding = true; //-V1048 serialize_settings.low_cardinality_max_dictionary_size = 0; //-V1048 - serialization->serializeBinaryBulkStatePrefix(serialize_settings, state); + serialization->serializeBinaryBulkStatePrefix(*column.column, serialize_settings, state); serialization->serializeBinaryBulkWithMultipleStreams(*column.column, from_row, number_of_rows, serialize_settings, state); serialization->serializeBinaryBulkStateSuffix(serialize_settings, state); } diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h index 7b68f61925f..06f8122393f 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h @@ -11,8 +11,7 @@ class MergeTreeDataPartWriterCompact : public MergeTreeDataPartWriterOnDisk { public: MergeTreeDataPartWriterCompact( - const MergeTreeData::DataPartPtr & data_part, - DataPartStorageBuilderPtr data_part_storage_builder_, + const MergeTreeMutableDataPartPtr & data_part, const NamesAndTypesList & columns_list, const StorageMetadataPtr & 
metadata_snapshot_, const std::vector & indices_to_recalc, diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterInMemory.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterInMemory.cpp index e1145868ce2..8066a097499 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterInMemory.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterInMemory.cpp @@ -11,11 +11,11 @@ namespace ErrorCodes } MergeTreeDataPartWriterInMemory::MergeTreeDataPartWriterInMemory( - const DataPartInMemoryPtr & part_, + const MutableDataPartInMemoryPtr & part_, const NamesAndTypesList & columns_list_, const StorageMetadataPtr & metadata_snapshot_, const MergeTreeWriterSettings & settings_) - : IMergeTreeDataPartWriter(part_, nullptr, columns_list_, metadata_snapshot_, settings_) + : IMergeTreeDataPartWriter(part_, columns_list_, metadata_snapshot_, settings_) , part_in_memory(part_) {} void MergeTreeDataPartWriterInMemory::write( diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterInMemory.h b/src/Storages/MergeTree/MergeTreeDataPartWriterInMemory.h index 233ca81a697..9e1e868beac 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterInMemory.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterInMemory.h @@ -10,7 +10,7 @@ class MergeTreeDataPartWriterInMemory : public IMergeTreeDataPartWriter { public: MergeTreeDataPartWriterInMemory( - const DataPartInMemoryPtr & part_, + const MutableDataPartInMemoryPtr & part_, const NamesAndTypesList & columns_list_, const StorageMetadataPtr & metadata_snapshot, const MergeTreeWriterSettings & settings_); @@ -24,7 +24,7 @@ public: private: void calculateAndSerializePrimaryIndex(const Block & primary_index_block); - DataPartInMemoryPtr part_in_memory; + MutableDataPartInMemoryPtr part_in_memory; }; } diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp index 1d2b095330e..d085bb29b20 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp @@ -48,7 +48,7 @@ void MergeTreeDataPartWriterOnDisk::Stream::sync() const MergeTreeDataPartWriterOnDisk::Stream::Stream( const String & escaped_column_name_, - const DataPartStorageBuilderPtr & data_part_storage_builder, + const MutableDataPartStoragePtr & data_part_storage, const String & data_path_, const std::string & data_file_extension_, const std::string & marks_path_, @@ -61,11 +61,11 @@ MergeTreeDataPartWriterOnDisk::Stream::Stream( escaped_column_name(escaped_column_name_), data_file_extension{data_file_extension_}, marks_file_extension{marks_file_extension_}, - plain_file(data_part_storage_builder->writeFile(data_path_ + data_file_extension, max_compress_block_size_, query_write_settings)), + plain_file(data_part_storage->writeFile(data_path_ + data_file_extension, max_compress_block_size_, query_write_settings)), plain_hashing(*plain_file), compressor(plain_hashing, compression_codec_, max_compress_block_size_), compressed_hashing(compressor), - marks_file(data_part_storage_builder->writeFile(marks_path_ + marks_file_extension, 4096, query_write_settings)), + marks_file(data_part_storage->writeFile(marks_path_ + marks_file_extension, 4096, query_write_settings)), marks_hashing(*marks_file), marks_compressor(marks_hashing, marks_compression_codec_, marks_compress_block_size_), marks_compressed_hashing(marks_compressor), @@ -96,8 +96,7 @@ void MergeTreeDataPartWriterOnDisk::Stream::addToChecksums(MergeTreeData::DataPa 
MergeTreeDataPartWriterOnDisk::MergeTreeDataPartWriterOnDisk( - const MergeTreeData::DataPartPtr & data_part_, - DataPartStorageBuilderPtr data_part_storage_builder_, + const MergeTreeMutableDataPartPtr & data_part_, const NamesAndTypesList & columns_list_, const StorageMetadataPtr & metadata_snapshot_, const MergeTreeIndices & indices_to_recalc_, @@ -105,8 +104,7 @@ MergeTreeDataPartWriterOnDisk::MergeTreeDataPartWriterOnDisk( const CompressionCodecPtr & default_codec_, const MergeTreeWriterSettings & settings_, const MergeTreeIndexGranularity & index_granularity_) - : IMergeTreeDataPartWriter(data_part_, std::move(data_part_storage_builder_), - columns_list_, metadata_snapshot_, settings_, index_granularity_) + : IMergeTreeDataPartWriter(data_part_, columns_list_, metadata_snapshot_, settings_, index_granularity_) , skip_indices(indices_to_recalc_) , marks_file_extension(marks_file_extension_) , default_codec(default_codec_) @@ -116,8 +114,8 @@ MergeTreeDataPartWriterOnDisk::MergeTreeDataPartWriterOnDisk( if (settings.blocks_are_granules_size && !index_granularity.empty()) throw Exception("Can't take information about index granularity from blocks, when non empty index_granularity array specified", ErrorCodes::LOGICAL_ERROR); - if (!data_part_storage_builder->exists()) - data_part_storage_builder->createDirectories(); + if (!data_part->getDataPartStorage().exists()) + data_part->getDataPartStorage().createDirectories(); if (settings.rewrite_primary_key) initPrimaryIndex(); @@ -178,7 +176,7 @@ void MergeTreeDataPartWriterOnDisk::initPrimaryIndex() if (metadata_snapshot->hasPrimaryKey()) { String index_name = "primary" + getIndexExtension(compress_primary_key); - index_file_stream = data_part_storage_builder->writeFile(index_name, DBMS_DEFAULT_BUFFER_SIZE, settings.query_write_settings); + index_file_stream = data_part->getDataPartStorage().writeFile(index_name, DBMS_DEFAULT_BUFFER_SIZE, settings.query_write_settings); index_file_hashing_stream = std::make_unique(*index_file_stream); if (compress_primary_key) @@ -204,7 +202,7 @@ void MergeTreeDataPartWriterOnDisk::initSkipIndices() skip_indices_streams.emplace_back( std::make_unique( stream_name, - data_part_storage_builder, + data_part->getDataPartStoragePtr(), stream_name, index_helper->getSerializedFileExtension(), stream_name, marks_file_extension, default_codec, settings.max_compress_block_size, diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h index 4b58224de78..ab1adfe7f59 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h @@ -50,7 +50,7 @@ public: { Stream( const String & escaped_column_name_, - const DataPartStorageBuilderPtr & data_part_storage_builder, + const MutableDataPartStoragePtr & data_part_storage, const String & data_path_, const std::string & data_file_extension_, const std::string & marks_path_, @@ -92,8 +92,7 @@ public: using StreamPtr = std::unique_ptr; MergeTreeDataPartWriterOnDisk( - const MergeTreeData::DataPartPtr & data_part_, - DataPartStorageBuilderPtr data_part_storage_builder_, + const MergeTreeMutableDataPartPtr & data_part_, const NamesAndTypesList & columns_list, const StorageMetadataPtr & metadata_snapshot_, const std::vector & indices_to_recalc, diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp index 70654f521a1..62917bcb084 100644 --- 
a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp @@ -71,8 +71,7 @@ Granules getGranulesToWrite(const MergeTreeIndexGranularity & index_granularity, } MergeTreeDataPartWriterWide::MergeTreeDataPartWriterWide( - const MergeTreeData::DataPartPtr & data_part_, - DataPartStorageBuilderPtr data_part_storage_builder_, + const MergeTreeMutableDataPartPtr & data_part_, const NamesAndTypesList & columns_list_, const StorageMetadataPtr & metadata_snapshot_, const std::vector & indices_to_recalc_, @@ -80,7 +79,7 @@ MergeTreeDataPartWriterWide::MergeTreeDataPartWriterWide( const CompressionCodecPtr & default_codec_, const MergeTreeWriterSettings & settings_, const MergeTreeIndexGranularity & index_granularity_) - : MergeTreeDataPartWriterOnDisk(data_part_, std::move(data_part_storage_builder_), columns_list_, metadata_snapshot_, + : MergeTreeDataPartWriterOnDisk(data_part_, columns_list_, metadata_snapshot_, indices_to_recalc_, marks_file_extension_, default_codec_, settings_, index_granularity_) { @@ -117,7 +116,7 @@ void MergeTreeDataPartWriterWide::addStreams( column_streams[stream_name] = std::make_unique( stream_name, - data_part_storage_builder, + data_part->getDataPartStoragePtr(), stream_name, DATA_FILE_EXTENSION, stream_name, marks_file_extension, compression_codec, @@ -356,7 +355,7 @@ void MergeTreeDataPartWriterWide::writeColumn( { ISerialization::SerializeBinaryBulkSettings serialize_settings; serialize_settings.getter = createStreamGetter(name_and_type, offset_columns); - serialization->serializeBinaryBulkStatePrefix(serialize_settings, it->second); + serialization->serializeBinaryBulkStatePrefix(column, serialize_settings, it->second); } const auto & global_settings = storage.getContext()->getSettingsRef(); @@ -421,20 +420,18 @@ void MergeTreeDataPartWriterWide::validateColumnOfFixedSize(const NameAndTypePai String mrk_path = escaped_name + marks_file_extension; String bin_path = escaped_name + DATA_FILE_EXTENSION; - auto data_part_storage = data_part_storage_builder->getStorage(); - /// Some columns may be removed because of ttl. Skip them. 
- if (!data_part_storage->exists(mrk_path)) + if (!data_part->getDataPartStorage().exists(mrk_path)) return; - auto mrk_file_in = data_part_storage->readFile(mrk_path, {}, std::nullopt, std::nullopt); + auto mrk_file_in = data_part->getDataPartStorage().readFile(mrk_path, {}, std::nullopt, std::nullopt); std::unique_ptr mrk_in; if (data_part->index_granularity_info.mark_type.compressed) mrk_in = std::make_unique(std::move(mrk_file_in)); else mrk_in = std::move(mrk_file_in); - DB::CompressedReadBufferFromFile bin_in(data_part_storage->readFile(bin_path, {}, std::nullopt, std::nullopt)); + DB::CompressedReadBufferFromFile bin_in(data_part->getDataPartStorage().readFile(bin_path, {}, std::nullopt, std::nullopt)); bool must_be_last = false; UInt64 offset_in_compressed_file = 0; UInt64 offset_in_decompressed_block = 0; @@ -485,7 +482,7 @@ void MergeTreeDataPartWriterWide::validateColumnOfFixedSize(const NameAndTypePai if (index_granularity_rows != index_granularity.getMarkRows(mark_num)) throw Exception( ErrorCodes::LOGICAL_ERROR, "Incorrect mark rows for part {} for mark #{} (compressed offset {}, decompressed offset {}), in-memory {}, on disk {}, total marks {}", - data_part_storage_builder->getFullPath(), mark_num, offset_in_compressed_file, offset_in_decompressed_block, index_granularity.getMarkRows(mark_num), index_granularity_rows, index_granularity.getMarksCount()); + data_part->getDataPartStorage().getFullPath(), mark_num, offset_in_compressed_file, offset_in_decompressed_block, index_granularity.getMarkRows(mark_num), index_granularity_rows, index_granularity.getMarksCount()); auto column = type->createColumn(); diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h index 08815d9930a..633b5119474 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h @@ -18,8 +18,7 @@ class MergeTreeDataPartWriterWide : public MergeTreeDataPartWriterOnDisk { public: MergeTreeDataPartWriterWide( - const MergeTreeData::DataPartPtr & data_part, - DataPartStorageBuilderPtr data_part_storage_builder_, + const MergeTreeMutableDataPartPtr & data_part, const NamesAndTypesList & columns_list, const StorageMetadataPtr & metadata_snapshot, const std::vector & indices_to_recalc, diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index 91ecb3a37a0..afdd98b8e41 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -107,14 +107,12 @@ static std::string toString(const RelativeSize & x) } /// Converts sample size to an approximate number of rows (ex. `SAMPLE 1000000`) to relative value (ex. `SAMPLE 0.1`). 
-static RelativeSize convertAbsoluteSampleSizeToRelative(const ASTPtr & node, size_t approx_total_rows) +static RelativeSize convertAbsoluteSampleSizeToRelative(const ASTSampleRatio::Rational & ratio, size_t approx_total_rows) { if (approx_total_rows == 0) return 1; - const auto & node_sample = node->as(); - - auto absolute_sample_size = node_sample.ratio.numerator / node_sample.ratio.denominator; + auto absolute_sample_size = ratio.numerator / ratio.denominator; return std::min(RelativeSize(1), RelativeSize(absolute_sample_size) / RelativeSize(approx_total_rows)); } @@ -140,7 +138,7 @@ QueryPlanPtr MergeTreeDataSelectExecutor::read( const SelectQueryInfo & query_info, ContextPtr context, const UInt64 max_block_size, - const unsigned num_streams, + const size_t num_streams, QueryProcessingStage::Enum processed_stage, std::shared_ptr max_block_numbers_to_read, bool enable_parallel_reading) const @@ -467,7 +465,7 @@ QueryPlanPtr MergeTreeDataSelectExecutor::read( } MergeTreeDataSelectSamplingData MergeTreeDataSelectExecutor::getSampling( - const ASTSelectQuery & select, + const SelectQueryInfo & select_query_info, NamesAndTypesList available_real_columns, const MergeTreeData::DataPartsVector & parts, KeyCondition & key_condition, @@ -484,23 +482,42 @@ MergeTreeDataSelectSamplingData MergeTreeDataSelectExecutor::getSampling( RelativeSize relative_sample_size = 0; RelativeSize relative_sample_offset = 0; - auto select_sample_size = select.sampleSize(); - auto select_sample_offset = select.sampleOffset(); + bool final = false; + std::optional sample_size_ratio; + std::optional sample_offset_ratio; - if (select_sample_size) + if (select_query_info.table_expression_modifiers) { - relative_sample_size.assign( - select_sample_size->as().ratio.numerator, - select_sample_size->as().ratio.denominator); + const auto & table_expression_modifiers = *select_query_info.table_expression_modifiers; + final = table_expression_modifiers.hasFinal(); + sample_size_ratio = table_expression_modifiers.getSampleSizeRatio(); + sample_offset_ratio = table_expression_modifiers.getSampleOffsetRatio(); + } + else + { + auto & select = select_query_info.query->as(); + + final = select.final(); + auto select_sample_size = select.sampleSize(); + auto select_sample_offset = select.sampleOffset(); + + if (select_sample_size) + sample_size_ratio = select_sample_size->as().ratio; + + if (select_sample_offset) + sample_offset_ratio = select_sample_offset->as().ratio; + } + + if (sample_size_ratio) + { + relative_sample_size.assign(sample_size_ratio->numerator, sample_size_ratio->denominator); if (relative_sample_size < 0) throw Exception("Negative sample size", ErrorCodes::ARGUMENT_OUT_OF_BOUND); relative_sample_offset = 0; - if (select_sample_offset) - relative_sample_offset.assign( - select_sample_offset->as().ratio.numerator, - select_sample_offset->as().ratio.denominator); + if (sample_offset_ratio) + relative_sample_offset.assign(sample_offset_ratio->numerator, sample_offset_ratio->denominator); if (relative_sample_offset < 0) throw Exception("Negative sample offset", ErrorCodes::ARGUMENT_OUT_OF_BOUND); @@ -513,7 +530,7 @@ MergeTreeDataSelectSamplingData MergeTreeDataSelectExecutor::getSampling( if (relative_sample_size > 1) { - relative_sample_size = convertAbsoluteSampleSizeToRelative(select_sample_size, approx_total_rows); + relative_sample_size = convertAbsoluteSampleSizeToRelative(*sample_size_ratio, approx_total_rows); LOG_DEBUG(log, "Selected relative sample size: {}", toString(relative_sample_size)); } @@ -526,7 
+543,7 @@ MergeTreeDataSelectSamplingData MergeTreeDataSelectExecutor::getSampling( if (relative_sample_offset > 1) { - relative_sample_offset = convertAbsoluteSampleSizeToRelative(select_sample_offset, approx_total_rows); + relative_sample_offset = convertAbsoluteSampleSizeToRelative(*sample_offset_ratio, approx_total_rows); LOG_DEBUG(log, "Selected relative sample offset: {}", toString(relative_sample_offset)); } } @@ -660,7 +677,7 @@ MergeTreeDataSelectSamplingData MergeTreeDataSelectExecutor::getSampling( /// So, assume that we already have calculated column. ASTPtr sampling_key_ast = metadata_snapshot->getSamplingKeyAST(); - if (select.final()) + if (final) { sampling_key_ast = std::make_shared(sampling_key.column_names[0]); /// We do spoil available_real_columns here, but it is not used later. @@ -930,7 +947,7 @@ RangesInDataParts MergeTreeDataSelectExecutor::filterPartsByPrimaryKeyAndSkipInd Strings forced_indices; { Tokens tokens(indices.data(), &indices[indices.size()], settings.max_query_size); - IParser::Pos pos(tokens, settings.max_parser_depth); + IParser::Pos pos(tokens, static_cast(settings.max_parser_depth)); Expected expected; if (!parseIdentifiersOrStringLiterals(pos, expected, forced_indices)) throw Exception(ErrorCodes::CANNOT_PARSE_TEXT, "Cannot parse force_data_skipping_indices ('{}')", indices); @@ -1061,6 +1078,10 @@ RangesInDataParts MergeTreeDataSelectExecutor::filterPartsByPrimaryKeyAndSkipInd auto current_rows_estimate = ranges.getRowsCount(); size_t prev_total_rows_estimate = total_rows.fetch_add(current_rows_estimate); size_t total_rows_estimate = current_rows_estimate + prev_total_rows_estimate; + if (query_info.limit > 0 && total_rows_estimate > query_info.limit) + { + total_rows_estimate = query_info.limit; + } limits.check(total_rows_estimate, 0, "rows (controlled by 'max_rows_to_read' setting)", ErrorCodes::TOO_MANY_ROWS); leaf_limits.check( total_rows_estimate, 0, "rows (controlled by 'max_rows_to_read_leaf' setting)", ErrorCodes::TOO_MANY_ROWS); @@ -1279,7 +1300,7 @@ MergeTreeDataSelectAnalysisResultPtr MergeTreeDataSelectExecutor::estimateNumMar const SelectQueryInfo & query_info, const ActionDAGNodes & added_filter_nodes, ContextPtr context, - unsigned num_streams, + size_t num_streams, std::shared_ptr max_block_numbers_to_read) const { size_t total_parts = parts.size(); @@ -1318,7 +1339,7 @@ QueryPlanPtr MergeTreeDataSelectExecutor::readFromParts( const SelectQueryInfo & query_info, ContextPtr context, const UInt64 max_block_size, - const unsigned num_streams, + const size_t num_streams, std::shared_ptr max_block_numbers_to_read, MergeTreeDataSelectAnalysisResultPtr merge_tree_select_result_ptr, bool enable_parallel_reading) const @@ -1618,10 +1639,10 @@ MarkRanges MergeTreeDataSelectExecutor::filterMarksUsingIndex( UncompressedCache * uncompressed_cache, Poco::Logger * log) { - if (!index_helper->getDeserializedFormat(part->data_part_storage, index_helper->getFileName())) + if (!index_helper->getDeserializedFormat(part->getDataPartStorage(), index_helper->getFileName())) { LOG_DEBUG(log, "File for index {} does not exist ({}.*). 
Skipping it.", backQuote(index_helper->index.name), - (fs::path(part->data_part_storage->getFullPath()) / index_helper->getFileName()).string()); + (fs::path(part->getDataPartStorage().getFullPath()) / index_helper->getFileName()).string()); return ranges; } @@ -1736,7 +1757,7 @@ MarkRanges MergeTreeDataSelectExecutor::filterMarksUsingMergedIndex( { for (const auto & index_helper : indices) { - if (!part->data_part_storage->exists(index_helper->getFileName() + ".idx")) + if (!part->getDataPartStorage().exists(index_helper->getFileName() + ".idx")) { LOG_DEBUG(log, "File for index {} does not exist. Skipping it.", backQuote(index_helper->index.name)); return ranges; diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h index bb44f260eec..541f6446674 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h @@ -33,7 +33,7 @@ public: const SelectQueryInfo & query_info, ContextPtr context, UInt64 max_block_size, - unsigned num_streams, + size_t num_streams, QueryProcessingStage::Enum processed_stage, std::shared_ptr max_block_numbers_to_read = nullptr, bool enable_parallel_reading = false) const; @@ -46,7 +46,7 @@ public: const SelectQueryInfo & query_info, ContextPtr context, UInt64 max_block_size, - unsigned num_streams, + size_t num_streams, std::shared_ptr max_block_numbers_to_read = nullptr, MergeTreeDataSelectAnalysisResultPtr merge_tree_select_result_ptr = nullptr, bool enable_parallel_reading = false) const; @@ -62,7 +62,7 @@ public: const SelectQueryInfo & query_info, const ActionDAGNodes & added_filter_nodes, ContextPtr context, - unsigned num_streams, + size_t num_streams, std::shared_ptr max_block_numbers_to_read = nullptr) const; private: @@ -201,7 +201,7 @@ public: /// Also, calculate _sample_factor if needed. /// Also, update key condition with selected sampling range. static MergeTreeDataSelectSamplingData getSampling( - const ASTSelectQuery & select, + const SelectQueryInfo & select_query_info, NamesAndTypesList available_real_columns, const MergeTreeData::DataPartsVector & parts, KeyCondition & key_condition, diff --git a/src/Storages/MergeTree/MergeTreeDataWriter.cpp b/src/Storages/MergeTree/MergeTreeDataWriter.cpp index 7b99819340e..815e62848a2 100644 --- a/src/Storages/MergeTree/MergeTreeDataWriter.cpp +++ b/src/Storages/MergeTree/MergeTreeDataWriter.cpp @@ -288,7 +288,7 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempPart( auto columns = metadata_snapshot->getColumns().getAllPhysical().filter(block.getNames()); for (auto & column : columns) - if (isObject(column.type)) + if (column.type->hasDynamicSubcolumns()) column.type = block.getByName(column.name).type; static const String TMP_PREFIX = "tmp_insert_"; @@ -378,10 +378,7 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempPart( data.relative_data_path, TMP_PREFIX + part_name); - auto data_part_storage_builder = std::make_shared( - data_part_volume, - data.relative_data_path, - TMP_PREFIX + part_name); + data_part_storage->beginTransaction(); auto new_data_part = data.createPart( part_name, @@ -408,15 +405,15 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempPart( if (new_data_part->isStoredOnDisk()) { /// The name could be non-unique in case of stale files from previous runs. 
- String full_path = new_data_part->data_part_storage->getFullPath(); + String full_path = new_data_part->getDataPartStorage().getFullPath(); - if (new_data_part->data_part_storage->exists()) + if (new_data_part->getDataPartStorage().exists()) { LOG_WARNING(log, "Removing old temporary directory {}", full_path); - data_part_storage_builder->removeRecursive(); + data_part_storage->removeRecursive(); } - data_part_storage_builder->createDirectories(); + data_part_storage->createDirectories(); if (data.getSettings()->fsync_part_directory) { @@ -448,7 +445,7 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempPart( auto compression_codec = data.getContext()->chooseCompressionCodec(0, 0); const auto & index_factory = MergeTreeIndexFactory::instance(); - auto out = std::make_unique(new_data_part, data_part_storage_builder, metadata_snapshot, columns, + auto out = std::make_unique(new_data_part, metadata_snapshot, columns, index_factory.getMany(metadata_snapshot->getSecondaryIndices()), compression_codec, context->getCurrentTransaction(), false, false, context->getWriteSettings()); @@ -459,9 +456,8 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempPart( auto projection_block = projection.calculate(block, context); if (projection_block.rows()) { - auto proj_temp_part = writeProjectionPart(data, log, projection_block, projection, data_part_storage_builder, new_data_part.get()); + auto proj_temp_part = writeProjectionPart(data, log, projection_block, projection, new_data_part.get()); new_data_part->addProjectionPart(projection.name, std::move(proj_temp_part.part)); - proj_temp_part.builder->commit(); for (auto & stream : proj_temp_part.streams) temp_part.streams.emplace_back(std::move(stream)); } @@ -473,7 +469,6 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempPart( nullptr, nullptr); temp_part.part = new_data_part; - temp_part.builder = data_part_storage_builder; temp_part.streams.emplace_back(TemporaryPart::Stream{.stream = std::move(out), .finalizer = std::move(finalizer)}); ProfileEvents::increment(ProfileEvents::MergeTreeDataWriterRows, block.rows()); @@ -485,11 +480,8 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempPart( MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeProjectionPartImpl( const String & part_name, - MergeTreeDataPartType part_type, - const String & relative_path, - const DataPartStorageBuilderPtr & data_part_storage_builder, bool is_temp, - const IMergeTreeDataPart * parent_part, + IMergeTreeDataPart * parent_part, const MergeTreeData & data, Poco::Logger * log, Block block, @@ -498,7 +490,23 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeProjectionPartImpl( TemporaryPart temp_part; const StorageMetadataPtr & metadata_snapshot = projection.metadata; MergeTreePartInfo new_part_info("all", 0, 0, 0); - auto projection_part_storage = parent_part->data_part_storage->getProjection(relative_path); + + MergeTreeDataPartType part_type; + if (parent_part->getType() == MergeTreeDataPartType::InMemory) + { + part_type = MergeTreeDataPartType::InMemory; + } + else + { + /// Size of part would not be greater than block.bytes() + epsilon + size_t expected_size = block.bytes(); + // just check if there is enough space on parent volume + data.reserveSpace(expected_size, parent_part->getDataPartStorage()); + part_type = data.choosePartTypeOnDisk(expected_size, block.rows()); + } + + auto relative_path = part_name + (is_temp ? 
".tmp_proj" : ".proj"); + auto projection_part_storage = parent_part->getDataPartStorage().getProjection(relative_path); auto new_data_part = data.createPart( part_name, part_type, @@ -506,7 +514,6 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeProjectionPartImpl( projection_part_storage, parent_part); - auto projection_part_storage_builder = data_part_storage_builder->getProjection(relative_path); new_data_part->is_temp = is_temp; NamesAndTypesList columns = metadata_snapshot->getColumns().getAllPhysical().filter(block.getNames()); @@ -522,10 +529,10 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeProjectionPartImpl( if (projection_part_storage->exists()) { LOG_WARNING(log, "Removing old temporary directory {}", projection_part_storage->getFullPath()); - projection_part_storage_builder->removeRecursive(); + projection_part_storage->removeRecursive(); } - projection_part_storage_builder->createDirectories(); + projection_part_storage->createDirectories(); } /// If we need to calculate some columns to sort. @@ -569,7 +576,6 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeProjectionPartImpl( auto out = std::make_unique( new_data_part, - projection_part_storage_builder, metadata_snapshot, columns, MergeTreeIndices{}, @@ -580,7 +586,6 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeProjectionPartImpl( out->writeWithPermutation(block, perm_ptr); auto finalizer = out->finalizePartAsync(new_data_part, false); temp_part.part = new_data_part; - temp_part.builder = projection_part_storage_builder; temp_part.streams.emplace_back(TemporaryPart::Stream{.stream = std::move(out), .finalizer = std::move(finalizer)}); ProfileEvents::increment(ProfileEvents::MergeTreeDataProjectionWriterRows, block.rows()); @@ -591,98 +596,40 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeProjectionPartImpl( } MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeProjectionPart( - MergeTreeData & data, + const MergeTreeData & data, Poco::Logger * log, Block block, const ProjectionDescription & projection, - const DataPartStorageBuilderPtr & data_part_storage_builder, - const IMergeTreeDataPart * parent_part) + IMergeTreeDataPart * parent_part) { - String part_name = projection.name; - MergeTreeDataPartType part_type; - if (parent_part->getType() == MergeTreeDataPartType::InMemory) - { - part_type = MergeTreeDataPartType::InMemory; - } - else - { - /// Size of part would not be greater than block.bytes() + epsilon - size_t expected_size = block.bytes(); - // just check if there is enough space on parent volume - data.reserveSpace(expected_size, data_part_storage_builder); - part_type = data.choosePartTypeOnDisk(expected_size, block.rows()); - } - return writeProjectionPartImpl( - part_name, - part_type, - part_name + ".proj" /* relative_path */, - data_part_storage_builder, + projection.name, false /* is_temp */, parent_part, data, log, - block, + std::move(block), projection); } /// This is used for projection materialization process which may contain multiple stages of /// projection part merges. 
MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempProjectionPart( - MergeTreeData & data, - Poco::Logger * log, - Block block, - const ProjectionDescription & projection, - const DataPartStorageBuilderPtr & data_part_storage_builder, - const IMergeTreeDataPart * parent_part, - size_t block_num) -{ - String part_name = fmt::format("{}_{}", projection.name, block_num); - MergeTreeDataPartType part_type; - if (parent_part->getType() == MergeTreeDataPartType::InMemory) - { - part_type = MergeTreeDataPartType::InMemory; - } - else - { - /// Size of part would not be greater than block.bytes() + epsilon - size_t expected_size = block.bytes(); - // just check if there is enough space on parent volume - data.reserveSpace(expected_size, data_part_storage_builder); - part_type = data.choosePartTypeOnDisk(expected_size, block.rows()); - } - - return writeProjectionPartImpl( - part_name, - part_type, - part_name + ".tmp_proj" /* relative_path */, - data_part_storage_builder, - true /* is_temp */, - parent_part, - data, - log, - block, - projection); -} - -MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeInMemoryProjectionPart( const MergeTreeData & data, Poco::Logger * log, Block block, const ProjectionDescription & projection, - const DataPartStorageBuilderPtr & data_part_storage_builder, - const IMergeTreeDataPart * parent_part) + IMergeTreeDataPart * parent_part, + size_t block_num) { + String part_name = fmt::format("{}_{}", projection.name, block_num); return writeProjectionPartImpl( - projection.name, - MergeTreeDataPartType::InMemory, - projection.name + ".proj" /* relative_path */, - data_part_storage_builder, - false /* is_temp */, + part_name, + true /* is_temp */, parent_part, data, log, - block, + std::move(block), projection); } diff --git a/src/Storages/MergeTree/MergeTreeDataWriter.h b/src/Storages/MergeTree/MergeTreeDataWriter.h index 00438a29fa1..8c2bf66e8f8 100644 --- a/src/Storages/MergeTree/MergeTreeDataWriter.h +++ b/src/Storages/MergeTree/MergeTreeDataWriter.h @@ -52,7 +52,6 @@ public: struct TemporaryPart { MergeTreeData::MutableDataPartPtr part; - DataPartStorageBuilderPtr builder; struct Stream { @@ -74,31 +73,20 @@ public: /// For insertion. static TemporaryPart writeProjectionPart( - MergeTreeData & data, - Poco::Logger * log, - Block block, - const ProjectionDescription & projection, - const DataPartStorageBuilderPtr & data_part_storage_builder, - const IMergeTreeDataPart * parent_part); - - /// For mutation: MATERIALIZE PROJECTION. - static TemporaryPart writeTempProjectionPart( - MergeTreeData & data, - Poco::Logger * log, - Block block, - const ProjectionDescription & projection, - const DataPartStorageBuilderPtr & data_part_storage_builder, - const IMergeTreeDataPart * parent_part, - size_t block_num); - - /// For WriteAheadLog AddPart. - static TemporaryPart writeInMemoryProjectionPart( const MergeTreeData & data, Poco::Logger * log, Block block, const ProjectionDescription & projection, - const DataPartStorageBuilderPtr & data_part_storage_builder, - const IMergeTreeDataPart * parent_part); + IMergeTreeDataPart * parent_part); + + /// For mutation: MATERIALIZE PROJECTION. 
+ static TemporaryPart writeTempProjectionPart( + const MergeTreeData & data, + Poco::Logger * log, + Block block, + const ProjectionDescription & projection, + IMergeTreeDataPart * parent_part, + size_t block_num); static Block mergeBlock( const Block & block, @@ -110,18 +98,14 @@ public: private: static TemporaryPart writeProjectionPartImpl( const String & part_name, - MergeTreeDataPartType part_type, - const String & relative_path, - const DataPartStorageBuilderPtr & data_part_storage_builder, bool is_temp, - const IMergeTreeDataPart * parent_part, + IMergeTreeDataPart * parent_part, const MergeTreeData & data, Poco::Logger * log, Block block, const ProjectionDescription & projection); MergeTreeData & data; - Poco::Logger * log; }; diff --git a/src/Storages/MergeTree/MergeTreeIndexAnnoy.cpp b/src/Storages/MergeTree/MergeTreeIndexAnnoy.cpp index 3b16998337e..052834358bb 100644 --- a/src/Storages/MergeTree/MergeTreeIndexAnnoy.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexAnnoy.cpp @@ -9,6 +9,7 @@ #include #include #include +#include namespace DB @@ -64,9 +65,11 @@ uint64_t AnnoyIndex::getNumOfDimensions() const namespace ErrorCodes { - extern const int LOGICAL_ERROR; - extern const int INCORRECT_QUERY; + extern const int ILLEGAL_COLUMN; extern const int INCORRECT_DATA; + extern const int INCORRECT_NUMBER_OF_COLUMNS; + extern const int INCORRECT_QUERY; + extern const int LOGICAL_ERROR; } MergeTreeIndexGranuleAnnoy::MergeTreeIndexGranuleAnnoy(const String & index_name_, const Block & index_sample_block_) @@ -113,7 +116,7 @@ MergeTreeIndexAggregatorAnnoy::MergeTreeIndexAggregatorAnnoy( MergeTreeIndexGranulePtr MergeTreeIndexAggregatorAnnoy::getGranuleAndReset() { // NOLINTNEXTLINE(*) - index->build(number_of_trees, /*number_of_threads=*/1); + index->build(static_cast(number_of_trees), /*number_of_threads=*/1); auto granule = std::make_shared(index_name, index_sample_block, index); index = nullptr; return granule; @@ -132,9 +135,7 @@ void MergeTreeIndexAggregatorAnnoy::update(const Block & block, size_t * pos, si return; if (index_sample_block.columns() > 1) - { throw Exception("Only one column is supported", ErrorCodes::LOGICAL_ERROR); - } auto index_column_name = index_sample_block.getByPosition(0).name; const auto & column_cut = block.getByName(index_column_name).column->cut(*pos, rows_read); @@ -144,27 +145,22 @@ void MergeTreeIndexAggregatorAnnoy::update(const Block & block, size_t * pos, si const auto & data = column_array->getData(); const auto & array = typeid_cast(data).getData(); if (array.empty()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Array have 0 rows, but {} expected", rows_read); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Array has 0 rows, {} rows expected", rows_read); const auto & offsets = column_array->getOffsets(); size_t num_rows = offsets.size(); - /// All sizes are the same + /// Check all sizes are the same size_t size = offsets[0]; for (size_t i = 0; i < num_rows - 1; ++i) - { if (offsets[i + 1] - offsets[i] != size) - { throw Exception(ErrorCodes::INCORRECT_DATA, "Arrays should have same length"); - } - } + index = std::make_shared(size); index->add_item(index->get_n_items(), array.data()); /// add all rows from 1 to num_rows - 1 (this is the same as the beginning of the last element) for (size_t current_row = 1; current_row < num_rows; ++current_row) - { index->add_item(index->get_n_items(), &array[offsets[current_row - 1]]); - } } else { @@ -181,19 +177,13 @@ void MergeTreeIndexAggregatorAnnoy::update(const Block & block, size_t * pos, si { const auto& 
pod_array = typeid_cast(column.get())->getData(); for (size_t i = 0; i < pod_array.size(); ++i) - { data[i].push_back(pod_array[i]); - } } assert(!data.empty()); if (!index) - { index = std::make_shared(data[0].size()); - } for (const auto& item : data) - { index->add_item(index->get_n_items(), item.data()); - } } *pos += rows_read; @@ -222,7 +212,7 @@ std::vector MergeTreeIndexConditionAnnoy::getUsefulRanges(MergeTreeIndex { UInt64 limit = condition.getLimit(); UInt64 index_granularity = condition.getIndexGranularity(); - std::optional comp_dist = condition.getQueryType() == ANN::ANNQueryInformation::Type::Where ? + std::optional comp_dist = condition.getQueryType() == ApproximateNearestNeighbour::ANNQueryInformation::Type::Where ? std::optional(condition.getComparisonDistanceForWhereQuery()) : std::nullopt; if (comp_dist && comp_dist.value() < 0) @@ -232,16 +222,13 @@ std::vector MergeTreeIndexConditionAnnoy::getUsefulRanges(MergeTreeIndex auto granule = std::dynamic_pointer_cast(idx_granule); if (granule == nullptr) - { throw Exception("Granule has the wrong type", ErrorCodes::LOGICAL_ERROR); - } + auto annoy = granule->index; if (condition.getNumOfDimensions() != annoy->getNumOfDimensions()) - { throw Exception("The dimension of the space in the request (" + toString(condition.getNumOfDimensions()) + ") " + "does not match with the dimension in the index (" + toString(annoy->getNumOfDimensions()) + ")", ErrorCodes::INCORRECT_QUERY); - } /// neighbors contain indexes of dots which were closest to target vector std::vector neighbors; @@ -268,23 +255,25 @@ std::vector MergeTreeIndexConditionAnnoy::getUsefulRanges(MergeTreeIndex for (size_t i = 0; i < neighbors.size(); ++i) { if (comp_dist && distances[i] > comp_dist) - { continue; - } granule_numbers.insert(neighbors[i] / index_granularity); } std::vector result_vector; result_vector.reserve(granule_numbers.size()); for (auto granule_number : granule_numbers) - { result_vector.push_back(granule_number); - } return result_vector; } +MergeTreeIndexAnnoy::MergeTreeIndexAnnoy(const IndexDescription & index_, uint64_t number_of_trees_) + : IMergeTreeIndex(index_) + , number_of_trees(number_of_trees_) +{ +} + MergeTreeIndexGranulePtr MergeTreeIndexAnnoy::createIndexGranule() const { return std::make_shared(index.name, index.sample_block); @@ -307,6 +296,40 @@ MergeTreeIndexPtr annoyIndexCreator(const IndexDescription & index) return std::make_shared(index, param); } +static void assertIndexColumnsType(const Block & header) +{ + DataTypePtr column_data_type_ptr = header.getDataTypes()[0]; + + if (const auto * array_type = typeid_cast(column_data_type_ptr.get())) + { + TypeIndex nested_type_index = array_type->getNestedType()->getTypeId(); + if (!WhichDataType(nested_type_index).isFloat32()) + throw Exception( + ErrorCodes::ILLEGAL_COLUMN, + "Unexpected type {} of Annoy index. Only Array(Float32) and Tuple(Float32) are supported.", + column_data_type_ptr->getName()); + } + else if (const auto * tuple_type = typeid_cast(column_data_type_ptr.get())) + { + const DataTypes & nested_types = tuple_type->getElements(); + for (const auto & type : nested_types) + { + TypeIndex nested_type_index = type->getTypeId(); + if (!WhichDataType(nested_type_index).isFloat32()) + throw Exception( + ErrorCodes::ILLEGAL_COLUMN, + "Unexpected type {} of Annoy index. Only Array(Float32) and Tuple(Float32) are supported.", + column_data_type_ptr->getName()); + } + } + else + throw Exception( + ErrorCodes::ILLEGAL_COLUMN, + "Unexpected type {} of Annoy index. 
Only Array(Float32) and Tuple(Float32) are supported.", + column_data_type_ptr->getName()); + +} + void annoyIndexValidator(const IndexDescription & index, bool /* attach */) { if (index.arguments.size() != 1) @@ -317,6 +340,11 @@ void annoyIndexValidator(const IndexDescription & index, bool /* attach */) { throw Exception("Annoy index argument must be UInt64.", ErrorCodes::INCORRECT_QUERY); } + + if (index.column_names.size() != 1 || index.data_types.size() != 1) + throw Exception("Annoy indexes must be created on a single column", ErrorCodes::INCORRECT_NUMBER_OF_COLUMNS); + + assertIndexColumnsType(index.sample_block); } } diff --git a/src/Storages/MergeTree/MergeTreeIndexAnnoy.h b/src/Storages/MergeTree/MergeTreeIndexAnnoy.h index 85bbb0a1bd2..6a844947bd2 100644 --- a/src/Storages/MergeTree/MergeTreeIndexAnnoy.h +++ b/src/Storages/MergeTree/MergeTreeIndexAnnoy.h @@ -10,8 +10,6 @@ namespace DB { -namespace ANN = ApproximateNearestNeighbour; - // auxiliary namespace for working with spotify-annoy library // mainly for serialization and deserialization of the index namespace ApproximateNearestNeighbour @@ -33,7 +31,7 @@ namespace ApproximateNearestNeighbour struct MergeTreeIndexGranuleAnnoy final : public IMergeTreeIndexGranule { - using AnnoyIndex = ANN::AnnoyIndex<>; + using AnnoyIndex = ApproximateNearestNeighbour::AnnoyIndex<>; using AnnoyIndexPtr = std::shared_ptr; MergeTreeIndexGranuleAnnoy(const String & index_name_, const Block & index_sample_block_); @@ -57,7 +55,7 @@ struct MergeTreeIndexGranuleAnnoy final : public IMergeTreeIndexGranule struct MergeTreeIndexAggregatorAnnoy final : IMergeTreeIndexAggregator { - using AnnoyIndex = ANN::AnnoyIndex<>; + using AnnoyIndex = ApproximateNearestNeighbour::AnnoyIndex<>; using AnnoyIndexPtr = std::shared_ptr; MergeTreeIndexAggregatorAnnoy(const String & index_name_, const Block & index_sample_block, uint64_t number_of_trees); @@ -74,7 +72,7 @@ struct MergeTreeIndexAggregatorAnnoy final : IMergeTreeIndexAggregator }; -class MergeTreeIndexConditionAnnoy final : public ANN::IMergeTreeIndexConditionAnn +class MergeTreeIndexConditionAnnoy final : public ApproximateNearestNeighbour::IMergeTreeIndexConditionAnn { public: MergeTreeIndexConditionAnnoy( @@ -91,18 +89,14 @@ public: ~MergeTreeIndexConditionAnnoy() override = default; private: - ANN::ANNCondition condition; + ApproximateNearestNeighbour::ANNCondition condition; }; class MergeTreeIndexAnnoy : public IMergeTreeIndex { public: - MergeTreeIndexAnnoy(const IndexDescription & index_, uint64_t number_of_trees_) - : IMergeTreeIndex(index_) - , number_of_trees(number_of_trees_) - {} - + MergeTreeIndexAnnoy(const IndexDescription & index_, uint64_t number_of_trees_); ~MergeTreeIndexAnnoy() override = default; MergeTreeIndexGranulePtr createIndexGranule() const override; diff --git a/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp b/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp index 3dd0568107e..be7118066bb 100644 --- a/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp @@ -6,11 +6,13 @@ #include #include #include +#include #include #include #include #include #include +#include #include #include #include @@ -28,19 +30,7 @@ namespace ErrorCodes namespace { -PreparedSetKey getPreparedSetKey(const ASTPtr & node, const DataTypePtr & data_type) -{ - /// If the data type is tuple, let's try unbox once - if (node->as() || node->as()) - return PreparedSetKey::forSubquery(*node); - - if (const 
auto * date_type_tuple = typeid_cast(&*data_type)) - return PreparedSetKey::forLiteral(*node, date_type_tuple->getElements()); - - return PreparedSetKey::forLiteral(*node, DataTypes(1, data_type)); -} - -ColumnWithTypeAndName getPreparedSetInfo(const SetPtr & prepared_set) +ColumnWithTypeAndName getPreparedSetInfo(const ConstSetPtr & prepared_set) { if (prepared_set->getDataTypes().size() == 1) return {prepared_set->getSetElements()[0], prepared_set->getElementsTypes()[0], "dummy"}; @@ -110,8 +100,22 @@ MergeTreeIndexConditionBloomFilter::MergeTreeIndexConditionBloomFilter( const SelectQueryInfo & info_, ContextPtr context_, const Block & header_, size_t hash_functions_) : WithContext(context_), header(header_), query_info(info_), hash_functions(hash_functions_) { - auto atom_from_ast = [this](auto & node, auto, auto & constants, auto & out) { return traverseAtomAST(node, constants, out); }; - rpn = std::move(RPNBuilder(info_, getContext(), atom_from_ast).extractRPN()); + ASTPtr filter_node = buildFilterNode(query_info.query); + + if (!filter_node) + { + rpn.push_back(RPNElement::FUNCTION_UNKNOWN); + return; + } + + auto block_with_constants = KeyCondition::getBlockWithConstants(query_info.query, query_info.syntax_analyzer_result, context_); + RPNBuilder builder( + filter_node, + context_, + std::move(block_with_constants), + query_info.prepared_sets, + [&](const RPNBuilderTreeNode & node, RPNElement & out) { return extractAtomFromTree(node, out); }); + rpn = std::move(builder).extractRPN(); } bool MergeTreeIndexConditionBloomFilter::alwaysUnknownOrTrue() const @@ -235,12 +239,13 @@ bool MergeTreeIndexConditionBloomFilter::mayBeTrueOnGranule(const MergeTreeIndex return rpn_stack[0].can_be_true; } -bool MergeTreeIndexConditionBloomFilter::traverseAtomAST(const ASTPtr & node, Block & block_with_constants, RPNElement & out) +bool MergeTreeIndexConditionBloomFilter::extractAtomFromTree(const RPNBuilderTreeNode & node, RPNElement & out) { { Field const_value; DataTypePtr const_type; - if (KeyCondition::getConstant(node, block_with_constants, const_value, const_type)) + + if (node.tryGetConstant(const_value, const_type)) { if (const_value.getType() == Field::Types::UInt64) { @@ -262,56 +267,62 @@ bool MergeTreeIndexConditionBloomFilter::traverseAtomAST(const ASTPtr & node, Bl } } - return traverseFunction(node, block_with_constants, out, nullptr); + return traverseFunction(node, out, nullptr /*parent*/); } -bool MergeTreeIndexConditionBloomFilter::traverseFunction(const ASTPtr & node, Block & block_with_constants, RPNElement & out, const ASTPtr & parent) +bool MergeTreeIndexConditionBloomFilter::traverseFunction(const RPNBuilderTreeNode & node, RPNElement & out, const RPNBuilderTreeNode * parent) { bool maybe_useful = false; - if (const auto * function = node->as()) + if (node.isFunction()) { - if (!function->arguments) - return false; + const auto function = node.toFunctionNode(); + auto arguments_size = function.getArgumentsSize(); + auto function_name = function.getFunctionName(); - const ASTs & arguments = function->arguments->children; - for (const auto & arg : arguments) + for (size_t i = 0; i < arguments_size; ++i) { - if (traverseFunction(arg, block_with_constants, out, node)) + auto argument = function.getArgumentAt(i); + if (traverseFunction(argument, out, &node)) maybe_useful = true; } - if (arguments.size() != 2) + if (arguments_size != 2) return false; - if (functionIsInOrGlobalInOperator(function->name)) - { - auto prepared_set = getPreparedSet(arguments[1]); + auto lhs_argument = 
function.getArgumentAt(0); + auto rhs_argument = function.getArgumentAt(1); - if (prepared_set) + if (functionIsInOrGlobalInOperator(function_name)) + { + ConstSetPtr prepared_set = rhs_argument.tryGetPreparedSet(); + + if (prepared_set && prepared_set->hasExplicitSetElements()) { - if (traverseASTIn(function->name, arguments[0], prepared_set, out)) + const auto prepared_info = getPreparedSetInfo(prepared_set); + if (traverseTreeIn(function_name, lhs_argument, prepared_set, prepared_info.type, prepared_info.column, out)) maybe_useful = true; } } - else if (function->name == "equals" || - function->name == "notEquals" || - function->name == "has" || - function->name == "mapContains" || - function->name == "indexOf" || - function->name == "hasAny" || - function->name == "hasAll") + else if (function_name == "equals" || + function_name == "notEquals" || + function_name == "has" || + function_name == "mapContains" || + function_name == "indexOf" || + function_name == "hasAny" || + function_name == "hasAll") { Field const_value; DataTypePtr const_type; - if (KeyCondition::getConstant(arguments[1], block_with_constants, const_value, const_type)) + + if (rhs_argument.tryGetConstant(const_value, const_type)) { - if (traverseASTEquals(function->name, arguments[0], const_type, const_value, out, parent)) + if (traverseTreeEquals(function_name, lhs_argument, const_type, const_value, out, parent)) maybe_useful = true; } - else if (KeyCondition::getConstant(arguments[0], block_with_constants, const_value, const_type)) + else if (lhs_argument.tryGetConstant(const_value, const_type)) { - if (traverseASTEquals(function->name, arguments[1], const_type, const_value, out, parent)) + if (traverseTreeEquals(function_name, rhs_argument, const_type, const_value, out, parent)) maybe_useful = true; } } @@ -320,28 +331,20 @@ bool MergeTreeIndexConditionBloomFilter::traverseFunction(const ASTPtr & node, B return maybe_useful; } -bool MergeTreeIndexConditionBloomFilter::traverseASTIn( +bool MergeTreeIndexConditionBloomFilter::traverseTreeIn( const String & function_name, - const ASTPtr & key_ast, - const SetPtr & prepared_set, - RPNElement & out) -{ - const auto prepared_info = getPreparedSetInfo(prepared_set); - return traverseASTIn(function_name, key_ast, prepared_set, prepared_info.type, prepared_info.column, out); -} - -bool MergeTreeIndexConditionBloomFilter::traverseASTIn( - const String & function_name, - const ASTPtr & key_ast, - const SetPtr & prepared_set, + const RPNBuilderTreeNode & key_node, + const ConstSetPtr & prepared_set, const DataTypePtr & type, const ColumnPtr & column, RPNElement & out) { - if (header.has(key_ast->getColumnName())) + auto key_node_column_name = key_node.getColumnName(); + + if (header.has(key_node_column_name)) { size_t row_size = column->size(); - size_t position = header.getPositionByName(key_ast->getColumnName()); + size_t position = header.getPositionByName(key_node_column_name); const DataTypePtr & index_type = header.getByPosition(position).type; const auto & converted_column = castColumn(ColumnWithTypeAndName{column, type, ""}, index_type); out.predicate.emplace_back(std::make_pair(position, BloomFilterHash::hashWithColumn(index_type, converted_column, 0, row_size))); @@ -355,30 +358,33 @@ bool MergeTreeIndexConditionBloomFilter::traverseASTIn( return true; } - if (const auto * function = key_ast->as()) + if (key_node.isFunction()) { + auto key_node_function = key_node.toFunctionNode(); + auto key_node_function_name = key_node_function.getFunctionName(); + size_t 
key_node_function_arguments_size = key_node_function.getArgumentsSize(); + WhichDataType which(type); - if (which.isTuple() && function->name == "tuple") + if (which.isTuple() && key_node_function_name == "tuple") { const auto & tuple_column = typeid_cast(column.get()); const auto & tuple_data_type = typeid_cast(type.get()); - const ASTs & arguments = typeid_cast(*function->arguments).children; - if (tuple_data_type->getElements().size() != arguments.size() || tuple_column->getColumns().size() != arguments.size()) + if (tuple_data_type->getElements().size() != key_node_function_arguments_size || tuple_column->getColumns().size() != key_node_function_arguments_size) throw Exception("Illegal types of arguments of function " + function_name, ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); bool match_with_subtype = false; const auto & sub_columns = tuple_column->getColumns(); const auto & sub_data_types = tuple_data_type->getElements(); - for (size_t index = 0; index < arguments.size(); ++index) - match_with_subtype |= traverseASTIn(function_name, arguments[index], nullptr, sub_data_types[index], sub_columns[index], out); + for (size_t index = 0; index < key_node_function_arguments_size; ++index) + match_with_subtype |= traverseTreeIn(function_name, key_node_function.getArgumentAt(index), nullptr, sub_data_types[index], sub_columns[index], out); return match_with_subtype; } - if (function->name == "arrayElement") + if (key_node_function_name == "arrayElement") { /** Try to parse arrayElement for mapKeys index. * It is important to ignore keys like column_map['Key'] IN ('') because if key does not exists in map @@ -387,7 +393,6 @@ bool MergeTreeIndexConditionBloomFilter::traverseASTIn( * We cannot skip keys that does not exist in map if comparison is with default type value because * that way we skip necessary granules where map key does not exists. 
*/ - if (!prepared_set) return false; @@ -400,28 +405,26 @@ bool MergeTreeIndexConditionBloomFilter::traverseASTIn( if (set_contain_default_value) return false; - const auto * column_ast_identifier = function->arguments.get()->children[0].get()->as(); - if (!column_ast_identifier) - return false; - - const auto & col_name = column_ast_identifier->name(); - auto map_keys_index_column_name = fmt::format("mapKeys({})", col_name); - auto map_values_index_column_name = fmt::format("mapValues({})", col_name); + auto first_argument = key_node_function.getArgumentAt(0); + const auto column_name = first_argument.getColumnName(); + auto map_keys_index_column_name = fmt::format("mapKeys({})", column_name); + auto map_values_index_column_name = fmt::format("mapValues({})", column_name); if (header.has(map_keys_index_column_name)) { /// For mapKeys we serialize key argument with bloom filter - auto & argument = function->arguments.get()->children[1]; + auto second_argument = key_node_function.getArgumentAt(1); - if (const auto * literal = argument->as()) + Field constant_value; + DataTypePtr constant_type; + + if (second_argument.tryGetConstant(constant_value, constant_type)) { size_t position = header.getPositionByName(map_keys_index_column_name); const DataTypePtr & index_type = header.getByPosition(position).type; - - auto element_key = literal->value; const DataTypePtr actual_type = BloomFilter::getPrimitiveType(index_type); - out.predicate.emplace_back(std::make_pair(position, BloomFilterHash::hashWithField(actual_type.get(), element_key))); + out.predicate.emplace_back(std::make_pair(position, BloomFilterHash::hashWithField(actual_type.get(), constant_value))); } else { @@ -459,74 +462,97 @@ bool MergeTreeIndexConditionBloomFilter::traverseASTIn( } -static bool indexOfCanUseBloomFilter(const ASTPtr & parent) +static bool indexOfCanUseBloomFilter(const RPNBuilderTreeNode * parent) { if (!parent) return true; + if (!parent->isFunction()) + return false; + + auto function = parent->toFunctionNode(); + auto function_name = function.getFunctionName(); + /// `parent` is a function where `indexOf` is located. /// Example: `indexOf(arr, x) = 1`, parent is a function named `equals`. - if (const auto * function = parent->as()) + if (function_name == "and") { - if (function->name == "and") + return true; + } + else if (function_name == "equals" /// notEquals is not applicable + || function_name == "greater" || function_name == "greaterOrEquals" + || function_name == "less" || function_name == "lessOrEquals") + { + size_t function_arguments_size = function.getArgumentsSize(); + if (function_arguments_size != 2) + return false; + + /// We don't allow constant expressions like `indexOf(arr, x) = 1 + 0` but it's negligible. + + /// We should return true when the corresponding expression implies that the array contains the element. + /// Example: when `indexOf(arr, x)` > 10 is written, it means that arr definitely should contain the element + /// (at least at 11th position but it does not matter). + + bool reversed = false; + Field constant_value; + DataTypePtr constant_type; + + if (function.getArgumentAt(0).tryGetConstant(constant_value, constant_type)) { + reversed = true; + } + else if (function.getArgumentAt(1).tryGetConstant(constant_value, constant_type)) + { + } + else + { + return false; + } + + Field zero(0); + bool constant_equal_zero = applyVisitor(FieldVisitorAccurateEquals(), constant_value, zero); + + if (function_name == "equals" && !constant_equal_zero) + { + /// indexOf(...) 
= c, c != 0 return true; } - else if (function->name == "equals" /// notEquals is not applicable - || function->name == "greater" || function->name == "greaterOrEquals" - || function->name == "less" || function->name == "lessOrEquals") + else if (function_name == "notEquals" && constant_equal_zero) { - if (function->arguments->children.size() != 2) - return false; - - /// We don't allow constant expressions like `indexOf(arr, x) = 1 + 0` but it's negligible. - - /// We should return true when the corresponding expression implies that the array contains the element. - /// Example: when `indexOf(arr, x)` > 10 is written, it means that arr definitely should contain the element - /// (at least at 11th position but it does not matter). - - bool reversed = false; - const ASTLiteral * constant = nullptr; - - if (const ASTLiteral * left = function->arguments->children[0]->as()) - { - constant = left; - reversed = true; - } - else if (const ASTLiteral * right = function->arguments->children[1]->as()) - { - constant = right; - } - else - return false; - - Field zero(0); - return (function->name == "equals" /// indexOf(...) = c, c != 0 - && !applyVisitor(FieldVisitorAccurateEquals(), constant->value, zero)) - || (function->name == "notEquals" /// indexOf(...) != c, c = 0 - && applyVisitor(FieldVisitorAccurateEquals(), constant->value, zero)) - || (function->name == (reversed ? "less" : "greater") /// indexOf(...) > c, c >= 0 - && !applyVisitor(FieldVisitorAccurateLess(), constant->value, zero)) - || (function->name == (reversed ? "lessOrEquals" : "greaterOrEquals") /// indexOf(...) >= c, c > 0 - && applyVisitor(FieldVisitorAccurateLess(), zero, constant->value)); + /// indexOf(...) != c, c = 0 + return true; } + else if (function_name == (reversed ? "less" : "greater") && !applyVisitor(FieldVisitorAccurateLess(), constant_value, zero)) + { + /// indexOf(...) > c, c >= 0 + return true; + } + else if (function_name == (reversed ? "lessOrEquals" : "greaterOrEquals") && applyVisitor(FieldVisitorAccurateLess(), zero, constant_value)) + { + /// indexOf(...) 
>= c, c > 0 + return true; + } + + return false; } return false; } -bool MergeTreeIndexConditionBloomFilter::traverseASTEquals( +bool MergeTreeIndexConditionBloomFilter::traverseTreeEquals( const String & function_name, - const ASTPtr & key_ast, + const RPNBuilderTreeNode & key_node, const DataTypePtr & value_type, const Field & value_field, RPNElement & out, - const ASTPtr & parent) + const RPNBuilderTreeNode * parent) { - if (header.has(key_ast->getColumnName())) + auto key_column_name = key_node.getColumnName(); + + if (header.has(key_column_name)) { - size_t position = header.getPositionByName(key_ast->getColumnName()); + size_t position = header.getPositionByName(key_column_name); const DataTypePtr & index_type = header.getByPosition(position).type; const auto * array_type = typeid_cast(index_type.get()); @@ -602,13 +628,7 @@ bool MergeTreeIndexConditionBloomFilter::traverseASTEquals( if (function_name == "mapContains" || function_name == "has") { - const auto * key_ast_identifier = key_ast.get()->as(); - if (!key_ast_identifier) - return false; - - const auto & col_name = key_ast_identifier->name(); - auto map_keys_index_column_name = fmt::format("mapKeys({})", col_name); - + auto map_keys_index_column_name = fmt::format("mapKeys({})", key_column_name); if (!header.has(map_keys_index_column_name)) return false; @@ -629,29 +649,32 @@ bool MergeTreeIndexConditionBloomFilter::traverseASTEquals( return true; } - if (const auto * function = key_ast->as()) + if (key_node.isFunction()) { WhichDataType which(value_type); - if (which.isTuple() && function->name == "tuple") + auto key_node_function = key_node.toFunctionNode(); + auto key_node_function_name = key_node_function.getFunctionName(); + size_t key_node_function_arguments_size = key_node_function.getArgumentsSize(); + + if (which.isTuple() && key_node_function_name == "tuple") { const Tuple & tuple = value_field.get(); const auto * value_tuple_data_type = typeid_cast(value_type.get()); - const ASTs & arguments = typeid_cast(*function->arguments).children; - if (tuple.size() != arguments.size()) + if (tuple.size() != key_node_function_arguments_size) throw Exception("Illegal types of arguments of function " + function_name, ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); bool match_with_subtype = false; const DataTypes & subtypes = value_tuple_data_type->getElements(); for (size_t index = 0; index < tuple.size(); ++index) - match_with_subtype |= traverseASTEquals(function_name, arguments[index], subtypes[index], tuple[index], out, key_ast); + match_with_subtype |= traverseTreeEquals(function_name, key_node_function.getArgumentAt(index), subtypes[index], tuple[index], out, &key_node); return match_with_subtype; } - if (function->name == "arrayElement" && (function_name == "equals" || function_name == "notEquals")) + if (key_node_function_name == "arrayElement" && (function_name == "equals" || function_name == "notEquals")) { /** Try to parse arrayElement for mapKeys index. 
* It is important to ignore keys like column_map['Key'] = '' because if key does not exists in map @@ -663,27 +686,22 @@ bool MergeTreeIndexConditionBloomFilter::traverseASTEquals( if (value_field == value_type->getDefault()) return false; - const auto * column_ast_identifier = function->arguments.get()->children[0].get()->as(); - if (!column_ast_identifier) - return false; + auto first_argument = key_node_function.getArgumentAt(0); + const auto column_name = first_argument.getColumnName(); - const auto & col_name = column_ast_identifier->name(); - - auto map_keys_index_column_name = fmt::format("mapKeys({})", col_name); - auto map_values_index_column_name = fmt::format("mapValues({})", col_name); + auto map_keys_index_column_name = fmt::format("mapKeys({})", column_name); + auto map_values_index_column_name = fmt::format("mapValues({})", column_name); size_t position = 0; Field const_value = value_field; + DataTypePtr const_type; if (header.has(map_keys_index_column_name)) { position = header.getPositionByName(map_keys_index_column_name); + auto second_argument = key_node_function.getArgumentAt(1); - auto & argument = function->arguments.get()->children[1]; - - if (const auto * literal = argument->as()) - const_value = literal->value; - else + if (!second_argument.tryGetConstant(const_value, const_type)) return false; } else if (header.has(map_values_index_column_name)) @@ -708,23 +726,4 @@ bool MergeTreeIndexConditionBloomFilter::traverseASTEquals( return false; } -SetPtr MergeTreeIndexConditionBloomFilter::getPreparedSet(const ASTPtr & node) -{ - if (header.has(node->getColumnName())) - { - const auto & column_and_type = header.getByName(node->getColumnName()); - auto set_key = getPreparedSetKey(node, column_and_type.type); - if (auto prepared_set = query_info.prepared_sets->get(set_key)) - return prepared_set; - } - else - { - for (const auto & set : query_info.prepared_sets->getByTreeHash(node->getTreeHash())) - if (set->hasExplicitSetElements()) - return set; - } - - return DB::SetPtr(); -} - } diff --git a/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.h b/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.h index 27fd701c67b..5d7ea371a83 100644 --- a/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.h +++ b/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.h @@ -62,35 +62,27 @@ private: const size_t hash_functions; std::vector rpn; - SetPtr getPreparedSet(const ASTPtr & node); - bool mayBeTrueOnGranule(const MergeTreeIndexGranuleBloomFilter * granule) const; - bool traverseAtomAST(const ASTPtr & node, Block & block_with_constants, RPNElement & out); + bool extractAtomFromTree(const RPNBuilderTreeNode & node, RPNElement & out); - bool traverseFunction(const ASTPtr & node, Block & block_with_constants, RPNElement & out, const ASTPtr & parent); + bool traverseFunction(const RPNBuilderTreeNode & node, RPNElement & out, const RPNBuilderTreeNode * parent); - bool traverseASTIn( + bool traverseTreeIn( const String & function_name, - const ASTPtr & key_ast, - const SetPtr & prepared_set, - RPNElement & out); - - bool traverseASTIn( - const String & function_name, - const ASTPtr & key_ast, - const SetPtr & prepared_set, + const RPNBuilderTreeNode & key_node, + const ConstSetPtr & prepared_set, const DataTypePtr & type, const ColumnPtr & column, RPNElement & out); - bool traverseASTEquals( + bool traverseTreeEquals( const String & function_name, - const ASTPtr & key_ast, + const RPNBuilderTreeNode & key_node, const DataTypePtr & value_type, const Field & 
value_field, RPNElement & out, - const ASTPtr & parent); + const RPNBuilderTreeNode * parent); }; } diff --git a/src/Storages/MergeTree/MergeTreeIndexFullText.cpp b/src/Storages/MergeTree/MergeTreeIndexFullText.cpp index ff924290783..b96d40f5759 100644 --- a/src/Storages/MergeTree/MergeTreeIndexFullText.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexFullText.cpp @@ -11,9 +11,11 @@ #include #include #include +#include #include #include #include +#include #include #include @@ -148,13 +150,22 @@ MergeTreeConditionFullText::MergeTreeConditionFullText( , token_extractor(token_extactor_) , prepared_sets(query_info.prepared_sets) { - rpn = std::move( - RPNBuilder( - query_info, context, - [this] (const ASTPtr & node, ContextPtr /* context */, Block & block_with_constants, RPNElement & out) -> bool - { - return this->traverseAtomAST(node, block_with_constants, out); - }).extractRPN()); + ASTPtr filter_node = buildFilterNode(query_info.query); + + if (!filter_node) + { + rpn.push_back(RPNElement::FUNCTION_UNKNOWN); + return; + } + + auto block_with_constants = KeyCondition::getBlockWithConstants(query_info.query, query_info.syntax_analyzer_result, context); + RPNBuilder builder( + filter_node, + context, + std::move(block_with_constants), + query_info.prepared_sets, + [&](const RPNBuilderTreeNode & node, RPNElement & out) { return extractAtomFromTree(node, out); }); + rpn = std::move(builder).extractRPN(); } bool MergeTreeConditionFullText::alwaysUnknownOrTrue() const @@ -306,13 +317,13 @@ bool MergeTreeConditionFullText::getKey(const std::string & key_column_name, siz return true; } -bool MergeTreeConditionFullText::traverseAtomAST(const ASTPtr & node, Block & block_with_constants, RPNElement & out) +bool MergeTreeConditionFullText::extractAtomFromTree(const RPNBuilderTreeNode & node, RPNElement & out) { { Field const_value; DataTypePtr const_type; - if (KeyCondition::getConstant(node, block_with_constants, const_value, const_type)) + if (node.tryGetConstant(const_value, const_type)) { /// Check constant like in KeyCondition if (const_value.getType() == Field::Types::UInt64 @@ -329,53 +340,56 @@ bool MergeTreeConditionFullText::traverseAtomAST(const ASTPtr & node, Block & bl } } - if (const auto * function = node->as()) + if (node.isFunction()) { - if (!function->arguments) + auto function_node = node.toFunctionNode(); + auto function_name = function_node.getFunctionName(); + + size_t arguments_size = function_node.getArgumentsSize(); + if (arguments_size != 2) return false; - const ASTs & arguments = function->arguments->children; + auto left_argument = function_node.getArgumentAt(0); + auto right_argument = function_node.getArgumentAt(1); - if (arguments.size() != 2) - return false; - - if (functionIsInOrGlobalInOperator(function->name)) + if (functionIsInOrGlobalInOperator(function_name)) { - if (tryPrepareSetBloomFilter(arguments, out)) + if (tryPrepareSetBloomFilter(left_argument, right_argument, out)) { - if (function->name == "notIn") + if (function_name == "notIn") { out.function = RPNElement::FUNCTION_NOT_IN; return true; } - else if (function->name == "in") + else if (function_name == "in") { out.function = RPNElement::FUNCTION_IN; return true; } } } - else if (function->name == "equals" || - function->name == "notEquals" || - function->name == "has" || - function->name == "mapContains" || - function->name == "like" || - function->name == "notLike" || - function->name == "hasToken" || - function->name == "startsWith" || - function->name == "endsWith" || - function->name == 
"multiSearchAny") + else if (function_name == "equals" || + function_name == "notEquals" || + function_name == "has" || + function_name == "mapContains" || + function_name == "like" || + function_name == "notLike" || + function_name == "hasToken" || + function_name == "startsWith" || + function_name == "endsWith" || + function_name == "multiSearchAny") { Field const_value; DataTypePtr const_type; - if (KeyCondition::getConstant(arguments[1], block_with_constants, const_value, const_type)) + + if (right_argument.tryGetConstant(const_value, const_type)) { - if (traverseASTEquals(function->name, arguments[0], const_type, const_value, out)) + if (traverseTreeEquals(function_name, left_argument, const_type, const_value, out)) return true; } - else if (KeyCondition::getConstant(arguments[0], block_with_constants, const_value, const_type) && (function->name == "equals" || function->name == "notEquals")) + else if (left_argument.tryGetConstant(const_value, const_type) && (function_name == "equals" || function_name == "notEquals")) { - if (traverseASTEquals(function->name, arguments[1], const_type, const_value, out)) + if (traverseTreeEquals(function_name, right_argument, const_type, const_value, out)) return true; } } @@ -384,9 +398,9 @@ bool MergeTreeConditionFullText::traverseAtomAST(const ASTPtr & node, Block & bl return false; } -bool MergeTreeConditionFullText::traverseASTEquals( +bool MergeTreeConditionFullText::traverseTreeEquals( const String & function_name, - const ASTPtr & key_ast, + const RPNBuilderTreeNode & key_node, const DataTypePtr & value_type, const Field & value_field, RPNElement & out) @@ -397,13 +411,17 @@ bool MergeTreeConditionFullText::traverseASTEquals( Field const_value = value_field; + auto column_name = key_node.getColumnName(); size_t key_column_num = 0; - bool key_exists = getKey(key_ast->getColumnName(), key_column_num); - bool map_key_exists = getKey(fmt::format("mapKeys({})", key_ast->getColumnName()), key_column_num); + bool key_exists = getKey(column_name, key_column_num); + bool map_key_exists = getKey(fmt::format("mapKeys({})", column_name), key_column_num); - if (const auto * function = key_ast->as()) + if (key_node.isFunction()) { - if (function->name == "arrayElement") + auto key_function_node = key_node.toFunctionNode(); + auto key_function_node_function_name = key_function_node.getFunctionName(); + + if (key_function_node_function_name == "arrayElement") { /** Try to parse arrayElement for mapKeys index. 
* It is important to ignore keys like column_map['Key'] = '' because if key does not exists in map @@ -415,11 +433,8 @@ bool MergeTreeConditionFullText::traverseASTEquals( if (value_field == value_type->getDefault()) return false; - const auto * column_ast_identifier = function->arguments.get()->children[0].get()->as(); - if (!column_ast_identifier) - return false; - - const auto & map_column_name = column_ast_identifier->name(); + auto first_argument = key_function_node.getArgumentAt(0); + const auto map_column_name = first_argument.getColumnName(); size_t map_keys_key_column_num = 0; auto map_keys_index_column_name = fmt::format("mapKeys({})", map_column_name); @@ -431,12 +446,11 @@ bool MergeTreeConditionFullText::traverseASTEquals( if (map_keys_exists) { - auto & argument = function->arguments.get()->children[1]; + auto second_argument = key_function_node.getArgumentAt(1); + DataTypePtr const_type; - if (const auto * literal = argument->as()) + if (second_argument.tryGetConstant(const_value, const_type)) { - auto element_key = literal->value; - const_value = element_key; key_column_num = map_keys_key_column_num; key_exists = true; } @@ -567,23 +581,24 @@ bool MergeTreeConditionFullText::traverseASTEquals( } bool MergeTreeConditionFullText::tryPrepareSetBloomFilter( - const ASTs & args, + const RPNBuilderTreeNode & left_argument, + const RPNBuilderTreeNode & right_argument, RPNElement & out) { - const ASTPtr & left_arg = args[0]; - const ASTPtr & right_arg = args[1]; - std::vector key_tuple_mapping; DataTypes data_types; - const auto * left_arg_tuple = typeid_cast(left_arg.get()); - if (left_arg_tuple && left_arg_tuple->name == "tuple") + auto left_argument_function_node_optional = left_argument.toFunctionNodeOrNull(); + + if (left_argument_function_node_optional && left_argument_function_node_optional->getFunctionName() == "tuple") { - const auto & tuple_elements = left_arg_tuple->arguments->children; - for (size_t i = 0; i < tuple_elements.size(); ++i) + const auto & left_argument_function_node = *left_argument_function_node_optional; + size_t left_argument_function_node_arguments_size = left_argument_function_node.getArgumentsSize(); + + for (size_t i = 0; i < left_argument_function_node_arguments_size; ++i) { size_t key = 0; - if (getKey(tuple_elements[i]->getColumnName(), key)) + if (getKey(left_argument_function_node.getArgumentAt(i).getColumnName(), key)) { key_tuple_mapping.emplace_back(i, key); data_types.push_back(index_data_types[key]); @@ -593,7 +608,7 @@ bool MergeTreeConditionFullText::tryPrepareSetBloomFilter( else { size_t key = 0; - if (getKey(left_arg->getColumnName(), key)) + if (getKey(left_argument.getColumnName(), key)) { key_tuple_mapping.emplace_back(0, key); data_types.push_back(index_data_types[key]); @@ -603,19 +618,10 @@ bool MergeTreeConditionFullText::tryPrepareSetBloomFilter( if (key_tuple_mapping.empty()) return false; - PreparedSetKey set_key; - if (typeid_cast(right_arg.get()) || typeid_cast(right_arg.get())) - set_key = PreparedSetKey::forSubquery(*right_arg); - else - set_key = PreparedSetKey::forLiteral(*right_arg, data_types); - - auto prepared_set = prepared_sets->get(set_key); + auto prepared_set = right_argument.tryGetPreparedSet(data_types); if (!prepared_set) return false; - if (!prepared_set->hasExplicitSetElements()) - return false; - for (const auto & data_type : prepared_set->getDataTypes()) if (data_type->getTypeId() != TypeIndex::String && data_type->getTypeId() != TypeIndex::FixedString) return false; diff --git 
a/src/Storages/MergeTree/MergeTreeIndexFullText.h b/src/Storages/MergeTree/MergeTreeIndexFullText.h index bb4f52a463e..ad487816aef 100644 --- a/src/Storages/MergeTree/MergeTreeIndexFullText.h +++ b/src/Storages/MergeTree/MergeTreeIndexFullText.h @@ -122,17 +122,17 @@ private: using RPN = std::vector; - bool traverseAtomAST(const ASTPtr & node, Block & block_with_constants, RPNElement & out); + bool extractAtomFromTree(const RPNBuilderTreeNode & node, RPNElement & out); - bool traverseASTEquals( + bool traverseTreeEquals( const String & function_name, - const ASTPtr & key_ast, + const RPNBuilderTreeNode & key_node, const DataTypePtr & value_type, const Field & value_field, RPNElement & out); bool getKey(const std::string & key_column_name, size_t & key_column_num); - bool tryPrepareSetBloomFilter(const ASTs & args, RPNElement & out); + bool tryPrepareSetBloomFilter(const RPNBuilderTreeNode & left_argument, const RPNBuilderTreeNode & right_argument, RPNElement & out); static bool createFunctionEqualsCondition( RPNElement & out, const Field & value, const BloomFilterParameters & params, TokenExtractorPtr token_extractor); diff --git a/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.cpp b/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.cpp index 9c154f786f7..11e1f9efcc2 100644 --- a/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.cpp @@ -89,10 +89,10 @@ std::string MarkType::getFileExtension() const } -std::optional MergeTreeIndexGranularityInfo::getMarksExtensionFromFilesystem(const DataPartStoragePtr & data_part_storage) +std::optional MergeTreeIndexGranularityInfo::getMarksExtensionFromFilesystem(const IDataPartStorage & data_part_storage) { - if (data_part_storage->exists()) - for (auto it = data_part_storage->iterate(); it->isValid(); it->next()) + if (data_part_storage.exists()) + for (auto it = data_part_storage.iterate(); it->isValid(); it->next()) if (it->isFile()) if (std::string ext = fs::path(it->name()).extension(); MarkType::isMarkFileExtension(ext)) return ext; @@ -110,7 +110,7 @@ MergeTreeIndexGranularityInfo::MergeTreeIndexGranularityInfo(const MergeTreeData fixed_index_granularity = storage.getSettings()->index_granularity; } -void MergeTreeIndexGranularityInfo::changeGranularityIfRequired(const DataPartStoragePtr & data_part_storage) +void MergeTreeIndexGranularityInfo::changeGranularityIfRequired(const IDataPartStorage & data_part_storage) { auto mrk_ext = getMarksExtensionFromFilesystem(data_part_storage); if (mrk_ext && !MarkType(*mrk_ext).adaptive) diff --git a/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h b/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h index 883fe3c899e..aed3081d3d0 100644 --- a/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h +++ b/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h @@ -48,7 +48,7 @@ public: MergeTreeIndexGranularityInfo(MergeTreeDataPartType type_, bool is_adaptive_, size_t index_granularity_, size_t index_granularity_bytes_); - void changeGranularityIfRequired(const DataPartStoragePtr & data_part_storage); + void changeGranularityIfRequired(const IDataPartStorage & data_part_storage); String getMarksFilePath(const String & path_prefix) const { @@ -57,7 +57,7 @@ public: size_t getMarkSizeInBytes(size_t columns_num = 1) const; - static std::optional getMarksExtensionFromFilesystem(const DataPartStoragePtr & data_part_storage); + static std::optional getMarksExtensionFromFilesystem(const IDataPartStorage & data_part_storage); }; 
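The hunks above switch read-only helpers such as getMarksExtensionFromFilesystem and changeGranularityIfRequired from taking DataPartStoragePtr (a shared_ptr) to taking const IDataPartStorage &, so the signature itself states that the callee only inspects the storage and never shares ownership. A minimal standalone sketch of that pattern, using illustrative stand-in types rather than the ClickHouse classes:

#include <iostream>
#include <memory>
#include <optional>
#include <string>
#include <vector>

// Stand-in for a data part storage that merely lists file names.
struct Storage
{
    std::vector<std::string> files;
    bool exists() const { return !files.empty(); }
};

// Before: std::shared_ptr<const Storage> forced every caller to hold a shared_ptr.
// After: a const reference documents that the function only reads from the storage.
std::optional<std::string> findMarksExtension(const Storage & storage)
{
    if (!storage.exists())
        return std::nullopt;
    for (const auto & name : storage.files)
        if (auto pos = name.rfind(".mrk"); pos != std::string::npos)
            return name.substr(pos);
    return std::nullopt;
}

int main()
{
    auto storage = std::make_shared<Storage>(Storage{{"data.bin", "data.mrk2"}});
    // Ownership stays with the caller; the helper receives only a reference.
    if (auto extension = findMarksExtension(*storage))
        std::cout << "marks extension: " << *extension << '\n';
}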
constexpr inline auto getNonAdaptiveMrkSizeWide() { return sizeof(UInt64) * 2; } diff --git a/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp b/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp index b190ac2b2fd..43e655a4ee5 100644 --- a/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp @@ -211,11 +211,11 @@ bool MergeTreeIndexMinMax::mayBenefitFromIndexForIn(const ASTPtr & node) const return false; } -MergeTreeIndexFormat MergeTreeIndexMinMax::getDeserializedFormat(const DataPartStoragePtr & data_part_storage, const std::string & relative_path_prefix) const +MergeTreeIndexFormat MergeTreeIndexMinMax::getDeserializedFormat(const IDataPartStorage & data_part_storage, const std::string & relative_path_prefix) const { - if (data_part_storage->exists(relative_path_prefix + ".idx2")) + if (data_part_storage.exists(relative_path_prefix + ".idx2")) return {2, ".idx2"}; - else if (data_part_storage->exists(relative_path_prefix + ".idx")) + else if (data_part_storage.exists(relative_path_prefix + ".idx")) return {1, ".idx"}; return {0 /* unknown */, ""}; } diff --git a/src/Storages/MergeTree/MergeTreeIndexMinMax.h b/src/Storages/MergeTree/MergeTreeIndexMinMax.h index 0566a15d535..af420613855 100644 --- a/src/Storages/MergeTree/MergeTreeIndexMinMax.h +++ b/src/Storages/MergeTree/MergeTreeIndexMinMax.h @@ -83,7 +83,7 @@ public: bool mayBenefitFromIndexForIn(const ASTPtr & node) const override; const char* getSerializedFileExtension() const override { return ".idx2"; } - MergeTreeIndexFormat getDeserializedFormat(const DataPartStoragePtr & data_part_storage, const std::string & path_prefix) const override; /// NOLINT + MergeTreeIndexFormat getDeserializedFormat(const IDataPartStorage & data_part_storage, const std::string & path_prefix) const override; /// NOLINT }; } diff --git a/src/Storages/MergeTree/MergeTreeIndexReader.cpp b/src/Storages/MergeTree/MergeTreeIndexReader.cpp index 33106f7ab64..7d7024a8ac2 100644 --- a/src/Storages/MergeTree/MergeTreeIndexReader.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexReader.cpp @@ -20,7 +20,7 @@ std::unique_ptr makeIndexReader( auto * load_marks_threadpool = settings.read_settings.load_marks_asynchronously ? 
&context->getLoadMarksThreadpool() : nullptr; return std::make_unique( - part->data_part_storage, + part->getDataPartStoragePtr(), index->getFileName(), extension, marks_count, all_mark_ranges, std::move(settings), mark_cache, uncompressed_cache, @@ -44,7 +44,7 @@ MergeTreeIndexReader::MergeTreeIndexReader( MergeTreeReaderSettings settings) : index(index_) { - auto index_format = index->getDeserializedFormat(part_->data_part_storage, index->getFileName()); + auto index_format = index->getDeserializedFormat(part_->getDataPartStorage(), index->getFileName()); stream = makeIndexReader( index_format.extension, diff --git a/src/Storages/MergeTree/MergeTreeIndexSet.cpp b/src/Storages/MergeTree/MergeTreeIndexSet.cpp index 3c31deda823..0e15f2c4cb6 100644 --- a/src/Storages/MergeTree/MergeTreeIndexSet.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexSet.cpp @@ -74,8 +74,9 @@ void MergeTreeIndexGranuleSet::serializeBinary(WriteBuffer & ostr) const auto serialization = type->getDefaultSerialization(); ISerialization::SerializeBinaryBulkStatePtr state; - serialization->serializeBinaryBulkStatePrefix(settings, state); - serialization->serializeBinaryBulkWithMultipleStreams(*block.getByPosition(i).column, 0, size(), settings, state); + const auto & column = *block.getByPosition(i).column; + serialization->serializeBinaryBulkStatePrefix(column, settings, state); + serialization->serializeBinaryBulkWithMultipleStreams(column, 0, size(), settings, state); serialization->serializeBinaryBulkStateSuffix(settings, state); } } diff --git a/src/Storages/MergeTree/MergeTreeIndexUtils.cpp b/src/Storages/MergeTree/MergeTreeIndexUtils.cpp new file mode 100644 index 00000000000..652f0c853d4 --- /dev/null +++ b/src/Storages/MergeTree/MergeTreeIndexUtils.cpp @@ -0,0 +1,47 @@ +#include + +#include +#include +#include + +namespace DB +{ + +ASTPtr buildFilterNode(const ASTPtr & select_query, ASTs additional_filters) +{ + auto & select_query_typed = select_query->as(); + + ASTs filters; + if (select_query_typed.where()) + filters.push_back(select_query_typed.where()); + + if (select_query_typed.prewhere()) + filters.push_back(select_query_typed.prewhere()); + + filters.insert(filters.end(), additional_filters.begin(), additional_filters.end()); + + if (filters.empty()) + return nullptr; + + ASTPtr filter_node; + + if (filters.size() == 1) + { + filter_node = filters.front(); + } + else + { + auto function = std::make_shared(); + + function->name = "and"; + function->arguments = std::make_shared(); + function->children.push_back(function->arguments); + function->arguments->children = std::move(filters); + + filter_node = std::move(function); + } + + return filter_node; +} + +} diff --git a/src/Storages/MergeTree/MergeTreeIndexUtils.h b/src/Storages/MergeTree/MergeTreeIndexUtils.h new file mode 100644 index 00000000000..6ba9725b564 --- /dev/null +++ b/src/Storages/MergeTree/MergeTreeIndexUtils.h @@ -0,0 +1,13 @@ +#pragma once + +#include + +namespace DB +{ + +/** Build AST filter node for index analysis from WHERE and PREWHERE sections of select query and additional filters. + * If select query does not have WHERE and PREWHERE and additional filters are empty null is returned. 
+ */ +ASTPtr buildFilterNode(const ASTPtr & select_query, ASTs additional_filters = {}); + +} diff --git a/src/Storages/MergeTree/MergeTreeIndices.h b/src/Storages/MergeTree/MergeTreeIndices.h index 14002534c94..6a671c31944 100644 --- a/src/Storages/MergeTree/MergeTreeIndices.h +++ b/src/Storages/MergeTree/MergeTreeIndices.h @@ -148,9 +148,9 @@ struct IMergeTreeIndex /// Returns extension for deserialization. /// /// Return pair. - virtual MergeTreeIndexFormat getDeserializedFormat(const DataPartStoragePtr & data_part_storage, const std::string & relative_path_prefix) const + virtual MergeTreeIndexFormat getDeserializedFormat(const IDataPartStorage & data_part_storage, const std::string & relative_path_prefix) const { - if (data_part_storage->exists(relative_path_prefix + ".idx")) + if (data_part_storage.exists(relative_path_prefix + ".idx")) return {1, ".idx"}; return {0 /*unknown*/, ""}; } diff --git a/src/Storages/MergeTree/MergeTreePartition.cpp b/src/Storages/MergeTree/MergeTreePartition.cpp index 4ea6ec11ecc..10f5cc95baf 100644 --- a/src/Storages/MergeTree/MergeTreePartition.cpp +++ b/src/Storages/MergeTree/MergeTreePartition.cpp @@ -382,20 +382,20 @@ void MergeTreePartition::load(const MergeTreeData & storage, const PartMetadataM partition_key_sample.getByPosition(i).type->getDefaultSerialization()->deserializeBinary(value[i], *file); } -std::unique_ptr MergeTreePartition::store(const MergeTreeData & storage, const DataPartStorageBuilderPtr & data_part_storage_builder, MergeTreeDataPartChecksums & checksums) const +std::unique_ptr MergeTreePartition::store(const MergeTreeData & storage, IDataPartStorage & data_part_storage, MergeTreeDataPartChecksums & checksums) const { auto metadata_snapshot = storage.getInMemoryMetadataPtr(); const auto & context = storage.getContext(); const auto & partition_key_sample = adjustPartitionKey(metadata_snapshot, storage.getContext()).sample_block; - return store(partition_key_sample, data_part_storage_builder, checksums, context->getWriteSettings()); + return store(partition_key_sample, data_part_storage, checksums, context->getWriteSettings()); } -std::unique_ptr MergeTreePartition::store(const Block & partition_key_sample, const DataPartStorageBuilderPtr & data_part_storage_builder, MergeTreeDataPartChecksums & checksums, const WriteSettings & settings) const +std::unique_ptr MergeTreePartition::store(const Block & partition_key_sample, IDataPartStorage & data_part_storage, MergeTreeDataPartChecksums & checksums, const WriteSettings & settings) const { if (!partition_key_sample) return nullptr; - auto out = data_part_storage_builder->writeFile("partition.dat", DBMS_DEFAULT_BUFFER_SIZE, settings); + auto out = data_part_storage.writeFile("partition.dat", DBMS_DEFAULT_BUFFER_SIZE, settings); HashingWriteBuffer out_hashing(*out); for (size_t i = 0; i < value.size(); ++i) { diff --git a/src/Storages/MergeTree/MergeTreePartition.h b/src/Storages/MergeTree/MergeTreePartition.h index 6394641dfa3..78b141f26ec 100644 --- a/src/Storages/MergeTree/MergeTreePartition.h +++ b/src/Storages/MergeTree/MergeTreePartition.h @@ -15,10 +15,10 @@ class MergeTreeData; struct FormatSettings; struct MergeTreeDataPartChecksums; struct StorageInMemoryMetadata; -class IDataPartStorageBuilder; +class IDataPartStorage; using StorageMetadataPtr = std::shared_ptr; -using DataPartStorageBuilderPtr = std::shared_ptr; +using MutableDataPartStoragePtr = std::shared_ptr; /// This class represents a partition value of a single part and encapsulates its loading/storing logic. 
struct MergeTreePartition @@ -44,8 +44,8 @@ public: /// Store functions return write buffer with written but not finalized data. /// User must call finish() for returned object. - [[nodiscard]] std::unique_ptr store(const MergeTreeData & storage, const DataPartStorageBuilderPtr & data_part_storage_builder, MergeTreeDataPartChecksums & checksums) const; - [[nodiscard]] std::unique_ptr store(const Block & partition_key_sample, const DataPartStorageBuilderPtr & data_part_storage_builder, MergeTreeDataPartChecksums & checksums, const WriteSettings & settings) const; + [[nodiscard]] std::unique_ptr store(const MergeTreeData & storage, IDataPartStorage & data_part_storage, MergeTreeDataPartChecksums & checksums) const; + [[nodiscard]] std::unique_ptr store(const Block & partition_key_sample, IDataPartStorage & data_part_storage, MergeTreeDataPartChecksums & checksums, const WriteSettings & settings) const; void assign(const MergeTreePartition & other) { value = other.value; } diff --git a/src/Storages/MergeTree/MergeTreePartsMover.cpp b/src/Storages/MergeTree/MergeTreePartsMover.cpp index afeeacbe5d6..b618b068769 100644 --- a/src/Storages/MergeTree/MergeTreePartsMover.cpp +++ b/src/Storages/MergeTree/MergeTreePartsMover.cpp @@ -100,7 +100,6 @@ bool MergeTreePartsMover::selectPartsForMove( return false; std::unordered_map need_to_move; - std::unordered_set need_to_move_disks; const auto policy = data->getStoragePolicy(); const auto & volumes = policy->getVolumes(); @@ -115,10 +114,7 @@ bool MergeTreePartsMover::selectPartsForMove( UInt64 unreserved_space = disk->getUnreservedSpace(); if (unreserved_space < required_maximum_available_space && !disk->isBroken()) - { need_to_move.emplace(disk, required_maximum_available_space - unreserved_space); - need_to_move_disks.emplace(disk); - } } } } @@ -140,8 +136,16 @@ bool MergeTreePartsMover::selectPartsForMove( auto ttl_entry = selectTTLDescriptionForTTLInfos(metadata_snapshot->getMoveTTLs(), part->ttl_infos.moves_ttl, time_of_move, true); auto to_insert = need_to_move.end(); - if (auto disk_it = part->data_part_storage->isStoredOnDisk(need_to_move_disks); disk_it != need_to_move_disks.end()) - to_insert = need_to_move.find(*disk_it); + auto part_disk_name = part->getDataPartStorage().getDiskName(); + + for (auto it = need_to_move.begin(); it != need_to_move.end(); ++it) + { + if (it->first->getName() == part_disk_name) + { + to_insert = it; + break; + } + } ReservationPtr reservation; if (ttl_entry) @@ -158,9 +162,8 @@ bool MergeTreePartsMover::selectPartsForMove( /// In order to not over-move, we need to "release" required space on this disk, /// possibly to zero. 
if (to_insert != need_to_move.end()) - { to_insert->second.decreaseRequiredSizeAndRemoveRedundantParts(part->getBytesOnDisk()); - } + ++parts_to_move_by_ttl_rules; parts_to_move_total_size_bytes += part->getBytesOnDisk(); } @@ -173,7 +176,7 @@ bool MergeTreePartsMover::selectPartsForMove( for (auto && move : need_to_move) { - auto min_volume_index = policy->getVolumeIndexByDisk(move.first) + 1; + auto min_volume_index = policy->getVolumeIndexByDiskName(move.first->getName()) + 1; for (auto && part : move.second.getAccumulatedParts()) { auto reservation = policy->reserve(part->getBytesOnDisk(), min_volume_index); @@ -199,7 +202,7 @@ bool MergeTreePartsMover::selectPartsForMove( return false; } -MergeTreeData::DataPartPtr MergeTreePartsMover::clonePart(const MergeTreeMoveEntry & moving_part) const +MergeTreeMutableDataPartPtr MergeTreePartsMover::clonePart(const MergeTreeMoveEntry & moving_part) const { if (moves_blocker.isCancelled()) throw Exception("Cancelled moving parts.", ErrorCodes::ABORTED); @@ -207,16 +210,15 @@ MergeTreeData::DataPartPtr MergeTreePartsMover::clonePart(const MergeTreeMoveEnt auto settings = data->getSettings(); auto part = moving_part.part; auto disk = moving_part.reserved_space->getDisk(); - LOG_DEBUG(log, "Cloning part {} from '{}' to '{}'", part->name, part->data_part_storage->getDiskName(), disk->getName()); - - DataPartStoragePtr cloned_part_storage; + LOG_DEBUG(log, "Cloning part {} from '{}' to '{}'", part->name, part->getDataPartStorage().getDiskName(), disk->getName()); + MutableDataPartStoragePtr cloned_part_storage; if (disk->supportZeroCopyReplication() && settings->allow_remote_fs_zero_copy_replication) { /// Try zero-copy replication and fallback to default copy if it's not possible moving_part.part->assertOnDisk(); String path_to_clone = fs::path(data->getRelativeDataPath()) / MergeTreeData::MOVING_DIR_NAME / ""; - String relative_path = part->data_part_storage->getPartDirectory(); + String relative_path = part->getDataPartStorage().getPartDirectory(); if (disk->exists(path_to_clone + relative_path)) { LOG_WARNING(log, "Path {} already exists. 
Will remove it and clone again.", fullPath(disk, path_to_clone + relative_path)); @@ -230,7 +232,7 @@ MergeTreeData::DataPartPtr MergeTreePartsMover::clonePart(const MergeTreeMoveEnt if (!cloned_part_storage) { LOG_INFO(log, "Part {} was not fetched, we are the first who move it to another disk, so we will copy it", part->name); - cloned_part_storage = part->data_part_storage->clone(path_to_clone, part->data_part_storage->getPartDirectory(), disk, log); + cloned_part_storage = part->getDataPartStorage().clonePart(path_to_clone, part->getDataPartStorage().getPartDirectory(), disk, log); } } else @@ -238,18 +240,17 @@ MergeTreeData::DataPartPtr MergeTreePartsMover::clonePart(const MergeTreeMoveEnt cloned_part_storage = part->makeCloneOnDisk(disk, MergeTreeData::MOVING_DIR_NAME); } - MergeTreeData::MutableDataPartPtr cloned_part = data->createPart(part->name, cloned_part_storage); - LOG_TRACE(log, "Part {} was cloned to {}", part->name, cloned_part->data_part_storage->getFullPath()); + auto cloned_part = data->createPart(part->name, cloned_part_storage); + LOG_TRACE(log, "Part {} was cloned to {}", part->name, cloned_part->getDataPartStorage().getFullPath()); cloned_part->loadColumnsChecksumsIndexes(true, true); cloned_part->loadVersionMetadata(); - cloned_part->modification_time = cloned_part->data_part_storage->getLastModified().epochTime(); + cloned_part->modification_time = cloned_part->getDataPartStorage().getLastModified().epochTime(); return cloned_part; - } -void MergeTreePartsMover::swapClonedPart(const MergeTreeData::DataPartPtr & cloned_part) const +void MergeTreePartsMover::swapClonedPart(const MergeTreeMutableDataPartPtr & cloned_part) const { if (moves_blocker.isCancelled()) throw Exception("Cancelled moving parts.", ErrorCodes::ABORTED); @@ -259,20 +260,17 @@ void MergeTreePartsMover::swapClonedPart(const MergeTreeData::DataPartPtr & clon /// It's ok, because we don't block moving parts for merges or mutations if (!active_part || active_part->name != cloned_part->name) { - LOG_INFO(log, "Failed to swap {}. Active part doesn't exist. Possible it was merged or mutated. Will remove copy on path '{}'.", cloned_part->name, cloned_part->data_part_storage->getFullPath()); + LOG_INFO(log, "Failed to swap {}. Active part doesn't exist. Possible it was merged or mutated. Will remove copy on path '{}'.", cloned_part->name, cloned_part->getDataPartStorage().getFullPath()); return; } - auto builder = cloned_part->data_part_storage->getBuilder(); /// Don't remove new directory but throw an error because it may contain part which is currently in use. - cloned_part->renameTo(active_part->name, false, builder); - - builder->commit(); + cloned_part->renameTo(active_part->name, false); /// TODO what happen if server goes down here? data->swapActivePart(cloned_part); - LOG_TRACE(log, "Part {} was moved to {}", cloned_part->name, cloned_part->data_part_storage->getFullPath()); + LOG_TRACE(log, "Part {} was moved to {}", cloned_part->name, cloned_part->getDataPartStorage().getFullPath()); } } diff --git a/src/Storages/MergeTree/MergeTreePartsMover.h b/src/Storages/MergeTree/MergeTreePartsMover.h index 6ad658c2cb3..0266b2daa46 100644 --- a/src/Storages/MergeTree/MergeTreePartsMover.h +++ b/src/Storages/MergeTree/MergeTreePartsMover.h @@ -50,14 +50,14 @@ public: const std::lock_guard & moving_parts_lock); /// Copies part to selected reservation in detached folder. Throws exception if part already exists. 
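The MergeTreePartsMover changes above return the clone as MergeTreeMutableDataPartPtr because a freshly cloned part is still renamed and has its metadata loaded before it is swapped in; only once published does it behave as an immutable active part. A short standalone sketch of that const-correctness split, with illustrative types rather than the ClickHouse ones:

#include <iostream>
#include <memory>
#include <string>

struct Part
{
    std::string name;
    void renameTo(const std::string & new_name) { name = new_name; }
};

using PartPtr = std::shared_ptr<const Part>;   // read-only view, as used for active parts
using MutablePartPtr = std::shared_ptr<Part>;  // result of clone/create, still being prepared

MutablePartPtr clonePart(const Part & source)
{
    // The clone is private to the caller until it is committed, so returning
    // a mutable pointer is safe and lets the caller finish preparing it.
    return std::make_shared<Part>(Part{source.name + "_clone"});
}

int main()
{
    Part active{"all_1_1_0"};
    MutablePartPtr cloned = clonePart(active);
    cloned->renameTo(active.name);   // allowed: the clone is still mutable
    PartPtr published = cloned;      // becomes a read-only view once shared
    std::cout << published->name << '\n';
}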
- MergeTreeDataPartPtr clonePart(const MergeTreeMoveEntry & moving_part) const; + MergeTreeMutableDataPartPtr clonePart(const MergeTreeMoveEntry & moving_part) const; /// Replaces cloned part from detached directory into active data parts set. /// Replacing part changes state to DeleteOnDestroy and will be removed from disk after destructor of ///IMergeTreeDataPart called. If replacing part doesn't exists or not active (committed) than /// cloned part will be removed and log message will be reported. It may happen in case of concurrent /// merge or mutation. - void swapClonedPart(const MergeTreeDataPartPtr & cloned_parts) const; + void swapClonedPart(const MergeTreeMutableDataPartPtr & cloned_parts) const; /// Can stop background moves and moves from queries ActionBlocker moves_blocker; diff --git a/src/Storages/MergeTree/MergeTreeReadPool.cpp b/src/Storages/MergeTree/MergeTreeReadPool.cpp index 3f51673a6b1..ca9cde0ae61 100644 --- a/src/Storages/MergeTree/MergeTreeReadPool.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPool.cpp @@ -263,7 +263,7 @@ void MergeTreeReadPool::fillPerThreadInfo( { PartInfo part_info{parts[i], per_part_sum_marks[i], i}; if (parts[i].data_part->isStoredOnDisk()) - parts_per_disk[parts[i].data_part->data_part_storage->getDiskName()].push_back(std::move(part_info)); + parts_per_disk[parts[i].data_part->getDataPartStorage().getDiskName()].push_back(std::move(part_info)); else parts_per_disk[""].push_back(std::move(part_info)); } diff --git a/src/Storages/MergeTree/MergeTreeReaderCompact.cpp b/src/Storages/MergeTree/MergeTreeReaderCompact.cpp index 4801c9a4058..b0488d29f8e 100644 --- a/src/Storages/MergeTree/MergeTreeReaderCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeReaderCompact.cpp @@ -59,13 +59,15 @@ MergeTreeReaderCompact::MergeTreeReaderCompact( throw Exception(ErrorCodes::CANNOT_READ_ALL_DATA, "Cannot read to empty buffer."); const String path = MergeTreeDataPartCompact::DATA_FILE_NAME_WITH_EXTENSION; + auto data_part_storage = data_part_info_for_read->getDataPartStorage(); + if (uncompressed_cache) { auto buffer = std::make_unique( - std::string(fs::path(data_part_info_for_read->getDataPartStorage()->getFullPath()) / path), - [this, path]() + std::string(fs::path(data_part_storage->getFullPath()) / path), + [this, path, data_part_storage]() { - return data_part_info_for_read->getDataPartStorage()->readFile( + return data_part_storage->readFile( path, settings.read_settings, std::nullopt, std::nullopt); @@ -87,7 +89,7 @@ MergeTreeReaderCompact::MergeTreeReaderCompact( { auto buffer = std::make_unique( - data_part_info_for_read->getDataPartStorage()->readFile( + data_part_storage->readFile( path, settings.read_settings, std::nullopt, std::nullopt), diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index 59cbae3f914..2490eb77772 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -38,14 +38,6 @@ MergeTreeSelectProcessor::MergeTreeSelectProcessor( has_limit_below_one_block(has_limit_below_one_block_), total_rows(data_part->index_granularity.getRowsCountInRanges(all_mark_ranges)) { - /// Actually it means that parallel reading from replicas enabled - /// and we have to collaborate with initiator. - /// In this case we won't set approximate rows, because it will be accounted multiple times. 
- /// Also do not count amount of read rows if we read in order of sorting key, - /// because we don't know actual amount of read rows in case when limit is set. - if (!extension_.has_value() && !reader_settings.read_in_order) - addTotalRowsApprox(total_rows); - ordered_names = header_without_virtual_columns.getNames(); } diff --git a/src/Storages/MergeTree/MergeTreeSettings.h b/src/Storages/MergeTree/MergeTreeSettings.h index a0db39a97f1..3fecb85f484 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.h +++ b/src/Storages/MergeTree/MergeTreeSettings.h @@ -62,6 +62,7 @@ struct Settings; M(UInt64, merge_tree_clear_old_temporary_directories_interval_seconds, 60, "The period of executing the clear old temporary directories operation in background.", 0) \ M(UInt64, merge_tree_clear_old_parts_interval_seconds, 1, "The period of executing the clear old parts operation in background.", 0) \ M(UInt64, merge_tree_clear_old_broken_detached_parts_ttl_timeout_seconds, 1ULL * 3600 * 24 * 30, "Remove old broken detached parts in the background if they remained intouched for a specified by this setting period of time.", 0) \ + M(UInt64, min_age_to_force_merge_seconds, 0, "If all parts in a certain range are older than this value, range will be always eligible for merging. Set to 0 to disable.", 0) \ M(UInt64, merge_tree_enable_clear_old_broken_detached, false, "Enable clearing old broken detached parts operation in background.", 0) \ M(Bool, remove_rolled_back_parts_immediately, 1, "Setting for an incomplete experimental feature.", 0) \ \ diff --git a/src/Storages/MergeTree/MergeTreeSink.cpp b/src/Storages/MergeTree/MergeTreeSink.cpp index 5d00db861a8..13a72c24c59 100644 --- a/src/Storages/MergeTree/MergeTreeSink.cpp +++ b/src/Storages/MergeTree/MergeTreeSink.cpp @@ -1,8 +1,8 @@ #include #include #include -#include #include +#include namespace ProfileEvents { @@ -56,8 +56,9 @@ struct MergeTreeSink::DelayedChunk void MergeTreeSink::consume(Chunk chunk) { auto block = getHeader().cloneWithColumns(chunk.detachColumns()); + if (!storage_snapshot->object_columns.empty()) + convertDynamicColumnsToTuples(block, storage_snapshot); - deduceTypesOfObjectColumns(storage_snapshot, block); auto part_blocks = storage.writer.splitBlockIntoParts(block, max_parts_per_block, metadata_snapshot, context); using DelayedPartitions = std::vector; @@ -81,7 +82,7 @@ void MergeTreeSink::consume(Chunk chunk) if (!temp_part.part) continue; - if (!support_parallel_write && temp_part.part->data_part_storage->supportParallelWrite()) + if (!support_parallel_write && temp_part.part->getDataPartStorage().supportParallelWrite()) support_parallel_write = true; if (storage.getDeduplicationLog()) @@ -160,7 +161,7 @@ void MergeTreeSink::finishDelayedChunk() } } - added = storage.renameTempPartAndAdd(part, transaction, partition.temp_part.builder, lock); + added = storage.renameTempPartAndAdd(part, transaction, lock); transaction.commit(&lock); } diff --git a/src/Storages/MergeTree/MergeTreeWriteAheadLog.cpp b/src/Storages/MergeTree/MergeTreeWriteAheadLog.cpp index 4735eae8fdd..b3625ba8e93 100644 --- a/src/Storages/MergeTree/MergeTreeWriteAheadLog.cpp +++ b/src/Storages/MergeTree/MergeTreeWriteAheadLog.cpp @@ -150,7 +150,6 @@ MergeTreeData::MutableDataPartsVector MergeTreeWriteAheadLog::restore( while (!in->eof()) { MergeTreeData::MutableDataPartPtr part; - DataPartStorageBuilderPtr data_part_storage_builder; UInt8 version; String part_name; Block block; @@ -177,7 +176,6 @@ MergeTreeData::MutableDataPartsVector 
MergeTreeWriteAheadLog::restore( { auto single_disk_volume = std::make_shared("volume_" + part_name, disk, 0); auto data_part_storage = std::make_shared(single_disk_volume, storage.getRelativeDataPath(), part_name); - data_part_storage_builder = std::make_shared(single_disk_volume, storage.getRelativeDataPath(), part_name); part = storage.createPart( part_name, @@ -222,7 +220,6 @@ MergeTreeData::MutableDataPartsVector MergeTreeWriteAheadLog::restore( { MergedBlockOutputStream part_out( part, - data_part_storage_builder, metadata_snapshot, block.getNamesAndTypesList(), {}, @@ -240,11 +237,12 @@ MergeTreeData::MutableDataPartsVector MergeTreeWriteAheadLog::restore( for (const auto & projection : metadata_snapshot->getProjections()) { auto projection_block = projection.calculate(block, context); - auto temp_part = MergeTreeDataWriter::writeInMemoryProjectionPart(storage, log, projection_block, projection, data_part_storage_builder, part.get()); + auto temp_part = MergeTreeDataWriter::writeProjectionPart(storage, log, projection_block, projection, part.get()); temp_part.finalize(); if (projection_block.rows()) part->addProjectionPart(projection.name, std::move(temp_part.part)); } + part_out.finalizePart(part, false); min_block_number = std::min(min_block_number, part->info.min_block); diff --git a/src/Storages/MergeTree/MergeType.cpp b/src/Storages/MergeTree/MergeType.cpp index 4b03f5ab57c..045114578d0 100644 --- a/src/Storages/MergeTree/MergeType.cpp +++ b/src/Storages/MergeTree/MergeType.cpp @@ -10,7 +10,7 @@ namespace ErrorCodes extern const int NOT_IMPLEMENTED; } -MergeType checkAndGetMergeType(UInt64 merge_type) +MergeType checkAndGetMergeType(UInt32 merge_type) { if (auto maybe_merge_type = magic_enum::enum_cast(merge_type)) return *maybe_merge_type; diff --git a/src/Storages/MergeTree/MergeType.h b/src/Storages/MergeTree/MergeType.h index fad1ba33e3e..ce9a40c5931 100644 --- a/src/Storages/MergeTree/MergeType.h +++ b/src/Storages/MergeTree/MergeType.h @@ -22,7 +22,7 @@ enum class MergeType }; /// Check parsed merge_type from raw int and get enum value. 
-MergeType checkAndGetMergeType(UInt64 merge_type); +MergeType checkAndGetMergeType(UInt32 merge_type); /// Check this merge assigned with TTL bool isTTLMergeType(MergeType merge_type); diff --git a/src/Storages/MergeTree/MergedBlockOutputStream.cpp b/src/Storages/MergeTree/MergedBlockOutputStream.cpp index 269a78977ad..991a8d359a8 100644 --- a/src/Storages/MergeTree/MergedBlockOutputStream.cpp +++ b/src/Storages/MergeTree/MergedBlockOutputStream.cpp @@ -14,8 +14,7 @@ namespace ErrorCodes MergedBlockOutputStream::MergedBlockOutputStream( - const MergeTreeDataPartPtr & data_part, - DataPartStorageBuilderPtr data_part_storage_builder_, + const MergeTreeMutableDataPartPtr & data_part, const StorageMetadataPtr & metadata_snapshot_, const NamesAndTypesList & columns_list_, const MergeTreeIndices & skip_indices, @@ -24,7 +23,7 @@ MergedBlockOutputStream::MergedBlockOutputStream( bool reset_columns_, bool blocks_are_granules_size, const WriteSettings & write_settings_) - : IMergedBlockOutputStream(std::move(data_part_storage_builder_), data_part, metadata_snapshot_, columns_list_, reset_columns_) + : IMergedBlockOutputStream(data_part, metadata_snapshot_, columns_list_, reset_columns_) , columns_list(columns_list_) , default_codec(default_codec_) , write_settings(write_settings_) @@ -38,7 +37,7 @@ MergedBlockOutputStream::MergedBlockOutputStream( blocks_are_granules_size); if (data_part->isStoredOnDisk()) - data_part_storage_builder->createDirectories(); + data_part_storage->createDirectories(); /// We should write version metadata on part creation to distinguish it from parts that were created without transaction. TransactionID tid = txn ? txn->tid : Tx::PrehistoricTID; @@ -47,7 +46,7 @@ MergedBlockOutputStream::MergedBlockOutputStream( data_part->version.setCreationTID(tid, nullptr); data_part->storeVersionMetadata(); - writer = data_part->getWriter(data_part_storage_builder, columns_list, metadata_snapshot, skip_indices, default_codec, writer_settings, {}); + writer = data_part->getWriter(columns_list, metadata_snapshot, skip_indices, default_codec, writer_settings, {}); } /// If data is pre-sorted. 
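The checkAndGetMergeType change above only narrows the raw integer type; the validation itself goes through magic_enum::enum_cast, which yields an empty optional for values that do not name an enumerator. A standalone sketch of that validation pattern, assuming the single-header magic_enum library is available (the enum and the error handling below are illustrative, not the ClickHouse definitions):

#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <magic_enum.hpp>

enum class MergeKind : uint32_t
{
    Regular = 1,
    TTLDelete = 2,
    TTLRecompress = 3,
};

MergeKind checkAndGetMergeKind(uint32_t raw)
{
    // enum_cast returns std::nullopt when the raw value is not an enumerator.
    if (auto kind = magic_enum::enum_cast<MergeKind>(raw))
        return *kind;
    throw std::invalid_argument("Unknown merge kind " + std::to_string(raw));
}

int main()
{
    std::cout << magic_enum::enum_name(checkAndGetMergeKind(2)) << '\n'; // TTLDelete
    try
    {
        checkAndGetMergeKind(42); // not an enumerator -> throws
    }
    catch (const std::exception & e)
    {
        std::cout << e.what() << '\n';
    }
}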
@@ -68,17 +67,17 @@ struct MergedBlockOutputStream::Finalizer::Impl { IMergeTreeDataPartWriter & writer; MergeTreeData::MutableDataPartPtr part; - DataPartStorageBuilderPtr data_part_storage_builder; NameSet files_to_remove_after_finish; std::vector> written_files; bool sync; - Impl(IMergeTreeDataPartWriter & writer_, MergeTreeData::MutableDataPartPtr part_, DataPartStorageBuilderPtr data_part_storage_builder_, const NameSet & files_to_remove_after_finish_, bool sync_) + Impl(IMergeTreeDataPartWriter & writer_, MergeTreeData::MutableDataPartPtr part_, const NameSet & files_to_remove_after_finish_, bool sync_) : writer(writer_) , part(std::move(part_)) - , data_part_storage_builder(std::move(data_part_storage_builder_)) , files_to_remove_after_finish(files_to_remove_after_finish_) - , sync(sync_) {} + , sync(sync_) + { + } void finish(); }; @@ -95,7 +94,7 @@ void MergedBlockOutputStream::Finalizer::Impl::finish() writer.finish(sync); for (const auto & file_name : files_to_remove_after_finish) - data_part_storage_builder->removeFile(file_name); + part->getDataPartStorage().removeFile(file_name); for (auto & file : written_files) { @@ -122,19 +121,19 @@ MergedBlockOutputStream::Finalizer & MergedBlockOutputStream::Finalizer::operato MergedBlockOutputStream::Finalizer::Finalizer(std::unique_ptr impl_) : impl(std::move(impl_)) {} void MergedBlockOutputStream::finalizePart( - MergeTreeData::MutableDataPartPtr & new_part, - bool sync, - const NamesAndTypesList * total_columns_list, - MergeTreeData::DataPart::Checksums * additional_column_checksums) + const MergeTreeMutableDataPartPtr & new_part, + bool sync, + const NamesAndTypesList * total_columns_list, + MergeTreeData::DataPart::Checksums * additional_column_checksums) { finalizePartAsync(new_part, sync, total_columns_list, additional_column_checksums).finish(); } MergedBlockOutputStream::Finalizer MergedBlockOutputStream::finalizePartAsync( - MergeTreeData::MutableDataPartPtr & new_part, - bool sync, - const NamesAndTypesList * total_columns_list, - MergeTreeData::DataPart::Checksums * additional_column_checksums) + const MergeTreeMutableDataPartPtr & new_part, + bool sync, + const NamesAndTypesList * total_columns_list, + MergeTreeData::DataPart::Checksums * additional_column_checksums) { /// Finish write and get checksums. 
MergeTreeData::DataPart::Checksums checksums; @@ -165,7 +164,7 @@ MergedBlockOutputStream::Finalizer MergedBlockOutputStream::finalizePartAsync( new_part->setColumns(part_columns, serialization_infos); } - auto finalizer = std::make_unique(*writer, new_part, data_part_storage_builder, files_to_remove_after_sync, sync); + auto finalizer = std::make_unique(*writer, new_part, files_to_remove_after_sync, sync); if (new_part->isStoredOnDisk()) finalizer->written_files = finalizePartOnDisk(new_part, checksums); @@ -184,7 +183,7 @@ MergedBlockOutputStream::Finalizer MergedBlockOutputStream::finalizePartAsync( } MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDisk( - const MergeTreeData::DataPartPtr & new_part, + const MergeTreeMutableDataPartPtr & new_part, MergeTreeData::DataPart::Checksums & checksums) { WrittenFiles written_files; @@ -192,7 +191,7 @@ MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDis { if (storage.format_version >= MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING || isCompactPart(new_part)) { - auto count_out = data_part_storage_builder->writeFile("count.txt", 4096, write_settings); + auto count_out = new_part->getDataPartStorage().writeFile("count.txt", 4096, write_settings); HashingWriteBuffer count_out_hashing(*count_out); writeIntText(rows_count, count_out_hashing); count_out_hashing.next(); @@ -206,7 +205,7 @@ MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDis { if (new_part->uuid != UUIDHelpers::Nil) { - auto out = data_part_storage_builder->writeFile(IMergeTreeDataPart::UUID_FILE_NAME, 4096, write_settings); + auto out = new_part->getDataPartStorage().writeFile(IMergeTreeDataPart::UUID_FILE_NAME, 4096, write_settings); HashingWriteBuffer out_hashing(*out); writeUUIDText(new_part->uuid, out_hashing); checksums.files[IMergeTreeDataPart::UUID_FILE_NAME].file_size = out_hashing.count(); @@ -217,12 +216,12 @@ MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDis if (storage.format_version >= MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING) { - if (auto file = new_part->partition.store(storage, data_part_storage_builder, checksums)) + if (auto file = new_part->partition.store(storage, new_part->getDataPartStorage(), checksums)) written_files.emplace_back(std::move(file)); if (new_part->minmax_idx->initialized) { - auto files = new_part->minmax_idx->store(storage, data_part_storage_builder, checksums); + auto files = new_part->minmax_idx->store(storage, new_part->getDataPartStorage(), checksums); for (auto & file : files) written_files.emplace_back(std::move(file)); } @@ -232,7 +231,7 @@ MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDis } { - auto count_out = data_part_storage_builder->writeFile("count.txt", 4096, write_settings); + auto count_out = new_part->getDataPartStorage().writeFile("count.txt", 4096, write_settings); HashingWriteBuffer count_out_hashing(*count_out); writeIntText(rows_count, count_out_hashing); count_out_hashing.next(); @@ -246,7 +245,7 @@ MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDis if (!new_part->ttl_infos.empty()) { /// Write a file with ttl infos in json format. 
- auto out = data_part_storage_builder->writeFile("ttl.txt", 4096, write_settings); + auto out = new_part->getDataPartStorage().writeFile("ttl.txt", 4096, write_settings); HashingWriteBuffer out_hashing(*out); new_part->ttl_infos.write(out_hashing); checksums.files["ttl.txt"].file_size = out_hashing.count(); @@ -257,7 +256,7 @@ MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDis if (!new_part->getSerializationInfos().empty()) { - auto out = data_part_storage_builder->writeFile(IMergeTreeDataPart::SERIALIZATION_FILE_NAME, 4096, write_settings); + auto out = new_part->getDataPartStorage().writeFile(IMergeTreeDataPart::SERIALIZATION_FILE_NAME, 4096, write_settings); HashingWriteBuffer out_hashing(*out); new_part->getSerializationInfos().writeJSON(out_hashing); checksums.files[IMergeTreeDataPart::SERIALIZATION_FILE_NAME].file_size = out_hashing.count(); @@ -268,7 +267,7 @@ MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDis { /// Write a file with a description of columns. - auto out = data_part_storage_builder->writeFile("columns.txt", 4096, write_settings); + auto out = new_part->getDataPartStorage().writeFile("columns.txt", 4096, write_settings); new_part->getColumns().writeText(*out); out->preFinalize(); written_files.emplace_back(std::move(out)); @@ -276,7 +275,7 @@ MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDis if (default_codec != nullptr) { - auto out = data_part_storage_builder->writeFile(IMergeTreeDataPart::DEFAULT_COMPRESSION_CODEC_FILE_NAME, 4096, write_settings); + auto out = new_part->getDataPartStorage().writeFile(IMergeTreeDataPart::DEFAULT_COMPRESSION_CODEC_FILE_NAME, 4096, write_settings); DB::writeText(queryToString(default_codec->getFullCodecDesc()), *out); out->preFinalize(); written_files.emplace_back(std::move(out)); @@ -289,7 +288,7 @@ MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDis { /// Write file with checksums. - auto out = data_part_storage_builder->writeFile("checksums.txt", 4096, write_settings); + auto out = new_part->getDataPartStorage().writeFile("checksums.txt", 4096, write_settings); checksums.write(*out); out->preFinalize(); written_files.emplace_back(std::move(out)); diff --git a/src/Storages/MergeTree/MergedBlockOutputStream.h b/src/Storages/MergeTree/MergedBlockOutputStream.h index 92dcd8dd272..ad1bb584788 100644 --- a/src/Storages/MergeTree/MergedBlockOutputStream.h +++ b/src/Storages/MergeTree/MergedBlockOutputStream.h @@ -15,8 +15,7 @@ class MergedBlockOutputStream final : public IMergedBlockOutputStream { public: MergedBlockOutputStream( - const MergeTreeDataPartPtr & data_part, - DataPartStorageBuilderPtr data_part_storage_builder_, + const MergeTreeMutableDataPartPtr & data_part, const StorageMetadataPtr & metadata_snapshot_, const NamesAndTypesList & columns_list_, const MergeTreeIndices & skip_indices, @@ -55,16 +54,16 @@ public: /// Finalize writing part and fill inner structures /// If part is new and contains projections, they should be added before invoking this method. 
Finalizer finalizePartAsync( - MergeTreeData::MutableDataPartPtr & new_part, - bool sync, - const NamesAndTypesList * total_columns_list = nullptr, - MergeTreeData::DataPart::Checksums * additional_column_checksums = nullptr); + const MergeTreeMutableDataPartPtr & new_part, + bool sync, + const NamesAndTypesList * total_columns_list = nullptr, + MergeTreeData::DataPart::Checksums * additional_column_checksums = nullptr); void finalizePart( - MergeTreeData::MutableDataPartPtr & new_part, - bool sync, - const NamesAndTypesList * total_columns_list = nullptr, - MergeTreeData::DataPart::Checksums * additional_column_checksums = nullptr); + const MergeTreeMutableDataPartPtr & new_part, + bool sync, + const NamesAndTypesList * total_columns_list = nullptr, + MergeTreeData::DataPart::Checksums * additional_column_checksums = nullptr); private: /** If `permutation` is given, it rearranges the values in the columns when writing. @@ -74,8 +73,8 @@ private: using WrittenFiles = std::vector>; WrittenFiles finalizePartOnDisk( - const MergeTreeData::DataPartPtr & new_part, - MergeTreeData::DataPart::Checksums & checksums); + const MergeTreeMutableDataPartPtr & new_part, + MergeTreeData::DataPart::Checksums & checksums); NamesAndTypesList columns_list; IMergeTreeDataPart::MinMaxIndex minmax_idx; diff --git a/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp b/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp index dd75cddd380..e4a5a0bc3ba 100644 --- a/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp +++ b/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp @@ -11,8 +11,7 @@ namespace ErrorCodes } MergedColumnOnlyOutputStream::MergedColumnOnlyOutputStream( - DataPartStorageBuilderPtr data_part_storage_builder_, - const MergeTreeDataPartPtr & data_part, + const MergeTreeMutableDataPartPtr & data_part, const StorageMetadataPtr & metadata_snapshot_, const Block & header_, CompressionCodecPtr default_codec, @@ -20,7 +19,7 @@ MergedColumnOnlyOutputStream::MergedColumnOnlyOutputStream( WrittenOffsetColumns * offset_columns_, const MergeTreeIndexGranularity & index_granularity, const MergeTreeIndexGranularityInfo * index_granularity_info) - : IMergedBlockOutputStream(std::move(data_part_storage_builder_), data_part, metadata_snapshot_, header_.getNamesAndTypesList(), /*reset_columns=*/ true) + : IMergedBlockOutputStream(data_part, metadata_snapshot_, header_.getNamesAndTypesList(), /*reset_columns=*/ true) , header(header_) { const auto & global_settings = data_part->storage.getContext()->getSettings(); @@ -34,7 +33,6 @@ MergedColumnOnlyOutputStream::MergedColumnOnlyOutputStream( /* rewrite_primary_key = */ false); writer = data_part->getWriter( - data_part_storage_builder, header.getNamesAndTypesList(), metadata_snapshot_, indices_to_recalc, @@ -81,7 +79,7 @@ MergedColumnOnlyOutputStream::fillChecksums( for (const String & removed_file : removed_files) { - data_part_storage_builder->removeFileIfExists(removed_file); + new_part->getDataPartStorage().removeFileIfExists(removed_file); if (all_checksums.files.contains(removed_file)) all_checksums.files.erase(removed_file); diff --git a/src/Storages/MergeTree/MergedColumnOnlyOutputStream.h b/src/Storages/MergeTree/MergedColumnOnlyOutputStream.h index 1fd1c752226..f382b0fef60 100644 --- a/src/Storages/MergeTree/MergedColumnOnlyOutputStream.h +++ b/src/Storages/MergeTree/MergedColumnOnlyOutputStream.h @@ -14,8 +14,7 @@ public: /// Pass empty 'already_written_offset_columns' first time then and pass the same object to subsequent instances of 
MergedColumnOnlyOutputStream /// if you want to serialize elements of Nested data structure in different instances of MergedColumnOnlyOutputStream. MergedColumnOnlyOutputStream( - DataPartStorageBuilderPtr data_part_storage_builder_, - const MergeTreeDataPartPtr & data_part, + const MergeTreeMutableDataPartPtr & data_part, const StorageMetadataPtr & metadata_snapshot_, const Block & header_, CompressionCodecPtr default_codec_, diff --git a/src/Storages/MergeTree/MutateFromLogEntryTask.cpp b/src/Storages/MergeTree/MutateFromLogEntryTask.cpp index 549c4e7373f..9e3cbb0640b 100644 --- a/src/Storages/MergeTree/MutateFromLogEntryTask.cpp +++ b/src/Storages/MergeTree/MutateFromLogEntryTask.cpp @@ -92,7 +92,7 @@ ReplicatedMergeMutateTaskBase::PrepareResult MutateFromLogEntryTask::prepare() /// Once we mutate part, we must reserve space on the same disk, because mutations can possibly create hardlinks. /// Can throw an exception. - reserved_space = storage.reserveSpace(estimated_space_for_result, source_part->data_part_storage); + reserved_space = storage.reserveSpace(estimated_space_for_result, source_part->getDataPartStorage()); table_lock_holder = storage.lockForShare( RWLockImpl::NO_QUERY, storage_settings_ptr->lock_acquire_timeout_for_background_operations); @@ -193,12 +193,7 @@ ReplicatedMergeMutateTaskBase::PrepareResult MutateFromLogEntryTask::prepare() bool MutateFromLogEntryTask::finalize(ReplicatedMergeMutateTaskBase::PartLogWriter write_part_log) { new_part = mutate_task->getFuture().get(); - auto builder = mutate_task->getBuilder(); - - if (!builder) - builder = new_part->data_part_storage->getBuilder(); - - storage.renameTempPartAndReplace(new_part, *transaction_ptr, builder); + storage.renameTempPartAndReplace(new_part, *transaction_ptr); try { diff --git a/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp b/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp index 0cf10ee1935..b1714076a46 100644 --- a/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp +++ b/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp @@ -83,14 +83,9 @@ bool MutatePlainMergeTreeTask::executeStep() new_part = mutate_task->getFuture().get(); - auto builder = mutate_task->getBuilder(); - if (!builder) - builder = new_part->data_part_storage->getBuilder(); - - MergeTreeData::Transaction transaction(storage, merge_mutate_entry->txn.get()); /// FIXME Transactions: it's too optimistic, better to lock parts before starting transaction - storage.renameTempPartAndReplace(new_part, transaction, builder); + storage.renameTempPartAndReplace(new_part, transaction); transaction.commit(); storage.updateMutationEntriesErrors(future_part, true, ""); diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index 3d964e60798..e5ba771a198 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -619,7 +619,6 @@ static NameToNameVector collectFilesForRenames( /// Initialize and write to disk new part fields like checksums, columns, etc. 
void finalizeMutatedPart( const MergeTreeDataPartPtr & source_part, - const DataPartStorageBuilderPtr & data_part_storage_builder, MergeTreeData::MutableDataPartPtr new_data_part, ExecuteTTLType execute_ttl_type, const CompressionCodecPtr & codec, @@ -627,7 +626,7 @@ void finalizeMutatedPart( { if (new_data_part->uuid != UUIDHelpers::Nil) { - auto out = data_part_storage_builder->writeFile(IMergeTreeDataPart::UUID_FILE_NAME, 4096, context->getWriteSettings()); + auto out = new_data_part->getDataPartStorage().writeFile(IMergeTreeDataPart::UUID_FILE_NAME, 4096, context->getWriteSettings()); HashingWriteBuffer out_hashing(*out); writeUUIDText(new_data_part->uuid, out_hashing); new_data_part->checksums.files[IMergeTreeDataPart::UUID_FILE_NAME].file_size = out_hashing.count(); @@ -637,7 +636,7 @@ void finalizeMutatedPart( if (execute_ttl_type != ExecuteTTLType::NONE) { /// Write a file with ttl infos in json format. - auto out_ttl = data_part_storage_builder->writeFile("ttl.txt", 4096, context->getWriteSettings()); + auto out_ttl = new_data_part->getDataPartStorage().writeFile("ttl.txt", 4096, context->getWriteSettings()); HashingWriteBuffer out_hashing(*out_ttl); new_data_part->ttl_infos.write(out_hashing); new_data_part->checksums.files["ttl.txt"].file_size = out_hashing.count(); @@ -646,7 +645,7 @@ void finalizeMutatedPart( if (!new_data_part->getSerializationInfos().empty()) { - auto out = data_part_storage_builder->writeFile(IMergeTreeDataPart::SERIALIZATION_FILE_NAME, 4096, context->getWriteSettings()); + auto out = new_data_part->getDataPartStorage().writeFile(IMergeTreeDataPart::SERIALIZATION_FILE_NAME, 4096, context->getWriteSettings()); HashingWriteBuffer out_hashing(*out); new_data_part->getSerializationInfos().writeJSON(out_hashing); new_data_part->checksums.files[IMergeTreeDataPart::SERIALIZATION_FILE_NAME].file_size = out_hashing.count(); @@ -655,18 +654,18 @@ void finalizeMutatedPart( { /// Write file with checksums. - auto out_checksums = data_part_storage_builder->writeFile("checksums.txt", 4096, context->getWriteSettings()); + auto out_checksums = new_data_part->getDataPartStorage().writeFile("checksums.txt", 4096, context->getWriteSettings()); new_data_part->checksums.write(*out_checksums); } /// close fd { - auto out = data_part_storage_builder->writeFile(IMergeTreeDataPart::DEFAULT_COMPRESSION_CODEC_FILE_NAME, 4096, context->getWriteSettings()); + auto out = new_data_part->getDataPartStorage().writeFile(IMergeTreeDataPart::DEFAULT_COMPRESSION_CODEC_FILE_NAME, 4096, context->getWriteSettings()); DB::writeText(queryToString(codec->getFullCodecDesc()), *out); } /// close fd { /// Write a file with a description of columns. 
- auto out_columns = data_part_storage_builder->writeFile("columns.txt", 4096, context->getWriteSettings()); + auto out_columns = new_data_part->getDataPartStorage().writeFile("columns.txt", 4096, context->getWriteSettings()); new_data_part->getColumns().writeText(*out_columns); } /// close fd @@ -734,8 +733,6 @@ struct MutationContext = MutationsInterpreter::MutationKind::MutationKindEnum::MUTATE_UNKNOWN; MergeTreeData::MutableDataPartPtr new_data_part; - DataPartStorageBuilderPtr data_part_storage_builder; - IMergedBlockOutputStreamPtr out{nullptr}; String mrk_extension; @@ -816,11 +813,9 @@ public: if (next_level_parts.empty()) { LOG_DEBUG(log, "Merged a projection part in level {}", current_level); - auto builder = selected_parts[0]->data_part_storage->getBuilder(); - selected_parts[0]->renameTo(projection.name + ".proj", true, builder); + selected_parts[0]->renameTo(projection.name + ".proj", true); selected_parts[0]->name = projection.name; selected_parts[0]->is_temp = false; - builder->commit(); ctx->new_data_part->addProjectionPart(name, std::move(selected_parts[0])); /// Task is finished @@ -865,7 +860,6 @@ public: projection_merging_params, NO_TRANSACTION_PTR, ctx->new_data_part.get(), - ctx->data_part_storage_builder.get(), ".tmp_proj"); next_level_parts.push_back(executeHere(tmp_part_merge_task)); @@ -1025,8 +1019,7 @@ bool PartMergerWriter::mutateOriginalPartAndPrepareProjections() if (projection_block) { auto tmp_part = MergeTreeDataWriter::writeTempProjectionPart( - *ctx->data, ctx->log, projection_block, projection, ctx->data_part_storage_builder, ctx->new_data_part.get(), ++block_num); - tmp_part.builder->commit(); + *ctx->data, ctx->log, projection_block, projection, ctx->new_data_part.get(), ++block_num); tmp_part.finalize(); projection_parts[projection.name].emplace_back(std::move(tmp_part.part)); } @@ -1048,8 +1041,7 @@ bool PartMergerWriter::mutateOriginalPartAndPrepareProjections() if (projection_block) { auto temp_part = MergeTreeDataWriter::writeTempProjectionPart( - *ctx->data, ctx->log, projection_block, projection, ctx->data_part_storage_builder, ctx->new_data_part.get(), ++block_num); - temp_part.builder->commit(); + *ctx->data, ctx->log, projection_block, projection, ctx->new_data_part.get(), ++block_num); temp_part.finalize(); projection_parts[projection.name].emplace_back(std::move(temp_part.part)); } @@ -1149,7 +1141,7 @@ private: void prepare() { - ctx->data_part_storage_builder->createDirectories(); + ctx->new_data_part->getDataPartStorage().createDirectories(); /// Note: this is done before creating input streams, because otherwise data.data_parts_mutex /// (which is locked in data.getTotalActiveSizeInBytes()) @@ -1184,7 +1176,6 @@ private: ctx->out = std::make_shared( ctx->new_data_part, - ctx->data_part_storage_builder, ctx->metadata_snapshot, ctx->new_data_part->getColumns(), skip_part_indices, @@ -1280,7 +1271,7 @@ private: if (ctx->execute_ttl_type != ExecuteTTLType::NONE) ctx->files_to_skip.insert("ttl.txt"); - ctx->data_part_storage_builder->createDirectories(); + ctx->new_data_part->getDataPartStorage().createDirectories(); /// We should write version metadata on part creation to distinguish it from parts that were created without transaction. TransactionID tid = ctx->txn ? 
ctx->txn->tid : Tx::PrehistoricTID; @@ -1291,7 +1282,7 @@ private: NameSet hardlinked_files; /// Create hardlinks for unchanged files - for (auto it = ctx->source_part->data_part_storage->iterate(); it->isValid(); it->next()) + for (auto it = ctx->source_part->getDataPartStorage().iterate(); it->isValid(); it->next()) { if (ctx->files_to_skip.contains(it->name())) continue; @@ -1317,22 +1308,22 @@ private: if (it->isFile()) { - ctx->data_part_storage_builder->createHardLinkFrom( - *ctx->source_part->data_part_storage, it->name(), destination); + ctx->new_data_part->getDataPartStorage().createHardLinkFrom( + ctx->source_part->getDataPartStorage(), it->name(), destination); hardlinked_files.insert(it->name()); } else if (!endsWith(it->name(), ".tmp_proj")) // ignore projection tmp merge dir { // it's a projection part directory - ctx->data_part_storage_builder->createProjection(destination); + ctx->new_data_part->getDataPartStorage().createProjection(destination); - auto projection_data_part_storage = ctx->source_part->data_part_storage->getProjection(destination); - auto projection_data_part_storage_builder = ctx->data_part_storage_builder->getProjection(destination); + auto projection_data_part_storage_src = ctx->source_part->getDataPartStorage().getProjection(destination); + auto projection_data_part_storage_dst = ctx->new_data_part->getDataPartStorage().getProjection(destination); - for (auto p_it = projection_data_part_storage->iterate(); p_it->isValid(); p_it->next()) + for (auto p_it = projection_data_part_storage_src->iterate(); p_it->isValid(); p_it->next()) { - projection_data_part_storage_builder->createHardLinkFrom( - *projection_data_part_storage, p_it->name(), p_it->name()); + projection_data_part_storage_dst->createHardLinkFrom( + *projection_data_part_storage_src, p_it->name(), p_it->name()); hardlinked_files.insert(p_it->name()); } } @@ -1362,7 +1353,6 @@ private: builder.addTransform(std::make_shared(builder.getHeader(), *ctx->data, ctx->metadata_snapshot, ctx->new_data_part, ctx->time_of_mutation, true)); ctx->out = std::make_shared( - ctx->data_part_storage_builder, ctx->new_data_part, ctx->metadata_snapshot, ctx->updated_header, @@ -1414,7 +1404,7 @@ private: } } - MutationHelpers::finalizeMutatedPart(ctx->source_part, ctx->data_part_storage_builder, ctx->new_data_part, ctx->execute_ttl_type, ctx->compression_codec, ctx->context); + MutationHelpers::finalizeMutatedPart(ctx->source_part, ctx->new_data_part, ctx->execute_ttl_type, ctx->compression_codec, ctx->context); } @@ -1584,10 +1574,7 @@ bool MutateTask::prepare() ctx->data->getRelativeDataPath(), tmp_part_dir_name); - ctx->data_part_storage_builder = std::make_shared( - single_disk_volume, - ctx->data->getRelativeDataPath(), - tmp_part_dir_name); + data_part_storage->beginTransaction(); ctx->new_data_part = ctx->data->createPart( ctx->future_part->name, ctx->future_part->type, ctx->future_part->part_info, data_part_storage); @@ -1690,9 +1677,4 @@ const MergeTreeData::HardlinkedFiles & MutateTask::getHardlinkedFiles() const return ctx->hardlinked_files; } -DataPartStorageBuilderPtr MutateTask::getBuilder() const -{ - return ctx->data_part_storage_builder; -} - } diff --git a/src/Storages/MergeTree/MutateTask.h b/src/Storages/MergeTree/MutateTask.h index 1f2e8a6fd20..3df30670d7f 100644 --- a/src/Storages/MergeTree/MutateTask.h +++ b/src/Storages/MergeTree/MutateTask.h @@ -46,7 +46,7 @@ public: const MergeTreeData::HardlinkedFiles & getHardlinkedFiles() const; - DataPartStorageBuilderPtr getBuilder() const; + 
MutableDataPartStoragePtr getBuilder() const; private: diff --git a/src/Storages/MergeTree/PartMetadataManagerOrdinary.cpp b/src/Storages/MergeTree/PartMetadataManagerOrdinary.cpp index 7eb868f7754..30823d593a2 100644 --- a/src/Storages/MergeTree/PartMetadataManagerOrdinary.cpp +++ b/src/Storages/MergeTree/PartMetadataManagerOrdinary.cpp @@ -8,20 +8,10 @@ namespace DB { -static std::unique_ptr openForReading(const DataPartStoragePtr & data_part_storage, const String & path) -{ - size_t file_size = data_part_storage->getFileSize(path); - return data_part_storage->readFile(path, ReadSettings().adjustBufferSize(file_size), file_size, std::nullopt); -} - -PartMetadataManagerOrdinary::PartMetadataManagerOrdinary(const IMergeTreeDataPart * part_) : IPartMetadataManager(part_) -{ -} - - std::unique_ptr PartMetadataManagerOrdinary::read(const String & file_name) const { - auto res = openForReading(part->data_part_storage, file_name); + size_t file_size = part->getDataPartStorage().getFileSize(file_name); + auto res = part->getDataPartStorage().readFile(file_name, ReadSettings().adjustBufferSize(file_size), file_size, std::nullopt); if (isCompressedFromFileName(file_name)) return std::make_unique(std::move(res)); @@ -31,7 +21,7 @@ std::unique_ptr PartMetadataManagerOrdinary::read(const String & fil bool PartMetadataManagerOrdinary::exists(const String & file_name) const { - return part->data_part_storage->exists(file_name); + return part->getDataPartStorage().exists(file_name); } diff --git a/src/Storages/MergeTree/PartMetadataManagerOrdinary.h b/src/Storages/MergeTree/PartMetadataManagerOrdinary.h index d86d5c54c00..428b6d4710a 100644 --- a/src/Storages/MergeTree/PartMetadataManagerOrdinary.h +++ b/src/Storages/MergeTree/PartMetadataManagerOrdinary.h @@ -8,7 +8,7 @@ namespace DB class PartMetadataManagerOrdinary : public IPartMetadataManager { public: - explicit PartMetadataManagerOrdinary(const IMergeTreeDataPart * part_); + explicit PartMetadataManagerOrdinary(const IMergeTreeDataPart * part_) : IPartMetadataManager(part_) {} ~PartMetadataManagerOrdinary() override = default; diff --git a/src/Storages/MergeTree/PartMetadataManagerWithCache.cpp b/src/Storages/MergeTree/PartMetadataManagerWithCache.cpp index ee0970984f9..90fd25bc4e7 100644 --- a/src/Storages/MergeTree/PartMetadataManagerWithCache.cpp +++ b/src/Storages/MergeTree/PartMetadataManagerWithCache.cpp @@ -31,24 +31,24 @@ PartMetadataManagerWithCache::PartMetadataManagerWithCache(const IMergeTreeDataP String PartMetadataManagerWithCache::getKeyFromFilePath(const String & file_path) const { - return part->data_part_storage->getDiskName() + ":" + file_path; + return part->getDataPartStorage().getDiskName() + ":" + file_path; } String PartMetadataManagerWithCache::getFilePathFromKey(const String & key) const { - return key.substr(part->data_part_storage->getDiskName().size() + 1); + return key.substr(part->getDataPartStorage().getDiskName().size() + 1); } std::unique_ptr PartMetadataManagerWithCache::read(const String & file_name) const { - String file_path = fs::path(part->data_part_storage->getRelativePath()) / file_name; + String file_path = fs::path(part->getDataPartStorage().getRelativePath()) / file_name; String key = getKeyFromFilePath(file_path); String value; auto status = cache->get(key, value); if (!status.ok()) { ProfileEvents::increment(ProfileEvents::MergeTreeMetadataCacheMiss); - auto in = part->data_part_storage->readFile(file_name, {}, std::nullopt, std::nullopt); + auto in = part->getDataPartStorage().readFile(file_name, 
{}, std::nullopt, std::nullopt); std::unique_ptr reader; if (!isCompressedFromFileName(file_name)) reader = std::move(in); @@ -67,7 +67,7 @@ std::unique_ptr PartMetadataManagerWithCache::read(const String & fi bool PartMetadataManagerWithCache::exists(const String & file_name) const { - String file_path = fs::path(part->data_part_storage->getRelativePath()) / file_name; + String file_path = fs::path(part->getDataPartStorage().getRelativePath()) / file_name; String key = getKeyFromFilePath(file_path); String value; auto status = cache->get(key, value); @@ -79,7 +79,7 @@ bool PartMetadataManagerWithCache::exists(const String & file_name) const else { ProfileEvents::increment(ProfileEvents::MergeTreeMetadataCacheMiss); - return part->data_part_storage->exists(file_name); + return part->getDataPartStorage().exists(file_name); } } @@ -91,7 +91,7 @@ void PartMetadataManagerWithCache::deleteAll(bool include_projection) String value; for (const auto & file_name : file_names) { - String file_path = fs::path(part->data_part_storage->getRelativePath()) / file_name; + String file_path = fs::path(part->getDataPartStorage().getRelativePath()) / file_name; String key = getKeyFromFilePath(file_path); auto status = cache->del(key); if (!status.ok()) @@ -119,10 +119,10 @@ void PartMetadataManagerWithCache::updateAll(bool include_projection) String read_value; for (const auto & file_name : file_names) { - String file_path = fs::path(part->data_part_storage->getRelativePath()) / file_name; - if (!part->data_part_storage->exists(file_name)) + String file_path = fs::path(part->getDataPartStorage().getRelativePath()) / file_name; + if (!part->getDataPartStorage().exists(file_name)) continue; - auto in = part->data_part_storage->readFile(file_name, {}, std::nullopt, std::nullopt); + auto in = part->getDataPartStorage().readFile(file_name, {}, std::nullopt, std::nullopt); readStringUntilEOF(value, *in); String key = getKeyFromFilePath(file_path); @@ -159,7 +159,7 @@ void PartMetadataManagerWithCache::assertAllDeleted(bool include_projection) con file_name = fs::path(file_path).filename(); /// Metadata file belongs to current part - if (fs::path(part->data_part_storage->getRelativePath()) / file_name == file_path) + if (fs::path(part->getDataPartStorage().getRelativePath()) / file_name == file_path) throw Exception( ErrorCodes::LOGICAL_ERROR, "Data part {} with type {} with meta file {} still in cache", @@ -173,7 +173,7 @@ void PartMetadataManagerWithCache::assertAllDeleted(bool include_projection) con const auto & projection_parts = part->getProjectionParts(); for (const auto & [projection_name, projection_part] : projection_parts) { - if (fs::path(part->data_part_storage->getRelativePath()) / (projection_name + ".proj") / file_name == file_path) + if (fs::path(part->getDataPartStorage().getRelativePath()) / (projection_name + ".proj") / file_name == file_path) { throw Exception( ErrorCodes::LOGICAL_ERROR, @@ -190,7 +190,7 @@ void PartMetadataManagerWithCache::assertAllDeleted(bool include_projection) con void PartMetadataManagerWithCache::getKeysAndCheckSums(Strings & keys, std::vector & checksums) const { - String prefix = getKeyFromFilePath(fs::path(part->data_part_storage->getRelativePath()) / ""); + String prefix = getKeyFromFilePath(fs::path(part->getDataPartStorage().getRelativePath()) / ""); Strings values; cache->getByPrefix(prefix, keys, values); size_t size = keys.size(); @@ -225,7 +225,7 @@ std::unordered_map PartMetadataManagerWit results.emplace(file_name, cache_checksums[i]); /// File belongs to 
normal part - if (fs::path(part->data_part_storage->getRelativePath()) / file_name == file_path) + if (fs::path(part->getDataPartStorage().getRelativePath()) / file_name == file_path) { auto disk_checksum = part->getActualChecksumByFile(file_name); if (disk_checksum != cache_checksums[i]) diff --git a/src/Storages/MergeTree/RPNBuilder.cpp b/src/Storages/MergeTree/RPNBuilder.cpp new file mode 100644 index 00000000000..d7ea68e7d64 --- /dev/null +++ b/src/Storages/MergeTree/RPNBuilder.cpp @@ -0,0 +1,417 @@ +#include + +#include + +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include + +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +namespace +{ + +void appendColumnNameWithoutAlias(const ActionsDAG::Node & node, WriteBuffer & out, bool legacy = false) +{ + switch (node.type) + { + case ActionsDAG::ActionType::INPUT: + writeString(node.result_name, out); + break; + case ActionsDAG::ActionType::COLUMN: + { + /// If it was created from ASTLiteral, then result_name can be an alias. + /// We need to convert value back to string here. + if (const auto * column_const = typeid_cast(node.column.get())) + writeString(applyVisitor(FieldVisitorToString(), column_const->getField()), out); + /// It may be possible that column is ColumnSet + else + writeString(node.result_name, out); + break; + } + case ActionsDAG::ActionType::ALIAS: + appendColumnNameWithoutAlias(*node.children.front(), out, legacy); + break; + case ActionsDAG::ActionType::ARRAY_JOIN: + writeCString("arrayJoin(", out); + appendColumnNameWithoutAlias(*node.children.front(), out, legacy); + writeChar(')', out); + break; + case ActionsDAG::ActionType::FUNCTION: + { + auto name = node.function_base->getName(); + if (legacy && name == "modulo") + writeCString("moduleLegacy", out); + else + writeString(name, out); + + writeChar('(', out); + bool first = true; + for (const auto * arg : node.children) + { + if (!first) + writeCString(", ", out); + first = false; + + appendColumnNameWithoutAlias(*arg, out, legacy); + } + writeChar(')', out); + } + } +} + +String getColumnNameWithoutAlias(const ActionsDAG::Node & node, bool legacy = false) +{ + WriteBufferFromOwnString out; + appendColumnNameWithoutAlias(node, out, legacy); + return std::move(out.str()); +} + +} + +RPNBuilderTreeContext::RPNBuilderTreeContext(ContextPtr query_context_) + : query_context(std::move(query_context_)) +{} + +RPNBuilderTreeContext::RPNBuilderTreeContext(ContextPtr query_context_, Block block_with_constants_, PreparedSetsPtr prepared_sets_) + : query_context(std::move(query_context_)) + , block_with_constants(std::move(block_with_constants_)) + , prepared_sets(std::move(prepared_sets_)) +{} + +RPNBuilderTreeNode::RPNBuilderTreeNode(const ActionsDAG::Node * dag_node_, RPNBuilderTreeContext & tree_context_) + : dag_node(dag_node_) + , tree_context(tree_context_) +{ + assert(dag_node); +} + +RPNBuilderTreeNode::RPNBuilderTreeNode(const IAST * ast_node_, RPNBuilderTreeContext & tree_context_) + : ast_node(ast_node_) + , tree_context(tree_context_) +{ + assert(ast_node); +} + +std::string RPNBuilderTreeNode::getColumnName() const +{ + if (ast_node) + return ast_node->getColumnNameWithoutAlias(); + else + return getColumnNameWithoutAlias(*dag_node); +} + +std::string RPNBuilderTreeNode::getColumnNameWithModuloLegacy() const +{ + if (ast_node) + { + auto adjusted_ast = ast_node->clone(); + KeyDescription::moduloToModuloLegacyRecursive(adjusted_ast); + return 
adjusted_ast->getColumnNameWithoutAlias(); + } + else + { + return getColumnNameWithoutAlias(*dag_node, true /*legacy*/); + } +} + +bool RPNBuilderTreeNode::isFunction() const +{ + if (ast_node) + return typeid_cast(ast_node); + else + return dag_node->type == ActionsDAG::ActionType::FUNCTION; +} + +bool RPNBuilderTreeNode::isConstant() const +{ + if (ast_node) + { + bool is_literal = typeid_cast(ast_node); + if (is_literal) + return true; + + String column_name = ast_node->getColumnName(); + const auto & block_with_constants = tree_context.getBlockWithConstants(); + + if (block_with_constants.has(column_name) && isColumnConst(*block_with_constants.getByName(column_name).column)) + return true; + + return false; + } + else + { + return dag_node->column && isColumnConst(*dag_node->column); + } +} + +ColumnWithTypeAndName RPNBuilderTreeNode::getConstantColumn() const +{ + if (!isConstant()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "RPNBuilderTree node is not a constant"); + + ColumnWithTypeAndName result; + + if (ast_node) + { + const auto * literal = assert_cast(ast_node); + if (literal) + { + result.type = applyVisitor(FieldToDataType(), literal->value); + result.column = result.type->createColumnConst(0, literal->value); + + return result; + } + + String column_name = ast_node->getColumnName(); + const auto & block_with_constants = tree_context.getBlockWithConstants(); + + return block_with_constants.getByName(column_name); + } + else + { + result.type = dag_node->result_type; + result.column = dag_node->column; + } + + return result; +} + +bool RPNBuilderTreeNode::tryGetConstant(Field & output_value, DataTypePtr & output_type) const +{ + if (ast_node) + { + // Constant expr should use alias names if any + String column_name = ast_node->getColumnName(); + const auto & block_with_constants = tree_context.getBlockWithConstants(); + + if (const auto * literal = ast_node->as()) + { + /// By default block_with_constants has only one column named "_dummy". + /// If the block contains only constants, it may not be preprocessed by + // ExpressionAnalyzer, so try to look up in the default column. + if (!block_with_constants.has(column_name)) + column_name = "_dummy"; + + /// Simple literal + output_value = literal->value; + output_type = block_with_constants.getByName(column_name).type; + + /// If the constant is not Null, we can assume its type is not Nullable as well.
+ if (!output_value.isNull()) + output_type = removeNullable(output_type); + + return true; + } + else if (block_with_constants.has(column_name) && + isColumnConst(*block_with_constants.getByName(column_name).column)) + { + /// An expression which is dependent on constants only + const auto & constant_column = block_with_constants.getByName(column_name); + output_value = (*constant_column.column)[0]; + output_type = constant_column.type; + + if (!output_value.isNull()) + output_type = removeNullable(output_type); + + return true; + } + } + else + { + if (dag_node->column && isColumnConst(*dag_node->column)) + { + output_value = (*dag_node->column)[0]; + output_type = dag_node->result_type; + + if (!output_value.isNull()) + output_type = removeNullable(output_type); + + return true; + } + } + + return false; +} + +namespace +{ + +ConstSetPtr tryGetSetFromDAGNode(const ActionsDAG::Node * dag_node) +{ + if (!dag_node->column) + return {}; + + const IColumn * column = dag_node->column.get(); + if (const auto * column_const = typeid_cast(column)) + column = &column_const->getDataColumn(); + + if (const auto * column_set = typeid_cast(column)) + { + auto set = column_set->getData(); + + if (set->isCreated()) + return set; + } + + return {}; +} + +} + +ConstSetPtr RPNBuilderTreeNode::tryGetPreparedSet() const +{ + const auto & prepared_sets = getTreeContext().getPreparedSets(); + + if (ast_node && prepared_sets) + { + auto prepared_sets_with_same_hash = prepared_sets->getByTreeHash(ast_node->getTreeHash()); + for (auto & set : prepared_sets_with_same_hash) + if (set->isCreated()) + return set; + } + else if (dag_node) + { + return tryGetSetFromDAGNode(dag_node); + } + + return {}; +} + +ConstSetPtr RPNBuilderTreeNode::tryGetPreparedSet(const DataTypes & data_types) const +{ + const auto & prepared_sets = getTreeContext().getPreparedSets(); + + if (prepared_sets && ast_node) + { + if (ast_node->as() || ast_node->as()) + return prepared_sets->get(PreparedSetKey::forSubquery(*ast_node)); + + return prepared_sets->get(PreparedSetKey::forLiteral(*ast_node, data_types)); + } + else if (dag_node) + { + return tryGetSetFromDAGNode(dag_node); + } + + return nullptr; +} + +ConstSetPtr RPNBuilderTreeNode::tryGetPreparedSet( + const std::vector & indexes_mapping, + const DataTypes & data_types) const +{ + const auto & prepared_sets = getTreeContext().getPreparedSets(); + + if (prepared_sets && ast_node) + { + if (ast_node->as() || ast_node->as()) + return prepared_sets->get(PreparedSetKey::forSubquery(*ast_node)); + + /// We have `PreparedSetKey::forLiteral` but it is useless here as we don't have enough information + /// about types in left argument of the IN operator. Instead, we manually iterate through all the sets + /// and find the one for the right arg based on the AST structure (getTreeHash), after that we check + /// that the types it was prepared with are compatible with the types of the primary key. 
+ auto types_match = [&indexes_mapping, &data_types](const SetPtr & candidate_set) + { + assert(indexes_mapping.size() == data_types.size()); + + for (size_t i = 0; i < indexes_mapping.size(); ++i) + { + if (!candidate_set->areTypesEqual(indexes_mapping[i].tuple_index, data_types[i])) + return false; + } + + return true; + }; + + auto tree_hash = ast_node->getTreeHash(); + for (const auto & set : prepared_sets->getByTreeHash(tree_hash)) + { + if (types_match(set)) + return set; + } + } + else if (dag_node->column) + { + return tryGetSetFromDAGNode(dag_node); + } + + return nullptr; +} + +RPNBuilderFunctionTreeNode RPNBuilderTreeNode::toFunctionNode() const +{ + if (!isFunction()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "RPNBuilderTree node is not a function"); + + if (this->ast_node) + return RPNBuilderFunctionTreeNode(this->ast_node, tree_context); + else + return RPNBuilderFunctionTreeNode(this->dag_node, tree_context); +} + +std::optional RPNBuilderTreeNode::toFunctionNodeOrNull() const +{ + if (!isFunction()) + return {}; + + if (this->ast_node) + return RPNBuilderFunctionTreeNode(this->ast_node, tree_context); + else + return RPNBuilderFunctionTreeNode(this->dag_node, tree_context); +} + +std::string RPNBuilderFunctionTreeNode::getFunctionName() const +{ + if (ast_node) + return assert_cast(ast_node)->name; + else + return dag_node->function_base->getName(); +} + +size_t RPNBuilderFunctionTreeNode::getArgumentsSize() const +{ + if (ast_node) + { + const auto * ast_function = assert_cast(ast_node); + return ast_function->arguments ? ast_function->arguments->children.size() : 0; + } + else + { + return dag_node->children.size(); + } +} + +RPNBuilderTreeNode RPNBuilderFunctionTreeNode::getArgumentAt(size_t index) const +{ + if (ast_node) + { + const auto * ast_function = assert_cast(ast_node); + return RPNBuilderTreeNode(ast_function->arguments->children[index].get(), tree_context); + } + else + { + return RPNBuilderTreeNode(dag_node->children[index], tree_context); + } +} + +} diff --git a/src/Storages/MergeTree/RPNBuilder.h b/src/Storages/MergeTree/RPNBuilder.h index 27b616dc301..132d3aa44e8 100644 --- a/src/Storages/MergeTree/RPNBuilder.h +++ b/src/Storages/MergeTree/RPNBuilder.h @@ -1,111 +1,266 @@ #pragma once #include -#include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include namespace DB { -/// Builds reverse polish notation -template -class RPNBuilder : WithContext +/** Context of RPNBuilderTree. + * + * For AST tree context, precalculated block with constants and prepared sets are required for index analysis. + * For DAG tree precalculated block with constants and prepared sets are not required, because constants and sets already + * calculated inside COLUMN actions dag node. 
+ */ +class RPNBuilderTreeContext { public: - using RPN = std::vector; - using AtomFromASTFunc = std::function< - bool(const ASTPtr & node, ContextPtr context, Block & block_with_constants, RPNElement & out)>; + /// Construct RPNBuilderTreeContext for ActionsDAG tree + explicit RPNBuilderTreeContext(ContextPtr query_context_); - RPNBuilder(const SelectQueryInfo & query_info, ContextPtr context_, const AtomFromASTFunc & atom_from_ast_) - : WithContext(context_), atom_from_ast(atom_from_ast_) + /// Construct RPNBuilderTreeContext for AST tree + explicit RPNBuilderTreeContext(ContextPtr query_context_, Block block_with_constants_, PreparedSetsPtr prepared_sets_); + + /// Get query context + const ContextPtr & getQueryContext() const { - /** Evaluation of expressions that depend only on constants. - * For the index to be used, if it is written, for example `WHERE Date = toDate(now())`. - */ - block_with_constants = KeyCondition::getBlockWithConstants(query_info.query, query_info.syntax_analyzer_result, getContext()); - - /// Transform WHERE section to Reverse Polish notation - const ASTSelectQuery & select = typeid_cast(*query_info.query); - if (select.where()) - { - traverseAST(select.where()); - - if (select.prewhere()) - { - traverseAST(select.prewhere()); - rpn.emplace_back(RPNElement::FUNCTION_AND); - } - } - else if (select.prewhere()) - { - traverseAST(select.prewhere()); - } - else - { - rpn.emplace_back(RPNElement::FUNCTION_UNKNOWN); - } + return query_context; } - RPN && extractRPN() { return std::move(rpn); } + /** Get block with constants. + * Valid only for AST tree. + */ + const Block & getBlockWithConstants() const + { + return block_with_constants; + } + + /** Get prepared sets. + * Valid only for AST tree. + */ + const PreparedSetsPtr & getPreparedSets() const + { + return prepared_sets; + } private: - void traverseAST(const ASTPtr & node) + /// Valid for both AST and ActionDAG tree + ContextPtr query_context; + + /// Valid only for AST tree + Block block_with_constants; + + /// Valid only for AST tree + PreparedSetsPtr prepared_sets; +}; + +class RPNBuilderFunctionTreeNode; + +/** RPNBuilderTreeNode is wrapper around DAG or AST node. + * It defines unified interface for index analysis. + */ +class RPNBuilderTreeNode +{ +public: + /// Construct RPNBuilderTreeNode with non null dag node and tree context + explicit RPNBuilderTreeNode(const ActionsDAG::Node * dag_node_, RPNBuilderTreeContext & tree_context_); + + /// Construct RPNBuilderTreeNode with non null ast node and tree context + explicit RPNBuilderTreeNode(const IAST * ast_node_, RPNBuilderTreeContext & tree_context_); + + /// Get column name + std::string getColumnName() const; + + /** Get column name. + * Function `modulo` is replaced with `moduloLegacy`. + */ + std::string getColumnNameWithModuloLegacy() const; + + /// Is node function + bool isFunction() const; + + /// Is node constant + bool isConstant() const; + + /** Get constant as constant column. + * Node must be constant before calling these method, otherwise logical exception is thrown. + */ + ColumnWithTypeAndName getConstantColumn() const; + + /** Try get constant from node. If node is constant returns true, and constant value and constant type output parameters are set. + * Otherwise false is returned. 
+ */ + bool tryGetConstant(Field & output_value, DataTypePtr & output_type) const; + + /// Try get prepared set from node + ConstSetPtr tryGetPreparedSet() const; + + /// Try get prepared set from node that matches data types + ConstSetPtr tryGetPreparedSet(const DataTypes & data_types) const; + + /// Try get prepared set from node that matches indexes mapping and data types + ConstSetPtr tryGetPreparedSet( + const std::vector & indexes_mapping, + const DataTypes & data_types) const; + + /** Convert node to function node. + * Node must be a function before calling this method, otherwise an exception is thrown. + */ + RPNBuilderFunctionTreeNode toFunctionNode() const; + + /// Convert node to function node or null optional + std::optional toFunctionNodeOrNull() const; + + /// Get tree context + const RPNBuilderTreeContext & getTreeContext() const + { + return tree_context; + } + + /// Get tree context + RPNBuilderTreeContext & getTreeContext() + { + return tree_context; + } + +protected: + const IAST * ast_node = nullptr; + const ActionsDAG::Node * dag_node = nullptr; + RPNBuilderTreeContext & tree_context; +}; + +/** RPNBuilderFunctionTreeNode is a wrapper around RPNBuilderTreeNode with function type. + * It provides additional functionality that is specific to functions. + */ +class RPNBuilderFunctionTreeNode : public RPNBuilderTreeNode +{ +public: + using RPNBuilderTreeNode::RPNBuilderTreeNode; + + /// Get function name + std::string getFunctionName() const; + + /// Get function arguments size + size_t getArgumentsSize() const; + + /// Get function argument at index + RPNBuilderTreeNode getArgumentAt(size_t index) const; +}; + +/** RPNBuilder builds a stack of reverse polish notation elements (RPNElements) required for index analysis. + * + * The RPNBuilder client must provide an RPNElement type that has the following interface: + * + * struct RPNElementInterface + * { + * enum Function + * { + * FUNCTION_UNKNOWN, /// Can take any value. + * /// Operators of the logical expression. + * FUNCTION_NOT, + * FUNCTION_AND, + * FUNCTION_OR, + * ... + * }; + * + * RPNElementInterface(); + * + * Function function = FUNCTION_UNKNOWN; + * + * } + * + * RPNBuilder takes care of building the stack of RPNElements with `NOT`, `AND`, `OR` types. + * In addition, the client must provide an ExtractAtomFromTreeFunction that returns true and an RPNElement as an output parameter + * if it can convert an RPNBuilderTree node to an RPNElement, false otherwise.
+ */ +template +class RPNBuilder +{ +public: + using RPNElements = std::vector; + using ExtractAtomFromTreeFunction = std::function; + + explicit RPNBuilder(const ActionsDAG::Node * filter_actions_dag_node, + ContextPtr query_context_, + const ExtractAtomFromTreeFunction & extract_atom_from_tree_function_) + : tree_context(std::move(query_context_)) + , extract_atom_from_tree_function(extract_atom_from_tree_function_) + { + traverseTree(RPNBuilderTreeNode(filter_actions_dag_node, tree_context)); + } + + RPNBuilder(const ASTPtr & filter_node, + ContextPtr query_context_, + Block block_with_constants_, + PreparedSetsPtr prepared_sets_, + const ExtractAtomFromTreeFunction & extract_atom_from_tree_function_) + : tree_context(std::move(query_context_), std::move(block_with_constants_), std::move(prepared_sets_)) + , extract_atom_from_tree_function(extract_atom_from_tree_function_) + { + traverseTree(RPNBuilderTreeNode(filter_node.get(), tree_context)); + } + + RPNElements && extractRPN() && { return std::move(rpn_elements); } + +private: + void traverseTree(const RPNBuilderTreeNode & node) { RPNElement element; - if (ASTFunction * func = typeid_cast(&*node)) + if (node.isFunction()) { - if (operatorFromAST(func, element)) + auto function_node = node.toFunctionNode(); + + if (extractLogicalOperatorFromTree(function_node, element)) { - auto & args = typeid_cast(*func->arguments).children; - for (size_t i = 0, size = args.size(); i < size; ++i) + size_t arguments_size = function_node.getArgumentsSize(); + + for (size_t argument_index = 0; argument_index < arguments_size; ++argument_index) { - traverseAST(args[i]); + auto function_node_argument = function_node.getArgumentAt(argument_index); + traverseTree(function_node_argument); /** The first part of the condition is for the correct support of `and` and `or` functions of arbitrary arity * - in this case `n - 1` elements are added (where `n` is the number of arguments). */ - if (i != 0 || element.function == RPNElement::FUNCTION_NOT) - rpn.emplace_back(std::move(element)); + if (argument_index != 0 || element.function == RPNElement::FUNCTION_NOT) + rpn_elements.emplace_back(std::move(element)); } return; } } - if (!atom_from_ast(node, getContext(), block_with_constants, element)) - { + if (!extract_atom_from_tree_function(node, element)) element.function = RPNElement::FUNCTION_UNKNOWN; - } - rpn.emplace_back(std::move(element)); + rpn_elements.emplace_back(std::move(element)); } - bool operatorFromAST(const ASTFunction * func, RPNElement & out) + bool extractLogicalOperatorFromTree(const RPNBuilderFunctionTreeNode & function_node, RPNElement & out) { - /// Functions AND, OR, NOT. - /// Also a special function `indexHint` - works as if instead of calling a function there are just parentheses - /// (or, the same thing - calling the function `and` from one argument). - const ASTs & args = typeid_cast(*func->arguments).children; + /** Functions AND, OR, NOT. + * Also a special function `indexHint` - works as if instead of calling a function there are just parentheses + * (or, the same thing - calling the function `and` from one argument). 
+ */ - if (func->name == "not") + auto function_name = function_node.getFunctionName(); + if (function_name == "not") { - if (args.size() != 1) + if (function_node.getArgumentsSize() != 1) return false; out.function = RPNElement::FUNCTION_NOT; } else { - if (func->name == "and" || func->name == "indexHint") + if (function_name == "and" || function_name == "indexHint") out.function = RPNElement::FUNCTION_AND; - else if (func->name == "or") + else if (function_name == "or") out.function = RPNElement::FUNCTION_OR; else return false; @@ -114,10 +269,9 @@ private: return true; } - const AtomFromASTFunc & atom_from_ast; - Block block_with_constants; - RPN rpn; + RPNBuilderTreeContext tree_context; + const ExtractAtomFromTreeFunction & extract_atom_from_tree_function; + RPNElements rpn_elements; }; - } diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp index 3936ee61b70..7993840f1d9 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp @@ -419,14 +419,14 @@ void ReplicatedMergeTreeCleanupThread::getBlocksSortedByTime(zkutil::ZooKeeper & LOG_TRACE(log, "Checking {} blocks ({} are not cached){}", stat.numChildren, not_cached_blocks, " to clear old ones from ZooKeeper."); } - zkutil::AsyncResponses exists_futures; + std::vector exists_paths; for (const String & block : blocks) { auto it = cached_block_stats.find(block); if (it == cached_block_stats.end()) { /// New block. Fetch its stat asynchronously. - exists_futures.emplace_back(block, zookeeper.asyncExists(storage.zookeeper_path + "/blocks/" + block)); + exists_paths.emplace_back(storage.zookeeper_path + "/blocks/" + block); } else { @@ -436,14 +436,18 @@ void ReplicatedMergeTreeCleanupThread::getBlocksSortedByTime(zkutil::ZooKeeper & } } + auto exists_size = exists_paths.size(); + auto exists_results = zookeeper.exists(exists_paths); + /// Put fetched stats into the cache - for (auto & elem : exists_futures) + for (size_t i = 0; i < exists_size; ++i) { - auto status = elem.second.get(); + auto status = exists_results[i]; if (status.error != Coordination::Error::ZNONODE) { - cached_block_stats.emplace(elem.first, std::make_pair(status.stat.ctime, status.stat.version)); - timed_blocks.emplace_back(elem.first, status.stat.ctime, status.stat.version); + auto node_name = fs::path(exists_paths[i]).filename(); + cached_block_stats.emplace(node_name, std::make_pair(status.stat.ctime, status.stat.version)); + timed_blocks.emplace_back(node_name, status.stat.ctime, status.stat.version); } } diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.cpp index e7882ce4952..d7e3c3b1955 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeLogEntry.cpp @@ -240,7 +240,7 @@ void ReplicatedMergeTreeLogEntryData::readText(ReadBuffer & in) if (checkString("merge_type: ", in)) { - UInt64 value; + UInt32 value; in >> value; merge_type = checkAndGetMergeType(value); } diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeMergeStrategyPicker.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeMergeStrategyPicker.cpp index ce33ac8c467..626295d7255 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeMergeStrategyPicker.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeMergeStrategyPicker.cpp @@ -91,8 +91,8 @@ std::optional 
ReplicatedMergeTreeMergeStrategyPicker::pickReplicaToExecu void ReplicatedMergeTreeMergeStrategyPicker::refreshState() { const auto settings = storage.getSettings(); - auto threshold = settings->execute_merges_on_single_replica_time_threshold.totalSeconds(); - auto threshold_init = 0; + time_t threshold = settings->execute_merges_on_single_replica_time_threshold.totalSeconds(); + time_t threshold_init = 0; if (settings->allow_remote_fs_zero_copy_replication) threshold_init = settings->remote_fs_execute_merges_on_single_replica_time_threshold.totalSeconds(); @@ -127,7 +127,7 @@ void ReplicatedMergeTreeMergeStrategyPicker::refreshState() active_replicas_tmp.push_back(replica); if (replica == storage.replica_name) { - current_replica_index_tmp = active_replicas_tmp.size() - 1; + current_replica_index_tmp = static_cast(active_replicas_tmp.size() - 1); } } } diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp index 0305ce440f9..d6d937ce66f 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp @@ -41,7 +41,7 @@ ReplicatedMergeTreeQueue::ReplicatedMergeTreeQueue(StorageReplicatedMergeTree & void ReplicatedMergeTreeQueue::clear() { auto locks = lockQueue(); - assert(future_parts.empty()); + chassert(future_parts.empty()); current_parts.clear(); virtual_parts.clear(); queue.clear(); @@ -62,6 +62,7 @@ void ReplicatedMergeTreeQueue::setBrokenPartsToEnqueueFetchesOnLoading(Strings & void ReplicatedMergeTreeQueue::initialize(zkutil::ZooKeeperPtr zookeeper) { + clear(); std::lock_guard lock(state_mutex); LOG_TRACE(log, "Initializing parts in queue"); @@ -153,17 +154,19 @@ bool ReplicatedMergeTreeQueue::load(zkutil::ZooKeeperPtr zookeeper) ::sort(children.begin(), children.end()); - zkutil::AsyncResponses futures; - futures.reserve(children.size()); + auto children_num = children.size(); + std::vector paths; + paths.reserve(children_num); for (const String & child : children) - futures.emplace_back(child, zookeeper->asyncGet(fs::path(queue_path) / child)); + paths.emplace_back(fs::path(queue_path) / child); - for (auto & future : futures) + auto results = zookeeper->get(paths); + for (size_t i = 0; i < children_num; ++i) { - Coordination::GetResponse res = future.second.get(); + auto res = results[i]; LogEntryPtr entry = LogEntry::parse(res.data, res.stat); - entry->znode_name = future.first; + entry->znode_name = children[i]; std::lock_guard lock(state_mutex); @@ -641,11 +644,11 @@ int32_t ReplicatedMergeTreeQueue::pullLogsToQueue(zkutil::ZooKeeperPtr zookeeper LOG_DEBUG(log, "Pulling {} entries to queue: {} - {}", (end - begin), *begin, *last); - zkutil::AsyncResponses futures; - futures.reserve(end - begin); + Strings get_paths; + get_paths.reserve(end - begin); for (auto it = begin; it != end; ++it) - futures.emplace_back(*it, zookeeper->asyncGet(fs::path(zookeeper_path) / "log" / *it)); + get_paths.emplace_back(fs::path(zookeeper_path) / "log" / *it); /// Simultaneously add all new entries to the queue and move the pointer to the log. 
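The hunks immediately around this point (in ReplicatedMergeTreeCleanupThread.cpp and ReplicatedMergeTreeQueue.cpp) replace one asynchronous ZooKeeper request per path with a single batched exists()/get() call over a vector of paths, matching responses back to paths by index. The standalone C++ sketch below only illustrates that collect-then-batch pattern; MockZooKeeper and its response type are hypothetical stand-ins for illustration, not the zkutil::ZooKeeper API.

#include <iostream>
#include <string>
#include <vector>

// Hypothetical stand-in for a ZooKeeper-like client that supports batched lookups.
struct MockExistsResponse
{
    bool exists = false;
    long ctime = 0; // analogous to the ctime taken from the node stat in the patch
};

struct MockZooKeeper
{
    // One round trip for the whole batch instead of one future per path.
    std::vector<MockExistsResponse> exists(const std::vector<std::string> & paths) const
    {
        std::vector<MockExistsResponse> results;
        results.reserve(paths.size());
        for (const auto & path : paths)
            results.push_back({!path.empty(), 1000}); // dummy data for illustration
        return results;
    }
};

int main()
{
    MockZooKeeper zookeeper;

    // Collect all paths first (like exists_paths / get_paths in the patch) ...
    std::vector<std::string> exists_paths = {"/blocks/a", "/blocks/b", "/blocks/c"};

    // ... then issue a single batched request and correlate results by index.
    auto exists_results = zookeeper.exists(exists_paths);
    for (size_t i = 0; i < exists_paths.size(); ++i)
        std::cout << exists_paths[i] << " exists=" << exists_results[i].exists << '\n';
    return 0;
}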
@@ -655,9 +658,11 @@ int32_t ReplicatedMergeTreeQueue::pullLogsToQueue(zkutil::ZooKeeperPtr zookeeper std::optional min_unprocessed_insert_time_changed; - for (auto & future : futures) + auto get_results = zookeeper->get(get_paths); + auto get_num = get_results.size(); + for (size_t i = 0; i < get_num; ++i) { - Coordination::GetResponse res = future.second.get(); + auto res = get_results[i]; copied_entries.emplace_back(LogEntry::parse(res.data, res.stat)); @@ -1804,9 +1809,9 @@ ReplicatedMergeTreeQueue::Status ReplicatedMergeTreeQueue::getStatus() const Status res; - res.future_parts = future_parts.size(); - res.queue_size = queue.size(); - res.last_queue_update = last_queue_update; + res.future_parts = static_cast(future_parts.size()); + res.queue_size = static_cast(queue.size()); + res.last_queue_update = static_cast(last_queue_update); res.inserts_in_queue = 0; res.merges_in_queue = 0; @@ -1819,7 +1824,7 @@ ReplicatedMergeTreeQueue::Status ReplicatedMergeTreeQueue::getStatus() const for (const LogEntryPtr & entry : queue) { if (entry->create_time && (!res.queue_oldest_time || entry->create_time < res.queue_oldest_time)) - res.queue_oldest_time = entry->create_time; + res.queue_oldest_time = static_cast(entry->create_time); if (entry->type == LogEntry::GET_PART || entry->type == LogEntry::ATTACH_PART) { @@ -1827,7 +1832,7 @@ ReplicatedMergeTreeQueue::Status ReplicatedMergeTreeQueue::getStatus() const if (entry->create_time && (!res.inserts_oldest_time || entry->create_time < res.inserts_oldest_time)) { - res.inserts_oldest_time = entry->create_time; + res.inserts_oldest_time = static_cast(entry->create_time); res.oldest_part_to_get = entry->new_part_name; } } @@ -1838,7 +1843,7 @@ ReplicatedMergeTreeQueue::Status ReplicatedMergeTreeQueue::getStatus() const if (entry->create_time && (!res.merges_oldest_time || entry->create_time < res.merges_oldest_time)) { - res.merges_oldest_time = entry->create_time; + res.merges_oldest_time = static_cast(entry->create_time); res.oldest_part_to_merge_to = entry->new_part_name; } } @@ -1849,7 +1854,7 @@ ReplicatedMergeTreeQueue::Status ReplicatedMergeTreeQueue::getStatus() const if (entry->create_time && (!res.part_mutations_oldest_time || entry->create_time < res.part_mutations_oldest_time)) { - res.part_mutations_oldest_time = entry->create_time; + res.part_mutations_oldest_time = static_cast(entry->create_time); res.oldest_part_to_mutate_to = entry->new_part_name; } } diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h index 6d1a3efb01d..2ebdd604af2 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h @@ -425,6 +425,7 @@ public: struct Status { + /// TODO: consider using UInt64 here UInt32 future_parts; UInt32 queue_size; UInt32 inserts_in_queue; diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp index e2b23d75746..10ec4702b53 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include @@ -26,19 +27,12 @@ namespace DB namespace ErrorCodes { extern const int REPLICA_IS_ALREADY_ACTIVE; - extern const int REPLICA_STATUS_CHANGED; - -} - -namespace -{ - constexpr auto retry_period_ms = 1000; } /// Used to check whether it's us who set node `is_active`, or not. 
static String generateActiveNodeIdentifier() { - return "pid: " + toString(getpid()) + ", random: " + toString(randomSeed()); + return Field(ServerUUID::get()).dump(); } ReplicatedMergeTreeRestartingThread::ReplicatedMergeTreeRestartingThread(StorageReplicatedMergeTree & storage_) @@ -58,27 +52,34 @@ void ReplicatedMergeTreeRestartingThread::run() if (need_stop) return; - size_t reschedule_period_ms = check_period_ms; + /// In case of any exceptions we want to rerun this task as fast as possible, but we also don't want to keep retrying immediately + /// in a tight loop (as fast as tasks can be processed), so we'll retry between 100 and 10000 ms + const size_t backoff_ms = 100 * ((consecutive_check_failures + 1) * (consecutive_check_failures + 2)) / 2; + const size_t next_failure_retry_ms = std::min(size_t{10000}, backoff_ms); try { bool replica_is_active = runImpl(); - if (!replica_is_active) - reschedule_period_ms = retry_period_ms; - } - catch (const Exception & e) - { - /// We couldn't activate table let's set it into readonly mode - partialShutdown(); - tryLogCurrentException(log, __PRETTY_FUNCTION__); - - if (e.code() == ErrorCodes::REPLICA_STATUS_CHANGED) - reschedule_period_ms = 0; + if (replica_is_active) + { + consecutive_check_failures = 0; + task->scheduleAfter(check_period_ms); + } + else + { + consecutive_check_failures++; + task->scheduleAfter(next_failure_retry_ms); + } } catch (...) { + consecutive_check_failures++; + task->scheduleAfter(next_failure_retry_ms); + + /// We couldn't activate the table, so let's set it into readonly mode if necessary + /// We do this after scheduling the task in case it throws partialShutdown(); - tryLogCurrentException(log, __PRETTY_FUNCTION__); + tryLogCurrentException(log, "Failed to restart the table. Will try again"); } if (first_time) @@ -92,14 +93,6 @@ void ReplicatedMergeTreeRestartingThread::run() storage.startup_event.set(); first_time = false; } - - if (need_stop) - return; - - if (reschedule_period_ms) - task->scheduleAfter(reschedule_period_ms); - else - task->schedule(); } bool ReplicatedMergeTreeRestartingThread::runImpl() @@ -132,8 +125,8 @@ bool ReplicatedMergeTreeRestartingThread::runImpl() } catch (const Coordination::Exception &) { - /// The exception when you try to zookeeper_init usually happens if DNS does not work. We will try to do it again. - tryLogCurrentException(log, __PRETTY_FUNCTION__); + /// An exception during zookeeper_init usually happens if DNS does not work or the connection with ZK fails + tryLogCurrentException(log, "Failed to establish a new ZK connection.
Will try again"); assert(storage.is_readonly); return false; } @@ -158,12 +151,15 @@ bool ReplicatedMergeTreeRestartingThread::runImpl() storage.cleanup_thread.start(); storage.part_check_thread.start(); + LOG_DEBUG(log, "Table started successfully"); + return true; } bool ReplicatedMergeTreeRestartingThread::tryStartup() { + LOG_DEBUG(log, "Trying to start replica up"); try { removeFailedQuorumParts(); @@ -177,9 +173,7 @@ bool ReplicatedMergeTreeRestartingThread::tryStartup() try { storage.queue.initialize(zookeeper); - storage.queue.load(zookeeper); - storage.queue.createLogEntriesToFetchBrokenParts(); /// pullLogsToQueue() after we mark replica 'is_active' (and after we repair if it was lost); @@ -302,7 +296,7 @@ void ReplicatedMergeTreeRestartingThread::activateReplica() ReplicatedMergeTreeAddress address = storage.getReplicatedMergeTreeAddress(); String is_active_path = fs::path(storage.replica_path) / "is_active"; - zookeeper->waitForEphemeralToDisappearIfAny(is_active_path); + zookeeper->handleEphemeralNodeExistence(is_active_path, active_node_identifier); /// Simultaneously declare that this replica is active, and update the host. Coordination::Requests ops; @@ -348,7 +342,6 @@ void ReplicatedMergeTreeRestartingThread::partialShutdown(bool part_of_full_shut storage.replica_is_active_node = nullptr; LOG_TRACE(log, "Waiting for threads to finish"); - storage.merge_selecting_task->deactivate(); storage.queue_updating_task->deactivate(); storage.mutations_updating_task->deactivate(); diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h index 3d443a236ed..bb4b0c0fdd2 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h @@ -41,6 +41,7 @@ private: BackgroundSchedulePool::TaskHolder task; Int64 check_period_ms; /// The frequency of checking expiration of session in ZK. + UInt32 consecutive_check_failures = 0; /// How many consecutive checks have failed bool first_time = true; /// Activate replica for the first time. 
void run(); diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp index 0abea5977c3..dbc2bd98e20 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp @@ -1,10 +1,10 @@ #include #include #include -#include #include #include #include +#include #include #include @@ -99,19 +99,22 @@ size_t ReplicatedMergeTreeSink::checkQuorumPrecondition(zkutil::ZooKeeperPtr & z quorum_info.status_path = storage.zookeeper_path + "/quorum/status"; Strings replicas = zookeeper->getChildren(fs::path(storage.zookeeper_path) / "replicas"); - std::vector> replicas_status_futures; - replicas_status_futures.reserve(replicas.size()); + + Strings exists_paths; for (const auto & replica : replicas) if (replica != storage.replica_name) - replicas_status_futures.emplace_back(zookeeper->asyncExists(fs::path(storage.zookeeper_path) / "replicas" / replica / "is_active")); + exists_paths.emplace_back(fs::path(storage.zookeeper_path) / "replicas" / replica / "is_active"); - std::future is_active_future = zookeeper->asyncTryGet(storage.replica_path + "/is_active"); - std::future host_future = zookeeper->asyncTryGet(storage.replica_path + "/host"); + auto exists_result = zookeeper->exists(exists_paths); + auto get_results = zookeeper->get(Strings{storage.replica_path + "/is_active", storage.replica_path + "/host"}); size_t active_replicas = 1; /// Assume current replica is active (will check below) - for (auto & status : replicas_status_futures) - if (status.get().error == Coordination::Error::ZOK) + for (size_t i = 0; i < exists_paths.size(); ++i) + { + auto status = exists_result[i]; + if (status.error == Coordination::Error::ZOK) ++active_replicas; + } size_t replicas_number = replicas.size(); size_t quorum_size = getQuorumSize(replicas_number); @@ -135,8 +138,8 @@ size_t ReplicatedMergeTreeSink::checkQuorumPrecondition(zkutil::ZooKeeperPtr & z /// Both checks are implicitly made also later (otherwise there would be a race condition). 
- auto is_active = is_active_future.get(); - auto host = host_future.get(); + auto is_active = get_results[0]; + auto host = get_results[1]; if (is_active.error == Coordination::Error::ZNONODE || host.error == Coordination::Error::ZNONODE) throw Exception("Replica is not active right now", ErrorCodes::READONLY); @@ -162,7 +165,9 @@ void ReplicatedMergeTreeSink::consume(Chunk chunk) */ size_t replicas_num = checkQuorumPrecondition(zookeeper); - deduceTypesOfObjectColumns(storage_snapshot, block); + if (!storage_snapshot->object_columns.empty()) + convertDynamicColumnsToTuples(block, storage_snapshot); + auto part_blocks = storage.writer.splitBlockIntoParts(block, max_parts_per_block, metadata_snapshot, context); using DelayedPartitions = std::vector; @@ -265,7 +270,7 @@ void ReplicatedMergeTreeSink::finishDelayedChunk(zkutil::ZooKeeperPtr & zookeepe try { - commitPart(zookeeper, part, partition.block_id, partition.temp_part.builder, delayed_chunk->replicas_num); + commitPart(zookeeper, part, partition.block_id, delayed_chunk->replicas_num); last_block_is_duplicate = last_block_is_duplicate || part->is_duplicate; @@ -298,7 +303,7 @@ void ReplicatedMergeTreeSink::writeExistingPart(MergeTreeData::MutableDataPartPt try { part->version.setCreationTID(Tx::PrehistoricTID, nullptr); - commitPart(zookeeper, part, "", part->data_part_storage->getBuilder(), replicas_num); + commitPart(zookeeper, part, "", replicas_num); PartLog::addNewPart(storage.getContext(), part, watch.elapsed()); } catch (...) @@ -312,7 +317,6 @@ void ReplicatedMergeTreeSink::commitPart( zkutil::ZooKeeperPtr & zookeeper, MergeTreeData::MutableDataPartPtr & part, const String & block_id, - DataPartStorageBuilderPtr builder, size_t replicas_num) { /// It is possible that we alter a part with different types of source columns. @@ -323,7 +327,7 @@ void ReplicatedMergeTreeSink::commitPart( assertSessionIsNotExpired(zookeeper); - String temporary_part_relative_path = part->data_part_storage->getPartDirectory(); + String temporary_part_relative_path = part->getDataPartStorage().getPartDirectory(); /// There is one case when we need to retry transaction in a loop. /// But don't do it too many times - just as defensive measure. @@ -496,7 +500,7 @@ void ReplicatedMergeTreeSink::commitPart( try { auto lock = storage.lockParts(); - renamed = storage.renameTempPartAndAdd(part, transaction, builder, lock); + renamed = storage.renameTempPartAndAdd(part, transaction, lock); } catch (const Exception & e) { @@ -560,8 +564,7 @@ void ReplicatedMergeTreeSink::commitPart( transaction.rollbackPartsToTemporaryState(); part->is_temp = true; - part->renameTo(temporary_part_relative_path, false, builder); - builder->commit(); + part->renameTo(temporary_part_relative_path, false); /// If this part appeared on other replica than it's better to try to write it locally one more time. If it's our part /// than it will be ignored on the next itration. 
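The quorum precondition check in ReplicatedMergeTreeSink now collects all paths first and issues one batched exists()/get() call instead of one async future per replica. A rough sketch of the counting pattern follows, using simplified stand-in types rather than the real zkutil interfaces (which differ in detail).

#include <string>
#include <vector>

/// Simplified stand-ins; the real types live in Common/ZooKeeper.
enum class Error { ZOK, ZNONODE };
struct ExistsResponse { Error error = Error::ZNONODE; };

struct ZooKeeperStub
{
    /// One round trip for the whole batch instead of a future per path.
    std::vector<ExistsResponse> exists(const std::vector<std::string> & paths)
    {
        return std::vector<ExistsResponse>(paths.size(), ExistsResponse{Error::ZOK}); /// stub: pretend every node exists
    }
};

size_t countActiveReplicas(ZooKeeperStub & zk, const std::vector<std::string> & replicas,
                           const std::string & zookeeper_path, const std::string & own_replica)
{
    std::vector<std::string> exists_paths;
    for (const auto & replica : replicas)
        if (replica != own_replica)
            exists_paths.push_back(zookeeper_path + "/replicas/" + replica + "/is_active");

    size_t active_replicas = 1; /// assume the current replica is active, as the sink does
    auto results = zk.exists(exists_paths);
    for (size_t i = 0; i < exists_paths.size(); ++i)
        if (results[i].error == Error::ZOK)
            ++active_replicas;
    return active_replicas;
}

Batching keeps the replica count consistent with a single snapshot of the coordination state and avoids juggling per-path futures and timeouts.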
diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.h b/src/Storages/MergeTree/ReplicatedMergeTreeSink.h index ab729e6edec..da87ddc0d63 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.h @@ -79,7 +79,6 @@ private: zkutil::ZooKeeperPtr & zookeeper, MergeTreeData::MutableDataPartPtr & part, const String & block_id, - DataPartStorageBuilderPtr part_builder, size_t replicas_num); /// Wait for quorum to be satisfied on path (quorum_path) form part (part_name) diff --git a/src/Storages/MergeTree/SimpleMergeSelector.cpp b/src/Storages/MergeTree/SimpleMergeSelector.cpp index 3b71e2720c8..f9ed6aedc60 100644 --- a/src/Storages/MergeTree/SimpleMergeSelector.cpp +++ b/src/Storages/MergeTree/SimpleMergeSelector.cpp @@ -102,6 +102,9 @@ bool allow( double max_size_to_lower_base_log, const SimpleMergeSelector::Settings & settings) { + if (settings.min_age_to_force_merge && min_age >= settings.min_age_to_force_merge) + return true; + // std::cerr << "sum_size: " << sum_size << "\n"; /// Map size to 0..1 using logarithmic scale diff --git a/src/Storages/MergeTree/SimpleMergeSelector.h b/src/Storages/MergeTree/SimpleMergeSelector.h index 11ffe8b672a..c20eaa6e8de 100644 --- a/src/Storages/MergeTree/SimpleMergeSelector.h +++ b/src/Storages/MergeTree/SimpleMergeSelector.h @@ -141,6 +141,11 @@ public: double heuristic_to_align_parts_max_absolute_difference_in_powers_of_two = 0.5; double heuristic_to_align_parts_max_score_adjustment = 0.75; + /** If it's not 0, all part ranges that have min_age larger than min_age_to_force_merge + * will be considered for merging + */ + size_t min_age_to_force_merge = 0; + /** Heuristic: * From right side of range, remove all parts, that size is less than specified ratio of sum_size. 
*/ diff --git a/src/Storages/MergeTree/StorageFromMergeTreeDataPart.h b/src/Storages/MergeTree/StorageFromMergeTreeDataPart.h index fd313a10bc8..7bad9947a88 100644 --- a/src/Storages/MergeTree/StorageFromMergeTreeDataPart.h +++ b/src/Storages/MergeTree/StorageFromMergeTreeDataPart.h @@ -47,10 +47,10 @@ public: const StorageMetadataPtr & metadata_snapshot, ContextPtr /*query_context*/) const override { const auto & storage_columns = metadata_snapshot->getColumns(); - if (!hasObjectColumns(storage_columns)) + if (!hasDynamicSubcolumns(storage_columns)) return std::make_shared(*this, metadata_snapshot); - auto object_columns = getObjectColumns( + auto object_columns = getConcreteObjectColumns( parts.begin(), parts.end(), storage_columns, [](const auto & part) -> const auto & { return part->getColumns(); }); @@ -65,7 +65,7 @@ public: ContextPtr context, QueryProcessingStage::Enum /*processed_stage*/, size_t max_block_size, - unsigned num_streams) override + size_t num_streams) override { query_plan = std::move(*MergeTreeDataSelectExecutor(storage) .readFromParts( diff --git a/src/Storages/MergeTree/checkDataPart.cpp b/src/Storages/MergeTree/checkDataPart.cpp index d5a838668d2..6f9f16b6155 100644 --- a/src/Storages/MergeTree/checkDataPart.cpp +++ b/src/Storages/MergeTree/checkDataPart.cpp @@ -1,3 +1,4 @@ +#include "Storages/MergeTree/IDataPartStorage.h" #include #include @@ -46,7 +47,7 @@ bool isNotEnoughMemoryErrorCode(int code) IMergeTreeDataPart::Checksums checkDataPart( MergeTreeData::DataPartPtr data_part, - const DataPartStoragePtr & data_part_storage, + const IDataPartStorage & data_part_storage, const NamesAndTypesList & columns_list, const MergeTreeDataPartType & part_type, const NameSet & files_without_checksums, @@ -64,13 +65,13 @@ IMergeTreeDataPart::Checksums checkDataPart( NamesAndTypesList columns_txt; { - auto buf = data_part_storage->readFile("columns.txt", {}, std::nullopt, std::nullopt); + auto buf = data_part_storage.readFile("columns.txt", {}, std::nullopt, std::nullopt); columns_txt.readText(*buf); assertEOF(*buf); } if (columns_txt != columns_list) - throw Exception("Columns doesn't match in part " + data_part_storage->getFullPath() + throw Exception("Columns doesn't match in part " + data_part_storage.getFullPath() + ". Expected: " + columns_list.toString() + ". Found: " + columns_txt.toString(), ErrorCodes::CORRUPTED_DATA); @@ -78,9 +79,9 @@ IMergeTreeDataPart::Checksums checkDataPart( IMergeTreeDataPart::Checksums checksums_data; /// This function calculates checksum for both compressed and decompressed contents of compressed file. 
- auto checksum_compressed_file = [](const DataPartStoragePtr & data_part_storage_, const String & file_path) + auto checksum_compressed_file = [](const IDataPartStorage & data_part_storage_, const String & file_path) { - auto file_buf = data_part_storage_->readFile(file_path, {}, std::nullopt, std::nullopt); + auto file_buf = data_part_storage_.readFile(file_path, {}, std::nullopt, std::nullopt); HashingReadBuffer compressed_hashing_buf(*file_buf); CompressedReadBuffer uncompressing_buf(compressed_hashing_buf); HashingReadBuffer uncompressed_hashing_buf(uncompressing_buf); @@ -96,9 +97,9 @@ IMergeTreeDataPart::Checksums checkDataPart( auto ratio_of_defaults = data_part->storage.getSettings()->ratio_of_defaults_for_sparse_serialization; SerializationInfoByName serialization_infos(columns_txt, SerializationInfo::Settings{ratio_of_defaults, false}); - if (data_part_storage->exists(IMergeTreeDataPart::SERIALIZATION_FILE_NAME)) + if (data_part_storage.exists(IMergeTreeDataPart::SERIALIZATION_FILE_NAME)) { - auto serialization_file = data_part_storage->readFile(IMergeTreeDataPart::SERIALIZATION_FILE_NAME, {}, std::nullopt, std::nullopt); + auto serialization_file = data_part_storage.readFile(IMergeTreeDataPart::SERIALIZATION_FILE_NAME, {}, std::nullopt, std::nullopt); serialization_infos.readJSON(*serialization_file); } @@ -111,98 +112,17 @@ IMergeTreeDataPart::Checksums checkDataPart( }; /// This function calculates only checksum of file content (compressed or uncompressed). - /// It also calculates checksum of projections. auto checksum_file = [&](const String & file_name) { - if (data_part_storage->isDirectory(file_name) && endsWith(file_name, ".proj")) - { - auto projection_name = file_name.substr(0, file_name.size() - sizeof(".proj") + 1); - auto pit = data_part->getProjectionParts().find(projection_name); - if (pit == data_part->getProjectionParts().end()) - { - if (require_checksums) - throw Exception("Unexpected file " + file_name + " in data part", ErrorCodes::UNEXPECTED_FILE_IN_DATA_PART); - else - return; - } - - const auto & projection = pit->second; - IMergeTreeDataPart::Checksums projection_checksums_data; - - auto projection_part_storage = data_part_storage->getProjection(file_name); - - if (projection->getType() == MergeTreeDataPartType::Compact) - { - auto file_buf = projection_part_storage->readFile(MergeTreeDataPartCompact::DATA_FILE_NAME_WITH_EXTENSION, {}, std::nullopt, std::nullopt); - HashingReadBuffer hashing_buf(*file_buf); - hashing_buf.ignoreAll(); - projection_checksums_data.files[MergeTreeDataPartCompact::DATA_FILE_NAME_WITH_EXTENSION] - = IMergeTreeDataPart::Checksums::Checksum(hashing_buf.count(), hashing_buf.getHash()); - } - else - { - const NamesAndTypesList & projection_columns_list = projection->getColumns(); - for (const auto & projection_column : projection_columns_list) - { - get_serialization(projection_column)->enumerateStreams( - [&](const ISerialization::SubstreamPath & substream_path) - { - String projection_file_name = ISerialization::getFileNameForStream(projection_column, substream_path) + ".bin"; - projection_checksums_data.files[projection_file_name] = checksum_compressed_file(projection_part_storage, projection_file_name); - }); - } - } - - IMergeTreeDataPart::Checksums projection_checksums_txt; - - if (require_checksums || projection_part_storage->exists("checksums.txt")) - { - auto buf = projection_part_storage->readFile("checksums.txt", {}, std::nullopt, std::nullopt); - projection_checksums_txt.read(*buf); - assertEOF(*buf); - } - - const 
auto & projection_checksum_files_txt = projection_checksums_txt.files; - for (auto projection_it = projection_part_storage->iterate(); projection_it->isValid(); projection_it->next()) - { - const String & projection_file_name = projection_it->name(); - auto projection_checksum_it = projection_checksums_data.files.find(projection_file_name); - - /// Skip files that we already calculated. Also skip metadata files that are not checksummed. - if (projection_checksum_it == projection_checksums_data.files.end() && !files_without_checksums.contains(projection_file_name)) - { - auto projection_txt_checksum_it = projection_checksum_files_txt.find(file_name); - if (projection_txt_checksum_it == projection_checksum_files_txt.end() - || projection_txt_checksum_it->second.uncompressed_size == 0) - { - auto projection_file_buf = projection_part_storage->readFile(projection_file_name, {}, std::nullopt, std::nullopt); - HashingReadBuffer projection_hashing_buf(*projection_file_buf); - projection_hashing_buf.ignoreAll(); - projection_checksums_data.files[projection_file_name] = IMergeTreeDataPart::Checksums::Checksum( - projection_hashing_buf.count(), projection_hashing_buf.getHash()); - } - else - { - projection_checksums_data.files[projection_file_name] = checksum_compressed_file(projection_part_storage, projection_file_name); - } - } - } - checksums_data.files[file_name] = IMergeTreeDataPart::Checksums::Checksum( - projection_checksums_data.getTotalSizeOnDisk(), projection_checksums_data.getTotalChecksumUInt128()); - - if (require_checksums || !projection_checksums_txt.files.empty()) - projection_checksums_txt.checkEqual(projection_checksums_data, false); - } - else - { - auto file_buf = data_part_storage->readFile(file_name, {}, std::nullopt, std::nullopt); - HashingReadBuffer hashing_buf(*file_buf); - hashing_buf.ignoreAll(); - checksums_data.files[file_name] = IMergeTreeDataPart::Checksums::Checksum(hashing_buf.count(), hashing_buf.getHash()); - } + auto file_buf = data_part_storage.readFile(file_name, {}, std::nullopt, std::nullopt); + HashingReadBuffer hashing_buf(*file_buf); + hashing_buf.ignoreAll(); + checksums_data.files[file_name] = IMergeTreeDataPart::Checksums::Checksum(hashing_buf.count(), hashing_buf.getHash()); }; - bool check_uncompressed = true; + /// Do not check uncompressed for projections. But why? + bool check_uncompressed = !data_part->isProjectionPart(); + /// First calculate checksums for columns data if (part_type == MergeTreeDataPartType::Compact) { @@ -224,23 +144,32 @@ IMergeTreeDataPart::Checksums checkDataPart( } else { - throw Exception("Unknown type in part " + data_part_storage->getFullPath(), ErrorCodes::UNKNOWN_PART_TYPE); + throw Exception("Unknown type in part " + data_part_storage.getFullPath(), ErrorCodes::UNKNOWN_PART_TYPE); } /// Checksums from the rest files listed in checksums.txt. May be absent. If present, they are subsequently compared with the actual data checksums. 
IMergeTreeDataPart::Checksums checksums_txt; - if (require_checksums || data_part_storage->exists("checksums.txt")) + if (require_checksums || data_part_storage.exists("checksums.txt")) { - auto buf = data_part_storage->readFile("checksums.txt", {}, std::nullopt, std::nullopt); + auto buf = data_part_storage.readFile("checksums.txt", {}, std::nullopt, std::nullopt); checksums_txt.read(*buf); assertEOF(*buf); } + NameSet projections_on_disk; const auto & checksum_files_txt = checksums_txt.files; - for (auto it = data_part_storage->iterate(); it->isValid(); it->next()) + for (auto it = data_part_storage.iterate(); it->isValid(); it->next()) { - const String & file_name = it->name(); + auto file_name = it->name(); + + /// We will check projections later. + if (data_part_storage.isDirectory(file_name) && endsWith(file_name, ".proj")) + { + projections_on_disk.insert(file_name); + continue; + } + auto checksum_it = checksums_data.files.find(file_name); /// Skip files that we already calculated. Also skip metadata files that are not checksummed. @@ -259,11 +188,38 @@ IMergeTreeDataPart::Checksums checkDataPart( } } + for (const auto & [name, projection] : data_part->getProjectionParts()) + { + if (is_cancelled()) + return {}; + + auto projection_file = name + ".proj"; + auto projection_checksums = checkDataPart( + projection, *data_part_storage.getProjection(projection_file), + projection->getColumns(), projection->getType(), + projection->getFileNamesWithoutChecksums(), + require_checksums, is_cancelled); + + checksums_data.files[projection_file] = IMergeTreeDataPart::Checksums::Checksum( + projection_checksums.getTotalSizeOnDisk(), + projection_checksums.getTotalChecksumUInt128()); + + projections_on_disk.erase(projection_file); + } + + if (require_checksums && !projections_on_disk.empty()) + { + throw Exception(ErrorCodes::UNEXPECTED_FILE_IN_DATA_PART, + "Found unexpected projection directories: {}", + fmt::join(projections_on_disk, ",")); + } + if (is_cancelled()) return {}; if (require_checksums || !checksums_txt.files.empty()) checksums_txt.checkEqual(checksums_data, check_uncompressed); + return checksums_data; } @@ -285,7 +241,7 @@ IMergeTreeDataPart::Checksums checkDataPart( return checkDataPart( data_part, - data_part->data_part_storage, + data_part->getDataPartStorage(), data_part->getColumns(), data_part->getType(), data_part->getFileNamesWithoutChecksums(), diff --git a/src/Storages/MySQL/MySQLHelpers.cpp b/src/Storages/MySQL/MySQLHelpers.cpp index 94c07d2670f..127bdb96eaf 100644 --- a/src/Storages/MySQL/MySQLHelpers.cpp +++ b/src/Storages/MySQL/MySQLHelpers.cpp @@ -23,7 +23,7 @@ createMySQLPoolWithFailover(const StorageMySQLConfiguration & configuration, con return mysqlxx::PoolWithFailover( configuration.database, configuration.addresses, configuration.username, configuration.password, MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_START_CONNECTIONS, - mysql_settings.connection_pool_size, + static_cast(mysql_settings.connection_pool_size), mysql_settings.connection_max_tries, mysql_settings.connection_wait_timeout, mysql_settings.connect_timeout, diff --git a/src/Storages/NATS/NATSConnection.cpp b/src/Storages/NATS/NATSConnection.cpp index d33138419e2..70b3599aa09 100644 --- a/src/Storages/NATS/NATSConnection.cpp +++ b/src/Storages/NATS/NATSConnection.cpp @@ -111,7 +111,7 @@ void NATSConnectionManager::connectImpl() { servers[i] = configuration.servers[i].c_str(); } - natsOptions_SetServers(options, servers, configuration.servers.size()); + natsOptions_SetServers(options, servers, 
static_cast(configuration.servers.size())); } natsOptions_SetMaxReconnect(options, configuration.max_reconnect); natsOptions_SetReconnectWait(options, configuration.reconnect_wait); diff --git a/src/Storages/NATS/StorageNATS.cpp b/src/Storages/NATS/StorageNATS.cpp index 4a3ba973e67..dea2553700b 100644 --- a/src/Storages/NATS/StorageNATS.cpp +++ b/src/Storages/NATS/StorageNATS.cpp @@ -60,7 +60,7 @@ StorageNATS::StorageNATS( , schema_name(getContext()->getMacros()->expand(nats_settings->nats_schema)) , num_consumers(nats_settings->nats_num_consumers.value) , log(&Poco::Logger::get("StorageNATS (" + table_id_.table_name + ")")) - , semaphore(0, num_consumers) + , semaphore(0, static_cast(num_consumers)) , queue_size(std::max(QUEUE_SIZE, static_cast(getMaxBlockSize()))) , is_attach(is_attach_) { @@ -289,7 +289,7 @@ void StorageNATS::read( ContextPtr local_context, QueryProcessingStage::Enum /* processed_stage */, size_t /* max_block_size */, - unsigned /* num_streams */) + size_t /* num_streams */) { if (!consumers_ready) throw Exception("NATS consumers setup not finished. Connection might be lost", ErrorCodes::CANNOT_CONNECT_NATS); diff --git a/src/Storages/NATS/StorageNATS.h b/src/Storages/NATS/StorageNATS.h index 185b39250c8..a5a050d566f 100644 --- a/src/Storages/NATS/StorageNATS.h +++ b/src/Storages/NATS/StorageNATS.h @@ -47,7 +47,7 @@ public: ContextPtr local_context, QueryProcessingStage::Enum /* processed_stage */, size_t /* max_block_size */, - unsigned /* num_streams */) override; + size_t /* num_streams */) override; SinkToStoragePtr write(const ASTPtr & query, const StorageMetadataPtr & metadata_snapshot, ContextPtr context) override; diff --git a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp index cc80d567d1d..6d12960824a 100644 --- a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp @@ -279,7 +279,7 @@ void StorageMaterializedPostgreSQL::read( ContextPtr context_, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) + size_t num_streams) { auto nested_table = getNested(); diff --git a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.h b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.h index d8e9e98c662..af0adb10f9f 100644 --- a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.h +++ b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.h @@ -98,7 +98,7 @@ public: ContextPtr context_, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; /// This method is called only from MateriaizePostgreSQL database engine, because it needs to maintain /// an invariant: a table exists only if its nested table exists. 
This atomic variable is set to _true_ diff --git a/src/Storages/RabbitMQ/StorageRabbitMQ.cpp b/src/Storages/RabbitMQ/StorageRabbitMQ.cpp index 70838daec24..57f5ddd86e6 100644 --- a/src/Storages/RabbitMQ/StorageRabbitMQ.cpp +++ b/src/Storages/RabbitMQ/StorageRabbitMQ.cpp @@ -87,7 +87,7 @@ StorageRabbitMQ::StorageRabbitMQ( , use_user_setup(rabbitmq_settings->rabbitmq_queue_consume.value) , hash_exchange(num_consumers > 1 || num_queues > 1) , log(&Poco::Logger::get("StorageRabbitMQ (" + table_id_.table_name + ")")) - , semaphore(0, num_consumers) + , semaphore(0, static_cast(num_consumers)) , unique_strbase(getRandomName()) , queue_size(std::max(QUEUE_SIZE, static_cast(getMaxBlockSize()))) , milliseconds_to_wait(RESCHEDULE_MS) @@ -674,7 +674,7 @@ void StorageRabbitMQ::read( ContextPtr local_context, QueryProcessingStage::Enum /* processed_stage */, size_t /* max_block_size */, - unsigned /* num_streams */) + size_t /* num_streams */) { if (!rabbit_is_ready) throw Exception("RabbitMQ setup not finished. Connection might be lost", ErrorCodes::CANNOT_CONNECT_RABBITMQ); diff --git a/src/Storages/RabbitMQ/StorageRabbitMQ.h b/src/Storages/RabbitMQ/StorageRabbitMQ.h index 455b2fe8f09..a1250f50829 100644 --- a/src/Storages/RabbitMQ/StorageRabbitMQ.h +++ b/src/Storages/RabbitMQ/StorageRabbitMQ.h @@ -50,7 +50,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; SinkToStoragePtr write( const ASTPtr & query, diff --git a/src/Storages/ReadFinalForExternalReplicaStorage.cpp b/src/Storages/ReadFinalForExternalReplicaStorage.cpp index 3ec7a074fd4..28053c84e20 100644 --- a/src/Storages/ReadFinalForExternalReplicaStorage.cpp +++ b/src/Storages/ReadFinalForExternalReplicaStorage.cpp @@ -35,7 +35,7 @@ void readFinalFromNestedStorage( ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned int num_streams) + size_t num_streams) { NameSet column_names_set = NameSet(column_names.begin(), column_names.end()); auto lock = nested_storage->lockForShare(context->getCurrentQueryId(), context->getSettingsRef().lock_acquire_timeout); @@ -59,7 +59,8 @@ void readFinalFromNestedStorage( } auto nested_snapshot = nested_storage->getStorageSnapshot(nested_metadata, context); - nested_storage->read(query_plan, require_columns_name, nested_snapshot, query_info, context, processed_stage, max_block_size, num_streams); + nested_storage->read( + query_plan, require_columns_name, nested_snapshot, query_info, context, processed_stage, max_block_size, num_streams); if (!query_plan.isInitialized()) { diff --git a/src/Storages/ReadFinalForExternalReplicaStorage.h b/src/Storages/ReadFinalForExternalReplicaStorage.h index 178164b6643..f8d1264ccb3 100644 --- a/src/Storages/ReadFinalForExternalReplicaStorage.h +++ b/src/Storages/ReadFinalForExternalReplicaStorage.h @@ -21,7 +21,7 @@ void readFinalFromNestedStorage( ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned int num_streams); + size_t num_streams); } diff --git a/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp b/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp index 20b1de51a30..46ddb650eee 100644 --- a/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp +++ b/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp @@ -403,7 +403,7 @@ Pipe StorageEmbeddedRocksDB::read( ContextPtr context_, QueryProcessingStage::Enum /*processed_stage*/, size_t max_block_size, - unsigned num_streams) + size_t 
num_streams) { storage_snapshot->check(column_names); @@ -467,7 +467,7 @@ static StoragePtr create(const StorageFactory::Arguments & args) String rocksdb_dir; bool read_only{false}; if (!engine_args.empty()) - ttl = checkAndGetLiteralArgument(engine_args[0], "ttl"); + ttl = static_cast(checkAndGetLiteralArgument(engine_args[0], "ttl")); if (engine_args.size() > 1) rocksdb_dir = checkAndGetLiteralArgument(engine_args[1], "rocksdb_dir"); if (engine_args.size() > 2) diff --git a/src/Storages/RocksDB/StorageEmbeddedRocksDB.h b/src/Storages/RocksDB/StorageEmbeddedRocksDB.h index 03848510e66..ca0ab7a1840 100644 --- a/src/Storages/RocksDB/StorageEmbeddedRocksDB.h +++ b/src/Storages/RocksDB/StorageEmbeddedRocksDB.h @@ -46,7 +46,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; SinkToStoragePtr write(const ASTPtr & query, const StorageMetadataPtr & /*metadata_snapshot*/, ContextPtr context) override; void truncate(const ASTPtr &, const StorageMetadataPtr & metadata_snapshot, ContextPtr, TableExclusiveLockHolder &) override; diff --git a/src/Storages/SelectQueryInfo.h b/src/Storages/SelectQueryInfo.h index f2835ab4dbf..329bb650171 100644 --- a/src/Storages/SelectQueryInfo.h +++ b/src/Storages/SelectQueryInfo.h @@ -7,6 +7,9 @@ #include #include #include +#include +#include +#include #include @@ -177,6 +180,15 @@ struct SelectQueryInfo ASTPtr view_query; /// Optimized VIEW query ASTPtr original_query; /// Unmodified query for projection analysis + /// Planner context + PlannerContextPtr planner_context; + + /// Storage table expression + QueryTreeNodePtr table_expression; + + /// Table expression modifiers for storage + std::optional table_expression_modifiers; + std::shared_ptr storage_limits; /// Cluster for the query. @@ -220,6 +232,9 @@ struct SelectQueryInfo Block minmax_count_projection_block; MergeTreeDataSelectAnalysisResultPtr merge_tree_select_result_ptr; + // If limit is not 0, that means it's a trivial limit query. + UInt64 limit = 0; + InputOrderInfoPtr getInputOrderInfo() const { return input_order_info ? input_order_info : (projection ? 
projection->input_order_info : nullptr); diff --git a/src/Storages/StorageBuffer.cpp b/src/Storages/StorageBuffer.cpp index f6b397950ed..65b4dce3ad2 100644 --- a/src/Storages/StorageBuffer.cpp +++ b/src/Storages/StorageBuffer.cpp @@ -228,7 +228,7 @@ void StorageBuffer::read( ContextPtr local_context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) + size_t num_streams) { const auto & metadata_snapshot = storage_snapshot->metadata; diff --git a/src/Storages/StorageBuffer.h b/src/Storages/StorageBuffer.h index 580742c0c84..387165171b9 100644 --- a/src/Storages/StorageBuffer.h +++ b/src/Storages/StorageBuffer.h @@ -82,7 +82,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; bool supportsParallelInsert() const override { return true; } diff --git a/src/Storages/StorageDictionary.cpp b/src/Storages/StorageDictionary.cpp index 2839ac03a5b..a76c4dffb5b 100644 --- a/src/Storages/StorageDictionary.cpp +++ b/src/Storages/StorageDictionary.cpp @@ -169,13 +169,19 @@ Pipe StorageDictionary::read( ContextPtr local_context, QueryProcessingStage::Enum /*processed_stage*/, const size_t max_block_size, - const unsigned threads) + const size_t threads) { auto registered_dictionary_name = location == Location::SameDatabaseAndNameAsDictionary ? getStorageID().getInternalDictionaryName() : dictionary_name; auto dictionary = getContext()->getExternalDictionariesLoader().getDictionary(registered_dictionary_name, local_context); return dictionary->read(column_names, max_block_size, threads); } +std::shared_ptr StorageDictionary::getDictionary() const +{ + auto registered_dictionary_name = location == Location::SameDatabaseAndNameAsDictionary ? 
getStorageID().getInternalDictionaryName() : dictionary_name; + return getContext()->getExternalDictionariesLoader().getDictionary(registered_dictionary_name, getContext()); +} + void StorageDictionary::shutdown() { removeDictionaryConfigurationFromRepository(); diff --git a/src/Storages/StorageDictionary.h b/src/Storages/StorageDictionary.h index f81503910ca..b3442ec2f99 100644 --- a/src/Storages/StorageDictionary.h +++ b/src/Storages/StorageDictionary.h @@ -8,8 +8,10 @@ namespace DB { + struct DictionaryStructure; class TableFunctionDictionary; +class IDictionary; class StorageDictionary final : public IStorage, public WithContext { @@ -69,7 +71,9 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned threads) override; + size_t threads) override; + + std::shared_ptr getDictionary() const; static NamesAndTypesList getNamesAndTypes(const DictionaryStructure & dictionary_structure); static String generateNamesAndTypesDescription(const NamesAndTypesList & list); diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index 4eb6697dd6e..f7f68eba30f 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -564,6 +564,10 @@ std::optional StorageDistributed::getOptimizedQueryP return {}; } + /// TODO: Analyzer syntax analyzer result + if (!query_info.syntax_analyzer_result) + return {}; + // GROUP BY const ASTPtr group_by = select.groupBy(); if (!query_info.syntax_analyzer_result->aggregates.empty() || group_by) @@ -594,7 +598,7 @@ std::optional StorageDistributed::getOptimizedQueryP static bool requiresObjectColumns(const ColumnsDescription & all_columns, ASTPtr query) { - if (!hasObjectColumns(all_columns)) + if (!hasDynamicSubcolumns(all_columns)) return false; if (!query) @@ -609,7 +613,7 @@ static bool requiresObjectColumns(const ColumnsDescription & all_columns, ASTPtr auto name_in_storage = Nested::splitName(required_column).first; auto column_in_storage = all_columns.tryGetPhysical(name_in_storage); - if (column_in_storage && isObject(column_in_storage->type)) + if (column_in_storage && column_in_storage->type->hasDynamicSubcolumns()) return true; } @@ -636,7 +640,7 @@ StorageSnapshotPtr StorageDistributed::getStorageSnapshotForQuery( metadata_snapshot->getColumns(), getContext()); - auto object_columns = DB::getObjectColumns( + auto object_columns = DB::getConcreteObjectColumns( snapshot_data->objects_by_shard.begin(), snapshot_data->objects_by_shard.end(), metadata_snapshot->getColumns(), @@ -653,7 +657,7 @@ void StorageDistributed::read( ContextPtr local_context, QueryProcessingStage::Enum processed_stage, const size_t /*max_block_size*/, - const unsigned /*num_streams*/) + const size_t /*num_streams*/) { const auto * select_query = query_info.query->as(); if (select_query->final() && local_context->getSettingsRef().allow_experimental_parallel_reading_from_replicas) diff --git a/src/Storages/StorageDistributed.h b/src/Storages/StorageDistributed.h index 7cb25ae46ab..334f44a90f9 100644 --- a/src/Storages/StorageDistributed.h +++ b/src/Storages/StorageDistributed.h @@ -112,7 +112,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t /*max_block_size*/, - unsigned /*num_streams*/) override; + size_t /*num_streams*/) override; bool supportsParallelInsert() const override { return true; } std::optional totalBytes(const Settings &) const override; diff --git a/src/Storages/StorageExecutable.cpp b/src/Storages/StorageExecutable.cpp index 
2931e62b7ef..cd3cc4d48ac 100644 --- a/src/Storages/StorageExecutable.cpp +++ b/src/Storages/StorageExecutable.cpp @@ -111,7 +111,7 @@ void StorageExecutable::read( ContextPtr context, QueryProcessingStage::Enum /*processed_stage*/, size_t max_block_size, - unsigned /*threads*/) + size_t /*threads*/) { auto & script_name = settings.script_name; diff --git a/src/Storages/StorageExecutable.h b/src/Storages/StorageExecutable.h index 2638474082a..2393920fa3c 100644 --- a/src/Storages/StorageExecutable.h +++ b/src/Storages/StorageExecutable.h @@ -41,7 +41,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned threads) override; + size_t threads) override; private: ExecutableSettings settings; diff --git a/src/Storages/StorageExternalDistributed.cpp b/src/Storages/StorageExternalDistributed.cpp index dcb7a90b2f6..7d1eef1e47c 100644 --- a/src/Storages/StorageExternalDistributed.cpp +++ b/src/Storages/StorageExternalDistributed.cpp @@ -181,7 +181,7 @@ void StorageExternalDistributed::read( ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) + size_t num_streams) { std::vector> plans; for (const auto & shard : shards) diff --git a/src/Storages/StorageExternalDistributed.h b/src/Storages/StorageExternalDistributed.h index 52a2a7a4106..a1bdb41dded 100644 --- a/src/Storages/StorageExternalDistributed.h +++ b/src/Storages/StorageExternalDistributed.h @@ -55,7 +55,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; private: using Shards = std::unordered_set; diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp index 29f2d0667d9..6e032a47943 100644 --- a/src/Storages/StorageFile.cpp +++ b/src/Storages/StorageFile.cpp @@ -81,7 +81,8 @@ void listFilesWithRegexpMatchingImpl( const std::string & path_for_ls, const std::string & for_match, size_t & total_bytes_to_read, - std::vector & result) + std::vector & result, + bool recursive = false) { const size_t first_glob = for_match.find_first_of("*?{"); @@ -89,10 +90,17 @@ void listFilesWithRegexpMatchingImpl( const std::string suffix_with_globs = for_match.substr(end_of_path_without_globs); /// begin with '/' const size_t next_slash = suffix_with_globs.find('/', 1); - auto regexp = makeRegexpPatternFromGlobs(suffix_with_globs.substr(0, next_slash)); + const std::string current_glob = suffix_with_globs.substr(0, next_slash); + auto regexp = makeRegexpPatternFromGlobs(current_glob); + re2::RE2 matcher(regexp); + bool skip_regex = current_glob == "/*" ? 
true : false; + if (!recursive) + recursive = current_glob == "/**" ; + const std::string prefix_without_globs = path_for_ls + for_match.substr(1, end_of_path_without_globs); + if (!fs::exists(prefix_without_globs)) return; @@ -107,15 +115,21 @@ void listFilesWithRegexpMatchingImpl( /// Condition is_directory means what kind of path is it in current iteration of ls if (!it->is_directory() && !looking_for_directory) { - if (re2::RE2::FullMatch(file_name, matcher)) + if (skip_regex || re2::RE2::FullMatch(file_name, matcher)) { total_bytes_to_read += it->file_size(); result.push_back(it->path().string()); } } - else if (it->is_directory() && looking_for_directory) + else if (it->is_directory()) { - if (re2::RE2::FullMatch(file_name, matcher)) + if (recursive) + { + listFilesWithRegexpMatchingImpl(fs::path(full_path).append(it->path().string()) / "" , + looking_for_directory ? suffix_with_globs.substr(next_slash) : current_glob , + total_bytes_to_read, result, recursive); + } + else if (looking_for_directory && re2::RE2::FullMatch(file_name, matcher)) { /// Recursion depth is limited by pattern. '*' works only for depth = 1, for depth = 2 pattern path is '*/*'. So we do not need additional check. listFilesWithRegexpMatchingImpl(fs::path(full_path) / "", suffix_with_globs.substr(next_slash), total_bytes_to_read, result); @@ -209,7 +223,7 @@ std::unique_ptr createReadBuffer( in.setProgressCallback(context); } - auto zstd_window_log_max = context->getSettingsRef().zstd_window_log_max; + int zstd_window_log_max = static_cast(context->getSettingsRef().zstd_window_log_max); return wrapReadBufferWithCompressionMethod(std::move(nested_buffer), method, zstd_window_log_max); } @@ -645,7 +659,7 @@ Pipe StorageFile::read( ContextPtr context, QueryProcessingStage::Enum /*processed_stage*/, size_t max_block_size, - unsigned num_streams) + size_t num_streams) { if (use_table_fd) { diff --git a/src/Storages/StorageFile.h b/src/Storages/StorageFile.h index e60e5f6b371..03b3aacb67f 100644 --- a/src/Storages/StorageFile.h +++ b/src/Storages/StorageFile.h @@ -48,7 +48,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; SinkToStoragePtr write( const ASTPtr & query, diff --git a/src/Storages/StorageGenerateRandom.cpp b/src/Storages/StorageGenerateRandom.cpp index 9cac1e57297..c00e82598b2 100644 --- a/src/Storages/StorageGenerateRandom.cpp +++ b/src/Storages/StorageGenerateRandom.cpp @@ -494,7 +494,7 @@ Pipe StorageGenerateRandom::read( ContextPtr context, QueryProcessingStage::Enum /*processed_stage*/, size_t max_block_size, - unsigned num_streams) + size_t num_streams) { storage_snapshot->check(column_names); diff --git a/src/Storages/StorageGenerateRandom.h b/src/Storages/StorageGenerateRandom.h index 8dc3e490ae7..6b050c07e52 100644 --- a/src/Storages/StorageGenerateRandom.h +++ b/src/Storages/StorageGenerateRandom.h @@ -28,7 +28,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; bool supportsTransactions() const override { return true; } private: diff --git a/src/Storages/StorageInMemoryMetadata.cpp b/src/Storages/StorageInMemoryMetadata.cpp index 66dcc938aef..a80f21834db 100644 --- a/src/Storages/StorageInMemoryMetadata.cpp +++ b/src/Storages/StorageInMemoryMetadata.cpp @@ -526,7 +526,7 @@ void StorageInMemoryMetadata::check(const NamesAndTypesList & provided_columns) const auto * 
available_type = it->getMapped(); - if (!isObject(*available_type) + if (!available_type->hasDynamicSubcolumns() && !column.type->equals(*available_type) && !isCompatibleEnumTypes(available_type, column.type.get())) throw Exception( @@ -575,7 +575,7 @@ void StorageInMemoryMetadata::check(const NamesAndTypesList & provided_columns, const auto * provided_column_type = it->getMapped(); const auto * available_column_type = jt->getMapped(); - if (!isObject(*provided_column_type) + if (!provided_column_type->hasDynamicSubcolumns() && !provided_column_type->equals(*available_column_type) && !isCompatibleEnumTypes(available_column_type, provided_column_type)) throw Exception( @@ -619,7 +619,7 @@ void StorageInMemoryMetadata::check(const Block & block, bool need_all) const listOfColumns(available_columns)); const auto * available_type = it->getMapped(); - if (!isObject(*available_type) + if (!available_type->hasDynamicSubcolumns() && !column.type->equals(*available_type) && !isCompatibleEnumTypes(available_type, column.type.get())) throw Exception( diff --git a/src/Storages/StorageInput.cpp b/src/Storages/StorageInput.cpp index 4729d0a5bf8..18e8442c1b5 100644 --- a/src/Storages/StorageInput.cpp +++ b/src/Storages/StorageInput.cpp @@ -57,7 +57,7 @@ Pipe StorageInput::read( ContextPtr context, QueryProcessingStage::Enum /*processed_stage*/, size_t /*max_block_size*/, - unsigned /*num_streams*/) + size_t /*num_streams*/) { Pipes pipes; auto query_context = context->getQueryContext(); diff --git a/src/Storages/StorageInput.h b/src/Storages/StorageInput.h index 991a4f35b7b..da4669aaf37 100644 --- a/src/Storages/StorageInput.h +++ b/src/Storages/StorageInput.h @@ -25,7 +25,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; private: Pipe pipe; diff --git a/src/Storages/StorageJoin.cpp b/src/Storages/StorageJoin.cpp index 2e3e1d443ae..e4f786cd23b 100644 --- a/src/Storages/StorageJoin.cpp +++ b/src/Storages/StorageJoin.cpp @@ -585,7 +585,7 @@ Pipe StorageJoin::read( ContextPtr context, QueryProcessingStage::Enum /*processed_stage*/, size_t max_block_size, - unsigned /*num_streams*/) + size_t /*num_streams*/) { storage_snapshot->check(column_names); diff --git a/src/Storages/StorageJoin.h b/src/Storages/StorageJoin.h index 390af09422c..43515f800d9 100644 --- a/src/Storages/StorageJoin.h +++ b/src/Storages/StorageJoin.h @@ -68,7 +68,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; std::optional totalRows(const Settings & settings) const override; std::optional totalBytes(const Settings & settings) const override; diff --git a/src/Storages/StorageKeeperMap.cpp b/src/Storages/StorageKeeperMap.cpp index e62874490f8..21be205c0f6 100644 --- a/src/Storages/StorageKeeperMap.cpp +++ b/src/Storages/StorageKeeperMap.cpp @@ -408,7 +408,7 @@ Pipe StorageKeeperMap::read( ContextPtr context_, QueryProcessingStage::Enum /*processed_stage*/, size_t max_block_size, - unsigned num_streams) + size_t num_streams) { checkTable(); storage_snapshot->check(column_names); @@ -682,24 +682,20 @@ Chunk StorageKeeperMap::getBySerializedKeys(const std::span k auto client = getClient(); - std::vector> values; - values.reserve(keys.size()); + Strings full_key_paths; + full_key_paths.reserve(keys.size()); for (const auto & key : keys) { - const auto full_path = fullPathForKey(key); - 
values.emplace_back(client->asyncTryGet(full_path)); + full_key_paths.emplace_back(fullPathForKey(key)); } - auto wait_until = std::chrono::system_clock::now() + std::chrono::milliseconds(Coordination::DEFAULT_OPERATION_TIMEOUT_MS); + auto values = client->tryGet(full_key_paths); for (size_t i = 0; i < keys.size(); ++i) { - auto & value = values[i]; - if (value.wait_until(wait_until) != std::future_status::ready) - throw DB::Exception(ErrorCodes::KEEPER_EXCEPTION, "Failed to fetch values: timeout"); + auto response = values[i]; - auto response = value.get(); Coordination::Error code = response.error; if (code == Coordination::Error::ZOK) diff --git a/src/Storages/StorageKeeperMap.h b/src/Storages/StorageKeeperMap.h index 87861362e42..45b32434f15 100644 --- a/src/Storages/StorageKeeperMap.h +++ b/src/Storages/StorageKeeperMap.h @@ -39,7 +39,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; SinkToStoragePtr write(const ASTPtr & query, const StorageMetadataPtr & metadata_snapshot, ContextPtr context) override; diff --git a/src/Storages/StorageLog.cpp b/src/Storages/StorageLog.cpp index c6bc55fd620..8ed33220507 100644 --- a/src/Storages/StorageLog.cpp +++ b/src/Storages/StorageLog.cpp @@ -462,7 +462,7 @@ void LogSink::writeData(const NameAndTypePair & name_and_type, const IColumn & c settings.getter = createStreamGetter(name_and_type); if (!serialize_states.contains(name)) - serialization->serializeBinaryBulkStatePrefix(settings, serialize_states[name]); + serialization->serializeBinaryBulkStatePrefix(column, settings, serialize_states[name]); if (storage.use_marks_file) { @@ -782,7 +782,7 @@ Pipe StorageLog::read( ContextPtr local_context, QueryProcessingStage::Enum /*processed_stage*/, size_t max_block_size, - unsigned num_streams) + size_t num_streams) { storage_snapshot->check(column_names); diff --git a/src/Storages/StorageLog.h b/src/Storages/StorageLog.h index 2e677dd3161..a2b1356f240 100644 --- a/src/Storages/StorageLog.h +++ b/src/Storages/StorageLog.h @@ -53,7 +53,7 @@ public: ContextPtr local_context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; SinkToStoragePtr write(const ASTPtr & query, const StorageMetadataPtr & metadata_snapshot, ContextPtr local_context) override; diff --git a/src/Storages/StorageMaterializedMySQL.cpp b/src/Storages/StorageMaterializedMySQL.cpp index bb69f211a9e..0dc0b1bff0b 100644 --- a/src/Storages/StorageMaterializedMySQL.cpp +++ b/src/Storages/StorageMaterializedMySQL.cpp @@ -40,7 +40,7 @@ void StorageMaterializedMySQL::read( ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned int num_streams) + size_t num_streams) { if (const auto * db = typeid_cast(database)) db->rethrowExceptionIfNeeded(); diff --git a/src/Storages/StorageMaterializedMySQL.h b/src/Storages/StorageMaterializedMySQL.h index a66b7eba804..cbb59e508e8 100644 --- a/src/Storages/StorageMaterializedMySQL.h +++ b/src/Storages/StorageMaterializedMySQL.h @@ -24,8 +24,13 @@ public: bool needRewriteQueryWithFinal(const Names & column_names) const override; void read( - QueryPlan & query_plan, const Names & column_names, const StorageSnapshotPtr & metadata_snapshot, SelectQueryInfo & query_info, - ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, unsigned num_streams) override; + QueryPlan & query_plan, + const Names & 
column_names, + const StorageSnapshotPtr & metadata_snapshot, + SelectQueryInfo & query_info, + ContextPtr context, + QueryProcessingStage::Enum processed_stage, + size_t max_block_size, size_t num_streams) override; SinkToStoragePtr write(const ASTPtr &, const StorageMetadataPtr &, ContextPtr) override { throwNotAllowed(); } diff --git a/src/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp index b01415f9590..e256e087728 100644 --- a/src/Storages/StorageMaterializedView.cpp +++ b/src/Storages/StorageMaterializedView.cpp @@ -150,7 +150,7 @@ void StorageMaterializedView::read( ContextPtr local_context, QueryProcessingStage::Enum processed_stage, const size_t max_block_size, - const unsigned num_streams) + const size_t num_streams) { auto storage = getTargetTable(); auto lock = storage->lockForShare(local_context->getCurrentQueryId(), local_context->getSettingsRef().lock_acquire_timeout); diff --git a/src/Storages/StorageMaterializedView.h b/src/Storages/StorageMaterializedView.h index 1d8808b302e..af2dedf8164 100644 --- a/src/Storages/StorageMaterializedView.h +++ b/src/Storages/StorageMaterializedView.h @@ -91,7 +91,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; Strings getDataPaths() const override; diff --git a/src/Storages/StorageMemory.cpp b/src/Storages/StorageMemory.cpp index e4dbfe15095..881cbc18b10 100644 --- a/src/Storages/StorageMemory.cpp +++ b/src/Storages/StorageMemory.cpp @@ -146,7 +146,7 @@ public: auto extended_storage_columns = storage_snapshot->getColumns( GetColumnsOptions(GetColumnsOptions::AllPhysical).withExtendedObjects()); - convertObjectsToTuples(block, extended_storage_columns); + convertDynamicColumnsToTuples(block, storage_snapshot); } if (storage.compress) @@ -212,10 +212,10 @@ StorageSnapshotPtr StorageMemory::getStorageSnapshot(const StorageMetadataPtr & auto snapshot_data = std::make_unique(); snapshot_data->blocks = data.get(); - if (!hasObjectColumns(metadata_snapshot->getColumns())) + if (!hasDynamicSubcolumns(metadata_snapshot->getColumns())) return std::make_shared(*this, metadata_snapshot, ColumnsDescription{}, std::move(snapshot_data)); - auto object_columns = getObjectColumns( + auto object_columns = getConcreteObjectColumns( snapshot_data->blocks->begin(), snapshot_data->blocks->end(), metadata_snapshot->getColumns(), @@ -231,7 +231,7 @@ Pipe StorageMemory::read( ContextPtr /*context*/, QueryProcessingStage::Enum /*processed_stage*/, size_t /*max_block_size*/, - unsigned num_streams) + size_t num_streams) { storage_snapshot->check(column_names); diff --git a/src/Storages/StorageMemory.h b/src/Storages/StorageMemory.h index 3889acb952b..c739088dbe4 100644 --- a/src/Storages/StorageMemory.h +++ b/src/Storages/StorageMemory.h @@ -51,7 +51,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; bool supportsParallelInsert() const override { return true; } bool supportsSubcolumns() const override { return true; } diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index 9891340a0d0..7fb21b7e053 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -225,11 +225,15 @@ SelectQueryInfo getModifiedQueryInfo( SelectQueryInfo modified_query_info = query_info; modified_query_info.query = query_info.query->clone(); - /// Original query could contain JOIN but 
we need only the first joined table and its columns. - auto & modified_select = modified_query_info.query->as(); - TreeRewriterResult new_analyzer_res = *modified_query_info.syntax_analyzer_result; - removeJoin(modified_select, new_analyzer_res, modified_context); - modified_query_info.syntax_analyzer_result = std::make_shared(std::move(new_analyzer_res)); + /// TODO: Analyzer syntax analyzer result + if (modified_query_info.syntax_analyzer_result) + { + /// Original query could contain JOIN but we need only the first joined table and its columns. + auto & modified_select = modified_query_info.query->as(); + TreeRewriterResult new_analyzer_res = *modified_query_info.syntax_analyzer_result; + removeJoin(modified_select, new_analyzer_res, modified_context); + modified_query_info.syntax_analyzer_result = std::make_shared(std::move(new_analyzer_res)); + } if (!is_merge_engine) { @@ -249,7 +253,7 @@ void StorageMerge::read( ContextPtr local_context, QueryProcessingStage::Enum processed_stage, const size_t max_block_size, - unsigned num_streams) + size_t num_streams) { /** Just in case, turn off optimization "transfer to PREWHERE", * since there is no certainty that it works when one of table is MergeTree and other is not. @@ -513,7 +517,13 @@ QueryPipelineBuilderPtr ReadFromMerge::createSources( SelectQueryOptions(processed_stage).analyze()).buildQueryPipeline()); } - if (!modified_select.final() && storage->needRewriteQueryWithFinal(real_column_names)) + bool final = false; + if (modified_query_info.table_expression_modifiers) + final = modified_query_info.table_expression_modifiers->hasFinal(); + else + final = modified_select.final(); + + if (!final && storage->needRewriteQueryWithFinal(real_column_names)) { /// NOTE: It may not work correctly in some cases, because query was analyzed without final. /// However, it's needed for MaterializedMySQL and it's unlikely that someone will use it with Merge tables. 
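ReadFromMerge::createSources() above now prefers the analyzer's table expression modifiers over the parsed SELECT when deciding whether FINAL was requested. A toy sketch of that fallback, with simplified stand-in types in place of the real TableExpressionModifiers and AST classes:

#include <optional>

/// Simplified stand-ins for TableExpressionModifiers and the SELECT AST.
struct TableExpressionModifiersStub { bool has_final = false; bool hasFinal() const { return has_final; } };
struct SelectStub { bool is_final = false; bool final() const { return is_final; } };

/// Modifiers win when the new analyzer filled them in; otherwise fall back to the AST flag.
bool resolveFinal(const std::optional<TableExpressionModifiersStub> & modifiers, const SelectStub & select)
{
    if (modifiers)
        return modifiers->hasFinal();
    return select.final();
}

With no modifiers present (the pre-analyzer path), resolveFinal() degrades to exactly the old behaviour of reading the flag from the SELECT query.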
diff --git a/src/Storages/StorageMerge.h b/src/Storages/StorageMerge.h index 6bf68660803..33406321100 100644 --- a/src/Storages/StorageMerge.h +++ b/src/Storages/StorageMerge.h @@ -61,7 +61,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; void checkAlterIsPossible(const AlterCommands & commands, ContextPtr context) const override; diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index a65af1cf69e..a450a9ef3a9 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -1,4 +1,5 @@ #include "StorageMergeTree.h" +#include "Storages/MergeTree/IMergeTreeDataPart.h" #include @@ -220,7 +221,7 @@ void StorageMergeTree::read( ContextPtr local_context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) + size_t num_streams) { /// If true, then we will ask initiator if we can read chosen ranges bool enable_parallel_reading = local_context->getClientInfo().collaborate_with_initiator; @@ -378,7 +379,9 @@ CurrentlyMergingPartsTagger::CurrentlyMergingPartsTagger( /// if we mutate part, than we should reserve space on the same disk, because mutations possible can create hardlinks if (is_mutation) - reserved_space = storage.tryReserveSpace(total_size, future_part->parts[0]->data_part_storage); + { + reserved_space = storage.tryReserveSpace(total_size, future_part->parts[0]->getDataPartStorage()); + } else { IMergeTreeDataPart::TTLInfos ttl_infos; @@ -386,7 +389,9 @@ CurrentlyMergingPartsTagger::CurrentlyMergingPartsTagger( for (auto & part_ptr : future_part->parts) { ttl_infos.update(part_ptr->ttl_infos); - max_volume_index = std::max(max_volume_index, part_ptr->data_part_storage->getVolumeIndex(*storage.getStoragePolicy())); + auto disk_name = part_ptr->getDataPartStorage().getDiskName(); + size_t volume_index = storage.getStoragePolicy()->getVolumeIndexByDiskName(disk_name); + max_volume_index = std::max(max_volume_index, volume_index); } reserved_space = storage.balancedReservation( @@ -993,14 +998,6 @@ MergeMutateSelectedEntryPtr StorageMergeTree::selectPartsToMutate( const StorageMetadataPtr & metadata_snapshot, String * /* disable_reason */, TableLockHolder & /* table_lock_holder */, std::unique_lock & /*currently_processing_in_background_mutex_lock*/) { - size_t max_ast_elements = getContext()->getSettingsRef().max_expanded_ast_elements; - - auto future_part = std::make_shared(); - if (storage_settings.get()->assign_part_uuids) - future_part->uuid = UUIDHelpers::generateV4(); - - CurrentlyMergingPartsTaggerPtr tagger; - if (current_mutations_by_version.empty()) return {}; @@ -1014,6 +1011,14 @@ MergeMutateSelectedEntryPtr StorageMergeTree::selectPartsToMutate( return {}; } + size_t max_ast_elements = getContext()->getSettingsRef().max_expanded_ast_elements; + + auto future_part = std::make_shared(); + if (storage_settings.get()->assign_part_uuids) + future_part->uuid = UUIDHelpers::generateV4(); + + CurrentlyMergingPartsTaggerPtr tagger; + auto mutations_end_it = current_mutations_by_version.end(); for (const auto & part : getDataPartsVectorForInternalUsage()) { @@ -1152,7 +1157,8 @@ bool StorageMergeTree::scheduleDataProcessingJob(BackgroundJobsAssignee & assign return false; merge_entry = selectPartsToMerge(metadata_snapshot, false, {}, false, nullptr, share_lock, lock, txn); - if (!merge_entry) + + if (!merge_entry && !current_mutations_by_version.empty()) mutate_entry = 
selectPartsToMutate(metadata_snapshot, nullptr, share_lock, lock); has_mutations = !current_mutations_by_version.empty(); @@ -1473,7 +1479,7 @@ void StorageMergeTree::dropPartsImpl(DataPartsVector && parts_to_remove, bool de /// NOTE: no race with background cleanup until we hold pointers to parts for (const auto & part : parts_to_remove) { - LOG_INFO(log, "Detaching {}", part->data_part_storage->getPartDirectory()); + LOG_INFO(log, "Detaching {}", part->getDataPartStorage().getPartDirectory()); part->makeCloneInDetached("", metadata_snapshot); } } @@ -1518,9 +1524,8 @@ PartitionCommandsResultInfo StorageMergeTree::attachPartition( MergeTreeData::Transaction transaction(*this, local_context->getCurrentTransaction().get()); { auto lock = lockParts(); - auto builder = loaded_parts[i]->data_part_storage->getBuilder(); fillNewPartName(loaded_parts[i], lock); - renameTempPartAndAdd(loaded_parts[i], transaction, builder, lock); + renameTempPartAndAdd(loaded_parts[i], transaction, lock); transaction.commit(&lock); } @@ -1603,9 +1608,7 @@ void StorageMergeTree::replacePartitionFrom(const StoragePtr & source_table, con for (auto part : dst_parts) { fillNewPartName(part, data_parts_lock); - - auto builder = part->data_part_storage->getBuilder(); - renameTempPartAndReplaceUnlocked(part, transaction, builder, data_parts_lock); + renameTempPartAndReplaceUnlocked(part, transaction, data_parts_lock); } /// Populate transaction transaction.commit(&data_parts_lock); @@ -1684,9 +1687,8 @@ void StorageMergeTree::movePartitionToTable(const StoragePtr & dest_table, const for (auto & part : dst_parts) { - auto builder = part->data_part_storage->getBuilder(); dest_table_storage->fillNewPartName(part, dest_data_parts_lock); - dest_table_storage->renameTempPartAndReplaceUnlocked(part, transaction, builder, dest_data_parts_lock); + dest_table_storage->renameTempPartAndReplaceUnlocked(part, transaction, dest_data_parts_lock); } @@ -1740,16 +1742,16 @@ CheckResults StorageMergeTree::checkData(const ASTPtr & query, ContextPtr local_ for (auto & part : data_parts) { /// If the checksums file is not present, calculate the checksums and write them to disk. - String checksums_path = "checksums.txt"; - String tmp_checksums_path = "checksums.txt.tmp"; - if (part->isStoredOnDisk() && !part->data_part_storage->exists(checksums_path)) + static constexpr auto checksums_path = "checksums.txt"; + if (part->isStoredOnDisk() && !part->getDataPartStorage().exists(checksums_path)) { try { auto calculated_checksums = checkDataPart(part, false); calculated_checksums.checkEqual(part->checksums, true); - part->data_part_storage->writeChecksums(part->checksums, local_context->getWriteSettings()); + auto & part_mutable = const_cast(*part); + part_mutable.writeChecksums(part->checksums, local_context->getWriteSettings()); part->checkMetadata(); results.emplace_back(part->name, true, "Checksums recounted and written to disk."); @@ -1809,17 +1811,15 @@ BackupEntries StorageMergeTree::backupMutations(UInt64 version, const String & d void StorageMergeTree::attachRestoredParts(MutableDataPartsVector && parts) { - for (auto part : parts) { /// It's important to create it outside of lock scope because /// otherwise it can lock parts in destructor and deadlock is possible. 
MergeTreeData::Transaction transaction(*this, NO_TRANSACTION_RAW); - auto builder = part->data_part_storage->getBuilder(); { auto lock = lockParts(); fillNewPartName(part, lock); - renameTempPartAndAdd(part, transaction, builder, lock); + renameTempPartAndAdd(part, transaction, lock); transaction.commit(&lock); } } diff --git a/src/Storages/StorageMergeTree.h b/src/Storages/StorageMergeTree.h index ea2527e44a7..745546b96f6 100644 --- a/src/Storages/StorageMergeTree.h +++ b/src/Storages/StorageMergeTree.h @@ -66,7 +66,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; std::optional totalRows(const Settings &) const override; std::optional totalRowsByPartitionPredicate(const SelectQueryInfo &, ContextPtr) const override; diff --git a/src/Storages/StorageMongoDB.cpp b/src/Storages/StorageMongoDB.cpp index dce45b2431a..3ae9c974770 100644 --- a/src/Storages/StorageMongoDB.cpp +++ b/src/Storages/StorageMongoDB.cpp @@ -150,7 +150,7 @@ Pipe StorageMongoDB::read( ContextPtr /*context*/, QueryProcessingStage::Enum /*processed_stage*/, size_t max_block_size, - unsigned) + size_t /*num_streams*/) { connectIfNotConnected(); diff --git a/src/Storages/StorageMongoDB.h b/src/Storages/StorageMongoDB.h index 0e00b80432b..04fb759133a 100644 --- a/src/Storages/StorageMongoDB.h +++ b/src/Storages/StorageMongoDB.h @@ -37,7 +37,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; SinkToStoragePtr write( const ASTPtr & query, diff --git a/src/Storages/StorageMySQL.cpp b/src/Storages/StorageMySQL.cpp index 4ccd31ab981..20eb59c7262 100644 --- a/src/Storages/StorageMySQL.cpp +++ b/src/Storages/StorageMySQL.cpp @@ -78,7 +78,7 @@ Pipe StorageMySQL::read( ContextPtr context_, QueryProcessingStage::Enum /*processed_stage*/, size_t /*max_block_size*/, - unsigned) + size_t /*num_streams*/) { storage_snapshot->check(column_names_); String query = transformQueryForExternalDatabase( diff --git a/src/Storages/StorageMySQL.h b/src/Storages/StorageMySQL.h index e3c0712c179..bf9a24c9bfe 100644 --- a/src/Storages/StorageMySQL.h +++ b/src/Storages/StorageMySQL.h @@ -46,7 +46,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; SinkToStoragePtr write(const ASTPtr & query, const StorageMetadataPtr & /*metadata_snapshot*/, ContextPtr context) override; diff --git a/src/Storages/StorageNull.h b/src/Storages/StorageNull.h index 0011b5c94ad..2270731c0e3 100644 --- a/src/Storages/StorageNull.h +++ b/src/Storages/StorageNull.h @@ -35,8 +35,8 @@ public: SelectQueryInfo &, ContextPtr /*context*/, QueryProcessingStage::Enum /*processing_stage*/, - size_t, - unsigned) override + size_t /*max_block_size*/, + size_t /*num_streams*/) override { return Pipe( std::make_shared(storage_snapshot->getSampleBlockForColumns(column_names))); diff --git a/src/Storages/StoragePostgreSQL.cpp b/src/Storages/StoragePostgreSQL.cpp index e0c6dbf5463..6cf4e458438 100644 --- a/src/Storages/StoragePostgreSQL.cpp +++ b/src/Storages/StoragePostgreSQL.cpp @@ -81,7 +81,7 @@ Pipe StoragePostgreSQL::read( ContextPtr context_, QueryProcessingStage::Enum /*processed_stage*/, size_t max_block_size_, - unsigned) + size_t /*num_streams*/) { storage_snapshot->check(column_names_); diff --git a/src/Storages/StoragePostgreSQL.h 
b/src/Storages/StoragePostgreSQL.h index 0755e33269e..97c62daa50f 100644 --- a/src/Storages/StoragePostgreSQL.h +++ b/src/Storages/StoragePostgreSQL.h @@ -38,7 +38,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; SinkToStoragePtr write(const ASTPtr & query, const StorageMetadataPtr & /*metadata_snapshot*/, ContextPtr context) override; diff --git a/src/Storages/StorageProxy.h b/src/Storages/StorageProxy.h index 0fabff59db4..2afd9e8a63b 100644 --- a/src/Storages/StorageProxy.h +++ b/src/Storages/StorageProxy.h @@ -50,7 +50,7 @@ public: ContextPtr context, QueryProcessingStage::Enum & processed_stage, size_t max_block_size, - unsigned num_streams) override + size_t num_streams) override { return getNested()->watch(column_names, query_info, context, processed_stage, max_block_size, num_streams); } @@ -63,7 +63,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override + size_t num_streams) override { return getNested()->read(query_plan, column_names, storage_snapshot, query_info, context, processed_stage, max_block_size, num_streams); } diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 7a2ff56a782..3c0fbb162bc 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -1,5 +1,6 @@ #include +#include #include #include "Common/hex.h" #include @@ -179,6 +180,7 @@ namespace ActionLocks static const auto QUEUE_UPDATE_ERROR_SLEEP_MS = 1 * 1000; static const auto MUTATIONS_FINALIZING_SLEEP_MS = 1 * 1000; static const auto MUTATIONS_FINALIZING_IDLE_SLEEP_MS = 5 * 1000; +const String StorageReplicatedMergeTree::default_zookeeper_name = "default"; void StorageReplicatedMergeTree::setZooKeeper() { @@ -285,21 +287,32 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree( , replicated_fetches_throttler(std::make_shared(getSettings()->max_replicated_fetches_network_bandwidth, getContext()->getReplicatedFetchesThrottler())) , replicated_sends_throttler(std::make_shared(getSettings()->max_replicated_sends_network_bandwidth, getContext()->getReplicatedSendsThrottler())) { + /// We create and deactivate all tasks for consistency. + /// They all will be scheduled and activated by the restarting thread. queue_updating_task = getContext()->getSchedulePool().createTask( getStorageID().getFullTableName() + " (StorageReplicatedMergeTree::queueUpdatingTask)", [this]{ queueUpdatingTask(); }); + queue_updating_task->deactivate(); + mutations_updating_task = getContext()->getSchedulePool().createTask( getStorageID().getFullTableName() + " (StorageReplicatedMergeTree::mutationsUpdatingTask)", [this]{ mutationsUpdatingTask(); }); + mutations_updating_task->deactivate(); + merge_selecting_task = getContext()->getSchedulePool().createTask( getStorageID().getFullTableName() + " (StorageReplicatedMergeTree::mergeSelectingTask)", [this] { mergeSelectingTask(); }); - /// Will be activated if we win leader election. + /// Will be activated if we will achieve leader state. merge_selecting_task->deactivate(); mutations_finalizing_task = getContext()->getSchedulePool().createTask( getStorageID().getFullTableName() + " (StorageReplicatedMergeTree::mutationsFinalizingTask)", [this] { mutationsFinalizingTask(); }); + /// This task can be scheduled by different parts of code even when storage is readonly. 
+ /// This can lead to redundant exceptions during startup. + /// Will be activated by restarting thread. + mutations_finalizing_task->deactivate(); + bool has_zookeeper = getContext()->hasZooKeeper() || getContext()->hasAuxiliaryZooKeeper(zookeeper_name); if (has_zookeeper) { @@ -1443,6 +1456,7 @@ MergeTreeData::MutableDataPartPtr StorageReplicatedMergeTree::attachPartHelperFo const String part_new_name = actual_part_info.getPartName(); for (const DiskPtr & disk : getStoragePolicy()->getDisks()) + { for (const auto it = disk->iterateDirectory(fs::path(relative_data_path) / "detached/"); it->isValid(); it->next()) { const auto part_info = MergeTreePartInfo::tryParsePartName(it->name(), format_version); @@ -1479,6 +1493,7 @@ MergeTreeData::MutableDataPartPtr StorageReplicatedMergeTree::attachPartHelperFo return part; } } + } return {}; } @@ -1529,8 +1544,7 @@ bool StorageReplicatedMergeTree::executeLogEntry(LogEntry & entry) Transaction transaction(*this, NO_TRANSACTION_RAW); part->version.setCreationTID(Tx::PrehistoricTID, nullptr); - auto builder = part->data_part_storage->getBuilder(); - renameTempPartAndReplace(part, transaction, builder); + renameTempPartAndReplace(part, transaction); checkPartChecksumsAndCommit(transaction, part); writePartLog(PartLogElement::Type::NEW_PART, {}, 0 /** log entry is fake so we don't measure the time */, @@ -1769,7 +1783,7 @@ bool StorageReplicatedMergeTree::executeFetch(LogEntry & entry, bool need_to_che } -DataPartStoragePtr StorageReplicatedMergeTree::executeFetchShared( +MutableDataPartStoragePtr StorageReplicatedMergeTree::executeFetchShared( const String & source_replica, const String & new_part_name, const DiskPtr & disk, @@ -1813,7 +1827,7 @@ void StorageReplicatedMergeTree::executeDropRange(const LogEntry & entry) /// Therefore, we use all data parts. 
auto metadata_snapshot = getInMemoryMetadataPtr(); - DataPartsVector parts_to_remove; + PartsToRemoveFromZooKeeper parts_to_remove; { auto data_parts_lock = lockParts(); parts_to_remove = removePartsInRangeFromWorkingSetAndGetPartsToRemoveFromZooKeeper(NO_TRANSACTION_RAW, drop_range_info, data_parts_lock); @@ -1835,8 +1849,11 @@ void StorageReplicatedMergeTree::executeDropRange(const LogEntry & entry) /// If DETACH clone parts to detached/ directory for (const auto & part : parts_to_remove) { - LOG_INFO(log, "Detaching {}", part->data_part_storage->getPartDirectory()); - part->makeCloneInDetached("", metadata_snapshot); + if (auto part_to_detach = part.getPartIfItWasActive()) + { + LOG_INFO(log, "Detaching {}", part_to_detach->getDataPartStorage().getPartDirectory()); + part_to_detach->makeCloneInDetached("", metadata_snapshot); + } } } @@ -1927,7 +1944,7 @@ bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry) PartDescriptions all_parts; PartDescriptions parts_to_add; - DataPartsVector parts_to_remove; + PartsToRemoveFromZooKeeper parts_to_remove; auto table_lock_holder_dst_table = lockForShare( RWLockImpl::NO_QUERY, getSettings()->lock_acquire_timeout_for_background_operations); @@ -1958,7 +1975,7 @@ bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry) String parts_to_remove_str; for (const auto & part : parts_to_remove) { - parts_to_remove_str += part->name; + parts_to_remove_str += part.getPartName(); parts_to_remove_str += " "; } LOG_TRACE(log, "Replacing {} parts {}with empty set", parts_to_remove.size(), parts_to_remove_str); @@ -2214,8 +2231,7 @@ bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry) Coordination::Requests ops; for (PartDescriptionPtr & part_desc : final_parts) { - auto builder = part_desc->res_part->data_part_storage->getBuilder(); - renameTempPartAndReplace(part_desc->res_part, transaction, builder); + renameTempPartAndReplace(part_desc->res_part, transaction); getCommitPartOps(ops, part_desc->res_part); lockSharedData(*part_desc->res_part, false, part_desc->hardlinked_files); @@ -2235,7 +2251,7 @@ bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry) String parts_to_remove_str; for (const auto & part : parts_to_remove) { - parts_to_remove_str += part->name; + parts_to_remove_str += part.getPartName(); parts_to_remove_str += " "; } LOG_TRACE(log, "Replacing {} parts {}with {} parts {}", parts_to_remove.size(), parts_to_remove_str, @@ -2312,9 +2328,7 @@ void StorageReplicatedMergeTree::executeClonePartFromShard(const LogEntry & entr part = get_part(); // The fetched part is valuable and should not be cleaned like a temp part. 
part->is_temp = false; - auto builder = part->data_part_storage->getBuilder(); - part->renameTo("detached/" + entry.new_part_name, true, builder); - builder->commit(); + part->renameTo("detached/" + entry.new_part_name, true); LOG_INFO(log, "Cloned part {} to detached directory", part->name); } @@ -2408,6 +2422,7 @@ void StorageReplicatedMergeTree::cloneReplica(const String & source_replica, Coo std::vector source_queue; ActiveDataPartSet get_part_set{format_version}; ActiveDataPartSet drop_range_set{format_version}; + std::unordered_set exact_part_names; { std::vector queue_get_futures; @@ -2445,14 +2460,22 @@ void StorageReplicatedMergeTree::cloneReplica(const String & source_replica, Coo info.parsed_entry->znode_name = source_queue_names[i]; if (info.parsed_entry->type == LogEntry::DROP_RANGE) + { drop_range_set.add(info.parsed_entry->new_part_name); - - if (info.parsed_entry->type == LogEntry::GET_PART) + } + else if (info.parsed_entry->type == LogEntry::GET_PART) { String maybe_covering_drop_range = drop_range_set.getContainingPart(info.parsed_entry->new_part_name); if (maybe_covering_drop_range.empty()) get_part_set.add(info.parsed_entry->new_part_name); } + else + { + /// We should keep local parts if they are present in the queue of the source replica. + /// There's a chance that we are the only replica that has these parts. + Strings entry_virtual_parts = info.parsed_entry->getVirtualPartNames(format_version); + std::move(entry_virtual_parts.begin(), entry_virtual_parts.end(), std::inserter(exact_part_names, exact_part_names.end())); + } } } @@ -2472,11 +2495,17 @@ void StorageReplicatedMergeTree::cloneReplica(const String & source_replica, Coo for (const auto & part : local_parts_in_zk) { - if (get_part_set.getContainingPart(part).empty()) - { - parts_to_remove_from_zk.emplace_back(part); - LOG_WARNING(log, "Source replica does not have part {}. Removing it from ZooKeeper.", part); - } + /// We look for an exact match (and not for any covering part) + /// because our part might be dropped and a covering part might be merged through a gap. + /// (avoid resurrection of data that was removed a long time ago) + if (get_part_set.getContainingPart(part) == part) + continue; + + if (exact_part_names.contains(part)) + continue; + + parts_to_remove_from_zk.emplace_back(part); + LOG_WARNING(log, "Source replica does not have part {}. Removing it from ZooKeeper.", part); } { @@ -2498,11 +2527,14 @@ void StorageReplicatedMergeTree::cloneReplica(const String & source_replica, Coo for (const auto & part : local_active_parts) { - if (get_part_set.getContainingPart(part->name).empty()) - { - parts_to_remove_from_working_set.emplace_back(part); - LOG_WARNING(log, "Source replica does not have part {}. Removing it from working set.", part->name); - } + if (get_part_set.getContainingPart(part->name) == part->name) + continue; + + if (exact_part_names.contains(part->name)) + continue; + + parts_to_remove_from_working_set.emplace_back(part); + LOG_WARNING(log, "Source replica does not have part {}. 
Removing it from working set.", part->name); } if (getSettings()->detach_old_local_parts_when_cloning_replica) @@ -2511,7 +2543,7 @@ void StorageReplicatedMergeTree::cloneReplica(const String & source_replica, Coo for (const auto & part : parts_to_remove_from_working_set) { - LOG_INFO(log, "Detaching {}", part->data_part_storage->getPartDirectory()); + LOG_INFO(log, "Detaching {}", part->getDataPartStorage().getPartDirectory()); part->makeCloneInDetached("clone", metadata_snapshot); } } @@ -3206,16 +3238,17 @@ StorageReplicatedMergeTree::CreateMergeEntryResult StorageReplicatedMergeTree::c int32_t log_version, MergeType merge_type) { - std::vector> exists_futures; - exists_futures.reserve(parts.size()); + Strings exists_paths; + exists_paths.reserve(parts.size()); for (const auto & part : parts) - exists_futures.emplace_back(zookeeper->asyncExists(fs::path(replica_path) / "parts" / part->name)); + exists_paths.emplace_back(fs::path(replica_path) / "parts" / part->name); + auto exists_results = zookeeper->exists(exists_paths); bool all_in_zk = true; for (size_t i = 0; i < parts.size(); ++i) { /// If there is no information about part in ZK, we will not merge it. - if (exists_futures[i].get().error == Coordination::Error::ZNONODE) + if (exists_results[i].error == Coordination::Error::ZNONODE) { all_in_zk = false; @@ -3862,7 +3895,7 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Stora auto source_part = getActiveContainingPart(covered_part_info); /// Fetch for zero-copy replication is cheap and straightforward, so we don't use local clone here - if (source_part && (!settings_ptr->allow_remote_fs_zero_copy_replication || !source_part->data_part_storage->supportZeroCopyReplication())) + if (source_part && (!settings_ptr->allow_remote_fs_zero_copy_replication || !source_part->getDataPartStorage().supportZeroCopyReplication())) { auto source_part_header = ReplicatedMergeTreePartHeader::fromColumnsAndChecksums( source_part->getColumns(), source_part->checksums); @@ -3960,11 +3993,10 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Stora { part = get_part(); - auto builder = part->data_part_storage->getBuilder(); if (!to_detached) { Transaction transaction(*this, NO_TRANSACTION_RAW); - renameTempPartAndReplace(part, transaction, builder); + renameTempPartAndReplace(part, transaction); replaced_parts = checkPartChecksumsAndCommit(transaction, part, hardlinked_files); @@ -4006,8 +4038,7 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Stora { // The fetched part is valuable and should not be cleaned like a temp part. 
part->is_temp = false; - part->renameTo(fs::path("detached") / part_name, true, builder); - builder->commit(); + part->renameTo(fs::path("detached") / part_name, true); } } catch (const Exception & e) @@ -4041,7 +4072,7 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Stora } -DataPartStoragePtr StorageReplicatedMergeTree::fetchExistsPart( +MutableDataPartStoragePtr StorageReplicatedMergeTree::fetchExistsPart( const String & part_name, const StorageMetadataPtr & metadata_snapshot, const String & source_replica_path, @@ -4116,14 +4147,11 @@ DataPartStoragePtr StorageReplicatedMergeTree::fetchExistsPart( { part = get_part(); - if (part->data_part_storage->getDiskName() != replaced_disk->getName()) - throw Exception("Part " + part->name + " fetched on wrong disk " + part->data_part_storage->getDiskName(), ErrorCodes::LOGICAL_ERROR); + if (part->getDataPartStorage().getDiskName() != replaced_disk->getName()) + throw Exception("Part " + part->name + " fetched on wrong disk " + part->getDataPartStorage().getDiskName(), ErrorCodes::LOGICAL_ERROR); auto replaced_path = fs::path(replaced_part_path); - auto builder = part->data_part_storage->getBuilder(); - builder->rename(replaced_path.parent_path(), replaced_path.filename(), nullptr, true, false); - part->data_part_storage->onRename(replaced_path.parent_path(), replaced_path.filename()); - builder->commit(); + part->getDataPartStorage().rename(replaced_path.parent_path(), replaced_path.filename(), nullptr, true, false); } catch (const Exception & e) { @@ -4146,8 +4174,7 @@ DataPartStoragePtr StorageReplicatedMergeTree::fetchExistsPart( ProfileEvents::increment(ProfileEvents::ReplicatedPartFetches); LOG_DEBUG(log, "Fetched part {} from {}", part_name, source_replica_path); - - return part->data_part_storage; + return part->getDataPartStoragePtr(); } void StorageReplicatedMergeTree::startup() @@ -4339,7 +4366,7 @@ void StorageReplicatedMergeTree::read( ContextPtr local_context, QueryProcessingStage::Enum processed_stage, const size_t max_block_size, - const unsigned num_streams) + const size_t num_streams) { /// If true, then we will ask initiator if we can read chosen ranges const bool enable_parallel_reading = local_context->getClientInfo().collaborate_with_initiator; @@ -5544,7 +5571,8 @@ void StorageReplicatedMergeTree::getStatus(Status & res, bool with_zk_fields) res.queue = queue.getStatus(); res.absolute_delay = getAbsoluteDelay(); /// NOTE: may be slightly inconsistent with queue status. 
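Several hunks in this file make the same change to the ZooKeeper existence checks: instead of firing one asyncExists() future per part path, the paths are collected and checked with a single batched call (createLogEntryToMergeParts above, removePartsFromZooKeeperWithRetries and removePartsFromZooKeeper below). A condensed sketch of the new shape, using only names taken from those hunks; the surrounding replica_path and zookeeper members are assumed:

    Strings exists_paths;
    exists_paths.reserve(part_names.size());
    for (const String & part_name : part_names)
        exists_paths.emplace_back(fs::path(replica_path) / "parts" / part_name);

    /// Batched existence check, replacing the per-path asyncExists() futures.
    auto exists_results = zookeeper->exists(exists_paths);

    for (size_t i = 0; i < part_names.size(); ++i)
    {
        const auto & exists_resp = exists_results[i];
        if (exists_resp.error == Coordination::Error::ZOK)
        {
            /// ... build the removal ops for this part exactly as before ...
        }
    }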
- res.parts_to_check = part_check_thread.size(); + /// NOTE: consider convert to UInt64 + res.parts_to_check = static_cast(part_check_thread.size()); res.zookeeper_path = zookeeper_path; res.replica_name = replica_name; @@ -6205,11 +6233,11 @@ void StorageReplicatedMergeTree::clearOldPartsAndRemoveFromZK() } -void StorageReplicatedMergeTree::removePartsFromZooKeeperWithRetries(DataPartsVector & parts, size_t max_retries) +void StorageReplicatedMergeTree::removePartsFromZooKeeperWithRetries(PartsToRemoveFromZooKeeper & parts, size_t max_retries) { Strings part_names_to_remove; for (const auto & part : parts) - part_names_to_remove.emplace_back(part->name); + part_names_to_remove.emplace_back(part.getPartName()); return removePartsFromZooKeeperWithRetries(part_names_to_remove, max_retries); } @@ -6228,19 +6256,20 @@ void StorageReplicatedMergeTree::removePartsFromZooKeeperWithRetries(const Strin auto zookeeper = getZooKeeper(); - std::vector> exists_futures; - exists_futures.reserve(part_names.size()); + Strings exists_paths; + exists_paths.reserve(part_names.size()); for (const String & part_name : part_names) { - String part_path = fs::path(replica_path) / "parts" / part_name; - exists_futures.emplace_back(zookeeper->asyncExists(part_path)); + exists_paths.emplace_back(fs::path(replica_path) / "parts" / part_name); } + auto exists_results = zookeeper->exists(exists_paths); + std::vector> remove_futures; remove_futures.reserve(part_names.size()); for (size_t i = 0; i < part_names.size(); ++i) { - Coordination::ExistsResponse exists_resp = exists_futures[i].get(); + Coordination::ExistsResponse exists_resp = exists_results[i]; if (exists_resp.error == Coordination::Error::ZOK) { Coordination::Requests ops; @@ -6286,9 +6315,9 @@ void StorageReplicatedMergeTree::removePartsFromZooKeeperWithRetries(const Strin void StorageReplicatedMergeTree::removePartsFromZooKeeper( zkutil::ZooKeeperPtr & zookeeper, const Strings & part_names, NameSet * parts_should_be_retried) { - std::vector> exists_futures; + Strings exists_paths; std::vector> remove_futures; - exists_futures.reserve(part_names.size()); + exists_paths.reserve(part_names.size()); remove_futures.reserve(part_names.size()); try { @@ -6296,13 +6325,14 @@ void StorageReplicatedMergeTree::removePartsFromZooKeeper( /// if zk session will be dropped for (const String & part_name : part_names) { - String part_path = fs::path(replica_path) / "parts" / part_name; - exists_futures.emplace_back(zookeeper->asyncExists(part_path)); + exists_paths.emplace_back(fs::path(replica_path) / "parts" / part_name); } + auto exists_results = zookeeper->exists(exists_paths); + for (size_t i = 0; i < part_names.size(); ++i) { - Coordination::ExistsResponse exists_resp = exists_futures[i].get(); + auto exists_resp = exists_results[i]; if (exists_resp.error == Coordination::Error::ZOK) { Coordination::Requests ops; @@ -6534,7 +6564,7 @@ void StorageReplicatedMergeTree::replacePartitionFrom( if (replace) clearBlocksInPartition(*zookeeper, drop_range.partition_id, drop_range.max_block, drop_range.max_block); - DataPartsVector parts_to_remove; + PartsToRemoveFromZooKeeper parts_to_remove; Coordination::Responses op_results; try @@ -6560,10 +6590,7 @@ void StorageReplicatedMergeTree::replacePartitionFrom( { auto data_parts_lock = lockParts(); for (auto & part : dst_parts) - { - auto builder = part->data_part_storage->getBuilder(); - renameTempPartAndReplaceUnlocked(part, transaction, builder, data_parts_lock); - } + renameTempPartAndReplaceUnlocked(part, transaction, 
data_parts_lock); } for (size_t i = 0; i < dst_parts.size(); ++i) @@ -6773,7 +6800,7 @@ void StorageReplicatedMergeTree::movePartitionToTable(const StoragePtr & dest_ta clearBlocksInPartition(*zookeeper, drop_range.partition_id, drop_range.max_block, drop_range.max_block); - DataPartsVector parts_to_remove; + PartsToRemoveFromZooKeeper parts_to_remove; Coordination::Responses op_results; try @@ -6799,10 +6826,7 @@ void StorageReplicatedMergeTree::movePartitionToTable(const StoragePtr & dest_ta auto dest_data_parts_lock = dest_table_storage->lockParts(); for (auto & part : dst_parts) - { - auto builder = part->data_part_storage->getBuilder(); - dest_table_storage->renameTempPartAndReplaceUnlocked(part, transaction, builder, dest_data_parts_lock); - } + dest_table_storage->renameTempPartAndReplaceUnlocked(part, transaction, dest_data_parts_lock); for (size_t i = 0; i < dst_parts.size(); ++i) dest_table_storage->lockSharedData(*dst_parts[i], false, hardlinked_files_for_parts[i]); @@ -7391,7 +7415,7 @@ void StorageReplicatedMergeTree::checkBrokenDisks() for (auto & part : *parts) { - if (part->data_part_storage && part->data_part_storage->getDiskName() == disk_ptr->getName()) + if (part->getDataPartStorage().getDiskName() == disk_ptr->getName()) broken_part_callback(part->name); } continue; @@ -7554,10 +7578,10 @@ void StorageReplicatedMergeTree::lockSharedData(const IMergeTreeDataPart & part, { auto settings = getSettings(); - if (!part.data_part_storage || !part.isStoredOnDisk() || !settings->allow_remote_fs_zero_copy_replication) + if (!part.isStoredOnDisk() || !settings->allow_remote_fs_zero_copy_replication) return; - if (!part.data_part_storage->supportZeroCopyReplication()) + if (!part.getDataPartStorage().supportZeroCopyReplication()) return; zkutil::ZooKeeperPtr zookeeper = tryGetZooKeeper(); @@ -7568,7 +7592,7 @@ void StorageReplicatedMergeTree::lockSharedData(const IMergeTreeDataPart & part, boost::replace_all(id, "/", "_"); Strings zc_zookeeper_paths = getZeroCopyPartPath( - *getSettings(), part.data_part_storage->getDiskType(), getTableSharedID(), + *getSettings(), part.getDataPartStorage().getDiskType(), getTableSharedID(), part.name, zookeeper_path); String path_to_set_hardlinked_files; @@ -7577,7 +7601,7 @@ void StorageReplicatedMergeTree::lockSharedData(const IMergeTreeDataPart & part, if (hardlinked_files.has_value() && !hardlinked_files->hardlinks_from_source_part.empty()) { path_to_set_hardlinked_files = getZeroCopyPartPath( - *getSettings(), part.data_part_storage->getDiskType(), hardlinked_files->source_table_shared_id, + *getSettings(), part.getDataPartStorage().getDiskType(), hardlinked_files->source_table_shared_id, hardlinked_files->source_part_name, zookeeper_path)[0]; hardlinks = hardlinked_files->hardlinks_from_source_part; @@ -7601,25 +7625,22 @@ std::pair StorageReplicatedMergeTree::unlockSharedData(const IMer if (!settings->allow_remote_fs_zero_copy_replication) return std::make_pair(true, NameSet{}); - if (!part.data_part_storage) - LOG_WARNING(log, "Datapart storage for part {} (temp: {}) is not initialzied", part.name, part.is_temp); - - if (!part.data_part_storage || !part.isStoredOnDisk()) + if (!part.isStoredOnDisk()) { LOG_TRACE(log, "Part {} is not stored on disk, blobs can be removed", part.name); return std::make_pair(true, NameSet{}); } - if (!part.data_part_storage || !part.data_part_storage->supportZeroCopyReplication()) + if (!part.getDataPartStorage().supportZeroCopyReplication()) { LOG_TRACE(log, "Part {} is not stored on zero-copy replicated 
disk, blobs can be removed", part.name); return std::make_pair(true, NameSet{}); } /// If part is temporary refcount file may be absent - if (part.data_part_storage->exists(IMergeTreeDataPart::FILE_FOR_REFERENCES_CHECK)) + if (part.getDataPartStorage().exists(IMergeTreeDataPart::FILE_FOR_REFERENCES_CHECK)) { - auto ref_count = part.data_part_storage->getRefCount(IMergeTreeDataPart::FILE_FOR_REFERENCES_CHECK); + auto ref_count = part.getDataPartStorage().getRefCount(IMergeTreeDataPart::FILE_FOR_REFERENCES_CHECK); if (ref_count > 0) /// Keep part shard info for frozen backups { LOG_TRACE(log, "Part {} has more than zero local references ({}), blobs cannot be removed", part.name, ref_count); @@ -7657,7 +7678,7 @@ std::pair StorageReplicatedMergeTree::unlockSharedData(const IMer return unlockSharedDataByID( part.getUniqueId(), getTableSharedID(), part.name, replica_name, - part.data_part_storage->getDiskType(), zookeeper, *getSettings(), log, zookeeper_path, format_version); + part.getDataPartStorage().getDiskType(), zookeeper, *getSettings(), log, zookeeper_path, format_version); } namespace @@ -7735,7 +7756,7 @@ std::pair getParentLockedBlobs(zkutil::ZooKeeperPtr zookeeper_ptr std::pair StorageReplicatedMergeTree::unlockSharedDataByID( String part_id, const String & table_uuid, const String & part_name, - const String & replica_name_, std::string disk_type, zkutil::ZooKeeperPtr zookeeper_ptr, const MergeTreeSettings & settings, + const String & replica_name_, const std::string & disk_type, zkutil::ZooKeeperPtr zookeeper_ptr, const MergeTreeSettings & settings, Poco::Logger * logger, const String & zookeeper_path_old, MergeTreeDataFormatVersion data_format_version) { boost::replace_all(part_id, "/", "_"); @@ -7856,7 +7877,7 @@ std::pair StorageReplicatedMergeTree::unlockSharedDataByID( } -DataPartStoragePtr StorageReplicatedMergeTree::tryToFetchIfShared( +MutableDataPartStoragePtr StorageReplicatedMergeTree::tryToFetchIfShared( const IMergeTreeDataPart & part, const DiskPtr & disk, const String & path) @@ -7956,7 +7977,7 @@ String StorageReplicatedMergeTree::getSharedDataReplica( Strings StorageReplicatedMergeTree::getZeroCopyPartPath( - const MergeTreeSettings & settings, std::string disk_type, const String & table_uuid, + const MergeTreeSettings & settings, const std::string & disk_type, const String & table_uuid, const String & part_name, const String & zookeeper_path_old) { Strings res; @@ -7964,11 +7985,11 @@ Strings StorageReplicatedMergeTree::getZeroCopyPartPath( String zero_copy = fmt::format("zero_copy_{}", disk_type); String new_path = fs::path(settings.remote_fs_zero_copy_zookeeper_path.toString()) / zero_copy / table_uuid / part_name; - res.push_back(new_path); + res.push_back(std::move(new_path)); if (settings.remote_fs_zero_copy_path_compatible_mode && !zookeeper_path_old.empty()) { /// Compatibility mode for cluster with old and new versions String old_path = fs::path(zookeeper_path_old) / zero_copy / "shared" / part_name; - res.push_back(old_path); + res.push_back(std::move(old_path)); } return res; @@ -8091,15 +8112,13 @@ bool StorageReplicatedMergeTree::createEmptyPartInsteadOfLost(zkutil::ZooKeeperP minmax_idx->update(block, getMinMaxColumnsNames(metadata_snapshot->getPartitionKey())); auto new_volume = createVolumeFromReservation(reservation, volume); + auto data_part_storage = std::make_shared( new_volume, relative_data_path, TMP_PREFIX + lost_part_name); - DataPartStorageBuilderPtr data_part_storage_builder = std::make_shared( - new_volume, - relative_data_path, - 
TMP_PREFIX + lost_part_name); + data_part_storage->beginTransaction(); auto new_data_part = createPart( lost_part_name, @@ -8142,16 +8161,16 @@ bool StorageReplicatedMergeTree::createEmptyPartInsteadOfLost(zkutil::ZooKeeperP if (new_data_part->isStoredOnDisk()) { /// The name could be non-unique in case of stale files from previous runs. - if (data_part_storage_builder->exists()) + if (data_part_storage->exists()) { - LOG_WARNING(log, "Removing old temporary directory {}", new_data_part->data_part_storage->getFullPath()); - data_part_storage_builder->removeRecursive(); + LOG_WARNING(log, "Removing old temporary directory {}", new_data_part->getDataPartStorage().getFullPath()); + data_part_storage->removeRecursive(); } - data_part_storage_builder->createDirectories(); + data_part_storage->createDirectories(); if (getSettings()->fsync_part_directory) - sync_guard = data_part_storage_builder->getDirectorySyncGuard(); + sync_guard = data_part_storage->getDirectorySyncGuard(); } /// This effectively chooses minimal compression method: @@ -8159,7 +8178,7 @@ bool StorageReplicatedMergeTree::createEmptyPartInsteadOfLost(zkutil::ZooKeeperP auto compression_codec = getContext()->chooseCompressionCodec(0, 0); const auto & index_factory = MergeTreeIndexFactory::instance(); - MergedBlockOutputStream out(new_data_part, data_part_storage_builder, metadata_snapshot, columns, + MergedBlockOutputStream out(new_data_part, metadata_snapshot, columns, index_factory.getMany(metadata_snapshot->getSecondaryIndices()), compression_codec, NO_TRANSACTION_PTR); bool sync_on_insert = settings->fsync_after_insert; @@ -8173,7 +8192,7 @@ bool StorageReplicatedMergeTree::createEmptyPartInsteadOfLost(zkutil::ZooKeeperP try { MergeTreeData::Transaction transaction(*this, NO_TRANSACTION_RAW); - auto replaced_parts = renameTempPartAndReplace(new_data_part, transaction, data_part_storage_builder); + auto replaced_parts = renameTempPartAndReplace(new_data_part, transaction); if (!replaced_parts.empty()) { @@ -8361,7 +8380,7 @@ bool StorageReplicatedMergeTree::removeDetachedPart(DiskPtr disk, const String & if (disk->supportZeroCopyReplication()) { String table_id = getTableSharedID(); - return removeSharedDetachedPart(disk, path, part_name, table_id, zookeeper_name, replica_name, zookeeper_path, getContext(), current_zookeeper); + return removeSharedDetachedPart(disk, path, part_name, table_id, replica_name, zookeeper_path, getContext(), current_zookeeper); } disk->removeRecursive(path); @@ -8371,7 +8390,7 @@ bool StorageReplicatedMergeTree::removeDetachedPart(DiskPtr disk, const String & bool StorageReplicatedMergeTree::removeSharedDetachedPart(DiskPtr disk, const String & path, const String & part_name, const String & table_uuid, - const String &, const String & detached_replica_name, const String & detached_zookeeper_path, ContextPtr local_context, const zkutil::ZooKeeperPtr & zookeeper) + const String & detached_replica_name, const String & detached_zookeeper_path, const ContextPtr & local_context, const zkutil::ZooKeeperPtr & zookeeper) { bool keep_shared = false; diff --git a/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h index e10ffcce22c..323b1ce02bf 100644 --- a/src/Storages/StorageReplicatedMergeTree.h +++ b/src/Storages/StorageReplicatedMergeTree.h @@ -131,7 +131,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; std::optional totalRows(const Settings & settings) 
const override; std::optional totalRowsByPartitionPredicate(const SelectQueryInfo & query_info, ContextPtr context) const override; @@ -263,7 +263,7 @@ public: bool canExecuteFetch(const ReplicatedMergeTreeLogEntry & entry, String & disable_reason) const; /// Fetch part only when it stored on shared storage like S3 - DataPartStoragePtr executeFetchShared(const String & source_replica, const String & new_part_name, const DiskPtr & disk, const String & path); + MutableDataPartStoragePtr executeFetchShared(const String & source_replica, const String & new_part_name, const DiskPtr & disk, const String & path); /// Lock part in zookeeper for use shared data in several nodes void lockSharedData(const IMergeTreeDataPart & part, bool replace_existing_lock, std::optional hardlinked_files) const override; @@ -279,16 +279,16 @@ public: /// Return true if data unlocked /// Return false if data is still used by another node static std::pair unlockSharedDataByID(String part_id, const String & table_uuid, const String & part_name, const String & replica_name_, - std::string disk_type, zkutil::ZooKeeperPtr zookeeper_, const MergeTreeSettings & settings, Poco::Logger * logger, + const std::string & disk_type, zkutil::ZooKeeperPtr zookeeper_, const MergeTreeSettings & settings, Poco::Logger * logger, const String & zookeeper_path_old, MergeTreeDataFormatVersion data_format_version); /// Fetch part only if some replica has it on shared storage like S3 - DataPartStoragePtr tryToFetchIfShared(const IMergeTreeDataPart & part, const DiskPtr & disk, const String & path) override; + MutableDataPartStoragePtr tryToFetchIfShared(const IMergeTreeDataPart & part, const DiskPtr & disk, const String & path) override; /// Get best replica having this partition on a same type remote disk String getSharedDataReplica(const IMergeTreeDataPart & part, DataSourceType data_source_type) const; - inline String getReplicaName() const { return replica_name; } + inline const String & getReplicaName() const { return replica_name; } /// Restores table metadata if ZooKeeper lost it. /// Used only on restarted readonly replicas (not checked). All active (Active) parts are moved to detached/ @@ -310,9 +310,9 @@ public: bool createEmptyPartInsteadOfLost(zkutil::ZooKeeperPtr zookeeper, const String & lost_part_name); // Return default or custom zookeeper name for table - String getZooKeeperName() const { return zookeeper_name; } + const String & getZooKeeperName() const { return zookeeper_name; } - String getZooKeeperPath() const { return zookeeper_path; } + const String & getZooKeeperPath() const { return zookeeper_path; } // Return table id, common for different replicas String getTableSharedID() const override; @@ -320,13 +320,13 @@ public: /// Returns the same as getTableSharedID(), but extracts it from a create query. static std::optional tryGetTableSharedIDFromCreateQuery(const IAST & create_query, const ContextPtr & global_context); - static String getDefaultZooKeeperName() { return default_zookeeper_name; } + static const String & getDefaultZooKeeperName() { return default_zookeeper_name; } /// Check if there are new broken disks and enqueue part recovery tasks. 
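A related cleanup in this header: the string getters (getReplicaName(), getZooKeeperName(), getZooKeeperPath(), getDefaultZooKeeperName()) now return const String & instead of String by value, and default_zookeeper_name changes from a static constexpr character literal to a static const String defined in StorageReplicatedMergeTree.cpp (see the hunk near the top of that file earlier in this diff). The likely reason, stated here as an assumption, is that returning a reference requires a real String object with static storage duration. A minimal sketch of the resulting pair of declarations:

    /// StorageReplicatedMergeTree.h
    static const String default_zookeeper_name;
    static const String & getDefaultZooKeeperName() { return default_zookeeper_name; }

    /// StorageReplicatedMergeTree.cpp
    const String StorageReplicatedMergeTree::default_zookeeper_name = "default";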
void checkBrokenDisks(); static bool removeSharedDetachedPart(DiskPtr disk, const String & path, const String & part_name, const String & table_uuid, - const String & zookeeper_name, const String & replica_name, const String & zookeeper_path, ContextPtr local_context, const zkutil::ZooKeeperPtr & zookeeper); + const String & replica_name, const String & zookeeper_path, const ContextPtr & local_context, const zkutil::ZooKeeperPtr & zookeeper); bool canUseZeroCopyReplication() const; private: @@ -381,11 +381,11 @@ private: /// If false - ZooKeeper is available, but there is no table metadata. It's safe to drop table in this case. std::optional has_metadata_in_zookeeper; - static constexpr auto default_zookeeper_name = "default"; - String zookeeper_name; - String zookeeper_path; - String replica_name; - String replica_path; + static const String default_zookeeper_name; + const String zookeeper_name; + const String zookeeper_path; + const String replica_name; + const String replica_path; /** /replicas/me/is_active. */ @@ -549,7 +549,7 @@ private: /// Remove parts from ZooKeeper, throw exception if unable to do so after max_retries. void removePartsFromZooKeeperWithRetries(const Strings & part_names, size_t max_retries = 5); - void removePartsFromZooKeeperWithRetries(DataPartsVector & parts, size_t max_retries = 5); + void removePartsFromZooKeeperWithRetries(PartsToRemoveFromZooKeeper & parts, size_t max_retries = 5); /// Removes a part from ZooKeeper and adds a task to the queue to download it. It is supposed to do this with broken parts. void removePartAndEnqueueFetch(const String & part_name); @@ -682,7 +682,7 @@ private: * Used for replace local part on the same s3-shared part in hybrid storage. * Returns false if part is already fetching right now. */ - DataPartStoragePtr fetchExistsPart( + MutableDataPartStoragePtr fetchExistsPart( const String & part_name, const StorageMetadataPtr & metadata_snapshot, const String & replica_path, @@ -829,7 +829,7 @@ private: PartitionBlockNumbersHolder allocateBlockNumbersInAffectedPartitions( const MutationCommands & commands, ContextPtr query_context, const zkutil::ZooKeeperPtr & zookeeper) const; - static Strings getZeroCopyPartPath(const MergeTreeSettings & settings, std::string disk_type, const String & table_uuid, + static Strings getZeroCopyPartPath(const MergeTreeSettings & settings, const std::string & disk_type, const String & table_uuid, const String & part_name, const String & zookeeper_path_old); static void createZeroCopyLockNode( diff --git a/src/Storages/StorageS3.cpp b/src/Storages/StorageS3.cpp index 675dd548088..9bbccf5f582 100644 --- a/src/Storages/StorageS3.cpp +++ b/src/Storages/StorageS3.cpp @@ -139,7 +139,9 @@ public: request.SetBucket(globbed_uri.bucket); request.SetPrefix(key_prefix); + matcher = std::make_unique(makeRegexpPatternFromGlobs(globbed_uri.key)); + recursive = globbed_uri.key == "/**" ? 
true : false; fillInternalBufferAssumeLocked(); } @@ -197,7 +199,7 @@ private: for (const auto & row : result_batch) { const String & key = row.GetKey(); - if (re2::RE2::FullMatch(key, *matcher)) + if (recursive || re2::RE2::FullMatch(key, *matcher)) { String path = fs::path(globbed_uri.bucket) / key; if (object_infos) @@ -224,7 +226,7 @@ private: for (const auto & row : result_batch) { String key = row.GetKey(); - if (re2::RE2::FullMatch(key, *matcher)) + if (recursive || re2::RE2::FullMatch(key, *matcher)) buffer.emplace_back(std::move(key)); } } @@ -252,6 +254,7 @@ private: Aws::S3::Model::ListObjectsV2Request request; Aws::S3::Model::ListObjectsV2Outcome outcome; std::unique_ptr matcher; + bool recursive{false}; bool is_finished{false}; std::unordered_map * object_infos; Strings * read_keys; @@ -361,39 +364,6 @@ String StorageS3Source::KeysIterator::next() return pimpl->next(); } -class StorageS3Source::ReadTasksIterator::Impl -{ -public: - explicit Impl(const std::vector & read_tasks_, const ReadTaskCallback & new_read_tasks_callback_) - : read_tasks(read_tasks_), new_read_tasks_callback(new_read_tasks_callback_) - { - } - - String next() - { - size_t current_index = index.fetch_add(1, std::memory_order_relaxed); - if (current_index >= read_tasks.size()) - return new_read_tasks_callback(); - return read_tasks[current_index]; - } - -private: - std::atomic_size_t index = 0; - std::vector read_tasks; - ReadTaskCallback new_read_tasks_callback; -}; - -StorageS3Source::ReadTasksIterator::ReadTasksIterator( - const std::vector & read_tasks_, const ReadTaskCallback & new_read_tasks_callback_) - : pimpl(std::make_shared(read_tasks_, new_read_tasks_callback_)) -{ -} - -String StorageS3Source::ReadTasksIterator::next() -{ - return pimpl->next(); -} - Block StorageS3Source::getHeader(Block sample_block, const std::vector & requested_virtual_columns) { for (const auto & virtual_column : requested_virtual_columns) @@ -457,8 +427,9 @@ bool StorageS3Source::initialize() file_path = fs::path(bucket) / current_key; - auto zstd_window_log_max = getContext()->getSettingsRef().zstd_window_log_max; - read_buf = wrapReadBufferWithCompressionMethod(createS3ReadBuffer(current_key), chooseCompressionMethod(current_key, compression_hint), zstd_window_log_max); + int zstd_window_log_max = static_cast(getContext()->getSettingsRef().zstd_window_log_max); + read_buf = wrapReadBufferWithCompressionMethod( + createS3ReadBuffer(current_key), chooseCompressionMethod(current_key, compression_hint), zstd_window_log_max); auto input_format = getContext()->getInputFormat(format, *read_buf, sample_block, max_block_size, format_settings); QueryPipelineBuilder builder; @@ -802,8 +773,7 @@ StorageS3::StorageS3( distributed_processing_, is_key_with_globs, format_settings, - context_, - &read_tasks_used_in_schema_inference); + context_); storage_metadata.setColumns(columns); } else @@ -831,19 +801,14 @@ std::shared_ptr StorageS3::createFileIterator( ContextPtr local_context, ASTPtr query, const Block & virtual_block, - const std::vector & read_tasks, std::unordered_map * object_infos, Strings * read_keys) { if (distributed_processing) { return std::make_shared( - [read_tasks_iterator = std::make_shared(read_tasks, local_context->getReadTaskCallback()), read_keys]() -> String - { - auto key = read_tasks_iterator->next(); - if (read_keys) - read_keys->push_back(key); - return key; + [callback = local_context->getReadTaskCallback()]() -> String { + return callback(); }); } else if (is_key_with_globs) @@ -875,7 +840,7 @@ Pipe 
StorageS3::read( ContextPtr local_context, QueryProcessingStage::Enum /*processed_stage*/, size_t max_block_size, - unsigned num_streams) + size_t num_streams) { bool has_wildcards = s3_configuration.uri.bucket.find(PARTITION_ID_WILDCARD) != String::npos || keys.back().find(PARTITION_ID_WILDCARD) != String::npos; @@ -903,7 +868,6 @@ Pipe StorageS3::read( local_context, query_info.query, virtual_block, - read_tasks_used_in_schema_inference, &object_infos); ColumnsDescription columns_description; @@ -1079,12 +1043,12 @@ void StorageS3::updateS3Configuration(ContextPtr ctx, StorageS3::S3Configuration S3::PocoHTTPClientConfiguration client_configuration = S3::ClientFactory::instance().createClientConfiguration( upd.auth_settings.region, ctx->getRemoteHostFilter(), - ctx->getGlobalContext()->getSettingsRef().s3_max_redirects, + static_cast(ctx->getGlobalContext()->getSettingsRef().s3_max_redirects), ctx->getGlobalContext()->getSettingsRef().enable_s3_requests_logging, /* for_disk_s3 = */ false); client_configuration.endpointOverride = upd.uri.endpoint; - client_configuration.maxConnections = upd.rw_settings.max_connections; + client_configuration.maxConnections = static_cast(upd.rw_settings.max_connections); auto credentials = Aws::Auth::AWSCredentials(upd.auth_settings.access_key_id, upd.auth_settings.secret_access_key); auto headers = upd.auth_settings.headers; @@ -1201,7 +1165,7 @@ ColumnsDescription StorageS3::getTableStructureFromData( return getTableStructureFromDataImpl( configuration.format, s3_configuration, configuration.compression_method, distributed_processing, - s3_configuration.uri.key.find_first_of("*?{") != std::string::npos, format_settings, ctx, nullptr, object_infos); + s3_configuration.uri.key.find_first_of("*?{") != std::string::npos, format_settings, ctx, object_infos); } ColumnsDescription StorageS3::getTableStructureFromDataImpl( @@ -1212,13 +1176,12 @@ ColumnsDescription StorageS3::getTableStructureFromDataImpl( bool is_key_with_globs, const std::optional & format_settings, ContextPtr ctx, - std::vector * read_keys_in_distributed_processing, std::unordered_map * object_infos) { std::vector read_keys; auto file_iterator - = createFileIterator(s3_configuration, {s3_configuration.uri.key}, is_key_with_globs, distributed_processing, ctx, nullptr, {}, {}, object_infos, &read_keys); + = createFileIterator(s3_configuration, {s3_configuration.uri.key}, is_key_with_globs, distributed_processing, ctx, nullptr, {}, object_infos, &read_keys); std::optional columns_from_cache; size_t prev_read_keys_size = read_keys.size(); @@ -1254,7 +1217,7 @@ ColumnsDescription StorageS3::getTableStructureFromDataImpl( } first = false; - const auto zstd_window_log_max = ctx->getSettingsRef().zstd_window_log_max; + int zstd_window_log_max = static_cast(ctx->getSettingsRef().zstd_window_log_max); return wrapReadBufferWithCompressionMethod( std::make_unique( s3_configuration.client, s3_configuration.uri.bucket, key, s3_configuration.uri.version_id, s3_configuration.rw_settings.max_single_read_retries, ctx->getReadSettings()), @@ -1271,9 +1234,6 @@ ColumnsDescription StorageS3::getTableStructureFromDataImpl( if (ctx->getSettingsRef().schema_inference_use_cache_for_s3) addColumnsToCache(read_keys, s3_configuration, columns, format, format_settings, ctx); - if (distributed_processing && read_keys_in_distributed_processing) - *read_keys_in_distributed_processing = std::move(read_keys); - return columns; } diff --git a/src/Storages/StorageS3.h b/src/Storages/StorageS3.h index a983a59d98c..23947a32092 
100644 --- a/src/Storages/StorageS3.h +++ b/src/Storages/StorageS3.h @@ -66,18 +66,6 @@ public: std::shared_ptr pimpl; }; - class ReadTasksIterator - { - public: - ReadTasksIterator(const std::vector & read_tasks_, const ReadTaskCallback & new_read_tasks_callback_); - String next(); - - private: - class Impl; - /// shared_ptr to have copy constructor - std::shared_ptr pimpl; - }; - using IteratorWrapper = std::function; static Block getHeader(Block sample_block, const std::vector & requested_virtual_columns); @@ -171,7 +159,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; SinkToStoragePtr write(const ASTPtr & query, const StorageMetadataPtr & /*metadata_snapshot*/, ContextPtr context) override; @@ -197,7 +185,7 @@ public: const S3::URI uri; std::shared_ptr client; - S3Settings::AuthSettings auth_settings; + S3::AuthSettings auth_settings; S3Settings::ReadWriteSettings rw_settings; /// If s3 configuration was passed from ast, then it is static. @@ -209,7 +197,7 @@ public: S3Configuration( const String & url_, - const S3Settings::AuthSettings & auth_settings_, + const S3::AuthSettings & auth_settings_, const S3Settings::ReadWriteSettings & rw_settings_, const HeaderCollection & headers_from_ast_) : uri(S3::URI(url_)) @@ -238,8 +226,6 @@ private: ASTPtr partition_by; bool is_key_with_globs = false; - std::vector read_tasks_used_in_schema_inference; - std::unordered_map object_infos; static void updateS3Configuration(ContextPtr, S3Configuration &); @@ -252,7 +238,6 @@ private: ContextPtr local_context, ASTPtr query, const Block & virtual_block, - const std::vector & read_tasks = {}, std::unordered_map * object_infos = nullptr, Strings * read_keys = nullptr); @@ -264,7 +249,6 @@ private: bool is_key_with_globs, const std::optional & format_settings, ContextPtr ctx, - std::vector * read_keys_in_distributed_processing = nullptr, std::unordered_map * object_infos = nullptr); bool supportsSubsetOfColumns() const override; diff --git a/src/Storages/StorageS3Cluster.cpp b/src/Storages/StorageS3Cluster.cpp index df927069bb0..3b8c8b1cb92 100644 --- a/src/Storages/StorageS3Cluster.cpp +++ b/src/Storages/StorageS3Cluster.cpp @@ -5,46 +5,40 @@ #if USE_AWS_S3 #include "Common/Exception.h" -#include #include "Client/Connection.h" #include "Core/QueryProcessingStage.h" -#include -#include -#include #include -#include #include #include #include #include #include #include -#include #include #include #include -#include "Processors/ISource.h" #include #include #include #include +#include #include #include #include +#include +#include #include #include #include #include -#include #include #include -#include -#include namespace DB { + StorageS3Cluster::StorageS3Cluster( const StorageS3ClusterConfiguration & configuration_, const StorageID & table_id_, @@ -72,6 +66,7 @@ StorageS3Cluster::StorageS3Cluster( auto columns = StorageS3::getTableStructureFromDataImpl(format_name, s3_configuration, compression_method, /*distributed_processing_*/false, is_key_with_globs, /*format_settings=*/std::nullopt, context_); storage_metadata.setColumns(columns); + add_columns_structure_to_query = true; } else storage_metadata.setColumns(columns_); @@ -97,7 +92,7 @@ Pipe StorageS3Cluster::read( ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t /*max_block_size*/, - unsigned /*num_streams*/) + size_t /*num_streams*/) { StorageS3::updateS3Configuration(context, s3_configuration); @@ -117,6 +112,11 @@ 
Pipe StorageS3Cluster::read( const bool add_agg_info = processed_stage == QueryProcessingStage::WithMergeableState; + ASTPtr query_to_send = query_info.original_query->clone(); + if (add_columns_structure_to_query) + addColumnsStructureToQueryWithClusterEngine( + query_to_send, StorageDictionary::generateNamesAndTypesDescription(storage_snapshot->metadata->getColumns().getAll()), 5, getName()); + for (const auto & replicas : cluster->getShardsAddresses()) { /// There will be only one replica, because we consider each replica as a shard @@ -135,7 +135,7 @@ Pipe StorageS3Cluster::read( /// So, task_identifier is passed as constructor argument. It is more obvious. auto remote_query_executor = std::make_shared( connection, - queryToString(query_info.original_query), + queryToString(query_to_send), header, context, /*throttler=*/nullptr, diff --git a/src/Storages/StorageS3Cluster.h b/src/Storages/StorageS3Cluster.h index d2cf1b917a1..3a3942f4222 100644 --- a/src/Storages/StorageS3Cluster.h +++ b/src/Storages/StorageS3Cluster.h @@ -30,7 +30,7 @@ public: std::string getName() const override { return "S3Cluster"; } Pipe read(const Names &, const StorageSnapshotPtr &, SelectQueryInfo &, - ContextPtr, QueryProcessingStage::Enum, size_t /*max_block_size*/, unsigned /*num_streams*/) override; + ContextPtr, QueryProcessingStage::Enum, size_t /*max_block_size*/, size_t /*num_streams*/) override; QueryProcessingStage::Enum getQueryProcessingStage(ContextPtr, QueryProcessingStage::Enum, const StorageSnapshotPtr &, SelectQueryInfo &) const override; @@ -46,6 +46,7 @@ private: String compression_method; NamesAndTypesList virtual_columns; Block virtual_block; + bool add_columns_structure_to_query = false; }; diff --git a/src/Storages/StorageS3Settings.cpp b/src/Storages/StorageS3Settings.cpp index 4ab3375e188..65e9bb1ab8c 100644 --- a/src/Storages/StorageS3Settings.cpp +++ b/src/Storages/StorageS3Settings.cpp @@ -1,5 +1,7 @@ #include +#include + #include #include #include @@ -9,10 +11,6 @@ namespace DB { -namespace ErrorCodes -{ - extern const int INVALID_CONFIG_PARAMETER; -} void StorageS3Settings::loadFromConfig(const String & config_elem, const Poco::Util::AbstractConfiguration & config, const Settings & settings) { @@ -46,41 +44,8 @@ void StorageS3Settings::loadFromConfig(const String & config_elem, const Poco::U if (config.has(config_elem + "." + key + ".endpoint")) { auto endpoint = get_string_for_key(key, "endpoint", false); - auto access_key_id = get_string_for_key(key, "access_key_id"); - auto secret_access_key = get_string_for_key(key, "secret_access_key"); - auto region = get_string_for_key(key, "region"); - auto server_side_encryption_customer_key_base64 = get_string_for_key(key, "server_side_encryption_customer_key_base64"); - std::optional use_environment_credentials; - if (config.has(config_elem + "." + key + ".use_environment_credentials")) - use_environment_credentials = config.getBool(config_elem + "." + key + ".use_environment_credentials"); - - std::optional use_insecure_imds_request; - if (config.has(config_elem + "." + key + ".use_insecure_imds_request")) - use_insecure_imds_request = config.getBool(config_elem + "." + key + ".use_insecure_imds_request"); - - HeaderCollection headers; - Poco::Util::AbstractConfiguration::Keys subconfig_keys; - config.keys(config_elem + "." + key, subconfig_keys); - for (const String & subkey : subconfig_keys) - { - if (subkey.starts_with("header")) - { - auto header_str = config.getString(config_elem + "." + key + "." 
+ subkey); - auto delimiter = header_str.find(':'); - if (delimiter == String::npos) - throw Exception("Malformed s3 header value", ErrorCodes::INVALID_CONFIG_PARAMETER); - headers.emplace_back(HttpHeader{header_str.substr(0, delimiter), header_str.substr(delimiter + 1, String::npos)}); - } - } - - S3Settings::AuthSettings auth_settings{ - std::move(access_key_id), std::move(secret_access_key), - std::move(region), - std::move(server_side_encryption_customer_key_base64), - std::move(headers), - use_environment_credentials, - use_insecure_imds_request}; + auto auth_settings = S3::AuthSettings::loadFromConfig(config_elem + "." + key, config); S3Settings::ReadWriteSettings rw_settings; rw_settings.max_single_read_retries = get_uint_for_key(key, "max_single_read_retries", true, settings.s3_max_single_read_retries); diff --git a/src/Storages/StorageS3Settings.h b/src/Storages/StorageS3Settings.h index 80ef4f52deb..2da4a1d7590 100644 --- a/src/Storages/StorageS3Settings.h +++ b/src/Storages/StorageS3Settings.h @@ -9,6 +9,8 @@ #include #include +#include + namespace Poco::Util { class AbstractConfiguration; @@ -21,46 +23,6 @@ struct Settings; struct S3Settings { - struct AuthSettings - { - String access_key_id; - String secret_access_key; - String region; - String server_side_encryption_customer_key_base64; - - HeaderCollection headers; - - std::optional use_environment_credentials; - std::optional use_insecure_imds_request; - - inline bool operator==(const AuthSettings & other) const - { - return access_key_id == other.access_key_id && secret_access_key == other.secret_access_key - && region == other.region - && server_side_encryption_customer_key_base64 == other.server_side_encryption_customer_key_base64 - && headers == other.headers - && use_environment_credentials == other.use_environment_credentials - && use_insecure_imds_request == other.use_insecure_imds_request; - } - - void updateFrom(const AuthSettings & from) - { - /// Update with check for emptyness only parameters which - /// can be passed not only from config, but via ast. 
- - if (!from.access_key_id.empty()) - access_key_id = from.access_key_id; - if (!from.secret_access_key.empty()) - secret_access_key = from.secret_access_key; - - headers = from.headers; - region = from.region; - server_side_encryption_customer_key_base64 = from.server_side_encryption_customer_key_base64; - use_environment_credentials = from.use_environment_credentials; - use_insecure_imds_request = from.use_insecure_imds_request; - } - }; - struct ReadWriteSettings { size_t max_single_read_retries = 0; @@ -90,7 +52,7 @@ struct S3Settings void updateFromSettingsIfEmpty(const Settings & settings); }; - AuthSettings auth_settings; + S3::AuthSettings auth_settings; ReadWriteSettings rw_settings; inline bool operator==(const S3Settings & other) const diff --git a/src/Storages/StorageSQLite.cpp b/src/Storages/StorageSQLite.cpp index a86ed7646b3..92f954ebb9d 100644 --- a/src/Storages/StorageSQLite.cpp +++ b/src/Storages/StorageSQLite.cpp @@ -57,7 +57,7 @@ Pipe StorageSQLite::read( ContextPtr context_, QueryProcessingStage::Enum, size_t max_block_size, - unsigned int) + size_t /*num_streams*/) { if (!sqlite_db) sqlite_db = openSQLiteDB(database_path, getContext(), /* throw_on_error */true); diff --git a/src/Storages/StorageSQLite.h b/src/Storages/StorageSQLite.h index b0f209b5bc3..a021c00f627 100644 --- a/src/Storages/StorageSQLite.h +++ b/src/Storages/StorageSQLite.h @@ -38,7 +38,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; SinkToStoragePtr write(const ASTPtr & query, const StorageMetadataPtr & /*metadata_snapshot*/, ContextPtr context) override; diff --git a/src/Storages/StorageSnapshot.cpp b/src/Storages/StorageSnapshot.cpp index a99fec8c154..48851f0974d 100644 --- a/src/Storages/StorageSnapshot.cpp +++ b/src/Storages/StorageSnapshot.cpp @@ -76,7 +76,7 @@ std::optional StorageSnapshot::tryGetColumn(const GetColumnsOpt { const auto & columns = getMetadataForQuery()->getColumns(); auto column = columns.tryGetColumn(options, column_name); - if (column && (!isObject(column->type) || !options.with_extended_objects)) + if (column && (!column->type->hasDynamicSubcolumns() || !options.with_extended_objects)) return column; if (options.with_extended_objects) diff --git a/src/Storages/StorageStripeLog.cpp b/src/Storages/StorageStripeLog.cpp index 0ecbdb0db10..92d53ffc1ac 100644 --- a/src/Storages/StorageStripeLog.cpp +++ b/src/Storages/StorageStripeLog.cpp @@ -349,7 +349,7 @@ Pipe StorageStripeLog::read( ContextPtr local_context, QueryProcessingStage::Enum /*processed_stage*/, const size_t /*max_block_size*/, - unsigned num_streams) + size_t num_streams) { storage_snapshot->check(column_names); diff --git a/src/Storages/StorageStripeLog.h b/src/Storages/StorageStripeLog.h index efdf18c0f7b..3f1b4ed0ad5 100644 --- a/src/Storages/StorageStripeLog.h +++ b/src/Storages/StorageStripeLog.h @@ -47,7 +47,7 @@ public: ContextPtr local_context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; SinkToStoragePtr write(const ASTPtr & query, const StorageMetadataPtr & metadata_snapshot, ContextPtr local_context) override; diff --git a/src/Storages/StorageTableFunction.h b/src/Storages/StorageTableFunction.h index 2a4bfdf304b..b105e50a54f 100644 --- a/src/Storages/StorageTableFunction.h +++ b/src/Storages/StorageTableFunction.h @@ -101,7 +101,7 @@ public: ContextPtr context, QueryProcessingStage::Enum 
processed_stage, size_t max_block_size, - unsigned num_streams) override + size_t num_streams) override { String cnames; for (const auto & c : column_names) diff --git a/src/Storages/StorageURL.cpp b/src/Storages/StorageURL.cpp index f1c924a3448..c38b4313359 100644 --- a/src/Storages/StorageURL.cpp +++ b/src/Storages/StorageURL.cpp @@ -266,6 +266,7 @@ namespace setCredentials(credentials, request_uri); const auto settings = context->getSettings(); + int zstd_window_log_max = static_cast(settings.zstd_window_log_max); try { if (download_threads > 1) @@ -354,7 +355,7 @@ namespace threadPoolCallbackRunner(IOThreadPool::get(), "URLParallelRead"), download_threads), compression_method, - settings.zstd_window_log_max); + zstd_window_log_max); } } catch (const Poco::Exception & e) @@ -386,7 +387,7 @@ namespace /* use_external_buffer */ false, /* skip_url_not_found_error */ skip_url_not_found_error), compression_method, - settings.zstd_window_log_max); + zstd_window_log_max); } catch (...) { @@ -641,7 +642,7 @@ Pipe IStorageURLBase::read( ContextPtr local_context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) + size_t num_streams) { auto params = getReadURIParams(column_names, storage_snapshot, query_info, local_context, processed_stage, max_block_size); @@ -730,7 +731,7 @@ Pipe StorageURLWithFailover::read( ContextPtr local_context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned /*num_streams*/) + size_t /*num_streams*/) { ColumnsDescription columns_description; Block block_for_format; diff --git a/src/Storages/StorageURL.h b/src/Storages/StorageURL.h index 63c803f2d26..bf8858b8b66 100644 --- a/src/Storages/StorageURL.h +++ b/src/Storages/StorageURL.h @@ -35,7 +35,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; SinkToStoragePtr write(const ASTPtr & query, const StorageMetadataPtr & /*metadata_snapshot*/, ContextPtr context) override; @@ -206,7 +206,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; struct Configuration { diff --git a/src/Storages/StorageValues.cpp b/src/Storages/StorageValues.cpp index 2a3e1743983..300b11b7346 100644 --- a/src/Storages/StorageValues.cpp +++ b/src/Storages/StorageValues.cpp @@ -27,7 +27,7 @@ Pipe StorageValues::read( ContextPtr /*context*/, QueryProcessingStage::Enum /*processed_stage*/, size_t /*max_block_size*/, - unsigned /*num_streams*/) + size_t /*num_streams*/) { storage_snapshot->check(column_names); diff --git a/src/Storages/StorageValues.h b/src/Storages/StorageValues.h index bf7bf0466e4..55222903797 100644 --- a/src/Storages/StorageValues.h +++ b/src/Storages/StorageValues.h @@ -23,7 +23,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; /// Why we may have virtual columns in the storage from a single block? 
/// Because it used as tmp storage for pushing blocks into views, and some diff --git a/src/Storages/StorageView.cpp b/src/Storages/StorageView.cpp index adaf1c4e404..a55d7ad3c09 100644 --- a/src/Storages/StorageView.cpp +++ b/src/Storages/StorageView.cpp @@ -111,7 +111,7 @@ void StorageView::read( ContextPtr context, QueryProcessingStage::Enum /*processed_stage*/, const size_t /*max_block_size*/, - const unsigned /*num_streams*/) + const size_t /*num_streams*/) { ASTPtr current_inner_query = storage_snapshot->metadata->getSelectQuery().inner_query; diff --git a/src/Storages/StorageView.h b/src/Storages/StorageView.h index 31c96addd08..593ac820ad4 100644 --- a/src/Storages/StorageView.h +++ b/src/Storages/StorageView.h @@ -32,7 +32,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; static void replaceWithSubquery(ASTSelectQuery & select_query, ASTPtr & view_name, const StorageMetadataPtr & metadata_snapshot) { diff --git a/src/Storages/StorageXDBC.cpp b/src/Storages/StorageXDBC.cpp index aacbb5fa302..5f57d37278b 100644 --- a/src/Storages/StorageXDBC.cpp +++ b/src/Storages/StorageXDBC.cpp @@ -106,7 +106,7 @@ Pipe StorageXDBC::read( ContextPtr local_context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) + size_t num_streams) { storage_snapshot->check(column_names); diff --git a/src/Storages/StorageXDBC.h b/src/Storages/StorageXDBC.h index a2bb9c15baf..aa313e024ca 100644 --- a/src/Storages/StorageXDBC.h +++ b/src/Storages/StorageXDBC.h @@ -26,7 +26,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; StorageXDBC( const StorageID & table_id_, diff --git a/src/Storages/System/IStorageSystemOneBlock.h b/src/Storages/System/IStorageSystemOneBlock.h index 2cfe2de05db..63b9a443f95 100644 --- a/src/Storages/System/IStorageSystemOneBlock.h +++ b/src/Storages/System/IStorageSystemOneBlock.h @@ -45,7 +45,7 @@ public: ContextPtr context, QueryProcessingStage::Enum /*processed_stage*/, size_t /*max_block_size*/, - unsigned /*num_streams*/) override + size_t /*num_streams*/) override { storage_snapshot->check(column_names); diff --git a/src/Storages/System/StorageSystemBackups.cpp b/src/Storages/System/StorageSystemBackups.cpp index e7146711c4a..52a26fe0cd6 100644 --- a/src/Storages/System/StorageSystemBackups.cpp +++ b/src/Storages/System/StorageSystemBackups.cpp @@ -51,8 +51,8 @@ void StorageSystemBackups::fillData(MutableColumns & res_columns, ContextPtr con column_uncompressed_size.insertValue(info.uncompressed_size); column_compressed_size.insertValue(info.compressed_size); column_error.insertData(info.error_message.data(), info.error_message.size()); - column_start_time.insertValue(std::chrono::system_clock::to_time_t(info.start_time)); - column_end_time.insertValue(std::chrono::system_clock::to_time_t(info.end_time)); + column_start_time.insertValue(static_cast(std::chrono::system_clock::to_time_t(info.start_time))); + column_end_time.insertValue(static_cast(std::chrono::system_clock::to_time_t(info.end_time))); }; for (const auto & entry : context->getBackupsWorker().getAllInfos()) diff --git a/src/Storages/System/StorageSystemColumns.cpp b/src/Storages/System/StorageSystemColumns.cpp index 20cab9fdc47..18e7d269795 100644 --- a/src/Storages/System/StorageSystemColumns.cpp +++ b/src/Storages/System/StorageSystemColumns.cpp @@ 
-296,7 +296,7 @@ Pipe StorageSystemColumns::read( ContextPtr context, QueryProcessingStage::Enum /*processed_stage*/, const size_t max_block_size, - const unsigned /*num_streams*/) + const size_t /*num_streams*/) { storage_snapshot->check(column_names); diff --git a/src/Storages/System/StorageSystemColumns.h b/src/Storages/System/StorageSystemColumns.h index 542e4ce9661..7b4b5dd8fb3 100644 --- a/src/Storages/System/StorageSystemColumns.h +++ b/src/Storages/System/StorageSystemColumns.h @@ -24,7 +24,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; bool isSystemStorage() const override { return true; } }; diff --git a/src/Storages/System/StorageSystemContributors.generated.cpp b/src/Storages/System/StorageSystemContributors.generated.cpp index 25eae3b83b6..e1f4f7b82bf 100644 --- a/src/Storages/System/StorageSystemContributors.generated.cpp +++ b/src/Storages/System/StorageSystemContributors.generated.cpp @@ -12,6 +12,7 @@ const char * auto_contributors[] { "821008736@qq.com", "ANDREI STAROVEROV", "Aaron Katz", + "Adam Rutkowski", "Adri Fernandez", "Ahmed Dardery", "Aimiyoo", @@ -76,11 +77,15 @@ const char * auto_contributors[] { "Alexey Elymanov", "Alexey Gusev", "Alexey Ilyukhov", + "Alexey Ivanov", "Alexey Milovidov", "Alexey Tronov", "Alexey Vasiliev", "Alexey Zatelepin", "Alexsey Shestakov", + "AlfVII", + "Alfonso Martinez", + "Alfred Xu", "Ali Demirci", "Aliaksandr Pliutau", "Aliaksandr Shylau", @@ -196,6 +201,7 @@ const char * auto_contributors[] { "Brian Hunter", "Bulat Gaifullin", "Carbyn", + "Carlos Rodríguez Hernández", "Caspian", "Chao Ma", "Chao Wang", @@ -222,6 +228,7 @@ const char * auto_contributors[] { "DIAOZHAFENG", "Dale McDiarmid", "Dale Mcdiarmid", + "Dalitso Banda", "Dan Roscigno", "DanRoscigno", "Daniel Bershatsky", @@ -267,6 +274,7 @@ const char * auto_contributors[] { "Dmitry S..ky / skype: dvska-at-skype", "Dmitry Ukolov", "Doge", + "Dom Del Nano", "Dongdong Yang", "DoomzD", "Dr. 
Strange Looker", @@ -276,6 +284,7 @@ const char * auto_contributors[] { "Egor Savin", "Ekaterina", "Eldar Zaitov", + "Elena", "Elena Baskakova", "Elghazal Ahmed", "Elizaveta Mironyuk", @@ -342,6 +351,7 @@ const char * auto_contributors[] { "Grigory Pervakov", "GruffGemini", "Guillaume Tassery", + "Guo Wangyang", "Guo Wei (William)", "Haavard Kvaalen", "Habibullah Oladepo", @@ -349,6 +359,7 @@ const char * auto_contributors[] { "Hakob Saghatelyan", "Hamoon", "Han Fei", + "Han Shukai", "Harry Lee", "Harry-Lee", "HarryLeeIBM", @@ -404,6 +415,7 @@ const char * auto_contributors[] { "Jack Song", "JackyWoo", "Jacob Hayes", + "Jacob Herrington", "Jake Liu", "Jakub Kuklis", "James Maidment", @@ -419,6 +431,7 @@ const char * auto_contributors[] { "Jiading Guo", "Jiang Tao", "Jianmei Zhang", + "Jiebin Sun", "Jochen Schalanda", "John", "John Hummel", @@ -432,6 +445,7 @@ const char * auto_contributors[] { "Julian Gilyadov", "Julian Zhou", "Julio Jimenez", + "Jus", "Justin Hilliard", "Kang Liu", "Karl Pietrzak", @@ -652,6 +666,7 @@ const char * auto_contributors[] { "OuO", "PHO", "Pablo Alegre", + "Pablo Marcos", "Paramtamtam", "Patrick Zippenfenig", "Paul Loyd", @@ -681,6 +696,7 @@ const char * auto_contributors[] { "Prashant Shahi", "Pxl", "Pysaoke", + "Quanfa Fu", "Quid37", "Rafael Acevedo", "Rafael David Tinoco", @@ -693,6 +709,7 @@ const char * auto_contributors[] { "RedClusive", "RegulusZ", "Reilee", + "Reinaldy Rafli", "Reto Kromer", "Ri", "Rich Raposa", @@ -726,6 +743,7 @@ const char * auto_contributors[] { "Sachin", "Safronov Michail", "SaltTan", + "Salvatore Mesoraca", "Sami Kerola", "Samuel Chou", "San", @@ -927,6 +945,7 @@ const char * auto_contributors[] { "ZhiYong Wang", "Zhichang Yu", "Zhichun Wu", + "Zhiguo Zhou", "Zhipeng", "Zijie Lu", "Zoran Pandovski", @@ -950,6 +969,7 @@ const char * auto_contributors[] { "alexander goryanets", "alexander kozhikhov", "alexey-milovidov", + "alexeyerm", "alexeypavlenko", "alfredlu", "amesaru", @@ -1131,6 +1151,7 @@ const char * auto_contributors[] { "jennyma", "jetgm", "jewisliu", + "jferroal", "jiahui-97", "jianmei zhang", "jinjunzh", @@ -1236,6 +1257,7 @@ const char * auto_contributors[] { "mo-avatar", "morty", "moscas", + "mosinnik", "mreddy017", "msaf1980", "msirm", @@ -1321,6 +1343,7 @@ const char * auto_contributors[] { "simon-says", "snyk-bot", "songenjie", + "sperlingxx", "spff", "spongedc", "spume", @@ -1422,6 +1445,7 @@ const char * auto_contributors[] { "zhongyuankai", "zhoubintao", "zhukai", + "zimv", "zkun", "zlx19950903", "zombee0", diff --git a/src/Storages/System/StorageSystemDataSkippingIndices.cpp b/src/Storages/System/StorageSystemDataSkippingIndices.cpp index e725f8a03c6..be04261cc4e 100644 --- a/src/Storages/System/StorageSystemDataSkippingIndices.cpp +++ b/src/Storages/System/StorageSystemDataSkippingIndices.cpp @@ -171,7 +171,7 @@ Pipe StorageSystemDataSkippingIndices::read( ContextPtr context, QueryProcessingStage::Enum /* processed_stage */, size_t max_block_size, - unsigned int /* num_streams */) + size_t /* num_streams */) { storage_snapshot->check(column_names); diff --git a/src/Storages/System/StorageSystemDataSkippingIndices.h b/src/Storages/System/StorageSystemDataSkippingIndices.h index 046855edd5e..8a1e8c159b4 100644 --- a/src/Storages/System/StorageSystemDataSkippingIndices.h +++ b/src/Storages/System/StorageSystemDataSkippingIndices.h @@ -21,7 +21,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; bool 
isSystemStorage() const override { return true; } }; diff --git a/src/Storages/System/StorageSystemDetachedParts.cpp b/src/Storages/System/StorageSystemDetachedParts.cpp index 574ce4f44c2..d094fefddcb 100644 --- a/src/Storages/System/StorageSystemDetachedParts.cpp +++ b/src/Storages/System/StorageSystemDetachedParts.cpp @@ -36,7 +36,7 @@ Pipe StorageSystemDetachedParts::read( ContextPtr context, QueryProcessingStage::Enum /*processed_stage*/, const size_t /*max_block_size*/, - const unsigned /*num_streams*/) + const size_t /*num_streams*/) { StoragesInfoStream stream(query_info, context); diff --git a/src/Storages/System/StorageSystemDetachedParts.h b/src/Storages/System/StorageSystemDetachedParts.h index 23f27816138..20ac69f0eea 100644 --- a/src/Storages/System/StorageSystemDetachedParts.h +++ b/src/Storages/System/StorageSystemDetachedParts.h @@ -27,7 +27,7 @@ protected: ContextPtr context, QueryProcessingStage::Enum /*processed_stage*/, size_t /*max_block_size*/, - unsigned /*num_streams*/) override; + size_t /*num_streams*/) override; }; } diff --git a/src/Storages/System/StorageSystemDisks.cpp b/src/Storages/System/StorageSystemDisks.cpp index 6b50b00dc30..86b5eafdf72 100644 --- a/src/Storages/System/StorageSystemDisks.cpp +++ b/src/Storages/System/StorageSystemDisks.cpp @@ -37,7 +37,7 @@ Pipe StorageSystemDisks::read( ContextPtr context, QueryProcessingStage::Enum /*processed_stage*/, const size_t /*max_block_size*/, - const unsigned /*num_streams*/) + const size_t /*num_streams*/) { storage_snapshot->check(column_names); diff --git a/src/Storages/System/StorageSystemDisks.h b/src/Storages/System/StorageSystemDisks.h index cd1dc1a8bbf..06cc7e8d4e2 100644 --- a/src/Storages/System/StorageSystemDisks.h +++ b/src/Storages/System/StorageSystemDisks.h @@ -27,7 +27,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; bool isSystemStorage() const override { return true; } }; diff --git a/src/Storages/System/StorageSystemErrors.cpp b/src/Storages/System/StorageSystemErrors.cpp index 4c8c8e60d69..bbe67bc0d21 100644 --- a/src/Storages/System/StorageSystemErrors.cpp +++ b/src/Storages/System/StorageSystemErrors.cpp @@ -51,7 +51,7 @@ void StorageSystemErrors::fillData(MutableColumns & res_columns, ContextPtr cont for (size_t i = 0, end = ErrorCodes::end(); i < end; ++i) { const auto & error = ErrorCodes::values[i].get(); - std::string_view name = ErrorCodes::getName(i); + std::string_view name = ErrorCodes::getName(static_cast(i)); if (name.empty()) continue; diff --git a/src/Storages/System/StorageSystemNumbers.cpp b/src/Storages/System/StorageSystemNumbers.cpp index 523ec25b89c..70c0c64305d 100644 --- a/src/Storages/System/StorageSystemNumbers.cpp +++ b/src/Storages/System/StorageSystemNumbers.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include #include @@ -125,11 +126,11 @@ StorageSystemNumbers::StorageSystemNumbers(const StorageID & table_id, bool mult Pipe StorageSystemNumbers::read( const Names & column_names, const StorageSnapshotPtr & storage_snapshot, - SelectQueryInfo &, + SelectQueryInfo & query_info, ContextPtr /*context*/, QueryProcessingStage::Enum /*processed_stage*/, size_t max_block_size, - unsigned num_streams) + size_t num_streams) { storage_snapshot->check(column_names); @@ -154,7 +155,12 @@ Pipe StorageSystemNumbers::read( auto source = std::make_shared(state, max_block_size, max_counter); if (i == 0) - source->addTotalRowsApprox(*limit); + { + auto 
rows_appr = *limit; + if (query_info.limit > 0 && query_info.limit < rows_appr) + rows_appr = query_info.limit; + source->addTotalRowsApprox(rows_appr); + } pipe.addSource(std::move(source)); } @@ -167,7 +173,12 @@ Pipe StorageSystemNumbers::read( auto source = std::make_shared(max_block_size, offset + i * max_block_size, num_streams * max_block_size); if (limit && i == 0) - source->addTotalRowsApprox(*limit); + { + auto rows_appr = *limit; + if (query_info.limit > 0 && query_info.limit < rows_appr) + rows_appr = query_info.limit; + source->addTotalRowsApprox(rows_appr); + } pipe.addSource(std::move(source)); } diff --git a/src/Storages/System/StorageSystemNumbers.h b/src/Storages/System/StorageSystemNumbers.h index 6bb89c0525e..acddac681ef 100644 --- a/src/Storages/System/StorageSystemNumbers.h +++ b/src/Storages/System/StorageSystemNumbers.h @@ -38,7 +38,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; bool hasEvenlyDistributedRead() const override { return true; } bool isSystemStorage() const override { return true; } diff --git a/src/Storages/System/StorageSystemOne.cpp b/src/Storages/System/StorageSystemOne.cpp index f262c981b83..3091ffdb51a 100644 --- a/src/Storages/System/StorageSystemOne.cpp +++ b/src/Storages/System/StorageSystemOne.cpp @@ -27,7 +27,7 @@ Pipe StorageSystemOne::read( ContextPtr /*context*/, QueryProcessingStage::Enum /*processed_stage*/, const size_t /*max_block_size*/, - const unsigned /*num_streams*/) + const size_t /*num_streams*/) { storage_snapshot->check(column_names); diff --git a/src/Storages/System/StorageSystemOne.h b/src/Storages/System/StorageSystemOne.h index 35dba59a99e..d8a26f1def4 100644 --- a/src/Storages/System/StorageSystemOne.h +++ b/src/Storages/System/StorageSystemOne.h @@ -28,7 +28,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; bool isSystemStorage() const override { return true; } diff --git a/src/Storages/System/StorageSystemParts.cpp b/src/Storages/System/StorageSystemParts.cpp index d788efd8860..fa1c26b623d 100644 --- a/src/Storages/System/StorageSystemParts.cpp +++ b/src/Storages/System/StorageSystemParts.cpp @@ -198,9 +198,9 @@ void StorageSystemParts::processNextStorage( if (part->isStoredOnDisk()) { if (columns_mask[src_index++]) - columns[res_index++]->insert(part->data_part_storage->getDiskName()); + columns[res_index++]->insert(part->getDataPartStorage().getDiskName()); if (columns_mask[src_index++]) - columns[res_index++]->insert(part->data_part_storage->getFullPath()); + columns[res_index++]->insert(part->getDataPartStorage().getFullPath()); } else { diff --git a/src/Storages/System/StorageSystemPartsBase.cpp b/src/Storages/System/StorageSystemPartsBase.cpp index bcfd670ece9..a0c022f5540 100644 --- a/src/Storages/System/StorageSystemPartsBase.cpp +++ b/src/Storages/System/StorageSystemPartsBase.cpp @@ -247,7 +247,7 @@ Pipe StorageSystemPartsBase::read( ContextPtr context, QueryProcessingStage::Enum /*processed_stage*/, const size_t /*max_block_size*/, - const unsigned /*num_streams*/) + const size_t /*num_streams*/) { bool has_state_column = hasStateColumn(column_names, storage_snapshot); diff --git a/src/Storages/System/StorageSystemPartsBase.h b/src/Storages/System/StorageSystemPartsBase.h index 8db96700e1a..cb6265d82df 100644 --- a/src/Storages/System/StorageSystemPartsBase.h +++ 
b/src/Storages/System/StorageSystemPartsBase.h @@ -63,7 +63,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; NamesAndTypesList getVirtuals() const override; diff --git a/src/Storages/System/StorageSystemPartsColumns.cpp b/src/Storages/System/StorageSystemPartsColumns.cpp index cc6e69b160f..cd51c767eae 100644 --- a/src/Storages/System/StorageSystemPartsColumns.cpp +++ b/src/Storages/System/StorageSystemPartsColumns.cpp @@ -190,9 +190,9 @@ void StorageSystemPartsColumns::processNextStorage( if (columns_mask[src_index++]) columns[res_index++]->insert(info.engine); if (columns_mask[src_index++]) - columns[res_index++]->insert(part->data_part_storage->getDiskName()); + columns[res_index++]->insert(part->getDataPartStorage().getDiskName()); if (columns_mask[src_index++]) - columns[res_index++]->insert(part->data_part_storage->getFullPath()); + columns[res_index++]->insert(part->getDataPartStorage().getFullPath()); if (columns_mask[src_index++]) columns[res_index++]->insert(column.name); diff --git a/src/Storages/System/StorageSystemProjectionParts.cpp b/src/Storages/System/StorageSystemProjectionParts.cpp index 3934e7c9623..37c62ba5eb0 100644 --- a/src/Storages/System/StorageSystemProjectionParts.cpp +++ b/src/Storages/System/StorageSystemProjectionParts.cpp @@ -200,9 +200,9 @@ void StorageSystemProjectionParts::processNextStorage( if (part->isStoredOnDisk()) { if (columns_mask[src_index++]) - columns[res_index++]->insert(part->data_part_storage->getDiskName()); + columns[res_index++]->insert(part->getDataPartStorage().getDiskName()); if (columns_mask[src_index++]) - columns[res_index++]->insert(part->data_part_storage->getFullPath()); + columns[res_index++]->insert(part->getDataPartStorage().getFullPath()); } else { diff --git a/src/Storages/System/StorageSystemProjectionPartsColumns.cpp b/src/Storages/System/StorageSystemProjectionPartsColumns.cpp index 0847010faaa..a5968597885 100644 --- a/src/Storages/System/StorageSystemProjectionPartsColumns.cpp +++ b/src/Storages/System/StorageSystemProjectionPartsColumns.cpp @@ -211,9 +211,9 @@ void StorageSystemProjectionPartsColumns::processNextStorage( if (columns_mask[src_index++]) columns[res_index++]->insert(info.engine); if (columns_mask[src_index++]) - columns[res_index++]->insert(part->data_part_storage->getDiskName()); + columns[res_index++]->insert(part->getDataPartStorage().getDiskName()); if (columns_mask[src_index++]) - columns[res_index++]->insert(part->data_part_storage->getFullPath()); + columns[res_index++]->insert(part->getDataPartStorage().getFullPath()); if (columns_mask[src_index++]) columns[res_index++]->insert(column.name); diff --git a/src/Storages/System/StorageSystemQuotaLimits.cpp b/src/Storages/System/StorageSystemQuotaLimits.cpp index 0261d3d2cd9..6cc269130a0 100644 --- a/src/Storages/System/StorageSystemQuotaLimits.cpp +++ b/src/Storages/System/StorageSystemQuotaLimits.cpp @@ -90,7 +90,7 @@ void StorageSystemQuotaLimits::fillData(MutableColumns & res_columns, ContextPtr auto add_row = [&](const String & quota_name, const Quota::Limits & limits) { column_quota_name.insertData(quota_name.data(), quota_name.length()); - column_duration.push_back(limits.duration.count()); + column_duration.push_back(static_cast(limits.duration.count())); column_is_randomized_interval.push_back(limits.randomize_interval); for (auto quota_type : collections::range(QuotaType::MAX)) diff --git 
a/src/Storages/System/StorageSystemQuotaUsage.cpp b/src/Storages/System/StorageSystemQuotaUsage.cpp index 6ba47a86dbf..5d047dc0359 100644 --- a/src/Storages/System/StorageSystemQuotaUsage.cpp +++ b/src/Storages/System/StorageSystemQuotaUsage.cpp @@ -162,8 +162,8 @@ void StorageSystemQuotaUsage::fillDataImpl( time_t end_time = std::chrono::system_clock::to_time_t(interval->end_of_interval); UInt32 duration = static_cast(std::chrono::duration_cast(interval->duration).count()); time_t start_time = end_time - duration; - column_start_time.getData().push_back(start_time); - column_end_time.getData().push_back(end_time); + column_start_time.getData().push_back(static_cast(start_time)); + column_end_time.getData().push_back(static_cast(end_time)); column_duration.getData().push_back(duration); column_start_time_null_map.push_back(false); column_end_time_null_map.push_back(false); diff --git a/src/Storages/System/StorageSystemQuotas.cpp b/src/Storages/System/StorageSystemQuotas.cpp index 17863fa7326..439883e038a 100644 --- a/src/Storages/System/StorageSystemQuotas.cpp +++ b/src/Storages/System/StorageSystemQuotas.cpp @@ -96,7 +96,10 @@ void StorageSystemQuotas::fillData(MutableColumns & res_columns, ContextPtr cont column_key_types_offsets.push_back(column_key_types.size()); for (const auto & limits : all_limits) - column_durations.push_back(std::chrono::duration_cast(limits.duration).count()); + { + column_durations.push_back( + static_cast(std::chrono::duration_cast(limits.duration).count())); + } column_durations_offsets.push_back(column_durations.size()); auto apply_to_ast = apply_to.toASTWithNames(access_control); diff --git a/src/Storages/System/StorageSystemRemoteDataPaths.cpp b/src/Storages/System/StorageSystemRemoteDataPaths.cpp index de7e1911e44..20076603522 100644 --- a/src/Storages/System/StorageSystemRemoteDataPaths.cpp +++ b/src/Storages/System/StorageSystemRemoteDataPaths.cpp @@ -38,7 +38,7 @@ Pipe StorageSystemRemoteDataPaths::read( ContextPtr context, QueryProcessingStage::Enum /*processed_stage*/, const size_t /*max_block_size*/, - const unsigned /*num_streams*/) + const size_t /*num_streams*/) { storage_snapshot->check(column_names); diff --git a/src/Storages/System/StorageSystemRemoteDataPaths.h b/src/Storages/System/StorageSystemRemoteDataPaths.h index f868ae60795..7e883d144ef 100644 --- a/src/Storages/System/StorageSystemRemoteDataPaths.h +++ b/src/Storages/System/StorageSystemRemoteDataPaths.h @@ -21,7 +21,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; }; } diff --git a/src/Storages/System/StorageSystemReplicas.cpp b/src/Storages/System/StorageSystemReplicas.cpp index e018ccc0733..0f7877a6e41 100644 --- a/src/Storages/System/StorageSystemReplicas.cpp +++ b/src/Storages/System/StorageSystemReplicas.cpp @@ -66,7 +66,7 @@ Pipe StorageSystemReplicas::read( ContextPtr context, QueryProcessingStage::Enum /*processed_stage*/, const size_t /*max_block_size*/, - const unsigned /*num_streams*/) + const size_t /*num_streams*/) { storage_snapshot->check(column_names); diff --git a/src/Storages/System/StorageSystemReplicas.h b/src/Storages/System/StorageSystemReplicas.h index fc7f8f15861..e9c29dec0fd 100644 --- a/src/Storages/System/StorageSystemReplicas.h +++ b/src/Storages/System/StorageSystemReplicas.h @@ -25,7 +25,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) 
override; bool isSystemStorage() const override { return true; } }; diff --git a/src/Storages/System/StorageSystemStackTrace.cpp b/src/Storages/System/StorageSystemStackTrace.cpp index 549ce193137..df3d8b74e6e 100644 --- a/src/Storages/System/StorageSystemStackTrace.cpp +++ b/src/Storages/System/StorageSystemStackTrace.cpp @@ -258,7 +258,7 @@ Pipe StorageSystemStackTrace::read( ContextPtr context, QueryProcessingStage::Enum /*processed_stage*/, const size_t /*max_block_size*/, - const unsigned /*num_streams*/) + const size_t /*num_streams*/) { storage_snapshot->check(column_names); @@ -324,7 +324,7 @@ Pipe StorageSystemStackTrace::read( sigval sig_value{}; sig_value.sival_int = sequence_num.load(std::memory_order_acquire); - if (0 != ::sigqueue(tid, sig, sig_value)) + if (0 != ::sigqueue(static_cast(tid), sig, sig_value)) { /// The thread may has been already finished. if (ESRCH == errno) diff --git a/src/Storages/System/StorageSystemStackTrace.h b/src/Storages/System/StorageSystemStackTrace.h index dd613882e49..9133a86aa55 100644 --- a/src/Storages/System/StorageSystemStackTrace.h +++ b/src/Storages/System/StorageSystemStackTrace.h @@ -33,7 +33,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; bool isSystemStorage() const override { return true; } diff --git a/src/Storages/System/StorageSystemStoragePolicies.cpp b/src/Storages/System/StorageSystemStoragePolicies.cpp index 832c430e2be..b42bd7859dd 100644 --- a/src/Storages/System/StorageSystemStoragePolicies.cpp +++ b/src/Storages/System/StorageSystemStoragePolicies.cpp @@ -44,7 +44,7 @@ Pipe StorageSystemStoragePolicies::read( ContextPtr context, QueryProcessingStage::Enum /*processed_stage*/, const size_t /*max_block_size*/, - const unsigned /*num_streams*/) + const size_t /*num_streams*/) { storage_snapshot->check(column_names); diff --git a/src/Storages/System/StorageSystemStoragePolicies.h b/src/Storages/System/StorageSystemStoragePolicies.h index 3340a4b5e62..afc729c8368 100644 --- a/src/Storages/System/StorageSystemStoragePolicies.h +++ b/src/Storages/System/StorageSystemStoragePolicies.h @@ -27,7 +27,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; bool isSystemStorage() const override { return true; } }; diff --git a/src/Storages/System/StorageSystemTables.cpp b/src/Storages/System/StorageSystemTables.cpp index e36b22a979e..83f922850a3 100644 --- a/src/Storages/System/StorageSystemTables.cpp +++ b/src/Storages/System/StorageSystemTables.cpp @@ -574,7 +574,7 @@ Pipe StorageSystemTables::read( ContextPtr context, QueryProcessingStage::Enum /*processed_stage*/, const size_t max_block_size, - const unsigned /*num_streams*/) + const size_t /*num_streams*/) { storage_snapshot->check(column_names); diff --git a/src/Storages/System/StorageSystemTables.h b/src/Storages/System/StorageSystemTables.h index 11ac75aab08..60b6144f122 100644 --- a/src/Storages/System/StorageSystemTables.h +++ b/src/Storages/System/StorageSystemTables.h @@ -25,7 +25,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; bool isSystemStorage() const override { return true; } }; diff --git a/src/Storages/System/StorageSystemZeros.cpp b/src/Storages/System/StorageSystemZeros.cpp index 9e5836fa358..6c2ddd8d3dd 100644 --- 
a/src/Storages/System/StorageSystemZeros.cpp +++ b/src/Storages/System/StorageSystemZeros.cpp @@ -97,7 +97,7 @@ Pipe StorageSystemZeros::read( ContextPtr /*context*/, QueryProcessingStage::Enum /*processed_stage*/, size_t max_block_size, - unsigned num_streams) + size_t num_streams) { storage_snapshot->check(column_names); diff --git a/src/Storages/System/StorageSystemZeros.h b/src/Storages/System/StorageSystemZeros.h index 5461feacb6b..64443a3cfd6 100644 --- a/src/Storages/System/StorageSystemZeros.h +++ b/src/Storages/System/StorageSystemZeros.h @@ -29,7 +29,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; bool hasEvenlyDistributedRead() const override { return true; } bool isSystemStorage() const override { return true; } diff --git a/src/Storages/WindowView/StorageWindowView.cpp b/src/Storages/WindowView/StorageWindowView.cpp index d34066de769..c0bc5ad8da9 100644 --- a/src/Storages/WindowView/StorageWindowView.cpp +++ b/src/Storages/WindowView/StorageWindowView.cpp @@ -1018,7 +1018,8 @@ void StorageWindowView::threadFuncFireProc() return; std::lock_guard lock(fire_signal_mutex); - UInt32 timestamp_now = std::time(nullptr); + /// TODO: consider using time_t instead (for every timestamp in this class) + UInt32 timestamp_now = static_cast(std::time(nullptr)); while (next_fire_signal <= timestamp_now) { @@ -1078,7 +1079,7 @@ void StorageWindowView::read( ContextPtr local_context, QueryProcessingStage::Enum processed_stage, const size_t max_block_size, - const unsigned num_streams) + const size_t num_streams) { if (target_table_id.empty()) return; @@ -1118,7 +1119,7 @@ Pipe StorageWindowView::watch( ContextPtr local_context, QueryProcessingStage::Enum & processed_stage, size_t /*max_block_size*/, - const unsigned /*num_streams*/) + const size_t /*num_streams*/) { ASTWatchQuery & query = typeid_cast(*query_info.query); @@ -1189,7 +1190,7 @@ StorageWindowView::StorageWindowView( target_table_id = has_inner_target_table ? 
StorageID(table_id_.database_name, generateTargetTableName(table_id_)) : query.to_table_id; if (is_proctime) - next_fire_signal = getWindowUpperBound(std::time(nullptr)); + next_fire_signal = getWindowUpperBound(static_cast(std::time(nullptr))); std::exchange(has_inner_table, true); if (!attach_) diff --git a/src/Storages/WindowView/StorageWindowView.h b/src/Storages/WindowView/StorageWindowView.h index 96c034b9590..6da34389e4d 100644 --- a/src/Storages/WindowView/StorageWindowView.h +++ b/src/Storages/WindowView/StorageWindowView.h @@ -150,7 +150,7 @@ public: ContextPtr context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; Pipe watch( const Names & column_names, @@ -158,7 +158,7 @@ public: ContextPtr context, QueryProcessingStage::Enum & processed_stage, size_t max_block_size, - unsigned num_streams) override; + size_t num_streams) override; std::pair getNewBlocks(UInt32 watermark); diff --git a/src/Storages/addColumnsStructureToQueryWithClusterEngine.cpp b/src/Storages/addColumnsStructureToQueryWithClusterEngine.cpp new file mode 100644 index 00000000000..31f49fa5490 --- /dev/null +++ b/src/Storages/addColumnsStructureToQueryWithClusterEngine.cpp @@ -0,0 +1,51 @@ +#include +#include +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +static ASTExpressionList * extractTableFunctionArgumentsFromSelectQuery(ASTPtr & query) +{ + auto * select_query = query->as(); + if (!select_query || !select_query->tables()) + return nullptr; + + auto * tables = select_query->tables()->as(); + auto * table_expression = tables->children[0]->as()->table_expression->as(); + if (!table_expression->table_function) + return nullptr; + + auto * table_function = table_expression->table_function->as(); + return table_function->arguments->as(); +} + +void addColumnsStructureToQueryWithClusterEngine(ASTPtr & query, const String & structure, size_t max_arguments, const String & function_name) +{ + ASTExpressionList * expression_list = extractTableFunctionArgumentsFromSelectQuery(query); + if (!expression_list) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected SELECT query from table function {}, got '{}'", function_name, queryToString(query)); + auto structure_literal = std::make_shared(structure); + + if (expression_list->children.size() < 2 || expression_list->children.size() > max_arguments) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected 2 to {} arguments in {} table functions, got {}", function_name, max_arguments, expression_list->children.size()); + + if (expression_list->children.size() == 2 || expression_list->children.size() == max_arguments - 1) + { + auto format_literal = std::make_shared("auto"); + expression_list->children.push_back(format_literal); + } + + expression_list->children.push_back(structure_literal); +} + +} diff --git a/src/Storages/addColumnsStructureToQueryWithClusterEngine.h b/src/Storages/addColumnsStructureToQueryWithClusterEngine.h new file mode 100644 index 00000000000..f39f3a31630 --- /dev/null +++ b/src/Storages/addColumnsStructureToQueryWithClusterEngine.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace DB +{ + +/// Add structure argument for queries with s3Cluster/hdfsCluster table function. 
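The new addColumnsStructureToQueryWithClusterEngine helper is easiest to read as plain argument manipulation. The toy Python sketch below mirrors the rewrite shown above for illustration only: the real code edits the query AST rather than a list, and the column list used here is invented.

```python
def add_structure_to_cluster_args(args, structure, max_arguments):
    """Toy mirror of addColumnsStructureToQueryWithClusterEngine: append
    'auto' as the format when it was omitted, then append the inferred
    structure as the last argument."""
    if len(args) < 2 or len(args) > max_arguments:
        raise ValueError(f"expected 2 to {max_arguments} arguments, got {len(args)}")
    if len(args) == 2 or len(args) == max_arguments - 1:
        args.append("auto")
    args.append(structure)
    return args


# s3Cluster('cluster', 'url') is effectively forwarded to other nodes as
# s3Cluster('cluster', 'url', 'auto', 'id UInt64, s String').
print(add_structure_to_cluster_args(["cluster", "url"], "id UInt64, s String", 5))
```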
+void addColumnsStructureToQueryWithClusterEngine(ASTPtr & query, const String & structure, size_t max_arguments, const String & function_name); + +} diff --git a/src/Storages/examples/merge_selector.cpp b/src/Storages/examples/merge_selector.cpp index 9433e38c648..a3b0d8a29ef 100644 --- a/src/Storages/examples/merge_selector.cpp +++ b/src/Storages/examples/merge_selector.cpp @@ -66,7 +66,7 @@ int main(int, char **) size_t sum_merged_size = 0; size_t start_index = 0; - size_t max_level = 0; + unsigned max_level = 0; bool in_range = false; for (size_t i = 0, size = parts.size(); i < size; ++i) diff --git a/src/Storages/examples/merge_selector2.cpp b/src/Storages/examples/merge_selector2.cpp index d9d08a84bcf..029da26fad6 100644 --- a/src/Storages/examples/merge_selector2.cpp +++ b/src/Storages/examples/merge_selector2.cpp @@ -72,7 +72,7 @@ int main(int, char **) size_t sum_merged_size = 0; size_t start_index = 0; - size_t max_level = 0; + unsigned max_level = 0; bool in_range = false; for (size_t i = 0, size = parts.size(); i < size; ++i) diff --git a/src/Storages/getStructureOfRemoteTable.cpp b/src/Storages/getStructureOfRemoteTable.cpp index 3d104ada0b6..7bd5e629c39 100644 --- a/src/Storages/getStructureOfRemoteTable.cpp +++ b/src/Storages/getStructureOfRemoteTable.cpp @@ -58,7 +58,7 @@ ColumnsDescription getStructureOfRemoteTableInShard( } ColumnsDescription res; - auto new_context = ClusterProxy::updateSettingsForCluster(cluster, context, context->getSettingsRef()); + auto new_context = ClusterProxy::updateSettingsForCluster(cluster, context, context->getSettingsRef(), table_id); /// Expect only needed columns from the result of DESC TABLE. NOTE 'comment' column is ignored for compatibility reasons. Block sample_block @@ -169,7 +169,7 @@ ColumnsDescriptionByShardNum getExtendedObjectsOfRemoteTables( const auto & shards_info = cluster.getShardsInfo(); auto query = "DESC TABLE " + remote_table_id.getFullTableName(); - auto new_context = ClusterProxy::updateSettingsForCluster(cluster, context, context->getSettingsRef()); + auto new_context = ClusterProxy::updateSettingsForCluster(cluster, context, context->getSettingsRef(), remote_table_id); new_context->setSetting("describe_extend_object_types", true); /// Expect only needed columns from the result of DESC TABLE. 
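The getExtendedObjectsOfRemoteTables change continued below keeps the remote (extended) type only for columns whose local type has dynamic subcolumns. A toy Python sketch of that filter, where the startswith("Object(") test is a simplified stand-in for IDataType::hasDynamicSubcolumns() and the sample rows are invented:

```python
def extended_object_columns(desc_rows, local_storage_types):
    """Keep the remote (extended) type only for columns whose local type has
    dynamic subcolumns, e.g. Object('json')."""
    def has_dynamic_subcolumns(type_name: str) -> bool:
        # Simplified stand-in for IDataType::hasDynamicSubcolumns().
        return type_name.startswith("Object(")

    return {
        name: remote_type
        for name, remote_type in desc_rows
        if has_dynamic_subcolumns(local_storage_types.get(name, ""))
    }


print(extended_object_columns(
    [("data", "Tuple(a Int64, b String)"), ("key", "UInt64")],
    {"data": "Object('json')", "key": "UInt64"},
))
```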
@@ -200,7 +200,7 @@ ColumnsDescriptionByShardNum getExtendedObjectsOfRemoteTables( auto type_name = type_col[i].get(); auto storage_column = storage_columns.tryGetPhysical(name); - if (storage_column && isObject(storage_column->type)) + if (storage_column && storage_column->type->hasDynamicSubcolumns()) res.add(ColumnDescription(std::move(name), DataTypeFactory::instance().get(type_name))); } } diff --git a/src/Storages/transformQueryForExternalDatabase.cpp b/src/Storages/transformQueryForExternalDatabase.cpp index c42fb7fa965..51b11680f82 100644 --- a/src/Storages/transformQueryForExternalDatabase.cpp +++ b/src/Storages/transformQueryForExternalDatabase.cpp @@ -22,6 +22,7 @@ namespace ErrorCodes { extern const int LOGICAL_ERROR; extern const int INCORRECT_QUERY; + extern const int UNSUPPORTED_METHOD; } namespace @@ -251,6 +252,11 @@ String transformQueryForExternalDatabase( ContextPtr context) { auto clone_query = query_info.query->clone(); + + /// TODO: Analyzer syntax analyzer result + if (!query_info.syntax_analyzer_result) + throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "transform query for external database is unsupported"); + const Names used_columns = query_info.syntax_analyzer_result->requiredSourceColumns(); bool strict = context->getSettingsRef().external_table_strict_query; diff --git a/src/TableFunctions/TableFunctionFile.cpp b/src/TableFunctions/TableFunctionFile.cpp index fd474f037b3..4ecf29a05bd 100644 --- a/src/TableFunctions/TableFunctionFile.cpp +++ b/src/TableFunctions/TableFunctionFile.cpp @@ -43,7 +43,8 @@ void TableFunctionFile::parseFirstArguments(const ASTPtr & arg, const ContextPtr } else if (type == Field::Types::Int64 || type == Field::Types::UInt64) { - fd = (type == Field::Types::Int64) ? literal->value.get() : literal->value.get(); + fd = static_cast( + (type == Field::Types::Int64) ? 
literal->value.get() : literal->value.get()); if (fd < 0) throw Exception("File descriptor must be non-negative", ErrorCodes::BAD_ARGUMENTS); } diff --git a/src/TableFunctions/TableFunctionHDFSCluster.cpp b/src/TableFunctions/TableFunctionHDFSCluster.cpp index 26fcb514cca..73b77f770b2 100644 --- a/src/TableFunctions/TableFunctionHDFSCluster.cpp +++ b/src/TableFunctions/TableFunctionHDFSCluster.cpp @@ -48,7 +48,7 @@ void TableFunctionHDFSCluster::parseArguments(const ASTPtr & ast_function, Conte const auto message = fmt::format( "The signature of table function {} shall be the following:\n" \ " - cluster, uri\n",\ - " - cluster, format\n",\ + " - cluster, uri, format\n",\ " - cluster, uri, format, structure\n",\ " - cluster, uri, format, structure, compression_method", getName()); diff --git a/src/TableFunctions/TableFunctionS3.cpp b/src/TableFunctions/TableFunctionS3.cpp index 8ed6272d3d6..75d2fe1cd2d 100644 --- a/src/TableFunctions/TableFunctionS3.cpp +++ b/src/TableFunctions/TableFunctionS3.cpp @@ -64,7 +64,7 @@ void TableFunctionS3::parseArgumentsImpl(const String & error_message, ASTs & ar if (args.size() == 4) { auto second_arg = checkAndGetLiteralArgument(args[1], "format/access_key_id"); - if (FormatFactory::instance().getAllFormats().contains(second_arg)) + if (second_arg == "auto" || FormatFactory::instance().getAllFormats().contains(second_arg)) args_to_idx = {{"format", 1}, {"structure", 2}, {"compression_method", 3}}; else @@ -77,7 +77,7 @@ void TableFunctionS3::parseArgumentsImpl(const String & error_message, ASTs & ar { auto second_arg = checkAndGetLiteralArgument(args[1], "format/access_key_id"); - if (FormatFactory::instance().getAllFormats().contains(second_arg)) + if (second_arg == "auto" || FormatFactory::instance().getAllFormats().contains(second_arg)) args_to_idx = {{"format", 1}, {"structure", 2}}; else args_to_idx = {{"access_key_id", 1}, {"secret_access_key", 2}}; diff --git a/tests/.rgignore b/tests/.rgignore new file mode 100644 index 00000000000..26cb6f9025d --- /dev/null +++ b/tests/.rgignore @@ -0,0 +1 @@ +data_json diff --git a/tests/ci/bugfix_validate_check.py b/tests/ci/bugfix_validate_check.py index 4e6001aaa74..e5f37f2940b 100644 --- a/tests/ci/bugfix_validate_check.py +++ b/tests/ci/bugfix_validate_check.py @@ -3,14 +3,21 @@ import argparse import csv import itertools +import logging import os -import sys + +from github import Github + +from s3_helper import S3Helper +from get_robot_token import get_best_robot_token +from pr_info import PRInfo +from upload_result_helper import upload_results +from commit_status_helper import post_commit_status def parse_args(): parser = argparse.ArgumentParser() - parser.add_argument("report1") - parser.add_argument("report2") + parser.add_argument("status", nargs="+", help="Path to status file") return parser.parse_args() @@ -26,20 +33,63 @@ def post_commit_status_from_file(file_path): return res[0] -def process_results(file_path): +def process_result(file_path): + test_results = [] state, report_url, description = post_commit_status_from_file(file_path) prefix = os.path.basename(os.path.dirname(file_path)) - print( - f"::notice:: bugfix check: {prefix} - {state}: {description} Report url: {report_url}" - ) - return state == "success" + is_ok = state == "success" + if is_ok and report_url == "null": + return is_ok, None + + status = f'OK: Bug reproduced (Report' + if not is_ok: + status = f'Bug is not reproduced (Report)' + test_results.append([f"{prefix}: {description}", status]) + return is_ok, test_results + + +def 
process_all_results(file_paths): + any_ok = False + all_results = [] + for status_path in file_paths: + is_ok, test_results = process_result(status_path) + any_ok = any_ok or is_ok + if test_results is not None: + all_results.extend(test_results) + + return any_ok, all_results def main(args): - is_ok = False - is_ok = process_results(args.report1) or is_ok - is_ok = process_results(args.report2) or is_ok - sys.exit(0 if is_ok else 1) + logging.basicConfig(level=logging.INFO) + + check_name_with_group = "Bugfix validate check" + + is_ok, test_results = process_all_results(args.status) + + if not test_results: + logging.info("No results to upload") + return + + pr_info = PRInfo() + report_url = upload_results( + S3Helper(), + pr_info.number, + pr_info.sha, + test_results, + [], + check_name_with_group, + ) + + gh = Github(get_best_robot_token(), per_page=100) + post_commit_status( + gh, + pr_info.sha, + check_name_with_group, + "" if is_ok else "Changed tests doesn't reproduce the bug", + "success" if is_ok else "error", + report_url, + ) if __name__ == "__main__": diff --git a/tests/ci/cancel_and_rerun_workflow_lambda/app.py b/tests/ci/cancel_and_rerun_workflow_lambda/app.py index 813ee9d1ab7..21a5ce517f6 100644 --- a/tests/ci/cancel_and_rerun_workflow_lambda/app.py +++ b/tests/ci/cancel_and_rerun_workflow_lambda/app.py @@ -15,7 +15,7 @@ import boto3 # type: ignore NEED_RERUN_OR_CANCELL_WORKFLOWS = { "PullRequestCI", "DocsCheck", - "DocsRelease", + "DocsReleaseChecks", "BackportPR", } diff --git a/tests/ci/clickhouse_helper.py b/tests/ci/clickhouse_helper.py index a81334860d1..c82d9da05e9 100644 --- a/tests/ci/clickhouse_helper.py +++ b/tests/ci/clickhouse_helper.py @@ -37,12 +37,8 @@ class ClickHouseHelper: url, params=params, data=json_str, headers=auth ) except Exception as e: - logging.warning( - "Received exception while sending data to %s on %s attempt: %s", - url, - i, - e, - ) + error = f"Received exception while sending data to {url} on {i} attempt: {e}" + logging.warning(error) continue logging.info("Response content '%s'", response.content) diff --git a/tests/ci/functional_test_check.py b/tests/ci/functional_test_check.py index 388f93f34ec..f7d3288c316 100644 --- a/tests/ci/functional_test_check.py +++ b/tests/ci/functional_test_check.py @@ -210,7 +210,10 @@ if __name__ == "__main__": run_changed_tests = flaky_check or validate_bugix_check gh = Github(get_best_robot_token(), per_page=100) - pr_info = PRInfo(need_changed_files=run_changed_tests) + # For validate_bugix_check we need up to date information about labels, so pr_event_from_api is used + pr_info = PRInfo( + need_changed_files=run_changed_tests, pr_event_from_api=validate_bugix_check + ) atexit.register(update_mergeable_check, gh, pr_info, check_name) @@ -221,11 +224,11 @@ if __name__ == "__main__": if args.post_commit_status == "file": post_commit_status_to_file( os.path.join(temp_path, "post_commit_status.tsv"), - "Skipped (no pr-bugfix)", + f"Skipped (no pr-bugfix in {pr_info.labels})", "success", "null", ) - logging.info("Skipping '%s' (no pr-bugfix)", check_name) + logging.info("Skipping '%s' (no pr-bugfix in %s)", check_name, pr_info.labels) sys.exit(0) if "RUN_BY_HASH_NUM" in os.environ: @@ -320,7 +323,7 @@ if __name__ == "__main__": state, description, test_results, additional_logs = process_results( result_path, server_log_path ) - state = override_status(state, check_name, validate_bugix_check) + state = override_status(state, check_name, invert=validate_bugix_check) ch_helper = ClickHouseHelper() 
mark_flaky_tests(ch_helper, check_name, test_results) diff --git a/tests/ci/integration_test_check.py b/tests/ci/integration_test_check.py index 3709a7271d7..cba428cbcf5 100644 --- a/tests/ci/integration_test_check.py +++ b/tests/ci/integration_test_check.py @@ -167,17 +167,22 @@ if __name__ == "__main__": os.makedirs(temp_path) is_flaky_check = "flaky" in check_name - pr_info = PRInfo(need_changed_files=is_flaky_check or validate_bugix_check) + + # For validate_bugix_check we need up to date information about labels, so pr_event_from_api is used + pr_info = PRInfo( + need_changed_files=is_flaky_check or validate_bugix_check, + pr_event_from_api=validate_bugix_check, + ) if validate_bugix_check and "pr-bugfix" not in pr_info.labels: if args.post_commit_status == "file": post_commit_status_to_file( os.path.join(temp_path, "post_commit_status.tsv"), - "Skipped (no pr-bugfix)", + f"Skipped (no pr-bugfix in {pr_info.labels})", "success", "null", ) - logging.info("Skipping '%s' (no pr-bugfix)", check_name) + logging.info("Skipping '%s' (no pr-bugfix in '%s')", check_name, pr_info.labels) sys.exit(0) gh = Github(get_best_robot_token(), per_page=100) @@ -244,7 +249,7 @@ if __name__ == "__main__": subprocess.check_call(f"sudo chown -R ubuntu:ubuntu {temp_path}", shell=True) state, description, test_results, additional_logs = process_results(result_path) - state = override_status(state, check_name, validate_bugix_check) + state = override_status(state, check_name, invert=validate_bugix_check) ch_helper = ClickHouseHelper() mark_flaky_tests(ch_helper, check_name, test_results) diff --git a/tests/ci/version_helper.py b/tests/ci/version_helper.py index 966858c0747..162bab6a50a 100755 --- a/tests/ci/version_helper.py +++ b/tests/ci/version_helper.py @@ -344,7 +344,7 @@ def main(): update_contributors() return - version = get_version_from_repo(args.version_path) + version = get_version_from_repo(args.version_path, Git(True)) if args.update: version = version.update(args.update) diff --git a/tests/ci/workflow_approve_rerun_lambda/app.py b/tests/ci/workflow_approve_rerun_lambda/app.py index 39bd9cfb283..f2b785840d8 100644 --- a/tests/ci/workflow_approve_rerun_lambda/app.py +++ b/tests/ci/workflow_approve_rerun_lambda/app.py @@ -61,11 +61,11 @@ TRUSTED_WORKFLOW_IDS = { NEED_RERUN_WORKFLOWS = { "BackportPR", - "Docs", - "DocsRelease", + "DocsCheck", + "DocsReleaseChecks", "MasterCI", "PullRequestCI", - "ReleaseCI", + "ReleaseBranchCI", } # Individual trusted contirbutors who are not in any trusted organization. 
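For reference, the rewritten tests/ci/bugfix_validate_check.py above no longer takes exactly two report paths. A minimal sketch of the new argument contract; the file names below are illustrative, not the real CI layout:

```python
import argparse

# "status" collects one or more status-file paths, replacing the fixed
# report1/report2 pair; the paths below are only examples.
parser = argparse.ArgumentParser()
parser.add_argument("status", nargs="+", help="Path to status file")
args = parser.parse_args(
    [
        "functional_test/post_commit_status.tsv",
        "integration_test/post_commit_status.tsv",
    ]
)
assert args.status == [
    "functional_test/post_commit_status.tsv",
    "integration_test/post_commit_status.tsv",
]
```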
diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 12f85a5adbf..20e63412d91 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -987,7 +987,7 @@ class TestCase: and (proc.stderr is None) and (proc.stdout is None or "Exception" not in proc.stdout) ) - need_drop_database = not maybe_passed + need_drop_database = maybe_passed debug_log = "" if os.path.exists(self.testcase_args.debug_log_file): @@ -2055,7 +2055,7 @@ if __name__ == "__main__": parser.add_argument( "--no-drop-if-fail", action="store_true", - help="Do not drop database for test if test has failed", + help="Do not drop database for test if test has failed (does not work if reference file mismatch)", ) parser.add_argument( "--hide-db-name", diff --git a/tests/config/config.d/storage_conf.xml b/tests/config/config.d/storage_conf.xml index a2a7f5cc750..8226d801cef 100644 --- a/tests/config/config.d/storage_conf.xml +++ b/tests/config/config.d/storage_conf.xml @@ -93,6 +93,15 @@ 22548578304 0 + + cache + s3_disk_6 + s3_cache_6/ + 22548578304 + 0 + 1 + 100 + cache s3_disk_6 @@ -183,6 +192,13 @@ + + +
+ s3_cache_6 +
+
+
diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index c987ca292c1..666833013c8 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -2678,7 +2678,9 @@ class ClickHouseCluster: # Check server logs for Fatal messages and sanitizer failures. # NOTE: we cannot do this via docker since in case of Fatal message container may already die. for name, instance in self.instances.items(): - if instance.contains_in_log(SANITIZER_SIGN, from_host=True): + if instance.contains_in_log( + SANITIZER_SIGN, from_host=True, filename="stderr.log" + ): sanitizer_assert_instance = instance.grep_in_log( SANITIZER_SIGN, from_host=True, filename="stderr.log" ) diff --git a/tests/integration/test_attach_backup_from_s3_plain/__init__.py b/tests/integration/test_attach_backup_from_s3_plain/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_backup_restore_s3/configs/storage_conf.xml b/tests/integration/test_attach_backup_from_s3_plain/configs/disk_s3.xml similarity index 54% rename from tests/integration/test_backup_restore_s3/configs/storage_conf.xml rename to tests/integration/test_attach_backup_from_s3_plain/configs/disk_s3.xml index 0402be720c4..67278694d39 100644 --- a/tests/integration/test_backup_restore_s3/configs/storage_conf.xml +++ b/tests/integration/test_attach_backup_from_s3_plain/configs/disk_s3.xml @@ -1,42 +1,34 @@ + - - s3 - http://minio1:9001/root/data/ - minio - minio123 - 33554432 - - + s3_plain - http://minio1:9001/root/data/ + http://minio1:9001/root/data/disks/disk_s3_plain/ minio minio123 33554432 - - - local - / - + + + s3_plain + + http://minio1:9001/root/data/disks/disk_s3_plain/backup/ + minio + minio123 + 33554432 + - +
- s3 + attach_disk_s3_plain
-
+
- - default - - s3 - s3_plain - - /backups/ + backup_disk_s3_plain
diff --git a/tests/integration/test_attach_backup_from_s3_plain/test.py b/tests/integration/test_attach_backup_from_s3_plain/test.py new file mode 100644 index 00000000000..35d53d5b8bd --- /dev/null +++ b/tests/integration/test_attach_backup_from_s3_plain/test.py @@ -0,0 +1,40 @@ +# pylint: disable=global-statement +# pylint: disable=line-too-long + +import pytest +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) +node = cluster.add_instance( + "node", + main_configs=["configs/disk_s3.xml"], + with_minio=True, +) + + +@pytest.fixture(scope="module", autouse=True) +def start_cluster(): + try: + cluster.start() + yield + finally: + cluster.shutdown() + + +def test_attach_backup(): + node.query( + f""" + -- BACKUP writes Ordinary like structure + set allow_deprecated_database_ordinary=1; + create database ordinary engine=Ordinary; + + create table ordinary.test_backup_attach engine=MergeTree() order by tuple() as select * from numbers(100); + -- NOTE: name of backup ("backup") is significant. + backup table ordinary.test_backup_attach TO Disk('backup_disk_s3_plain', 'backup'); + + drop table ordinary.test_backup_attach; + attach table ordinary.test_backup_attach (number UInt64) engine=MergeTree() order by tuple() settings storage_policy='attach_policy_s3_plain'; + """ + ) + + assert int(node.query("select count() from ordinary.test_backup_attach")) == 100 diff --git a/tests/integration/test_backup_restore_s3/configs/disk_s3.xml b/tests/integration/test_backup_restore_s3/configs/disk_s3.xml new file mode 100644 index 00000000000..c1fd059bc67 --- /dev/null +++ b/tests/integration/test_backup_restore_s3/configs/disk_s3.xml @@ -0,0 +1,47 @@ + + + + + + s3 + http://minio1:9001/root/data/disks/disk_s3/ + minio + minio123 + + + s3 + http://minio1:9001/root2/data/disks/disk_s3/ + minio + minio123 + + + s3_plain + http://minio1:9001/root/data/disks/disk_s3_plain/ + minio + minio123 + 33554432 + + + + + +
+            <policy_s3>
+                <volumes>
+                    <main>
+                        <disk>disk_s3</disk>
+                    </main>
+                </volumes>
+            </policy_s3>
+            <policy_s3_other_bucket>
+                <volumes>
+                    <main>
+                        <disk>disk_s3_other_bucket</disk>
+                    </main>
+                </volumes>
+            </policy_s3_other_bucket>
+        </policies>
+    </storage_configuration>
+    <backups>
+        <allowed_disk>default</allowed_disk>
+        <allowed_disk>disk_s3</allowed_disk>
+        <allowed_disk>disk_s3_plain</allowed_disk>
+    </backups>
+</clickhouse>
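The named collection added in the next file gives the backup tests a second way to spell the same S3 destination. A small sketch of the equivalence, using the MinIO endpoint and test credentials that appear elsewhere in this patch; the backup name is arbitrary:

```python
# The explicit and the named-collection destinations below are expected to be
# interchangeable in BACKUP/RESTORE statements; named_collection_s3_backups is
# defined in named_collection_s3_backups.xml, and the credentials are the
# integration test's MinIO values, not real secrets.
def backup_destinations(backup_name: str) -> tuple[str, str]:
    explicit = (
        f"S3('http://minio1:9001/root/data/backups/{backup_name}', "
        "'minio', 'minio123')"
    )
    named = f"S3(named_collection_s3_backups, '{backup_name}')"
    return explicit, named


print(backup_destinations("backup1"))
```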
diff --git a/tests/integration/test_backup_restore_s3/configs/named_collection_s3_backups.xml b/tests/integration/test_backup_restore_s3/configs/named_collection_s3_backups.xml new file mode 100644 index 00000000000..7a9d5effede --- /dev/null +++ b/tests/integration/test_backup_restore_s3/configs/named_collection_s3_backups.xml @@ -0,0 +1,9 @@ + + + + http://minio1:9001/root/data/backups + minio + minio123 + + + \ No newline at end of file diff --git a/tests/integration/test_backup_restore_s3/test.py b/tests/integration/test_backup_restore_s3/test.py index e18b3800fc0..617c14d6736 100644 --- a/tests/integration/test_backup_restore_s3/test.py +++ b/tests/integration/test_backup_restore_s3/test.py @@ -1,65 +1,40 @@ -#!/usr/bin/env python3 -# pylint: disable=unused-argument - import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) node = cluster.add_instance( "node", - main_configs=["configs/storage_conf.xml"], + main_configs=["configs/disk_s3.xml", "configs/named_collection_s3_backups.xml"], with_minio=True, ) -@pytest.fixture(scope="module") +@pytest.fixture(scope="module", autouse=True) def start_cluster(): try: cluster.start() - yield cluster + yield finally: cluster.shutdown() -@pytest.mark.parametrize( - "storage_policy,to_disk", - [ - pytest.param( - "default", - "default", - id="from_local_to_local", - ), - pytest.param( - "s3", - "default", - id="from_s3_to_local", - ), - pytest.param( - "default", - "s3", - id="from_local_to_s3", - ), - pytest.param( - "s3", - "s3_plain", - id="from_s3_to_s3_plain", - ), - pytest.param( - "default", - "s3_plain", - id="from_local_to_s3_plain", - ), - ], -) -def test_backup_restore(start_cluster, storage_policy, to_disk): - backup_name = storage_policy + "_" + to_disk +backup_id_counter = 0 + + +def new_backup_name(): + global backup_id_counter + backup_id_counter += 1 + return f"backup{backup_id_counter}" + + +def check_backup_and_restore(storage_policy, backup_destination): node.query( f""" DROP TABLE IF EXISTS data NO DELAY; CREATE TABLE data (key Int, value String, array Array(String)) Engine=MergeTree() ORDER BY tuple() SETTINGS storage_policy='{storage_policy}'; INSERT INTO data SELECT * FROM generateRandom('key Int, value String, array Array(String)') LIMIT 1000; - BACKUP TABLE data TO Disk('{to_disk}', '{backup_name}'); - RESTORE TABLE data AS data_restored FROM Disk('{to_disk}', '{backup_name}'); + BACKUP TABLE data TO {backup_destination}; + RESTORE TABLE data AS data_restored FROM {backup_destination}; SELECT throwIf( (SELECT groupArray(tuple(*)) FROM data) != (SELECT groupArray(tuple(*)) FROM data_restored), @@ -69,3 +44,75 @@ def test_backup_restore(start_cluster, storage_policy, to_disk): DROP TABLE data_restored NO DELAY; """ ) + + +@pytest.mark.parametrize( + "storage_policy, to_disk", + [ + pytest.param( + "default", + "default", + id="from_local_to_local", + ), + pytest.param( + "policy_s3", + "default", + id="from_s3_to_local", + ), + pytest.param( + "default", + "disk_s3", + id="from_local_to_s3", + ), + pytest.param( + "policy_s3", + "disk_s3_plain", + id="from_s3_to_s3_plain", + ), + pytest.param( + "default", + "disk_s3_plain", + id="from_local_to_s3_plain", + ), + ], +) +def test_backup_to_disk(storage_policy, to_disk): + backup_name = new_backup_name() + backup_destination = f"Disk('{to_disk}', '{backup_name}')" + check_backup_and_restore(storage_policy, backup_destination) + + +def test_backup_to_s3(): + storage_policy = "default" + backup_name = new_backup_name() + 
backup_destination = ( + f"S3('http://minio1:9001/root/data/backups/{backup_name}', 'minio', 'minio123')" + ) + check_backup_and_restore(storage_policy, backup_destination) + + +def test_backup_to_s3_named_collection(): + storage_policy = "default" + backup_name = new_backup_name() + backup_destination = f"S3(named_collection_s3_backups, '{backup_name}')" + check_backup_and_restore(storage_policy, backup_destination) + + +def test_backup_to_s3_native_copy(): + storage_policy = "policy_s3" + backup_name = new_backup_name() + backup_destination = ( + f"S3('http://minio1:9001/root/data/backups/{backup_name}', 'minio', 'minio123')" + ) + check_backup_and_restore(storage_policy, backup_destination) + assert node.contains_in_log("using native copy") + + +def test_backup_to_s3_other_bucket_native_copy(): + storage_policy = "policy_s3_other_bucket" + backup_name = new_backup_name() + backup_destination = ( + f"S3('http://minio1:9001/root/data/backups/{backup_name}', 'minio', 'minio123')" + ) + check_backup_and_restore(storage_policy, backup_destination) + assert node.contains_in_log("using native copy") diff --git a/tests/integration/test_backward_compatibility/test_functions.py b/tests/integration/test_backward_compatibility/test_functions.py new file mode 100644 index 00000000000..fe1c0ea7108 --- /dev/null +++ b/tests/integration/test_backward_compatibility/test_functions.py @@ -0,0 +1,224 @@ +# pylint: disable=unused-argument +# pylint: disable=line-too-long +# pylint: disable=call-var-from-loop +# pylint: disable=redefined-outer-name + +import logging +import pytest +from helpers.cluster import ClickHouseCluster +from helpers.client import QueryRuntimeException + +cluster = ClickHouseCluster(__file__) +upstream = cluster.add_instance("upstream") +backward = cluster.add_instance( + "backward", + image="clickhouse/clickhouse-server", + tag="22.9", + with_installed_binary=True, +) + + +@pytest.fixture(scope="module") +def start_cluster(): + try: + cluster.start() + yield cluster + + finally: + cluster.shutdown() + + +def test_aggregate_states(start_cluster): + """ + This test goes though all aggregate functions that: + - has only one argument + - support string as an argument + + And do a simple check by creating the aggregate state with one string. + + Yes this is not covers everything (does not cover functions with + different number of arguments, types, different states in case of + multiple values - uniqCombined, but as for uniqCombined it will be + checked via uniqHLL12), but at least something. + + And about type, String had been selected, since it more likely that + there will be used some hash function internally. 
+ """ + + aggregate_functions = backward.query( + """ + SELECT if(NOT empty(alias_to), alias_to, name) + FROM system.functions + WHERE is_aggregate = 1 + """ + ) + aggregate_functions = aggregate_functions.strip().split("\n") + aggregate_functions = map(lambda x: x.strip(), aggregate_functions) + + aggregate_functions = list(aggregate_functions) + logging.info("Got %s aggregate functions", len(aggregate_functions)) + + skipped = 0 + failed = 0 + passed = 0 + + def get_aggregate_state_hex(node, function_name): + return node.query( + f"select hex(initializeAggregation('{function_name}State', 'foo'))" + ).strip() + + for aggregate_function in aggregate_functions: + logging.info("Checking %s", aggregate_function) + + try: + backward_state = get_aggregate_state_hex(backward, aggregate_function) + except QueryRuntimeException as e: + error_message = str(e) + allowed_errors = [ + "NUMBER_OF_ARGUMENTS_DOESNT_MATCH", + "ILLEGAL_TYPE_OF_ARGUMENT", + # sequenceNextNode() and friends + "UNKNOWN_AGGREGATE_FUNCTION", + # Function X takes exactly one parameter: + # The function 'X' can only be used as a window function + "BAD_ARGUMENTS", + # aggThrow + "AGGREGATE_FUNCTION_THROW", + ] + if any(map(lambda x: x in error_message, allowed_errors)): + logging.info("Skipping %s", aggregate_function) + skipped += 1 + continue + logging.exception("Failed %s", function) + failed += 1 + continue + + upstream_state = get_aggregate_state_hex(upstream, aggregate_function) + if upstream_state != backward_state: + logging.info( + "Failed %s, %s (backward) != %s (upstream)", + aggregate_function, + backward_state, + upstream_state, + ) + failed += 1 + else: + logging.info("OK %s", aggregate_function) + passed += 1 + + logging.info( + "Aggregate functions: %s, Failed: %s, skipped: %s, passed: %s", + len(aggregate_functions), + failed, + skipped, + passed, + ) + assert failed == 0 + assert passed > 0 + assert failed + passed + skipped == len(aggregate_functions) + + +def test_string_functions(start_cluster): + functions = backward.query( + """ + SELECT if(NOT empty(alias_to), alias_to, name) + FROM system.functions + WHERE is_aggregate = 0 + """ + ) + functions = functions.strip().split("\n") + functions = map(lambda x: x.strip(), functions) + + excludes = [ + "rand", + "rand64", + "randConstant", + "generateUUIDv4", + # Syntax error otherwise + "position", + "substring", + "CAST", + # NOTE: no need to ignore now()/now64() since they will fail because they don't accept any argument + ] + functions = filter(lambda x: x not in excludes, functions) + + functions = list(functions) + logging.info("Got %s functions", len(functions)) + + skipped = 0 + failed = 0 + passed = 0 + + def get_function_value(node, function_name, value="foo"): + return node.query(f"select {function_name}('{value}')").strip() + + for function in functions: + logging.info("Checking %s", function) + + try: + backward_value = get_function_value(backward, function) + except QueryRuntimeException as e: + error_message = str(e) + allowed_errors = [ + # Messages + "Cannot load time zone ", + "No macro ", + "Should start with ", # POINT/POLYGON/... 
+ "Cannot read input: expected a digit but got something else:", + # ErrorCodes + "NUMBER_OF_ARGUMENTS_DOESNT_MATCH", + "ILLEGAL_TYPE_OF_ARGUMENT", + "TOO_FEW_ARGUMENTS_FOR_FUNCTION", + "DICTIONARIES_WAS_NOT_LOADED", + "CANNOT_PARSE_UUID", + "CANNOT_PARSE_DOMAIN_VALUE_FROM_STRING", + "ILLEGAL_COLUMN", + "TYPE_MISMATCH", + "SUPPORT_IS_DISABLED", + "CANNOT_PARSE_DATE", + "UNKNOWN_SETTING", + "CANNOT_PARSE_BOOL", + "FILE_DOESNT_EXIST", + "NOT_IMPLEMENTED", + "BAD_GET", + "UNKNOWN_TYPE", + # addressToSymbol + "FUNCTION_NOT_ALLOWED", + # Date functions + "CANNOT_PARSE_TEXT", + "CANNOT_PARSE_DATETIME", + # Function X takes exactly one parameter: + # The function 'X' can only be used as a window function + "BAD_ARGUMENTS", + ] + if any(map(lambda x: x in error_message, allowed_errors)): + logging.info("Skipping %s", function) + skipped += 1 + continue + logging.exception("Failed %s", function) + failed += 1 + continue + + upstream_value = get_function_value(upstream, function) + if upstream_value != backward_value: + logging.info( + "Failed %s, %s (backward) != %s (upstream)", + function, + backward_value, + upstream_value, + ) + failed += 1 + else: + logging.info("OK %s", function) + passed += 1 + + logging.info( + "Functions: %s, failed: %s, skipped: %s, passed: %s", + len(functions), + failed, + skipped, + passed, + ) + assert failed == 0 + assert passed > 0 + assert failed + passed + skipped == len(functions) diff --git a/tests/integration/test_disk_over_web_server/test.py b/tests/integration/test_disk_over_web_server/test.py index ea6e407a18f..2ccc17db4f4 100644 --- a/tests/integration/test_disk_over_web_server/test.py +++ b/tests/integration/test_disk_over_web_server/test.py @@ -129,6 +129,9 @@ def test_incorrect_usage(cluster): result = node2.query_and_get_error("TRUNCATE TABLE test0") assert "Table is read-only" in result + result = node2.query_and_get_error("OPTIMIZE TABLE test0 FINAL") + assert "Only read-only operations are supported" in result + node2.query("DROP TABLE test0 SYNC") diff --git a/tests/integration/test_disks_app_func/test.py b/tests/integration/test_disks_app_func/test.py index d87f387e122..de9b23abd5e 100644 --- a/tests/integration/test_disks_app_func/test.py +++ b/tests/integration/test_disks_app_func/test.py @@ -37,7 +37,7 @@ def test_disks_app_func_ld(started_cluster): source = cluster.instances["disks_app_test"] out = source.exec_in_container( - ["/usr/bin/clickhouse", "disks", "--send-logs", "list-disks"] + ["/usr/bin/clickhouse", "disks", "--save-logs", "list-disks"] ) disks = out.split("\n") @@ -51,7 +51,7 @@ def test_disks_app_func_ls(started_cluster): init_data(source) out = source.exec_in_container( - ["/usr/bin/clickhouse", "disks", "--send-logs", "--disk", "test1", "list", "."] + ["/usr/bin/clickhouse", "disks", "--save-logs", "--disk", "test1", "list", "."] ) files = out.split("\n") @@ -62,7 +62,7 @@ def test_disks_app_func_ls(started_cluster): [ "/usr/bin/clickhouse", "disks", - "--send-logs", + "--save-logs", "--disk", "test1", "list", @@ -89,7 +89,7 @@ def test_disks_app_func_cp(started_cluster): [ "/usr/bin/clickhouse", "disks", - "--send-logs", + "--save-logs", "--disk", "test1", "write", @@ -114,7 +114,7 @@ def test_disks_app_func_cp(started_cluster): ) out = source.exec_in_container( - ["/usr/bin/clickhouse", "disks", "--send-logs", "--disk", "test2", "list", "."] + ["/usr/bin/clickhouse", "disks", "--save-logs", "--disk", "test2", "list", "."] ) assert "path1" in out @@ -123,7 +123,7 @@ def test_disks_app_func_cp(started_cluster): [ 
"/usr/bin/clickhouse", "disks", - "--send-logs", + "--save-logs", "--disk", "test2", "remove", @@ -135,7 +135,7 @@ def test_disks_app_func_cp(started_cluster): [ "/usr/bin/clickhouse", "disks", - "--send-logs", + "--save-logs", "--disk", "test1", "remove", @@ -146,13 +146,13 @@ def test_disks_app_func_cp(started_cluster): # alesapin: Why we need list one more time? # kssenii: it is an assertion that the file is indeed deleted out = source.exec_in_container( - ["/usr/bin/clickhouse", "disks", "--send-logs", "--disk", "test2", "list", "."] + ["/usr/bin/clickhouse", "disks", "--save-logs", "--disk", "test2", "list", "."] ) assert "path1" not in out out = source.exec_in_container( - ["/usr/bin/clickhouse", "disks", "--send-logs", "--disk", "test1", "list", "."] + ["/usr/bin/clickhouse", "disks", "--save-logs", "--disk", "test1", "list", "."] ) assert "path1" not in out @@ -174,7 +174,7 @@ def test_disks_app_func_ln(started_cluster): ) out = source.exec_in_container( - ["/usr/bin/clickhouse", "disks", "--send-logs", "list", "data/default/"] + ["/usr/bin/clickhouse", "disks", "--save-logs", "list", "data/default/"] ) files = out.split("\n") @@ -196,7 +196,7 @@ def test_disks_app_func_rm(started_cluster): [ "/usr/bin/clickhouse", "disks", - "--send-logs", + "--save-logs", "--disk", "test2", "write", @@ -207,7 +207,7 @@ def test_disks_app_func_rm(started_cluster): ) out = source.exec_in_container( - ["/usr/bin/clickhouse", "disks", "--send-logs", "--disk", "test2", "list", "."] + ["/usr/bin/clickhouse", "disks", "--save-logs", "--disk", "test2", "list", "."] ) assert "path3" in out @@ -216,7 +216,7 @@ def test_disks_app_func_rm(started_cluster): [ "/usr/bin/clickhouse", "disks", - "--send-logs", + "--save-logs", "--disk", "test2", "remove", @@ -225,7 +225,7 @@ def test_disks_app_func_rm(started_cluster): ) out = source.exec_in_container( - ["/usr/bin/clickhouse", "disks", "--send-logs", "--disk", "test2", "list", "."] + ["/usr/bin/clickhouse", "disks", "--save-logs", "--disk", "test2", "list", "."] ) assert "path3" not in out @@ -237,7 +237,7 @@ def test_disks_app_func_mv(started_cluster): init_data(source) out = source.exec_in_container( - ["/usr/bin/clickhouse", "disks", "--send-logs", "--disk", "test1", "list", "."] + ["/usr/bin/clickhouse", "disks", "--save-logs", "--disk", "test1", "list", "."] ) files = out.split("\n") @@ -257,7 +257,7 @@ def test_disks_app_func_mv(started_cluster): ) out = source.exec_in_container( - ["/usr/bin/clickhouse", "disks", "--send-logs", "--disk", "test1", "list", "."] + ["/usr/bin/clickhouse", "disks", "--save-logs", "--disk", "test1", "list", "."] ) files = out.split("\n") @@ -277,7 +277,7 @@ def test_disks_app_func_read_write(started_cluster): [ "/usr/bin/clickhouse", "disks", - "--send-logs", + "--save-logs", "--disk", "test1", "write", @@ -291,7 +291,7 @@ def test_disks_app_func_read_write(started_cluster): [ "/usr/bin/clickhouse", "disks", - "--send-logs", + "--save-logs", "--disk", "test1", "read", diff --git a/tests/integration/test_grpc_protocol/test.py b/tests/integration/test_grpc_protocol/test.py index 52c583973d0..a3f2650eac7 100644 --- a/tests/integration/test_grpc_protocol/test.py +++ b/tests/integration/test_grpc_protocol/test.py @@ -387,7 +387,7 @@ progress { , stats { rows: 8 blocks: 4 - allocated_bytes: 324 + allocated_bytes: 1092 applied_limit: true rows_before_limit: 8 } diff --git a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/__init__.py b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/__init__.py 
new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/configs/config.xml b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/configs/config.xml new file mode 100644 index 00000000000..42a1f962705 --- /dev/null +++ b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/configs/config.xml @@ -0,0 +1,4 @@ + + 1 + 250 + diff --git a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/configs/host_regexp.xml b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/configs/host_regexp.xml new file mode 100644 index 00000000000..7a2141e6c7e --- /dev/null +++ b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/configs/host_regexp.xml @@ -0,0 +1,11 @@ + + + + + + test1\.example\.com$ + + default + + + \ No newline at end of file diff --git a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/configs/listen_host.xml b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/configs/listen_host.xml new file mode 100644 index 00000000000..58ef55cd3f3 --- /dev/null +++ b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/configs/listen_host.xml @@ -0,0 +1,5 @@ + + :: + 0.0.0.0 + 1 + diff --git a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/coredns_config/Corefile b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/coredns_config/Corefile new file mode 100644 index 00000000000..0dd198441dc --- /dev/null +++ b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/coredns_config/Corefile @@ -0,0 +1,8 @@ +. { + hosts /example.com { + reload "200ms" + fallthrough + } + forward . 127.0.0.11 + log +} diff --git a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/coredns_config/example.com b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/coredns_config/example.com new file mode 100644 index 00000000000..9beb415c290 --- /dev/null +++ b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/coredns_config/example.com @@ -0,0 +1 @@ +filled in runtime, but needs to exist in order to be volume mapped in docker \ No newline at end of file diff --git a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/scripts/stress_test.py b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/scripts/stress_test.py new file mode 100644 index 00000000000..b8bafb3d0c1 --- /dev/null +++ b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/scripts/stress_test.py @@ -0,0 +1,63 @@ +import pycurl +import threading +from io import BytesIO +import sys + +client_ip = sys.argv[1] +server_ip = sys.argv[2] + +mutex = threading.Lock() +success_counter = 0 +number_of_threads = 100 +number_of_iterations = 100 + + +def perform_request(): + + buffer = BytesIO() + crl = pycurl.Curl() + crl.setopt(pycurl.INTERFACE, client_ip) + crl.setopt(crl.WRITEDATA, buffer) + crl.setopt(crl.URL, f"http://{server_ip}:8123/?query=select+1&user=test_dns") + + crl.perform() + + # End curl session + crl.close() + + str_response = buffer.getvalue().decode("iso-8859-1") + expected_response = "1\n" + + mutex.acquire() + + global success_counter + + if str_response == expected_response: + success_counter += 1 + + mutex.release() + + +def perform_multiple_requests(n): + for request_number in range(n): + perform_request() + + +threads = [] + + +for i in range(number_of_threads): + thread = threading.Thread( + 
target=perform_multiple_requests, args=(number_of_iterations,) + ) + thread.start() + threads.append(thread) + +for thread in threads: + thread.join() + + +if success_counter == number_of_threads * number_of_iterations: + exit(0) + +exit(1) diff --git a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/test.py b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/test.py new file mode 100644 index 00000000000..62f47579612 --- /dev/null +++ b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/test.py @@ -0,0 +1,71 @@ +import pytest +from helpers.cluster import ClickHouseCluster, get_docker_compose_path, run_and_check +from time import sleep +import os + +DOCKER_COMPOSE_PATH = get_docker_compose_path() +SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) + +cluster = ClickHouseCluster(__file__) + +ch_server = cluster.add_instance( + "clickhouse-server", + with_coredns=True, + main_configs=["configs/config.xml", "configs/listen_host.xml"], + user_configs=["configs/host_regexp.xml"], +) + +client = cluster.add_instance( + "clickhouse-client", +) + + +@pytest.fixture(scope="module") +def started_cluster(): + global cluster + try: + cluster.start() + yield cluster + + finally: + cluster.shutdown() + + +def setup_dns_server(ip): + domains_string = "test3.example.com test2.example.com test1.example.com" + example_file_path = f'{ch_server.env_variables["COREDNS_CONFIG_DIR"]}/example.com' + run_and_check(f"echo '{ip} {domains_string}' > {example_file_path}", shell=True) + + +def setup_ch_server(dns_server_ip): + ch_server.exec_in_container( + (["bash", "-c", f"echo 'nameserver {dns_server_ip}' > /etc/resolv.conf"]) + ) + ch_server.exec_in_container( + (["bash", "-c", "echo 'options ndots:0' >> /etc/resolv.conf"]) + ) + ch_server.query("SYSTEM DROP DNS CACHE") + + +def build_endpoint_v4(ip): + return f"'http://{ip}:8123/?query=SELECT+1&user=test_dns'" + + +def build_endpoint_v6(ip): + return build_endpoint_v4(f"[{ip}]") + + +def test_host_regexp_multiple_ptr_v4(started_cluster): + server_ip = cluster.get_instance_ip("clickhouse-server") + client_ip = cluster.get_instance_ip("clickhouse-client") + dns_server_ip = cluster.get_instance_ip(cluster.coredns_host) + + setup_dns_server(client_ip) + setup_ch_server(dns_server_ip) + + current_dir = os.path.dirname(__file__) + client.copy_file_to_container( + os.path.join(current_dir, "scripts", "stress_test.py"), "stress_test.py" + ) + + client.exec_in_container(["python3", f"stress_test.py", client_ip, server_ip]) diff --git a/tests/integration/test_keeper_s3_snapshot/__init__.py b/tests/integration/test_keeper_s3_snapshot/__init__.py new file mode 100644 index 00000000000..e5a0d9b4834 --- /dev/null +++ b/tests/integration/test_keeper_s3_snapshot/__init__.py @@ -0,0 +1 @@ +#!/usr/bin/env python3 diff --git a/tests/integration/test_keeper_s3_snapshot/configs/keeper_config1.xml b/tests/integration/test_keeper_s3_snapshot/configs/keeper_config1.xml new file mode 100644 index 00000000000..8459ea3e068 --- /dev/null +++ b/tests/integration/test_keeper_s3_snapshot/configs/keeper_config1.xml @@ -0,0 +1,42 @@ + + + + http://minio1:9001/snapshots/ + minio + minio123 + + 9181 + 1 + /var/lib/clickhouse/coordination/log + /var/lib/clickhouse/coordination/snapshots + * + + + 5000 + 10000 + 5000 + 50 + trace + + + + + 1 + node1 + 9234 + + + 2 + node2 + 9234 + true + + + 3 + node3 + 9234 + true + + + + diff --git a/tests/integration/test_keeper_s3_snapshot/configs/keeper_config2.xml 
b/tests/integration/test_keeper_s3_snapshot/configs/keeper_config2.xml new file mode 100644 index 00000000000..dfe73628f66 --- /dev/null +++ b/tests/integration/test_keeper_s3_snapshot/configs/keeper_config2.xml @@ -0,0 +1,42 @@ + + + + http://minio1:9001/snapshots/ + minio + minio123 + + 9181 + 2 + /var/lib/clickhouse/coordination/log + /var/lib/clickhouse/coordination/snapshots + * + + + 5000 + 10000 + 5000 + 75 + trace + + + + + 1 + node1 + 9234 + + + 2 + node2 + 9234 + true + + + 3 + node3 + 9234 + true + + + + diff --git a/tests/integration/test_keeper_s3_snapshot/configs/keeper_config3.xml b/tests/integration/test_keeper_s3_snapshot/configs/keeper_config3.xml new file mode 100644 index 00000000000..948d9527718 --- /dev/null +++ b/tests/integration/test_keeper_s3_snapshot/configs/keeper_config3.xml @@ -0,0 +1,42 @@ + + + + http://minio1:9001/snapshots/ + minio + minio123 + + 9181 + 3 + /var/lib/clickhouse/coordination/log + /var/lib/clickhouse/coordination/snapshots + * + + + 5000 + 10000 + 5000 + 75 + trace + + + + + 1 + node1 + 9234 + + + 2 + node2 + 9234 + true + + + 3 + node3 + 9234 + true + + + + diff --git a/tests/integration/test_keeper_s3_snapshot/test.py b/tests/integration/test_keeper_s3_snapshot/test.py new file mode 100644 index 00000000000..3e19bc4822c --- /dev/null +++ b/tests/integration/test_keeper_s3_snapshot/test.py @@ -0,0 +1,120 @@ +import pytest +from helpers.cluster import ClickHouseCluster +from time import sleep + +from kazoo.client import KazooClient + +# from kazoo.protocol.serialization import Connect, read_buffer, write_buffer + +cluster = ClickHouseCluster(__file__) +node1 = cluster.add_instance( + "node1", + main_configs=["configs/keeper_config1.xml"], + stay_alive=True, + with_minio=True, +) +node2 = cluster.add_instance( + "node2", + main_configs=["configs/keeper_config2.xml"], + stay_alive=True, + with_minio=True, +) +node3 = cluster.add_instance( + "node3", + main_configs=["configs/keeper_config3.xml"], + stay_alive=True, + with_minio=True, +) + + +@pytest.fixture(scope="module") +def started_cluster(): + try: + cluster.start() + + cluster.minio_client.make_bucket("snapshots") + + yield cluster + + finally: + cluster.shutdown() + + +def get_fake_zk(nodename, timeout=30.0): + _fake_zk_instance = KazooClient( + hosts=cluster.get_instance_ip(nodename) + ":9181", timeout=timeout + ) + _fake_zk_instance.start() + return _fake_zk_instance + + +def destroy_zk_client(zk): + try: + if zk: + zk.stop() + zk.close() + except: + pass + + +def wait_node(node): + for _ in range(100): + zk = None + try: + zk = get_fake_zk(node.name, timeout=30.0) + zk.sync("/") + print("node", node.name, "ready") + break + except Exception as ex: + sleep(0.2) + print("Waiting until", node.name, "will be ready, exception", ex) + finally: + destroy_zk_client(zk) + else: + raise Exception("Can't wait node", node.name, "to become ready") + + +def test_s3_upload(started_cluster): + node1_zk = get_fake_zk(node1.name) + + # we defined in configs snapshot_distance as 50 + # so after 50 requests we should generate a snapshot + for _ in range(210): + node1_zk.create("/test", sequence=True) + + def get_saved_snapshots(): + return [ + obj.object_name + for obj in list(cluster.minio_client.list_objects("snapshots")) + ] + + saved_snapshots = get_saved_snapshots() + assert set(saved_snapshots) == set( + [ + "snapshot_50.bin.zstd", + "snapshot_100.bin.zstd", + "snapshot_150.bin.zstd", + "snapshot_200.bin.zstd", + ] + ) + + destroy_zk_client(node1_zk) + node1.stop_clickhouse(kill=True) + + # wait 
for new leader to be picked and that it continues + # uploading snapshots + wait_node(node2) + node2_zk = get_fake_zk(node2.name) + for _ in range(200): + node2_zk.create("/test", sequence=True) + + saved_snapshots = get_saved_snapshots() + + assert len(saved_snapshots) > 4 + + success_upload_message = "Successfully uploaded" + assert node2.contains_in_log(success_upload_message) or node3.contains_in_log( + success_upload_message + ) + + destroy_zk_client(node2_zk) diff --git a/tests/integration/test_merge_tree_optimize_old_parts/__init__.py b/tests/integration/test_merge_tree_optimize_old_parts/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_merge_tree_optimize_old_parts/configs/zookeeper_config.xml b/tests/integration/test_merge_tree_optimize_old_parts/configs/zookeeper_config.xml new file mode 100644 index 00000000000..18412349228 --- /dev/null +++ b/tests/integration/test_merge_tree_optimize_old_parts/configs/zookeeper_config.xml @@ -0,0 +1,8 @@ + + + + zoo1 + 2181 + + + diff --git a/tests/integration/test_merge_tree_optimize_old_parts/test.py b/tests/integration/test_merge_tree_optimize_old_parts/test.py new file mode 100644 index 00000000000..7b386eba2c4 --- /dev/null +++ b/tests/integration/test_merge_tree_optimize_old_parts/test.py @@ -0,0 +1,88 @@ +import pytest +import time +from helpers.client import QueryRuntimeException +from helpers.cluster import ClickHouseCluster +from helpers.test_tools import TSV + +cluster = ClickHouseCluster(__file__) +node = cluster.add_instance( + "node", + main_configs=["configs/zookeeper_config.xml"], + with_zookeeper=True, +) + + +@pytest.fixture(scope="module") +def start_cluster(): + try: + cluster.start() + + yield cluster + finally: + cluster.shutdown() + + +def get_part_number(table_name): + return TSV( + node.query( + f"SELECT count(*) FROM system.parts where table='{table_name}' and active=1" + ) + ) + + +def check_expected_part_number(seconds, table_name, expected): + ok = False + for i in range(int(seconds) * 2): + result = get_part_number(table_name) + if result == expected: + ok = True + break + else: + time.sleep(1) + assert ok + + +def test_without_force_merge_old_parts(start_cluster): + node.query( + "CREATE TABLE test_without_merge (i Int64) ENGINE = MergeTree ORDER BY i;" + ) + node.query("INSERT INTO test_without_merge SELECT 1") + node.query("INSERT INTO test_without_merge SELECT 2") + node.query("INSERT INTO test_without_merge SELECT 3") + + expected = TSV("""3\n""") + # verify that the parts don't get merged + for i in range(10): + if get_part_number("test_without_merge") != expected: + assert False + time.sleep(1) + + node.query("DROP TABLE test_without_merge;") + + +def test_force_merge_old_parts(start_cluster): + node.query( + "CREATE TABLE test_with_merge (i Int64) ENGINE = MergeTree ORDER BY i SETTINGS min_age_to_force_merge_seconds=5;" + ) + node.query("INSERT INTO test_with_merge SELECT 1") + node.query("INSERT INTO test_with_merge SELECT 2") + node.query("INSERT INTO test_with_merge SELECT 3") + + expected = TSV("""1\n""") + check_expected_part_number(10, "test_with_merge", expected) + + node.query("DROP TABLE test_with_merge;") + + +def test_force_merge_old_parts_replicated_merge_tree(start_cluster): + node.query( + "CREATE TABLE test_replicated (i Int64) ENGINE = ReplicatedMergeTree('/clickhouse/testing/test', 'node') ORDER BY i SETTINGS min_age_to_force_merge_seconds=5;" + ) + node.query("INSERT INTO test_replicated SELECT 1") + node.query("INSERT INTO 
test_replicated SELECT 2") + node.query("INSERT INTO test_replicated SELECT 3") + + expected = TSV("""1\n""") + check_expected_part_number(10, "test_replicated", expected) + + node.query("DROP TABLE test_replicated;") diff --git a/tests/integration/test_partition/configs/testkeeper.xml b/tests/integration/test_partition/configs/testkeeper.xml new file mode 100644 index 00000000000..5200b789a9b --- /dev/null +++ b/tests/integration/test_partition/configs/testkeeper.xml @@ -0,0 +1,6 @@ + + + + testkeeper + + \ No newline at end of file diff --git a/tests/integration/test_partition/test.py b/tests/integration/test_partition/test.py index f3df66631a5..320209b5d7e 100644 --- a/tests/integration/test_partition/test.py +++ b/tests/integration/test_partition/test.py @@ -2,9 +2,15 @@ import pytest import logging from helpers.cluster import ClickHouseCluster from helpers.test_tools import TSV +from helpers.test_tools import assert_eq_with_retry cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance("instance") +instance = cluster.add_instance( + "instance", + main_configs=[ + "configs/testkeeper.xml", + ], +) q = instance.query path_to_data = "/var/lib/clickhouse/" @@ -478,3 +484,86 @@ def test_detached_part_dir_exists(started_cluster): == "all_1_1_0\nall_1_1_0_try1\nall_2_2_0\nall_2_2_0_try1\n" ) q("drop table detached_part_dir_exists") + + +def test_make_clone_in_detached(started_cluster): + q( + "create table clone_in_detached (n int, m String) engine=ReplicatedMergeTree('/clone_in_detached', '1') order by n" + ) + + path = path_to_data + "data/default/clone_in_detached/" + + # broken part already detached + q("insert into clone_in_detached values (42, '¯\_(ツ)_/¯')") + instance.exec_in_container(["rm", path + "all_0_0_0/data.bin"]) + instance.exec_in_container( + ["cp", "-r", path + "all_0_0_0", path + "detached/broken_all_0_0_0"] + ) + assert_eq_with_retry(instance, "select * from clone_in_detached", "\n") + assert ["broken_all_0_0_0",] == sorted( + instance.exec_in_container(["ls", path + "detached/"]).strip().split("\n") + ) + + # there's a directory with the same name, but different content + q("insert into clone_in_detached values (43, '¯\_(ツ)_/¯')") + instance.exec_in_container(["rm", path + "all_1_1_0/data.bin"]) + instance.exec_in_container( + ["cp", "-r", path + "all_1_1_0", path + "detached/broken_all_1_1_0"] + ) + instance.exec_in_container(["rm", path + "detached/broken_all_1_1_0/primary.idx"]) + instance.exec_in_container( + ["cp", "-r", path + "all_1_1_0", path + "detached/broken_all_1_1_0_try0"] + ) + instance.exec_in_container( + [ + "bash", + "-c", + "echo 'broken' > {}".format( + path + "detached/broken_all_1_1_0_try0/checksums.txt" + ), + ] + ) + assert_eq_with_retry(instance, "select * from clone_in_detached", "\n") + assert [ + "broken_all_0_0_0", + "broken_all_1_1_0", + "broken_all_1_1_0_try0", + "broken_all_1_1_0_try1", + ] == sorted( + instance.exec_in_container(["ls", path + "detached/"]).strip().split("\n") + ) + + # there are directories with the same name, but different content, and part already detached + q("insert into clone_in_detached values (44, '¯\_(ツ)_/¯')") + instance.exec_in_container(["rm", path + "all_2_2_0/data.bin"]) + instance.exec_in_container( + ["cp", "-r", path + "all_2_2_0", path + "detached/broken_all_2_2_0"] + ) + instance.exec_in_container(["rm", path + "detached/broken_all_2_2_0/primary.idx"]) + instance.exec_in_container( + ["cp", "-r", path + "all_2_2_0", path + "detached/broken_all_2_2_0_try0"] + ) + instance.exec_in_container( 
+ [ + "bash", + "-c", + "echo 'broken' > {}".format( + path + "detached/broken_all_2_2_0_try0/checksums.txt" + ), + ] + ) + instance.exec_in_container( + ["cp", "-r", path + "all_2_2_0", path + "detached/broken_all_2_2_0_try1"] + ) + assert_eq_with_retry(instance, "select * from clone_in_detached", "\n") + assert [ + "broken_all_0_0_0", + "broken_all_1_1_0", + "broken_all_1_1_0_try0", + "broken_all_1_1_0_try1", + "broken_all_2_2_0", + "broken_all_2_2_0_try0", + "broken_all_2_2_0_try1", + ] == sorted( + instance.exec_in_container(["ls", path + "detached/"]).strip().split("\n") + ) diff --git a/tests/integration/test_read_only_table/__init__.py b/tests/integration/test_read_only_table/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_read_only_table/test.py b/tests/integration/test_read_only_table/test.py new file mode 100644 index 00000000000..28abbf6601e --- /dev/null +++ b/tests/integration/test_read_only_table/test.py @@ -0,0 +1,89 @@ +import time +import re +import logging + +import pytest +from helpers.cluster import ClickHouseCluster +from helpers.test_tools import assert_eq_with_retry + +NUM_TABLES = 10 + + +def fill_nodes(nodes): + for table_id in range(NUM_TABLES): + for node in nodes: + node.query( + f""" + CREATE TABLE test_table_{table_id}(a UInt64) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/replicated/{table_id}', '{node.name}') ORDER BY tuple(); + """ + ) + + +cluster = ClickHouseCluster(__file__) +node1 = cluster.add_instance("node1", with_zookeeper=True) +node2 = cluster.add_instance("node2", with_zookeeper=True) +node3 = cluster.add_instance("node3", with_zookeeper=True) +nodes = [node1, node2, node3] + + +def sync_replicas(table): + for node in nodes: + node.query(f"SYSTEM SYNC REPLICA {table}") + + +@pytest.fixture(scope="module") +def start_cluster(): + try: + cluster.start() + + fill_nodes(nodes) + + yield cluster + + except Exception as ex: + print(ex) + + finally: + cluster.shutdown() + + +def test_restart_zookeeper(start_cluster): + + for table_id in range(NUM_TABLES): + node1.query( + f"INSERT INTO test_table_{table_id} VALUES (1), (2), (3), (4), (5);" + ) + + logging.info("Inserted test data and initialized all tables") + + def get_zookeeper_which_node_connected_to(node): + line = str( + node.exec_in_container( + [ + "bash", + "-c", + "lsof -a -i4 -i6 -itcp -w | grep 2181 | grep ESTABLISHED", + ], + privileged=True, + user="root", + ) + ).strip() + + pattern = re.compile(r"zoo[0-9]+", re.IGNORECASE) + result = pattern.findall(line) + assert ( + len(result) == 1 + ), "ClickHouse must be connected only to one Zookeeper at a time" + return result[0] + + node1_zk = get_zookeeper_which_node_connected_to(node1) + + # ClickHouse should +- immediately reconnect to another zookeeper node + cluster.stop_zookeeper_nodes([node1_zk]) + time.sleep(5) + + for table_id in range(NUM_TABLES): + node1.query( + f"INSERT INTO test_table_{table_id} VALUES (6), (7), (8), (9), (10);" + ) diff --git a/tests/integration/test_replicated_merge_tree_hdfs_zero_copy/test.py b/tests/integration/test_replicated_merge_tree_hdfs_zero_copy/test.py index 7d65bed3901..1f81421f93c 100644 --- a/tests/integration/test_replicated_merge_tree_hdfs_zero_copy/test.py +++ b/tests/integration/test_replicated_merge_tree_hdfs_zero_copy/test.py @@ -1,8 +1,14 @@ +import pytest + +# FIXME This test is too flaky +# https://github.com/ClickHouse/ClickHouse/issues/42561 + +pytestmark = pytest.mark.skip + import logging from string import Template import time 
-import pytest from helpers.cluster import ClickHouseCluster from helpers.test_tools import assert_eq_with_retry diff --git a/tests/integration/test_replicated_merge_tree_with_auxiliary_zookeepers/test.py b/tests/integration/test_replicated_merge_tree_with_auxiliary_zookeepers/test.py index c46e6840153..cf76d47157a 100644 --- a/tests/integration/test_replicated_merge_tree_with_auxiliary_zookeepers/test.py +++ b/tests/integration/test_replicated_merge_tree_with_auxiliary_zookeepers/test.py @@ -11,11 +11,13 @@ node1 = cluster.add_instance( "node1", main_configs=["configs/zookeeper_config.xml", "configs/remote_servers.xml"], with_zookeeper=True, + use_keeper=False, ) node2 = cluster.add_instance( "node2", main_configs=["configs/zookeeper_config.xml", "configs/remote_servers.xml"], with_zookeeper=True, + use_keeper=False, ) diff --git a/tests/integration/test_row_policy/test.py b/tests/integration/test_row_policy/test.py index 2e696be4988..1933823f5d2 100644 --- a/tests/integration/test_row_policy/test.py +++ b/tests/integration/test_row_policy/test.py @@ -867,3 +867,30 @@ def test_policy_on_distributed_table_via_role(): assert node.query( "SELECT * FROM dist_tbl SETTINGS prefer_localhost_replica=0", user="user1" ) == TSV([[0], [2], [4], [6], [8], [0], [2], [4], [6], [8]]) + + +def test_row_policy_filter_with_subquery(): + copy_policy_xml("no_filters.xml") + assert node.query("SHOW POLICIES") == "" + + node.query("DROP ROW POLICY IF EXISTS filter_1 ON table1") + node.query("DROP TABLE IF EXISTS table_1") + node.query("DROP TABLE IF EXISTS table_2") + + node.query( + "CREATE TABLE table_1 (x int, y int) ENGINE = MergeTree ORDER BY tuple()" + ) + node.query("INSERT INTO table_1 SELECT number, number * number FROM numbers(10)") + + node.query("CREATE TABLE table_2 (a int) ENGINE=MergeTree ORDER BY tuple()") + node.query("INSERT INTO table_2 VALUES (3), (5)") + + node.query( + "CREATE ROW POLICY filter_1 ON table_1 USING x IN (SELECT a FROM table_2) TO ALL" + ) + + assert node.query("SELECT * FROM table_1") == TSV([[3, 9], [5, 25]]) + + node.query("DROP ROW POLICY filter_1 ON table_1") + node.query("DROP TABLE table_1") + node.query("DROP TABLE table_2") diff --git a/tests/integration/test_s3_zero_copy_ttl/test_ttl_move_memory_usage.py b/tests/integration/test_s3_zero_copy_ttl/test_ttl_move_memory_usage.py new file mode 100644 index 00000000000..a1e10cde031 --- /dev/null +++ b/tests/integration/test_s3_zero_copy_ttl/test_ttl_move_memory_usage.py @@ -0,0 +1,60 @@ +#!/usr/bin/env python3 +import time + +import pytest +from helpers.cluster import ClickHouseCluster + + +single_node_cluster = ClickHouseCluster(__file__) +small_node = single_node_cluster.add_instance( + "small_node", main_configs=["configs/s3.xml"], with_minio=True +) + + +@pytest.fixture(scope="module") +def started_single_node_cluster(): + try: + single_node_cluster.start() + + yield single_node_cluster + finally: + single_node_cluster.shutdown() + + +def test_move_and_s3_memory_usage(started_single_node_cluster): + if small_node.is_built_with_sanitizer() or small_node.is_debug_build(): + pytest.skip("Disabled for debug and sanitizers. 
Too slow.") + + small_node.query( + "CREATE TABLE s3_test_with_ttl (x UInt32, a String codec(NONE), b String codec(NONE), c String codec(NONE), d String codec(NONE), e String codec(NONE)) engine = MergeTree order by x partition by x SETTINGS storage_policy='s3_and_default'" + ) + + for _ in range(10): + small_node.query( + "insert into s3_test_with_ttl select 0, repeat('a', 100), repeat('b', 100), repeat('c', 100), repeat('d', 100), repeat('e', 100) from zeros(400000) settings max_block_size = 8192, max_insert_block_size=10000000, min_insert_block_size_rows=10000000" + ) + + # After this, we should have 5 columns per 10 * 100 * 400000 ~ 400 MB; total ~2G data in partition + small_node.query("optimize table s3_test_with_ttl final") + + small_node.query("system flush logs") + # Will take memory usage from metric_log. + # It is easier then specifying total memory limit (insert queries can hit this limit). + small_node.query("truncate table system.metric_log") + + small_node.query( + "alter table s3_test_with_ttl move partition 0 to volume 'external'", + settings={"send_logs_level": "error"}, + ) + small_node.query("system flush logs") + max_usage = small_node.query( + "select max(CurrentMetric_MemoryTracking) from system.metric_log" + ) + # 3G limit is a big one. However, we can hit it anyway with parallel s3 writes enabled. + # Also actual value can be bigger because of memory drift. + # Increase it a little bit if test fails. + assert int(max_usage) < 3e9 + res = small_node.query( + "select * from system.errors where last_error_message like '%Memory limit%' limit 1" + ) + assert res == "" diff --git a/tests/integration/test_storage_nats/test.py b/tests/integration/test_storage_nats/test.py index 63dde8922a6..77db3008524 100644 --- a/tests/integration/test_storage_nats/test.py +++ b/tests/integration/test_storage_nats/test.py @@ -1,3 +1,10 @@ +import pytest + +# FIXME This test is too flaky +# https://github.com/ClickHouse/ClickHouse/issues/39185 + +pytestmark = pytest.mark.skip + import json import os.path as p import random @@ -9,7 +16,6 @@ from random import randrange import math import asyncio -import pytest from google.protobuf.internal.encoder import _VarintBytes from helpers.client import QueryRuntimeException from helpers.cluster import ClickHouseCluster, check_nats_is_available, nats_connect_ssl diff --git a/tests/performance/line_as_string_parsing.xml b/tests/performance/line_as_string_parsing.xml new file mode 100644 index 00000000000..d9fa1d4fa6e --- /dev/null +++ b/tests/performance/line_as_string_parsing.xml @@ -0,0 +1,9 @@ + + +INSERT INTO FUNCTION file(test_line_as_string.tsv) SELECT randomString(1000) FROM numbers(1000000) SETTINGS engine_file_truncate_on_insert=1 + +SELECT * FROM file(test_line_as_string.tsv, LineAsString) FORMAT Null + +INSERT INTO FUNCTION file(test_line_as_string.tsv) SELECT * FROM numbers(0) SETTINGS engine_file_truncate_on_insert=1 + + diff --git a/tests/performance/url_hits.xml b/tests/performance/url_hits.xml index 4a07c38b83f..46b39f3a6e9 100644 --- a/tests/performance/url_hits.xml +++ b/tests/performance/url_hits.xml @@ -13,10 +13,14 @@ protocol domain + domainRFC domainWithoutWWW + domainWithoutWWWRFC topLevelDomain firstSignificantSubdomain + firstSignificantSubdomainRFC cutToFirstSignificantSubdomain + cutToFirstSignificantSubdomainRFC path pathFull queryString diff --git a/tests/queries/0_stateless/00396_uuid.reference b/tests/queries/0_stateless/00396_uuid.reference index d70322ec4c1..588f11cb466 100644 --- 
a/tests/queries/0_stateless/00396_uuid.reference +++ b/tests/queries/0_stateless/00396_uuid.reference @@ -6,3 +6,8 @@ 01234567-89ab-cdef-0123-456789abcdef 01234567-89ab-cdef-0123-456789abcdef 01234567-89ab-cdef-0123-456789abcdef 3f1ed72e-f7fe-4459-9cbe-95fe9298f845 1 +-- UUID variants -- +00112233445566778899AABBCCDDEEFF +33221100554477668899AABBCCDDEEFF +00112233-4455-6677-8899-aabbccddeeff +00112233-4455-6677-8899-aabbccddeeff diff --git a/tests/queries/0_stateless/00396_uuid.sql b/tests/queries/0_stateless/00396_uuid.sql index 9d8b48bddb0..4ad659e2464 100644 --- a/tests/queries/0_stateless/00396_uuid.sql +++ b/tests/queries/0_stateless/00396_uuid.sql @@ -11,3 +11,9 @@ with generateUUIDv4() as uuid, identity(lower(hex(reverse(reinterpretAsString(uuid))))) as str, reinterpretAsUUID(reverse(unhex(str))) as uuid2 select uuid = uuid2; + +select '-- UUID variants --'; +select hex(UUIDStringToNum('00112233-4455-6677-8899-aabbccddeeff', 1)); +select hex(UUIDStringToNum('00112233-4455-6677-8899-aabbccddeeff', 2)); +select UUIDNumToString(UUIDStringToNum('00112233-4455-6677-8899-aabbccddeeff', 1), 1); +select UUIDNumToString(UUIDStringToNum('00112233-4455-6677-8899-aabbccddeeff', 2), 2); diff --git a/tests/queries/0_stateless/00398_url_functions.reference b/tests/queries/0_stateless/00398_url_functions.reference index feba95fb1b3..39d740e55cd 100644 --- a/tests/queries/0_stateless/00398_url_functions.reference +++ b/tests/queries/0_stateless/00398_url_functions.reference @@ -8,6 +8,32 @@ http ====HOST==== www.example.com + + + + + + + + +www.example.com +127.0.0.1 +www.example.com +www.example.com +www.example.com +example.com +example.com +example.com +www.example.com +example.com +example.com +example.com +example.com +example.com +example.com + + + www.example.com 127.0.0.1 www.example.com @@ -98,8 +124,25 @@ example.com example.com com +example.com +example.com +example.com +example.com +example.com +example.com +example.com +example.com +example.com +com + ====CUT TO FIRST SIGNIFICANT SUBDOMAIN WITH WWW==== +www.com +example.com +example.com +example.com +example.com + www.com example.com example.com diff --git a/tests/queries/0_stateless/00398_url_functions.sql b/tests/queries/0_stateless/00398_url_functions.sql.j2 similarity index 72% rename from tests/queries/0_stateless/00398_url_functions.sql rename to tests/queries/0_stateless/00398_url_functions.sql.j2 index 66fe591bb58..dd7da2ce6ad 100644 --- a/tests/queries/0_stateless/00398_url_functions.sql +++ b/tests/queries/0_stateless/00398_url_functions.sql.j2 @@ -7,16 +7,28 @@ SELECT protocol('http://127.0.0.1:443/') AS Scheme; SELECT protocol('//127.0.0.1:443/') AS Scheme; SELECT '====HOST===='; -SELECT domain('http://paul@www.example.com:80/') AS Host; -SELECT domain('http:/paul/example/com') AS Host; -SELECT domain('http://www.example.com?q=4') AS Host; -SELECT domain('http://127.0.0.1:443/') AS Host; -SELECT domain('//www.example.com') AS Host; -SELECT domain('//paul@www.example.com') AS Host; -SELECT domain('www.example.com') as Host; -SELECT domain('example.com') as Host; -SELECT domainWithoutWWW('//paul@www.example.com') AS Host; -SELECT domainWithoutWWW('http://paul@www.example.com:80/') AS Host; +{% for suffix in ['', 'RFC'] -%} + +SELECT domain{{ suffix }}('http://paul@www.example.com:80/') AS Host; +SELECT domain{{ suffix }}('user:password@example.com:8080') AS Host; +SELECT domain{{ suffix }}('http://user:password@example.com:8080') AS Host; +SELECT domain{{ suffix 
}}('http://user:password@example.com:8080/path?query=value#fragment') AS Host; +SELECT domain{{ suffix }}('newuser:@example.com') AS Host; +SELECT domain{{ suffix }}('http://:pass@example.com') AS Host; +SELECT domain{{ suffix }}(':newpass@example.com') AS Host; +SELECT domain{{ suffix }}('http://user:pass@example@.com') AS Host; +SELECT domain{{ suffix }}('http://user:pass:example.com') AS Host; +SELECT domain{{ suffix }}('http:/paul/example/com') AS Host; +SELECT domain{{ suffix }}('http://www.example.com?q=4') AS Host; +SELECT domain{{ suffix }}('http://127.0.0.1:443/') AS Host; +SELECT domain{{ suffix }}('//www.example.com') AS Host; +SELECT domain{{ suffix }}('//paul@www.example.com') AS Host; +SELECT domain{{ suffix }}('www.example.com') as Host; +SELECT domain{{ suffix }}('example.com') as Host; +SELECT domainWithoutWWW{{ suffix }}('//paul@www.example.com') AS Host; +SELECT domainWithoutWWW{{ suffix }}('http://paul@www.example.com:80/') AS Host; + +{% endfor %} SELECT '====NETLOC===='; SELECT netloc('http://paul@www.example.com:80/') AS Netloc; @@ -95,25 +107,31 @@ SELECT decodeURLComponent(encodeURLComponent('http://paul@127.0.0.1/?query=hello SELECT decodeURLFormComponent(encodeURLFormComponent('http://paul@127.0.0.1/?query=hello world foo+bar#a=b')); SELECT '====CUT TO FIRST SIGNIFICANT SUBDOMAIN===='; -SELECT cutToFirstSignificantSubdomain('http://www.example.com'); -SELECT cutToFirstSignificantSubdomain('http://www.example.com:1234'); -SELECT cutToFirstSignificantSubdomain('http://www.example.com/a/b/c'); -SELECT cutToFirstSignificantSubdomain('http://www.example.com/a/b/c?a=b'); -SELECT cutToFirstSignificantSubdomain('http://www.example.com/a/b/c?a=b#d=f'); -SELECT cutToFirstSignificantSubdomain('http://paul@www.example.com/a/b/c?a=b#d=f'); -SELECT cutToFirstSignificantSubdomain('//paul@www.example.com/a/b/c?a=b#d=f'); -SELECT cutToFirstSignificantSubdomain('www.example.com'); -SELECT cutToFirstSignificantSubdomain('example.com'); -SELECT cutToFirstSignificantSubdomain('www.com'); -SELECT cutToFirstSignificantSubdomain('com'); + +{% for suffix in ['', 'RFC'] -%} +SELECT cutToFirstSignificantSubdomain{{ suffix }}('http://www.example.com'); +SELECT cutToFirstSignificantSubdomain{{ suffix }}('http://www.example.com:1234'); +SELECT cutToFirstSignificantSubdomain{{ suffix }}('http://www.example.com/a/b/c'); +SELECT cutToFirstSignificantSubdomain{{ suffix }}('http://www.example.com/a/b/c?a=b'); +SELECT cutToFirstSignificantSubdomain{{ suffix }}('http://www.example.com/a/b/c?a=b#d=f'); +SELECT cutToFirstSignificantSubdomain{{ suffix }}('http://paul@www.example.com/a/b/c?a=b#d=f'); +SELECT cutToFirstSignificantSubdomain{{ suffix }}('//paul@www.example.com/a/b/c?a=b#d=f'); +SELECT cutToFirstSignificantSubdomain{{ suffix }}('www.example.com'); +SELECT cutToFirstSignificantSubdomain{{ suffix }}('example.com'); +SELECT cutToFirstSignificantSubdomain{{ suffix }}('www.com'); +SELECT cutToFirstSignificantSubdomain{{ suffix }}('com'); +{% endfor %} SELECT '====CUT TO FIRST SIGNIFICANT SUBDOMAIN WITH WWW===='; -SELECT cutToFirstSignificantSubdomainWithWWW('http://com'); -SELECT cutToFirstSignificantSubdomainWithWWW('http://www.com'); -SELECT cutToFirstSignificantSubdomainWithWWW('http://www.example.com'); -SELECT cutToFirstSignificantSubdomainWithWWW('http://www.foo.example.com'); -SELECT cutToFirstSignificantSubdomainWithWWW('http://www.example.com:1'); -SELECT cutToFirstSignificantSubdomainWithWWW('http://www.example.com/'); + +{% for suffix in ['', 'RFC'] -%} +SELECT 
cutToFirstSignificantSubdomainWithWWW{{ suffix }}('http://com'); +SELECT cutToFirstSignificantSubdomainWithWWW{{ suffix }}('http://www.com'); +SELECT cutToFirstSignificantSubdomainWithWWW{{ suffix }}('http://www.example.com'); +SELECT cutToFirstSignificantSubdomainWithWWW{{ suffix }}('http://www.foo.example.com'); +SELECT cutToFirstSignificantSubdomainWithWWW{{ suffix }}('http://www.example.com:1'); +SELECT cutToFirstSignificantSubdomainWithWWW{{ suffix }}('http://www.example.com/'); +{% endfor %} SELECT '====CUT WWW===='; SELECT cutWWW('http://www.example.com'); diff --git a/tests/queries/0_stateless/00463_long_sessions_in_http_interface.reference b/tests/queries/0_stateless/00463_long_sessions_in_http_interface.reference index 53cdf1e9393..a14d334a483 100644 --- a/tests/queries/0_stateless/00463_long_sessions_in_http_interface.reference +++ b/tests/queries/0_stateless/00463_long_sessions_in_http_interface.reference @@ -1 +1,28 @@ -PASSED +Using non-existent session with the 'session_check' flag will throw exception: +1 +Using non-existent session without the 'session_check' flag will create a new session: +1 +1 +The 'session_timeout' parameter is checked for validity and for the maximum value: +1 +1 +1 +Valid cases are accepted: +1 +1 +1 +Sessions are local per user: +1 +Hello +World +And cannot be accessed for a non-existent user: +1 +The temporary tables created in a session are not accessible without entering this session: +1 +A session successfully expire after a timeout: +111 +A session successfully expire after a timeout and the session's temporary table shadows the permanent table: +HelloWorld +A session cannot be used by concurrent connections: +1 +1 diff --git a/tests/queries/0_stateless/00463_long_sessions_in_http_interface.sh b/tests/queries/0_stateless/00463_long_sessions_in_http_interface.sh index e9f486fbb73..89da84a5bdd 100755 --- a/tests/queries/0_stateless/00463_long_sessions_in_http_interface.sh +++ b/tests/queries/0_stateless/00463_long_sessions_in_http_interface.sh @@ -1,113 +1,87 @@ #!/usr/bin/env bash # Tags: long, no-parallel +# shellcheck disable=SC2015 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -request() { - local url="$1" - local select="$2" - ${CLICKHOUSE_CURL} --silent "$url" --data "$select" -} +echo "Using non-existent session with the 'session_check' flag will throw exception:" +${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=nonexistent&session_check=1" --data-binary "SELECT 1" | grep -c -F 'Session not found' -create_temporary_table() { - local url="$1" - request "$url" "CREATE TEMPORARY TABLE temp (x String)" - request "$url" "INSERT INTO temp VALUES ('Hello'), ('World')" -} +echo "Using non-existent session without the 'session_check' flag will create a new session:" +${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_1" --data-binary "SELECT 1" +${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_1&session_check=0" --data-binary "SELECT 1" +echo "The 'session_timeout' parameter is checked for validity and for the maximum value:" +${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_2&session_timeout=string" --data-binary "SELECT 1" | grep -c -F 'Invalid session timeout' +${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_2&session_timeout=3601" --data-binary "SELECT 1" | grep -c -F 'Maximum session timeout' +${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_2&session_timeout=-1" --data-binary "SELECT 1" | grep -c -F 'Invalid session timeout' -check() { - local url="$1" - local select="$2" - local output="$3" - local expected_result="$4" - local message="$5" - result=$(request "$url" "$select" | grep --count "$output") - if [ "$result" -ne "$expected_result" ]; then - echo "FAILED: $message" - exit 1 - fi -} +echo "Valid cases are accepted:" +${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_3&session_timeout=0" --data-binary "SELECT 1" +${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_4&session_timeout=3600" --data-binary "SELECT 1" +${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_5&session_timeout=60" --data-binary "SELECT 1" +echo "Sessions are local per user:" +${CLICKHOUSE_CLIENT} --multiquery --query "DROP USER IF EXISTS test_00463; CREATE USER test_00463; GRANT ALL ON *.* TO test_00463;" -address=${CLICKHOUSE_HOST} -port=${CLICKHOUSE_PORT_HTTP} -url="${CLICKHOUSE_PORT_HTTP_PROTO}://$address:$port/" -session="?session_id=test_$$" # use PID for session ID -select="SELECT * FROM system.settings WHERE name = 'max_rows_to_read'" -select_from_temporary_table="SELECT * FROM temp ORDER BY x" -select_from_non_existent_table="SELECT * FROM no_such_table ORDER BY x" +${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_6&session_timeout=600" --data-binary "CREATE TEMPORARY TABLE t (s String)" +${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_6" --data-binary "INSERT INTO t VALUES ('Hello')" +${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&user=test_00463&session_id=${CLICKHOUSE_DATABASE}_6&session_check=1" --data-binary "SELECT 1" | grep -c -F 'Session not found' +${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&user=test_00463&session_id=${CLICKHOUSE_DATABASE}_6&session_timeout=600" --data-binary "CREATE TEMPORARY TABLE t (s String)" +${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&user=test_00463&session_id=${CLICKHOUSE_DATABASE}_6" --data-binary "INSERT INTO t VALUES ('World')" -check "$url?session_id=no_such_session_$$&session_check=1" "$select" "Exception.*Session not 
found" 1 "session_check=1 does not work." -check "$url$session&session_check=0" "$select" "Exception" 0 "session_check=0 does not work." +${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_6" --data-binary "SELECT * FROM t" +${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&user=test_00463&session_id=${CLICKHOUSE_DATABASE}_6" --data-binary "SELECT * FROM t" -request "$url""$session" "SET max_rows_to_read=7777777" +${CLICKHOUSE_CLIENT} --multiquery --query "DROP USER test_00463"; -check "$url$session&session_timeout=string" "$select" "Exception.*Invalid session timeout" 1 "Non-numeric value accepted as a timeout." -check "$url$session&session_timeout=3601" "$select" "Exception.*Maximum session timeout*" 1 "More then 3600 seconds accepted as a timeout." -check "$url$session&session_timeout=-1" "$select" "Exception.*Invalid session timeout" 1 "Negative timeout accepted." -check "$url$session&session_timeout=0" "$select" "Exception" 0 "Zero timeout not accepted." -check "$url$session&session_timeout=3600" "$select" "Exception" 0 "3600 second timeout not accepted." -check "$url$session&session_timeout=60" "$select" "Exception" 0 "60 second timeout not accepted." +echo "And cannot be accessed for a non-existent user:" +${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&user=test_00463&session_id=${CLICKHOUSE_DATABASE}_6" --data-binary "SELECT * FROM t" | grep -c -F 'Exception' -check "$url""$session" "$select" "7777777" 1 "Failed to reuse session." -# Workaround here -# TODO: move the test to integration test or add readonly user to test environment -if [[ -z $(request "$url?user=readonly" "SELECT ''") ]]; then - # We have readonly user - check "$url$session&user=readonly&session_check=1" "$select" "Exception.*Session not found" 1 "Session is accessable for another user." -else - check "$url$session&user=readonly&session_check=1" "$select" "Exception.*Unknown user*" 1 "Session is accessable for unknown user." -fi +echo "The temporary tables created in a session are not accessible without entering this session:" +${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}" --data-binary "SELECT * FROM t" | grep -c -F 'Exception' -create_temporary_table "$url""$session" -check "$url""$session" "$select_from_temporary_table" "Hello" 1 "Failed to reuse a temporary table for session." - -check "$url?session_id=another_session_$$" "$select_from_temporary_table" "Exception.*Table .* doesn't exist." 1 "Temporary table is visible for another table." - - -( ( -cat </dev/null 2>/dev/null) & -sleep 1 -check "$url""$session" "$select" "Exception.*Session is locked" 1 "Double access to the same session." - - -session="?session_id=test_timeout_$$" - -create_temporary_table "$url$session&session_timeout=1" -check "$url$session&session_timeout=1" "$select_from_temporary_table" "Hello" 1 "Failed to reuse a temporary table for session." -sleep 3 -check "$url$session&session_check=1" "$select" "Exception.*Session not found" 1 "Session did not expire on time." - -create_temporary_table "$url$session&session_timeout=2" -for _ in $(seq 1 3); do - check "$url$session&session_timeout=2" "$select_from_temporary_table" "Hello" 1 "Session expired too early." - sleep 1 +echo "A session successfully expire after a timeout:" +# An infinite loop is required to make the test reliable. 
We will check that the timeout corresponds to the observed time at least once +while true +do + ( + ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_7&session_timeout=1" --data-binary "SELECT 1" + ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_7&session_check=1" --data-binary "SELECT 1" + sleep 3 + ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_7&session_check=1" --data-binary "SELECT 1" | grep -c -F 'Session not found' + ) | tr -d '\n' | grep -F '111' && break || sleep 1 done -sleep 3 -check "$url$session&session_check=1" "$select" "Exception.*Session not found" 1 "Session did not expire on time." -create_temporary_table "$url$session&session_timeout=2" -for _ in $(seq 1 5); do - check "$url$session&session_timeout=2" "$select_from_non_existent_table" "Exception.*Table .* doesn't exist." 1 "Session expired too early." - sleep 1 +echo "A session successfully expire after a timeout and the session's temporary table shadows the permanent table:" +# An infinite loop is required to make the test reliable. We will check that the timeout corresponds to the observed time at least once +${CLICKHOUSE_CLIENT} --multiquery --query "DROP TABLE IF EXISTS t; CREATE TABLE t (s String) ENGINE = Memory; INSERT INTO t VALUES ('World');" +while true +do + ( + ${CLICKHOUSE_CURL} -X POST -sS "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_8&session_timeout=1" --data-binary "CREATE TEMPORARY TABLE t (s String)" + ${CLICKHOUSE_CURL} -X POST -sS "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_8" --data-binary "INSERT INTO t VALUES ('Hello')" + ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_8" --data-binary "SELECT * FROM t" + sleep 3 + ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_8" --data-binary "SELECT * FROM t" + ) | tr -d '\n' | grep -F 'HelloWorld' && break || sleep 1 done -check "$url$session&session_timeout=2" "$select_from_temporary_table" "Hello" 1 "Session expired too early. Failed to update timeout in case of exceptions." -sleep 4 -check "$url$session&session_check=1" "$select" "Exception.*Session not found" 1 "Session did not expire on time." +${CLICKHOUSE_CLIENT} --multiquery --query "DROP TABLE t" +echo "A session cannot be used by concurrent connections:" -echo "PASSED" +${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_9&query_id=${CLICKHOUSE_DATABASE}_9" --data-binary "SELECT count() FROM system.numbers" >/dev/null & + +# An infinite loop is required to make the test reliable. 
We will ensure that at least once the query on the line above has started before this check +while true +do + ${CLICKHOUSE_CLIENT} --query "SELECT count() > 0 FROM system.processes WHERE query_id = '${CLICKHOUSE_DATABASE}_9'" | grep -F '1' && break || sleep 1 +done + +${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_9" --data-binary "SELECT 1" | grep -c -F 'Session is locked' +${CLICKHOUSE_CLIENT} --multiquery --query "KILL QUERY WHERE query_id = '${CLICKHOUSE_DATABASE}_9' SYNC FORMAT Null"; +wait diff --git a/tests/queries/0_stateless/00700_to_decimal_or_something.reference b/tests/queries/0_stateless/00700_to_decimal_or_something.reference index 89ded7bd6d4..dec36ed5df5 100644 --- a/tests/queries/0_stateless/00700_to_decimal_or_something.reference +++ b/tests/queries/0_stateless/00700_to_decimal_or_something.reference @@ -1,5 +1,5 @@ 1.1 1.1 1.1 -0 +1 0 0.42 0 0.42 0 0.42 @@ -13,7 +13,7 @@ 0 ---- 1.1 1.1 1.1 -\N +1 \N -0.42 \N -0.42 \N -0.42 diff --git a/tests/queries/0_stateless/00705_drop_create_merge_tree.reference b/tests/queries/0_stateless/00705_drop_create_merge_tree.reference index 8b137891791..e69de29bb2d 100644 --- a/tests/queries/0_stateless/00705_drop_create_merge_tree.reference +++ b/tests/queries/0_stateless/00705_drop_create_merge_tree.reference @@ -1 +0,0 @@ - diff --git a/tests/queries/0_stateless/00705_drop_create_merge_tree.sh b/tests/queries/0_stateless/00705_drop_create_merge_tree.sh index 146d6e54c0b..d7754091290 100755 --- a/tests/queries/0_stateless/00705_drop_create_merge_tree.sh +++ b/tests/queries/0_stateless/00705_drop_create_merge_tree.sh @@ -1,39 +1,12 @@ #!/usr/bin/env bash # Tags: no-fasttest -set -e - CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -function stress() -{ - # We set up a signal handler to make sure to wait for all queries to be finished before ending - CONTINUE=true - handle_interruption() - { - CONTINUE=false - } - trap handle_interruption INT - - while $CONTINUE; do - ${CLICKHOUSE_CLIENT} --query "CREATE TABLE IF NOT EXISTS table (x UInt8) ENGINE = MergeTree ORDER BY tuple()" 2>/dev/null - ${CLICKHOUSE_CLIENT} --query "DROP TABLE table" 2>/dev/null - done - - trap - INT -} - -# https://stackoverflow.com/questions/9954794/execute-a-shell-function-with-timeout -export -f stress - -for _ in {1..5}; do - # Ten seconds are just barely enough to reproduce the issue in most of runs. 
- timeout -s INT 10 bash -c stress & -done - +yes 'CREATE TABLE IF NOT EXISTS table (x UInt8) ENGINE = MergeTree ORDER BY tuple();' | head -n 1000 | $CLICKHOUSE_CLIENT --ignore-error -nm 2>/dev/null & +yes 'DROP TABLE table;' | head -n 1000 | $CLICKHOUSE_CLIENT --ignore-error -nm 2>/dev/null & wait -echo -${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS table"; +${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS table" diff --git a/tests/queries/0_stateless/00718_format_datetime.reference b/tests/queries/0_stateless/00718_format_datetime.reference index 4f12a46d7c0..bc98dd59d5f 100644 --- a/tests/queries/0_stateless/00718_format_datetime.reference +++ b/tests/queries/0_stateless/00718_format_datetime.reference @@ -1,33 +1,34 @@ -20 +20 20 +02 02 +01/02/18 01/02/18 + 2 2 +2018-01-02 2018-01-02 +22 00 02 -01/02/18 - 2 -2018-01-02 -22 -02 -10 +10 12 11 12 -001 -366 -01 -33 -\n -AM +001 001 +366 366 +01 01 +33 00 +\n \n +AM AM AM PM -22:33 -44 -\t -22:33:44 -1 7 -01 01 53 52 -1 0 -18 -2018 -% -no formatting pattern +22:33 00:00 +44 00 +\t \t +22:33:44 00:00:00 +1 7 1 7 +01 01 53 52 01 01 53 52 +1 0 1 0 +18 18 +2018 2018 +% % +no formatting pattern no formatting pattern 2018-01-01 00:00:00 +1927-01-01 00:00:00 2018-01-01 01:00:00 2018-01-01 04:00:00 +0000 -1100 diff --git a/tests/queries/0_stateless/00718_format_datetime.sql b/tests/queries/0_stateless/00718_format_datetime.sql index 7ed1f0abea4..deb5fb96c6c 100644 --- a/tests/queries/0_stateless/00718_format_datetime.sql +++ b/tests/queries/0_stateless/00718_format_datetime.sql @@ -8,38 +8,44 @@ SELECT formatDateTime(now(), 'unescaped %'); -- { serverError 36 } SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%U'); -- { serverError 48 } SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%W'); -- { serverError 48 } -SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%C'); -SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%d'); -SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%D'); -SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%e'); -SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%F'); -SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%H'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%C'), formatDateTime(toDate32('2018-01-02'), '%C'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%d'), formatDateTime(toDate32('2018-01-02'), '%d'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%D'), formatDateTime(toDate32('2018-01-02'), '%D'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%e'), formatDateTime(toDate32('2018-01-02'), '%e'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%F'), formatDateTime(toDate32('2018-01-02'), '%F'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%H'), formatDateTime(toDate32('2018-01-02'), '%H'); SELECT formatDateTime(toDateTime('2018-01-02 02:33:44'), '%H'); -SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%I'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%I'), formatDateTime(toDate32('2018-01-02'), '%I'); SELECT formatDateTime(toDateTime('2018-01-02 11:33:44'), '%I'); SELECT formatDateTime(toDateTime('2018-01-02 00:33:44'), '%I'); -SELECT formatDateTime(toDateTime('2018-01-01 00:33:44'), '%j'); -SELECT formatDateTime(toDateTime('2000-12-31 00:33:44'), '%j'); -SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%m'); -SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%M'); -SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), 
'%n'); -SELECT formatDateTime(toDateTime('2018-01-02 00:33:44'), '%p'); +SELECT formatDateTime(toDateTime('2018-01-01 00:33:44'), '%j'), formatDateTime(toDate32('2018-01-01'), '%j'); +SELECT formatDateTime(toDateTime('2000-12-31 00:33:44'), '%j'), formatDateTime(toDate32('2000-12-31'), '%j'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%m'), formatDateTime(toDate32('2018-01-02'), '%m'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%M'), formatDateTime(toDate32('2018-01-02'), '%M'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%n'), formatDateTime(toDate32('2018-01-02'), '%n'); +SELECT formatDateTime(toDateTime('2018-01-02 00:33:44'), '%p'), formatDateTime(toDateTime('2018-01-02'), '%p'); SELECT formatDateTime(toDateTime('2018-01-02 11:33:44'), '%p'); SELECT formatDateTime(toDateTime('2018-01-02 12:33:44'), '%p'); -SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%R'); -SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%S'); -SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%t'); -SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%T'); -SELECT formatDateTime(toDateTime('2018-01-01 22:33:44'), '%u'), formatDateTime(toDateTime('2018-01-07 22:33:44'), '%u'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%R'), formatDateTime(toDate32('2018-01-02'), '%R'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%S'), formatDateTime(toDate32('2018-01-02'), '%S'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%t'), formatDateTime(toDate32('2018-01-02'), '%t'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%T'), formatDateTime(toDate32('2018-01-02'), '%T'); +SELECT formatDateTime(toDateTime('2018-01-01 22:33:44'), '%u'), formatDateTime(toDateTime('2018-01-07 22:33:44'), '%u'), + formatDateTime(toDate32('2018-01-01'), '%u'), formatDateTime(toDate32('2018-01-07'), '%u'); SELECT formatDateTime(toDateTime('1996-01-01 22:33:44'), '%V'), formatDateTime(toDateTime('1996-12-31 22:33:44'), '%V'), - formatDateTime(toDateTime('1999-01-01 22:33:44'), '%V'), formatDateTime(toDateTime('1999-12-31 22:33:44'), '%V'); -SELECT formatDateTime(toDateTime('2018-01-01 22:33:44'), '%w'), formatDateTime(toDateTime('2018-01-07 22:33:44'), '%w'); -SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%y'); -SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%Y'); -SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%%'); -SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), 'no formatting pattern'); + formatDateTime(toDateTime('1999-01-01 22:33:44'), '%V'), formatDateTime(toDateTime('1999-12-31 22:33:44'), '%V'), + formatDateTime(toDate32('1996-01-01'), '%V'), formatDateTime(toDate32('1996-12-31'), '%V'), + formatDateTime(toDate32('1999-01-01'), '%V'), formatDateTime(toDate32('1999-12-31'), '%V'); +SELECT formatDateTime(toDateTime('2018-01-01 22:33:44'), '%w'), formatDateTime(toDateTime('2018-01-07 22:33:44'), '%w'), + formatDateTime(toDate32('2018-01-01'), '%w'), formatDateTime(toDate32('2018-01-07'), '%w'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%y'), formatDateTime(toDate32('2018-01-02'), '%y'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%Y'), formatDateTime(toDate32('2018-01-02'), '%Y'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%%'), formatDateTime(toDate32('2018-01-02'), '%%'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), 'no formatting pattern'), formatDateTime(toDate32('2018-01-02'), 'no formatting pattern'); 
SELECT formatDateTime(toDate('2018-01-01'), '%F %T'); +SELECT formatDateTime(toDate32('1927-01-01'), '%F %T'); + SELECT formatDateTime(toDateTime('2018-01-01 01:00:00', 'UTC'), '%F %T', 'UTC'), formatDateTime(toDateTime('2018-01-01 01:00:00', 'UTC'), '%F %T', 'Asia/Istanbul'); diff --git a/tests/queries/0_stateless/00753_system_columns_and_system_tables_long.reference b/tests/queries/0_stateless/00753_system_columns_and_system_tables_long.reference index dba46e48e43..58f8b7abfb3 100644 --- a/tests/queries/0_stateless/00753_system_columns_and_system_tables_long.reference +++ b/tests/queries/0_stateless/00753_system_columns_and_system_tables_long.reference @@ -35,7 +35,7 @@ Check total_bytes/total_rows for StripeLog 113 1 Check total_bytes/total_rows for Memory 0 0 -64 1 +256 1 Check total_bytes/total_rows for Buffer 0 0 256 50 diff --git a/tests/queries/0_stateless/00900_long_parquet.reference b/tests/queries/0_stateless/00900_long_parquet.reference index 4dfc726145e..bbdad7243bd 100644 --- a/tests/queries/0_stateless/00900_long_parquet.reference +++ b/tests/queries/0_stateless/00900_long_parquet.reference @@ -44,12 +44,12 @@ converted: diff: dest: 79 81 82 83 84 85 86 87 88 89 str01\0\0\0\0\0\0\0\0\0\0 fstr1\0\0\0\0\0\0\0\0\0\0 2003-03-04 2004-05-06 00:00:00 2004-05-06 07:08:09.012000000 -80 81 82 83 84 85 86 87 88 89 str02 fstr2\0\0\0\0\0\0\0\0\0\0 2149-06-06 2006-08-09 10:11:12 2006-08-09 10:11:12.345000000 +80 81 82 83 84 85 86 87 88 89 str02 fstr2\0\0\0\0\0\0\0\0\0\0 2005-03-04 2006-08-09 10:11:12 2006-08-09 10:11:12.345000000 min: --128 0 0 0 0 0 0 0 -1 -1 string-1\0\0\0\0\0\0\0 fixedstring-1\0\0 2003-04-05 2149-06-06 2003-02-03 04:05:06.789000000 --108 108 8 92 -8 108 -40 -116 -1 -1 string-0\0\0\0\0\0\0\0 fixedstring\0\0\0\0 2001-02-03 2149-06-06 2002-02-03 04:05:06.789000000 +-128 0 0 0 0 0 0 0 -1 -1 string-1\0\0\0\0\0\0\0 fixedstring-1\0\0 2003-04-05 2003-02-03 2003-02-03 04:05:06.789000000 +-108 108 8 92 -8 108 -40 -116 -1 -1 string-0\0\0\0\0\0\0\0 fixedstring\0\0\0\0 2001-02-03 2002-02-03 2002-02-03 04:05:06.789000000 79 81 82 83 84 85 86 87 88 89 str01\0\0\0\0\0\0\0\0\0\0 fstr1\0\0\0\0\0\0\0\0\0\0 2003-03-04 2004-05-06 2004-05-06 07:08:09.012000000 -127 -1 -1 -1 -1 -1 -1 -1 -1 -1 string-2\0\0\0\0\0\0\0 fixedstring-2\0\0 2004-06-07 2149-06-06 2004-02-03 04:05:06.789000000 +127 -1 -1 -1 -1 -1 -1 -1 -1 -1 string-2\0\0\0\0\0\0\0 fixedstring-2\0\0 2004-06-07 2004-02-03 2004-02-03 04:05:06.789000000 max: -128 0 -32768 0 -2147483648 0 -9223372036854775808 0 -1 -1 string-1 fixedstring-1\0\0 2003-04-05 00:00:00 2003-02-03 04:05:06 2003-02-03 04:05:06.789000000 -108 108 -1016 1116 -1032 1132 -1064 1164 -1 -1 string-0 fixedstring\0\0\0\0 2001-02-03 00:00:00 2002-02-03 04:05:06 2002-02-03 04:05:06.789000000 diff --git a/tests/queries/0_stateless/00918_json_functions.reference b/tests/queries/0_stateless/00918_json_functions.reference index 8e6fc3914e0..fc03457c677 100644 --- a/tests/queries/0_stateless/00918_json_functions.reference +++ b/tests/queries/0_stateless/00918_json_functions.reference @@ -61,11 +61,47 @@ Friday (1,'417ddc5d-e556-4d27-95dd-a34d84e46a50') hello (3333.6,'test') +(3333.6,'test') +(3333.6333333333,'test') (3333.6333333333,'test') 123456.1234 Decimal(20, 4) +123456.1234 Decimal(20, 4) +123456789012345.12 Decimal(30, 4) +(1234567890.1234567890123456789,'test') Tuple(a Decimal(35, 20), b LowCardinality(String)) +(1234567890.12345678901234567890123456789,'test') Tuple(a Decimal(45, 30), b LowCardinality(String)) 123456789012345.1136 123456789012345.1136 
1234567890.12345677879616925706 (1234567890.12345677879616925706,'test') 1234567890.123456695758468374595199311875 (1234567890.123456695758468374595199311875,'test') +-1234567890 Int32 +1234567890 UInt32 +-1234567890123456789 Int64 +1234567890123456789 UInt64 +-1234567890123456789 Int128 +1234567890123456789 UInt128 +-1234567890123456789 Int256 +1234567890123456789 UInt256 +-123456789 Int32 +123456789 UInt32 +-123456789012 Int64 +123456789012 UInt64 +-123456789012 Int128 +123456789012 UInt128 +-123456789012 Int256 +123456789012 UInt256 +-123456789 Int32 +123456789 UInt32 +-1234567890123456789 Int64 +1234567890123456789 UInt64 +-12345678901234567890123456789012345678 Int128 +12345678901234567890123456789012345678 UInt128 +-11345678901234567890123456789012345678901234567890123456789012345678901234567 Int256 +11345678901234567890123456789012345678901234567890123456789012345678901234567 UInt256 +0 Int32 +0 UInt32 +0 Int64 +0 UInt64 +false Bool +true Bool --JSONExtractKeysAndValues-- [('a','hello'),('b','[-100,200,300]')] [('b',[-100,200,300])] @@ -217,3 +253,4 @@ e u v --show error: type should be const string +--show error: index type should be integer diff --git a/tests/queries/0_stateless/00918_json_functions.sql b/tests/queries/0_stateless/00918_json_functions.sql index 87682587c8e..3105994ce20 100644 --- a/tests/queries/0_stateless/00918_json_functions.sql +++ b/tests/queries/0_stateless/00918_json_functions.sql @@ -72,11 +72,47 @@ SELECT JSONExtract('{"a":123456, "b":3.55}', 'Tuple(a LowCardinality(Int32), b D SELECT JSONExtract('{"a":1, "b":"417ddc5d-e556-4d27-95dd-a34d84e46a50"}', 'Tuple(a Int8, b UUID)'); SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'a', 'LowCardinality(String)'); SELECT JSONExtract('{"a":3333.6333333333333333333333, "b":"test"}', 'Tuple(a Decimal(10,1), b LowCardinality(String))'); +SELECT JSONExtract('{"a":"3333.6333333333333333333333", "b":"test"}', 'Tuple(a Decimal(10,1), b LowCardinality(String))'); SELECT JSONExtract('{"a":3333.6333333333333333333333, "b":"test"}', 'Tuple(a Decimal(20,10), b LowCardinality(String))'); +SELECT JSONExtract('{"a":"3333.6333333333333333333333", "b":"test"}', 'Tuple(a Decimal(20,10), b LowCardinality(String))'); SELECT JSONExtract('{"a":123456.123456}', 'a', 'Decimal(20, 4)') as a, toTypeName(a); +SELECT JSONExtract('{"a":"123456.123456"}', 'a', 'Decimal(20, 4)') as a, toTypeName(a); +SELECT JSONExtract('{"a":"123456789012345.12"}', 'a', 'Decimal(30, 4)') as a, toTypeName(a); +SELECT JSONExtract('{"a":"1234567890.12345678901234567890", "b":"test"}', 'Tuple(a Decimal(35,20), b LowCardinality(String))') as a, toTypeName(a); +SELECT JSONExtract('{"a":"1234567890.123456789012345678901234567890", "b":"test"}', 'Tuple(a Decimal(45,30), b LowCardinality(String))') as a, toTypeName(a); SELECT toDecimal64(123456789012345.12, 4), JSONExtract('{"a":123456789012345.12}', 'a', 'Decimal(30, 4)'); SELECT toDecimal128(1234567890.12345678901234567890, 20), JSONExtract('{"a":1234567890.12345678901234567890, "b":"test"}', 'Tuple(a Decimal(35,20), b LowCardinality(String))'); SELECT toDecimal256(1234567890.123456789012345678901234567890, 30), JSONExtract('{"a":1234567890.12345678901234567890, "b":"test"}', 'Tuple(a Decimal(45,30), b LowCardinality(String))'); +SELECT JSONExtract('{"a":-1234567890}', 'a', 'Int32') as a, toTypeName(a); +SELECT JSONExtract('{"a":1234567890}', 'a', 'UInt32') as a, toTypeName(a); +SELECT JSONExtract('{"a":-1234567890123456789}', 'a', 'Int64') as a, toTypeName(a); +SELECT 
JSONExtract('{"a":1234567890123456789}', 'a', 'UInt64') as a, toTypeName(a); +SELECT JSONExtract('{"a":-1234567890123456789}', 'a', 'Int128') as a, toTypeName(a); +SELECT JSONExtract('{"a":1234567890123456789}', 'a', 'UInt128') as a, toTypeName(a); +SELECT JSONExtract('{"a":-1234567890123456789}', 'a', 'Int256') as a, toTypeName(a); +SELECT JSONExtract('{"a":1234567890123456789}', 'a', 'UInt256') as a, toTypeName(a); +SELECT JSONExtract('{"a":-123456789.345}', 'a', 'Int32') as a, toTypeName(a); +SELECT JSONExtract('{"a":123456789.345}', 'a', 'UInt32') as a, toTypeName(a); +SELECT JSONExtract('{"a":-123456789012.345}', 'a', 'Int64') as a, toTypeName(a); +SELECT JSONExtract('{"a":123456789012.345}', 'a', 'UInt64') as a, toTypeName(a); +SELECT JSONExtract('{"a":-123456789012.345}', 'a', 'Int128') as a, toTypeName(a); +SELECT JSONExtract('{"a":123456789012.345}', 'a', 'UInt128') as a, toTypeName(a); +SELECT JSONExtract('{"a":-123456789012.345}', 'a', 'Int256') as a, toTypeName(a); +SELECT JSONExtract('{"a":123456789012.345}', 'a', 'UInt256') as a, toTypeName(a); +SELECT JSONExtract('{"a":"-123456789"}', 'a', 'Int32') as a, toTypeName(a); +SELECT JSONExtract('{"a":"123456789"}', 'a', 'UInt32') as a, toTypeName(a); +SELECT JSONExtract('{"a":"-1234567890123456789"}', 'a', 'Int64') as a, toTypeName(a); +SELECT JSONExtract('{"a":"1234567890123456789"}', 'a', 'UInt64') as a, toTypeName(a); +SELECT JSONExtract('{"a":"-12345678901234567890123456789012345678"}', 'a', 'Int128') as a, toTypeName(a); +SELECT JSONExtract('{"a":"12345678901234567890123456789012345678"}', 'a', 'UInt128') as a, toTypeName(a); +SELECT JSONExtract('{"a":"-11345678901234567890123456789012345678901234567890123456789012345678901234567"}', 'a', 'Int256') as a, toTypeName(a); +SELECT JSONExtract('{"a":"11345678901234567890123456789012345678901234567890123456789012345678901234567"}', 'a', 'UInt256') as a, toTypeName(a); +SELECT JSONExtract('{"a":"-1234567899999"}', 'a', 'Int32') as a, toTypeName(a); +SELECT JSONExtract('{"a":"1234567899999"}', 'a', 'UInt32') as a, toTypeName(a); +SELECT JSONExtract('{"a":"-1234567890123456789999"}', 'a', 'Int64') as a, toTypeName(a); +SELECT JSONExtract('{"a":"1234567890123456789999"}', 'a', 'UInt64') as a, toTypeName(a); +SELECT JSONExtract('{"a":0}', 'a', 'Bool') as a, toTypeName(a); +SELECT JSONExtract('{"a":1}', 'a', 'Bool') as a, toTypeName(a); SELECT '--JSONExtractKeysAndValues--'; SELECT JSONExtractKeysAndValues('{"a": "hello", "b": [-100, 200.0, 300]}', 'String'); @@ -244,3 +280,6 @@ SELECT JSONExtractString(json, 's') FROM (SELECT arrayJoin(['{"s":"u"}', '{"s":" SELECT '--show error: type should be const string'; SELECT JSONExtractKeysAndValues([], JSONLength('^?V{LSwp')); -- { serverError 44 } WITH '{"i": 1, "f": 1.2}' AS json SELECT JSONExtract(json, 'i', JSONType(json, 'i')); -- { serverError 44 } + +SELECT '--show error: index type should be integer'; +SELECT JSONExtract('[]', JSONExtract('0', 'UInt256'), 'UInt256'); -- { serverError 43 } diff --git a/tests/queries/0_stateless/00938_template_input_format.reference b/tests/queries/0_stateless/00938_template_input_format.reference index e1f77d9a581..ec8cd7a21f0 100644 --- a/tests/queries/0_stateless/00938_template_input_format.reference +++ b/tests/queries/0_stateless/00938_template_input_format.reference @@ -31,3 +31,5 @@ cv bn m","qwe,rty",456,"2016-01-02" "zx\cv\bn m","qwe,rty","as""df'gh","",789,"2016-01-04" "","zx cv bn m","qwe,rty","as""df'gh",9876543210,"2016-01-03" +1 +1 diff --git 
a/tests/queries/0_stateless/00938_template_input_format.sh b/tests/queries/0_stateless/00938_template_input_format.sh index e99f59614da..be75edcdb61 100755 --- a/tests/queries/0_stateless/00938_template_input_format.sh +++ b/tests/queries/0_stateless/00938_template_input_format.sh @@ -83,3 +83,13 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE template1"; $CLICKHOUSE_CLIENT --query="DROP TABLE template2"; rm "$CURDIR"/00938_template_input_format_resultset.tmp "$CURDIR"/00938_template_input_format_row.tmp +echo -ne '\${a:Escaped},\${b:Escaped}\n' > "$CURDIR"/00938_template_input_format_row.tmp +echo -ne "a,b\nc,d\n" | $CLICKHOUSE_LOCAL --structure "a String, b String" --input-format Template \ + --format_template_row "$CURDIR"/00938_template_input_format_row.tmp --format_template_rows_between_delimiter '' \ + -q 'select * from table' 2>&1| grep -Fac "'Escaped' serialization requires delimiter" +echo -ne '\${a:Escaped},\${:Escaped}\n' > "$CURDIR"/00938_template_input_format_row.tmp +echo -ne "a,b\nc,d\n" | $CLICKHOUSE_LOCAL --structure "a String" --input-format Template \ + --format_template_row "$CURDIR"/00938_template_input_format_row.tmp --format_template_rows_between_delimiter '' \ + -q 'select * from table' 2>&1| grep -Fac "'Escaped' serialization requires delimiter" +rm "$CURDIR"/00938_template_input_format_row.tmp + diff --git a/tests/queries/0_stateless/00941_to_custom_week.sql b/tests/queries/0_stateless/00941_to_custom_week.sql index 04ff08d4117..4dd5d209306 100644 --- a/tests/queries/0_stateless/00941_to_custom_week.sql +++ b/tests/queries/0_stateless/00941_to_custom_week.sql @@ -49,3 +49,4 @@ SELECT toStartOfWeek(x, 3) AS w3, toStartOfWeek(x_t, 3) AS wt3 FROM numbers(10); + diff --git a/tests/queries/0_stateless/01014_format_custom_separated.reference b/tests/queries/0_stateless/01014_format_custom_separated.reference index d46a6fdf5b1..626d6ed66b8 100644 --- a/tests/queries/0_stateless/01014_format_custom_separated.reference +++ b/tests/queries/0_stateless/01014_format_custom_separated.reference @@ -8,3 +8,4 @@ 1,"2019-09-25","world" 2,"2019-09-26","custom" 3,"2019-09-27","separated" +1 diff --git a/tests/queries/0_stateless/01014_format_custom_separated.sh b/tests/queries/0_stateless/01014_format_custom_separated.sh index 4e88419d125..655607c8c9b 100755 --- a/tests/queries/0_stateless/01014_format_custom_separated.sh +++ b/tests/queries/0_stateless/01014_format_custom_separated.sh @@ -34,3 +34,8 @@ FORMAT CustomSeparated" $CLICKHOUSE_CLIENT --query="SELECT * FROM custom_separated ORDER BY n FORMAT CSV" $CLICKHOUSE_CLIENT --query="DROP TABLE custom_separated" + +echo -ne "a,b\nc,d\n" | $CLICKHOUSE_LOCAL --structure "a String, b String" \ + --input-format CustomSeparated --format_custom_escaping_rule=Escaped \ + --format_custom_field_delimiter=',' --format_custom_row_after_delimiter=$'\n' -q 'select * from table' \ + 2>&1| grep -Fac "'Escaped' serialization requires delimiter" diff --git a/tests/queries/0_stateless/01186_conversion_to_nullable.reference b/tests/queries/0_stateless/01186_conversion_to_nullable.reference index 86fa0afff20..e4c1fd7c40b 100644 --- a/tests/queries/0_stateless/01186_conversion_to_nullable.reference +++ b/tests/queries/0_stateless/01186_conversion_to_nullable.reference @@ -26,7 +26,7 @@ \N 42 \N -\N +3.14 42 \N 3.14159 diff --git a/tests/queries/0_stateless/01284_port.reference b/tests/queries/0_stateless/01284_port.reference index 7e776595065..5b7b58bc7e4 100644 --- a/tests/queries/0_stateless/01284_port.reference +++ 
b/tests/queries/0_stateless/01284_port.reference @@ -22,3 +22,27 @@ ipv6 0 host-no-dot 0 +ipv4 +0 +80 +80 +80 +80 +hostname +0 +80 +80 +80 +80 +default-port +80 +80 +ipv6 +0 +0 +0 +0 +0 +0 +host-no-dot +0 diff --git a/tests/queries/0_stateless/01284_port.sql b/tests/queries/0_stateless/01284_port.sql deleted file mode 100644 index 9c31a5d42ad..00000000000 --- a/tests/queries/0_stateless/01284_port.sql +++ /dev/null @@ -1,34 +0,0 @@ -select 'ipv4'; -select port('http://127.0.0.1/'); -select port('http://127.0.0.1:80'); -select port('http://127.0.0.1:80/'); -select port('//127.0.0.1:80/'); -select port('127.0.0.1:80'); -select 'hostname'; -select port('http://foobar.com/'); -select port('http://foobar.com:80'); -select port('http://foobar.com:80/'); -select port('//foobar.com:80/'); -select port('foobar.com:80'); - -select 'default-port'; -select port('http://127.0.0.1/', toUInt16(80)); -select port('http://foobar.com/', toUInt16(80)); - --- unsupported -/* ILLEGAL_TYPE_OF_ARGUMENT */ select port(toFixedString('', 1)); -- { serverError 43; } -/* ILLEGAL_TYPE_OF_ARGUMENT */ select port('', 1); -- { serverError 43; } -/* NUMBER_OF_ARGUMENTS_DOESNT_MATCH */ select port('', 1, 1); -- { serverError 42; } - --- --- Known limitations of domain() (getURLHost()) --- -select 'ipv6'; -select port('http://[2001:db8::8a2e:370:7334]/'); -select port('http://[2001:db8::8a2e:370:7334]:80'); -select port('http://[2001:db8::8a2e:370:7334]:80/'); -select port('//[2001:db8::8a2e:370:7334]:80/'); -select port('[2001:db8::8a2e:370:7334]:80'); -select port('2001:db8::8a2e:370:7334:80'); -select 'host-no-dot'; -select port('//foobar:80/'); diff --git a/tests/queries/0_stateless/01284_port.sql.j2 b/tests/queries/0_stateless/01284_port.sql.j2 new file mode 100644 index 00000000000..6f78b3b8e3b --- /dev/null +++ b/tests/queries/0_stateless/01284_port.sql.j2 @@ -0,0 +1,39 @@ +{% for suffix in ['', 'RFC'] -%} + +select 'ipv4'; +select port{{ suffix }}('http://127.0.0.1/'); +select port{{ suffix }}('http://127.0.0.1:80'); +select port{{ suffix }}('http://127.0.0.1:80/'); +select port{{ suffix }}('//127.0.0.1:80/'); +select port{{ suffix }}('127.0.0.1:80'); + +select 'hostname'; +select port{{ suffix }}('http://foobar.com/'); +select port{{ suffix }}('http://foobar.com:80'); +select port{{ suffix }}('http://foobar.com:80/'); +select port{{ suffix }}('//foobar.com:80/'); +select port{{ suffix }}('foobar.com:80'); + +select 'default-port'; +select port{{ suffix }}('http://127.0.0.1/', toUInt16(80)); +select port{{ suffix }}('http://foobar.com/', toUInt16(80)); + +-- unsupported +/* ILLEGAL_TYPE_OF_ARGUMENT */ select port(toFixedString('', 1)); -- { serverError 43; } +/* ILLEGAL_TYPE_OF_ARGUMENT */ select port{{ suffix }}('', 1); -- { serverError 43; } +/* NUMBER_OF_ARGUMENTS_DOESNT_MATCH */ select port{{ suffix }}('', 1, 1); -- { serverError 42; } + +-- +-- Known limitations of domain() (getURLHost()) +-- +select 'ipv6'; +select port{{ suffix }}('http://[2001:db8::8a2e:370:7334]/'); +select port{{ suffix }}('http://[2001:db8::8a2e:370:7334]:80'); +select port{{ suffix }}('http://[2001:db8::8a2e:370:7334]:80/'); +select port{{ suffix }}('//[2001:db8::8a2e:370:7334]:80/'); +select port{{ suffix }}('[2001:db8::8a2e:370:7334]:80'); +select port{{ suffix }}('2001:db8::8a2e:370:7334:80'); +select 'host-no-dot'; +select port{{ suffix }}('//foobar:80/'); + +{%- endfor %} diff --git a/tests/queries/0_stateless/01288_shard_max_network_bandwidth.sql b/tests/queries/0_stateless/01288_shard_max_network_bandwidth.sql index 
969bb0a126c..d2daf48a1cb 100644 --- a/tests/queries/0_stateless/01288_shard_max_network_bandwidth.sql +++ b/tests/queries/0_stateless/01288_shard_max_network_bandwidth.sql @@ -1,7 +1,7 @@ -- Tags: shard --- Limit to 10 MB/sec -SET max_network_bandwidth = 10000000; +-- Limit to 100 KB/sec +SET max_network_bandwidth = 100000; -- Lower max_block_size, so we can start throttling sooner. Otherwise query will be executed too quickly. SET max_block_size = 100; @@ -11,7 +11,7 @@ CREATE TEMPORARY TABLE times (t DateTime); -- rand64 is uncompressable data. Each number will take 8 bytes of bandwidth. -- This query should execute in no less than 1.6 seconds if throttled. INSERT INTO times SELECT now(); -SELECT sum(ignore(*)) FROM (SELECT rand64() FROM remote('127.0.0.{2,3}', numbers(2000000))); +SELECT sum(ignore(*)) FROM (SELECT rand64() FROM remote('127.0.0.{2,3}', numbers(20000))); INSERT INTO times SELECT now(); SELECT max(t) - min(t) >= 1 FROM times; diff --git a/tests/queries/0_stateless/01411_from_unixtime.reference b/tests/queries/0_stateless/01411_from_unixtime.reference index 1bc7519e668..17086e8c58b 100644 --- a/tests/queries/0_stateless/01411_from_unixtime.reference +++ b/tests/queries/0_stateless/01411_from_unixtime.reference @@ -5,25 +5,25 @@ 11 1970-01-15 1970-01-15 06:52:36 -20 +20 20 +02 02 +01/02/18 01/02/18 + 2 2 +2018-01-02 2018-01-02 +22 00 02 -01/02/18 - 2 -2018-01-02 -22 -02 -10 +10 12 11 12 -001 -366 -01 -33 -\n -AM +001 001 +366 366 +01 01 +33 00 +\n \n +AM AM AM PM -22:33 -44 -\t -22:33:44 +22:33 00:00 +44 00 +\t \t +22:33:44 00:00:00 diff --git a/tests/queries/0_stateless/01411_from_unixtime.sql b/tests/queries/0_stateless/01411_from_unixtime.sql index ec7b4d65b57..9a6655768e0 100644 --- a/tests/queries/0_stateless/01411_from_unixtime.sql +++ b/tests/queries/0_stateless/01411_from_unixtime.sql @@ -5,25 +5,25 @@ SELECT FROM_UNIXTIME(5345345, '%C', 'UTC'); SELECT FROM_UNIXTIME(645123, '%H', 'UTC'); SELECT FROM_UNIXTIME(1232456, '%Y-%m-%d', 'UTC'); SELECT FROM_UNIXTIME(1234356, '%Y-%m-%d %R:%S', 'UTC'); -SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%C'); -SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%d'); -SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%D'); -SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%e'); -SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%F'); -SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%H'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%C'), FROM_UNIXTIME(toDate32('2018-01-02'), '%C'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%d'), FROM_UNIXTIME(toDate32('2018-01-02'), '%d'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%D'), FROM_UNIXTIME(toDate32('2018-01-02'), '%D'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%e'), FROM_UNIXTIME(toDate32('2018-01-02'), '%e'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%F'), FROM_UNIXTIME(toDate32('2018-01-02'), '%F'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%H'), FROM_UNIXTIME(toDate32('2018-01-02'), '%H'); SELECT FROM_UNIXTIME(toDateTime('2018-01-02 02:33:44'), '%H'); -SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%I'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%I'), FROM_UNIXTIME(toDate32('2018-01-02'), '%I'); SELECT FROM_UNIXTIME(toDateTime('2018-01-02 11:33:44'), '%I'); SELECT FROM_UNIXTIME(toDateTime('2018-01-02 00:33:44'), '%I'); -SELECT FROM_UNIXTIME(toDateTime('2018-01-01 00:33:44'), '%j'); -SELECT 
FROM_UNIXTIME(toDateTime('2000-12-31 00:33:44'), '%j'); -SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%m'); -SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%M'); -SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%n'); -SELECT FROM_UNIXTIME(toDateTime('2018-01-02 00:33:44'), '%p'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-01 00:33:44'), '%j'), FROM_UNIXTIME(toDate32('2018-01-01'), '%j'); +SELECT FROM_UNIXTIME(toDateTime('2000-12-31 00:33:44'), '%j'), FROM_UNIXTIME(toDate32('2000-12-31'), '%j'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%m'), FROM_UNIXTIME(toDate32('2018-01-02'), '%m'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%M'), FROM_UNIXTIME(toDate32('2018-01-02'), '%M'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%n'), FROM_UNIXTIME(toDate32('2018-01-02'), '%n'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 00:33:44'), '%p'), FROM_UNIXTIME(toDate32('2018-01-02'), '%p'); SELECT FROM_UNIXTIME(toDateTime('2018-01-02 11:33:44'), '%p'); SELECT FROM_UNIXTIME(toDateTime('2018-01-02 12:33:44'), '%p'); -SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%R'); -SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%S'); -SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%t'); -SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%T'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%R'), FROM_UNIXTIME(toDate32('2018-01-02'), '%R'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%S'), FROM_UNIXTIME(toDate32('2018-01-02'), '%S'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%t'), FROM_UNIXTIME(toDate32('2018-01-02'), '%t'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%T'), FROM_UNIXTIME(toDate32('2018-01-02'), '%T'); diff --git a/tests/queries/0_stateless/01440_to_date_monotonicity.reference b/tests/queries/0_stateless/01440_to_date_monotonicity.reference index dd8545b721d..2dbec540fbb 100644 --- a/tests/queries/0_stateless/01440_to_date_monotonicity.reference +++ b/tests/queries/0_stateless/01440_to_date_monotonicity.reference @@ -1,4 +1,4 @@ 0 -1970-01-01 2120-07-26 1970-04-11 1970-01-01 2149-06-06 +1970-01-01 2106-02-07 1970-04-11 1970-01-01 2149-06-06 1970-01-01 02:00:00 2106-02-07 09:28:15 1970-01-01 02:16:40 2000-01-01 13:12:12 diff --git a/tests/queries/0_stateless/01601_custom_tld.reference b/tests/queries/0_stateless/01601_custom_tld.reference index 981067606a2..7ef6eb7d3a2 100644 --- a/tests/queries/0_stateless/01601_custom_tld.reference +++ b/tests/queries/0_stateless/01601_custom_tld.reference @@ -89,3 +89,92 @@ select cutToFirstSignificantSubdomainCustom('city.kawasaki.jp', 'public_suffix_l city.kawasaki.jp select cutToFirstSignificantSubdomainCustom('some.city.kawasaki.jp', 'public_suffix_list'); city.kawasaki.jp +select '-- no-tld'; +-- no-tld +-- even if there is no TLD, 2-nd level by default anyway +-- FIXME: make this behavior optional (so that TLD for host never changed, either empty or something real) +select cutToFirstSignificantSubdomainRFC('there-is-no-such-domain'); + +select cutToFirstSignificantSubdomainRFC('foo.there-is-no-such-domain'); +foo.there-is-no-such-domain +select cutToFirstSignificantSubdomainRFC('bar.foo.there-is-no-such-domain'); +foo.there-is-no-such-domain +select cutToFirstSignificantSubdomainCustomRFC('there-is-no-such-domain', 'public_suffix_list'); + +select cutToFirstSignificantSubdomainCustomRFC('foo.there-is-no-such-domain', 'public_suffix_list'); +foo.there-is-no-such-domain +select 
cutToFirstSignificantSubdomainCustomRFC('bar.foo.there-is-no-such-domain', 'public_suffix_list'); +foo.there-is-no-such-domain +select firstSignificantSubdomainCustomRFC('bar.foo.there-is-no-such-domain', 'public_suffix_list'); +foo +select '-- generic'; +-- generic +select firstSignificantSubdomainCustomRFC('foo.kernel.biz.ss', 'public_suffix_list'); -- kernel +kernel +select cutToFirstSignificantSubdomainCustomRFC('foo.kernel.biz.ss', 'public_suffix_list'); -- kernel.biz.ss +kernel.biz.ss +select '-- difference'; +-- difference +-- biz.ss is not in the default TLD list, hence: +select cutToFirstSignificantSubdomainRFC('foo.kernel.biz.ss'); -- biz.ss +biz.ss +select cutToFirstSignificantSubdomainCustomRFC('foo.kernel.biz.ss', 'public_suffix_list'); -- kernel.biz.ss +kernel.biz.ss +select '-- 3+level'; +-- 3+level +select cutToFirstSignificantSubdomainCustomRFC('xx.blogspot.co.at', 'public_suffix_list'); -- xx.blogspot.co.at +xx.blogspot.co.at +select firstSignificantSubdomainCustomRFC('xx.blogspot.co.at', 'public_suffix_list'); -- blogspot +blogspot +select cutToFirstSignificantSubdomainCustomRFC('foo.bar.xx.blogspot.co.at', 'public_suffix_list'); -- xx.blogspot.co.at +xx.blogspot.co.at +select firstSignificantSubdomainCustomRFC('foo.bar.xx.blogspot.co.at', 'public_suffix_list'); -- blogspot +blogspot +select '-- url'; +-- url +select cutToFirstSignificantSubdomainCustomRFC('http://foobar.com', 'public_suffix_list'); +foobar.com +select cutToFirstSignificantSubdomainCustomRFC('http://foobar.com/foo', 'public_suffix_list'); +foobar.com +select cutToFirstSignificantSubdomainCustomRFC('http://bar.foobar.com/foo', 'public_suffix_list'); +foobar.com +select cutToFirstSignificantSubdomainCustomRFC('http://xx.blogspot.co.at', 'public_suffix_list'); +xx.blogspot.co.at +select '-- www'; +-- www +select cutToFirstSignificantSubdomainCustomWithWWWRFC('http://www.foo', 'public_suffix_list'); +www.foo +select cutToFirstSignificantSubdomainCustomRFC('http://www.foo', 'public_suffix_list'); +foo +select '-- vector'; +-- vector +select cutToFirstSignificantSubdomainCustomRFC('http://xx.blogspot.co.at/' || toString(number), 'public_suffix_list') from numbers(1); +xx.blogspot.co.at +select cutToFirstSignificantSubdomainCustomRFC('there-is-no-such-domain' || toString(number), 'public_suffix_list') from numbers(1); + +select '-- no new line'; +-- no new line +select cutToFirstSignificantSubdomainCustomRFC('foo.bar', 'no_new_line_list'); +foo.bar +select cutToFirstSignificantSubdomainCustomRFC('a.foo.bar', 'no_new_line_list'); +a.foo.bar +select cutToFirstSignificantSubdomainCustomRFC('a.foo.baz', 'no_new_line_list'); +foo.baz +select '-- asterisk'; +-- asterisk +select cutToFirstSignificantSubdomainCustomRFC('foo.something.sheffield.sch.uk', 'public_suffix_list'); +something.sheffield.sch.uk +select cutToFirstSignificantSubdomainCustomRFC('something.sheffield.sch.uk', 'public_suffix_list'); +something.sheffield.sch.uk +select cutToFirstSignificantSubdomainCustomRFC('sheffield.sch.uk', 'public_suffix_list'); +sheffield.sch.uk +select '-- exclamation mark'; +-- exclamation mark +select cutToFirstSignificantSubdomainCustomRFC('foo.kawasaki.jp', 'public_suffix_list'); +foo.kawasaki.jp +select cutToFirstSignificantSubdomainCustomRFC('foo.foo.kawasaki.jp', 'public_suffix_list'); +foo.foo.kawasaki.jp +select cutToFirstSignificantSubdomainCustomRFC('city.kawasaki.jp', 'public_suffix_list'); +city.kawasaki.jp +select cutToFirstSignificantSubdomainCustomRFC('some.city.kawasaki.jp', 'public_suffix_list'); 
+city.kawasaki.jp diff --git a/tests/queries/0_stateless/01601_custom_tld.sql b/tests/queries/0_stateless/01601_custom_tld.sql deleted file mode 100644 index 69ae209af2c..00000000000 --- a/tests/queries/0_stateless/01601_custom_tld.sql +++ /dev/null @@ -1,57 +0,0 @@ --- { echo } - -select '-- no-tld'; --- even if there is no TLD, 2-nd level by default anyway --- FIXME: make this behavior optional (so that TLD for host never changed, either empty or something real) -select cutToFirstSignificantSubdomain('there-is-no-such-domain'); -select cutToFirstSignificantSubdomain('foo.there-is-no-such-domain'); -select cutToFirstSignificantSubdomain('bar.foo.there-is-no-such-domain'); -select cutToFirstSignificantSubdomainCustom('there-is-no-such-domain', 'public_suffix_list'); -select cutToFirstSignificantSubdomainCustom('foo.there-is-no-such-domain', 'public_suffix_list'); -select cutToFirstSignificantSubdomainCustom('bar.foo.there-is-no-such-domain', 'public_suffix_list'); -select firstSignificantSubdomainCustom('bar.foo.there-is-no-such-domain', 'public_suffix_list'); - -select '-- generic'; -select firstSignificantSubdomainCustom('foo.kernel.biz.ss', 'public_suffix_list'); -- kernel -select cutToFirstSignificantSubdomainCustom('foo.kernel.biz.ss', 'public_suffix_list'); -- kernel.biz.ss - -select '-- difference'; --- biz.ss is not in the default TLD list, hence: -select cutToFirstSignificantSubdomain('foo.kernel.biz.ss'); -- biz.ss -select cutToFirstSignificantSubdomainCustom('foo.kernel.biz.ss', 'public_suffix_list'); -- kernel.biz.ss - -select '-- 3+level'; -select cutToFirstSignificantSubdomainCustom('xx.blogspot.co.at', 'public_suffix_list'); -- xx.blogspot.co.at -select firstSignificantSubdomainCustom('xx.blogspot.co.at', 'public_suffix_list'); -- blogspot -select cutToFirstSignificantSubdomainCustom('foo.bar.xx.blogspot.co.at', 'public_suffix_list'); -- xx.blogspot.co.at -select firstSignificantSubdomainCustom('foo.bar.xx.blogspot.co.at', 'public_suffix_list'); -- blogspot - -select '-- url'; -select cutToFirstSignificantSubdomainCustom('http://foobar.com', 'public_suffix_list'); -select cutToFirstSignificantSubdomainCustom('http://foobar.com/foo', 'public_suffix_list'); -select cutToFirstSignificantSubdomainCustom('http://bar.foobar.com/foo', 'public_suffix_list'); -select cutToFirstSignificantSubdomainCustom('http://xx.blogspot.co.at', 'public_suffix_list'); - -select '-- www'; -select cutToFirstSignificantSubdomainCustomWithWWW('http://www.foo', 'public_suffix_list'); -select cutToFirstSignificantSubdomainCustom('http://www.foo', 'public_suffix_list'); - -select '-- vector'; -select cutToFirstSignificantSubdomainCustom('http://xx.blogspot.co.at/' || toString(number), 'public_suffix_list') from numbers(1); -select cutToFirstSignificantSubdomainCustom('there-is-no-such-domain' || toString(number), 'public_suffix_list') from numbers(1); - -select '-- no new line'; -select cutToFirstSignificantSubdomainCustom('foo.bar', 'no_new_line_list'); -select cutToFirstSignificantSubdomainCustom('a.foo.bar', 'no_new_line_list'); -select cutToFirstSignificantSubdomainCustom('a.foo.baz', 'no_new_line_list'); - -select '-- asterisk'; -select cutToFirstSignificantSubdomainCustom('foo.something.sheffield.sch.uk', 'public_suffix_list'); -select cutToFirstSignificantSubdomainCustom('something.sheffield.sch.uk', 'public_suffix_list'); -select cutToFirstSignificantSubdomainCustom('sheffield.sch.uk', 'public_suffix_list'); - -select '-- exclamation mark'; -select 
cutToFirstSignificantSubdomainCustom('foo.kawasaki.jp', 'public_suffix_list'); -select cutToFirstSignificantSubdomainCustom('foo.foo.kawasaki.jp', 'public_suffix_list'); -select cutToFirstSignificantSubdomainCustom('city.kawasaki.jp', 'public_suffix_list'); -select cutToFirstSignificantSubdomainCustom('some.city.kawasaki.jp', 'public_suffix_list'); diff --git a/tests/queries/0_stateless/01601_custom_tld.sql.j2 b/tests/queries/0_stateless/01601_custom_tld.sql.j2 new file mode 100644 index 00000000000..1e0982ea1b7 --- /dev/null +++ b/tests/queries/0_stateless/01601_custom_tld.sql.j2 @@ -0,0 +1,61 @@ +-- { echo } + +{% for suffix in ['', 'RFC'] -%} + +select '-- no-tld'; +-- even if there is no TLD, 2-nd level by default anyway +-- FIXME: make this behavior optional (so that TLD for host never changed, either empty or something real) +select cutToFirstSignificantSubdomain{{ suffix }}('there-is-no-such-domain'); +select cutToFirstSignificantSubdomain{{ suffix }}('foo.there-is-no-such-domain'); +select cutToFirstSignificantSubdomain{{ suffix }}('bar.foo.there-is-no-such-domain'); +select cutToFirstSignificantSubdomainCustom{{ suffix }}('there-is-no-such-domain', 'public_suffix_list'); +select cutToFirstSignificantSubdomainCustom{{ suffix }}('foo.there-is-no-such-domain', 'public_suffix_list'); +select cutToFirstSignificantSubdomainCustom{{ suffix }}('bar.foo.there-is-no-such-domain', 'public_suffix_list'); +select firstSignificantSubdomainCustom{{ suffix }}('bar.foo.there-is-no-such-domain', 'public_suffix_list'); + +select '-- generic'; +select firstSignificantSubdomainCustom{{ suffix }}('foo.kernel.biz.ss', 'public_suffix_list'); -- kernel +select cutToFirstSignificantSubdomainCustom{{ suffix }}('foo.kernel.biz.ss', 'public_suffix_list'); -- kernel.biz.ss + +select '-- difference'; +-- biz.ss is not in the default TLD list, hence: +select cutToFirstSignificantSubdomain{{ suffix }}('foo.kernel.biz.ss'); -- biz.ss +select cutToFirstSignificantSubdomainCustom{{ suffix }}('foo.kernel.biz.ss', 'public_suffix_list'); -- kernel.biz.ss + +select '-- 3+level'; +select cutToFirstSignificantSubdomainCustom{{ suffix }}('xx.blogspot.co.at', 'public_suffix_list'); -- xx.blogspot.co.at +select firstSignificantSubdomainCustom{{ suffix }}('xx.blogspot.co.at', 'public_suffix_list'); -- blogspot +select cutToFirstSignificantSubdomainCustom{{ suffix }}('foo.bar.xx.blogspot.co.at', 'public_suffix_list'); -- xx.blogspot.co.at +select firstSignificantSubdomainCustom{{ suffix }}('foo.bar.xx.blogspot.co.at', 'public_suffix_list'); -- blogspot + +select '-- url'; +select cutToFirstSignificantSubdomainCustom{{ suffix }}('http://foobar.com', 'public_suffix_list'); +select cutToFirstSignificantSubdomainCustom{{ suffix }}('http://foobar.com/foo', 'public_suffix_list'); +select cutToFirstSignificantSubdomainCustom{{ suffix }}('http://bar.foobar.com/foo', 'public_suffix_list'); +select cutToFirstSignificantSubdomainCustom{{ suffix }}('http://xx.blogspot.co.at', 'public_suffix_list'); + +select '-- www'; +select cutToFirstSignificantSubdomainCustomWithWWW{{ suffix }}('http://www.foo', 'public_suffix_list'); +select cutToFirstSignificantSubdomainCustom{{ suffix }}('http://www.foo', 'public_suffix_list'); + +select '-- vector'; +select cutToFirstSignificantSubdomainCustom{{ suffix }}('http://xx.blogspot.co.at/' || toString(number), 'public_suffix_list') from numbers(1); +select cutToFirstSignificantSubdomainCustom{{ suffix }}('there-is-no-such-domain' || toString(number), 'public_suffix_list') from numbers(1); + +select '-- no 
new line'; +select cutToFirstSignificantSubdomainCustom{{ suffix }}('foo.bar', 'no_new_line_list'); +select cutToFirstSignificantSubdomainCustom{{ suffix }}('a.foo.bar', 'no_new_line_list'); +select cutToFirstSignificantSubdomainCustom{{ suffix }}('a.foo.baz', 'no_new_line_list'); + +select '-- asterisk'; +select cutToFirstSignificantSubdomainCustom{{ suffix }}('foo.something.sheffield.sch.uk', 'public_suffix_list'); +select cutToFirstSignificantSubdomainCustom{{ suffix }}('something.sheffield.sch.uk', 'public_suffix_list'); +select cutToFirstSignificantSubdomainCustom{{ suffix }}('sheffield.sch.uk', 'public_suffix_list'); + +select '-- exclamation mark'; +select cutToFirstSignificantSubdomainCustom{{ suffix }}('foo.kawasaki.jp', 'public_suffix_list'); +select cutToFirstSignificantSubdomainCustom{{ suffix }}('foo.foo.kawasaki.jp', 'public_suffix_list'); +select cutToFirstSignificantSubdomainCustom{{ suffix }}('city.kawasaki.jp', 'public_suffix_list'); +select cutToFirstSignificantSubdomainCustom{{ suffix }}('some.city.kawasaki.jp', 'public_suffix_list'); + +{% endfor %} diff --git a/tests/queries/0_stateless/01685_json_extract_double_as_float.reference b/tests/queries/0_stateless/01685_json_extract_double_as_float.reference index f3f4206b425..a24f6569f44 100644 --- a/tests/queries/0_stateless/01685_json_extract_double_as_float.reference +++ b/tests/queries/0_stateless/01685_json_extract_double_as_float.reference @@ -1,7 +1,7 @@ 1.1 1.1 1.1 1.1 0.01 0.01 0.01 0.01 -0 -\N +1 +1 -1e300 -inf 0 diff --git a/tests/queries/0_stateless/01744_fuse_sum_count_aggregate.sql b/tests/queries/0_stateless/01744_fuse_sum_count_aggregate.sql index 5b6ed440ba4..375662eb405 100644 --- a/tests/queries/0_stateless/01744_fuse_sum_count_aggregate.sql +++ b/tests/queries/0_stateless/01744_fuse_sum_count_aggregate.sql @@ -10,4 +10,5 @@ EXPLAIN SYNTAX SELECT sum(a), sum(b), count(b) from fuse_tbl; SELECT '---------NOT trigger fuse--------'; SELECT sum(a), avg(b) from fuse_tbl; EXPLAIN SYNTAX SELECT sum(a), avg(b) from fuse_tbl; + DROP TABLE fuse_tbl; diff --git a/tests/queries/0_stateless/01811_datename.reference b/tests/queries/0_stateless/01811_datename.reference index 2968fde301a..29bf05750e7 100644 --- a/tests/queries/0_stateless/01811_datename.reference +++ b/tests/queries/0_stateless/01811_datename.reference @@ -1,10 +1,10 @@ -2021 2021 2021 -2 2 2 -April April April -104 104 104 -14 14 14 -15 15 15 -Wednesday Wednesday Wednesday +2021 2021 2021 2021 +2 2 2 2 +April April April April +104 104 104 104 +14 14 14 14 +15 15 15 15 +Wednesday Wednesday Wednesday Wednesday 11 11 22 22 33 33 diff --git a/tests/queries/0_stateless/01811_datename.sql b/tests/queries/0_stateless/01811_datename.sql index b757d9ae018..fe9f5d20238 100644 --- a/tests/queries/0_stateless/01811_datename.sql +++ b/tests/queries/0_stateless/01811_datename.sql @@ -1,44 +1,51 @@ WITH toDate('2021-04-14') AS date_value, + toDate32('2021-04-14') AS date_32_value, toDateTime('2021-04-14 11:22:33') AS date_time_value, toDateTime64('2021-04-14 11:22:33', 3) AS date_time_64_value -SELECT dateName('year', date_value), dateName('year', date_time_value), dateName('year', date_time_64_value); +SELECT dateName('year', date_value), dateName('year', date_32_value), dateName('year', date_time_value), dateName('year', date_time_64_value); WITH toDate('2021-04-14') AS date_value, + toDate32('2021-04-14') AS date_32_value, toDateTime('2021-04-14 11:22:33') AS date_time_value, toDateTime64('2021-04-14 11:22:33', 3) AS date_time_64_value -SELECT dateName('quarter', 
date_value), dateName('quarter', date_time_value), dateName('quarter', date_time_64_value); +SELECT dateName('quarter', date_value), dateName('quarter', date_32_value), dateName('quarter', date_time_value), dateName('quarter', date_time_64_value); WITH toDate('2021-04-14') AS date_value, + toDate32('2021-04-14') AS date_32_value, toDateTime('2021-04-14 11:22:33') AS date_time_value, toDateTime64('2021-04-14 11:22:33', 3) AS date_time_64_value -SELECT dateName('month', date_value), dateName('month', date_time_value), dateName('month', date_time_64_value); +SELECT dateName('month', date_value), dateName('month', date_32_value), dateName('month', date_time_value), dateName('month', date_time_64_value); WITH toDate('2021-04-14') AS date_value, + toDate32('2021-04-14') AS date_32_value, toDateTime('2021-04-14 11:22:33') AS date_time_value, toDateTime64('2021-04-14 11:22:33', 3) AS date_time_64_value -SELECT dateName('dayofyear', date_value), dateName('dayofyear', date_time_value), dateName('dayofyear', date_time_64_value); +SELECT dateName('dayofyear', date_value), dateName('dayofyear', date_32_value), dateName('dayofyear', date_time_value), dateName('dayofyear', date_time_64_value); WITH toDate('2021-04-14') AS date_value, + toDate32('2021-04-14') AS date_32_value, toDateTime('2021-04-14 11:22:33') AS date_time_value, toDateTime64('2021-04-14 11:22:33', 3) AS date_time_64_value -SELECT dateName('day', date_value), dateName('day', date_time_value), dateName('day', date_time_64_value); +SELECT dateName('day', date_value), dateName('day', date_32_value), dateName('day', date_time_value), dateName('day', date_time_64_value); WITH toDate('2021-04-14') AS date_value, + toDate32('2021-04-14') AS date_32_value, toDateTime('2021-04-14 11:22:33') AS date_time_value, toDateTime64('2021-04-14 11:22:33', 3) AS date_time_64_value -SELECT dateName('week', date_value), dateName('week', date_time_value), dateName('week', date_time_64_value); +SELECT dateName('week', date_value), dateName('week', date_32_value), dateName('week', date_time_value), dateName('week', date_time_64_value); WITH toDate('2021-04-14') AS date_value, + toDate32('2021-04-14') AS date_32_value, toDateTime('2021-04-14 11:22:33') AS date_time_value, toDateTime64('2021-04-14 11:22:33', 3) AS date_time_64_value -SELECT dateName('weekday', date_value), dateName('weekday', date_time_value), dateName('weekday', date_time_64_value); +SELECT dateName('weekday', date_value), dateName('weekday', date_32_value), dateName('weekday', date_time_value), dateName('weekday', date_time_64_value); WITH toDateTime('2021-04-14 11:22:33') AS date_time_value, diff --git a/tests/queries/0_stateless/01825_type_json_in_array.reference b/tests/queries/0_stateless/01825_type_json_in_array.reference new file mode 100644 index 00000000000..c36a22e6951 --- /dev/null +++ b/tests/queries/0_stateless/01825_type_json_in_array.reference @@ -0,0 +1,23 @@ +{"id":1,"arr":[{"k1":1,"k2":{"k3":2,"k4":3,"k5":""}},{"k1":2,"k2":{"k3":0,"k4":0,"k5":"foo"}}]} +{"id":2,"arr":[{"k1":3,"k2":{"k3":4,"k4":5,"k5":""}}]} +1 [1,2] [2,0] [3,0] ['','foo'] +2 [3] [4] [5] [''] +{"arr":{"k1":1,"k2":{"k3":2,"k4":3,"k5":""}}} +{"arr":{"k1":2,"k2":{"k3":0,"k4":0,"k5":"foo"}}} +{"arr":{"k1":3,"k2":{"k3":4,"k4":5,"k5":""}}} +Array(Tuple(k1 Int8, k2 Tuple(k3 Int8, k4 Int8, k5 String))) +{"id":1,"arr":[{"k1":[{"k2":"aaa","k3":"bbb","k4":0},{"k2":"ccc","k3":"","k4":0}],"k5":{"k6":""}}]} +{"id":2,"arr":[{"k1":[{"k2":"","k3":"ddd","k4":10},{"k2":"","k3":"","k4":20}],"k5":{"k6":"foo"}}]} +1 [['aaa','ccc']] 
[['bbb','']] [[0,0]] [''] +2 [['','']] [['ddd','']] [[10,20]] ['foo'] +{"k1":{"k2":"","k3":"","k4":20}} +{"k1":{"k2":"","k3":"ddd","k4":10}} +{"k1":{"k2":"aaa","k3":"bbb","k4":0}} +{"k1":{"k2":"ccc","k3":"","k4":0}} +Tuple(k2 String, k3 String, k4 Int8) +{"arr":[{"x":1}]} +{"arr":{"x":{"y":1},"t":{"y":2}}} +{"arr":[1,{"y":1}]} +{"arr":[2,{"y":2}]} +{"arr":[{"x":"aaa","y":[1,2,3]}]} +{"arr":[{"x":1}]} diff --git a/tests/queries/0_stateless/01825_type_json_in_array.sql b/tests/queries/0_stateless/01825_type_json_in_array.sql new file mode 100644 index 00000000000..e5c20d7ba6b --- /dev/null +++ b/tests/queries/0_stateless/01825_type_json_in_array.sql @@ -0,0 +1,35 @@ +-- Tags: no-fasttest + +SET allow_experimental_object_type = 1; +DROP TABLE IF EXISTS t_json_array; + +CREATE TABLE t_json_array (id UInt32, arr Array(JSON)) ENGINE = MergeTree ORDER BY id; + +INSERT INTO t_json_array FORMAT JSONEachRow {"id": 1, "arr": [{"k1": 1, "k2": {"k3": 2, "k4": 3}}, {"k1": 2, "k2": {"k5": "foo"}}]} +INSERT INTO t_json_array FORMAT JSONEachRow {"id": 2, "arr": [{"k1": 3, "k2": {"k3": 4, "k4": 5}}]} + +SET output_format_json_named_tuples_as_objects = 1; + +SELECT * FROM t_json_array ORDER BY id FORMAT JSONEachRow; +SELECT id, arr.k1, arr.k2.k3, arr.k2.k4, arr.k2.k5 FROM t_json_array ORDER BY id; +SELECT arr FROM t_json_array ARRAY JOIN arr ORDER BY arr.k1 FORMAT JSONEachRow; +SELECT toTypeName(arr) FROM t_json_array LIMIT 1; + +TRUNCATE TABLE t_json_array; + +INSERT INTO t_json_array FORMAT JSONEachRow {"id": 1, "arr": [{"k1": [{"k2": "aaa", "k3": "bbb"}, {"k2": "ccc"}]}]} +INSERT INTO t_json_array FORMAT JSONEachRow {"id": 2, "arr": [{"k1": [{"k3": "ddd", "k4": 10}, {"k4": 20}], "k5": {"k6": "foo"}}]} + +SELECT * FROM t_json_array ORDER BY id FORMAT JSONEachRow; +SELECT id, arr.k1.k2, arr.k1.k3, arr.k1.k4, arr.k5.k6 FROM t_json_array ORDER BY id; + +SELECT arrayJoin(arrayJoin(arr.k1)) AS k1 FROM t_json_array ORDER BY k1 FORMAT JSONEachRow; +SELECT toTypeName(arrayJoin(arrayJoin(arr.k1))) AS arr FROM t_json_array LIMIT 1; + +DROP TABLE t_json_array; + +SELECT * FROM values('arr Array(JSON)', '[\'{"x" : 1}\']') FORMAT JSONEachRow; +SELECT * FROM values('arr Map(String, JSON)', '{\'x\' : \'{"y" : 1}\', \'t\' : \'{"y" : 2}\'}') FORMAT JSONEachRow; +SELECT * FROM values('arr Tuple(Int32, JSON)', '(1, \'{"y" : 1}\')', '(2, \'{"y" : 2}\')') FORMAT JSONEachRow; +SELECT * FROM format(JSONEachRow, '{"arr" : [{"x" : "aaa", "y" : [1,2,3]}]}') FORMAT JSONEachRow; +SELECT * FROM values('arr Array(JSON)', '[\'{"x" : 1}\']') FORMAT JSONEachRow; diff --git a/tests/queries/0_stateless/01825_type_json_in_other_types.reference b/tests/queries/0_stateless/01825_type_json_in_other_types.reference new file mode 100644 index 00000000000..b94885a65ab --- /dev/null +++ b/tests/queries/0_stateless/01825_type_json_in_other_types.reference @@ -0,0 +1,17 @@ +Tuple(String, Map(String, Array(Tuple(k1 Nested(k2 Int8, k3 Int8, k5 String), k4 String))), Tuple(k1 String, k2 Tuple(k3 String, k4 String))) +============= +{"id":1,"data":["foo",{"aa":[{"k1":[{"k2":1,"k3":2,"k5":""},{"k2":0,"k3":3,"k5":""}],"k4":""},{"k1":[{"k2":4,"k3":0,"k5":""},{"k2":0,"k3":5,"k5":""},{"k2":6,"k3":0,"k5":""}],"k4":"qqq"}],"bb":[{"k1":[],"k4":"www"},{"k1":[{"k2":7,"k3":8,"k5":""},{"k2":9,"k3":10,"k5":""},{"k2":11,"k3":12,"k5":""}],"k4":""}]},{"k1":"aa","k2":{"k3":"bb","k4":"c"}}]} +{"id":2,"data":["bar",{"aa":[{"k1":[{"k2":13,"k3":14,"k5":""},{"k2":15,"k3":16,"k5":""}],"k4":"www"}]},{"k1":"","k2":{"k3":"","k4":""}}]} 
+{"id":3,"data":["some",{"aa":[{"k1":[{"k2":0,"k3":20,"k5":"some"}],"k4":""}]},{"k1":"eee","k2":{"k3":"","k4":""}}]} +============= +{"aa":[{"k1":[{"k2":1,"k3":2,"k5":""},{"k2":0,"k3":3,"k5":""}],"k4":""},{"k1":[{"k2":4,"k3":0,"k5":""},{"k2":0,"k3":5,"k5":""},{"k2":6,"k3":0,"k5":""}],"k4":"qqq"}],"bb":[{"k1":[],"k4":"www"},{"k1":[{"k2":7,"k3":8,"k5":""},{"k2":9,"k3":10,"k5":""},{"k2":11,"k3":12,"k5":""}],"k4":""}]} +{"aa":[{"k1":[{"k2":13,"k3":14,"k5":""},{"k2":15,"k3":16,"k5":""}],"k4":"www"}],"bb":[]} +{"aa":[{"k1":[{"k2":0,"k3":20,"k5":"some"}],"k4":""}],"bb":[]} +============= +{"k1":[[{"k2":1,"k3":2,"k5":""},{"k2":0,"k3":3,"k5":""}],[{"k2":4,"k3":0,"k5":""},{"k2":0,"k3":5,"k5":""},{"k2":6,"k3":0,"k5":""}]],"k4":["","qqq"]} +{"k1":[[{"k2":13,"k3":14,"k5":""},{"k2":15,"k3":16,"k5":""}]],"k4":["www"]} +{"k1":[[{"k2":0,"k3":20,"k5":"some"}]],"k4":[""]} +============= +{"obj":{"k1":"aa","k2":{"k3":"bb","k4":"c"}}} +{"obj":{"k1":"","k2":{"k3":"","k4":""}}} +{"obj":{"k1":"eee","k2":{"k3":"","k4":""}}} diff --git a/tests/queries/0_stateless/01825_type_json_in_other_types.sh b/tests/queries/0_stateless/01825_type_json_in_other_types.sh new file mode 100755 index 00000000000..e9cf0bcaca1 --- /dev/null +++ b/tests/queries/0_stateless/01825_type_json_in_other_types.sh @@ -0,0 +1,91 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} -q "SET allow_experimental_object_type = 1" +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS t_json_nested" + +${CLICKHOUSE_CLIENT} -q " + CREATE TABLE t_json_nested + ( + id UInt32, + data Tuple(String, Map(String, Array(JSON)), JSON) + ) + ENGINE = MergeTree ORDER BY id" --allow_experimental_object_type 1 + +cat < toString(x)) FROM test_table); +SELECT * APPLY (x -> toString(x)) FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT * APPLY (x -> toString(x)) APPLY (x -> length(x)) FROM test_table); +SELECT * APPLY (x -> toString(x)) APPLY (x -> length(x)) FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT * APPLY (x -> toString(x)) APPLY length FROM test_table); +SELECT * APPLY (x -> toString(x)) APPLY length FROM test_table; + +SELECT '--'; +DESCRIBE (SELECT * FROM test_table); +SELECT * FROM test_table; + +SELECT 'EXCEPT transformer'; + +SELECT '--'; + +DESCRIBE (SELECT * EXCEPT (id) FROM test_table); +SELECT * EXCEPT (id) FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT COLUMNS(id, value) EXCEPT (id) FROM test_table); +SELECT COLUMNS(id, value) EXCEPT (id) FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT * EXCEPT (id) APPLY toString FROM test_table); +SELECT * EXCEPT (id) APPLY toString FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT COLUMNS(id, value) EXCEPT (id) APPLY toString FROM test_table); +SELECT COLUMNS(id, value) EXCEPT (id) APPLY toString FROM test_table; + +SELECT 'REPLACE transformer'; + +SELECT '--'; + +DESCRIBE (SELECT * REPLACE (5 AS id) FROM test_table); +SELECT * REPLACE (5 AS id) FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT COLUMNS(id, value) REPLACE (5 AS id) FROM test_table); +SELECT COLUMNS(id, value) REPLACE (5 AS id) FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT * REPLACE (5 AS id, 6 as value) FROM test_table); +SELECT * REPLACE (5 AS id, 6 as value) FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT COLUMNS(id, value) REPLACE (5 AS id, 6 as value) FROM test_table); +SELECT COLUMNS(id, value) REPLACE (5 AS id, 6 as value) FROM test_table; + +SELECT 'Combine EXCEPT, 
REPLACE, APPLY transformers'; + +SELECT '--'; + +DESCRIBE (SELECT * EXCEPT id REPLACE (5 AS id, 6 as value) APPLY toString FROM test_table); +SELECT * EXCEPT id REPLACE (5 AS id, 6 as value) APPLY toString FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT COLUMNS(id, value) EXCEPT id REPLACE (5 AS id, 6 as value) APPLY toString FROM test_table); +SELECT COLUMNS(id, value) EXCEPT id REPLACE (5 AS id, 6 as value) APPLY toString FROM test_table; diff --git a/tests/queries/0_stateless/02340_analyzer_functions.reference b/tests/queries/0_stateless/02340_analyzer_functions.reference new file mode 100644 index 00000000000..fe086c69e91 --- /dev/null +++ b/tests/queries/0_stateless/02340_analyzer_functions.reference @@ -0,0 +1,11 @@ +plus(1, 1) UInt16 +2 +-- +plus(dummy, dummy) UInt16 +0 +-- +plus(id, length(value)) UInt64 +5 +-- +concat(concat(toString(id), \'_\'), value) String +0_Value diff --git a/tests/queries/0_stateless/02340_analyzer_functions.sql b/tests/queries/0_stateless/02340_analyzer_functions.sql new file mode 100644 index 00000000000..101a5bfcc86 --- /dev/null +++ b/tests/queries/0_stateless/02340_analyzer_functions.sql @@ -0,0 +1,28 @@ +SET allow_experimental_analyzer = 1; + +DESCRIBE (SELECT 1 + 1); +SELECT 1 + 1; + +SELECT '--'; + +DESCRIBE (SELECT dummy + dummy); +SELECT dummy + dummy; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO test_table VALUES (0, 'Value'); + +SELECT '--'; + +DESCRIBE (SELECT id + length(value) FROM test_table); +SELECT id + length(value) FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT concat(concat(toString(id), '_'), (value)) FROM test_table); +SELECT concat(concat(toString(id), '_'), (value)) FROM test_table; diff --git a/tests/queries/0_stateless/02341_analyzer_aliases_basics.reference b/tests/queries/0_stateless/02341_analyzer_aliases_basics.reference new file mode 100644 index 00000000000..3733d6b6084 --- /dev/null +++ b/tests/queries/0_stateless/02341_analyzer_aliases_basics.reference @@ -0,0 +1,19 @@ +Aliases to constants +1 1 +4 2 1 3 4 +1 +1 1 +1 1 2 +1 2 1 +3 6 +Aliases to columns +0 0 0 +0 Value 0 Value +0 Value +Alias conflict with identifier inside expression +0 +1 +3 +Alias setting prefer_column_name_to_alias +0 +Value diff --git a/tests/queries/0_stateless/02341_analyzer_aliases_basics.sql b/tests/queries/0_stateless/02341_analyzer_aliases_basics.sql new file mode 100644 index 00000000000..52a1cd1dae8 --- /dev/null +++ b/tests/queries/0_stateless/02341_analyzer_aliases_basics.sql @@ -0,0 +1,50 @@ +SET allow_experimental_analyzer = 1; + +SELECT 'Aliases to constants'; + +SELECT 1 as a, a; +SELECT (c + 1) as d, (a + 1) as b, 1 AS a, (b + 1) as c, d; + +WITH 1 as a SELECT a; +WITH a as b SELECT 1 as a, b; + +SELECT 1 AS x, x, x + 1; +SELECT x, x + 1, 1 AS x; +SELECT x, 1 + (2 + (3 AS x)); + +SELECT a AS b, b AS a; -- { serverError 174 } + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO test_table VALUES (0, 'Value'); + +SELECT 'Aliases to columns'; + +SELECT id_alias_2, id AS id_alias, id_alias as id_alias_2 FROM test_table; +SELECT id_1, value_1, id as id_1, value as value_1 FROM test_table; + +WITH value_1 as value_2, id_1 as id_2, id AS id_1, value AS value_1 SELECT id_2, value_2 FROM test_table; + +SELECT (id + b) AS id, id as b FROM test_table; -- { serverError 174 } +SELECT (1 + b + 1 + id) AS id, b as c, id as b FROM test_table; -- { serverError 174 } + +SELECT 'Alias conflict 
with identifier inside expression'; + +SELECT id AS id FROM test_table; +SELECT (id + 1) AS id FROM test_table; +SELECT (id + 1 + 1 + 1 + id) AS id FROM test_table; + +SELECT 'Alias setting prefer_column_name_to_alias'; + +WITH id AS value SELECT value FROM test_table; + +SET prefer_column_name_to_alias = 1; +WITH id AS value SELECT value FROM test_table; +SET prefer_column_name_to_alias = 0; + +DROP TABLE test_table; diff --git a/tests/queries/0_stateless/02342_analyzer_compound_types.reference b/tests/queries/0_stateless/02342_analyzer_compound_types.reference new file mode 100644 index 00000000000..51e0bbe6e92 --- /dev/null +++ b/tests/queries/0_stateless/02342_analyzer_compound_types.reference @@ -0,0 +1,106 @@ +Constant tuple +(1,'Value') 1 Value +(1,'Value') 1 Value +(1,'Value') 1 +(1,'Value') Value +(1,'Value') 1 +(1,'Value') Value +Tuple +-- +id UInt64 +value Tuple(value_0_level_0 Tuple(value_0_level_1 String, value_1_level_1 String), value_1_level_0 String) +0 (('value_0_level_1','value_1_level_1'),'value_1_level_0') +-- +id UInt64 +value Tuple(value_0_level_0 Tuple(value_0_level_1 String, value_1_level_1 String), value_1_level_0 String) +0 (('value_0_level_1','value_1_level_1'),'value_1_level_0') +-- +value.value_0_level_0 Tuple(value_0_level_1 String, value_1_level_1 String) +value.value_1_level_0 String +('value_0_level_1','value_1_level_1') value_1_level_0 +-- +alias_value Tuple(value_0_level_0 Tuple(value_0_level_1 String, value_1_level_1 String), value_1_level_0 String) +alias_value.value_0_level_0 Tuple(value_0_level_1 String, value_1_level_1 String) +alias_value.value_1_level_0 String +(('value_0_level_1','value_1_level_1'),'value_1_level_0') ('value_0_level_1','value_1_level_1') value_1_level_0 +-- +alias_value Tuple(value_0_level_0 Tuple(value_0_level_1 String, value_1_level_1 String), value_1_level_0 String) +alias_value.value_0_level_0 Tuple(value_0_level_1 String, value_1_level_1 String) +alias_value.value_1_level_0 String +(('value_0_level_1','value_1_level_1'),'value_1_level_0') ('value_0_level_1','value_1_level_1') value_1_level_0 +-- +alias_value Tuple(value_0_level_0 Tuple(value_0_level_1 String, value_1_level_1 String), value_1_level_0 String) +toString(alias_value.value_0_level_0) String +toString(alias_value.value_1_level_0) String +(('value_0_level_1','value_1_level_1'),'value_1_level_0') (\'value_0_level_1\',\'value_1_level_1\') value_1_level_0 +-- +value.value_0_level_0 Tuple(value_0_level_1 String, value_1_level_1 String) +value.value_1_level_0 String +('value_0_level_1','value_1_level_1') value_1_level_0 +-- +toString(value.value_0_level_0) String +toString(value.value_1_level_0) String +(\'value_0_level_1\',\'value_1_level_1\') value_1_level_0 +-- +value.value_0_level_0.value_0_level_1 String +value.value_0_level_0.value_1_level_1 String +value_0_level_1 value_1_level_1 +-- +alias_value Tuple(value_0_level_1 String, value_1_level_1 String) +alias_value.value_0_level_1 String +alias_value.value_1_level_1 String +('value_0_level_1','value_1_level_1') value_0_level_1 value_1_level_1 +-- +alias_value Tuple(value_0_level_1 String, value_1_level_1 String) +alias_value.value_0_level_1 String +alias_value.value_1_level_1 String +('value_0_level_1','value_1_level_1') value_0_level_1 value_1_level_1 +-- +alias_value Tuple(value_0_level_1 String, value_1_level_1 String) +toString(alias_value.value_0_level_1) String +toString(alias_value.value_1_level_1) String +('value_0_level_1','value_1_level_1') value_0_level_1 value_1_level_1 +-- 
+value.value_0_level_0.value_0_level_1 String +value.value_0_level_0.value_1_level_1 String +value_0_level_1 value_1_level_1 +-- +toString(value.value_0_level_0.value_0_level_1) String +toString(value.value_0_level_0.value_1_level_1) String +value_0_level_1 value_1_level_1 +Nested +id UInt64 +value.value_0_level_0 Array(Nested(value_0_level_1 String, value_1_level_1 String)) +value.value_1_level_0 Array(String) +0 [[('value_0_level_1','value_1_level_1')]] ['value_1_level_0'] +-- +value.value_0_level_0 Array(Nested(value_0_level_1 String, value_1_level_1 String)) +value.value_1_level_0 Array(String) +[[('value_0_level_1','value_1_level_1')]] ['value_1_level_0'] +-- +value.value_0_level_0.value_0_level_1 Array(Array(String)) +value.value_0_level_0.value_1_level_1 Array(Array(String)) +[['value_0_level_1']] [['value_1_level_1']] +-- +value_alias Array(Nested(value_0_level_1 String, value_1_level_1 String)) +value_alias.value_0_level_1 Array(Array(String)) +value_alias.value_1_level_1 Array(Array(String)) +[[('value_0_level_1','value_1_level_1')]] [['value_0_level_1']] [['value_1_level_1']] +-- +value_alias Array(Nested(value_0_level_1 String, value_1_level_1 String)) +value_alias.value_0_level_1 Array(Array(String)) +value_alias.value_1_level_1 Array(Array(String)) +[[('value_0_level_1','value_1_level_1')]] [['value_0_level_1']] [['value_1_level_1']] +-- +value_alias Array(Nested(value_0_level_1 String, value_1_level_1 String)) +toString(value_alias.value_0_level_1) String +toString(value_alias.value_1_level_1) String +[[('value_0_level_1','value_1_level_1')]] [[\'value_0_level_1\']] [[\'value_1_level_1\']] +-- +value.value_0_level_0.value_0_level_1 Array(Array(String)) +value.value_0_level_0.value_1_level_1 Array(Array(String)) +[['value_0_level_1']] [['value_1_level_1']] +-- +toString(value.value_0_level_0.value_0_level_1) String +toString(value.value_0_level_0.value_1_level_1) String +[[\'value_0_level_1\']] [[\'value_1_level_1\']] diff --git a/tests/queries/0_stateless/02342_analyzer_compound_types.sql b/tests/queries/0_stateless/02342_analyzer_compound_types.sql new file mode 100644 index 00000000000..0fd96928496 --- /dev/null +++ b/tests/queries/0_stateless/02342_analyzer_compound_types.sql @@ -0,0 +1,195 @@ +SET allow_experimental_analyzer = 1; + +SELECT 'Constant tuple'; + +SELECT cast((1, 'Value'), 'Tuple (id UInt64, value String)') AS value, value.id, value.value; +SELECT cast((1, 'Value'), 'Tuple (id UInt64, value String)') AS value, value.* APPLY toString; +SELECT cast((1, 'Value'), 'Tuple (id UInt64, value String)') AS value, value.COLUMNS(id) APPLY toString; +SELECT cast((1, 'Value'), 'Tuple (id UInt64, value String)') AS value, value.COLUMNS(value) APPLY toString; +SELECT cast((1, 'Value'), 'Tuple (id UInt64, value String)') AS value, value.COLUMNS('i') APPLY toString; +SELECT cast((1, 'Value'), 'Tuple (id UInt64, value String)') AS value, value.COLUMNS('v') APPLY toString; + +SELECT 'Tuple'; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value Tuple(value_0_level_0 Tuple(value_0_level_1 String, value_1_level_1 String), value_1_level_0 String) +) ENGINE=MergeTree ORDER BY id; + +INSERT INTO test_table VALUES (0, (('value_0_level_1', 'value_1_level_1'), 'value_1_level_0')); + +SELECT '--'; + +DESCRIBE (SELECT * FROM test_table); +SELECT * FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT id, value FROM test_table); +SELECT id, value FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value.value_0_level_0, value.value_1_level_0 FROM 
test_table); +SELECT value.value_0_level_0, value.value_1_level_0 FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value AS alias_value, alias_value.value_0_level_0, alias_value.value_1_level_0 FROM test_table); +SELECT value AS alias_value, alias_value.value_0_level_0, alias_value.value_1_level_0 FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value AS alias_value, alias_value.* FROM test_table); +SELECT value AS alias_value, alias_value.* FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value AS alias_value, alias_value.* APPLY toString FROM test_table); +SELECT value AS alias_value, alias_value.* APPLY toString FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value.* FROM test_table); +SELECT value.* FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value.* APPLY toString FROM test_table); +SELECT value.* APPLY toString FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value.value_0_level_0.value_0_level_1, value.value_0_level_0.value_1_level_1 FROM test_table); +SELECT value.value_0_level_0.value_0_level_1, value.value_0_level_0.value_1_level_1 FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value.value_0_level_0 AS alias_value, alias_value.value_0_level_1, alias_value.value_1_level_1 FROM test_table); +SELECT value.value_0_level_0 AS alias_value, alias_value.value_0_level_1, alias_value.value_1_level_1 FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value.value_0_level_0 AS alias_value, alias_value.* FROM test_table); +SELECT value.value_0_level_0 AS alias_value, alias_value.* FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value.value_0_level_0 AS alias_value, alias_value.* APPLY toString FROM test_table); +SELECT value.value_0_level_0 AS alias_value, alias_value.* APPLY toString FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value.value_0_level_0.* FROM test_table); +SELECT value.value_0_level_0.* FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value.value_0_level_0.* APPLY toString FROM test_table); +SELECT value.value_0_level_0.* APPLY toString FROM test_table; + +DROP TABLE test_table; + +-- SELECT 'Array of tuples'; + +-- DROP TABLE IF EXISTS test_table; +-- CREATE TABLE test_table +-- ( +-- id UInt64, +-- value Array(Tuple(value_0_level_0 Tuple(value_0_level_1 String, value_1_level_1 String), value_1_level_0 String)) +-- ) ENGINE=MergeTree ORDER BY id; + +-- INSERT INTO test_table VALUES (0, [('value_0_level_1', 'value_1_level_1')], ['value_1_level_0']); + +-- DESCRIBE (SELECT * FROM test_table); +-- SELECT * FROM test_table; + +-- SELECT '--'; + +-- DESCRIBE (SELECT value.value_0_level_0, value.value_1_level_0 FROM test_table); +-- SELECT value.value_0_level_0, value.value_1_level_0 FROM test_table; + +-- SELECT '--'; + +-- DESCRIBE (SELECT value.value_0_level_0.value_0_level_1, value.value_0_level_0.value_1_level_1 FROM test_table); +-- SELECT value.value_0_level_0.value_0_level_1, value.value_0_level_0.value_1_level_1 FROM test_table; + +-- SELECT '--'; + +-- DESCRIBE (SELECT value.value_0_level_0 AS alias_value, alias_value.value_0_level_1, alias_value.value_1_level_1 FROM test_table); +-- SELECT value.value_0_level_0 AS alias_value, alias_value.value_0_level_1, alias_value.value_1_level_1 FROM test_table; + +-- SELECT '--'; + +-- DESCRIBE (SELECT value.value_0_level_0 AS alias_value, alias_value.* FROM test_table); +-- SELECT value.value_0_level_0 AS alias_value, alias_value.* FROM test_table; + +-- SELECT '--'; + +-- DESCRIBE (SELECT value.value_0_level_0 AS alias_value, alias_value.* APPLY toString FROM test_table); +-- 
SELECT value.value_0_level_0 AS alias_value, alias_value.* APPLY toString FROM test_table; + +-- SELECT '--'; + +-- DESCRIBE (SELECT value.value_0_level_0.* FROM test_table); +-- SELECT value.value_0_level_0.* FROM test_table; + +-- SELECT '--'; + +-- DESCRIBE (SELECT value.value_0_level_0.* APPLY toString FROM test_table); +-- SELECT value.value_0_level_0.* APPLY toString FROM test_table; + +-- DROP TABLE test_table; + +SELECT 'Nested'; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value Nested (value_0_level_0 Nested(value_0_level_1 String, value_1_level_1 String), value_1_level_0 String) +) ENGINE=MergeTree ORDER BY id; + +INSERT INTO test_table VALUES (0, [[('value_0_level_1', 'value_1_level_1')]], ['value_1_level_0']); + +DESCRIBE (SELECT * FROM test_table); +SELECT * FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value.value_0_level_0, value.value_1_level_0 FROM test_table); +SELECT value.value_0_level_0, value.value_1_level_0 FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value.value_0_level_0.value_0_level_1, value.value_0_level_0.value_1_level_1 FROM test_table); +SELECT value.value_0_level_0.value_0_level_1, value.value_0_level_0.value_1_level_1 FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value.value_0_level_0 AS value_alias, value_alias.value_0_level_1, value_alias.value_1_level_1 FROM test_table); +SELECT value.value_0_level_0 AS value_alias, value_alias.value_0_level_1, value_alias.value_1_level_1 FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value.value_0_level_0 AS value_alias, value_alias.* FROM test_table); +SELECT value.value_0_level_0 AS value_alias, value_alias.* FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value.value_0_level_0 AS value_alias, value_alias.* APPLY toString FROM test_table); +SELECT value.value_0_level_0 AS value_alias, value_alias.* APPLY toString FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value.value_0_level_0.* FROM test_table); +SELECT value.value_0_level_0.* FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value.value_0_level_0.* APPLY toString FROM test_table); +SELECT value.value_0_level_0.* APPLY toString FROM test_table; + +DROP TABLE test_table; diff --git a/tests/queries/0_stateless/02343_analyzer_column_transformers_strict.reference b/tests/queries/0_stateless/02343_analyzer_column_transformers_strict.reference new file mode 100644 index 00000000000..4904e950431 --- /dev/null +++ b/tests/queries/0_stateless/02343_analyzer_column_transformers_strict.reference @@ -0,0 +1,2 @@ +Value +1 2 diff --git a/tests/queries/0_stateless/02343_analyzer_column_transformers_strict.sql b/tests/queries/0_stateless/02343_analyzer_column_transformers_strict.sql new file mode 100644 index 00000000000..98ee7bc8f58 --- /dev/null +++ b/tests/queries/0_stateless/02343_analyzer_column_transformers_strict.sql @@ -0,0 +1,18 @@ +SET allow_experimental_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO test_table VALUES (0, 'Value'); + +SELECT * EXCEPT (id) FROM test_table; +SELECT * EXCEPT STRICT (id, value1) FROM test_table; -- { serverError 36 } + +SELECT * REPLACE STRICT (1 AS id, 2 AS value) FROM test_table; +SELECT * REPLACE STRICT (1 AS id, 2 AS value_1) FROM test_table; -- { serverError 36 } + +DROP TABLE IF EXISTS test_table; diff --git a/tests/queries/0_stateless/02343_analyzer_lambdas.reference b/tests/queries/0_stateless/02343_analyzer_lambdas.reference new file mode 100644 index 
00000000000..8d29481c255 --- /dev/null +++ b/tests/queries/0_stateless/02343_analyzer_lambdas.reference @@ -0,0 +1,29 @@ +Standalone lambdas +2 +1 \N [1,2,3] +1 \N [1,2,3] +1 +0 Value +Lambda as function parameter +[2,3,4] +[2,3,4] +['1','2','3'] ['1','2','3'] +['1','2','3'] ['1','2','3'] +[0,0,0] +[1,2,3] +['1_0','2_0','3_0'] +Lambda compound argument +(1,'Value') 1_Value +value_0_level_0_value_1_level_0 +Lambda matcher +0 +0 Value +[1,1,1] +[2,2,2] +0 1 1 +0 2 2 +Lambda untuple +(1,'Value') 1 Value +Lambda carrying +2 1 +1 0 diff --git a/tests/queries/0_stateless/02343_analyzer_lambdas.sql b/tests/queries/0_stateless/02343_analyzer_lambdas.sql new file mode 100644 index 00000000000..b90f7b32b57 --- /dev/null +++ b/tests/queries/0_stateless/02343_analyzer_lambdas.sql @@ -0,0 +1,69 @@ +SET allow_experimental_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO test_table VALUES (0, 'Value'); + +SELECT 'Standalone lambdas'; + +WITH x -> x + 1 AS lambda SELECT lambda(1); +WITH x -> toString(x) AS lambda SELECT lambda(1), lambda(NULL), lambda([1,2,3]); +WITH x -> toString(x) AS lambda_1, lambda_1 AS lambda_2, lambda_2 AS lambda_3 SELECT lambda_1(1), lambda_2(NULL), lambda_3([1,2,3]); + +WITH x -> x + 1 AS lambda SELECT lambda(id) FROM test_table; +WITH x -> toString(x) AS lambda SELECT lambda(id), lambda(value) FROM test_table; + +SELECT 'Lambda as function parameter'; + +SELECT arrayMap(x -> x + 1, [1,2,3]); +WITH x -> x + 1 AS lambda SELECT arrayMap(lambda, [1,2,3]); +SELECT arrayMap((x -> toString(x)) as lambda, [1,2,3]), arrayMap(lambda, ['1','2','3']); +WITH x -> toString(x) AS lambda_1 SELECT arrayMap(lambda_1 AS lambda_2, [1,2,3]), arrayMap(lambda_2, ['1', '2', '3']); + +SELECT arrayMap(x -> id, [1,2,3]) FROM test_table; +SELECT arrayMap(x -> x + id, [1,2,3]) FROM test_table; +SELECT arrayMap((x -> concat(concat(toString(x), '_'), toString(id))) as lambda, [1,2,3]) FROM test_table; + +SELECT 'Lambda compound argument'; + +DROP TABLE IF EXISTS test_table_tuple; +CREATE TABLE test_table_tuple +( + id UInt64, + value Tuple(value_0_level_0 String, value_1_level_0 String) +) ENGINE=TinyLog; + +INSERT INTO test_table_tuple VALUES (0, ('value_0_level_0', 'value_1_level_0')); + +WITH x -> concat(concat(toString(x.id), '_'), x.value) AS lambda SELECT cast((1, 'Value'), 'Tuple (id UInt64, value String)') AS value, lambda(value); +WITH x -> concat(concat(x.value_0_level_0, '_'), x.value_1_level_0) AS lambda SELECT lambda(value) FROM test_table_tuple; + +SELECT 'Lambda matcher'; + +WITH x -> * AS lambda SELECT lambda(1); +WITH x -> * AS lambda SELECT lambda(1) FROM test_table; + +WITH cast(tuple(1), 'Tuple (value UInt64)') AS compound_value SELECT arrayMap(x -> compound_value.*, [1,2,3]); +WITH cast(tuple(1, 1), 'Tuple (value_1 UInt64, value_2 UInt64)') AS compound_value SELECT arrayMap(x -> compound_value.*, [1,2,3]); -- { serverError 1 } +WITH cast(tuple(1, 1), 'Tuple (value_1 UInt64, value_2 UInt64)') AS compound_value SELECT arrayMap(x -> plus(compound_value.*), [1,2,3]); + +WITH cast(tuple(1), 'Tuple (value UInt64)') AS compound_value SELECT id, test_table.* APPLY x -> compound_value.* FROM test_table; +WITH cast(tuple(1, 1), 'Tuple (value_1 UInt64, value_2 UInt64)') AS compound_value SELECT id, test_table.* APPLY x -> compound_value.* FROM test_table; -- { serverError 1 } +WITH cast(tuple(1, 1), 'Tuple (value_1 UInt64, value_2 UInt64)') AS compound_value SELECT id, test_table.* APPLY x -> 
plus(compound_value.*) FROM test_table; + +SELECT 'Lambda untuple'; + +WITH x -> untuple(x) AS lambda SELECT cast((1, 'Value'), 'Tuple (id UInt64, value String)') AS value, lambda(value); + +SELECT 'Lambda carrying'; + +WITH (functor, x) -> functor(x) AS lambda, x -> x + 1 AS functor_1, x -> toString(x) AS functor_2 SELECT lambda(functor_1, 1), lambda(functor_2, 1); +WITH (functor, x) -> functor(x) AS lambda, x -> x + 1 AS functor_1, x -> toString(x) AS functor_2 SELECT lambda(functor_1, id), lambda(functor_2, id) FROM test_table; + +DROP TABLE test_table_tuple; +DROP TABLE test_table; diff --git a/tests/queries/0_stateless/02343_analyzer_lambdas_issue_28083.reference b/tests/queries/0_stateless/02343_analyzer_lambdas_issue_28083.reference new file mode 100644 index 00000000000..a2ed8e55d62 --- /dev/null +++ b/tests/queries/0_stateless/02343_analyzer_lambdas_issue_28083.reference @@ -0,0 +1,2 @@ +n [('n',1)] +[('n',1)] diff --git a/tests/queries/0_stateless/02343_analyzer_lambdas_issue_28083.sql b/tests/queries/0_stateless/02343_analyzer_lambdas_issue_28083.sql new file mode 100644 index 00000000000..3b780e1dec3 --- /dev/null +++ b/tests/queries/0_stateless/02343_analyzer_lambdas_issue_28083.sql @@ -0,0 +1,17 @@ +SET allow_experimental_analyzer = 1; + +select so, + r +from + (select [('y',0),('n',1)] as cg, + if( arrayMap( x -> x.1, cg ) != ['y', 'n'], 'y', 'n') as so, + arrayFilter( x -> x.1 = so , cg) as r + ); + +select + r +from + (select [('y',0),('n',1)] as cg, + if( arrayMap( x -> x.1, cg ) != ['y', 'n'], 'y', 'n') as so, + arrayFilter( x -> x.1 = so , cg) as r + ); diff --git a/tests/queries/0_stateless/02343_analyzer_lambdas_issue_36677.reference b/tests/queries/0_stateless/02343_analyzer_lambdas_issue_36677.reference new file mode 100644 index 00000000000..bec52aa46b6 --- /dev/null +++ b/tests/queries/0_stateless/02343_analyzer_lambdas_issue_36677.reference @@ -0,0 +1,2 @@ +2.1999999999999997 289.99999999999994 [1,2,3,4] [0.1,0.2,0.1,0.2] +2.1999999999999997 289.99999999999994 [1,2,3,4] [0.1,0.2,0.1,0.2] diff --git a/tests/queries/0_stateless/02343_analyzer_lambdas_issue_36677.sql b/tests/queries/0_stateless/02343_analyzer_lambdas_issue_36677.sql new file mode 100644 index 00000000000..b07f3f33ac3 --- /dev/null +++ b/tests/queries/0_stateless/02343_analyzer_lambdas_issue_36677.sql @@ -0,0 +1,14 @@ +SET allow_experimental_analyzer = 1; + +SELECT + arraySum(x -> ((x.1) / ((x.2) * (x.2))), arrayZip(mag, magerr)) / arraySum(x -> (1. / (x * x)), magerr) AS weightedmeanmag, + arraySum(x -> ((((x.1) - weightedmeanmag) * ((x.1) - weightedmeanmag)) / ((x.2) * (x.2))), arrayZip(mag, magerr)) AS chi2, + [1, 2, 3, 4] AS mag, + [0.1, 0.2, 0.1, 0.2] AS magerr; + +SELECT + arraySum(x -> ((x.1) / ((x.2) * (x.2))), arrayZip(mag, magerr)) / arraySum(x -> (1. 
/ (x * x)), magerr) AS weightedmeanmag, + arraySum(x -> ((((x.1) - weightedmeanmag) * ((x.1) - weightedmeanmag)) / ((x.2) * (x.2))), arrayZip(mag, magerr)) AS chi2, + [1, 2, 3, 4] AS mag, + [0.1, 0.2, 0.1, 0.2] AS magerr +WHERE isFinite(chi2) diff --git a/tests/queries/0_stateless/02344_analyzer_multiple_aliases_for_expression.reference b/tests/queries/0_stateless/02344_analyzer_multiple_aliases_for_expression.reference new file mode 100644 index 00000000000..e0d1bb800d2 --- /dev/null +++ b/tests/queries/0_stateless/02344_analyzer_multiple_aliases_for_expression.reference @@ -0,0 +1,4 @@ +1 1 +0 0 +2 +1 1 diff --git a/tests/queries/0_stateless/02344_analyzer_multiple_aliases_for_expression.sql b/tests/queries/0_stateless/02344_analyzer_multiple_aliases_for_expression.sql new file mode 100644 index 00000000000..cd1bca8285b --- /dev/null +++ b/tests/queries/0_stateless/02344_analyzer_multiple_aliases_for_expression.sql @@ -0,0 +1,27 @@ +SET allow_experimental_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO test_table VALUES (0, 'Value'); + +SELECT 1 AS value, 1 AS value; +SELECT id AS value, id AS value FROM test_table; +WITH x -> x + 1 AS lambda, x -> x + 1 AS lambda SELECT lambda(1); +SELECT (SELECT 1) AS subquery, (SELECT 1) AS subquery; + +SELECT 1 AS value, 2 AS value; -- { serverError 179 } +SELECT plus(1, 1) AS value, 2 AS value; -- { serverError 179 } +SELECT (SELECT 1) AS subquery, 1 AS subquery; -- { serverError 179 } +WITH x -> x + 1 AS lambda, x -> x + 2 AS lambda SELECT lambda(1); -- { serverError 179 } +WITH x -> x + 1 AS lambda SELECT (SELECT 1) AS lambda; -- { serverError 179 } +WITH x -> x + 1 AS lambda SELECT 1 AS lambda; -- { serverError 179 } +SELECT id AS value, value AS value FROM test_table; -- { serverError 179 } +SELECT id AS value_1, value AS value_1 FROM test_table; -- { serverError 179 } +SELECT id AS value, (id + 1) AS value FROM test_table; -- { serverError 179 } + +DROP TABLE test_table; diff --git a/tests/queries/0_stateless/02344_show_caches.reference b/tests/queries/0_stateless/02344_show_caches.reference index 0c5957edb82..68882f63e1f 100644 --- a/tests/queries/0_stateless/02344_show_caches.reference +++ b/tests/queries/0_stateless/02344_show_caches.reference @@ -1,12 +1,13 @@ cached_azure s3_cache_2 +s3_cache +s3_cache_3 +s3_cache_multi s3_cache_4 s3_cache_5 local_cache +s3_cache_6 s3_cache_small local_cache_2 local_cache_3 -s3_cache_multi -s3_cache_3 -s3_cache s3_cache_multi_2 diff --git a/tests/queries/0_stateless/02345_analyzer_subqueries.reference b/tests/queries/0_stateless/02345_analyzer_subqueries.reference new file mode 100644 index 00000000000..1e70be9ef52 --- /dev/null +++ b/tests/queries/0_stateless/02345_analyzer_subqueries.reference @@ -0,0 +1,27 @@ +Scalar subqueries +1 +1 +0 +Value +(0,'Value') +Subqueries FROM section +1 +1 +1 +1 +1 +1 +0 Value +0 Value +2 +Subqueries CTE +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 diff --git a/tests/queries/0_stateless/02345_analyzer_subqueries.sql b/tests/queries/0_stateless/02345_analyzer_subqueries.sql new file mode 100644 index 00000000000..c0cc242b57b --- /dev/null +++ b/tests/queries/0_stateless/02345_analyzer_subqueries.sql @@ -0,0 +1,51 @@ +SET allow_experimental_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO test_table VALUES (0, 'Value'); + +SELECT 'Scalar subqueries'; + +SELECT (SELECT 1); +WITH 1 AS a SELECT (SELECT a); + 
+SELECT (SELECT id FROM test_table); +SELECT (SELECT value FROM test_table); +SELECT (SELECT id, value FROM test_table); + +SELECT 'Subqueries FROM section'; + +SELECT a FROM (SELECT 1 AS a) AS b; +SELECT b.a FROM (SELECT 1 AS a) AS b; + +SELECT a FROM (SELECT 1 AS a) AS b; +SELECT b.a FROM (SELECT 1 AS a) AS b; + +WITH 1 AS global_a SELECT a FROM (SELECT global_a AS a) AS b; +WITH 1 AS global_a SELECT b.a FROM (SELECT global_a AS a) AS b; + +SELECT * FROM (SELECT * FROM (SELECT * FROM test_table)); +SELECT * FROM (SELECT id, value FROM (SELECT * FROM test_table)); + +WITH 1 AS a SELECT (SELECT * FROM (SELECT * FROM (SELECT a + 1))); + +SELECT 'Subqueries CTE'; + +WITH subquery AS (SELECT 1 AS a) SELECT * FROM subquery; +WITH subquery AS (SELECT 1 AS a) SELECT a FROM subquery; +WITH subquery AS (SELECT 1 AS a) SELECT subquery.a FROM subquery; +WITH subquery AS (SELECT 1 AS a) SELECT subquery.* FROM subquery; +WITH subquery AS (SELECT 1 AS a) SELECT subquery.* APPLY toString FROM subquery; +WITH subquery AS (SELECT 1 AS a) SELECT subquery_alias.a FROM subquery AS subquery_alias; +WITH subquery AS (SELECT 1 AS a) SELECT subquery_alias.* FROM subquery AS subquery_alias; +WITH subquery AS (SELECT 1 AS a) SELECT subquery_alias.* APPLY toString FROM subquery AS subquery_alias; + +WITH subquery_1 AS (SELECT 1 AS a), subquery_2 AS (SELECT 1 + subquery_1.a FROM subquery_1) SELECT * FROM subquery_2; +WITH subquery_1 AS (SELECT 1 AS a), subquery_2 AS (SELECT (1 + subquery_1.a) AS a FROM subquery_1) SELECT subquery_2.a FROM subquery_2; + +DROP TABLE test_table; diff --git a/tests/queries/0_stateless/02346_additional_filters.reference b/tests/queries/0_stateless/02346_additional_filters.reference index 22d53173e71..0a08995223d 100644 --- a/tests/queries/0_stateless/02346_additional_filters.reference +++ b/tests/queries/0_stateless/02346_additional_filters.reference @@ -60,6 +60,14 @@ select * from remote('127.0.0.{1,2}', system.one) settings additional_table_filt 0 0 select * from remote('127.0.0.{1,2}', system.one) settings additional_table_filters={'system.one' : 'dummy != 0'}; +select * from distr_table settings additional_table_filters={'distr_table' : 'x = 2'}; +2 bb +2 bb +select * from distr_table settings additional_table_filters={'distr_table' : 'x != 2 and x != 3'}; +1 a +4 dddd +1 a +4 dddd select * from system.numbers limit 5; 0 1 diff --git a/tests/queries/0_stateless/02346_additional_filters.sql b/tests/queries/0_stateless/02346_additional_filters.sql index 9e0bee4549b..f6b665713ec 100644 --- a/tests/queries/0_stateless/02346_additional_filters.sql +++ b/tests/queries/0_stateless/02346_additional_filters.sql @@ -1,3 +1,4 @@ +-- Tags: distributed drop table if exists table_1; drop table if exists table_2; drop table if exists v_numbers; @@ -6,6 +7,8 @@ drop table if exists mv_table; create table table_1 (x UInt32, y String) engine = MergeTree order by x; insert into table_1 values (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd'); +CREATE TABLE distr_table (x UInt32, y String) ENGINE = Distributed(test_cluster_two_shards, currentDatabase(), 'table_1'); + -- { echoOn } select * from table_1; @@ -29,6 +32,9 @@ select x from table_1 prewhere x != 2 where x != 2 settings additional_table_fil select * from remote('127.0.0.{1,2}', system.one) settings additional_table_filters={'system.one' : 'dummy = 0'}; select * from remote('127.0.0.{1,2}', system.one) settings additional_table_filters={'system.one' : 'dummy != 0'}; +select * from distr_table settings additional_table_filters={'distr_table' : 'x = 
2'}; +select * from distr_table settings additional_table_filters={'distr_table' : 'x != 2 and x != 3'}; + select * from system.numbers limit 5; select * from system.numbers as t limit 5 settings additional_table_filters={'t' : 'number % 2 != 0'}; select * from system.numbers limit 5 settings additional_table_filters={'system.numbers' : 'number != 3'}; diff --git a/tests/queries/0_stateless/02346_additional_filters_distr.reference b/tests/queries/0_stateless/02346_additional_filters_distr.reference new file mode 100644 index 00000000000..81814b5e7bb --- /dev/null +++ b/tests/queries/0_stateless/02346_additional_filters_distr.reference @@ -0,0 +1,3 @@ +4 dddd +5 a +6 bb diff --git a/tests/queries/0_stateless/02346_additional_filters_distr.sql b/tests/queries/0_stateless/02346_additional_filters_distr.sql new file mode 100644 index 00000000000..bc9c1715c72 --- /dev/null +++ b/tests/queries/0_stateless/02346_additional_filters_distr.sql @@ -0,0 +1,20 @@ +-- Tags: no-parallel, distributed + +create database if not exists shard_0; +create database if not exists shard_1; + +drop table if exists dist_02346; +drop table if exists shard_0.data_02346; +drop table if exists shard_1.data_02346; + +create table shard_0.data_02346 (x UInt32, y String) engine = MergeTree order by x settings index_granularity = 2; +insert into shard_0.data_02346 values (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd'); + +create table shard_1.data_02346 (x UInt32, y String) engine = MergeTree order by x settings index_granularity = 2; +insert into shard_1.data_02346 values (5, 'a'), (6, 'bb'), (7, 'ccc'), (8, 'dddd'); + +create table dist_02346 (x UInt32, y String) engine=Distributed('test_cluster_two_shards_different_databases', /* default_database= */ '', data_02346); + +set max_rows_to_read=4; + +select * from dist_02346 order by x settings additional_table_filters={'dist_02346' : 'x > 3 and x < 7'}; diff --git a/tests/queries/0_stateless/02346_additional_filters_index.reference b/tests/queries/0_stateless/02346_additional_filters_index.reference new file mode 100644 index 00000000000..d4b9509cb3c --- /dev/null +++ b/tests/queries/0_stateless/02346_additional_filters_index.reference @@ -0,0 +1,30 @@ +-- { echoOn } +set max_rows_to_read = 2; +select * from table_1 order by x settings additional_table_filters={'table_1' : 'x > 3'}; +4 dddd +select * from table_1 order by x settings additional_table_filters={'table_1' : 'x < 3'}; +1 a +2 bb +select * from table_1 order by x settings additional_table_filters={'table_1' : 'length(y) >= 3'}; +3 ccc +4 dddd +select * from table_1 order by x settings additional_table_filters={'table_1' : 'length(y) < 3'}; +1 a +2 bb +set max_rows_to_read = 4; +select * from distr_table order by x settings additional_table_filters={'distr_table' : 'x > 3'}; +4 dddd +4 dddd +select * from distr_table order by x settings additional_table_filters={'distr_table' : 'x < 3'}; +1 a +1 a +2 bb +2 bb +select * from distr_table order by x settings additional_table_filters={'distr_table' : 'length(y) > 3'}; +4 dddd +4 dddd +select * from distr_table order by x settings additional_table_filters={'distr_table' : 'length(y) < 3'}; +1 a +1 a +2 bb +2 bb diff --git a/tests/queries/0_stateless/02346_additional_filters_index.sql b/tests/queries/0_stateless/02346_additional_filters_index.sql new file mode 100644 index 00000000000..0d40cc1f898 --- /dev/null +++ b/tests/queries/0_stateless/02346_additional_filters_index.sql @@ -0,0 +1,24 @@ +-- Tags: distributed + +create table table_1 (x UInt32, y String, INDEX a 
(length(y)) TYPE minmax GRANULARITY 1) engine = MergeTree order by x settings index_granularity = 2; +insert into table_1 values (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd'); + +CREATE TABLE distr_table (x UInt32, y String) ENGINE = Distributed(test_cluster_two_shards, currentDatabase(), 'table_1'); + +-- { echoOn } +set max_rows_to_read = 2; + +select * from table_1 order by x settings additional_table_filters={'table_1' : 'x > 3'}; +select * from table_1 order by x settings additional_table_filters={'table_1' : 'x < 3'}; + +select * from table_1 order by x settings additional_table_filters={'table_1' : 'length(y) >= 3'}; +select * from table_1 order by x settings additional_table_filters={'table_1' : 'length(y) < 3'}; + +set max_rows_to_read = 4; + +select * from distr_table order by x settings additional_table_filters={'distr_table' : 'x > 3'}; +select * from distr_table order by x settings additional_table_filters={'distr_table' : 'x < 3'}; + +select * from distr_table order by x settings additional_table_filters={'distr_table' : 'length(y) > 3'}; +select * from distr_table order by x settings additional_table_filters={'distr_table' : 'length(y) < 3'}; + diff --git a/tests/queries/0_stateless/02354_annoy.sql b/tests/queries/0_stateless/02354_annoy.sql index 8a8d023a104..654a4b545ea 100644 --- a/tests/queries/0_stateless/02354_annoy.sql +++ b/tests/queries/0_stateless/02354_annoy.sql @@ -44,3 +44,71 @@ ORDER BY L2Distance(embedding, [0.0, 0.0]) LIMIT 3; -- { serverError 80 } DROP TABLE IF EXISTS 02354_annoy; + +-- ------------------------------------ +-- Check that weird base columns are rejected + +-- Index spans >1 column + +CREATE TABLE 02354_annoy +( + id Int32, + embedding Array(Float32), + INDEX annoy_index (embedding, id) TYPE annoy(100) GRANULARITY 1 +) +ENGINE = MergeTree +ORDER BY id +SETTINGS index_granularity=5; -- {serverError 7 } + +-- Index must be created on Array(Float32) or Tuple(Float32) + +CREATE TABLE 02354_annoy +( + id Int32, + embedding Float32, + INDEX annoy_index embedding TYPE annoy(100) GRANULARITY 1 +) +ENGINE = MergeTree +ORDER BY id +SETTINGS index_granularity=5; -- {serverError 44 } + + +CREATE TABLE 02354_annoy +( + id Int32, + embedding Array(Float64), + INDEX annoy_index embedding TYPE annoy(100) GRANULARITY 1 +) +ENGINE = MergeTree +ORDER BY id +SETTINGS index_granularity=5; -- {serverError 44 } + +CREATE TABLE 02354_annoy +( + id Int32, + embedding Tuple(Float32, Float64), + INDEX annoy_index embedding TYPE annoy(100) GRANULARITY 1 +) +ENGINE = MergeTree +ORDER BY id +SETTINGS index_granularity=5; -- {serverError 44 } + +CREATE TABLE 02354_annoy +( + id Int32, + embedding Array(LowCardinality(Float32)), + INDEX annoy_index embedding TYPE annoy(100) GRANULARITY 1 +) +ENGINE = MergeTree +ORDER BY id +SETTINGS index_granularity=5; -- {serverError 44 } + +CREATE TABLE 02354_annoy +( + id Int32, + embedding Array(Nullable(Float32)), + INDEX annoy_index embedding TYPE annoy(100) GRANULARITY 1 +) +ENGINE = MergeTree +ORDER BY id +SETTINGS index_granularity=5; -- {serverError 44 } diff --git a/tests/queries/0_stateless/02366_explain_query_tree.reference b/tests/queries/0_stateless/02366_explain_query_tree.reference new file mode 100644 index 00000000000..769d7661e68 --- /dev/null +++ b/tests/queries/0_stateless/02366_explain_query_tree.reference @@ -0,0 +1,102 @@ +QUERY id: 0 + PROJECTION + LIST id: 1, nodes: 1 + CONSTANT id: 2, constant_value: UInt64_1, constant_value_type: UInt8 + JOIN TREE + IDENTIFIER id: 3, identifier: system.one +-- +QUERY id: 0 + 
PROJECTION + LIST id: 1, nodes: 2 + IDENTIFIER id: 2, identifier: id + IDENTIFIER id: 3, identifier: value + JOIN TREE + IDENTIFIER id: 4, identifier: test_table +-- +QUERY id: 0 + PROJECTION COLUMNS + id UInt64 + value String + PROJECTION + LIST id: 1, nodes: 2 + COLUMN id: 2, column_name: id, result_type: UInt64, source_id: 3 + COLUMN id: 4, column_name: value, result_type: String, source_id: 3 + JOIN TREE + TABLE id: 3, table_name: default.test_table +-- +QUERY id: 0 + PROJECTION + LIST id: 1, nodes: 1 + FUNCTION id: 2, function_name: arrayMap, function_type: ordinary + ARGUMENTS + LIST id: 3, nodes: 2 + LAMBDA id: 4 + ARGUMENTS + LIST id: 5, nodes: 1 + IDENTIFIER id: 6, identifier: x + EXPRESSION + FUNCTION id: 7, function_name: plus, function_type: ordinary + ARGUMENTS + LIST id: 8, nodes: 2 + IDENTIFIER id: 9, identifier: x + IDENTIFIER id: 10, identifier: id + CONSTANT id: 11, constant_value: Array_[UInt64_1, UInt64_2, UInt64_3], constant_value_type: Array(UInt8) + JOIN TREE + IDENTIFIER id: 12, identifier: test_table +-- +QUERY id: 0 + PROJECTION COLUMNS + arrayMap(lambda(tuple(x), plus(x, 1)), [1, 2, 3]) Array(UInt16) + PROJECTION + LIST id: 1, nodes: 1 + FUNCTION id: 2, function_name: arrayMap, function_type: ordinary, result_type: Array(UInt16) + ARGUMENTS + LIST id: 3, nodes: 2 + LAMBDA id: 4 + ARGUMENTS + LIST id: 5, nodes: 1 + COLUMN id: 6, column_name: x, result_type: UInt8, source_id: 4 + EXPRESSION + FUNCTION id: 7, function_name: plus, function_type: ordinary, result_type: UInt16 + ARGUMENTS + LIST id: 8, nodes: 2 + COLUMN id: 6, column_name: x, result_type: UInt8, source_id: 4 + CONSTANT id: 9, constant_value: UInt64_1, constant_value_type: UInt8 + CONSTANT id: 10, constant_value: Array_[UInt64_1, UInt64_2, UInt64_3], constant_value_type: Array(UInt8) + JOIN TREE + TABLE id: 11, table_name: default.test_table +-- +QUERY id: 0 + WITH + LIST id: 1, nodes: 1 + LAMBDA id: 2, alias: lambda + ARGUMENTS + LIST id: 3, nodes: 1 + IDENTIFIER id: 4, identifier: x + EXPRESSION + FUNCTION id: 5, function_name: plus, function_type: ordinary + ARGUMENTS + LIST id: 6, nodes: 2 + IDENTIFIER id: 7, identifier: x + CONSTANT id: 8, constant_value: UInt64_1, constant_value_type: UInt8 + PROJECTION + LIST id: 9, nodes: 1 + FUNCTION id: 10, function_name: lambda, function_type: ordinary + ARGUMENTS + LIST id: 11, nodes: 1 + IDENTIFIER id: 12, identifier: id + JOIN TREE + IDENTIFIER id: 13, identifier: test_table +-- +QUERY id: 0 + PROJECTION COLUMNS + lambda(id) UInt64 + PROJECTION + LIST id: 1, nodes: 1 + FUNCTION id: 2, function_name: plus, function_type: ordinary, result_type: UInt64 + ARGUMENTS + LIST id: 3, nodes: 2 + COLUMN id: 4, column_name: id, result_type: UInt64, source_id: 5 + CONSTANT id: 6, constant_value: UInt64_1, constant_value_type: UInt8 + JOIN TREE + TABLE id: 5, table_name: default.test_table diff --git a/tests/queries/0_stateless/02366_explain_query_tree.sql b/tests/queries/0_stateless/02366_explain_query_tree.sql new file mode 100644 index 00000000000..c942f0cac37 --- /dev/null +++ b/tests/queries/0_stateless/02366_explain_query_tree.sql @@ -0,0 +1,38 @@ +SET allow_experimental_analyzer = 1; + +EXPLAIN QUERY TREE SELECT 1; + +SELECT '--'; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO test_table VALUES (0, 'Value'); + +EXPLAIN QUERY TREE SELECT id, value FROM test_table; + +SELECT '--'; + +EXPLAIN QUERY TREE run_passes = 1 SELECT id, value FROM test_table; + +SELECT '--'; + +EXPLAIN QUERY TREE 
SELECT arrayMap(x -> x + id, [1, 2, 3]) FROM test_table; + +SELECT '--'; + +EXPLAIN QUERY TREE run_passes = 1 SELECT arrayMap(x -> x + 1, [1, 2, 3]) FROM test_table; + +SELECT '--'; + +EXPLAIN QUERY TREE WITH x -> x + 1 AS lambda SELECT lambda(id) FROM test_table; + +SELECT '--'; + +EXPLAIN QUERY TREE run_passes = 1 WITH x -> x + 1 AS lambda SELECT lambda(id) FROM test_table; + +DROP TABLE test_table; diff --git a/tests/queries/0_stateless/02367_analyzer_table_alias_columns.reference b/tests/queries/0_stateless/02367_analyzer_table_alias_columns.reference new file mode 100644 index 00000000000..1f4875e38c2 --- /dev/null +++ b/tests/queries/0_stateless/02367_analyzer_table_alias_columns.reference @@ -0,0 +1,3 @@ +0 6 5 +0 Value 2 +0 Value 2 diff --git a/tests/queries/0_stateless/02367_analyzer_table_alias_columns.sql b/tests/queries/0_stateless/02367_analyzer_table_alias_columns.sql new file mode 100644 index 00000000000..f41680cd9f4 --- /dev/null +++ b/tests/queries/0_stateless/02367_analyzer_table_alias_columns.sql @@ -0,0 +1,41 @@ +SET allow_experimental_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + alias_value_1 ALIAS id + alias_value_2 + 1, + alias_value_2 ALIAS id + 5 +) ENGINE=TinyLog; + +INSERT INTO test_table VALUES (0); + +SELECT id, alias_value_1, alias_value_2 FROM test_table; + +DROP TABLE test_table; + +CREATE TABLE test_table +( + id UInt64, + value String, + alias_value ALIAS ((id + 1) AS inside_value) + inside_value +) ENGINE=TinyLog; + +INSERT INTO test_table VALUES (0, 'Value'); + +SELECT id, value, alias_value FROM test_table; + +DROP TABLE test_table; + +CREATE TABLE test_table +( + id UInt64, + value String, + alias_value ALIAS ((id + 1) AS value) + value +) ENGINE=TinyLog; + +INSERT INTO test_table VALUES (0, 'Value'); + +SELECT id, value, alias_value FROM test_table; + +DROP TABLE test_table; diff --git a/tests/queries/0_stateless/02368_analyzer_table_functions.reference b/tests/queries/0_stateless/02368_analyzer_table_functions.reference new file mode 100644 index 00000000000..0c1bc4d90a2 --- /dev/null +++ b/tests/queries/0_stateless/02368_analyzer_table_functions.reference @@ -0,0 +1,6 @@ +1 2 [1,2,3] [['abc'],[],['d','e']] +1 2 [1,2,3] [['abc'],[],['d','e']] +1 2 [1,2,3] [['abc'],[],['d','e']] +1 2 [1,2,3] [['abc'],[],['d','e']] +1 2 [1,2,3] [['abc'],[],['d','e']] +CSV 1,2,"[1,2,3]","[[\'abc\'], [], [\'d\', \'e\']]" 1 2 [1,2,3] [['abc'],[],['d','e']] diff --git a/tests/queries/0_stateless/02368_analyzer_table_functions.sql b/tests/queries/0_stateless/02368_analyzer_table_functions.sql new file mode 100644 index 00000000000..456e095c6c1 --- /dev/null +++ b/tests/queries/0_stateless/02368_analyzer_table_functions.sql @@ -0,0 +1,10 @@ +SET allow_experimental_analyzer = 1; + +SELECT c1, c2, c3, c4 FROM format('CSV', '1,2,"[1,2,3]","[[\'abc\'], [], [\'d\', \'e\']]"'); +SELECT f.c1, f.c2, f.c3, f.c4 FROM format('CSV', '1,2,"[1,2,3]","[[\'abc\'], [], [\'d\', \'e\']]"') AS f; +SELECT f.* FROM format('CSV', '1,2,"[1,2,3]","[[\'abc\'], [], [\'d\', \'e\']]"') AS f; + +WITH 'CSV', '1,2,"[1,2,3]","[[\'abc\'], [], [\'d\', \'e\']]"' AS format_value SELECT c1, c2, c3, c4 FROM format('CSV', format_value); +WITH concat('1,2,"[1,2,3]",','"[[\'abc\'], [], [\'d\', \'e\']]"') AS format_value SELECT c1, c2, c3, c4 FROM format('CSV', format_value); + +SELECT format, format_value, c1, c2, c3, c4 FROM format('CSV' AS format, '1,2,"[1,2,3]","[[\'abc\'], [], [\'d\', \'e\']]"' AS format_value); diff --git 
a/tests/queries/0_stateless/02369_analyzer_array_join_function.reference b/tests/queries/0_stateless/02369_analyzer_array_join_function.reference new file mode 100644 index 00000000000..7025cff6909 --- /dev/null +++ b/tests/queries/0_stateless/02369_analyzer_array_join_function.reference @@ -0,0 +1,44 @@ +1 +2 +3 +-- +1 1 +2 2 +3 3 +-- +1 1 +2 2 +3 3 +-- +[1,2,3] 1 +[1,2,3] 2 +[1,2,3] 3 +-- +1 1 +1 2 +1 3 +1 4 +2 1 +2 2 +2 3 +2 4 +3 1 +3 2 +3 3 +3 4 +-- +[1,1,1] +[2,2,2] +[3,3,3] +-- +[2,3,4] 1 +[3,4,5] 2 +[4,5,6] 3 +-- +0 1 +0 2 +0 3 +-- +0 1 1 +0 2 2 +0 3 3 diff --git a/tests/queries/0_stateless/02369_analyzer_array_join_function.sql b/tests/queries/0_stateless/02369_analyzer_array_join_function.sql new file mode 100644 index 00000000000..9a9939d2a2f --- /dev/null +++ b/tests/queries/0_stateless/02369_analyzer_array_join_function.sql @@ -0,0 +1,59 @@ +SET allow_experimental_analyzer = 1; + +SELECT arrayJoin([1, 2, 3]); + +SELECT '--'; + +SELECT arrayJoin([1, 2, 3]) AS a, arrayJoin([1, 2, 3]); + +SELECT '--'; + +SELECT arrayJoin([1, 2, 3]) AS a, a; + +SELECT '--'; + +SELECT arrayJoin([[1, 2, 3]]) AS a, arrayJoin(a) AS b; + +SELECT '--'; + +SELECT arrayJoin([1, 2, 3]) AS a, arrayJoin([1, 2, 3, 4]) AS b; + +SELECT '--'; + +SELECT arrayMap(x -> arrayJoin([1, 2, 3]), [1, 2, 3]); + +SELECT arrayMap(x -> arrayJoin(x), [[1, 2, 3]]); -- { serverError 36 } + +SELECT arrayMap(x -> arrayJoin(cast(x, 'Array(UInt8)')), [[1, 2, 3]]); -- { serverError 36 } + +SELECT '--'; + +SELECT arrayMap(x -> x + a, [1, 2, 3]), arrayJoin([1,2,3]) as a; + +SELECT '--'; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value_1 Array(UInt8), + value_2 Array(UInt8), +) ENGINE=TinyLog; + +INSERT INTO test_table VALUES (0, [1, 2, 3], [1, 2, 3, 4]); + +SELECT id, arrayJoin(value_1) FROM test_table; + +SELECT '--'; + +SELECT id, arrayJoin(value_1) AS a, a FROM test_table; + +-- SELECT '--'; + +-- SELECT id, arrayJoin(value_1), arrayJoin(value_2) FROM test_table; + +-- SELECT '--'; + +-- SELECT id, arrayJoin(value_1), arrayJoin(value_2), arrayJoin([5, 6]) FROM test_table; + +DROP TABLE test_table; diff --git a/tests/queries/0_stateless/02370_analyzer_in_function.reference b/tests/queries/0_stateless/02370_analyzer_in_function.reference new file mode 100644 index 00000000000..fda174c0b7c --- /dev/null +++ b/tests/queries/0_stateless/02370_analyzer_in_function.reference @@ -0,0 +1,14 @@ +1 +1 +0 +0 +1 +1 +0 +1 +-- +1 +0 +1 +1 +0 diff --git a/tests/queries/0_stateless/02370_analyzer_in_function.sql b/tests/queries/0_stateless/02370_analyzer_in_function.sql new file mode 100644 index 00000000000..a7128ced449 --- /dev/null +++ b/tests/queries/0_stateless/02370_analyzer_in_function.sql @@ -0,0 +1,23 @@ +SET allow_experimental_analyzer = 1; + +SELECT 1 IN 1; +SELECT 1 IN (1); +SELECT 1 IN 0; +SELECT 1 IN (0); +SELECT 1 IN (1, 2); +SELECT (1, 1) IN ((1, 1), (1, 2)); +SELECT (1, 1) IN ((1, 2), (1, 2)); +SELECT 1 IN (((1), (2))); + +SELECT '--'; + +SELECT 1 IN [1]; +SELECT 1 IN [0]; +SELECT 1 IN [1, 2]; +SELECT (1, 1) IN [(1, 1), (1, 2)]; +SELECT (1, 1) IN [(1, 2), (1, 2)]; + +SELECT (1, 2) IN 1; -- { serverError 43 } +SELECT (1, 2) IN [1]; -- { serverError 124 } +SELECT (1, 2) IN (((1, 2), (1, 2)), ((1, 2), (1, 2))); -- { serverError 43 } +SELECT (1, 2) IN [((1, 2), (1, 2)), ((1, 2), (1, 2))]; -- { serverError 43 } diff --git a/tests/queries/0_stateless/02371_analyzer_join_cross.reference b/tests/queries/0_stateless/02371_analyzer_join_cross.reference new file mode 100644 index 00000000000..50e43ac28d1 --- 
/dev/null +++ b/tests/queries/0_stateless/02371_analyzer_join_cross.reference @@ -0,0 +1,133 @@ +0 Join_1_Value_0 0 Join_2_Value_0 +0 Join_1_Value_0 1 Join_2_Value_1 +0 Join_1_Value_0 2 Join_2_Value_2 +1 Join_1_Value_1 0 Join_2_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 +1 Join_1_Value_1 2 Join_2_Value_2 +3 Join_1_Value_3 0 Join_2_Value_0 +3 Join_1_Value_3 1 Join_2_Value_1 +3 Join_1_Value_3 2 Join_2_Value_2 +-- +0 Join_1_Value_0 0 Join_2_Value_0 +0 Join_1_Value_0 1 Join_2_Value_1 +0 Join_1_Value_0 2 Join_2_Value_2 +1 Join_1_Value_1 0 Join_2_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 +1 Join_1_Value_1 2 Join_2_Value_2 +3 Join_1_Value_3 0 Join_2_Value_0 +3 Join_1_Value_3 1 Join_2_Value_1 +3 Join_1_Value_3 2 Join_2_Value_2 +-- +0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 +0 0 Join_1_Value_0 Join_1_Value_0 1 1 Join_2_Value_1 Join_2_Value_1 +0 0 Join_1_Value_0 Join_1_Value_0 2 2 Join_2_Value_2 Join_2_Value_2 +1 1 Join_1_Value_1 Join_1_Value_1 0 0 Join_2_Value_0 Join_2_Value_0 +1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 +1 1 Join_1_Value_1 Join_1_Value_1 2 2 Join_2_Value_2 Join_2_Value_2 +3 3 Join_1_Value_3 Join_1_Value_3 0 0 Join_2_Value_0 Join_2_Value_0 +3 3 Join_1_Value_3 Join_1_Value_3 1 1 Join_2_Value_1 Join_2_Value_1 +3 3 Join_1_Value_3 Join_1_Value_3 2 2 Join_2_Value_2 Join_2_Value_2 +-- +0 Join_1_Value_0 0 Join_2_Value_0 +0 Join_1_Value_0 1 Join_2_Value_1 +0 Join_1_Value_0 2 Join_2_Value_2 +1 Join_1_Value_1 0 Join_2_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 +1 Join_1_Value_1 2 Join_2_Value_2 +3 Join_1_Value_3 0 Join_2_Value_0 +3 Join_1_Value_3 1 Join_2_Value_1 +3 Join_1_Value_3 2 Join_2_Value_2 +-- +0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 +0 0 Join_1_Value_0 Join_1_Value_0 1 1 Join_2_Value_1 Join_2_Value_1 +0 0 Join_1_Value_0 Join_1_Value_0 2 2 Join_2_Value_2 Join_2_Value_2 +1 1 Join_1_Value_1 Join_1_Value_1 0 0 Join_2_Value_0 Join_2_Value_0 +1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 +1 1 Join_1_Value_1 Join_1_Value_1 2 2 Join_2_Value_2 Join_2_Value_2 +3 3 Join_1_Value_3 Join_1_Value_3 0 0 Join_2_Value_0 Join_2_Value_0 +3 3 Join_1_Value_3 Join_1_Value_3 1 1 Join_2_Value_1 Join_2_Value_1 +3 3 Join_1_Value_3 Join_1_Value_3 2 2 Join_2_Value_2 Join_2_Value_2 +-- +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +0 Join_1_Value_0 0 Join_2_Value_0 1 Join_3_Value_1 +0 Join_1_Value_0 0 Join_2_Value_0 2 Join_3_Value_2 +0 Join_1_Value_0 1 Join_2_Value_1 0 Join_3_Value_0 +0 Join_1_Value_0 1 Join_2_Value_1 1 Join_3_Value_1 +0 Join_1_Value_0 1 Join_2_Value_1 2 Join_3_Value_2 +0 Join_1_Value_0 2 Join_2_Value_2 0 Join_3_Value_0 +0 Join_1_Value_0 2 Join_2_Value_2 1 Join_3_Value_1 +0 Join_1_Value_0 2 Join_2_Value_2 2 Join_3_Value_2 +1 Join_1_Value_1 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 0 Join_2_Value_0 1 Join_3_Value_1 +1 Join_1_Value_1 0 Join_2_Value_0 2 Join_3_Value_2 +1 Join_1_Value_1 1 Join_2_Value_1 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +1 Join_1_Value_1 1 Join_2_Value_1 2 Join_3_Value_2 +1 Join_1_Value_1 2 Join_2_Value_2 0 Join_3_Value_0 +1 Join_1_Value_1 2 Join_2_Value_2 1 Join_3_Value_1 +1 Join_1_Value_1 2 Join_2_Value_2 2 Join_3_Value_2 +3 Join_1_Value_3 0 Join_2_Value_0 0 Join_3_Value_0 +3 Join_1_Value_3 0 Join_2_Value_0 1 Join_3_Value_1 +3 Join_1_Value_3 0 Join_2_Value_0 2 Join_3_Value_2 +3 Join_1_Value_3 1 Join_2_Value_1 0 Join_3_Value_0 +3 Join_1_Value_3 1 Join_2_Value_1 1 Join_3_Value_1 +3 Join_1_Value_3 1 Join_2_Value_1 2 Join_3_Value_2 +3 
Join_1_Value_3 2 Join_2_Value_2 0 Join_3_Value_0 +3 Join_1_Value_3 2 Join_2_Value_2 1 Join_3_Value_1 +3 Join_1_Value_3 2 Join_2_Value_2 2 Join_3_Value_2 +-- +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +0 Join_1_Value_0 0 Join_2_Value_0 1 Join_3_Value_1 +0 Join_1_Value_0 0 Join_2_Value_0 2 Join_3_Value_2 +0 Join_1_Value_0 1 Join_2_Value_1 0 Join_3_Value_0 +0 Join_1_Value_0 1 Join_2_Value_1 1 Join_3_Value_1 +0 Join_1_Value_0 1 Join_2_Value_1 2 Join_3_Value_2 +0 Join_1_Value_0 2 Join_2_Value_2 0 Join_3_Value_0 +0 Join_1_Value_0 2 Join_2_Value_2 1 Join_3_Value_1 +0 Join_1_Value_0 2 Join_2_Value_2 2 Join_3_Value_2 +1 Join_1_Value_1 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 0 Join_2_Value_0 1 Join_3_Value_1 +1 Join_1_Value_1 0 Join_2_Value_0 2 Join_3_Value_2 +1 Join_1_Value_1 1 Join_2_Value_1 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +1 Join_1_Value_1 1 Join_2_Value_1 2 Join_3_Value_2 +1 Join_1_Value_1 2 Join_2_Value_2 0 Join_3_Value_0 +1 Join_1_Value_1 2 Join_2_Value_2 1 Join_3_Value_1 +1 Join_1_Value_1 2 Join_2_Value_2 2 Join_3_Value_2 +3 Join_1_Value_3 0 Join_2_Value_0 0 Join_3_Value_0 +3 Join_1_Value_3 0 Join_2_Value_0 1 Join_3_Value_1 +3 Join_1_Value_3 0 Join_2_Value_0 2 Join_3_Value_2 +3 Join_1_Value_3 1 Join_2_Value_1 0 Join_3_Value_0 +3 Join_1_Value_3 1 Join_2_Value_1 1 Join_3_Value_1 +3 Join_1_Value_3 1 Join_2_Value_1 2 Join_3_Value_2 +3 Join_1_Value_3 2 Join_2_Value_2 0 Join_3_Value_0 +3 Join_1_Value_3 2 Join_2_Value_2 1 Join_3_Value_1 +3 Join_1_Value_3 2 Join_2_Value_2 2 Join_3_Value_2 +-- +0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 +0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 1 1 Join_3_Value_1 Join_3_Value_1 +0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 2 2 Join_3_Value_2 Join_3_Value_2 +0 0 Join_1_Value_0 Join_1_Value_0 1 1 Join_2_Value_1 Join_2_Value_1 0 0 Join_3_Value_0 Join_3_Value_0 +0 0 Join_1_Value_0 Join_1_Value_0 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 +0 0 Join_1_Value_0 Join_1_Value_0 1 1 Join_2_Value_1 Join_2_Value_1 2 2 Join_3_Value_2 Join_3_Value_2 +0 0 Join_1_Value_0 Join_1_Value_0 2 2 Join_2_Value_2 Join_2_Value_2 0 0 Join_3_Value_0 Join_3_Value_0 +0 0 Join_1_Value_0 Join_1_Value_0 2 2 Join_2_Value_2 Join_2_Value_2 1 1 Join_3_Value_1 Join_3_Value_1 +0 0 Join_1_Value_0 Join_1_Value_0 2 2 Join_2_Value_2 Join_2_Value_2 2 2 Join_3_Value_2 Join_3_Value_2 +1 1 Join_1_Value_1 Join_1_Value_1 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 +1 1 Join_1_Value_1 Join_1_Value_1 0 0 Join_2_Value_0 Join_2_Value_0 1 1 Join_3_Value_1 Join_3_Value_1 +1 1 Join_1_Value_1 Join_1_Value_1 0 0 Join_2_Value_0 Join_2_Value_0 2 2 Join_3_Value_2 Join_3_Value_2 +1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 0 0 Join_3_Value_0 Join_3_Value_0 +1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 +1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 2 2 Join_3_Value_2 Join_3_Value_2 +1 1 Join_1_Value_1 Join_1_Value_1 2 2 Join_2_Value_2 Join_2_Value_2 0 0 Join_3_Value_0 Join_3_Value_0 +1 1 Join_1_Value_1 Join_1_Value_1 2 2 Join_2_Value_2 Join_2_Value_2 1 1 Join_3_Value_1 Join_3_Value_1 +1 1 Join_1_Value_1 Join_1_Value_1 2 2 Join_2_Value_2 Join_2_Value_2 2 2 Join_3_Value_2 Join_3_Value_2 +3 3 Join_1_Value_3 Join_1_Value_3 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 +3 3 Join_1_Value_3 
Join_1_Value_3 0 0 Join_2_Value_0 Join_2_Value_0 1 1 Join_3_Value_1 Join_3_Value_1 +3 3 Join_1_Value_3 Join_1_Value_3 0 0 Join_2_Value_0 Join_2_Value_0 2 2 Join_3_Value_2 Join_3_Value_2 +3 3 Join_1_Value_3 Join_1_Value_3 1 1 Join_2_Value_1 Join_2_Value_1 0 0 Join_3_Value_0 Join_3_Value_0 +3 3 Join_1_Value_3 Join_1_Value_3 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 +3 3 Join_1_Value_3 Join_1_Value_3 1 1 Join_2_Value_1 Join_2_Value_1 2 2 Join_3_Value_2 Join_3_Value_2 +3 3 Join_1_Value_3 Join_1_Value_3 2 2 Join_2_Value_2 Join_2_Value_2 0 0 Join_3_Value_0 Join_3_Value_0 +3 3 Join_1_Value_3 Join_1_Value_3 2 2 Join_2_Value_2 Join_2_Value_2 1 1 Join_3_Value_1 Join_3_Value_1 +3 3 Join_1_Value_3 Join_1_Value_3 2 2 Join_2_Value_2 Join_2_Value_2 2 2 Join_3_Value_2 Join_3_Value_2 diff --git a/tests/queries/0_stateless/02371_analyzer_join_cross.sql b/tests/queries/0_stateless/02371_analyzer_join_cross.sql new file mode 100644 index 00000000000..8261572cdf2 --- /dev/null +++ b/tests/queries/0_stateless/02371_analyzer_join_cross.sql @@ -0,0 +1,78 @@ +SET allow_experimental_analyzer = 1; + +DROP TABLE IF EXISTS test_table_join_1; +CREATE TABLE test_table_join_1 +( + id UInt64, + value String +) ENGINE = TinyLog; + +DROP TABLE IF EXISTS test_table_join_2; +CREATE TABLE test_table_join_2 +( + id UInt64, + value String +) ENGINE = TinyLog; + +DROP TABLE IF EXISTS test_table_join_3; +CREATE TABLE test_table_join_3 +( + id UInt64, + value String +) ENGINE = TinyLog; + +INSERT INTO test_table_join_1 VALUES (0, 'Join_1_Value_0'); +INSERT INTO test_table_join_1 VALUES (1, 'Join_1_Value_1'); +INSERT INTO test_table_join_1 VALUES (3, 'Join_1_Value_3'); + +INSERT INTO test_table_join_2 VALUES (0, 'Join_2_Value_0'); +INSERT INTO test_table_join_2 VALUES (1, 'Join_2_Value_1'); +INSERT INTO test_table_join_2 VALUES (2, 'Join_2_Value_2'); + +INSERT INTO test_table_join_3 VALUES (0, 'Join_3_Value_0'); +INSERT INTO test_table_join_3 VALUES (1, 'Join_3_Value_1'); +INSERT INTO test_table_join_3 VALUES (2, 'Join_3_Value_2'); + +SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value +FROM test_table_join_1, test_table_join_2; + +SELECT '--'; + +SELECT t1.id, t1.value, t2.id, t2.value FROM test_table_join_1 AS t1, test_table_join_2 AS t2; + +SELECT '--'; + +SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value +FROM test_table_join_1 AS t1, test_table_join_2 AS t2; + +SELECT '--'; + +SELECT t1.id, t1.value, t2.id, t2.value FROM test_table_join_1 AS t1, test_table_join_2 AS t2; + +SELECT '--'; + +SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value FROM test_table_join_1 AS t1, test_table_join_2 AS t2; + +SELECT '--'; + +SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value +FROM test_table_join_1, test_table_join_2, test_table_join_3; + +SELECT '--'; + +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1, test_table_join_2 AS t2, test_table_join_3 AS t3; + +SELECT '--'; + +SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, +t3.id, test_table_join_3.id, t3.value, test_table_join_3.value +FROM test_table_join_1 AS t1, test_table_join_2 AS t2, test_table_join_3 AS t3; + +SELECT id FROM 
test_table_join_1, test_table_join_2; -- { serverError 207 } + +SELECT value FROM test_table_join_1, test_table_join_2; -- { serverError 207 } + +DROP TABLE test_table_join_1; +DROP TABLE test_table_join_2; +DROP TABLE test_table_join_3; diff --git a/tests/queries/0_stateless/02372_analyzer_join.reference b/tests/queries/0_stateless/02372_analyzer_join.reference new file mode 100644 index 00000000000..b8a658106ff --- /dev/null +++ b/tests/queries/0_stateless/02372_analyzer_join.reference @@ -0,0 +1,1554 @@ +-- { echoOn } + +SELECT 'JOIN INNER'; +JOIN INNER +SELECT 'JOIN ON without conditions'; +JOIN ON without conditions +SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value +FROM test_table_join_1 INNER JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; +0 Join_1_Value_0 0 Join_2_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +0 Join_1_Value_0 0 Join_2_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 +SELECT '--'; +-- +SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 +1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 +SELECT '--'; +-- +SELECT t1.value, t2.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +Join_1_Value_0 Join_2_Value_0 +Join_1_Value_1 Join_2_Value_1 +SELECT id FROM test_table_join_1 INNER JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; -- { serverError 207 } +SELECT value FROM test_table_join_1 INNER JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; -- { serverError 207 } +SELECT 'JOIN ON with conditions'; +JOIN ON with conditions +SELECT t1.id, t1.value, t2.id, t2.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t2.value = 'Join_2_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON toString(t1.id) = toString(t2.id) AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 +SELECT 'JOIN multiple clauses'; +JOIN multiple clauses +SELECT t1.id, t1.value, t2.id, t2.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id; +0 Join_1_Value_0 0 Join_2_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 +SELECT 'JOIN expression aliases'; +JOIN expression aliases +SELECT t1_id, t1.value, t2_id, t2.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id); +0 Join_1_Value_0 0 Join_2_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 +SELECT '--'; +-- +SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value FROM test_table_join_1 AS 
t1 INNER JOIN test_table_join_2 AS t2 ON t1_id = t2_id; +0 Join_1_Value_0 0 Join_2_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 +SELECT 'JOIN LEFT'; +JOIN LEFT +SELECT 'JOIN ON without conditions'; +JOIN ON without conditions +SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value +FROM test_table_join_1 LEFT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; +0 Join_1_Value_0 0 Join_2_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 +2 Join_1_Value_2 0 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +0 Join_1_Value_0 0 Join_2_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 +2 Join_1_Value_2 0 +SELECT '--'; +-- +SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 +1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 +2 2 Join_1_Value_2 Join_1_Value_2 0 0 +SELECT '--'; +-- +SELECT t1.value, t2.value +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +Join_1_Value_0 Join_2_Value_0 +Join_1_Value_1 Join_2_Value_1 +Join_1_Value_2 +SELECT id FROM test_table_join_1 LEFT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; -- { serverError 207 } +SELECT value FROM test_table_join_1 LEFT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; -- { serverError 207 } +SELECT 'JOIN ON with conditions'; +JOIN ON with conditions +SELECT t1.id, t1.value, t2.id, t2.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t2.value = 'Join_2_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 +1 Join_1_Value_1 0 +2 Join_1_Value_2 0 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 +1 Join_1_Value_1 0 +2 Join_1_Value_2 0 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON toString(t1.id) = toString(t2.id) AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 +1 Join_1_Value_1 0 +2 Join_1_Value_2 0 +SELECT 'JOIN multiple clauses'; +JOIN multiple clauses +SELECT t1.id, t1.value, t2.id, t2.value +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id; +0 Join_1_Value_0 0 Join_2_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 +2 Join_1_Value_2 0 +SELECT 'JOIN expression aliases'; +JOIN expression aliases +SELECT t1_id, t1.value, t2_id, t2.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id); +0 Join_1_Value_0 0 Join_2_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 +2 Join_1_Value_2 0 +SELECT '--'; +-- +SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1_id = t2_id; +0 Join_1_Value_0 0 Join_2_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 +2 Join_1_Value_2 0 +SELECT 'JOIN 
RIGHT'; +JOIN RIGHT +SELECT 'JOIN ON without conditions'; +JOIN ON without conditions +SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value +FROM test_table_join_1 RIGHT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; +0 Join_1_Value_0 0 Join_2_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 +0 3 Join_2_Value_3 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +0 Join_1_Value_0 0 Join_2_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 +0 3 Join_2_Value_3 +SELECT '--'; +-- +SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 +1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 +0 0 3 3 Join_2_Value_3 Join_2_Value_3 +SELECT '--'; +-- +SELECT t1.value, t2.value +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +Join_1_Value_0 Join_2_Value_0 +Join_1_Value_1 Join_2_Value_1 + Join_2_Value_3 +SELECT id FROM test_table_join_1 RIGHT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; -- { serverError 207 } +SELECT value FROM test_table_join_1 RIGHT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; -- { serverError 207 } +SELECT 'JOIN ON with conditions'; +JOIN ON with conditions +SELECT t1.id, t1.value, t2.id, t2.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t2.value = 'Join_2_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 +0 1 Join_2_Value_1 +0 3 Join_2_Value_3 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 +0 1 Join_2_Value_1 +0 3 Join_2_Value_3 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON toString(t1.id) = toString(t2.id) AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 +0 1 Join_2_Value_1 +0 3 Join_2_Value_3 +SELECT 'JOIN multiple clauses'; +JOIN multiple clauses +SELECT t1.id, t1.value, t2.id, t2.value +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id; +0 Join_1_Value_0 0 Join_2_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 +0 3 Join_2_Value_3 +SELECT 'JOIN expression aliases'; +JOIN expression aliases +SELECT t1_id, t1.value, t2_id, t2.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id); +0 Join_1_Value_0 0 Join_2_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 +0 3 Join_2_Value_3 +SELECT '--'; +-- +SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1_id = t2_id; +0 Join_1_Value_0 0 Join_2_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 +0 3 Join_2_Value_3 +SELECT 'JOIN FULL'; +JOIN FULL +SELECT 'JOIN ON without conditions'; +JOIN ON without conditions +SELECT test_table_join_1.id, 
test_table_join_1.value, test_table_join_2.id, test_table_join_2.value +FROM test_table_join_1 FULL JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; +0 Join_1_Value_0 0 Join_2_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 +2 Join_1_Value_2 0 +0 3 Join_2_Value_3 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +0 Join_1_Value_0 0 Join_2_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 +2 Join_1_Value_2 0 +0 3 Join_2_Value_3 +SELECT '--'; +-- +SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 +1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 +2 2 Join_1_Value_2 Join_1_Value_2 0 0 +0 0 3 3 Join_2_Value_3 Join_2_Value_3 +SELECT '--'; +-- +SELECT t1.value, t2.value +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +Join_1_Value_0 Join_2_Value_0 +Join_1_Value_1 Join_2_Value_1 +Join_1_Value_2 + Join_2_Value_3 +SELECT id FROM test_table_join_1 FULL JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; -- { serverError 207 } +SELECT value FROM test_table_join_1 FULL JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; -- { serverError 207 } +SELECT 'JOIN ON with conditions'; +JOIN ON with conditions +SELECT t1.id, t1.value, t2.id, t2.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t2.value = 'Join_2_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 +1 Join_1_Value_1 0 +2 Join_1_Value_2 0 +0 1 Join_2_Value_1 +0 3 Join_2_Value_3 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 +1 Join_1_Value_1 0 +2 Join_1_Value_2 0 +0 1 Join_2_Value_1 +0 3 Join_2_Value_3 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON toString(t1.id) = toString(t2.id) AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 +1 Join_1_Value_1 0 +2 Join_1_Value_2 0 +0 1 Join_2_Value_1 +0 3 Join_2_Value_3 +SELECT 'JOIN multiple clauses'; +JOIN multiple clauses +SELECT t1.id, t1.value, t2.id, t2.value +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id; +0 Join_1_Value_0 0 Join_2_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 +2 Join_1_Value_2 0 +0 3 Join_2_Value_3 +SELECT 'JOIN expression aliases'; +JOIN expression aliases +SELECT t1_id, t1.value, t2_id, t2.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id); +0 Join_1_Value_0 0 Join_2_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 +2 Join_1_Value_2 0 +0 3 Join_2_Value_3 +SELECT '--'; +-- +SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1_id = t2_id; +0 Join_1_Value_0 0 Join_2_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 +2 Join_1_Value_2 0 +0 3 
Join_2_Value_3 +SELECT 'First JOIN INNER second JOIN INNER'; +First JOIN INNER second JOIN INNER +SELECT 'JOIN ON without conditions'; +JOIN ON without conditions +SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value +FROM test_table_join_1 INNER JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id +INNER JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +SELECT '--'; +-- +SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, +t3.id, test_table_join_3.id, t3.value, test_table_join_3.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 +1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 +SELECT '--'; +-- +SELECT t1.value, t2.value, t3.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 +SELECT 'JOIN ON with conditions'; +JOIN ON with conditions +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +SELECT 'JOIN multiple clauses'; +JOIN multiple clauses +SELECT t1.value, t2.value, t3.value +FROM test_table_join_1 AS t1 +INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 +SELECT 'JOIN expression aliases'; +JOIN expression aliases +SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) +INNER JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +SELECT '--'; +-- +SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value 
+FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1_id = t2_id +INNER JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +SELECT 'First JOIN INNER second JOIN LEFT'; +First JOIN INNER second JOIN LEFT +SELECT 'JOIN ON without conditions'; +JOIN ON without conditions +SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value +FROM test_table_join_1 INNER JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id +LEFT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +SELECT '--'; +-- +SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, +t3.id, test_table_join_3.id, t3.value, test_table_join_3.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 +1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 +SELECT '--'; +-- +SELECT t1.value, t2.value, t3.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 +SELECT 'JOIN ON with conditions'; +JOIN ON with conditions +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +SELECT 'JOIN multiple clauses'; +JOIN multiple clauses +SELECT t1.value, t2.value, t3.value +FROM test_table_join_1 AS t1 +INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 +SELECT 'JOIN expression aliases'; +JOIN expression aliases +SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) +LEFT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS 
t3_id); +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +SELECT '--'; +-- +SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1_id = t2_id +LEFT JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +SELECT 'First JOIN INNER second JOIN RIGHT'; +First JOIN INNER second JOIN RIGHT +SELECT 'JOIN ON without conditions'; +JOIN ON without conditions +SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value +FROM test_table_join_1 INNER JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id +RIGHT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +SELECT '--'; +-- +SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, +t3.id, test_table_join_3.id, t3.value, test_table_join_3.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 +1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 +0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 +SELECT '--'; +-- +SELECT t1.value, t2.value, t3.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 + Join_3_Value_4 +SELECT 'JOIN ON with conditions'; +JOIN ON with conditions +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' +RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +0 0 4 Join_3_Value_4 +0 0 1 Join_3_Value_1 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' +RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +0 0 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +SELECT 'JOIN multiple clauses'; +JOIN multiple clauses +SELECT t1.value, t2.value, t3.value +FROM test_table_join_1 AS t1 +INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id +RIGHT JOIN test_table_join_3 
AS t3 ON t2.id = t3.id OR t3.id = t2.id; +Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 + Join_3_Value_4 +SELECT 'JOIN expression aliases'; +JOIN expression aliases +SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) +RIGHT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +SELECT '--'; +-- +SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1_id = t2_id +RIGHT JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +SELECT 'First JOIN INNER second JOIN FULL'; +First JOIN INNER second JOIN FULL +SELECT 'JOIN ON without conditions'; +JOIN ON without conditions +SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value +FROM test_table_join_1 INNER JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id +FULL JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +SELECT '--'; +-- +SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, +t3.id, test_table_join_3.id, t3.value, test_table_join_3.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 +1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 +0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 +SELECT '--'; +-- +SELECT t1.value, t2.value, t3.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 + Join_3_Value_4 +SELECT 'JOIN ON with conditions'; +JOIN ON with conditions +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' +FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +0 0 4 Join_3_Value_4 +0 0 1 Join_3_Value_1 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND 
t2.value == 'Join_2_Value_0' +FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +0 0 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +SELECT 'JOIN multiple clauses'; +JOIN multiple clauses +SELECT t1.value, t2.value, t3.value +FROM test_table_join_1 AS t1 +INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id +FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 + Join_3_Value_4 +SELECT 'JOIN expression aliases'; +JOIN expression aliases +SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) +FULL JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +SELECT '--'; +-- +SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1_id = t2_id +FULL JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +SELECT 'First JOIN LEFT second JOIN INNER'; +First JOIN LEFT second JOIN INNER +SELECT 'JOIN ON without conditions'; +JOIN ON without conditions +SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value +FROM test_table_join_1 LEFT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id +INNER JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +2 Join_1_Value_2 0 0 Join_3_Value_0 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +2 Join_1_Value_2 0 0 Join_3_Value_0 +SELECT '--'; +-- +SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, +t3.id, test_table_join_3.id, t3.value, test_table_join_3.value +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 +1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 +2 2 Join_1_Value_2 Join_1_Value_2 0 0 0 0 Join_3_Value_0 Join_3_Value_0 +SELECT '--'; +-- +SELECT t1.value, t2.value, t3.value +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 +Join_1_Value_2 Join_3_Value_0 +SELECT 'JOIN ON with conditions'; +JOIN ON with conditions +SELECT 
t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +SELECT 'JOIN multiple clauses'; +JOIN multiple clauses +SELECT t1.value, t2.value, t3.value +FROM test_table_join_1 AS t1 +LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 +Join_1_Value_2 Join_3_Value_0 +SELECT 'JOIN expression aliases'; +JOIN expression aliases +SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) +INNER JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +2 Join_1_Value_2 0 0 Join_3_Value_0 +SELECT '--'; +-- +SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1_id = t2_id +INNER JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +2 Join_1_Value_2 0 0 Join_3_Value_0 +SELECT 'First JOIN LEFT second JOIN LEFT'; +First JOIN LEFT second JOIN LEFT +SELECT 'JOIN ON without conditions'; +JOIN ON without conditions +SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value +FROM test_table_join_1 LEFT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id +LEFT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +2 Join_1_Value_2 0 0 Join_3_Value_0 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +2 Join_1_Value_2 0 0 Join_3_Value_0 +SELECT '--'; +-- +SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, +t3.id, test_table_join_3.id, t3.value, test_table_join_3.value +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 +1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 +2 2 Join_1_Value_2 
Join_1_Value_2 0 0 0 0 Join_3_Value_0 Join_3_Value_0 +SELECT '--'; +-- +SELECT t1.value, t2.value, t3.value +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 +Join_1_Value_2 Join_3_Value_0 +SELECT 'JOIN ON with conditions'; +JOIN ON with conditions +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 0 0 +2 Join_1_Value_2 0 0 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 0 0 +2 Join_1_Value_2 0 0 +SELECT 'JOIN multiple clauses'; +JOIN multiple clauses +SELECT t1.value, t2.value, t3.value +FROM test_table_join_1 AS t1 +LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 +Join_1_Value_2 Join_3_Value_0 +SELECT 'JOIN expression aliases'; +JOIN expression aliases +SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) +LEFT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +2 Join_1_Value_2 0 0 Join_3_Value_0 +SELECT '--'; +-- +SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1_id = t2_id +LEFT JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +2 Join_1_Value_2 0 0 Join_3_Value_0 +SELECT 'First JOIN LEFT second JOIN RIGHT'; +First JOIN LEFT second JOIN RIGHT +SELECT 'JOIN ON without conditions'; +JOIN ON without conditions +SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value +FROM test_table_join_1 LEFT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id +RIGHT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +2 Join_1_Value_2 0 0 Join_3_Value_0 +0 0 4 Join_3_Value_4 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +2 Join_1_Value_2 0 0 Join_3_Value_0 +0 0 4 Join_3_Value_4 +SELECT '--'; +-- 
+SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, +t3.id, test_table_join_3.id, t3.value, test_table_join_3.value +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 +1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 +2 2 Join_1_Value_2 Join_1_Value_2 0 0 0 0 Join_3_Value_0 Join_3_Value_0 +0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 +SELECT '--'; +-- +SELECT t1.value, t2.value, t3.value +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 +Join_1_Value_2 Join_3_Value_0 + Join_3_Value_4 +SELECT 'JOIN ON with conditions'; +JOIN ON with conditions +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' +RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +0 0 4 Join_3_Value_4 +0 0 1 Join_3_Value_1 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' +RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +0 0 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +SELECT 'JOIN multiple clauses'; +JOIN multiple clauses +SELECT t1.value, t2.value, t3.value +FROM test_table_join_1 AS t1 +LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id +RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 +Join_1_Value_2 Join_3_Value_0 + Join_3_Value_4 +SELECT 'JOIN expression aliases'; +JOIN expression aliases +SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) +RIGHT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +2 Join_1_Value_2 0 0 Join_3_Value_0 +0 0 4 Join_3_Value_4 +SELECT '--'; +-- +SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1_id = t2_id +RIGHT JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +2 Join_1_Value_2 0 0 Join_3_Value_0 +0 0 4 Join_3_Value_4 +SELECT 'First JOIN LEFT second JOIN FULL'; +First JOIN LEFT second JOIN FULL +SELECT 'JOIN ON without conditions'; +JOIN ON without conditions +SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value +FROM test_table_join_1 LEFT JOIN 
test_table_join_2 ON test_table_join_1.id = test_table_join_2.id +FULL JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +2 Join_1_Value_2 0 0 Join_3_Value_0 +0 0 4 Join_3_Value_4 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +2 Join_1_Value_2 0 0 Join_3_Value_0 +0 0 4 Join_3_Value_4 +SELECT '--'; +-- +SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, +t3.id, test_table_join_3.id, t3.value, test_table_join_3.value +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 +1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 +2 2 Join_1_Value_2 Join_1_Value_2 0 0 0 0 Join_3_Value_0 Join_3_Value_0 +0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 +SELECT '--'; +-- +SELECT t1.value, t2.value, t3.value +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 +Join_1_Value_2 Join_3_Value_0 + Join_3_Value_4 +SELECT 'JOIN ON with conditions'; +JOIN ON with conditions +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' +FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 0 0 +2 Join_1_Value_2 0 0 +0 0 4 Join_3_Value_4 +0 0 1 Join_3_Value_1 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' +FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 0 0 +2 Join_1_Value_2 0 0 +0 0 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +SELECT 'JOIN multiple clauses'; +JOIN multiple clauses +SELECT t1.value, t2.value, t3.value +FROM test_table_join_1 AS t1 +LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id +FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 +Join_1_Value_2 Join_3_Value_0 + Join_3_Value_4 +SELECT 'JOIN expression aliases'; +JOIN expression aliases +SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) +FULL JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 
Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +2 Join_1_Value_2 0 0 Join_3_Value_0 +0 0 4 Join_3_Value_4 +SELECT '--'; +-- +SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1_id = t2_id +FULL JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +2 Join_1_Value_2 0 0 Join_3_Value_0 +0 0 4 Join_3_Value_4 +SELECT 'First JOIN RIGHT second JOIN INNER'; +First JOIN RIGHT second JOIN INNER +SELECT 'JOIN ON without conditions'; +JOIN ON without conditions +SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value +FROM test_table_join_1 RIGHT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id +INNER JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +SELECT '--'; +-- +SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, +t3.id, test_table_join_3.id, t3.value, test_table_join_3.value +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 +1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 +SELECT '--'; +-- +SELECT t1.value, t2.value, t3.value +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 +SELECT 'JOIN ON with conditions'; +JOIN ON with conditions +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +SELECT 'JOIN multiple clauses'; +JOIN multiple clauses +SELECT t1.value, t2.value, t3.value +FROM test_table_join_1 AS t1 +RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 
+SELECT 'JOIN expression aliases'; +JOIN expression aliases +SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) +INNER JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +SELECT '--'; +-- +SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1_id = t2_id +INNER JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +SELECT 'First JOIN RIGHT second JOIN LEFT'; +First JOIN RIGHT second JOIN LEFT +SELECT 'JOIN ON without conditions'; +JOIN ON without conditions +SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value +FROM test_table_join_1 RIGHT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id +LEFT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +0 3 Join_2_Value_3 0 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +0 3 Join_2_Value_3 0 +SELECT '--'; +-- +SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, +t3.id, test_table_join_3.id, t3.value, test_table_join_3.value +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 +1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 +0 0 3 3 Join_2_Value_3 Join_2_Value_3 0 0 +SELECT '--'; +-- +SELECT t1.value, t2.value, t3.value +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 + Join_2_Value_3 +SELECT 'JOIN ON with conditions'; +JOIN ON with conditions +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +0 3 Join_2_Value_3 0 +0 1 Join_2_Value_1 0 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 0 
Join_3_Value_0 +0 1 Join_2_Value_1 0 +0 3 Join_2_Value_3 0 +SELECT 'JOIN multiple clauses'; +JOIN multiple clauses +SELECT t1.value, t2.value, t3.value +FROM test_table_join_1 AS t1 +RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 + Join_2_Value_3 +SELECT 'JOIN expression aliases'; +JOIN expression aliases +SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) +LEFT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +0 3 Join_2_Value_3 0 +SELECT '--'; +-- +SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1_id = t2_id +LEFT JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +0 3 Join_2_Value_3 0 +SELECT 'First JOIN RIGHT second JOIN RIGHT'; +First JOIN RIGHT second JOIN RIGHT +SELECT 'JOIN ON without conditions'; +JOIN ON without conditions +SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value +FROM test_table_join_1 RIGHT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id +RIGHT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +SELECT '--'; +-- +SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, +t3.id, test_table_join_3.id, t3.value, test_table_join_3.value +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 +1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 +0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 +SELECT '--'; +-- +SELECT t1.value, t2.value, t3.value +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 + Join_3_Value_4 +SELECT 'JOIN ON with conditions'; +JOIN ON with conditions +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' +RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +0 
Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +0 0 4 Join_3_Value_4 +0 0 1 Join_3_Value_1 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' +RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +0 0 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +SELECT 'JOIN multiple clauses'; +JOIN multiple clauses +SELECT t1.value, t2.value, t3.value +FROM test_table_join_1 AS t1 +RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id +RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 + Join_3_Value_4 +SELECT 'JOIN expression aliases'; +JOIN expression aliases +SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) +RIGHT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +SELECT '--'; +-- +SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1_id = t2_id +RIGHT JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +SELECT 'First JOIN RIGHT second JOIN FULL'; +First JOIN RIGHT second JOIN FULL +SELECT 'JOIN ON without conditions'; +JOIN ON without conditions +SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value +FROM test_table_join_1 RIGHT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id +FULL JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +0 3 Join_2_Value_3 0 +0 0 4 Join_3_Value_4 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +0 3 Join_2_Value_3 0 +0 0 4 Join_3_Value_4 +SELECT '--'; +-- +SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, +t3.id, test_table_join_3.id, t3.value, test_table_join_3.value +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 +1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 +0 0 3 3 Join_2_Value_3 Join_2_Value_3 0 0 +0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 +SELECT '--'; +-- +SELECT t1.value, t2.value, t3.value +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON 
test_table_join_1.id = test_table_join_2.id +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 + Join_2_Value_3 + Join_3_Value_4 +SELECT 'JOIN ON with conditions'; +JOIN ON with conditions +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' +FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +0 3 Join_2_Value_3 0 +0 1 Join_2_Value_1 0 +0 0 4 Join_3_Value_4 +0 0 1 Join_3_Value_1 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' +FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +0 1 Join_2_Value_1 0 +0 3 Join_2_Value_3 0 +0 0 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +SELECT 'JOIN multiple clauses'; +JOIN multiple clauses +SELECT t1.value, t2.value, t3.value +FROM test_table_join_1 AS t1 +RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id +FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 + Join_2_Value_3 + Join_3_Value_4 +SELECT 'JOIN expression aliases'; +JOIN expression aliases +SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) +FULL JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +0 3 Join_2_Value_3 0 +0 0 4 Join_3_Value_4 +SELECT '--'; +-- +SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1_id = t2_id +FULL JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +0 3 Join_2_Value_3 0 +0 0 4 Join_3_Value_4 +SELECT 'First JOIN FULL second JOIN INNER'; +First JOIN FULL second JOIN INNER +SELECT 'JOIN ON without conditions'; +JOIN ON without conditions +SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value +FROM test_table_join_1 FULL JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id +INNER JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +2 Join_1_Value_2 0 0 Join_3_Value_0 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +2 Join_1_Value_2 0 0 Join_3_Value_0 +SELECT '--'; +-- +SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, 
t2.value, test_table_join_2.value, +t3.id, test_table_join_3.id, t3.value, test_table_join_3.value +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 +1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 +2 2 Join_1_Value_2 Join_1_Value_2 0 0 0 0 Join_3_Value_0 Join_3_Value_0 +SELECT '--'; +-- +SELECT t1.value, t2.value, t3.value +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 +Join_1_Value_2 Join_3_Value_0 +SELECT 'JOIN ON with conditions'; +JOIN ON with conditions +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +SELECT 'JOIN multiple clauses'; +JOIN multiple clauses +SELECT t1.value, t2.value, t3.value +FROM test_table_join_1 AS t1 +FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 +Join_1_Value_2 Join_3_Value_0 +SELECT 'JOIN expression aliases'; +JOIN expression aliases +SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) +INNER JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +2 Join_1_Value_2 0 0 Join_3_Value_0 +SELECT '--'; +-- +SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1_id = t2_id +INNER JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +2 Join_1_Value_2 0 0 Join_3_Value_0 +SELECT 'First JOIN FULL second JOIN LEFT'; +First JOIN FULL second JOIN LEFT +SELECT 'JOIN ON without conditions'; +JOIN ON without conditions +SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value +FROM test_table_join_1 FULL JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id +LEFT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +2 Join_1_Value_2 0 0 Join_3_Value_0 +0 3 Join_2_Value_3 0 +SELECT '--'; +-- 
+SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +2 Join_1_Value_2 0 0 Join_3_Value_0 +0 3 Join_2_Value_3 0 +SELECT '--'; +-- +SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, +t3.id, test_table_join_3.id, t3.value, test_table_join_3.value +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 +1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 +2 2 Join_1_Value_2 Join_1_Value_2 0 0 0 0 Join_3_Value_0 Join_3_Value_0 +0 0 3 3 Join_2_Value_3 Join_2_Value_3 0 0 +SELECT '--'; +-- +SELECT t1.value, t2.value, t3.value +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 +Join_1_Value_2 Join_3_Value_0 + Join_2_Value_3 +SELECT 'JOIN ON with conditions'; +JOIN ON with conditions +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 0 0 +2 Join_1_Value_2 0 0 +0 3 Join_2_Value_3 0 +0 1 Join_2_Value_1 0 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 0 0 +2 Join_1_Value_2 0 0 +0 1 Join_2_Value_1 0 +0 3 Join_2_Value_3 0 +SELECT 'JOIN multiple clauses'; +JOIN multiple clauses +SELECT t1.value, t2.value, t3.value +FROM test_table_join_1 AS t1 +FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 +Join_1_Value_2 Join_3_Value_0 + Join_2_Value_3 +SELECT 'JOIN expression aliases'; +JOIN expression aliases +SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) +LEFT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +2 Join_1_Value_2 0 0 Join_3_Value_0 +0 3 Join_2_Value_3 0 +SELECT '--'; +-- +SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1_id = t2_id +LEFT JOIN test_table_join_3 AS t3 ON 
t2_id = t3_id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +2 Join_1_Value_2 0 0 Join_3_Value_0 +0 3 Join_2_Value_3 0 +SELECT 'First JOIN FULL second JOIN RIGHT'; +First JOIN FULL second JOIN RIGHT +SELECT 'JOIN ON without conditions'; +JOIN ON without conditions +SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value +FROM test_table_join_1 FULL JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id +RIGHT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +2 Join_1_Value_2 0 0 Join_3_Value_0 +0 0 4 Join_3_Value_4 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +2 Join_1_Value_2 0 0 Join_3_Value_0 +0 0 4 Join_3_Value_4 +SELECT '--'; +-- +SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, +t3.id, test_table_join_3.id, t3.value, test_table_join_3.value +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 +1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 +2 2 Join_1_Value_2 Join_1_Value_2 0 0 0 0 Join_3_Value_0 Join_3_Value_0 +0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 +SELECT '--'; +-- +SELECT t1.value, t2.value, t3.value +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 +Join_1_Value_2 Join_3_Value_0 + Join_3_Value_4 +SELECT 'JOIN ON with conditions'; +JOIN ON with conditions +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' +RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +0 0 4 Join_3_Value_4 +0 0 1 Join_3_Value_1 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' +RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +0 0 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +SELECT 'JOIN multiple clauses'; +JOIN multiple clauses +SELECT t1.value, t2.value, t3.value +FROM test_table_join_1 AS t1 +FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id +RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +Join_1_Value_1 
Join_2_Value_1 Join_3_Value_1 +Join_1_Value_2 Join_3_Value_0 + Join_3_Value_4 +SELECT 'JOIN expression aliases'; +JOIN expression aliases +SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) +RIGHT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +2 Join_1_Value_2 0 0 Join_3_Value_0 +0 0 4 Join_3_Value_4 +SELECT '--'; +-- +SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1_id = t2_id +RIGHT JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +2 Join_1_Value_2 0 0 Join_3_Value_0 +0 0 4 Join_3_Value_4 +SELECT 'First JOIN FULL second JOIN FULL'; +First JOIN FULL second JOIN FULL +SELECT 'JOIN ON without conditions'; +JOIN ON without conditions +SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value +FROM test_table_join_1 FULL JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id +FULL JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +2 Join_1_Value_2 0 0 Join_3_Value_0 +0 3 Join_2_Value_3 0 +0 0 4 Join_3_Value_4 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +2 Join_1_Value_2 0 0 Join_3_Value_0 +0 3 Join_2_Value_3 0 +0 0 4 Join_3_Value_4 +SELECT '--'; +-- +SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, +t3.id, test_table_join_3.id, t3.value, test_table_join_3.value +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 +1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 +2 2 Join_1_Value_2 Join_1_Value_2 0 0 0 0 Join_3_Value_0 Join_3_Value_0 +0 0 3 3 Join_2_Value_3 Join_2_Value_3 0 0 +0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 +SELECT '--'; +-- +SELECT t1.value, t2.value, t3.value +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 +Join_1_Value_2 Join_3_Value_0 + Join_2_Value_3 + Join_3_Value_4 +SELECT 'JOIN ON with conditions'; +JOIN ON with conditions +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' +FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +0 Join_1_Value_0 
0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 0 0 +2 Join_1_Value_2 0 0 +0 3 Join_2_Value_3 0 +0 1 Join_2_Value_1 0 +0 0 4 Join_3_Value_4 +0 0 1 Join_3_Value_1 +SELECT '--'; +-- +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' +FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 0 0 +2 Join_1_Value_2 0 0 +0 1 Join_2_Value_1 0 +0 3 Join_2_Value_3 0 +0 0 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +SELECT 'JOIN multiple clauses'; +JOIN multiple clauses +SELECT t1.value, t2.value, t3.value +FROM test_table_join_1 AS t1 +FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id +FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 +Join_1_Value_2 Join_3_Value_0 + Join_2_Value_3 + Join_3_Value_4 +SELECT 'JOIN expression aliases'; +JOIN expression aliases +SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) +FULL JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +2 Join_1_Value_2 0 0 Join_3_Value_0 +0 3 Join_2_Value_3 0 +0 0 4 Join_3_Value_4 +SELECT '--'; +-- +SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1_id = t2_id +FULL JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 +2 Join_1_Value_2 0 0 Join_3_Value_0 +0 3 Join_2_Value_3 0 +0 0 4 Join_3_Value_4 diff --git a/tests/queries/0_stateless/02372_analyzer_join.sql.j2 b/tests/queries/0_stateless/02372_analyzer_join.sql.j2 new file mode 100644 index 00000000000..9b3c212562b --- /dev/null +++ b/tests/queries/0_stateless/02372_analyzer_join.sql.j2 @@ -0,0 +1,170 @@ +-- Tags: long + +SET allow_experimental_analyzer = 1; + +DROP TABLE IF EXISTS test_table_join_1; +CREATE TABLE test_table_join_1 +( + id UInt64, + value String +) ENGINE = TinyLog; + +DROP TABLE IF EXISTS test_table_join_2; +CREATE TABLE test_table_join_2 +( + id UInt64, + value String +) ENGINE = TinyLog; + +DROP TABLE IF EXISTS test_table_join_3; +CREATE TABLE test_table_join_3 +( + id UInt64, + value String +) ENGINE = TinyLog; + +INSERT INTO test_table_join_1 VALUES (0, 'Join_1_Value_0'); +INSERT INTO test_table_join_1 VALUES (1, 'Join_1_Value_1'); +INSERT INTO test_table_join_1 VALUES (2, 'Join_1_Value_2'); + +INSERT INTO test_table_join_2 VALUES (0, 'Join_2_Value_0'); +INSERT INTO test_table_join_2 VALUES (1, 'Join_2_Value_1'); +INSERT INTO test_table_join_2 VALUES (3, 'Join_2_Value_3'); + +INSERT INTO test_table_join_3 VALUES (0, 'Join_3_Value_0'); +INSERT INTO test_table_join_3 VALUES (1, 'Join_3_Value_1'); +INSERT INTO test_table_join_3 VALUES (4, 'Join_3_Value_4'); + +-- { echoOn } + +{% for join_type in ['INNER', 'LEFT', 'RIGHT', 'FULL'] -%} + +SELECT 'JOIN {{ join_type }}'; + +SELECT 'JOIN ON without conditions'; + +SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value +FROM test_table_join_1 {{ join_type }} 
JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; + +SELECT '--'; + +SELECT t1.id, t1.value, t2.id, t2.value +FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id; + +SELECT '--'; + +SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value +FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id; + +SELECT '--'; + +SELECT t1.value, t2.value +FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id; + +SELECT id FROM test_table_join_1 {{ join_type }} JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; -- { serverError 207 } + +SELECT value FROM test_table_join_1 {{ join_type }} JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; -- { serverError 207 } + +SELECT 'JOIN ON with conditions'; + +SELECT t1.id, t1.value, t2.id, t2.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0'; + +SELECT '--'; + +SELECT t1.id, t1.value, t2.id, t2.value +FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t2.value = 'Join_2_Value_0'; + +SELECT '--'; + +SELECT t1.id, t1.value, t2.id, t2.value +FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0'; + +SELECT '--'; + +SELECT t1.id, t1.value, t2.id, t2.value +FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON toString(t1.id) = toString(t2.id) AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0'; + +SELECT 'JOIN multiple clauses'; + +SELECT t1.id, t1.value, t2.id, t2.value +FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id; + +SELECT 'JOIN expression aliases'; + +SELECT t1_id, t1.value, t2_id, t2.value FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id); + +SELECT '--'; + +SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON t1_id = t2_id; + +{% endfor %} + +{% for first_join_type in ['INNER', 'LEFT', 'RIGHT', 'FULL'] -%} +{% for second_join_type in ['INNER', 'LEFT', 'RIGHT', 'FULL'] -%} + +SELECT 'First JOIN {{ first_join_type }} second JOIN {{ second_join_type }}'; + +SELECT 'JOIN ON without conditions'; + +SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value +FROM test_table_join_1 {{ first_join_type }} JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id +{{ second_join_type }} JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; + +SELECT '--'; + +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +{{ second_join_type }} JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; + +SELECT '--'; + +SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, +t3.id, test_table_join_3.id, t3.value, test_table_join_3.value +FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 ON test_table_join_1.id = 
test_table_join_2.id +{{ second_join_type }} JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; + +SELECT '--'; +SELECT t1.value, t2.value, t3.value +FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id +{{ second_join_type }} JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; + +SELECT 'JOIN ON with conditions'; + +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' +{{ second_join_type }} JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; + +SELECT '--'; + +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' +{{ second_join_type }} JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; + +SELECT 'JOIN multiple clauses'; + +SELECT t1.value, t2.value, t3.value +FROM test_table_join_1 AS t1 +{{ first_join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id +{{ second_join_type }} JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; + +SELECT 'JOIN expression aliases'; + +SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value +FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) +{{ second_join_type }} JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); + +SELECT '--'; + +SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value +FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 ON t1_id = t2_id +{{ second_join_type }} JOIN test_table_join_3 AS t3 ON t2_id = t3_id; + +{% endfor %} +{% endfor %} + +-- { echoOff } + +DROP TABLE test_table_join_1; +DROP TABLE test_table_join_2; +DROP TABLE test_table_join_3; diff --git a/tests/queries/0_stateless/02373_analyzer_join_use_nulls.reference b/tests/queries/0_stateless/02373_analyzer_join_use_nulls.reference new file mode 100644 index 00000000000..3722c23e4a0 --- /dev/null +++ b/tests/queries/0_stateless/02373_analyzer_join_use_nulls.reference @@ -0,0 +1,60 @@ +-- { echoOn } + +SELECT t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String +1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String +SELECT '--'; +-- +SELECT t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +0 UInt64 Join_1_Value_0 String 0 Nullable(UInt64) Join_2_Value_0 Nullable(String) +1 UInt64 Join_1_Value_1 String 1 Nullable(UInt64) Join_2_Value_1 Nullable(String) +2 UInt64 Join_1_Value_2 String \N Nullable(UInt64) \N Nullable(String) +SELECT '--'; +-- +SELECT t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON 
t1.id = t2.id; +0 Nullable(UInt64) Join_1_Value_0 Nullable(String) 0 UInt64 Join_2_Value_0 String +1 Nullable(UInt64) Join_1_Value_1 Nullable(String) 1 UInt64 Join_2_Value_1 String +\N Nullable(UInt64) \N Nullable(String) 3 UInt64 Join_2_Value_3 String +SELECT '--'; +-- +SELECT t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +0 Nullable(UInt64) Join_1_Value_0 Nullable(String) 0 Nullable(UInt64) Join_2_Value_0 Nullable(String) +1 Nullable(UInt64) Join_1_Value_1 Nullable(String) 1 Nullable(UInt64) Join_2_Value_1 Nullable(String) +2 Nullable(UInt64) Join_1_Value_2 Nullable(String) \N Nullable(UInt64) \N Nullable(String) +\N Nullable(UInt64) \N Nullable(String) 3 Nullable(UInt64) Join_2_Value_3 Nullable(String) +SELECT '--'; +-- +SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), +t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id); +0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String +1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String +SELECT '--'; +-- +SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), +t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id); +0 UInt64 0 UInt64 Join_1_Value_0 String 0 Nullable(UInt64) Join_2_Value_0 Nullable(String) +1 UInt64 1 UInt64 Join_1_Value_1 String 1 Nullable(UInt64) Join_2_Value_1 Nullable(String) +2 UInt64 2 UInt64 Join_1_Value_2 String \N Nullable(UInt64) \N Nullable(String) +SELECT '--'; +-- +SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), +t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id); +0 UInt64 0 Nullable(UInt64) Join_1_Value_0 Nullable(String) 0 UInt64 Join_2_Value_0 String +1 UInt64 1 Nullable(UInt64) Join_1_Value_1 Nullable(String) 1 UInt64 Join_2_Value_1 String +3 UInt64 \N Nullable(UInt64) \N Nullable(String) 3 UInt64 Join_2_Value_3 String +SELECT '--'; +-- +SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), +t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id); +0 Nullable(UInt64) 0 Nullable(UInt64) Join_1_Value_0 Nullable(String) 0 Nullable(UInt64) Join_2_Value_0 Nullable(String) +1 Nullable(UInt64) 1 Nullable(UInt64) Join_1_Value_1 Nullable(String) 1 Nullable(UInt64) Join_2_Value_1 Nullable(String) +2 Nullable(UInt64) 2 Nullable(UInt64) Join_1_Value_2 Nullable(String) \N Nullable(UInt64) \N Nullable(String) +\N Nullable(UInt64) \N Nullable(UInt64) \N Nullable(String) 3 Nullable(UInt64) Join_2_Value_3 Nullable(String) diff --git a/tests/queries/0_stateless/02373_analyzer_join_use_nulls.sql b/tests/queries/0_stateless/02373_analyzer_join_use_nulls.sql new file mode 100644 index 00000000000..db7895084e8 --- /dev/null +++ b/tests/queries/0_stateless/02373_analyzer_join_use_nulls.sql @@ -0,0 +1,73 @@ +SET 
allow_experimental_analyzer = 1; +SET join_use_nulls = 1; + +DROP TABLE IF EXISTS test_table_join_1; +CREATE TABLE test_table_join_1 +( + id UInt64, + value String +) ENGINE = TinyLog; + +DROP TABLE IF EXISTS test_table_join_2; +CREATE TABLE test_table_join_2 +( + id UInt64, + value String +) ENGINE = TinyLog; + +INSERT INTO test_table_join_1 VALUES (0, 'Join_1_Value_0'); +INSERT INTO test_table_join_1 VALUES (1, 'Join_1_Value_1'); +INSERT INTO test_table_join_1 VALUES (2, 'Join_1_Value_2'); + +INSERT INTO test_table_join_2 VALUES (0, 'Join_2_Value_0'); +INSERT INTO test_table_join_2 VALUES (1, 'Join_2_Value_1'); +INSERT INTO test_table_join_2 VALUES (3, 'Join_2_Value_3'); + +-- { echoOn } + +SELECT t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id; + +SELECT '--'; + +SELECT t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id; + +SELECT '--'; + +SELECT t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id; + +SELECT '--'; + +SELECT t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id; + +SELECT '--'; + +SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), +t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id); + +SELECT '--'; + +SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), +t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id); + +SELECT '--'; + +SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), +t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id); + +SELECT '--'; + +SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), +t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id); + +-- { echoOff } + +DROP TABLE test_table_join_1; +DROP TABLE test_table_join_2; diff --git a/tests/queries/0_stateless/02373_progress_contain_result.reference b/tests/queries/0_stateless/02373_progress_contain_result.reference index 1e7492e2829..a125646e7b8 100644 --- a/tests/queries/0_stateless/02373_progress_contain_result.reference +++ b/tests/queries/0_stateless/02373_progress_contain_result.reference @@ -1 +1 @@ -< X-ClickHouse-Summary: {"read_rows":"100","read_bytes":"800","written_rows":"0","written_bytes":"0","total_rows_to_read":"100","result_rows":"100","result_bytes":"131"} +< 
X-ClickHouse-Summary: {"read_rows":"100","read_bytes":"800","written_rows":"0","written_bytes":"0","total_rows_to_read":"100","result_rows":"100","result_bytes":"227"} diff --git a/tests/queries/0_stateless/02374_analyzer_array_join.reference b/tests/queries/0_stateless/02374_analyzer_array_join.reference new file mode 100644 index 00000000000..28859f715b3 --- /dev/null +++ b/tests/queries/0_stateless/02374_analyzer_array_join.reference @@ -0,0 +1,110 @@ +-- { echoOn } + +SELECT 'ARRAY JOIN with constant'; +ARRAY JOIN with constant +SELECT id, value, value_1 FROM test_table ARRAY JOIN [1, 2, 3] AS value_1; +0 Value 1 +0 Value 2 +0 Value 3 +0 Value 1 +0 Value 2 +0 Value 3 +SELECT '--'; +-- +SELECT id, value FROM test_table ARRAY JOIN [1, 2, 3] AS value; +0 1 +0 2 +0 3 +0 1 +0 2 +0 3 +SELECT '--'; +-- +WITH [1, 2, 3] AS constant_array SELECT id, value FROM test_table ARRAY JOIN constant_array AS value; +0 1 +0 2 +0 3 +0 1 +0 2 +0 3 +SELECT '--'; +-- +WITH [1, 2, 3] AS constant_array SELECT id, value, value_1 FROM test_table ARRAY JOIN constant_array AS value_1; +0 Value 1 +0 Value 2 +0 Value 3 +0 Value 1 +0 Value 2 +0 Value 3 +SELECT '--'; +-- +SELECT id, value, value_1, value_2 FROM test_table ARRAY JOIN [[1, 2, 3]] AS value_1 ARRAY JOIN value_1 AS value_2; +0 Value [1,2,3] 1 +0 Value [1,2,3] 2 +0 Value [1,2,3] 3 +0 Value [1,2,3] 1 +0 Value [1,2,3] 2 +0 Value [1,2,3] 3 +SELECT 1 AS value FROM test_table ARRAY JOIN [1,2,3] AS value; -- { serverError 179 } +SELECT 'ARRAY JOIN with column'; +ARRAY JOIN with column +SELECT id, value, test_table.value_array FROM test_table ARRAY JOIN value_array; +0 Value 1 +0 Value 2 +0 Value 3 +0 Value 4 +0 Value 5 +0 Value 6 +SELECT '--'; +-- +SELECT id, value_array, value FROM test_table ARRAY JOIN value_array AS value; +0 [1,2,3] 1 +0 [1,2,3] 2 +0 [1,2,3] 3 +0 [4,5,6] 4 +0 [4,5,6] 5 +0 [4,5,6] 6 +SELECT '--'; +-- +SELECT id, value, value_array, value_array_element FROM test_table ARRAY JOIN value_array AS value_array_element; +0 Value [1,2,3] 1 +0 Value [1,2,3] 2 +0 Value [1,2,3] 3 +0 Value [4,5,6] 4 +0 Value [4,5,6] 5 +0 Value [4,5,6] 6 +SELECT '--'; +-- +SELECT id, value, value_array AS value_array_array_alias FROM test_table ARRAY JOIN value_array_array_alias; +0 Value [1,2,3] +0 Value [1,2,3] +0 Value [1,2,3] +0 Value [4,5,6] +0 Value [4,5,6] +0 Value [4,5,6] +SELECT '--'; +-- +SELECT id AS value FROM test_table ARRAY JOIN value_array AS value; -- { serverError 179 } +SELECT '--'; +-- +SELECT id, value, value_array AS value_array_array_alias, value_array_array_alias_element FROM test_table ARRAY JOIN value_array_array_alias AS value_array_array_alias_element; +0 Value [1,2,3] 1 +0 Value [1,2,3] 2 +0 Value [1,2,3] 3 +0 Value [4,5,6] 4 +0 Value [4,5,6] 5 +0 Value [4,5,6] 6 +SELECT '--'; +-- +SELECT id, value, value_array_array, value_array_array_inner_element, value_array_array_inner_element, value_array_array_inner_inner_element +FROM test_table ARRAY JOIN value_array_array AS value_array_array_inner_element +ARRAY JOIN value_array_array_inner_element AS value_array_array_inner_inner_element; +0 Value [[1,2,3]] [1,2,3] [1,2,3] 1 +0 Value [[1,2,3]] [1,2,3] [1,2,3] 2 +0 Value [[1,2,3]] [1,2,3] [1,2,3] 3 +0 Value [[1,2,3],[4,5,6]] [1,2,3] [1,2,3] 1 +0 Value [[1,2,3],[4,5,6]] [1,2,3] [1,2,3] 2 +0 Value [[1,2,3],[4,5,6]] [1,2,3] [1,2,3] 3 +0 Value [[1,2,3],[4,5,6]] [4,5,6] [4,5,6] 4 +0 Value [[1,2,3],[4,5,6]] [4,5,6] [4,5,6] 5 +0 Value [[1,2,3],[4,5,6]] [4,5,6] [4,5,6] 6 diff --git a/tests/queries/0_stateless/02374_analyzer_array_join.sql 
b/tests/queries/0_stateless/02374_analyzer_array_join.sql new file mode 100644 index 00000000000..8ebfdef349c --- /dev/null +++ b/tests/queries/0_stateless/02374_analyzer_array_join.sql @@ -0,0 +1,70 @@ +SET allow_experimental_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String, + value_array Array(UInt64), + value_array_array Array(Array(UInt64)) +) ENGINE=TinyLog; + +INSERT INTO test_table VALUES (0, 'Value', [1, 2, 3], [[1, 2, 3]]), (0, 'Value', [4, 5, 6], [[1, 2, 3], [4, 5, 6]]); + +-- { echoOn } + +SELECT 'ARRAY JOIN with constant'; + +SELECT id, value, value_1 FROM test_table ARRAY JOIN [1, 2, 3] AS value_1; + +SELECT '--'; + +SELECT id, value FROM test_table ARRAY JOIN [1, 2, 3] AS value; + +SELECT '--'; + +WITH [1, 2, 3] AS constant_array SELECT id, value FROM test_table ARRAY JOIN constant_array AS value; + +SELECT '--'; + +WITH [1, 2, 3] AS constant_array SELECT id, value, value_1 FROM test_table ARRAY JOIN constant_array AS value_1; + +SELECT '--'; + +SELECT id, value, value_1, value_2 FROM test_table ARRAY JOIN [[1, 2, 3]] AS value_1 ARRAY JOIN value_1 AS value_2; + +SELECT 1 AS value FROM test_table ARRAY JOIN [1,2,3] AS value; -- { serverError 179 } + +SELECT 'ARRAY JOIN with column'; + +SELECT id, value, test_table.value_array FROM test_table ARRAY JOIN value_array; + +SELECT '--'; + +SELECT id, value_array, value FROM test_table ARRAY JOIN value_array AS value; + +SELECT '--'; + +SELECT id, value, value_array, value_array_element FROM test_table ARRAY JOIN value_array AS value_array_element; + +SELECT '--'; + +SELECT id, value, value_array AS value_array_array_alias FROM test_table ARRAY JOIN value_array_array_alias; + +SELECT '--'; + +SELECT id AS value FROM test_table ARRAY JOIN value_array AS value; -- { serverError 179 } + +SELECT '--'; + +SELECT id, value, value_array AS value_array_array_alias, value_array_array_alias_element FROM test_table ARRAY JOIN value_array_array_alias AS value_array_array_alias_element; + +SELECT '--'; + +SELECT id, value, value_array_array, value_array_array_inner_element, value_array_array_inner_element, value_array_array_inner_inner_element +FROM test_table ARRAY JOIN value_array_array AS value_array_array_inner_element +ARRAY JOIN value_array_array_inner_element AS value_array_array_inner_inner_element; + +-- { echoOff } + +DROP TABLE test_table; diff --git a/tests/queries/0_stateless/02374_analyzer_join_using.reference b/tests/queries/0_stateless/02374_analyzer_join_using.reference new file mode 100644 index 00000000000..62750c33f89 --- /dev/null +++ b/tests/queries/0_stateless/02374_analyzer_join_using.reference @@ -0,0 +1,452 @@ +-- { echoOn } + +SELECT 'JOIN INNER'; +JOIN INNER +SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), +t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id); +0 UInt16 0 UInt16 Join_1_Value_0 String 0 UInt16 Join_2_Value_0 String +1 UInt16 1 UInt16 Join_1_Value_1 String 1 UInt16 Join_2_Value_1 String +SELECT '--'; +-- +SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id); +Join_1_Value_0 String Join_2_Value_0 String +Join_1_Value_1 String Join_2_Value_1 String +SELECT '--'; +-- +SELECT 1 FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id); +1 +1 
+SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (test_value); -- { serverError 47 } +SELECT 'JOIN LEFT'; +JOIN LEFT +SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), +t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id); +0 UInt16 0 UInt16 Join_1_Value_0 String 0 UInt16 Join_2_Value_0 String +1 UInt16 1 UInt16 Join_1_Value_1 String 1 UInt16 Join_2_Value_1 String +2 UInt16 2 UInt16 Join_1_Value_2 String 0 UInt16 String +SELECT '--'; +-- +SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id); +Join_1_Value_0 String Join_2_Value_0 String +Join_1_Value_1 String Join_2_Value_1 String +Join_1_Value_2 String String +SELECT '--'; +-- +SELECT 1 FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id); +1 +1 +1 +SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (test_value); -- { serverError 47 } +SELECT 'JOIN RIGHT'; +JOIN RIGHT +SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), +t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id); +0 UInt16 0 UInt16 Join_1_Value_0 String 0 UInt16 Join_2_Value_0 String +1 UInt16 1 UInt16 Join_1_Value_1 String 1 UInt16 Join_2_Value_1 String +3 UInt16 0 UInt16 String 3 UInt16 Join_2_Value_3 String +SELECT '--'; +-- +SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id); +Join_1_Value_0 String Join_2_Value_0 String +Join_1_Value_1 String Join_2_Value_1 String + String Join_2_Value_3 String +SELECT '--'; +-- +SELECT 1 FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id); +1 +1 +1 +SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (test_value); -- { serverError 47 } +SELECT 'JOIN FULL'; +JOIN FULL +SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), +t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id); +0 UInt16 0 UInt16 Join_1_Value_0 String 0 UInt16 Join_2_Value_0 String +1 UInt16 1 UInt16 Join_1_Value_1 String 1 UInt16 Join_2_Value_1 String +2 UInt16 2 UInt16 Join_1_Value_2 String 0 UInt16 String +0 UInt16 0 UInt16 String 3 UInt16 Join_2_Value_3 String +SELECT '--'; +-- +SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id); +Join_1_Value_0 String Join_2_Value_0 String +Join_1_Value_1 String Join_2_Value_1 String +Join_1_Value_2 String String + String Join_2_Value_3 String +SELECT '--'; +-- +SELECT 1 FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id); +1 +1 +1 +1 +SELECT t1.value AS t1_value, 
toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (test_value); -- { serverError 47 } +SELECT 'First JOIN INNER second JOIN INNER'; +First JOIN INNER second JOIN INNER +SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), +t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value) +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id); +0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String +1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String +SELECT '--'; +-- +SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value) +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id); +Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String +Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String +SELECT '--'; +-- +SELECT 1 FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id); +1 +1 +SELECT id FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id INNER JOIN test_table_join_3 AS t3 USING (id); -- { serverError 207 } +SELECT 'First JOIN INNER second JOIN LEFT'; +First JOIN INNER second JOIN LEFT +SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), +t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value) +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id); +0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String +1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String +SELECT '--'; +-- +SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value) +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id); +Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String +Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String +SELECT '--'; +-- +SELECT 1 FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id); +1 +1 +SELECT id FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id LEFT JOIN test_table_join_3 AS t3 USING (id); -- { serverError 207 } +SELECT 'First JOIN INNER second JOIN RIGHT'; +First JOIN INNER second JOIN RIGHT +SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), +t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value) +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id); +0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 
Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String +1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String +4 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String +SELECT '--'; +-- +SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value) +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id); +Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String +Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String + String String Join_3_Value_4 String +SELECT '--'; +-- +SELECT 1 FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id); +1 +1 +1 +SELECT id FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id RIGHT JOIN test_table_join_3 AS t3 USING (id); -- { serverError 207 } +SELECT 'First JOIN INNER second JOIN FULL'; +First JOIN INNER second JOIN FULL +SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), +t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value) +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id); +0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String +1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String +0 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String +SELECT '--'; +-- +SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value) +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id); +Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String +Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String + String String Join_3_Value_4 String +SELECT '--'; +-- +SELECT 1 FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id); +1 +1 +1 +SELECT id FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id FULL JOIN test_table_join_3 AS t3 USING (id); -- { serverError 207 } +SELECT 'First JOIN LEFT second JOIN INNER'; +First JOIN LEFT second JOIN INNER +SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), +t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value) +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id); +0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String +1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String +SELECT '--'; +-- +SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value) +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id); +Join_1_Value_0 String Join_2_Value_0 String 
Join_3_Value_0 String +Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String +SELECT '--'; +-- +SELECT 1 FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id); +1 +1 +SELECT id FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id INNER JOIN test_table_join_3 AS t3 USING (id); -- { serverError 207 } +SELECT 'First JOIN LEFT second JOIN LEFT'; +First JOIN LEFT second JOIN LEFT +SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), +t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value) +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id); +0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String +1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String +2 UInt64 2 UInt64 Join_1_Value_2 String 0 UInt64 String 0 UInt64 String +SELECT '--'; +-- +SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value) +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id); +Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String +Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String +Join_1_Value_2 String String String +SELECT '--'; +-- +SELECT 1 FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id); +1 +1 +1 +SELECT id FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id LEFT JOIN test_table_join_3 AS t3 USING (id); -- { serverError 207 } +SELECT 'First JOIN LEFT second JOIN RIGHT'; +First JOIN LEFT second JOIN RIGHT +SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), +t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value) +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id); +0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String +1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String +4 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String +SELECT '--'; +-- +SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value) +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id); +Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String +Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String + String String Join_3_Value_4 String +SELECT '--'; +-- +SELECT 1 FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id); +1 +1 +1 +SELECT id FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id RIGHT JOIN test_table_join_3 AS t3 USING (id); -- { serverError 207 } +SELECT 'First JOIN LEFT second JOIN FULL'; +First JOIN LEFT second JOIN FULL +SELECT id AS 
using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), +t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value) +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id); +0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String +1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String +2 UInt64 2 UInt64 Join_1_Value_2 String 0 UInt64 String 0 UInt64 String +0 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String +SELECT '--'; +-- +SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value) +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id); +Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String +Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String +Join_1_Value_2 String String String + String String Join_3_Value_4 String +SELECT '--'; +-- +SELECT 1 FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id); +1 +1 +1 +1 +SELECT id FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id FULL JOIN test_table_join_3 AS t3 USING (id); -- { serverError 207 } +SELECT 'First JOIN RIGHT second JOIN INNER'; +First JOIN RIGHT second JOIN INNER +SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), +t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value) +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id); +0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String +1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String +SELECT '--'; +-- +SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value) +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id); +Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String +Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String +SELECT '--'; +-- +SELECT 1 FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id); +1 +1 +SELECT id FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id INNER JOIN test_table_join_3 AS t3 USING (id); -- { serverError 207 } +SELECT 'First JOIN RIGHT second JOIN LEFT'; +First JOIN RIGHT second JOIN LEFT +SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), +t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value) +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id); +0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 
String +1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String +3 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 String +SELECT '--'; +-- +SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value) +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id); +Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String +Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String + String Join_2_Value_3 String String +SELECT '--'; +-- +SELECT 1 FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id); +1 +1 +1 +SELECT id FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id LEFT JOIN test_table_join_3 AS t3 USING (id); -- { serverError 207 } +SELECT 'First JOIN RIGHT second JOIN RIGHT'; +First JOIN RIGHT second JOIN RIGHT +SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), +t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value) +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id); +0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String +1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String +4 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String +SELECT '--'; +-- +SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value) +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id); +Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String +Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String + String String Join_3_Value_4 String +SELECT '--'; +-- +SELECT 1 FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id); +1 +1 +1 +SELECT id FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id RIGHT JOIN test_table_join_3 AS t3 USING (id); -- { serverError 207 } +SELECT 'First JOIN RIGHT second JOIN FULL'; +First JOIN RIGHT second JOIN FULL +SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), +t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value) +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id); +0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String +1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String +3 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 String +0 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String +SELECT '--'; +-- +SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value) +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING 
(id) FULL JOIN test_table_join_3 AS t3 USING(id); +Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String +Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String + String Join_2_Value_3 String String + String String Join_3_Value_4 String +SELECT '--'; +-- +SELECT 1 FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id); +1 +1 +1 +1 +SELECT id FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id FULL JOIN test_table_join_3 AS t3 USING (id); -- { serverError 207 } +SELECT 'First JOIN FULL second JOIN INNER'; +First JOIN FULL second JOIN INNER +SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), +t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value) +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id); +0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String +1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String +0 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 Join_3_Value_0 String +SELECT '--'; +-- +SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value) +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id); +Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String +Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String + String Join_2_Value_3 String Join_3_Value_0 String +SELECT '--'; +-- +SELECT 1 FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id); +1 +1 +1 +SELECT id FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id INNER JOIN test_table_join_3 AS t3 USING (id); -- { serverError 207 } +SELECT 'First JOIN FULL second JOIN LEFT'; +First JOIN FULL second JOIN LEFT +SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), +t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value) +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id); +0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String +1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String +2 UInt64 2 UInt64 Join_1_Value_2 String 0 UInt64 String 0 UInt64 String +0 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 Join_3_Value_0 String +SELECT '--'; +-- +SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value) +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id); +Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String +Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String +Join_1_Value_2 String String String + String Join_2_Value_3 String Join_3_Value_0 String +SELECT '--'; +-- +SELECT 1 FROM test_table_join_1 AS t1 FULL 
JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id); +1 +1 +1 +1 +SELECT id FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id LEFT JOIN test_table_join_3 AS t3 USING (id); -- { serverError 207 } +SELECT 'First JOIN FULL second JOIN RIGHT'; +First JOIN FULL second JOIN RIGHT +SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), +t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value) +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id); +0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String +1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String +0 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 Join_3_Value_0 String +4 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String +SELECT '--'; +-- +SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value) +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id); +Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String +Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String + String Join_2_Value_3 String Join_3_Value_0 String + String String Join_3_Value_4 String +SELECT '--'; +-- +SELECT 1 FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id); +1 +1 +1 +1 +SELECT id FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id RIGHT JOIN test_table_join_3 AS t3 USING (id); -- { serverError 207 } +SELECT 'First JOIN FULL second JOIN FULL'; +First JOIN FULL second JOIN FULL +SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), +t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value) +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id); +0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String +1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String +2 UInt64 2 UInt64 Join_1_Value_2 String 0 UInt64 String 0 UInt64 String +0 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 Join_3_Value_0 String +0 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String +SELECT '--'; +-- +SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value) +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id); +Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String +Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String +Join_1_Value_2 String String String + String Join_2_Value_3 String Join_3_Value_0 String + String String Join_3_Value_4 String +SELECT '--'; +-- +SELECT 1 FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id); +1 +1 +1 +1 +1 
+SELECT id FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id FULL JOIN test_table_join_3 AS t3 USING (id); -- { serverError 207 } diff --git a/tests/queries/0_stateless/02374_analyzer_join_using.sql.j2 b/tests/queries/0_stateless/02374_analyzer_join_using.sql.j2 new file mode 100644 index 00000000000..26fb52716ff --- /dev/null +++ b/tests/queries/0_stateless/02374_analyzer_join_using.sql.j2 @@ -0,0 +1,87 @@ +SET allow_experimental_analyzer = 1; + +DROP TABLE IF EXISTS test_table_join_1; +CREATE TABLE test_table_join_1 +( + id UInt8, + value String +) ENGINE = TinyLog; + +DROP TABLE IF EXISTS test_table_join_2; +CREATE TABLE test_table_join_2 +( + id UInt16, + value String +) ENGINE = TinyLog; + +DROP TABLE IF EXISTS test_table_join_3; +CREATE TABLE test_table_join_3 +( + id UInt64, + value String +) ENGINE = TinyLog; + +INSERT INTO test_table_join_1 VALUES (0, 'Join_1_Value_0'); +INSERT INTO test_table_join_1 VALUES (1, 'Join_1_Value_1'); +INSERT INTO test_table_join_1 VALUES (2, 'Join_1_Value_2'); + +INSERT INTO test_table_join_2 VALUES (0, 'Join_2_Value_0'); +INSERT INTO test_table_join_2 VALUES (1, 'Join_2_Value_1'); +INSERT INTO test_table_join_2 VALUES (3, 'Join_2_Value_3'); + +INSERT INTO test_table_join_3 VALUES (0, 'Join_3_Value_0'); +INSERT INTO test_table_join_3 VALUES (1, 'Join_3_Value_1'); +INSERT INTO test_table_join_3 VALUES (4, 'Join_3_Value_4'); + +-- { echoOn } + +{% for join_type in ['INNER', 'LEFT', 'RIGHT', 'FULL'] -%} + +SELECT 'JOIN {{ join_type }}'; + +SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), +t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 USING (id); + +SELECT '--'; + +SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 USING (id); + +SELECT '--'; + +SELECT 1 FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 USING (id); + +SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 USING (test_value); -- { serverError 47 } + +{% endfor %} + +{% for first_join_type in ['INNER', 'LEFT', 'RIGHT', 'FULL'] -%} +{% for second_join_type in ['INNER', 'LEFT', 'RIGHT', 'FULL'] -%} + +SELECT 'First JOIN {{ first_join_type }} second JOIN {{ second_join_type }}'; + +SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), +t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value) +FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 USING (id) {{ second_join_type }} JOIN test_table_join_3 AS t3 USING(id); + +SELECT '--'; + +SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value) +FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 USING (id) {{ second_join_type }} JOIN test_table_join_3 AS t3 USING(id); + +SELECT '--'; + +SELECT 1 FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 USING (id) {{ second_join_type }} JOIN test_table_join_3 AS t3 USING(id); + +SELECT id FROM test_table_join_1 AS 
t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id {{ second_join_type }} JOIN test_table_join_3 AS t3 USING (id); -- { serverError 207 } + +{% endfor %} +{% endfor %} + +-- { echoOff } + +DROP TABLE test_table_join_1; +DROP TABLE test_table_join_2; +DROP TABLE test_table_join_3; diff --git a/tests/queries/0_stateless/02375_analyzer_union.reference b/tests/queries/0_stateless/02375_analyzer_union.reference new file mode 100644 index 00000000000..199b9af5313 --- /dev/null +++ b/tests/queries/0_stateless/02375_analyzer_union.reference @@ -0,0 +1,62 @@ +-- { echoOn } + +SELECT 'Union constants'; +Union constants +SELECT 1 UNION ALL SELECT 1; +1 +1 +SELECT '--'; +-- +SELECT 1 UNION DISTINCT SELECT 1 UNION ALL SELECT 1; +1 +1 +SELECT '--'; +-- +SELECT 1 INTERSECT SELECT 1; +1 +SELECT '--'; +-- +SELECT 1 EXCEPT SELECT 1; +SELECT '--'; +-- +SELECT id FROM (SELECT 1 AS id UNION ALL SELECT 1); +1 +1 +SELECT 'Union non constants'; +Union non constants +SELECT value FROM (SELECT 1 as value UNION ALL SELECT 1 UNION ALL SELECT 1); +1 +1 +1 +SELECT '--'; +-- +SELECT id FROM test_table UNION ALL SELECT id FROM test_table; +0 +0 +SELECT '--'; +-- +SELECT id FROM test_table UNION DISTINCT SELECT id FROM test_table; +0 +SELECT '--'; +-- +SELECT id FROM test_table INTERSECT SELECT id FROM test_table; +0 +SELECT '--'; +-- +SELECT id FROM test_table EXCEPT SELECT id FROM test_table; +SELECT '--'; +-- +SELECT id FROM (SELECT id FROM test_table UNION ALL SELECT id FROM test_table); +0 +0 +SELECT '--'; +-- +SELECT id FROM (SELECT id FROM test_table UNION DISTINCT SELECT id FROM test_table); +0 +SELECT '--'; +-- +SELECT id FROM (SELECT id FROM test_table INTERSECT SELECT id FROM test_table); +0 +SELECT '--'; +-- +SELECT id FROM (SELECT id FROM test_table EXCEPT SELECT id FROM test_table); diff --git a/tests/queries/0_stateless/02375_analyzer_union.sql b/tests/queries/0_stateless/02375_analyzer_union.sql new file mode 100644 index 00000000000..5e41f07d217 --- /dev/null +++ b/tests/queries/0_stateless/02375_analyzer_union.sql @@ -0,0 +1,71 @@ +SET allow_experimental_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO test_table VALUES (0, 'Value'); + +-- { echoOn } + +SELECT 'Union constants'; + +SELECT 1 UNION ALL SELECT 1; + +SELECT '--'; + +SELECT 1 UNION DISTINCT SELECT 1 UNION ALL SELECT 1; + +SELECT '--'; + +SELECT 1 INTERSECT SELECT 1; + +SELECT '--'; + +SELECT 1 EXCEPT SELECT 1; + +SELECT '--'; + +SELECT id FROM (SELECT 1 AS id UNION ALL SELECT 1); + +SELECT 'Union non constants'; + +SELECT value FROM (SELECT 1 as value UNION ALL SELECT 1 UNION ALL SELECT 1); + +SELECT '--'; + +SELECT id FROM test_table UNION ALL SELECT id FROM test_table; + +SELECT '--'; + +SELECT id FROM test_table UNION DISTINCT SELECT id FROM test_table; + +SELECT '--'; + +SELECT id FROM test_table INTERSECT SELECT id FROM test_table; + +SELECT '--'; +SELECT id FROM test_table EXCEPT SELECT id FROM test_table; + +SELECT '--'; + +SELECT id FROM (SELECT id FROM test_table UNION ALL SELECT id FROM test_table); + +SELECT '--'; + +SELECT id FROM (SELECT id FROM test_table UNION DISTINCT SELECT id FROM test_table); + +SELECT '--'; + +SELECT id FROM (SELECT id FROM test_table INTERSECT SELECT id FROM test_table); + +SELECT '--'; + +SELECT id FROM (SELECT id FROM test_table EXCEPT SELECT id FROM test_table); + +-- { echoOff } + +DROP TABLE test_table; diff --git a/tests/queries/0_stateless/02376_analyzer_in_function_subquery.reference 
b/tests/queries/0_stateless/02376_analyzer_in_function_subquery.reference new file mode 100644 index 00000000000..3641c7d2f09 --- /dev/null +++ b/tests/queries/0_stateless/02376_analyzer_in_function_subquery.reference @@ -0,0 +1,43 @@ +-- { echoOn } + +SELECT id, value FROM test_table WHERE 1 IN (SELECT 1); +0 Value_0 +1 Value_1 +2 Value_2 +SELECT '--'; +-- +SELECT id, value FROM test_table WHERE 0 IN (SELECT 1); +SELECT '--'; +-- +SELECT id, value FROM test_table WHERE id IN (SELECT 1); +1 Value_1 +SELECT '--'; +-- +SELECT id, value FROM test_table WHERE id IN (SELECT 2); +2 Value_2 +SELECT '--'; +-- +SELECT id, value FROM test_table WHERE id IN test_table_for_in; +0 Value_0 +1 Value_1 +SELECT '--'; +-- +SELECT id, value FROM test_table WHERE id IN (SELECT id FROM test_table_for_in); +0 Value_0 +1 Value_1 +SELECT '--'; +-- +SELECT id, value FROM test_table WHERE id IN (SELECT id FROM test_table_for_in UNION DISTINCT SELECT id FROM test_table_for_in); +0 Value_0 +1 Value_1 +SELECT '--'; +-- +WITH cte_test_table_for_in AS (SELECT id FROM test_table_for_in) SELECT id, value FROM test_table WHERE id IN cte_test_table_for_in; +0 Value_0 +1 Value_1 +SELECT '--'; +-- +WITH cte_test_table_for_in AS (SELECT id FROM test_table_for_in) SELECT id, value +FROM test_table WHERE id IN (SELECT id FROM cte_test_table_for_in UNION DISTINCT SELECT id FROM cte_test_table_for_in); +0 Value_0 +1 Value_1 diff --git a/tests/queries/0_stateless/02376_analyzer_in_function_subquery.sql b/tests/queries/0_stateless/02376_analyzer_in_function_subquery.sql new file mode 100644 index 00000000000..72a4edb8567 --- /dev/null +++ b/tests/queries/0_stateless/02376_analyzer_in_function_subquery.sql @@ -0,0 +1,60 @@ +SET allow_experimental_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO test_table VALUES (0, 'Value_0'), (1, 'Value_1'), (2, 'Value_2'); + +DROP TABLE IF EXISTS test_table_for_in; +CREATE TABLE test_table_for_in +( + id UInt64 +) ENGINE=TinyLog; + +INSERT INTO test_table_for_in VALUES (0), (1); + +-- { echoOn } + +SELECT id, value FROM test_table WHERE 1 IN (SELECT 1); + +SELECT '--'; + +SELECT id, value FROM test_table WHERE 0 IN (SELECT 1); + +SELECT '--'; + +SELECT id, value FROM test_table WHERE id IN (SELECT 1); + +SELECT '--'; + +SELECT id, value FROM test_table WHERE id IN (SELECT 2); + +SELECT '--'; + +SELECT id, value FROM test_table WHERE id IN test_table_for_in; + +SELECT '--'; + +SELECT id, value FROM test_table WHERE id IN (SELECT id FROM test_table_for_in); + +SELECT '--'; + +SELECT id, value FROM test_table WHERE id IN (SELECT id FROM test_table_for_in UNION DISTINCT SELECT id FROM test_table_for_in); + +SELECT '--'; + +WITH cte_test_table_for_in AS (SELECT id FROM test_table_for_in) SELECT id, value FROM test_table WHERE id IN cte_test_table_for_in; + +SELECT '--'; + +WITH cte_test_table_for_in AS (SELECT id FROM test_table_for_in) SELECT id, value +FROM test_table WHERE id IN (SELECT id FROM cte_test_table_for_in UNION DISTINCT SELECT id FROM cte_test_table_for_in); + +-- { echoOff } + +DROP TABLE test_table; +DROP TABLE test_table_for_in; diff --git a/tests/queries/0_stateless/02377_analyzer_in_function_set.reference b/tests/queries/0_stateless/02377_analyzer_in_function_set.reference new file mode 100644 index 00000000000..b32da0d591a --- /dev/null +++ b/tests/queries/0_stateless/02377_analyzer_in_function_set.reference @@ -0,0 +1,2 @@ +0 Value_0 +1 Value_1 diff --git 
a/tests/queries/0_stateless/02377_analyzer_in_function_set.sql b/tests/queries/0_stateless/02377_analyzer_in_function_set.sql new file mode 100644 index 00000000000..e3cbcf75a9c --- /dev/null +++ b/tests/queries/0_stateless/02377_analyzer_in_function_set.sql @@ -0,0 +1,23 @@ +SET allow_experimental_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO test_table VALUES (0, 'Value_0'), (1, 'Value_1'), (2, 'Value_2'); + +DROP TABLE IF EXISTS special_set_table; +CREATE TABLE special_set_table +( + id UInt64 +) ENGINE=Set; + +INSERT INTO special_set_table VALUES (0), (1); + +SELECT id, value FROM test_table WHERE id IN special_set_table; + +DROP TABLE special_set_table; +DROP TABLE test_table; diff --git a/tests/queries/0_stateless/02378_analyzer_projection_names.reference b/tests/queries/0_stateless/02378_analyzer_projection_names.reference new file mode 100644 index 00000000000..1fa79677876 --- /dev/null +++ b/tests/queries/0_stateless/02378_analyzer_projection_names.reference @@ -0,0 +1,739 @@ +-- { echoOn } + +SELECT 'Constants'; +Constants +DESCRIBE (SELECT 1, 'Value'); +1 UInt8 +\'Value\' String +SELECT '--'; +-- +DESCRIBE (SELECT 1 + 1, concat('Value_1', 'Value_2')); +plus(1, 1) UInt16 +concat(\'Value_1\', \'Value_2\') String +SELECT '--'; +-- +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)')); +CAST(tuple(1, \'Value\'), \'Tuple (id UInt64, value String)\') Tuple(id UInt64, value String) +SELECT 'Columns'; +Columns +DESCRIBE (SELECT test_table.id, test_table.id, id FROM test_table); +id UInt64 +id UInt64 +id UInt64 +SELECT '--'; +-- +DESCRIBE (SELECT * FROM test_table); +id UInt64 +value String +SELECT '--'; +-- +DESCRIBE (SELECT * APPLY toString FROM test_table); +toString(id) String +toString(value) String +SELECT '--'; +-- +DESCRIBE (SELECT * APPLY x -> toString(x) FROM test_table); +toString(id) String +toString(value) String +SELECT '--'; +-- +DESCRIBE (SELECT tuple_value.* FROM test_table_compound); +tuple_value.value_1 UInt64 +tuple_value.value_2 String +SELECT '--'; +-- +DESCRIBE (SELECT tuple_value.* APPLY x -> x FROM test_table_compound); +tuple_value.value_1 UInt64 +tuple_value.value_2 String +SELECT '--'; +-- +DESCRIBE (SELECT tuple_value.* APPLY toString FROM test_table_compound); +toString(tuple_value.value_1) String +toString(tuple_value.value_2) String +SELECT '--'; +-- +DESCRIBE (SELECT tuple_value.* APPLY x -> toString(x) FROM test_table_compound); +toString(tuple_value.value_1) String +toString(tuple_value.value_2) String +SELECT 'Constants with aliases'; +Constants with aliases +DESCRIBE (SELECT 1 AS a, a AS b, b, b AS c, c, 'Value' AS d, d AS e, e AS f); +a UInt8 +b UInt8 +b UInt8 +c UInt8 +c UInt8 +d String +e String +f String +SELECT '--'; +-- +DESCRIBE (SELECT plus(1 AS a, a AS b), plus(b, b), plus(b, b) AS c, concat('Value' AS d, d) AS e, e); +plus(a, b) UInt16 +plus(b, b) UInt16 +c UInt16 +e String +e String +SELECT '--'; +-- +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS a, a.id, a.value); +a Tuple(id UInt64, value String) +a.id UInt64 +a.value String +SELECT '--'; +-- +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS a, a.*); +a Tuple(id UInt64, value String) +a.id UInt64 +a.value String +SELECT '--'; +-- +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS a, a.* EXCEPT id); +a Tuple(id UInt64, value String) +a.value String +SELECT '--'; +-- +DESCRIBE (SELECT 
cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS a, a.* EXCEPT value); +a Tuple(id UInt64, value String) +a.id UInt64 +SELECT '--'; +-- +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS a, a.* EXCEPT value APPLY toString); +a Tuple(id UInt64, value String) +toString(a.id) String +SELECT '--'; +-- +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS a, a.* EXCEPT value APPLY x -> toString(x)); +a Tuple(id UInt64, value String) +toString(a.id) String +SELECT '--'; +-- +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS a, untuple(a)); +a Tuple(id UInt64, value String) +tupleElement(a, \'id\') UInt64 +tupleElement(a, \'value\') String +SELECT '--'; +-- +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS a, untuple(a) AS b); +a Tuple(id UInt64, value String) +b.id UInt64 +b.value String +SELECT 'Columns with aliases'; +Columns with aliases +DESCRIBE (SELECT test_table.id AS a, a, test_table.id AS b, b AS c, c FROM test_table); +a UInt64 +a UInt64 +b UInt64 +c UInt64 +c UInt64 +SELECT '--'; +-- +DESCRIBE (SELECT plus(test_table.id AS a, test_table.id), plus(id, id AS b), plus(b, b), plus(test_table.id, test_table.id) FROM test_table); +plus(a, id) UInt64 +plus(id, b) UInt64 +plus(b, b) UInt64 +plus(id, id) UInt64 +SELECT '--'; +-- +DESCRIBE (SELECT test_table.* REPLACE id + (id AS id_alias) AS id, id_alias FROM test_table); +plus(id, id_alias) UInt64 +value String +id_alias UInt64 +SELECT 'Matcher'; +Matcher +DESCRIBE (SELECT * FROM test_table); +id UInt64 +value String +SELECT '--'; +-- +DESCRIBE (SELECT test_table.* FROM test_table); +id UInt64 +value String +SELECT '--'; +-- +DESCRIBE (SELECT 1 AS id, 2 AS value, * FROM test_table); +id UInt8 +value UInt8 +test_table.id UInt64 +test_table.value String +SELECT '--'; +-- +DESCRIBE (SELECT 1 AS id, 2 AS value, * FROM test_table AS t1); +id UInt8 +value UInt8 +t1.id UInt64 +t1.value String +SELECT 'Lambda'; +Lambda +DESCRIBE (SELECT arrayMap(x -> x + 1, [1,2,3])); +arrayMap(lambda(tuple(x), plus(x, 1)), [1, 2, 3]) Array(UInt16) +SELECT '--'; +-- +DESCRIBE (SELECT 1 AS a, arrayMap(x -> x + a, [1,2,3])); +a UInt8 +arrayMap(lambda(tuple(x), plus(x, a)), [1, 2, 3]) Array(UInt16) +SELECT '--'; +-- +DESCRIBE (SELECT arrayMap(x -> x + test_table.id + test_table.id + id, [1,2,3]) FROM test_table); +arrayMap(lambda(tuple(x), plus(plus(plus(x, id), id), id)), [1, 2, 3]) Array(UInt64) +SELECT '--'; +-- +DESCRIBE (SELECT arrayMap(x -> x + (test_table.id AS first) + (test_table.id AS second) + id, [1,2,3]) FROM test_table); +arrayMap(lambda(tuple(x), plus(plus(plus(x, first), second), id)), [1, 2, 3]) Array(UInt64) +SELECT '--'; +-- +DESCRIBE (SELECT arrayMap(x -> test_table.* EXCEPT value, [1,2,3]) FROM test_table); +arrayMap(lambda(tuple(x), id), [1, 2, 3]) Array(UInt64) +SELECT '--'; +-- +DESCRIBE (SELECT arrayMap(x -> test_table.* EXCEPT value APPLY x -> x, [1,2,3]) FROM test_table); +arrayMap(lambda(tuple(x), id), [1, 2, 3]) Array(UInt64) +SELECT '--'; +-- +DESCRIBE (SELECT arrayMap(x -> test_table.* EXCEPT value APPLY toString, [1,2,3]) FROM test_table); +arrayMap(lambda(tuple(x), toString(id)), [1, 2, 3]) Array(String) +SELECT '--'; +-- +DESCRIBE (SELECT arrayMap(x -> test_table.* EXCEPT value APPLY x -> toString(x), [1,2,3]) FROM test_table); +arrayMap(lambda(tuple(x), toString(id)), [1, 2, 3]) Array(String) +SELECT '--'; +-- +DESCRIBE (SELECT cast(tuple(1), 'Tuple (id UInt64)') AS compound_value, arrayMap(x -> 
compound_value.*, [1,2,3])); +compound_value Tuple(id UInt64) +arrayMap(lambda(tuple(x), compound_value.id), [1, 2, 3]) Array(UInt64) +SELECT '--'; +-- +DESCRIBE (SELECT cast(tuple(1), 'Tuple (id UInt64)') AS compound_value, arrayMap(x -> compound_value.* APPLY x -> x, [1,2,3])); +compound_value Tuple(id UInt64) +arrayMap(lambda(tuple(x), compound_value.id), [1, 2, 3]) Array(UInt64) +SELECT '--'; +-- +DESCRIBE (SELECT cast(tuple(1), 'Tuple (id UInt64)') AS compound_value, arrayMap(x -> compound_value.* APPLY toString, [1,2,3])); +compound_value Tuple(id UInt64) +arrayMap(lambda(tuple(x), toString(compound_value.id)), [1, 2, 3]) Array(String) +SELECT '--'; +-- +DESCRIBE (SELECT cast(tuple(1), 'Tuple (id UInt64)') AS compound_value, arrayMap(x -> compound_value.* APPLY x -> toString(x), [1,2,3])); +compound_value Tuple(id UInt64) +arrayMap(lambda(tuple(x), toString(compound_value.id)), [1, 2, 3]) Array(String) +SELECT '--'; +-- +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS compound_value, arrayMap(x -> compound_value.* EXCEPT value, [1,2,3])); +compound_value Tuple(id UInt64, value String) +arrayMap(lambda(tuple(x), compound_value.id), [1, 2, 3]) Array(UInt64) +SELECT '--'; +-- +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS compound_value, arrayMap(x -> compound_value.* EXCEPT value APPLY x -> x, [1,2,3])); +compound_value Tuple(id UInt64, value String) +arrayMap(lambda(tuple(x), compound_value.id), [1, 2, 3]) Array(UInt64) +SELECT '--'; +-- +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS compound_value, arrayMap(x -> compound_value.* EXCEPT value APPLY toString, [1,2,3])); +compound_value Tuple(id UInt64, value String) +arrayMap(lambda(tuple(x), toString(compound_value.id)), [1, 2, 3]) Array(String) +SELECT '--'; +-- +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS compound_value, arrayMap(x -> compound_value.* EXCEPT value APPLY x -> toString(x), [1,2,3])); +compound_value Tuple(id UInt64, value String) +arrayMap(lambda(tuple(x), toString(compound_value.id)), [1, 2, 3]) Array(String) +SELECT '--'; +-- +DESCRIBE (SELECT cast(tuple(1), 'Tuple (id UInt64)') AS a, arrayMap(x -> untuple(a), [1,2,3]) FROM test_table); +a Tuple(id UInt64) +arrayMap(lambda(tuple(x), tupleElement(a, \'id\')), [1, 2, 3]) Array(UInt64) +SELECT '--'; +-- +DESCRIBE (SELECT cast(tuple(1), 'Tuple (id UInt64)') AS a, arrayMap(x -> untuple(a) AS untupled_value, [1,2,3]) FROM test_table); +a Tuple(id UInt64) +arrayMap(untupled_value, [1, 2, 3]) Array(UInt64) +SELECT '--'; +-- +DESCRIBE (SELECT cast(tuple(1), 'Tuple (id UInt64)') AS a, untuple(a) AS untupled_value, arrayMap(x -> untupled_value, [1,2,3]) FROM test_table); +a Tuple(id UInt64) +untupled_value.id UInt64 +arrayMap(lambda(tuple(x), untupled_value.id), [1, 2, 3]) Array(UInt64) +SELECT '--'; +-- +DESCRIBE (SELECT cast(tuple(1), 'Tuple (id UInt64)') AS a, untuple(a) AS untupled_value, arrayMap(x -> untupled_value AS untupled_value_in_lambda, [1,2,3]) FROM test_table); +a Tuple(id UInt64) +untupled_value.id UInt64 +arrayMap(untupled_value_in_lambda, [1, 2, 3]) Array(UInt64) +SELECT 'Standalone lambda'; +Standalone lambda +DESCRIBE (WITH x -> x + 1 AS test_lambda SELECT test_lambda(1)); +test_lambda(1) UInt16 +SELECT '--'; +-- +DESCRIBE (WITH x -> * AS test_lambda SELECT test_lambda(1) AS value, value FROM test_table); +id UInt64 +value String +id UInt64 +value String +SELECT 'Subquery'; +Subquery +DESCRIBE (SELECT (SELECT 1), (SELECT 2), 
(SELECT 3) AS a, (SELECT 4)); +_subquery_1 Nullable(UInt8) +_subquery_2 Nullable(UInt8) +a Nullable(UInt8) +_subquery_4 Nullable(UInt8) +SELECT '--'; +-- +DESCRIBE (SELECT arrayMap(x -> (SELECT 1), [1,2,3]), arrayMap(x -> (SELECT 2) AS a, [1, 2, 3]), arrayMap(x -> (SELECT 1), [1,2,3])); +arrayMap(lambda(tuple(x), _subquery_1), [1, 2, 3]) Array(Nullable(UInt8)) +arrayMap(a, [1, 2, 3]) Array(Nullable(UInt8)) +arrayMap(lambda(tuple(x), _subquery_3), [1, 2, 3]) Array(Nullable(UInt8)) +SELECT '--'; +-- +DESCRIBE (SELECT (SELECT 1 AS a, 2 AS b) AS c, c.a, c.b); +c Tuple(a UInt8, b UInt8) +c.a UInt8 +c.b UInt8 +SELECT '--'; +-- +DESCRIBE (SELECT (SELECT 1 AS a, 2 AS b) AS c, c.*); +c Tuple(a UInt8, b UInt8) +c.a UInt8 +c.b UInt8 +SELECT '--'; +-- +DESCRIBE (SELECT (SELECT 1 UNION DISTINCT SELECT 1), (SELECT 2 UNION DISTINCT SELECT 2), (SELECT 3 UNION DISTINCT SELECT 3) AS a, (SELECT 4 UNION DISTINCT SELECT 4)); +_subquery_1 Nullable(UInt8) +_subquery_2 Nullable(UInt8) +a Nullable(UInt8) +_subquery_4 Nullable(UInt8) +SELECT '--'; +-- +DESCRIBE (SELECT arrayMap(x -> (SELECT 1 UNION DISTINCT SELECT 1), [1,2,3]), arrayMap(x -> (SELECT 2 UNION DISTINCT SELECT 2) AS a, [1, 2, 3]), +arrayMap(x -> (SELECT 3 UNION DISTINCT SELECT 3), [1,2,3])); +arrayMap(lambda(tuple(x), _subquery_1), [1, 2, 3]) Array(Nullable(UInt8)) +arrayMap(a, [1, 2, 3]) Array(Nullable(UInt8)) +arrayMap(lambda(tuple(x), _subquery_3), [1, 2, 3]) Array(Nullable(UInt8)) +SELECT '--'; +-- +DESCRIBE (SELECT (SELECT 1 AS a, 2 AS b UNION DISTINCT SELECT 1, 2) AS c, c.a, c.b); +c Tuple(a UInt8, b UInt8) +c.a UInt8 +c.b UInt8 +SELECT '--'; +-- +DESCRIBE (SELECT (SELECT 1 AS a, 2 AS b UNION DISTINCT SELECT 1, 2) AS c, c.*); +c Tuple(a UInt8, b UInt8) +c.a UInt8 +c.b UInt8 +SELECT '--'; +-- +DESCRIBE (SELECT (SELECT 1), (SELECT 2 UNION DISTINCT SELECT 2), (SELECT 3) AS a, (SELECT 4 UNION DISTINCT SELECT 4)); +_subquery_1 Nullable(UInt8) +_subquery_2 Nullable(UInt8) +a Nullable(UInt8) +_subquery_4 Nullable(UInt8) +SELECT '--'; +-- +DESCRIBE (SELECT arrayMap(x -> (SELECT 1 UNION DISTINCT SELECT 1), [1,2,3]), arrayMap(x -> (SELECT 2) AS a, [1, 2, 3]), +arrayMap(x -> (SELECT 3 UNION DISTINCT SELECT 3), [1,2,3])); +arrayMap(lambda(tuple(x), _subquery_1), [1, 2, 3]) Array(Nullable(UInt8)) +arrayMap(a, [1, 2, 3]) Array(Nullable(UInt8)) +arrayMap(lambda(tuple(x), _subquery_3), [1, 2, 3]) Array(Nullable(UInt8)) +SELECT 'Window functions'; +Window functions +DESCRIBE (SELECT count() OVER ()); +count() OVER () UInt64 +SELECT '--'; +-- +DESCRIBE (SELECT count() OVER () AS window_function); +window_function UInt64 +SELECT '--'; +-- +DESCRIBE (SELECT count() OVER (PARTITION BY id) FROM test_table); +count() OVER (PARTITION BY id) UInt64 +SELECT '--'; +-- +DESCRIBE (SELECT count() OVER (PARTITION BY id, value) FROM test_table); +count() OVER (PARTITION BY id, value) UInt64 +SELECT '--'; +-- +DESCRIBE (SELECT count() OVER (PARTITION BY id, value ORDER BY id) FROM test_table); +count() OVER (PARTITION BY id, value ORDER BY id ASC) UInt64 +SELECT '--'; +-- +DESCRIBE (SELECT count() OVER (PARTITION BY id, value ORDER BY id ASC, value DESC ROWS CURRENT ROW) FROM test_table); +count() OVER (PARTITION BY id, value ORDER BY id ASC, value DESC ROWS BETWEEN CURRENT ROW AND CURRENT ROW) UInt64 +SELECT '--'; +-- +DESCRIBE (SELECT count() OVER (PARTITION BY id, value ORDER BY id ASC, value DESC ROWS BETWEEN CURRENT ROW AND CURRENT ROW) FROM test_table); +count() OVER (PARTITION BY id, value ORDER BY id ASC, value DESC ROWS BETWEEN CURRENT ROW AND CURRENT ROW) UInt64 
+SELECT '--'; +-- +DESCRIBE (SELECT count() OVER (PARTITION BY id, value ORDER BY id ASC, value DESC RANGE CURRENT ROW) FROM test_table); +count() OVER (PARTITION BY id, value ORDER BY id ASC, value DESC RANGE BETWEEN CURRENT ROW AND CURRENT ROW) UInt64 +SELECT '--'; +-- +DESCRIBE (SELECT count() OVER (PARTITION BY id, value ORDER BY id ASC, value DESC RANGE BETWEEN CURRENT ROW AND CURRENT ROW) FROM test_table); +count() OVER (PARTITION BY id, value ORDER BY id ASC, value DESC RANGE BETWEEN CURRENT ROW AND CURRENT ROW) UInt64 +SELECT '--'; +-- +DESCRIBE (SELECT count() OVER (PARTITION BY (id AS id_alias), (value AS value_alias) ORDER BY id ASC, value DESC ROWS CURRENT ROW) FROM test_table); +count() OVER (PARTITION BY id_alias, value_alias ORDER BY id ASC, value DESC ROWS BETWEEN CURRENT ROW AND CURRENT ROW) UInt64 +SELECT '--'; +-- +DESCRIBE (SELECT count() OVER (PARTITION BY id, value ORDER BY (id AS id_alias) ASC, (value AS value_alias) DESC ROWS CURRENT ROW) FROM test_table); +count() OVER (PARTITION BY id, value ORDER BY id_alias ASC, value_alias DESC ROWS BETWEEN CURRENT ROW AND CURRENT ROW) UInt64 +SELECT '--'; +-- +DESCRIBE (SELECT count() OVER (PARTITION BY id, value ORDER BY id ASC, value DESC ROWS BETWEEN 1 PRECEDING AND 2 FOLLOWING) FROM test_table); +count() OVER (PARTITION BY id, value ORDER BY id ASC, value DESC ROWS BETWEEN 1 PRECEDING AND 2 FOLLOWING) UInt64 +SELECT '--'; +-- +DESCRIBE (SELECT count() OVER (PARTITION BY id, value ORDER BY id ASC, value DESC ROWS BETWEEN 1 + 1 PRECEDING AND 2 + 2 FOLLOWING) FROM test_table); +count() OVER (PARTITION BY id, value ORDER BY id ASC, value DESC ROWS BETWEEN plus(1, 1) PRECEDING AND plus(2, 2) FOLLOWING) UInt64 +SELECT '--'; +-- +DESCRIBE (SELECT count() OVER (PARTITION BY id, value ORDER BY id ASC, value DESC ROWS BETWEEN ((1 + 1) AS frame_offset_begin) PRECEDING AND ((2 + 2) AS frame_offset_end) FOLLOWING) +FROM test_table); +count() OVER (PARTITION BY id, value ORDER BY id ASC, value DESC ROWS BETWEEN frame_offset_begin PRECEDING AND frame_offset_end FOLLOWING) UInt64 +SELECT '--'; +-- +DESCRIBE (SELECT count() OVER (ORDER BY toNullable(id) NULLS FIRST) FROM test_table); +count() OVER (ORDER BY toNullable(id) ASC NULLS FIRST) UInt64 +SELECT '--'; +-- +DESCRIBE (SELECT count() OVER (ORDER BY toNullable(id) NULLS LAST) FROM test_table); +count() OVER (ORDER BY toNullable(id) ASC NULLS LAST) UInt64 +SELECT '--'; +-- +DESCRIBE (SELECT count() OVER (ORDER BY id WITH FILL FROM 1 TO 5 STEP 1) FROM test_table); +count() OVER (ORDER BY id ASC WITH FILL FROM 1 TO 5 STEP 1) UInt64 +SELECT '--'; +-- +DESCRIBE (SELECT count() OVER (ORDER BY id WITH FILL FROM 1 + 1 TO 6 STEP 1 + 1) FROM test_table); +count() OVER (ORDER BY id ASC WITH FILL FROM plus(1, 1) TO 6 STEP plus(1, 1)) UInt64 +SELECT '--'; +-- +DESCRIBE (SELECT count() OVER (ORDER BY id WITH FILL FROM ((1 + 1) AS from) TO (6 AS to) STEP ((1 + 1) AS step)) FROM test_table); +count() OVER (ORDER BY id ASC WITH FILL FROM from TO to STEP step) UInt64 +SELECT 'Window functions WINDOW'; +Window functions WINDOW +DESCRIBE (SELECT count() OVER window_name FROM test_table WINDOW window_name AS (PARTITION BY id)); +count() OVER window_name UInt64 +SELECT '--'; +-- +DESCRIBE (SELECT count() OVER window_name FROM test_table WINDOW window_name AS (PARTITION BY id ORDER BY value)); +count() OVER window_name UInt64 +SELECT '--'; +-- +DESCRIBE (SELECT count() OVER (window_name ORDER BY id) FROM test_table WINDOW window_name AS (PARTITION BY id)); +count() OVER (window_name ORDER BY id ASC) UInt64 
+SELECT 'IN function'; +IN function +DESCRIBE (SELECT id IN (SELECT 1) FROM test_table); +in(id, _subquery_1) UInt8 +SELECT '--'; +-- +DESCRIBE (SELECT id IN (SELECT id FROM test_table_in) FROM test_table); +in(id, _subquery_1) UInt8 +SELECT '--'; +-- +DESCRIBE (SELECT id IN test_table_in FROM test_table); +in(id, test_table_in) UInt8 +SELECT '--'; +-- +DESCRIBE (WITH test_table_in_cte AS (SELECT id FROM test_table) SELECT id IN (SELECT id FROM test_table_in_cte) FROM test_table); +in(id, _subquery_1) UInt8 +SELECT '--'; +-- +DESCRIBE (WITH test_table_in_cte AS (SELECT id FROM test_table) SELECT id IN test_table_in_cte FROM test_table); +in(id, test_table_in_cte) UInt8 +SELECT 'Joins'; +Joins +DESCRIBE (SELECT * FROM test_table_join_1, test_table_join_2); +test_table_join_1.id UInt64 +test_table_join_1.value String +value_join_1 String +test_table_join_2.id UInt64 +test_table_join_2.value String +value_join_2 String +SELECT '--'; +-- +DESCRIBE (SELECT * FROM test_table_join_1 AS t1, test_table_join_2 AS t2); +t1.id UInt64 +t1.value String +value_join_1 String +t2.id UInt64 +t2.value String +value_join_2 String +SELECT '--'; +-- +DESCRIBE (SELECT * APPLY toString FROM test_table_join_1 AS t1, test_table_join_2 AS t2); +toString(t1.id) String +toString(t1.value) String +toString(value_join_1) String +toString(t2.id) String +toString(t2.value) String +toString(value_join_2) String +SELECT '--'; +-- +DESCRIBE (SELECT * APPLY x -> toString(x) FROM test_table_join_1 AS t1, test_table_join_2 AS t2); +toString(t1.id) String +toString(t1.value) String +toString(value_join_1) String +toString(t2.id) String +toString(t2.value) String +toString(value_join_2) String +SELECT '--'; +-- +DESCRIBE (SELECT test_table_join_1.*, test_table_join_2.* FROM test_table_join_1 INNER JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id); +test_table_join_1.id UInt64 +test_table_join_1.value String +value_join_1 String +test_table_join_2.id UInt64 +test_table_join_2.value String +value_join_2 String +SELECT '--'; +-- +DESCRIBE (SELECT t1.*, t2.* FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id); +t1.id UInt64 +t1.value String +value_join_1 String +t2.id UInt64 +t2.value String +value_join_2 String +SELECT '--'; +-- +DESCRIBE (SELECT test_table_join_1.* APPLY toString, test_table_join_2.* APPLY toString FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id); +toString(t1.id) String +toString(t1.value) String +toString(value_join_1) String +toString(t2.id) String +toString(t2.value) String +toString(value_join_2) String +SELECT '--'; +-- +DESCRIBE (SELECT test_table_join_1.* APPLY x -> toString(x), test_table_join_2.* APPLY x -> toString(x) FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id); +toString(t1.id) String +toString(t1.value) String +toString(value_join_1) String +toString(t2.id) String +toString(t2.value) String +toString(value_join_2) String +SELECT '--'; +-- +DESCRIBE (SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_1.value_join_1, test_table_join_2.id, test_table_join_2.value, test_table_join_2.value_join_2 +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id); +test_table_join_1.id UInt64 +test_table_join_1.value String +value_join_1 String +test_table_join_2.id UInt64 +test_table_join_2.value String +value_join_2 String +SELECT '--'; +-- +DESCRIBE (SELECT t1.id, t1.value, t1.value_join_1, t2.id, t2.value, t2.value_join_2 FROM test_table_join_1 AS t1 
INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id); +t1.id UInt64 +t1.value String +value_join_1 String +t2.id UInt64 +t2.value String +value_join_2 String +SELECT 'Multiple JOINS'; +Multiple JOINS +DESCRIBE (SELECT * FROM test_table_join_1, test_table_join_2, test_table_join_3); +test_table_join_1.id UInt64 +test_table_join_1.value String +value_join_1 String +test_table_join_2.id UInt64 +test_table_join_2.value String +value_join_2 String +test_table_join_3.id UInt64 +test_table_join_3.value String +value_join_3 String +SELECT '--'; +-- +DESCRIBE (SELECT * FROM test_table_join_1 AS t1, test_table_join_2 AS t2, test_table_join_3 AS t3); +t1.id UInt64 +t1.value String +value_join_1 String +t2.id UInt64 +t2.value String +value_join_2 String +t3.id UInt64 +t3.value String +value_join_3 String +SELECT '--'; +-- +DESCRIBE (SELECT * APPLY toString FROM test_table_join_1 AS t1, test_table_join_2 AS t2, test_table_join_3 AS t3); +toString(t1.id) String +toString(t1.value) String +toString(value_join_1) String +toString(t2.id) String +toString(t2.value) String +toString(value_join_2) String +toString(t3.id) String +toString(t3.value) String +toString(value_join_3) String +SELECT '--'; +-- +DESCRIBE (SELECT * APPLY x -> toString(x) FROM test_table_join_1 AS t1, test_table_join_2 AS t2, test_table_join_3 AS t3); +toString(t1.id) String +toString(t1.value) String +toString(value_join_1) String +toString(t2.id) String +toString(t2.value) String +toString(value_join_2) String +toString(t3.id) String +toString(t3.value) String +toString(value_join_3) String +SELECT '--'; +-- +DESCRIBE (SELECT test_table_join_1.*, test_table_join_2.*, test_table_join_3.* +FROM test_table_join_1 INNER JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id +INNER JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id); +test_table_join_1.id UInt64 +test_table_join_1.value String +value_join_1 String +test_table_join_2.id UInt64 +test_table_join_2.value String +value_join_2 String +test_table_join_3.id UInt64 +test_table_join_3.value String +value_join_3 String +SELECT '--'; +-- +DESCRIBE (SELECT t1.*, t2.*, t3.* +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id); +t1.id UInt64 +t1.value String +value_join_1 String +t2.id UInt64 +t2.value String +value_join_2 String +t3.id UInt64 +t3.value String +value_join_3 String +SELECT '--'; +-- +DESCRIBE (SELECT test_table_join_1.* APPLY toString, test_table_join_2.* APPLY toString, test_table_join_3.* APPLY toString +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id); +toString(t1.id) String +toString(t1.value) String +toString(value_join_1) String +toString(t2.id) String +toString(t2.value) String +toString(value_join_2) String +toString(t3.id) String +toString(t3.value) String +toString(value_join_3) String +SELECT '--'; +-- +DESCRIBE (SELECT test_table_join_1.* APPLY x -> toString(x), test_table_join_2.* APPLY x -> toString(x), test_table_join_3.* APPLY x -> toString(x) +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id); +toString(t1.id) String +toString(t1.value) String +toString(value_join_1) String +toString(t2.id) String +toString(t2.value) String +toString(value_join_2) String +toString(t3.id) String +toString(t3.value) String +toString(value_join_3) String +SELECT '--'; +-- +DESCRIBE (SELECT 
test_table_join_1.id, test_table_join_1.value, test_table_join_1.value_join_1, test_table_join_2.id, test_table_join_2.value, test_table_join_2.value_join_2, +test_table_join_3.id, test_table_join_3.value, test_table_join_3.value_join_3 +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id); +test_table_join_1.id UInt64 +test_table_join_1.value String +value_join_1 String +test_table_join_2.id UInt64 +test_table_join_2.value String +value_join_2 String +test_table_join_3.id UInt64 +test_table_join_3.value String +value_join_3 String +SELECT '--'; +-- +DESCRIBE (SELECT t1.id, t1.value, t1.value_join_1, t2.id, t2.value, t2.value_join_2, t3.id, t3.value, t3.value_join_3 +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id); +t1.id UInt64 +t1.value String +value_join_1 String +t2.id UInt64 +t2.value String +value_join_2 String +t3.id UInt64 +t3.value String +value_join_3 String +SELECT 'Joins USING'; +Joins USING +DESCRIBE (SELECT * FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id)); +id UInt64 +t1.value String +value_join_1 String +t2.value String +value_join_2 String +SELECT '--'; +-- +DESCRIBE (SELECT * FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id, value)); +id UInt64 +value String +value_join_1 String +value_join_2 String +SELECT '--'; +-- +DESCRIBE (SELECT id, t1.id, t1.value, t2.id, t2.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id)); +id UInt64 +t1.id UInt64 +t1.value String +t2.id UInt64 +t2.value String +SELECT '--'; +-- +DESCRIBE (SELECT id, value, t1.id, t1.value, t2.id, t2.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id, value)); +id UInt64 +value String +t1.id UInt64 +t1.value String +t2.id UInt64 +t2.value String +SELECT 'Multiple Joins USING'; +Multiple Joins USING +SELECT '--'; +-- +DESCRIBE (SELECT * FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING (id)); +id UInt64 +t1.value String +value_join_1 String +t2.value String +value_join_2 String +t3.value String +value_join_3 String +SELECT '--'; +-- +DESCRIBE (SELECT * FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id, value) INNER JOIN test_table_join_3 AS t3 USING (id, value)); +id UInt64 +value String +value_join_1 String +value_join_2 String +value_join_3 String +SELECT '--'; +-- +DESCRIBE (SELECT id, t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING (id)); +id UInt64 +t1.id UInt64 +t1.value String +t2.id UInt64 +t2.value String +t3.id UInt64 +t3.value String +SELECT '--'; +-- +DESCRIBE (SELECT id, value, t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id, value) INNER JOIN test_table_join_3 AS t3 USING (id, value)); +id UInt64 +value String +t1.id UInt64 +t1.value String +t2.id UInt64 +t2.value String +t3.id UInt64 +t3.value String diff --git a/tests/queries/0_stateless/02378_analyzer_projection_names.sql b/tests/queries/0_stateless/02378_analyzer_projection_names.sql new file mode 100644 index 00000000000..907cc79dcec --- /dev/null +++ b/tests/queries/0_stateless/02378_analyzer_projection_names.sql @@ -0,0 +1,541 @@ +SET allow_experimental_analyzer = 1; + +DROP TABLE IF 
EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO test_table VALUES (0, 'Value'); + +DROP TABLE IF EXISTS test_table_in; +CREATE TABLE test_table_in +( + id UInt64 +) ENGINE=TinyLog; + +DROP TABLE IF EXISTS test_table_compound; +CREATE TABLE test_table_compound +( + id UInt64, + tuple_value Tuple(value_1 UInt64, value_2 String) +) ENGINE=TinyLog; + +INSERT INTO test_table_compound VALUES (0, tuple(0, 'Value')); + +DROP TABLE IF EXISTS test_table_join_1; +CREATE TABLE test_table_join_1 +( + id UInt64, + value String, + value_join_1 String +) ENGINE=TinyLog; + +INSERT INTO test_table_join_1 VALUES (0, 'Join_1_Value', 'Join_1_Value'); + +DROP TABLE IF EXISTS test_table_join_2; +CREATE TABLE test_table_join_2 +( + id UInt64, + value String, + value_join_2 String +) ENGINE=TinyLog; + +INSERT INTO test_table_join_2 VALUES (0, 'Join_2_Value', 'Join_2_Value'); + +DROP TABLE IF EXISTS test_table_join_3; +CREATE TABLE test_table_join_3 +( + id UInt64, + value String, + value_join_3 String +) ENGINE=TinyLog; + +INSERT INTO test_table_join_3 VALUES (0, 'Join_3_Value', 'Join_3_Value'); + +-- { echoOn } + +SELECT 'Constants'; + +DESCRIBE (SELECT 1, 'Value'); + +SELECT '--'; + +DESCRIBE (SELECT 1 + 1, concat('Value_1', 'Value_2')); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)')); + +SELECT 'Columns'; + +DESCRIBE (SELECT test_table.id, test_table.id, id FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT * FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT * APPLY toString FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT * APPLY x -> toString(x) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT tuple_value.* FROM test_table_compound); + +SELECT '--'; + +DESCRIBE (SELECT tuple_value.* APPLY x -> x FROM test_table_compound); + +SELECT '--'; + +DESCRIBE (SELECT tuple_value.* APPLY toString FROM test_table_compound); + +SELECT '--'; + +DESCRIBE (SELECT tuple_value.* APPLY x -> toString(x) FROM test_table_compound); + +SELECT 'Constants with aliases'; + +DESCRIBE (SELECT 1 AS a, a AS b, b, b AS c, c, 'Value' AS d, d AS e, e AS f); + +SELECT '--'; + +DESCRIBE (SELECT plus(1 AS a, a AS b), plus(b, b), plus(b, b) AS c, concat('Value' AS d, d) AS e, e); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS a, a.id, a.value); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS a, a.*); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS a, a.* EXCEPT id); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS a, a.* EXCEPT value); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS a, a.* EXCEPT value APPLY toString); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS a, a.* EXCEPT value APPLY x -> toString(x)); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS a, untuple(a)); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS a, untuple(a) AS b); + +SELECT 'Columns with aliases'; + +DESCRIBE (SELECT test_table.id AS a, a, test_table.id AS b, b AS c, c FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT plus(test_table.id AS a, test_table.id), plus(id, id AS b), plus(b, b), plus(test_table.id, test_table.id) FROM test_table); + 
+SELECT '--'; + +DESCRIBE (SELECT test_table.* REPLACE id + (id AS id_alias) AS id, id_alias FROM test_table); + +SELECT 'Matcher'; + +DESCRIBE (SELECT * FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT test_table.* FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT 1 AS id, 2 AS value, * FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT 1 AS id, 2 AS value, * FROM test_table AS t1); + +SELECT 'Lambda'; + +DESCRIBE (SELECT arrayMap(x -> x + 1, [1,2,3])); + +SELECT '--'; + +DESCRIBE (SELECT 1 AS a, arrayMap(x -> x + a, [1,2,3])); + +SELECT '--'; + +DESCRIBE (SELECT arrayMap(x -> x + test_table.id + test_table.id + id, [1,2,3]) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT arrayMap(x -> x + (test_table.id AS first) + (test_table.id AS second) + id, [1,2,3]) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT arrayMap(x -> test_table.* EXCEPT value, [1,2,3]) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT arrayMap(x -> test_table.* EXCEPT value APPLY x -> x, [1,2,3]) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT arrayMap(x -> test_table.* EXCEPT value APPLY toString, [1,2,3]) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT arrayMap(x -> test_table.* EXCEPT value APPLY x -> toString(x), [1,2,3]) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1), 'Tuple (id UInt64)') AS compound_value, arrayMap(x -> compound_value.*, [1,2,3])); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1), 'Tuple (id UInt64)') AS compound_value, arrayMap(x -> compound_value.* APPLY x -> x, [1,2,3])); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1), 'Tuple (id UInt64)') AS compound_value, arrayMap(x -> compound_value.* APPLY toString, [1,2,3])); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1), 'Tuple (id UInt64)') AS compound_value, arrayMap(x -> compound_value.* APPLY x -> toString(x), [1,2,3])); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS compound_value, arrayMap(x -> compound_value.* EXCEPT value, [1,2,3])); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS compound_value, arrayMap(x -> compound_value.* EXCEPT value APPLY x -> x, [1,2,3])); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS compound_value, arrayMap(x -> compound_value.* EXCEPT value APPLY toString, [1,2,3])); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS compound_value, arrayMap(x -> compound_value.* EXCEPT value APPLY x -> toString(x), [1,2,3])); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1), 'Tuple (id UInt64)') AS a, arrayMap(x -> untuple(a), [1,2,3]) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1), 'Tuple (id UInt64)') AS a, arrayMap(x -> untuple(a) AS untupled_value, [1,2,3]) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1), 'Tuple (id UInt64)') AS a, untuple(a) AS untupled_value, arrayMap(x -> untupled_value, [1,2,3]) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1), 'Tuple (id UInt64)') AS a, untuple(a) AS untupled_value, arrayMap(x -> untupled_value AS untupled_value_in_lambda, [1,2,3]) FROM test_table); + +SELECT 'Standalone lambda'; + +DESCRIBE (WITH x -> x + 1 AS test_lambda SELECT test_lambda(1)); + +SELECT '--'; + +DESCRIBE (WITH x -> * AS test_lambda SELECT test_lambda(1) AS value, value FROM test_table); + +SELECT 'Subquery'; + +DESCRIBE (SELECT (SELECT 1), (SELECT 2), (SELECT 3) AS a, (SELECT 4)); + +SELECT '--'; + 
+DESCRIBE (SELECT arrayMap(x -> (SELECT 1), [1,2,3]), arrayMap(x -> (SELECT 2) AS a, [1, 2, 3]), arrayMap(x -> (SELECT 1), [1,2,3])); + +SELECT '--'; + +DESCRIBE (SELECT (SELECT 1 AS a, 2 AS b) AS c, c.a, c.b); + +SELECT '--'; + +DESCRIBE (SELECT (SELECT 1 AS a, 2 AS b) AS c, c.*); + +SELECT '--'; + +DESCRIBE (SELECT (SELECT 1 UNION DISTINCT SELECT 1), (SELECT 2 UNION DISTINCT SELECT 2), (SELECT 3 UNION DISTINCT SELECT 3) AS a, (SELECT 4 UNION DISTINCT SELECT 4)); + +SELECT '--'; + +DESCRIBE (SELECT arrayMap(x -> (SELECT 1 UNION DISTINCT SELECT 1), [1,2,3]), arrayMap(x -> (SELECT 2 UNION DISTINCT SELECT 2) AS a, [1, 2, 3]), +arrayMap(x -> (SELECT 3 UNION DISTINCT SELECT 3), [1,2,3])); + +SELECT '--'; + +DESCRIBE (SELECT (SELECT 1 AS a, 2 AS b UNION DISTINCT SELECT 1, 2) AS c, c.a, c.b); + +SELECT '--'; + +DESCRIBE (SELECT (SELECT 1 AS a, 2 AS b UNION DISTINCT SELECT 1, 2) AS c, c.*); + +SELECT '--'; + +DESCRIBE (SELECT (SELECT 1), (SELECT 2 UNION DISTINCT SELECT 2), (SELECT 3) AS a, (SELECT 4 UNION DISTINCT SELECT 4)); + +SELECT '--'; + +DESCRIBE (SELECT arrayMap(x -> (SELECT 1 UNION DISTINCT SELECT 1), [1,2,3]), arrayMap(x -> (SELECT 2) AS a, [1, 2, 3]), +arrayMap(x -> (SELECT 3 UNION DISTINCT SELECT 3), [1,2,3])); + +SELECT 'Window functions'; + +DESCRIBE (SELECT count() OVER ()); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER () AS window_function); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (PARTITION BY id) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (PARTITION BY id, value) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (PARTITION BY id, value ORDER BY id) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (PARTITION BY id, value ORDER BY id ASC, value DESC ROWS CURRENT ROW) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (PARTITION BY id, value ORDER BY id ASC, value DESC ROWS BETWEEN CURRENT ROW AND CURRENT ROW) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (PARTITION BY id, value ORDER BY id ASC, value DESC RANGE CURRENT ROW) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (PARTITION BY id, value ORDER BY id ASC, value DESC RANGE BETWEEN CURRENT ROW AND CURRENT ROW) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (PARTITION BY (id AS id_alias), (value AS value_alias) ORDER BY id ASC, value DESC ROWS CURRENT ROW) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (PARTITION BY id, value ORDER BY (id AS id_alias) ASC, (value AS value_alias) DESC ROWS CURRENT ROW) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (PARTITION BY id, value ORDER BY id ASC, value DESC ROWS BETWEEN 1 PRECEDING AND 2 FOLLOWING) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (PARTITION BY id, value ORDER BY id ASC, value DESC ROWS BETWEEN 1 + 1 PRECEDING AND 2 + 2 FOLLOWING) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (PARTITION BY id, value ORDER BY id ASC, value DESC ROWS BETWEEN ((1 + 1) AS frame_offset_begin) PRECEDING AND ((2 + 2) AS frame_offset_end) FOLLOWING) +FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (ORDER BY toNullable(id) NULLS FIRST) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (ORDER BY toNullable(id) NULLS LAST) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (ORDER BY id WITH FILL FROM 1 TO 5 STEP 1) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (ORDER BY id WITH FILL FROM 1 + 1 TO 6 STEP 
1 + 1) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (ORDER BY id WITH FILL FROM ((1 + 1) AS from) TO (6 AS to) STEP ((1 + 1) AS step)) FROM test_table); + +SELECT 'Window functions WINDOW'; + +DESCRIBE (SELECT count() OVER window_name FROM test_table WINDOW window_name AS (PARTITION BY id)); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER window_name FROM test_table WINDOW window_name AS (PARTITION BY id ORDER BY value)); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (window_name ORDER BY id) FROM test_table WINDOW window_name AS (PARTITION BY id)); + +SELECT 'IN function'; + +DESCRIBE (SELECT id IN (SELECT 1) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT id IN (SELECT id FROM test_table_in) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT id IN test_table_in FROM test_table); + +SELECT '--'; + +DESCRIBE (WITH test_table_in_cte AS (SELECT id FROM test_table) SELECT id IN (SELECT id FROM test_table_in_cte) FROM test_table); + +SELECT '--'; + +DESCRIBE (WITH test_table_in_cte AS (SELECT id FROM test_table) SELECT id IN test_table_in_cte FROM test_table); + +SELECT 'Joins'; + +DESCRIBE (SELECT * FROM test_table_join_1, test_table_join_2); + +SELECT '--'; + +DESCRIBE (SELECT * FROM test_table_join_1 AS t1, test_table_join_2 AS t2); + +SELECT '--'; + +DESCRIBE (SELECT * APPLY toString FROM test_table_join_1 AS t1, test_table_join_2 AS t2); + +SELECT '--'; + +DESCRIBE (SELECT * APPLY x -> toString(x) FROM test_table_join_1 AS t1, test_table_join_2 AS t2); + +SELECT '--'; + +DESCRIBE (SELECT test_table_join_1.*, test_table_join_2.* FROM test_table_join_1 INNER JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id); + +SELECT '--'; + +DESCRIBE (SELECT t1.*, t2.* FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id); + +SELECT '--'; + +DESCRIBE (SELECT test_table_join_1.* APPLY toString, test_table_join_2.* APPLY toString FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id); + +SELECT '--'; + +DESCRIBE (SELECT test_table_join_1.* APPLY x -> toString(x), test_table_join_2.* APPLY x -> toString(x) FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id); + +SELECT '--'; + +DESCRIBE (SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_1.value_join_1, test_table_join_2.id, test_table_join_2.value, test_table_join_2.value_join_2 +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id); + +SELECT '--'; + +DESCRIBE (SELECT t1.id, t1.value, t1.value_join_1, t2.id, t2.value, t2.value_join_2 FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id); + +SELECT 'Multiple JOINS'; + +DESCRIBE (SELECT * FROM test_table_join_1, test_table_join_2, test_table_join_3); + +SELECT '--'; + +DESCRIBE (SELECT * FROM test_table_join_1 AS t1, test_table_join_2 AS t2, test_table_join_3 AS t3); + +SELECT '--'; + +DESCRIBE (SELECT * APPLY toString FROM test_table_join_1 AS t1, test_table_join_2 AS t2, test_table_join_3 AS t3); + +SELECT '--'; + +DESCRIBE (SELECT * APPLY x -> toString(x) FROM test_table_join_1 AS t1, test_table_join_2 AS t2, test_table_join_3 AS t3); + +SELECT '--'; + +DESCRIBE (SELECT test_table_join_1.*, test_table_join_2.*, test_table_join_3.* +FROM test_table_join_1 INNER JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id +INNER JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id); + +SELECT '--'; + +DESCRIBE (SELECT t1.*, t2.*, t3.* +FROM test_table_join_1 AS t1 INNER 
JOIN test_table_join_2 AS t2 ON t1.id = t2.id INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id); + +SELECT '--'; + +DESCRIBE (SELECT test_table_join_1.* APPLY toString, test_table_join_2.* APPLY toString, test_table_join_3.* APPLY toString +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id); + +SELECT '--'; + +DESCRIBE (SELECT test_table_join_1.* APPLY x -> toString(x), test_table_join_2.* APPLY x -> toString(x), test_table_join_3.* APPLY x -> toString(x) +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id); + +SELECT '--'; + +DESCRIBE (SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_1.value_join_1, test_table_join_2.id, test_table_join_2.value, test_table_join_2.value_join_2, +test_table_join_3.id, test_table_join_3.value, test_table_join_3.value_join_3 +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id); + +SELECT '--'; + +DESCRIBE (SELECT t1.id, t1.value, t1.value_join_1, t2.id, t2.value, t2.value_join_2, t3.id, t3.value, t3.value_join_3 +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id); + +SELECT 'Joins USING'; + +DESCRIBE (SELECT * FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id)); + +SELECT '--'; + +DESCRIBE (SELECT * FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id, value)); + +SELECT '--'; + +DESCRIBE (SELECT id, t1.id, t1.value, t2.id, t2.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id)); + +SELECT '--'; + +DESCRIBE (SELECT id, value, t1.id, t1.value, t2.id, t2.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id, value)); + +SELECT 'Multiple Joins USING'; + +SELECT '--'; + +DESCRIBE (SELECT * FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING (id)); + +SELECT '--'; + +DESCRIBE (SELECT * FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id, value) INNER JOIN test_table_join_3 AS t3 USING (id, value)); + +SELECT '--'; + +DESCRIBE (SELECT id, t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING (id)); + +SELECT '--'; + +DESCRIBE (SELECT id, value, t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id, value) INNER JOIN test_table_join_3 AS t3 USING (id, value)); + +-- { echoOff } + +DROP TABLE test_table_join_1; +DROP TABLE test_table_join_2; +DROP TABLE test_table_join_3; +DROP TABLE test_table; +DROP TABLE test_table_compound; diff --git a/tests/queries/0_stateless/02379_analyzer_subquery_depth.reference b/tests/queries/0_stateless/02379_analyzer_subquery_depth.reference new file mode 100644 index 00000000000..d00491fd7e5 --- /dev/null +++ b/tests/queries/0_stateless/02379_analyzer_subquery_depth.reference @@ -0,0 +1 @@ +1 diff --git a/tests/queries/0_stateless/02379_analyzer_subquery_depth.sql b/tests/queries/0_stateless/02379_analyzer_subquery_depth.sql new file mode 100644 index 00000000000..c2109f543eb --- /dev/null +++ b/tests/queries/0_stateless/02379_analyzer_subquery_depth.sql @@ -0,0 +1,4 @@ +SET allow_experimental_analyzer = 1; + +SELECT (SELECT 
a FROM (SELECT 1 AS a)) SETTINGS max_subquery_depth = 1; -- { serverError 162 } +SELECT (SELECT a FROM (SELECT 1 AS a)) SETTINGS max_subquery_depth = 2; diff --git a/tests/queries/0_stateless/02380_analyzer_join_sample.reference b/tests/queries/0_stateless/02380_analyzer_join_sample.reference new file mode 100644 index 00000000000..14d5f58d76a --- /dev/null +++ b/tests/queries/0_stateless/02380_analyzer_join_sample.reference @@ -0,0 +1,2 @@ +0 0 2 2 +1 1 2 2 diff --git a/tests/queries/0_stateless/02380_analyzer_join_sample.sql b/tests/queries/0_stateless/02380_analyzer_join_sample.sql new file mode 100644 index 00000000000..e417f47d173 --- /dev/null +++ b/tests/queries/0_stateless/02380_analyzer_join_sample.sql @@ -0,0 +1,29 @@ +SET allow_experimental_analyzer = 1; + +DROP TABLE IF EXISTS test_table_join_1; +CREATE TABLE test_table_join_1 +( + id UInt64, + value String +) ENGINE=MergeTree +ORDER BY id +SAMPLE BY id; + +INSERT INTO test_table_join_1 VALUES (0, 'Value'), (1, 'Value_1'); + +DROP TABLE IF EXISTS test_table_join_2; +CREATE TABLE test_table_join_2 +( + id UInt64, + value String +) ENGINE=MergeTree +ORDER BY id +SAMPLE BY id; + +INSERT INTO test_table_join_2 VALUES (0, 'Value'), (1, 'Value_1'); + +SELECT t1.id AS t1_id, t2.id AS t2_id, t1._sample_factor AS t1_sample_factor, t2._sample_factor AS t2_sample_factor +FROM test_table_join_1 AS t1 SAMPLE 1/2 INNER JOIN test_table_join_2 AS t2 SAMPLE 1/2 ON t1.id = t2.id; + +DROP TABLE test_table_join_1; +DROP TABLE test_table_join_2; diff --git a/tests/queries/0_stateless/02381_analyzer_join_final.reference b/tests/queries/0_stateless/02381_analyzer_join_final.reference new file mode 100644 index 00000000000..e00d444d142 --- /dev/null +++ b/tests/queries/0_stateless/02381_analyzer_join_final.reference @@ -0,0 +1,2 @@ +0 0 3 1 +1 1 1 3 diff --git a/tests/queries/0_stateless/02381_analyzer_join_final.sql b/tests/queries/0_stateless/02381_analyzer_join_final.sql new file mode 100644 index 00000000000..57fc3aedd8f --- /dev/null +++ b/tests/queries/0_stateless/02381_analyzer_join_final.sql @@ -0,0 +1,34 @@ +SET allow_experimental_analyzer = 1; + +DROP TABLE IF EXISTS test_table_join_1; +CREATE TABLE test_table_join_1 +( + id UInt64, + value UInt64 +) ENGINE=SummingMergeTree(value) +ORDER BY id +SAMPLE BY id; + +SYSTEM STOP MERGES test_table_join_1; +INSERT INTO test_table_join_1 VALUES (0, 1), (1, 1); +INSERT INTO test_table_join_1 VALUES (0, 2); + +DROP TABLE IF EXISTS test_table_join_2; +CREATE TABLE test_table_join_2 +( + id UInt64, + value UInt64 +) ENGINE=SummingMergeTree(value) +ORDER BY id +SAMPLE BY id; + +SYSTEM STOP MERGES test_table_join_2; +INSERT INTO test_table_join_2 VALUES (0, 1), (1, 1); +INSERT INTO test_table_join_2 VALUES (1, 2); + +SELECT t1.id AS t1_id, t2.id AS t2_id, t1.value AS t1_value, t2.value AS t2_value +FROM test_table_join_1 AS t1 FINAL INNER JOIN test_table_join_2 AS t2 FINAL ON t1.id = t2.id +ORDER BY t1_id; + +DROP TABLE test_table_join_1; +DROP TABLE test_table_join_2; diff --git a/tests/queries/0_stateless/02382_analyzer_matcher_join_using.reference b/tests/queries/0_stateless/02382_analyzer_matcher_join_using.reference new file mode 100644 index 00000000000..f2199aad4c8 --- /dev/null +++ b/tests/queries/0_stateless/02382_analyzer_matcher_join_using.reference @@ -0,0 +1,47 @@ +-- { echoOn } + +SELECT * FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) ORDER BY id, t1.value; +0 Join_1_Value_0 Join_2_Value_0 +1 Join_1_Value_1 Join_2_Value_1 +SELECT * FROM test_table_join_1 AS t1 
INNER JOIN test_table_join_2 AS t2 USING (id, id, id) ORDER BY id, t1.value; -- { serverError 36 } +SELECT '--'; +-- +SELECT * FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) ORDER BY id, t1.value; +0 Join_1_Value_0 Join_2_Value_0 +1 Join_1_Value_1 Join_2_Value_1 +2 Join_1_Value_2 +SELECT '--'; +-- +SELECT * FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) ORDER BY id, t1.value; +0 Join_1_Value_0 Join_2_Value_0 +1 Join_1_Value_1 Join_2_Value_1 +3 Join_2_Value_3 +SELECT '--'; +-- +SELECT * FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) ORDER BY id, t1.value; +0 Join_2_Value_3 +0 Join_1_Value_0 Join_2_Value_0 +1 Join_1_Value_1 Join_2_Value_1 +2 Join_1_Value_2 +SELECT '--'; +-- +SELECT * FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING (id) ORDER BY id, t1.value; +0 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +1 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 +SELECT '--'; +-- +SELECT * FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING (id) ORDER BY id, t1.value; +0 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +1 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 +SELECT '--'; +-- +SELECT * FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING (id) ORDER BY id, t1.value; +0 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +1 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 +4 Join_3_Value_4 +SELECT '--'; +-- +SELECT * FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING (id) ORDER BY id, t1.value; +0 Join_3_Value_4 +0 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 +1 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 diff --git a/tests/queries/0_stateless/02382_analyzer_matcher_join_using.sql b/tests/queries/0_stateless/02382_analyzer_matcher_join_using.sql new file mode 100644 index 00000000000..25d493dc422 --- /dev/null +++ b/tests/queries/0_stateless/02382_analyzer_matcher_join_using.sql @@ -0,0 +1,74 @@ +SET allow_experimental_analyzer = 1; + +DROP TABLE IF EXISTS test_table_join_1; +CREATE TABLE test_table_join_1 +( + id UInt8, + value String +) ENGINE = TinyLog; + +DROP TABLE IF EXISTS test_table_join_2; +CREATE TABLE test_table_join_2 +( + id UInt16, + value String +) ENGINE = TinyLog; + +DROP TABLE IF EXISTS test_table_join_3; +CREATE TABLE test_table_join_3 +( + id UInt64, + value String +) ENGINE = TinyLog; + +INSERT INTO test_table_join_1 VALUES (0, 'Join_1_Value_0'); +INSERT INTO test_table_join_1 VALUES (1, 'Join_1_Value_1'); +INSERT INTO test_table_join_1 VALUES (2, 'Join_1_Value_2'); + +INSERT INTO test_table_join_2 VALUES (0, 'Join_2_Value_0'); +INSERT INTO test_table_join_2 VALUES (1, 'Join_2_Value_1'); +INSERT INTO test_table_join_2 VALUES (3, 'Join_2_Value_3'); + +INSERT INTO test_table_join_3 VALUES (0, 'Join_3_Value_0'); +INSERT INTO test_table_join_3 VALUES (1, 'Join_3_Value_1'); +INSERT INTO test_table_join_3 VALUES (4, 'Join_3_Value_4'); + +-- { echoOn } + +SELECT * FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) ORDER BY id, t1.value; + +SELECT * FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id, id, id) ORDER BY id, t1.value; -- { serverError 36 } + +SELECT '--'; + +SELECT * FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) ORDER BY id, t1.value; + +SELECT '--'; + +SELECT * FROM 
test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) ORDER BY id, t1.value; + +SELECT '--'; + +SELECT * FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) ORDER BY id, t1.value; + +SELECT '--'; + +SELECT * FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING (id) ORDER BY id, t1.value; + +SELECT '--'; + +SELECT * FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING (id) ORDER BY id, t1.value; + +SELECT '--'; + +SELECT * FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING (id) ORDER BY id, t1.value; + +SELECT '--'; + +SELECT * FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING (id) ORDER BY id, t1.value; + +-- { echoOff } + +DROP TABLE test_table_join_1; +DROP TABLE test_table_join_2; +DROP TABLE test_table_join_3; diff --git a/tests/queries/0_stateless/02383_analyzer_merge_tree_self_join.reference b/tests/queries/0_stateless/02383_analyzer_merge_tree_self_join.reference new file mode 100644 index 00000000000..e48ae282f5d --- /dev/null +++ b/tests/queries/0_stateless/02383_analyzer_merge_tree_self_join.reference @@ -0,0 +1,24 @@ +-- { echoOn } + +SELECT * FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY t1.id, t1.value; +0 Join_1_Value_0 0 Join_2_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 +SELECT '--'; +-- +SELECT * FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY t1.id, t1.value; +0 Join_1_Value_0 0 Join_2_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 +2 Join_1_Value_2 0 +SELECT '--'; +-- +SELECT * FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY t1.id, t1.value; +0 3 Join_2_Value_3 +0 Join_1_Value_0 0 Join_2_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 +SELECT '--'; +-- +SELECT * FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY t1.id, t1.value; +0 3 Join_2_Value_3 +0 Join_1_Value_0 0 Join_2_Value_0 +1 Join_1_Value_1 1 Join_2_Value_1 +2 Join_1_Value_2 0 diff --git a/tests/queries/0_stateless/02383_analyzer_merge_tree_self_join.sql b/tests/queries/0_stateless/02383_analyzer_merge_tree_self_join.sql new file mode 100644 index 00000000000..c22a0f4244b --- /dev/null +++ b/tests/queries/0_stateless/02383_analyzer_merge_tree_self_join.sql @@ -0,0 +1,44 @@ +SET allow_experimental_analyzer = 1; + +DROP TABLE IF EXISTS test_table_join_1; +CREATE TABLE test_table_join_1 +( + id UInt64, + value String +) ENGINE = MergeTree ORDER BY id; + +DROP TABLE IF EXISTS test_table_join_2; +CREATE TABLE test_table_join_2 +( + id UInt64, + value String +) ENGINE = MergeTree ORDER BY id; + +INSERT INTO test_table_join_1 VALUES (0, 'Join_1_Value_0'); +INSERT INTO test_table_join_1 VALUES (1, 'Join_1_Value_1'); +INSERT INTO test_table_join_1 VALUES (2, 'Join_1_Value_2'); + +INSERT INTO test_table_join_2 VALUES (0, 'Join_2_Value_0'); +INSERT INTO test_table_join_2 VALUES (1, 'Join_2_Value_1'); +INSERT INTO test_table_join_2 VALUES (3, 'Join_2_Value_3'); + +-- { echoOn } + +SELECT * FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY t1.id, t1.value; + +SELECT '--'; + +SELECT * FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY t1.id, t1.value; + +SELECT '--'; + +SELECT * FROM test_table_join_1 AS t1 
RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY t1.id, t1.value; + +SELECT '--'; + +SELECT * FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY t1.id, t1.value; + +-- { echoOff } + +DROP TABLE test_table_join_1; +DROP TABLE test_table_join_2; diff --git a/tests/queries/0_stateless/02384_analyzer_dict_get_join_get.reference b/tests/queries/0_stateless/02384_analyzer_dict_get_join_get.reference new file mode 100644 index 00000000000..5f783010a1c --- /dev/null +++ b/tests/queries/0_stateless/02384_analyzer_dict_get_join_get.reference @@ -0,0 +1,10 @@ +Dictionary +0 Value +Value +Value +Value +JOIN +0 Value +Value +Value +Value diff --git a/tests/queries/0_stateless/02384_analyzer_dict_get_join_get.sql b/tests/queries/0_stateless/02384_analyzer_dict_get_join_get.sql new file mode 100644 index 00000000000..ff6e417d756 --- /dev/null +++ b/tests/queries/0_stateless/02384_analyzer_dict_get_join_get.sql @@ -0,0 +1,59 @@ +SET allow_experimental_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO test_table VALUES (0, 'Value'); + +DROP DICTIONARY IF EXISTS test_dictionary; +CREATE DICTIONARY test_dictionary +( + id UInt64, + value String +) +PRIMARY KEY id +LAYOUT(FLAT()) +SOURCE(CLICKHOUSE(TABLE 'test_table')) +LIFETIME(0); + +SELECT 'Dictionary'; + +SELECT * FROM test_dictionary; + +SELECT dictGet('test_dictionary', 'value', toUInt64(0)); + +SELECT dictGet(test_dictionary, 'value', toUInt64(0)); + +WITH 'test_dictionary' AS dictionary SELECT dictGet(dictionary, 'value', toUInt64(0)); + +WITH 'invalid_dictionary' AS dictionary SELECT dictGet(dictionary, 'value', toUInt64(0)); -- { serverError 36 } + +DROP DICTIONARY test_dictionary; +DROP TABLE test_table; + +DROP TABLE IF EXISTS test_table_join; +CREATE TABLE test_table_join +( + id UInt64, + value String +) ENGINE=Join(Any, Left, id); + +INSERT INTO test_table_join VALUES (0, 'Value'); + +SELECT 'JOIN'; + +SELECT * FROM test_table_join; + +SELECT joinGet('test_table_join', 'value', toUInt64(0)); + +SELECT joinGet(test_table_join, 'value', toUInt64(0)); + +WITH 'test_table_join' AS join_table SELECT joinGet(join_table, 'value', toUInt64(0)); + +WITH 'invalid_test_table_join' AS join_table SELECT joinGet(join_table, 'value', toUInt64(0)); -- { serverError 60 } + +DROP TABLE test_table_join; diff --git a/tests/queries/0_stateless/02385_analyzer_aliases_compound_expression.reference b/tests/queries/0_stateless/02385_analyzer_aliases_compound_expression.reference new file mode 100644 index 00000000000..05c5c9872a6 --- /dev/null +++ b/tests/queries/0_stateless/02385_analyzer_aliases_compound_expression.reference @@ -0,0 +1,7 @@ +(1,'Value') 1 Value +-- +2 +-- +1 1 +-- +1 1 diff --git a/tests/queries/0_stateless/02385_analyzer_aliases_compound_expression.sql b/tests/queries/0_stateless/02385_analyzer_aliases_compound_expression.sql new file mode 100644 index 00000000000..1a195bbfffe --- /dev/null +++ b/tests/queries/0_stateless/02385_analyzer_aliases_compound_expression.sql @@ -0,0 +1,21 @@ +SET allow_experimental_analyzer = 1; + +SELECT cast(tuple(1, 'Value'), 'Tuple(first UInt64, second String)') AS value, value.first, value.second; + +SELECT '--'; + +WITH (x -> x + 1) AS lambda SELECT lambda(1); + +WITH (x -> x + 1) AS lambda SELECT lambda.nested(1); -- { serverError 36 } + +SELECT '--'; + +SELECT * FROM (SELECT 1) AS t1, t1 AS t2; + +SELECT '--'; + +SELECT * FROM t1 AS t2, (SELECT 1) AS t1; + +SELECT * FROM 
(SELECT 1) AS t1, t1.nested AS t2; -- { serverError 36 } + +SELECT * FROM t1.nested AS t2, (SELECT 1) AS t1; -- { serverError 36 } diff --git a/tests/queries/0_stateless/02386_analyzer_in_function_nested_subqueries.reference b/tests/queries/0_stateless/02386_analyzer_in_function_nested_subqueries.reference new file mode 100644 index 00000000000..dec7d2fabd2 --- /dev/null +++ b/tests/queries/0_stateless/02386_analyzer_in_function_nested_subqueries.reference @@ -0,0 +1 @@ +\N diff --git a/tests/queries/0_stateless/02386_analyzer_in_function_nested_subqueries.sql b/tests/queries/0_stateless/02386_analyzer_in_function_nested_subqueries.sql new file mode 100644 index 00000000000..c8ca3ff21d4 --- /dev/null +++ b/tests/queries/0_stateless/02386_analyzer_in_function_nested_subqueries.sql @@ -0,0 +1,3 @@ +SET allow_experimental_analyzer = 1; + +SELECT (NULL IN (SELECT 9223372036854775806 IN (SELECT 65536), inf, NULL IN (NULL))) IN (SELECT NULL IN (NULL)); diff --git a/tests/queries/0_stateless/02387_analyzer_cte.reference b/tests/queries/0_stateless/02387_analyzer_cte.reference new file mode 100644 index 00000000000..1ad3aee198b --- /dev/null +++ b/tests/queries/0_stateless/02387_analyzer_cte.reference @@ -0,0 +1,7 @@ +1 +-- +0 Value +-- +1 +-- +0 Value diff --git a/tests/queries/0_stateless/02387_analyzer_cte.sql b/tests/queries/0_stateless/02387_analyzer_cte.sql new file mode 100644 index 00000000000..1f10ac10438 --- /dev/null +++ b/tests/queries/0_stateless/02387_analyzer_cte.sql @@ -0,0 +1,26 @@ +SET allow_experimental_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO test_table VALUES (0, 'Value'); + +WITH cte_subquery AS (SELECT 1) SELECT * FROM cte_subquery; + +SELECT '--'; + +WITH cte_subquery AS (SELECT * FROM test_table) SELECT * FROM cte_subquery; + +SELECT '--'; + +WITH cte_subquery AS (SELECT 1 UNION DISTINCT SELECT 1) SELECT * FROM cte_subquery; + +SELECT '--'; + +WITH cte_subquery AS (SELECT * FROM test_table UNION DISTINCT SELECT * FROM test_table) SELECT * FROM cte_subquery; + +DROP TABLE test_table; diff --git a/tests/queries/0_stateless/02388_analyzer_recursive_lambda.reference b/tests/queries/0_stateless/02388_analyzer_recursive_lambda.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02388_analyzer_recursive_lambda.sql b/tests/queries/0_stateless/02388_analyzer_recursive_lambda.sql new file mode 100644 index 00000000000..6fc8ff2aae0 --- /dev/null +++ b/tests/queries/0_stateless/02388_analyzer_recursive_lambda.sql @@ -0,0 +1,5 @@ +SET allow_experimental_analyzer = 1; + +WITH x -> plus(lambda(1), x) AS lambda SELECT lambda(1048576); -- { serverError 1 }; + +WITH lambda(lambda(plus(x, x, -1)), tuple(x), x + 2147483646) AS lambda, x -> plus(lambda(1), x, 2) AS lambda SELECT 1048576, lambda(1048576); -- { serverError 1 }; diff --git a/tests/queries/0_stateless/02389_analyzer_nested_lambda.reference b/tests/queries/0_stateless/02389_analyzer_nested_lambda.reference new file mode 100644 index 00000000000..935c53358c0 --- /dev/null +++ b/tests/queries/0_stateless/02389_analyzer_nested_lambda.reference @@ -0,0 +1,121 @@ +-- { echoOn } + +SELECT arrayMap(x -> x + arrayMap(x -> x + 1, [1])[1], [1,2,3]); +[3,4,5] +SELECT '--'; +-- +SELECT arrayMap(x -> x + arrayMap(x -> 5, [1])[1], [1,2,3]); +[6,7,8] +SELECT '--'; +-- +SELECT 5 AS constant, arrayMap(x -> x + arrayMap(x -> constant, [1])[1], [1,2,3]); +5 [6,7,8] +SELECT '--'; +-- +SELECT arrayMap(x -> 
x + arrayMap(x -> x, [1])[1], [1,2,3]); +[2,3,4] +SELECT '--'; +-- +SELECT arrayMap(x -> x + arrayMap(y -> x + y, [1])[1], [1,2,3]); +[3,5,7] +SELECT '--'; +-- +SELECT arrayMap(x -> x + arrayMap(x -> (SELECT 5), [1])[1], [1,2,3]); +[6,7,8] +SELECT '--'; +-- +SELECT (SELECT 5) AS subquery, arrayMap(x -> x + arrayMap(x -> subquery, [1])[1], [1,2,3]); +5 [6,7,8] +SELECT '--'; +-- +SELECT arrayMap(x -> x + arrayMap(x -> (SELECT 5 UNION DISTINCT SELECT 5), [1])[1], [1,2,3]); +[6,7,8] +SELECT '--'; +-- +SELECT (SELECT 5 UNION DISTINCT SELECT 5) AS subquery, arrayMap(x -> x + arrayMap(x -> subquery, [1])[1], [1,2,3]); +5 [6,7,8] +SELECT '--'; +-- +WITH x -> toString(x) AS lambda SELECT arrayMap(x -> lambda(x), [1,2,3]); +['1','2','3'] +SELECT '--'; +-- +WITH x -> toString(x) AS lambda SELECT arrayMap(x -> arrayMap(y -> concat(lambda(x), '_', lambda(y)), [1,2,3]), [1,2,3]); +[['1_1','1_2','1_3'],['2_1','2_2','2_3'],['3_1','3_2','3_3']] +SELECT '--'; +-- +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=TinyLog; +INSERT INTO test_table VALUES (0, 'Value'); +SELECT arrayMap(x -> x + arrayMap(x -> id, [1])[1], [1,2,3]) FROM test_table; +[1,2,3] +SELECT '--'; +-- +SELECT arrayMap(x -> x + arrayMap(x -> x + id, [1])[1], [1,2,3]) FROM test_table; +[2,3,4] +SELECT '--'; +-- +SELECT arrayMap(x -> x + arrayMap(y -> x + y + id, [1])[1], [1,2,3]) FROM test_table; +[3,5,7] +SELECT '--'; +-- +SELECT id AS id_alias, arrayMap(x -> x + arrayMap(y -> x + y + id_alias, [1])[1], [1,2,3]) FROM test_table; +0 [3,5,7] +SELECT '--'; +-- +SELECT arrayMap(x -> x + arrayMap(x -> 5, [1])[1], [1,2,3]) FROM test_table; +[6,7,8] +SELECT '--'; +-- +SELECT 5 AS constant, arrayMap(x -> x + arrayMap(x -> constant, [1])[1], [1,2,3]) FROM test_table; +5 [6,7,8] +SELECT '--'; +-- +SELECT 5 AS constant, arrayMap(x -> x + arrayMap(x -> x + constant, [1])[1], [1,2,3]) FROM test_table; +5 [7,8,9] +SELECT '--'; +-- +SELECT 5 AS constant, arrayMap(x -> x + arrayMap(x -> x + id + constant, [1])[1], [1,2,3]) FROM test_table; +5 [7,8,9] +SELECT '--'; +-- +SELECT 5 AS constant, arrayMap(x -> x + arrayMap(y -> x + y + id + constant, [1])[1], [1,2,3]) FROM test_table; +5 [8,10,12] +SELECT '--'; +-- +SELECT arrayMap(x -> x + arrayMap(x -> id + (SELECT id FROM test_table), [1])[1], [1,2,3]) FROM test_table; +[1,2,3] +SELECT '--'; +-- +SELECT arrayMap(x -> id + arrayMap(x -> id + (SELECT id FROM test_table), [1])[1], [1,2,3]) FROM test_table; +[0,0,0] +SELECT '--'; +-- +SELECT arrayMap(x -> id + arrayMap(x -> id + (SELECT id FROM test_table UNION DISTINCT SELECT id FROM test_table), [1])[1], [1,2,3]) FROM test_table; +[0,0,0] +SELECT '--'; +-- +WITH x -> toString(id) AS lambda SELECT arrayMap(x -> lambda(x), [1,2,3]) FROM test_table; +['0','0','0'] +SELECT '--'; +-- +WITH x -> toString(id) AS lambda SELECT arrayMap(x -> arrayMap(y -> lambda(y), [1,2,3]), [1,2,3]) FROM test_table; +[['0','0','0'],['0','0','0'],['0','0','0']] +SELECT '--'; +-- +WITH x -> toString(id) AS lambda SELECT arrayMap(x -> arrayMap(y -> concat(lambda(x), '_', lambda(y)), [1,2,3]), [1,2,3]) FROM test_table; +[['0_0','0_0','0_0'],['0_0','0_0','0_0'],['0_0','0_0','0_0']] +SELECT '--'; +-- +SELECT arrayMap(x -> concat(concat(concat(concat(concat(toString(id), '___\0_______\0____'), toString(id), concat(concat(toString(id), ''), toString(id)), toString(id)), + arrayMap(x -> concat(concat(concat(concat(toString(id), ''), toString(id)), toString(id), '___\0_______\0____'), toString(id)) AS lambda, [NULL, inf, 1, 1]), + 
concat(toString(id), NULL), toString(id)), toString(id))) AS lambda, [NULL, NULL, 2147483647]) +FROM test_table WHERE concat(concat(concat(toString(id), '___\0_______\0____'), toString(id)), concat(toString(id), NULL), toString(id)); +SELECT '--'; +-- +SELECT arrayMap(x -> concat(toString(id), arrayMap(x -> toString(1), [NULL])), [NULL]) FROM test_table; -- { serverError 44 }; +DROP TABLE test_table; diff --git a/tests/queries/0_stateless/02389_analyzer_nested_lambda.sql b/tests/queries/0_stateless/02389_analyzer_nested_lambda.sql new file mode 100644 index 00000000000..8f8b5537da9 --- /dev/null +++ b/tests/queries/0_stateless/02389_analyzer_nested_lambda.sql @@ -0,0 +1,129 @@ +SET allow_experimental_analyzer = 1; + +-- { echoOn } + +SELECT arrayMap(x -> x + arrayMap(x -> x + 1, [1])[1], [1,2,3]); + +SELECT '--'; + +SELECT arrayMap(x -> x + arrayMap(x -> 5, [1])[1], [1,2,3]); + +SELECT '--'; + +SELECT 5 AS constant, arrayMap(x -> x + arrayMap(x -> constant, [1])[1], [1,2,3]); + +SELECT '--'; + +SELECT arrayMap(x -> x + arrayMap(x -> x, [1])[1], [1,2,3]); + +SELECT '--'; + +SELECT arrayMap(x -> x + arrayMap(y -> x + y, [1])[1], [1,2,3]); + +SELECT '--'; + +SELECT arrayMap(x -> x + arrayMap(x -> (SELECT 5), [1])[1], [1,2,3]); + +SELECT '--'; + +SELECT (SELECT 5) AS subquery, arrayMap(x -> x + arrayMap(x -> subquery, [1])[1], [1,2,3]); + +SELECT '--'; + +SELECT arrayMap(x -> x + arrayMap(x -> (SELECT 5 UNION DISTINCT SELECT 5), [1])[1], [1,2,3]); + +SELECT '--'; + +SELECT (SELECT 5 UNION DISTINCT SELECT 5) AS subquery, arrayMap(x -> x + arrayMap(x -> subquery, [1])[1], [1,2,3]); + +SELECT '--'; + +WITH x -> toString(x) AS lambda SELECT arrayMap(x -> lambda(x), [1,2,3]); + +SELECT '--'; + +WITH x -> toString(x) AS lambda SELECT arrayMap(x -> arrayMap(y -> concat(lambda(x), '_', lambda(y)), [1,2,3]), [1,2,3]); + +SELECT '--'; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO test_table VALUES (0, 'Value'); + +SELECT arrayMap(x -> x + arrayMap(x -> id, [1])[1], [1,2,3]) FROM test_table; + +SELECT '--'; + +SELECT arrayMap(x -> x + arrayMap(x -> x + id, [1])[1], [1,2,3]) FROM test_table; + +SELECT '--'; + +SELECT arrayMap(x -> x + arrayMap(y -> x + y + id, [1])[1], [1,2,3]) FROM test_table; + +SELECT '--'; + +SELECT id AS id_alias, arrayMap(x -> x + arrayMap(y -> x + y + id_alias, [1])[1], [1,2,3]) FROM test_table; + +SELECT '--'; + +SELECT arrayMap(x -> x + arrayMap(x -> 5, [1])[1], [1,2,3]) FROM test_table; + +SELECT '--'; + +SELECT 5 AS constant, arrayMap(x -> x + arrayMap(x -> constant, [1])[1], [1,2,3]) FROM test_table; + +SELECT '--'; + +SELECT 5 AS constant, arrayMap(x -> x + arrayMap(x -> x + constant, [1])[1], [1,2,3]) FROM test_table; + +SELECT '--'; + +SELECT 5 AS constant, arrayMap(x -> x + arrayMap(x -> x + id + constant, [1])[1], [1,2,3]) FROM test_table; + +SELECT '--'; + +SELECT 5 AS constant, arrayMap(x -> x + arrayMap(y -> x + y + id + constant, [1])[1], [1,2,3]) FROM test_table; + +SELECT '--'; + +SELECT arrayMap(x -> x + arrayMap(x -> id + (SELECT id FROM test_table), [1])[1], [1,2,3]) FROM test_table; + +SELECT '--'; + +SELECT arrayMap(x -> id + arrayMap(x -> id + (SELECT id FROM test_table), [1])[1], [1,2,3]) FROM test_table; + +SELECT '--'; + +SELECT arrayMap(x -> id + arrayMap(x -> id + (SELECT id FROM test_table UNION DISTINCT SELECT id FROM test_table), [1])[1], [1,2,3]) FROM test_table; + +SELECT '--'; + +WITH x -> toString(id) AS lambda SELECT arrayMap(x -> lambda(x), [1,2,3]) FROM test_table; + 
+SELECT '--'; + +WITH x -> toString(id) AS lambda SELECT arrayMap(x -> arrayMap(y -> lambda(y), [1,2,3]), [1,2,3]) FROM test_table; + +SELECT '--'; + +WITH x -> toString(id) AS lambda SELECT arrayMap(x -> arrayMap(y -> concat(lambda(x), '_', lambda(y)), [1,2,3]), [1,2,3]) FROM test_table; + +SELECT '--'; + +SELECT arrayMap(x -> concat(concat(concat(concat(concat(toString(id), '___\0_______\0____'), toString(id), concat(concat(toString(id), ''), toString(id)), toString(id)), + arrayMap(x -> concat(concat(concat(concat(toString(id), ''), toString(id)), toString(id), '___\0_______\0____'), toString(id)) AS lambda, [NULL, inf, 1, 1]), + concat(toString(id), NULL), toString(id)), toString(id))) AS lambda, [NULL, NULL, 2147483647]) +FROM test_table WHERE concat(concat(concat(toString(id), '___\0_______\0____'), toString(id)), concat(toString(id), NULL), toString(id)); + +SELECT '--'; + +SELECT arrayMap(x -> concat(toString(id), arrayMap(x -> toString(1), [NULL])), [NULL]) FROM test_table; -- { serverError 44 }; + +DROP TABLE test_table; + +-- { echoOff } diff --git a/tests/queries/0_stateless/02403_date_time_narrowing.reference b/tests/queries/0_stateless/02403_date_time_narrowing.reference deleted file mode 100644 index 7d6e91c61b8..00000000000 --- a/tests/queries/0_stateless/02403_date_time_narrowing.reference +++ /dev/null @@ -1,20 +0,0 @@ -1970-01-01 2149-06-06 1970-01-01 2149-06-06 1900-01-01 1970-01-02 1970-01-01 00:00:00 2106-02-07 06:28:15 -1970-01-01 2149-06-06 -1970-01-01 2149-06-06 -1970-01-01 00:00:00 2106-02-07 06:28:15 -1970-01-01 00:00:00 2106-02-07 06:28:15 -2106-02-07 06:28:15 -toStartOfDay -2106-02-07 00:00:00 1970-01-01 00:00:00 2106-02-07 00:00:00 1970-01-01 00:00:00 2106-02-07 00:00:00 -toStartOfWeek -1970-01-01 1970-01-01 1970-01-01 1970-01-01 1970-01-01 2149-06-01 1970-01-01 2149-06-02 -toMonday -1970-01-01 1970-01-01 2149-06-02 1970-01-01 2149-06-02 -toStartOfMonth -1970-01-01 2149-06-01 1970-01-01 2149-06-01 -toLastDayOfMonth -2149-05-31 1970-01-01 2149-05-31 1970-01-01 2149-05-31 -toStartOfQuarter -1970-01-01 2149-04-01 1970-01-01 2149-04-01 -toStartOfYear -1970-01-01 2149-01-01 1970-01-01 2149-01-01 diff --git a/tests/queries/0_stateless/02403_date_time_narrowing.sql b/tests/queries/0_stateless/02403_date_time_narrowing.sql deleted file mode 100644 index 07cbba6f31c..00000000000 --- a/tests/queries/0_stateless/02403_date_time_narrowing.sql +++ /dev/null @@ -1,74 +0,0 @@ --- check conversion of numbers to date/time -- -SELECT toDate(toInt32(toDate32('1930-01-01', 'UTC')), 'UTC'), - toDate(toInt32(toDate32('2151-01-01', 'UTC')), 'UTC'), - toDate(toInt64(toDateTime64('1930-01-01 12:12:12.123', 3, 'UTC')), 'UTC'), - toDate(toInt64(toDateTime64('2151-01-01 12:12:12.123', 3, 'UTC')), 'UTC'), - toDate32(toInt32(toDate32('1900-01-01', 'UTC')) - 1, 'UTC'), - toDate32(toInt32(toDate32('2299-12-31', 'UTC')) + 1, 'UTC'), - toDateTime(toInt64(toDateTime64('1930-01-01 12:12:12.123', 3, 'UTC')), 'UTC'), - toDateTime(toInt64(toDateTime64('2151-01-01 12:12:12.123', 3, 'UTC')), 'UTC'); - --- check conversion of extended range type to normal range type -- -SELECT toDate(toDate32('1930-01-01', 'UTC'), 'UTC'), - toDate(toDate32('2151-01-01', 'UTC'), 'UTC'); - -SELECT toDate(toDateTime64('1930-01-01 12:12:12.12', 3, 'UTC'), 'UTC'), - toDate(toDateTime64('2151-01-01 12:12:12.12', 3, 'UTC'), 'UTC'); - -SELECT toDateTime(toDateTime64('1930-01-01 12:12:12.12', 3, 'UTC'), 'UTC'), - toDateTime(toDateTime64('2151-01-01 12:12:12.12', 3, 'UTC'), 'UTC'); - -SELECT toDateTime(toDate32('1930-01-01', 
'UTC'), 'UTC'), - toDateTime(toDate32('2151-01-01', 'UTC'), 'UTC'); - -SELECT toDateTime(toDate('2141-01-01', 'UTC'), 'UTC'); - --- test DateTimeTransforms -- -SELECT 'toStartOfDay'; -SELECT toStartOfDay(toDate('2141-01-01', 'UTC'), 'UTC'), - toStartOfDay(toDate32('1930-01-01', 'UTC'), 'UTC'), - toStartOfDay(toDate32('2141-01-01', 'UTC'), 'UTC'), - toStartOfDay(toDateTime64('1930-01-01 12:12:12.123', 3, 'UTC'), 'UTC'), - toStartOfDay(toDateTime64('2141-01-01 12:12:12.123', 3, 'UTC'), 'UTC'); - -SELECT 'toStartOfWeek'; -SELECT toStartOfWeek(toDate('1970-01-01', 'UTC')), - toStartOfWeek(toDate32('1970-01-01', 'UTC')), - toStartOfWeek(toDateTime('1970-01-01 10:10:10', 'UTC'), 0, 'UTC'), - toStartOfWeek(toDateTime64('1970-01-01 10:10:10.123', 3, 'UTC'), 1, 'UTC'), - toStartOfWeek(toDate32('1930-01-01', 'UTC')), - toStartOfWeek(toDate32('2151-01-01', 'UTC')), - toStartOfWeek(toDateTime64('1930-01-01 12:12:12.123', 3, 'UTC'), 2, 'UTC'), - toStartOfWeek(toDateTime64('2151-01-01 12:12:12.123', 3, 'UTC'), 3, 'UTC'); - -SELECT 'toMonday'; -SELECT toMonday(toDate('1970-01-02', 'UTC')), - toMonday(toDate32('1930-01-01', 'UTC')), - toMonday(toDate32('2151-01-01', 'UTC')), - toMonday(toDateTime64('1930-01-01 12:12:12.123', 3, 'UTC'), 'UTC'), - toMonday(toDateTime64('2151-01-01 12:12:12.123', 3, 'UTC'), 'UTC'); - -SELECT 'toStartOfMonth'; -SELECT toStartOfMonth(toDate32('1930-01-01', 'UTC')), - toStartOfMonth(toDate32('2151-01-01', 'UTC')), - toStartOfMonth(toDateTime64('1930-01-01 12:12:12.123', 3, 'UTC'), 'UTC'), - toStartOfMonth(toDateTime64('2151-01-01 12:12:12.123', 3, 'UTC'), 'UTC'); - -SELECT 'toLastDayOfMonth'; -SELECT toLastDayOfMonth(toDate('2149-06-03', 'UTC')), - toLastDayOfMonth(toDate32('1930-01-01', 'UTC')), - toLastDayOfMonth(toDate32('2151-01-01', 'UTC')), - toLastDayOfMonth(toDateTime64('1930-01-01 12:12:12.123', 3, 'UTC'), 'UTC'), - toLastDayOfMonth(toDateTime64('2151-01-01 12:12:12.123', 3, 'UTC'), 'UTC'); - -SELECT 'toStartOfQuarter'; -SELECT toStartOfQuarter(toDate32('1930-01-01', 'UTC')), - toStartOfQuarter(toDate32('2151-01-01', 'UTC')), - toStartOfQuarter(toDateTime64('1930-01-01 12:12:12.123', 3, 'UTC'), 'UTC'), - toStartOfQuarter(toDateTime64('2151-01-01 12:12:12.123', 3, 'UTC'), 'UTC'); - -SELECT 'toStartOfYear'; -SELECT toStartOfYear(toDate32('1930-01-01', 'UTC')), - toStartOfYear(toDate32('2151-01-01', 'UTC')), - toStartOfYear(toDateTime64('1930-01-01 12:12:12.123', 3, 'UTC'), 'UTC'), - toStartOfYear(toDateTime64('2151-01-01 12:12:12.123', 3, 'UTC'), 'UTC'); diff --git a/tests/queries/0_stateless/02403_enable_extended_results_for_datetime_functions.reference b/tests/queries/0_stateless/02403_enable_extended_results_for_datetime_functions.reference index 5773810bf64..025191c234a 100644 --- a/tests/queries/0_stateless/02403_enable_extended_results_for_datetime_functions.reference +++ b/tests/queries/0_stateless/02403_enable_extended_results_for_datetime_functions.reference @@ -42,39 +42,39 @@ timeSlot;toDateTime64;true 1920-02-02 10:00:00.000 type;timeSlot;toDateTime64;true DateTime64(3, \'UTC\') toStartOfDay;toDate32;true 1920-02-02 00:00:00.000 type;toStartOfDay;toDate32;true DateTime64(3, \'UTC\') -toStartOfYear;toDate32;false 1970-01-01 +toStartOfYear;toDate32;false 2099-06-06 type;toStartOfYear;toDate32;false Date -toStartOfYear;toDateTime64;false 1970-01-01 +toStartOfYear;toDateTime64;false 2099-06-06 type;toStartOfYear;toDateTime64;false Date toStartOfISOYear;toDate32;false 1970-01-01 type;toStartOfISOYear;toDate32;false Date toStartOfISOYear;toDateTime64;false 
1970-01-01 type;toStartOfISOYear;toDateTime64;false Date -toStartOfQuarter;toDate32;false 1970-01-01 +toStartOfQuarter;toDate32;false 2099-06-06 type;toStartOfQuarter;toDate32;false Date -toStartOfQuarter;toDateTime64;false 1970-01-01 +toStartOfQuarter;toDateTime64;false 2099-06-06 type;toStartOfQuarter;toDateTime64;false Date -toStartOfMonth;toDate32;false 1970-01-01 +toStartOfMonth;toDate32;false 2099-07-07 type;toStartOfMonth;toDate32;false Date -toStartOfMonth;toDateTime64;false 1970-01-01 +toStartOfMonth;toDateTime64;false 2099-07-07 type;toStartOfMonth;toDateTime64;false Date -toStartOfWeek;toDate32;false 1970-01-01 +toStartOfWeek;toDate32;false 2099-07-07 type;toStartOfWeek;toDate32;false Date -toStartOfWeek;toDateTime64;false 1970-01-01 +toStartOfWeek;toDateTime64;false 2099-07-07 type;toStartOfWeek;toDateTime64;false Date -toMonday;toDate32;false 1970-01-01 +toMonday;toDate32;false 2099-07-08 type;toMonday;toDate32;false Date -toMonday;toDateTime64;false 1970-01-01 +toMonday;toDateTime64;false 2099-07-08 type;toMonday;toDateTime64;false Date -toLastDayOfMonth;toDate32;false 1970-01-01 +toLastDayOfMonth;toDate32;false 2099-08-04 type;toLastDayOfMonth;toDate32;false Date -toLastDayOfMonth;toDateTime64;false 1970-01-01 +toLastDayOfMonth;toDateTime64;false 2099-08-04 type;toLastDayOfMonth;toDateTime64;false Date -toStartOfDay;toDateTime64;false 1970-01-01 00:00:00 +toStartOfDay;toDateTime64;false 2056-03-09 06:28:16 type;toStartOfDay;toDateTime64;false DateTime(\'UTC\') -toStartOfHour;toDateTime64;false 1970-01-01 00:00:00 +toStartOfHour;toDateTime64;false 2056-03-09 16:28:16 type;toStartOfHour;toDateTime64;false DateTime(\'UTC\') -toStartOfMinute;toDateTime64;false 1970-01-01 00:00:00 +toStartOfMinute;toDateTime64;false 2056-03-09 16:51:16 type;toStartOfMinute;toDateTime64;false DateTime(\'UTC\') toStartOfFiveMinutes;toDateTime64;false 2056-03-09 16:48:16 type;toStartOfFiveMinutes;toDateTime64;false DateTime(\'UTC\') @@ -84,5 +84,5 @@ toStartOfFifteenMinutes;toDateTime64;false 2056-03-09 16:43:16 type;toStartOfFifteenMinutes;toDateTime64;false DateTime(\'UTC\') timeSlot;toDateTime64;false 2056-03-09 16:58:16 type;timeSlot;toDateTime64;false DateTime(\'UTC\') -toStartOfDay;toDate32;false 1970-01-01 00:00:00 +toStartOfDay;toDate32;false 2056-03-09 06:28:16 type;toStartOfDay;toDate32;false DateTime(\'UTC\') diff --git a/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference b/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference index c7ac00ee18f..040a8c8d317 100644 --- a/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference +++ b/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference @@ -219,10 +219,6 @@ cutFragment cutIPv6 cutQueryString cutQueryStringAndFragment -cutToFirstSignificantSubdomain -cutToFirstSignificantSubdomainCustom -cutToFirstSignificantSubdomainCustomWithWWW -cutToFirstSignificantSubdomainWithWWW cutURLParameter cutWWW dateDiff @@ -280,8 +276,6 @@ dictGetUUIDOrDefault dictHas dictIsIn divide -domain -domainWithoutWWW dotProduct dumpColumnStructure e @@ -330,8 +324,8 @@ filesystemAvailable filesystemCapacity filesystemFree finalizeAggregation -firstSignificantSubdomain firstSignificantSubdomainCustom +firstSignificantSubdomainCustomRFC flattenTuple floor format @@ -592,7 +586,6 @@ polygonsUnionCartesian polygonsUnionSpherical polygonsWithinCartesian polygonsWithinSpherical -port position positionCaseInsensitive positionCaseInsensitiveUTF8 @@ -897,7 +890,6 @@ toYear 
toYearWeek today tokens -topLevelDomain transactionID transactionLatestSnapshot transactionOldestSnapshot diff --git a/tests/queries/0_stateless/02428_combinators_with_over_statement.reference b/tests/queries/0_stateless/02428_combinators_with_over_statement.reference new file mode 100644 index 00000000000..55be3f35cb1 --- /dev/null +++ b/tests/queries/0_stateless/02428_combinators_with_over_statement.reference @@ -0,0 +1,50 @@ +{1:'\0wR'} +{1:'\0D@='} +{1:'\07'} +{1:'\0޲'} +{1:'\0"Q'} +{1:'\0V\''} +{1:'\0\0'} +{1:'\0_'} +{1:'\0q4h'} +{1:'\0g7'} +['\0wR'] +['\0D@='] +['\07'] +['\0޲'] +['\0"Q'] +['\0V\''] +['\0\0'] +['\0_'] +['\0q4h'] +['\0g7'] +['\0Z','\0\0'] +['\04n','\0\0'] +['\0ޓ','\0\0'] +['\01','\0\0'] +['\0_&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') + +mkdir $user_files_path/d1 +touch $user_files_path/d1/text1.txt + +for i in {1..2} +do + echo $i$'\t'$i >> $user_files_path/d1/text1.txt +done + +mkdir $user_files_path/d1/d2 +touch $user_files_path/d1/d2/text2.txt +for i in {3..4} +do + echo $i$'\t'$i >> $user_files_path/d1/d2/text2.txt +done + +mkdir $user_files_path/d1/d2/d3 +touch $user_files_path/d1/d2/d3/text3.txt +for i in {5..6} +do + echo $i$'\t'$i >> $user_files_path/d1/d2/d3/text3.txt +done + +${CLICKHOUSE_CLIENT} -q "SELECT * from file ('d1/*','TSV', 'Index UInt8, Number UInt8')" | sort --numeric-sort +${CLICKHOUSE_CLIENT} -q "SELECT * from file ('d1/**','TSV', 'Index UInt8, Number UInt8')" | sort --numeric-sort +${CLICKHOUSE_CLIENT} -q "SELECT * from file ('d1/*/tex*','TSV', 'Index UInt8, Number UInt8')" | sort --numeric-sort +${CLICKHOUSE_CLIENT} -q "SELECT * from file ('d1/**/tex*','TSV', 'Index UInt8, Number UInt8')" | sort --numeric-sort + + +rm $user_files_path/d1/d2/d3/text3.txt +rmdir $user_files_path/d1/d2/d3 +rm $user_files_path/d1/d2/text2.txt +rmdir $user_files_path/d1/d2 +rm $user_files_path/d1/text1.txt +rmdir $user_files_path/d1 \ No newline at end of file diff --git a/tests/queries/0_stateless/02461_cancel_finish_race.reference b/tests/queries/0_stateless/02461_cancel_finish_race.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02461_cancel_finish_race.sh b/tests/queries/0_stateless/02461_cancel_finish_race.sh new file mode 100755 index 00000000000..7e775437da1 --- /dev/null +++ b/tests/queries/0_stateless/02461_cancel_finish_race.sh @@ -0,0 +1,59 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +function thread_query() +{ + while true; do + $CLICKHOUSE_CLIENT --query "SELECT count() FROM numbers_mt(10000) WHERE rand() = 0 FORMAT Null"; + done +} + +function thread_cancel() +{ + while true; do + $CLICKHOUSE_CLIENT --query "KILL QUERY WHERE current_database = '$CLICKHOUSE_DATABASE' SYNC FORMAT Null"; + done +} + +# https://stackoverflow.com/questions/9954794/execute-a-shell-function-with-timeout +export -f thread_query; +export -f thread_cancel; + +TIMEOUT=30 + +timeout $TIMEOUT bash -c thread_query 2> /dev/null & +timeout $TIMEOUT bash -c thread_cancel 2> /dev/null & + +timeout $TIMEOUT bash -c thread_query 2> /dev/null & +timeout $TIMEOUT bash -c thread_cancel 2> /dev/null & + +timeout $TIMEOUT bash -c thread_query 2> /dev/null & +timeout $TIMEOUT bash -c thread_cancel 2> /dev/null & + +timeout $TIMEOUT bash -c thread_query 2> /dev/null & +timeout $TIMEOUT bash -c thread_cancel 2> /dev/null & + +timeout $TIMEOUT bash -c thread_query 2> /dev/null & +timeout $TIMEOUT bash -c thread_cancel 2> /dev/null & + +timeout $TIMEOUT bash -c thread_query 2> /dev/null & +timeout $TIMEOUT bash -c thread_cancel 2> /dev/null & + +timeout $TIMEOUT bash -c thread_query 2> /dev/null & +timeout $TIMEOUT bash -c thread_cancel 2> /dev/null & + +timeout $TIMEOUT bash -c thread_query 2> /dev/null & +timeout $TIMEOUT bash -c thread_cancel 2> /dev/null & + +timeout $TIMEOUT bash -c thread_query 2> /dev/null & +timeout $TIMEOUT bash -c thread_cancel 2> /dev/null & + +timeout $TIMEOUT bash -c thread_query 2> /dev/null & +timeout $TIMEOUT bash -c thread_cancel 2> /dev/null & + +wait diff --git a/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.reference b/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.reference new file mode 100644 index 00000000000..c0d3de1806a --- /dev/null +++ b/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.reference @@ -0,0 +1,64 @@ +1 +2 +1 +2 +1 +2 +1 +2 +1 +2 +1 +2 +1 +2 +1 +2 +1 +2 +1 +2 +1 +2 +1 +2 +1 +2 +1 +2 +1 +2 +1 +2 +1 +2 +1 +2 +1 +2 +1 +2 +1 +2 +1 +2 +1 +2 +1 +2 +2022-02-02 00:00:01 +2022-02-02 00:00:02 +2022-02-02 00:00:01 +2022-02-02 00:00:02 +2022-02-02 00:00:01 +2022-02-02 00:00:02 +2022-02-02 00:00:01 +2022-02-02 00:00:02 +2022-02-02 00:00:01 +2022-02-02 00:00:02 +2022-02-02 00:00:01 +2022-02-02 00:00:02 +2022-02-02 00:00:01 +2022-02-02 00:00:02 +2022-02-02 00:00:01 +2022-02-02 00:00:02 diff --git a/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.sql b/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.sql new file mode 100644 index 00000000000..75c8cb2b7e7 --- /dev/null +++ b/tests/queries/0_stateless/02461_mullable_pk_monotonicity_bug.sql @@ -0,0 +1,62 @@ +create table tab (x Nullable(UInt8)) engine = MergeTree order by x settings allow_nullable_key = 1, index_granularity = 2; +insert into tab select number from numbers(4); +set allow_suspicious_low_cardinality_types=1; +set max_rows_to_read = 2; + +SELECT x + 1 FROM tab where plus(x, 1) <= 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::Nullable(UInt8)) <= 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::LowCardinality(UInt8)) <= 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::LowCardinality(Nullable(UInt8))) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1, x) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1::Nullable(UInt8), x) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1::LowCardinality(UInt8), x) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1::LowCardinality(Nullable(UInt8)), x) <= 2 
order by x; + +drop table tab; +set max_rows_to_read = 100; +create table tab (x LowCardinality(UInt8)) engine = MergeTree order by x settings allow_nullable_key = 1, index_granularity = 2; +insert into tab select number from numbers(4); + +set max_rows_to_read = 2; +SELECT x + 1 FROM tab where plus(x, 1) <= 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::Nullable(UInt8)) <= 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::LowCardinality(UInt8)) <= 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::LowCardinality(Nullable(UInt8))) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1, x) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1::Nullable(UInt8), x) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1::LowCardinality(UInt8), x) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1::LowCardinality(Nullable(UInt8)), x) <= 2 order by x; + +drop table tab; +set max_rows_to_read = 100; +create table tab (x UInt128) engine = MergeTree order by x settings allow_nullable_key = 1, index_granularity = 2; +insert into tab select number from numbers(4); + +set max_rows_to_read = 2; +SELECT x + 1 FROM tab where plus(x, 1) <= 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::Nullable(UInt8)) <= 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::LowCardinality(UInt8)) <= 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::LowCardinality(Nullable(UInt8))) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1, x) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1::Nullable(UInt8), x) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1::LowCardinality(UInt8), x) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1::LowCardinality(Nullable(UInt8)), x) <= 2 order by x; + +set max_rows_to_read = 100; +SELECT x + 1 FROM tab WHERE (x + 1::LowCardinality(UInt8)) <= -9223372036854775808 order by x; + +drop table tab; +create table tab (x DateTime) engine = MergeTree order by x settings allow_nullable_key = 1, index_granularity = 2; +insert into tab select toDateTime('2022-02-02') + number from numbers(4); + +set max_rows_to_read = 2; +SELECT x + 1 FROM tab where plus(x, 1) <= toDateTime('2022-02-02') + 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::Nullable(UInt8)) <= toDateTime('2022-02-02') + 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::LowCardinality(UInt8)) <= toDateTime('2022-02-02') + 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::LowCardinality(Nullable(UInt8))) <= toDateTime('2022-02-02') + 2 order by x; +SELECT 1 + x FROM tab where plus(1, x) <= toDateTime('2022-02-02') + 2 order by x; +SELECT 1 + x FROM tab where plus(1::Nullable(UInt8), x) <= toDateTime('2022-02-02') + 2 order by x; +SELECT 1 + x FROM tab where plus(1::LowCardinality(UInt8), x) <= toDateTime('2022-02-02') + 2 order by x; +SELECT 1 + x FROM tab where plus(1::LowCardinality(Nullable(UInt8)), x) <= toDateTime('2022-02-02') + 2 order by x; + +SELECT x + 1 FROM tab WHERE (x + CAST('1', 'Nullable(UInt8)')) <= -2147483647 ORDER BY x ASC NULLS FIRST; diff --git a/tests/queries/0_stateless/02461_welch_t_test_fuzz.reference b/tests/queries/0_stateless/02461_welch_t_test_fuzz.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02461_welch_t_test_fuzz.sql b/tests/queries/0_stateless/02461_welch_t_test_fuzz.sql new file mode 100644 index 00000000000..b22dc49dec3 --- /dev/null +++ b/tests/queries/0_stateless/02461_welch_t_test_fuzz.sql @@ -0,0 +1,8 @@ + +DROP TABLE IF EXISTS welch_ttest__fuzz_7; +CREATE TABLE welch_ttest__fuzz_7 (left UInt128, 
right UInt128) ENGINE = Memory; + +INSERT INTO welch_ttest__fuzz_7 VALUES (0.010268, 0), (0.000167, 0), (0.000167, 0), (0.159258, 1), (0.136278, 1), (0.122389, 1); + +SELECT roundBankers(welchTTest(left, right).2, 6) from welch_ttest__fuzz_7; -- { serverError 36 } +SELECT roundBankers(studentTTest(left, right).2, 6) from welch_ttest__fuzz_7; -- { serverError 36 } diff --git a/tests/queries/0_stateless/02462_distributions.reference b/tests/queries/0_stateless/02462_distributions.reference new file mode 100644 index 00000000000..56b04bcb856 --- /dev/null +++ b/tests/queries/0_stateless/02462_distributions.reference @@ -0,0 +1,12 @@ +Ok +Ok +Ok +Ok +Ok +Ok +Ok +0 +1 +Ok +Ok +Ok diff --git a/tests/queries/0_stateless/02462_distributions.sql b/tests/queries/0_stateless/02462_distributions.sql new file mode 100644 index 00000000000..b45dc897f2a --- /dev/null +++ b/tests/queries/0_stateless/02462_distributions.sql @@ -0,0 +1,24 @@ +# Values should be between 0 and 1 +SELECT DISTINCT if (a >= toFloat64(0) AND a <= toFloat64(1), 'Ok', 'Fail') FROM (SELECT randUniform(0, 1) AS a FROM numbers(100000)); +# Mean should be around 0 +SELECT DISTINCT if (m >= toFloat64(-0.2) AND m <= toFloat64(0.2), 'Ok', 'Fail') FROM (SELECT avg(a) as m FROM (SELECT randNormal(0, 5) AS a FROM numbers(100000))); +# Values should be >= 0 +SELECT DISTINCT if (a >= toFloat64(0), 'Ok', 'Fail') FROM (SELECT randLogNormal(0, 5) AS a FROM numbers(100000)); +# Values should be >= 0 +SELECT DISTINCT if (a >= toFloat64(0), 'Ok', 'Fail') FROM (SELECT randExponential(15) AS a FROM numbers(100000)); +# Values should be >= 0 +SELECT DISTINCT if (a >= toFloat64(0), 'Ok', 'Fail') FROM (SELECT randChiSquared(3) AS a FROM numbers(100000)); +# Mean should be around 0 +SELECT DISTINCT if (m > toFloat64(-0.2) AND m < toFloat64(0.2), 'Ok', 'Fail') FROM (SELECT avg(a) as m FROM (SELECT randStudentT(5) AS a FROM numbers(100000))); +# Values should be >= 0 +SELECT DISTINCT if (a >= toFloat64(0), 'Ok', 'Fail') FROM (SELECT randFisherF(3, 4) AS a FROM numbers(100000)); +# There should be only 0s and 1s +SELECT a FROM (SELECT DISTINCT randBernoulli(0.5) AS a FROM numbers(100000)) ORDER BY a; +# Values should be >= 0 +SELECT DISTINCT if (a >= toFloat64(0), 'Ok', 'Fail') FROM (SELECT randBinomial(3, 0.5) AS a FROM numbers(100000)); +# Values should be >= 0 +SELECT DISTINCT if (a >= toFloat64(0), 'Ok', 'Fail') FROM (SELECT randNegativeBinomial(3, 0.5) AS a FROM numbers(100000)); +# Values should be >= 0 +SELECT DISTINCT if (a >= toFloat64(0), 'Ok', 'Fail') FROM (SELECT randPoisson(44) AS a FROM numbers(100000)); +# No errors +SELECT randUniform(1, 2, 1), randNormal(0, 1, 'abacaba'), randLogNormal(0, 10, 'b'), randChiSquared(1, 1), randStudentT(7, '8'), randFisherF(23, 42, 100), randBernoulli(0.5, 2), randBinomial(3, 0.5, 1), randNegativeBinomial(3, 0.5, 2), randPoisson(44, 44) FORMAT Null; diff --git a/tests/queries/0_stateless/02462_int_to_date.reference b/tests/queries/0_stateless/02462_int_to_date.reference new file mode 100644 index 00000000000..f31441cf3b8 --- /dev/null +++ b/tests/queries/0_stateless/02462_int_to_date.reference @@ -0,0 +1,4 @@ +20221011 2022-10-11 1665519765 +20221011 2022-10-11 1665519765 +20221011 2022-10-11 1665519765 Int32 +20221011 2022-10-11 1665519765 UInt32 diff --git a/tests/queries/0_stateless/02462_int_to_date.sql b/tests/queries/0_stateless/02462_int_to_date.sql new file mode 100644 index 00000000000..cd470ca12f6 --- /dev/null +++ b/tests/queries/0_stateless/02462_int_to_date.sql @@ -0,0 +1,4 @@ +select 
toYYYYMMDD(toDate(recordTimestamp, 'Europe/Amsterdam')), toDate(recordTimestamp, 'Europe/Amsterdam'), toInt64(1665519765) as recordTimestamp; +select toYYYYMMDD(toDate(recordTimestamp, 'Europe/Amsterdam')), toDate(recordTimestamp, 'Europe/Amsterdam'), toUInt64(1665519765) as recordTimestamp; +select toYYYYMMDD(toDate(recordTimestamp, 'Europe/Amsterdam')), toDate(recordTimestamp, 'Europe/Amsterdam'), toInt32(1665519765) as recordTimestamp, toTypeName(recordTimestamp); +select toYYYYMMDD(toDate(recordTimestamp, 'Europe/Amsterdam')), toDate(recordTimestamp, 'Europe/Amsterdam'), toUInt32(1665519765) as recordTimestamp, toTypeName(recordTimestamp); diff --git a/tests/queries/0_stateless/02462_match_regexp_pk.reference b/tests/queries/0_stateless/02462_match_regexp_pk.reference new file mode 100644 index 00000000000..428d6556f4c --- /dev/null +++ b/tests/queries/0_stateless/02462_match_regexp_pk.reference @@ -0,0 +1,5 @@ +4 +1 +3 +4 +4 diff --git a/tests/queries/0_stateless/02462_match_regexp_pk.sql b/tests/queries/0_stateless/02462_match_regexp_pk.sql new file mode 100644 index 00000000000..1a944b96196 --- /dev/null +++ b/tests/queries/0_stateless/02462_match_regexp_pk.sql @@ -0,0 +1,9 @@ +CREATE TABLE mt_match_pk (v String) ENGINE = MergeTree ORDER BY v SETTINGS index_granularity = 1; +INSERT INTO mt_match_pk VALUES ('a'), ('aaa'), ('aba'), ('bac'), ('acccca'); + +SET force_primary_key = 1; +SELECT count() FROM mt_match_pk WHERE match(v, '^a'); +SELECT count() FROM mt_match_pk WHERE match(v, '^ab'); +SELECT count() FROM mt_match_pk WHERE match(v, '^a.'); +SELECT count() FROM mt_match_pk WHERE match(v, '^ab*'); +SELECT count() FROM mt_match_pk WHERE match(v, '^ac?'); diff --git a/tests/queries/0_stateless/02463_julian_day_ubsan.reference b/tests/queries/0_stateless/02463_julian_day_ubsan.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02463_julian_day_ubsan.sql b/tests/queries/0_stateless/02463_julian_day_ubsan.sql new file mode 100644 index 00000000000..a8583d7b0a8 --- /dev/null +++ b/tests/queries/0_stateless/02463_julian_day_ubsan.sql @@ -0,0 +1 @@ +SELECT fromModifiedJulianDay(9223372036854775807 :: Int64); -- { serverError 490 } diff --git a/tests/queries/0_stateless/02464_decimal_scale_buffer_overflow.reference b/tests/queries/0_stateless/02464_decimal_scale_buffer_overflow.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02464_decimal_scale_buffer_overflow.sql b/tests/queries/0_stateless/02464_decimal_scale_buffer_overflow.sql new file mode 100644 index 00000000000..355d9012f1f --- /dev/null +++ b/tests/queries/0_stateless/02464_decimal_scale_buffer_overflow.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS series__fuzz_35; +CREATE TABLE series__fuzz_35 (`i` UInt8, `x_value` Decimal(18, 14), `y_value` DateTime) ENGINE = Memory; +INSERT INTO series__fuzz_35(i, x_value, y_value) VALUES (1, 5.6,-4.4),(2, -9.6,3),(3, -1.3,-4),(4, 5.3,9.7),(5, 4.4,0.037),(6, -8.6,-7.8),(7, 5.1,9.3),(8, 7.9,-3.6),(9, -8.2,0.62),(10, -3,7.3); +SELECT skewSamp(x_value) FROM (SELECT x_value as x_value FROM series__fuzz_35 LIMIT 2) FORMAT Null; +DROP TABLE series__fuzz_35; diff --git a/tests/queries/0_stateless/02465_limit_trivial_max_rows_to_read.reference b/tests/queries/0_stateless/02465_limit_trivial_max_rows_to_read.reference new file mode 100644 index 00000000000..87370760038 --- /dev/null +++ b/tests/queries/0_stateless/02465_limit_trivial_max_rows_to_read.reference @@ -0,0 +1,7 @@ +0 +0 +1 +2 +3 +4 +0 diff 
--git a/tests/queries/0_stateless/02465_limit_trivial_max_rows_to_read.sql b/tests/queries/0_stateless/02465_limit_trivial_max_rows_to_read.sql new file mode 100644 index 00000000000..ee7a4e6b6b5 --- /dev/null +++ b/tests/queries/0_stateless/02465_limit_trivial_max_rows_to_read.sql @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS t_max_rows_to_read; + +CREATE TABLE t_max_rows_to_read (a UInt64) +ENGINE = MergeTree ORDER BY a +SETTINGS index_granularity = 4; + +INSERT INTO t_max_rows_to_read SELECT number FROM numbers(100); + +SET max_block_size = 10; +SET max_rows_to_read = 20; +SET read_overflow_mode = 'throw'; + +SELECT number FROM numbers(30); -- { serverError 158 } +SELECT number FROM numbers(30) LIMIT 21; -- { serverError 158 } +SELECT number FROM numbers(30) LIMIT 1; +SELECT number FROM numbers(5); + +SELECT a FROM t_max_rows_to_read LIMIT 1; +SELECT a FROM t_max_rows_to_read LIMIT 11 offset 11; -- { serverError 158 } +SELECT a FROM t_max_rows_to_read WHERE a > 50 LIMIT 1; -- { serverError 158 } + +DROP TABLE t_max_rows_to_read; diff --git a/tests/queries/0_stateless/02466_distributed_query_profiler.reference b/tests/queries/0_stateless/02466_distributed_query_profiler.reference new file mode 100644 index 00000000000..4521d575ff3 --- /dev/null +++ b/tests/queries/0_stateless/02466_distributed_query_profiler.reference @@ -0,0 +1,10 @@ +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 diff --git a/tests/queries/0_stateless/02466_distributed_query_profiler.sql b/tests/queries/0_stateless/02466_distributed_query_profiler.sql new file mode 100644 index 00000000000..9fc2fe7b4bd --- /dev/null +++ b/tests/queries/0_stateless/02466_distributed_query_profiler.sql @@ -0,0 +1,21 @@ +-- This is a regression test for EINTR handling in MultiplexedConnections::getReplicaForReading() + +select * from remote('127.{2,4}', view( + -- This emulates a slow query: the server returns one row every 0.1 seconds + select sleep(0.1) from numbers(20) settings max_block_size=1) +) +-- LIMIT is to activate query cancellation once enough rows have been read. +limit 10 +settings + -- This is to avoid draining in the background and to get the exception during query execution + drain_timeout=-1, + -- This is to activate as many signals as possible to trigger EINTR + query_profiler_real_time_period_ns=1, + -- This is to use MultiplexedConnections + use_hedged_requests=0, + -- This is to make the initiator wait for the cancel packet in MultiplexedConnections::getReplicaForReading() + -- + -- NOTE: even a smaller sleep would be enough to trigger this problem + -- with 100% probability; however, just to make it more reliable, increase + -- it to 2 seconds. 
+ sleep_in_receive_cancel_ms=2000; diff --git a/tests/queries/0_stateless/02467_cross_join_three_table_functions.reference b/tests/queries/0_stateless/02467_cross_join_three_table_functions.reference new file mode 100644 index 00000000000..0718dd8e65f --- /dev/null +++ b/tests/queries/0_stateless/02467_cross_join_three_table_functions.reference @@ -0,0 +1 @@ +1320 diff --git a/tests/queries/0_stateless/02467_cross_join_three_table_functions.sql b/tests/queries/0_stateless/02467_cross_join_three_table_functions.sql new file mode 100644 index 00000000000..5c7da815bbe --- /dev/null +++ b/tests/queries/0_stateless/02467_cross_join_three_table_functions.sql @@ -0,0 +1 @@ +SELECT count(*) FROM numbers(10) AS a, numbers(11) AS b, numbers(12) AS c; diff --git a/tests/queries/0_stateless/02467_set_with_lowcardinality_type.reference b/tests/queries/0_stateless/02467_set_with_lowcardinality_type.reference new file mode 100644 index 00000000000..b3f28057554 --- /dev/null +++ b/tests/queries/0_stateless/02467_set_with_lowcardinality_type.reference @@ -0,0 +1,2 @@ +1 test +1 test diff --git a/tests/queries/0_stateless/02467_set_with_lowcardinality_type.sql b/tests/queries/0_stateless/02467_set_with_lowcardinality_type.sql new file mode 100644 index 00000000000..dee6f7de74a --- /dev/null +++ b/tests/queries/0_stateless/02467_set_with_lowcardinality_type.sql @@ -0,0 +1,31 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/42460 +DROP TABLE IF EXISTS bloom_filter_nullable_index__fuzz_0; +CREATE TABLE bloom_filter_nullable_index__fuzz_0 +( + `order_key` UInt64, + `str` Nullable(String), + INDEX idx str TYPE bloom_filter GRANULARITY 1 +) +ENGINE = MergeTree ORDER BY order_key SETTINGS index_granularity = 6; + +INSERT INTO bloom_filter_nullable_index__fuzz_0 VALUES (1, 'test'); +INSERT INTO bloom_filter_nullable_index__fuzz_0 VALUES (2, 'test2'); + +DROP TABLE IF EXISTS bloom_filter_nullable_index__fuzz_1; +CREATE TABLE bloom_filter_nullable_index__fuzz_1 +( + `order_key` UInt64, + `str` String, + INDEX idx str TYPE bloom_filter GRANULARITY 1 +) +ENGINE = MergeTree ORDER BY order_key SETTINGS index_granularity = 6; + +INSERT INTO bloom_filter_nullable_index__fuzz_0 VALUES (1, 'test'); +INSERT INTO bloom_filter_nullable_index__fuzz_0 VALUES (2, 'test2'); + +DROP TABLE IF EXISTS nullable_string_value__fuzz_2; +CREATE TABLE nullable_string_value__fuzz_2 (`value` LowCardinality(String)) ENGINE = TinyLog; +INSERT INTO nullable_string_value__fuzz_2 VALUES ('test'); + +SELECT * FROM bloom_filter_nullable_index__fuzz_0 WHERE str IN (SELECT value FROM nullable_string_value__fuzz_2); +SELECT * FROM bloom_filter_nullable_index__fuzz_1 WHERE str IN (SELECT value FROM nullable_string_value__fuzz_2); diff --git a/tests/queries/0_stateless/02468_has_any_tuple.reference b/tests/queries/0_stateless/02468_has_any_tuple.reference new file mode 100644 index 00000000000..252a9293563 --- /dev/null +++ b/tests/queries/0_stateless/02468_has_any_tuple.reference @@ -0,0 +1,4 @@ +1 +1 +[(3,3)] +1 diff --git a/tests/queries/0_stateless/02468_has_any_tuple.sql b/tests/queries/0_stateless/02468_has_any_tuple.sql new file mode 100644 index 00000000000..12c7222d593 --- /dev/null +++ b/tests/queries/0_stateless/02468_has_any_tuple.sql @@ -0,0 +1,4 @@ +select [(toUInt8(3), toUInt8(3))] = [(toInt16(3), toInt16(3))]; +select hasAny([(toInt16(3), toInt16(3))],[(toInt16(3), toInt16(3))]); +select arrayFilter(x -> x = (toInt16(3), toInt16(3)), arrayZip([toUInt8(3)], [toUInt8(3)])); +select hasAny([(toUInt8(3), toUInt8(3))],[(toInt16(3), 
toInt16(3))]); diff --git a/tests/queries/0_stateless/02469_fix_aliases_parser.reference b/tests/queries/0_stateless/02469_fix_aliases_parser.reference new file mode 100644 index 00000000000..09f584c9cd4 --- /dev/null +++ b/tests/queries/0_stateless/02469_fix_aliases_parser.reference @@ -0,0 +1,2 @@ +45 +[0] diff --git a/tests/queries/0_stateless/02469_fix_aliases_parser.sql b/tests/queries/0_stateless/02469_fix_aliases_parser.sql new file mode 100644 index 00000000000..227d8becdb6 --- /dev/null +++ b/tests/queries/0_stateless/02469_fix_aliases_parser.sql @@ -0,0 +1,9 @@ +SELECT sum(number number number) FROM numbers(10); -- { clientError 62 } +SELECT sum(number number) FROM numbers(10); -- { clientError 62 } +SELECT sum(number AS number) FROM numbers(10); + +SELECT [number number number] FROM numbers(1); -- { clientError 62 } +SELECT [number number] FROM numbers(1); -- { clientError 62 } +SELECT [number AS number] FROM numbers(1); + +SELECT cast('1234' lhs lhs, 'UInt32'), lhs; -- { clientError 62 } \ No newline at end of file diff --git a/tests/queries/0_stateless/02469_interval_msan.reference b/tests/queries/0_stateless/02469_interval_msan.reference new file mode 100644 index 00000000000..c18b4e9b082 --- /dev/null +++ b/tests/queries/0_stateless/02469_interval_msan.reference @@ -0,0 +1,8 @@ +1 +1 +1 +1 +1 +1 +1 +1 diff --git a/tests/queries/0_stateless/02469_interval_msan.sql b/tests/queries/0_stateless/02469_interval_msan.sql new file mode 100644 index 00000000000..4b4a9f746ea --- /dev/null +++ b/tests/queries/0_stateless/02469_interval_msan.sql @@ -0,0 +1,19 @@ +SELECT now() + 1::Int128; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT now() + 1::Int256; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT now() + 1::UInt128; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT now() + 1::UInt256; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT now() - 1::Int128; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT now() - 1::Int256; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT now() - 1::UInt128; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT now() - 1::UInt256; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT now() + INTERVAL 1::Int128 SECOND - now(); +SELECT now() + INTERVAL 1::Int256 SECOND - now(); +SELECT now() + INTERVAL 1::UInt128 SECOND - now(); +SELECT now() + INTERVAL 1::UInt256 SECOND - now(); + +SELECT today() + INTERVAL 1::Int128 DAY - today(); +SELECT today() + INTERVAL 1::Int256 DAY - today(); +SELECT today() + INTERVAL 1::UInt128 DAY - today(); +SELECT today() + INTERVAL 1::UInt256 DAY - today(); diff --git a/tests/queries/0_stateless/02470_suspicious_low_cardinality_msan.reference b/tests/queries/0_stateless/02470_suspicious_low_cardinality_msan.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02470_suspicious_low_cardinality_msan.sql b/tests/queries/0_stateless/02470_suspicious_low_cardinality_msan.sql new file mode 100644 index 00000000000..6969be1ca64 --- /dev/null +++ b/tests/queries/0_stateless/02470_suspicious_low_cardinality_msan.sql @@ -0,0 +1,6 @@ +DROP TABLE IF EXISTS alias_2__fuzz_25; +SET allow_suspicious_low_cardinality_types = 1; +CREATE TABLE alias_2__fuzz_25 (`dt` LowCardinality(Date), `col` DateTime, `col2` Nullable(Int256), `colAlias0` Nullable(DateTime64(3)) ALIAS col, `colAlias3` Nullable(Int32) ALIAS col3 + colAlias0, `colAlias1` LowCardinality(UInt16) ALIAS colAlias0 + col2, `colAlias2` LowCardinality(Int32) ALIAS colAlias0 + colAlias1, `col3` Nullable(UInt8)) ENGINE = 
MergeTree ORDER BY dt; +insert into alias_2__fuzz_25 (dt, col, col2, col3) values ('2020-02-01', 1, 2, 3); +SELECT colAlias0, colAlias2, colAlias3 FROM alias_2__fuzz_25; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +DROP TABLE alias_2__fuzz_25; diff --git a/tests/queries/0_stateless/02471_wrong_date_monotonicity.reference b/tests/queries/0_stateless/02471_wrong_date_monotonicity.reference new file mode 100644 index 00000000000..d00491fd7e5 --- /dev/null +++ b/tests/queries/0_stateless/02471_wrong_date_monotonicity.reference @@ -0,0 +1 @@ +1 diff --git a/tests/queries/0_stateless/02471_wrong_date_monotonicity.sql b/tests/queries/0_stateless/02471_wrong_date_monotonicity.sql new file mode 100644 index 00000000000..40d64e53309 --- /dev/null +++ b/tests/queries/0_stateless/02471_wrong_date_monotonicity.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS tdm__fuzz_23; +CREATE TABLE tdm__fuzz_23 (`x` UInt256) ENGINE = MergeTree ORDER BY x SETTINGS write_final_mark = 0; +INSERT INTO tdm__fuzz_23 FORMAT Values (1); +SELECT count(x) FROM tdm__fuzz_23 WHERE toDate(x) < toDate(now(), 'Asia/Istanbul') SETTINGS max_rows_to_read = 1; +DROP TABLE tdm__fuzz_23; diff --git a/tests/queries/0_stateless/02472_segfault_expression_parser.reference b/tests/queries/0_stateless/02472_segfault_expression_parser.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02472_segfault_expression_parser.sql b/tests/queries/0_stateless/02472_segfault_expression_parser.sql new file mode 100644 index 00000000000..285de80a64a --- /dev/null +++ b/tests/queries/0_stateless/02472_segfault_expression_parser.sql @@ -0,0 +1 @@ +SELECT TIMESTAMP_SUB (SELECT ILIKE INTO OUTFILE , accurateCast ) FROM TIMESTAMP_SUB ( MINUTE , ) GROUP BY accurateCast; -- { clientError 62 } diff --git a/tests/queries/0_stateless/02473_map_element_nullable.reference b/tests/queries/0_stateless/02473_map_element_nullable.reference new file mode 100644 index 00000000000..84a9ba03bb4 --- /dev/null +++ b/tests/queries/0_stateless/02473_map_element_nullable.reference @@ -0,0 +1,16 @@ +2 \N \N +2 \N \N +2 \N \N +2 \N \N +2 \N \N +2 \N \N +2 \N \N +2 \N \N +2 \N \N +2 \N \N +2 \N \N +2 \N \N +2 \N \N +2 \N \N +2 \N \N +2 \N \N diff --git a/tests/queries/0_stateless/02473_map_element_nullable.sql b/tests/queries/0_stateless/02473_map_element_nullable.sql new file mode 100644 index 00000000000..e9c351d112c --- /dev/null +++ b/tests/queries/0_stateless/02473_map_element_nullable.sql @@ -0,0 +1,19 @@ +WITH map(1, 2, 3, NULL) AS m SELECT m[toNullable(1)], m[toNullable(2)], m[toNullable(3)]; +WITH map(1, 2, 3, NULL) AS m SELECT m[materialize(toNullable(1))], m[materialize(toNullable(2))], m[materialize(toNullable(3))]; +WITH materialize(map(1, 2, 3, NULL)) AS m SELECT m[toNullable(1)], m[toNullable(2)], m[toNullable(3)]; +WITH materialize(map(1, 2, 3, NULL)) AS m SELECT m[materialize(toNullable(1))], m[materialize(toNullable(2))], m[materialize(toNullable(3))]; + +WITH map('a', 2, 'b', NULL) AS m SELECT m[toNullable('a')], m[toNullable('b')], m[toNullable('c')]; +WITH map('a', 2, 'b', NULL) AS m SELECT m[materialize(toNullable('a'))], m[materialize(toNullable('b'))], m[materialize(toNullable('c'))]; +WITH materialize(map('a', 2, 'b', NULL)) AS m SELECT m[toNullable('a')], m[toNullable('b')], m[toNullable('c')]; +WITH materialize(map('a', 2, 'b', NULL)) AS m SELECT m[materialize(toNullable('a'))], m[materialize(toNullable('b'))], m[materialize(toNullable('c'))]; + +WITH map(1, 2, 3, NULL) AS m SELECT m[1], m[2], m[3]; +WITH map(1, 2, 
3, NULL) AS m SELECT m[materialize(1)], m[materialize(2)], m[materialize(3)]; +WITH materialize(map(1, 2, 3, NULL)) AS m SELECT m[1], m[2], m[3]; +WITH materialize(map(1, 2, 3, NULL)) AS m SELECT m[materialize(1)], m[materialize(2)], m[materialize(3)]; + +WITH map('a', 2, 'b', NULL) AS m SELECT m['a'], m['b'], m['c']; +WITH map('a', 2, 'b', NULL) AS m SELECT m[materialize('a')], m[materialize('b')], m[materialize('c')]; +WITH materialize(map('a', 2, 'b', NULL)) AS m SELECT m['a'], m['b'], m['c']; +WITH materialize(map('a', 2, 'b', NULL)) AS m SELECT m[materialize('a')], m[materialize('b')], m[materialize('c')]; diff --git a/tests/queries/0_stateless/02473_prewhere_with_bigint.reference b/tests/queries/0_stateless/02473_prewhere_with_bigint.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02473_prewhere_with_bigint.sql b/tests/queries/0_stateless/02473_prewhere_with_bigint.sql new file mode 100644 index 00000000000..29c6f0da2a1 --- /dev/null +++ b/tests/queries/0_stateless/02473_prewhere_with_bigint.sql @@ -0,0 +1,24 @@ +DROP TABLE IF EXISTS prewhere_int128; +DROP TABLE IF EXISTS prewhere_int256; +DROP TABLE IF EXISTS prewhere_uint128; +DROP TABLE IF EXISTS prewhere_uint256; + +CREATE TABLE prewhere_int128 (a Int128) ENGINE=MergeTree ORDER BY a; +INSERT INTO prewhere_int128 VALUES (1); +SELECT a FROM prewhere_int128 PREWHERE a; -- { serverError 59 } +DROP TABLE prewhere_int128; + +CREATE TABLE prewhere_int256 (a Int256) ENGINE=MergeTree ORDER BY a; +INSERT INTO prewhere_int256 VALUES (1); +SELECT a FROM prewhere_int256 PREWHERE a; -- { serverError 59 } +DROP TABLE prewhere_int256; + +CREATE TABLE prewhere_uint128 (a UInt128) ENGINE=MergeTree ORDER BY a; +INSERT INTO prewhere_uint128 VALUES (1); +SELECT a FROM prewhere_uint128 PREWHERE a; -- { serverError 59 } +DROP TABLE prewhere_uint128; + +CREATE TABLE prewhere_uint256 (a UInt256) ENGINE=MergeTree ORDER BY a; +INSERT INTO prewhere_uint256 VALUES (1); +SELECT a FROM prewhere_uint256 PREWHERE a; -- { serverError 59 } +DROP TABLE prewhere_uint256; diff --git a/tests/queries/0_stateless/02474_analyzer_subqueries_table_expression_modifiers.reference b/tests/queries/0_stateless/02474_analyzer_subqueries_table_expression_modifiers.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02474_analyzer_subqueries_table_expression_modifiers.sql b/tests/queries/0_stateless/02474_analyzer_subqueries_table_expression_modifiers.sql new file mode 100644 index 00000000000..456783cad26 --- /dev/null +++ b/tests/queries/0_stateless/02474_analyzer_subqueries_table_expression_modifiers.sql @@ -0,0 +1,17 @@ +SET allow_experimental_analyzer = 1; + +SELECT * FROM (SELECT 1) FINAL; -- { serverError 1 } +SELECT * FROM (SELECT 1) SAMPLE 1/2; -- { serverError 1 } +SELECT * FROM (SELECT 1) FINAL SAMPLE 1/2; -- { serverError 1 } + +WITH cte_subquery AS (SELECT 1) SELECT * FROM cte_subquery FINAL; -- { serverError 1 } +WITH cte_subquery AS (SELECT 1) SELECT * FROM cte_subquery SAMPLE 1/2; -- { serverError 1 } +WITH cte_subquery AS (SELECT 1) SELECT * FROM cte_subquery FINAL SAMPLE 1/2; -- { serverError 1 } + +SELECT * FROM (SELECT 1 UNION ALL SELECT 1) FINAL; -- { serverError 1 } +SELECT * FROM (SELECT 1 UNION ALL SELECT 1) SAMPLE 1/2; -- { serverError 1 } +SELECT * FROM (SELECT 1 UNION ALL SELECT 1) FINAL SAMPLE 1/2; -- { serverError 1 } + +WITH cte_subquery AS (SELECT 1 UNION ALL SELECT 1) SELECT * FROM cte_subquery FINAL; -- { serverError 1 } +WITH cte_subquery AS 
(SELECT 1 UNION ALL SELECT 1) SELECT * FROM cte_subquery SAMPLE 1/2; -- { serverError 1 } +WITH cte_subquery AS (SELECT 1 UNION ALL SELECT 1) SELECT * FROM cte_subquery FINAL SAMPLE 1/2; -- { serverError 1 } diff --git a/tests/queries/0_stateless/02474_create_user_query_fuzzer_bug.reference b/tests/queries/0_stateless/02474_create_user_query_fuzzer_bug.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02474_create_user_query_fuzzer_bug.sql b/tests/queries/0_stateless/02474_create_user_query_fuzzer_bug.sql new file mode 100644 index 00000000000..3ef1469cf1b --- /dev/null +++ b/tests/queries/0_stateless/02474_create_user_query_fuzzer_bug.sql @@ -0,0 +1 @@ +EXPLAIN AST ALTER user WITH a; -- { clientError SYNTAX_ERROR } diff --git a/tests/queries/0_stateless/02474_fix_function_parser_bug.reference b/tests/queries/0_stateless/02474_fix_function_parser_bug.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02474_fix_function_parser_bug.sql b/tests/queries/0_stateless/02474_fix_function_parser_bug.sql new file mode 100644 index 00000000000..67d97aa1c25 --- /dev/null +++ b/tests/queries/0_stateless/02474_fix_function_parser_bug.sql @@ -0,0 +1 @@ +CREATE DATABASE conv_mian ENGINE QALL(COLUMNS('|T.D'),¸mp} -- { clientError SYNTAX_ERROR } diff --git a/tests/queries/0_stateless/02475_join_bug_42832.reference b/tests/queries/0_stateless/02475_join_bug_42832.reference new file mode 100644 index 00000000000..e5310261d0a --- /dev/null +++ b/tests/queries/0_stateless/02475_join_bug_42832.reference @@ -0,0 +1,2 @@ +4 6 +4 4 diff --git a/tests/queries/0_stateless/02475_join_bug_42832.sql b/tests/queries/0_stateless/02475_join_bug_42832.sql new file mode 100644 index 00000000000..e383949fb22 --- /dev/null +++ b/tests/queries/0_stateless/02475_join_bug_42832.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS tab1; +DROP TABLE IF EXISTS tab2; + +SET allow_suspicious_low_cardinality_types = 1; + +CREATE TABLE tab1 (a1 Int32, b1 Int32, val UInt64) ENGINE = MergeTree ORDER BY a1; +CREATE TABLE tab2 (a2 LowCardinality(Int32), b2 Int32) ENGINE = MergeTree ORDER BY a2; + +INSERT INTO tab1 SELECT number, number, 1 from numbers(4); +INSERT INTO tab2 SELECT number + 2, number + 2 from numbers(4); + +SELECT sum(val), count(val) FROM tab1 FULL OUTER JOIN tab2 ON b1 - 2 = a2 OR a1 = b2 SETTINGS join_use_nulls = 0; +SELECT sum(val), count(val) FROM tab1 FULL OUTER JOIN tab2 ON b1 - 2 = a2 OR a1 = b2 SETTINGS join_use_nulls = 1; + +DROP TABLE IF EXISTS tab1; +DROP TABLE IF EXISTS tab2; diff --git a/tests/queries/1_stateful/00096_obfuscator_save_load.sh b/tests/queries/1_stateful/00096_obfuscator_save_load.sh index c90eee1d0f9..a88dfcdb9b9 100755 --- a/tests/queries/1_stateful/00096_obfuscator_save_load.sh +++ b/tests/queries/1_stateful/00096_obfuscator_save_load.sh @@ -4,12 +4,14 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh +model=$(mktemp "$CLICKHOUSE_TMP/obfuscator-model-XXXXXX.bin") + $CLICKHOUSE_CLIENT --max_threads 1 --query="SELECT URL, Title, SearchPhrase FROM test.hits LIMIT 1000" > "${CLICKHOUSE_TMP}"/data.tsv -$CLICKHOUSE_OBFUSCATOR --structure "URL String, Title String, SearchPhrase String" --input-format TSV --output-format TSV --seed hello --limit 0 --save "${CLICKHOUSE_TMP}"/model.bin < "${CLICKHOUSE_TMP}"/data.tsv 2>/dev/null -wc -c < "${CLICKHOUSE_TMP}"/model.bin -$CLICKHOUSE_OBFUSCATOR --structure "URL String, Title String, SearchPhrase String" --input-format TSV --output-format TSV --seed hello --limit 2500 --load "${CLICKHOUSE_TMP}"/model.bin < "${CLICKHOUSE_TMP}"/data.tsv > "${CLICKHOUSE_TMP}"/data2500.tsv 2>/dev/null -rm "${CLICKHOUSE_TMP}"/model.bin +$CLICKHOUSE_OBFUSCATOR --structure "URL String, Title String, SearchPhrase String" --input-format TSV --output-format TSV --seed hello --limit 0 --save "$model" < "${CLICKHOUSE_TMP}"/data.tsv 2>/dev/null +wc -c < "$model" +$CLICKHOUSE_OBFUSCATOR --structure "URL String, Title String, SearchPhrase String" --input-format TSV --output-format TSV --seed hello --limit 2500 --load "$model" < "${CLICKHOUSE_TMP}"/data.tsv > "${CLICKHOUSE_TMP}"/data2500.tsv 2>/dev/null +rm "$model" $CLICKHOUSE_LOCAL --structure "URL String, Title String, SearchPhrase String" --input-format TSV --output-format TSV --query "SELECT count(), uniq(URL), uniq(Title), uniq(SearchPhrase) FROM table" < "${CLICKHOUSE_TMP}"/data.tsv $CLICKHOUSE_LOCAL --structure "URL String, Title String, SearchPhrase String" --input-format TSV --output-format TSV --query "SELECT count(), uniq(URL), uniq(Title), uniq(SearchPhrase) FROM table" < "${CLICKHOUSE_TMP}"/data2500.tsv diff --git a/tests/queries/1_stateful/00097_constexpr_in_index.reference b/tests/queries/1_stateful/00097_constexpr_in_index.reference new file mode 100644 index 00000000000..5080d6d4cd8 --- /dev/null +++ b/tests/queries/1_stateful/00097_constexpr_in_index.reference @@ -0,0 +1 @@ +1803 diff --git a/tests/queries/1_stateful/00097_constexpr_in_index.sql b/tests/queries/1_stateful/00097_constexpr_in_index.sql new file mode 100644 index 00000000000..b5cac75c767 --- /dev/null +++ b/tests/queries/1_stateful/00097_constexpr_in_index.sql @@ -0,0 +1,3 @@ +-- Even in presense of OR, we evaluate the "0 IN (1, 2, 3)" as a constant expression therefore it does not prevent the index analysis. + +SELECT count() FROM test.hits WHERE CounterID IN (14917930, 33034174) OR 0 IN (1, 2, 3) SETTINGS max_rows_to_read = 1000000, force_primary_key = 1; diff --git a/tests/queries/1_stateful/00168_parallel_processing_on_replicas_part_1.reference b/tests/queries/1_stateful/00168_parallel_processing_on_replicas_part_1.reference deleted file mode 100644 index 2675904dea0..00000000000 --- a/tests/queries/1_stateful/00168_parallel_processing_on_replicas_part_1.reference +++ /dev/null @@ -1,110 +0,0 @@ -Testing 00001_count_hits.sql ----> Ok! ✅ -Testing 00002_count_visits.sql ----> Ok! ✅ -Testing 00004_top_counters.sql ----> Ok! ✅ -Testing 00005_filtering.sql ----> Ok! ✅ -Testing 00006_agregates.sql ----> Ok! ✅ -Testing 00007_uniq.sql ----> Ok! ✅ -Testing 00008_uniq.sql ----> Ok! ✅ -Testing 00009_uniq_distributed.sql ----> Ok! ✅ -Testing 00010_quantiles_segfault.sql ----> Ok! ✅ -Testing 00011_sorting.sql ----> Ok! ✅ -Testing 00012_sorting_distributed.sql ----> Ok! ✅ -Skipping 00013_sorting_of_nested.sql -Testing 00014_filtering_arrays.sql ----> Ok! ✅ -Testing 00015_totals_and_no_aggregate_functions.sql ----> Ok! 
✅ -Testing 00016_any_if_distributed_cond_always_false.sql ----> Ok! ✅ -Testing 00017_aggregation_uninitialized_memory.sql ----> Ok! ✅ -Testing 00020_distinct_order_by_distributed.sql ----> Ok! ✅ -Testing 00021_1_select_with_in.sql ----> Ok! ✅ -Testing 00021_2_select_with_in.sql ----> Ok! ✅ -Testing 00021_3_select_with_in.sql ----> Ok! ✅ -Testing 00022_merge_prewhere.sql ----> Ok! ✅ -Testing 00023_totals_limit.sql ----> Ok! ✅ -Testing 00024_random_counters.sql ----> Ok! ✅ -Testing 00030_array_enumerate_uniq.sql ----> Ok! ✅ -Testing 00031_array_enumerate_uniq.sql ----> Ok! ✅ -Testing 00032_aggregate_key64.sql ----> Ok! ✅ -Testing 00033_aggregate_key_string.sql ----> Ok! ✅ -Testing 00034_aggregate_key_fixed_string.sql ----> Ok! ✅ -Testing 00035_aggregate_keys128.sql ----> Ok! ✅ -Testing 00036_aggregate_hashed.sql ----> Ok! ✅ -Testing 00037_uniq_state_merge1.sql ----> Ok! ✅ -Testing 00038_uniq_state_merge2.sql ----> Ok! ✅ -Testing 00039_primary_key.sql ----> Ok! ✅ -Testing 00040_aggregating_materialized_view.sql ----> Ok! ✅ -Testing 00041_aggregating_materialized_view.sql ----> Ok! ✅ -Testing 00042_any_left_join.sql ----> Ok! ✅ -Testing 00043_any_left_join.sql ----> Ok! ✅ -Testing 00044_any_left_join_string.sql ----> Ok! ✅ -Testing 00045_uniq_upto.sql ----> Ok! ✅ -Testing 00046_uniq_upto_distributed.sql ----> Ok! ✅ -Testing 00047_bar.sql ----> Ok! ✅ -Testing 00048_min_max.sql ----> Ok! ✅ -Testing 00049_max_string_if.sql ----> Ok! ✅ -Testing 00050_min_max.sql ----> Ok! ✅ -Testing 00051_min_max_array.sql ----> Ok! ✅ -Testing 00052_group_by_in.sql ----> Ok! ✅ -Testing 00053_replicate_segfault.sql ----> Ok! ✅ -Testing 00054_merge_tree_partitions.sql ----> Ok! ✅ -Testing 00055_index_and_not.sql ----> Ok! ✅ -Testing 00056_view.sql ----> Ok! ✅ -Testing 00059_merge_sorting_empty_array_joined.sql ----> Ok! ✅ -Testing 00060_move_to_prewhere_and_sets.sql ----> Ok! ✅ -Skipping 00061_storage_buffer.sql -Testing 00062_loyalty.sql ----> Ok! ✅ -Testing 00063_loyalty_joins.sql ----> Ok! ✅ -Testing 00065_loyalty_with_storage_join.sql ----> Ok! ✅ -Testing 00066_sorting_distributed_many_replicas.sql ----> Ok! ✅ -Testing 00067_union_all.sql ----> Ok! ✅ -Testing 00068_subquery_in_prewhere.sql ----> Ok! ✅ -Testing 00069_duplicate_aggregation_keys.sql ----> Ok! ✅ -Testing 00071_merge_tree_optimize_aio.sql ----> Ok! ✅ -Testing 00072_compare_date_and_string_index.sql ----> Ok! ✅ -Testing 00073_uniq_array.sql ----> Ok! ✅ -Testing 00074_full_join.sql ----> Ok! ✅ -Testing 00075_left_array_join.sql ----> Ok! ✅ -Testing 00076_system_columns_bytes.sql ----> Ok! ✅ -Testing 00077_log_tinylog_stripelog.sql ----> Ok! ✅ -Testing 00078_group_by_arrays.sql ----> Ok! ✅ -Testing 00079_array_join_not_used_joined_column.sql ----> Ok! ✅ -Testing 00080_array_join_and_union.sql ----> Ok! ✅ -Testing 00081_group_by_without_key_and_totals.sql ----> Ok! ✅ -Testing 00082_quantiles.sql ----> Ok! ✅ -Testing 00083_array_filter.sql ----> Ok! ✅ -Testing 00084_external_aggregation.sql ----> Ok! ✅ -Testing 00085_monotonic_evaluation_segfault.sql ----> Ok! ✅ -Testing 00086_array_reduce.sql ----> Ok! ✅ -Testing 00087_where_0.sql ----> Ok! ✅ -Testing 00088_global_in_one_shard_and_rows_before_limit.sql ----> Ok! ✅ -Testing 00089_position_functions_with_non_constant_arg.sql ----> Ok! ✅ -Testing 00091_prewhere_two_conditions.sql ----> Ok! ✅ -Testing 00093_prewhere_array_join.sql ----> Ok! ✅ -Testing 00094_order_by_array_join_limit.sql ----> Ok! ✅ -Skipping 00095_hyperscan_profiler.sql -Testing 00139_like.sql ----> Ok! 
✅ -Skipping 00140_rename.sql -Testing 00141_transform.sql ----> Ok! ✅ -Testing 00142_system_columns.sql ----> Ok! ✅ -Testing 00143_transform_non_const_default.sql ----> Ok! ✅ -Testing 00144_functions_of_aggregation_states.sql ----> Ok! ✅ -Testing 00145_aggregate_functions_statistics.sql ----> Ok! ✅ -Testing 00146_aggregate_function_uniq.sql ----> Ok! ✅ -Testing 00147_global_in_aggregate_function.sql ----> Ok! ✅ -Testing 00148_monotonic_functions_and_index.sql ----> Ok! ✅ -Testing 00149_quantiles_timing_distributed.sql ----> Ok! ✅ -Testing 00150_quantiles_timing_precision.sql ----> Ok! ✅ -Testing 00151_order_by_read_in_order.sql ----> Ok! ✅ -Skipping 00151_replace_partition_with_different_granularity.sql -Skipping 00152_insert_different_granularity.sql -Testing 00153_aggregate_arena_race.sql ----> Ok! ✅ -Skipping 00154_avro.sql -Testing 00156_max_execution_speed_sample_merge.sql ----> Ok! ✅ -Skipping 00157_cache_dictionary.sql -Skipping 00158_cache_dictionary_has.sql -Testing 00160_decode_xml_component.sql ----> Ok! ✅ -Testing 00162_mmap_compression_none.sql ----> Ok! ✅ -Testing 00164_quantileBfloat16.sql ----> Ok! ✅ -Testing 00165_jit_aggregate_functions.sql ----> Ok! ✅ -Skipping 00166_explain_estimate.sql -Testing 00167_read_bytes_from_fs.sql ----> Ok! ✅ -Total failed tests: diff --git a/tests/queries/1_stateful/00168_parallel_processing_on_replicas_part_1.sh b/tests/queries/1_stateful/00168_parallel_processing_on_replicas_part_1.sh deleted file mode 100755 index ecd0d281b53..00000000000 --- a/tests/queries/1_stateful/00168_parallel_processing_on_replicas_part_1.sh +++ /dev/null @@ -1,102 +0,0 @@ -#!/usr/bin/env bash -# Tags: no-tsan, no-random-settings - -CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# shellcheck source=../shell_config.sh -. 
"$CURDIR"/../shell_config.sh - -# set -e - -# All replicas are localhost, disable `prefer_localhost_replica` option to test network interface -# Currently this feature could not work with hedged requests -# Enabling `enable_sample_offset_parallel_processing` feature could lead to intersecting marks, so some of them would be thrown away and it will lead to incorrect result of SELECT query -SETTINGS="--max_parallel_replicas=3 --use_hedged_requests=false --allow_experimental_parallel_reading_from_replicas=true" - -# Prepare tables -$CLICKHOUSE_CLIENT $SETTINGS -nm -q ''' - drop table if exists test.dist_hits SYNC; - drop table if exists test.dist_visits SYNC; - - create table test.dist_hits as test.hits engine = Distributed("test_cluster_one_shard_three_replicas_localhost", test, hits, rand()); - create table test.dist_visits as test.visits engine = Distributed("test_cluster_one_shard_three_replicas_localhost", test, visits, rand()); -'''; - -FAILED=() - -# PreviouslyFailed=( -# ) - -SkipList=( - "00013_sorting_of_nested.sql" # It contains FINAL, which is not allowed together with parallel reading - - "00061_storage_buffer.sql" - "00095_hyperscan_profiler.sql" # too long in debug (there is a --no-debug tag inside a test) - - "00140_rename.sql" # Multiple renames are not allowed with DatabaseReplicated and tags are not forwarded through this test - - "00154_avro.sql" # Plain select * with limit with Distributed table is not deterministic - "00151_replace_partition_with_different_granularity.sql" # Replace partition from Distributed is not allowed - "00152_insert_different_granularity.sql" # The same as above - - "00157_cache_dictionary.sql" # Too long in debug mode, but result is correct - "00158_cache_dictionary_has.sql" # The same as above - - "00166_explain_estimate.sql" # Distributed table returns nothing -) - -# for TESTPATH in "${PreviouslyFailed[@]}" -for TESTPATH in "$CURDIR"/*.sql; -do - TESTNAME=$(basename $TESTPATH) - NUM=$(echo "${TESTNAME}" | grep -o -P '^\d+' | sed 's/^0*//') - if [[ "${NUM}" -ge 168 ]]; then - continue - fi - - if [[ " ${SkipList[*]} " =~ ${TESTNAME} ]]; then - echo "Skipping $TESTNAME " - continue - fi - - echo -n "Testing $TESTNAME ----> " - - # prepare test - NEW_TESTNAME="/tmp/dist_$TESTNAME" - # Added g to sed command to replace all tables, not the first - cat $TESTPATH | sed -e 's/test.hits/test.dist_hits/g' | sed -e 's/test.visits/test.dist_visits/g' > $NEW_TESTNAME - - TESTNAME_RESULT="/tmp/result_$TESTNAME" - NEW_TESTNAME_RESULT="/tmp/result_dist_$TESTNAME" - - $CLICKHOUSE_CLIENT $SETTINGS -nm < $TESTPATH > $TESTNAME_RESULT - $CLICKHOUSE_CLIENT $SETTINGS -nm < $NEW_TESTNAME > $NEW_TESTNAME_RESULT - - expected=$(cat $TESTNAME_RESULT | md5sum) - actual=$(cat $NEW_TESTNAME_RESULT | md5sum) - - if [[ "$expected" != "$actual" ]]; then - FAILED+=("$TESTNAME") - echo "Failed! ❌" - echo "Plain:" - cat $TESTNAME_RESULT - echo "Distributed:" - cat $NEW_TESTNAME_RESULT - else - echo "Ok! 
✅" - fi -done - - -echo "Total failed tests: " -# Iterate the loop to read and print each array element -for value in "${FAILED[@]}" -do - echo "🔺 $value" -done - -# Drop tables - -$CLICKHOUSE_CLIENT $SETTINGS -nm -q ''' - drop table if exists test.dist_hits SYNC; - drop table if exists test.dist_visits SYNC; -'''; diff --git a/tests/queries/1_stateful/00175_obfuscator_schema_inference.sh b/tests/queries/1_stateful/00175_obfuscator_schema_inference.sh index 8ff0d2fa648..771c7ab5436 100755 --- a/tests/queries/1_stateful/00175_obfuscator_schema_inference.sh +++ b/tests/queries/1_stateful/00175_obfuscator_schema_inference.sh @@ -4,6 +4,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +model=$(mktemp "$CLICKHOUSE_TMP/obfuscator-model-XXXXXX.bin") + # Compared to explicitly specifying the structure of the input, # schema inference adds Nullable(T) to all types, so the model and the results # are a bit different from test '00175_obfuscator_schema_inference.sh' @@ -14,10 +16,10 @@ $CLICKHOUSE_CLIENT --max_threads 1 --query="SELECT URL, Title, SearchPhrase FROM $CLICKHOUSE_OBFUSCATOR --input-format TSV --output-format TSV --seed hello --limit 2500 < "${CLICKHOUSE_TMP}"/data.tsv > "${CLICKHOUSE_TMP}"/data2500.tsv 2>/dev/null # Test obfuscator with saving the model -$CLICKHOUSE_OBFUSCATOR --input-format TSV --output-format TSV --seed hello --limit 0 --save "${CLICKHOUSE_TMP}"/model.bin < "${CLICKHOUSE_TMP}"/data.tsv 2>/dev/null -wc -c < "${CLICKHOUSE_TMP}"/model.bin -$CLICKHOUSE_OBFUSCATOR --input-format TSV --output-format TSV --seed hello --limit 2500 --load "${CLICKHOUSE_TMP}"/model.bin < "${CLICKHOUSE_TMP}"/data.tsv > "${CLICKHOUSE_TMP}"/data2500_load_from_model.tsv 2>/dev/null -rm "${CLICKHOUSE_TMP}"/model.bin +$CLICKHOUSE_OBFUSCATOR --input-format TSV --output-format TSV --seed hello --limit 0 --save "$model" < "${CLICKHOUSE_TMP}"/data.tsv 2>/dev/null +wc -c < "$model" +$CLICKHOUSE_OBFUSCATOR --input-format TSV --output-format TSV --seed hello --limit 2500 --load "$model" < "${CLICKHOUSE_TMP}"/data.tsv > "${CLICKHOUSE_TMP}"/data2500_load_from_model.tsv 2>/dev/null +rm "$model" $CLICKHOUSE_LOCAL --structure "URL String, Title String, SearchPhrase String" --input-format TSV --output-format TSV --query "SELECT count(), uniq(URL), uniq(Title), uniq(SearchPhrase) FROM table" < "${CLICKHOUSE_TMP}"/data.tsv $CLICKHOUSE_LOCAL --structure "URL String, Title String, SearchPhrase String" --input-format TSV --output-format TSV --query "SELECT count(), uniq(URL), uniq(Title), uniq(SearchPhrase) FROM table" < "${CLICKHOUSE_TMP}"/data2500.tsv diff --git a/utils/CMakeLists.txt b/utils/CMakeLists.txt index a79982bbd61..92a97a9c60e 100644 --- a/utils/CMakeLists.txt +++ b/utils/CMakeLists.txt @@ -44,5 +44,3 @@ if (NOT DEFINED ENABLE_UTILS OR ENABLE_UTILS) add_subdirectory (memcpy-bench) endif () endif () - -add_subdirectory (package) diff --git a/utils/compressor/decompress_perf.cpp b/utils/compressor/decompress_perf.cpp index e3210164d79..891a6d3d1dd 100644 --- a/utils/compressor/decompress_perf.cpp +++ b/utils/compressor/decompress_perf.cpp @@ -107,8 +107,12 @@ protected: if (variant == LZ4_REFERENCE) { - if (LZ4_decompress_fast(compressed_buffer + COMPRESSED_BLOCK_HEADER_SIZE, to, size_decompressed) < 0) + if (LZ4_decompress_fast( + compressed_buffer + COMPRESSED_BLOCK_HEADER_SIZE, to, + static_cast(size_decompressed)) < 0) + { throw Exception("Cannot LZ4_decompress_fast", ErrorCodes::CANNOT_DECOMPRESS); + } } else 
LZ4::decompress(compressed_buffer + COMPRESSED_BLOCK_HEADER_SIZE, to, size_compressed_without_checksum, size_decompressed, perf_stat); diff --git a/utils/iotest/iotest_nonblock.cpp b/utils/iotest/iotest_nonblock.cpp index 33fab4d04e6..32c86282743 100644 --- a/utils/iotest/iotest_nonblock.cpp +++ b/utils/iotest/iotest_nonblock.cpp @@ -101,7 +101,7 @@ int mainImpl(int argc, char ** argv) size_t ops = 0; while (ops < count) { - if (poll(polls.data(), descriptors, -1) <= 0) + if (poll(polls.data(), static_cast<nfds_t>(descriptors), -1) <= 0) throwFromErrno("poll failed", ErrorCodes::SYSTEM_ERROR); for (size_t i = 0; i < descriptors; ++i) { diff --git a/utils/keeper-data-dumper/main.cpp b/utils/keeper-data-dumper/main.cpp index 0762c740ac1..dd3c3a4e2ad 100644 --- a/utils/keeper-data-dumper/main.cpp +++ b/utils/keeper-data-dumper/main.cpp @@ -63,7 +63,7 @@ int main(int argc, char *argv[]) SnapshotsQueue snapshots_queue{1}; CoordinationSettingsPtr settings = std::make_shared<CoordinationSettings>(); KeeperContextPtr keeper_context = std::make_shared<KeeperContext>(); - auto state_machine = std::make_shared<KeeperStateMachine>(queue, snapshots_queue, argv[1], settings, keeper_context); + auto state_machine = std::make_shared<KeeperStateMachine>(queue, snapshots_queue, argv[1], settings, keeper_context, nullptr); state_machine->init(); size_t last_commited_index = state_machine->last_commit_index(); diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 65ec5ddec01..47dbec5a5f8 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,12 +1,18 @@ +v22.10.2.11-stable 2022-11-01 +v22.10.1.1877-stable 2022-10-26 +v22.9.4.32-stable 2022-10-26 v22.9.3.18-stable 2022-09-30 v22.9.2.7-stable 2022-09-23 v22.9.1.2603-stable 2022-09-22 +v22.8.8.3-lts 2022-10-27 +v22.8.7.34-lts 2022-10-26 v22.8.6.71-lts 2022-09-30 v22.8.5.29-lts 2022-09-13 v22.8.4.7-lts 2022-08-31 v22.8.3.13-lts 2022-08-29 v22.8.2.11-lts 2022-08-23 v22.8.1.2097-lts 2022-08-18 +v22.7.7.24-stable 2022-10-26 v22.7.6.74-stable 2022-09-30 v22.7.5.13-stable 2022-08-29 v22.7.4.16-stable 2022-08-23 @@ -31,6 +37,7 @@ v22.4.5.9-stable 2022-05-06 v22.4.4.7-stable 2022-04-29 v22.4.3.3-stable 2022-04-26 v22.4.2.1-stable 2022-04-22 +v22.3.14.23-lts 2022-10-28 v22.3.13.80-lts 2022-09-30 v22.3.12.19-lts 2022-08-29 v22.3.11.12-lts 2022-08-10 diff --git a/utils/package/CMakeLists.txt b/utils/package/CMakeLists.txt deleted file mode 100644 index 8c8a09adc0f..00000000000 --- a/utils/package/CMakeLists.txt +++ /dev/null @@ -1 +0,0 @@ -add_subdirectory (arch) diff --git a/utils/package/arch/CMakeLists.txt b/utils/package/arch/CMakeLists.txt deleted file mode 100644 index 4ee754fec56..00000000000 --- a/utils/package/arch/CMakeLists.txt +++ /dev/null @@ -1,2 +0,0 @@ -include ("${ClickHouse_SOURCE_DIR}/cmake/version.cmake") -configure_file (PKGBUILD.in PKGBUILD) diff --git a/utils/package/arch/PKGBUILD.in b/utils/package/arch/PKGBUILD.in deleted file mode 100644 index 4e068e8b8a2..00000000000 --- a/utils/package/arch/PKGBUILD.in +++ /dev/null @@ -1,33 +0,0 @@ -pkgname=clickhouse -pkgver=${VERSION_STRING} -pkgrel=1 -pkgdesc='An open-source column-oriented database management system that allows generating analytical data reports in real time' -arch=('x86_64') -url='https://clickhouse.com/' -license=('Apache') - -package() { - install -dm 755 $pkgdir/usr/lib/tmpfiles.d - install -dm 755 $pkgdir/usr/lib/sysusers.d - install -Dm 644 ${CMAKE_CURRENT_SOURCE_DIR}/clickhouse.tmpfiles $pkgdir/usr/lib/tmpfiles.d/clickhouse.conf - install -Dm 644 
${CMAKE_CURRENT_SOURCE_DIR}/clickhouse.sysusers $pkgdir/usr/lib/sysusers.d/clickhouse.conf - install -dm 755 $pkgdir/etc/clickhouse-server/config.d - install -Dm 644 ${CMAKE_CURRENT_SOURCE_DIR}/logging.xml $pkgdir/etc/clickhouse-server/config.d/logging.xml - # This code was requisited from kmeaw@ https://aur.archlinux.org/packages/clickhouse/ . - SRC=${ClickHouse_SOURCE_DIR} - BIN=${ClickHouse_BINARY_DIR} - mkdir -p $pkgdir/etc/clickhouse-server/ $pkgdir/etc/clickhouse-client/ - mkdir -p $pkgdir/usr/bin/ - mkdir -p $pkgdir/usr/lib/systemd/system - ln -s clickhouse-client $pkgdir/usr/bin/clickhouse-server - cp $SRC/programs/server/config.xml $SRC/programs/server/users.xml $pkgdir/etc/clickhouse-server/ - cp $BIN/programs/clickhouse $pkgdir/usr/bin/clickhouse-client - patchelf --remove-rpath $pkgdir/usr/bin/clickhouse-client - patchelf --replace-needed libz.so.1 libz-ng.so.1 $pkgdir/usr/bin/clickhouse-client - cp $SRC/programs/client/clickhouse-client.xml $pkgdir/etc/clickhouse-client/config.xml - compiler="libclickhouse-compiler.so" - if ! pacman -Q clang | grep '^clang 7'; then - compiler="" - fi - cp $SRC/debian/clickhouse-server.service $pkgdir/usr/lib/systemd/system -} diff --git a/utils/package/arch/README.md b/utils/package/arch/README.md deleted file mode 100644 index 0db5aac8080..00000000000 --- a/utils/package/arch/README.md +++ /dev/null @@ -1,17 +0,0 @@ -### Build Arch Linux package - -From binary directory: - -``` -make -cd utils/package/arch -makepkg -``` - -### Install and start ClickHouse server - -``` -pacman -U clickhouse-*.pkg.tar.xz -systemctl enable clickhouse-server -systemctl start clickhouse-server -``` diff --git a/utils/package/arch/clickhouse.sysusers b/utils/package/arch/clickhouse.sysusers deleted file mode 100644 index 4381c52c4f2..00000000000 --- a/utils/package/arch/clickhouse.sysusers +++ /dev/null @@ -1,3 +0,0 @@ -u clickhouse - "ClickHouse user" /nonexistent /bin/false -g clickhouse - "ClickHouse group" -m clickhouse clickhouse diff --git a/utils/package/arch/clickhouse.tmpfiles b/utils/package/arch/clickhouse.tmpfiles deleted file mode 100644 index 631aa895f2f..00000000000 --- a/utils/package/arch/clickhouse.tmpfiles +++ /dev/null @@ -1 +0,0 @@ -d /var/lib/clickhouse 0700 clickhouse clickhouse diff --git a/utils/package/arch/logging.xml b/utils/package/arch/logging.xml deleted file mode 100644 index c7a78442424..00000000000 --- a/utils/package/arch/logging.xml +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - diff --git a/utils/self-extracting-executable/compressor.cpp b/utils/self-extracting-executable/compressor.cpp index d8b4fdbb038..0c0c85838da 100644 --- a/utils/self-extracting-executable/compressor.cpp +++ b/utils/self-extracting-executable/compressor.cpp @@ -356,7 +356,7 @@ int compressFiles(const char* out_name, const char* exec, char* filenames[], int return 0; } -int copy_decompressor(int input_fd, int decompressor_size, int output_fd) +int copy_decompressor(int input_fd, ssize_t decompressor_size, int output_fd) { const ssize_t buf_size = 1ul<<19; auto buf_memory = std::make_unique<char[]>(buf_size); @@ -411,7 +411,7 @@ int copy_decompressor_self(const char *self, int output_fd) } char * end = nullptr; - int decompressor_size = strtol(size_str, &end, 10); + ssize_t decompressor_size = strtol(size_str, &end, 10); if (*end != 0) { std::cerr << "Error: unable to extract decompressor" << std::endl; @@ -519,7 +519,7 @@ int main(int argc, char* argv[]) if (p[0] != 0) { char * end = nullptr; - level = strtol(p, &end, 10); + level = static_cast<int>(strtol(p, &end, 10)); if 
(*end != 0) { std::cerr << "Error: level [" << p << "] is not valid" << std::endl; diff --git a/utils/self-extracting-executable/decompressor.cpp b/utils/self-extracting-executable/decompressor.cpp index c997526d38d..be25d315d68 100644 --- a/utils/self-extracting-executable/decompressor.cpp +++ b/utils/self-extracting-executable/decompressor.cpp @@ -329,7 +329,7 @@ int decompressFiles(int input_fd, char * path, char * name, bool & have_compress int read_exe_path(char *exe, size_t buf_sz) { - uint32_t size = buf_sz; + uint32_t size = static_cast<uint32_t>(buf_sz); char apple[size]; if (_NSGetExecutablePath(apple, &size) != 0) return 1; @@ -514,7 +514,7 @@ int main(int/* argc*/, char* argv[]) return 1; } - if (chmod(self, decompressed_umask)) + if (chmod(self, static_cast<mode_t>(decompressed_umask))) { perror("chmod"); return 1;