Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-14 19:45:11 +00:00

Merge branch 'master' into vdimir/mutliple_storage_join

Commit 815767a064
.github/workflows/pull_request.yml (vendored, 35 changes)
@@ -1308,6 +1308,40 @@ jobs:
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH"
  FunctionalStatelessTestReleaseAnalyzer:
    needs: [BuilderDebRelease]
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/stateless_analyzer
          REPORTS_PATH=${{runner.temp}}/reports_dir
          CHECK_NAME=Stateless tests (release, analyzer)
          REPO_COPY=${{runner.temp}}/stateless_analyzer/ClickHouse
          KILL_TIMEOUT=10800
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3
        with:
          path: ${{ env.REPORTS_PATH }}
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
      - name: Functional test
        run: |
          sudo rm -fr "$TEMP_PATH"
          mkdir -p "$TEMP_PATH"
          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
          cd "$REPO_COPY/tests/ci"
          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
      - name: Cleanup
        if: always()
        run: |
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH"
  FunctionalStatelessTestReleaseS3_0:
    needs: [BuilderDebRelease]
    runs-on: [self-hosted, func-tester]
@@ -4755,6 +4789,7 @@ jobs:
      - FunctionalStatelessTestReleaseDatabaseReplicated2
      - FunctionalStatelessTestReleaseDatabaseReplicated3
      - FunctionalStatelessTestReleaseWideParts
      - FunctionalStatelessTestReleaseAnalyzer
      - FunctionalStatelessTestAarch64
      - FunctionalStatelessTestAsan0
      - FunctionalStatelessTestAsan1
CHANGELOG.md (150 changes)
@@ -1,4 +1,5 @@
### Table of Contents
**[ClickHouse release v23.4, 2023-04-26](#234)**<br/>
**[ClickHouse release v23.3 LTS, 2023-03-30](#233)**<br/>
**[ClickHouse release v23.2, 2023-02-23](#232)**<br/>
**[ClickHouse release v23.1, 2023-01-25](#231)**<br/>
@@ -6,6 +7,155 @@
# 2023 Changelog

### <a id="234"></a> ClickHouse release 23.4 LTS, 2023-04-26

#### Backward Incompatible Change
* Formatter '%M' in function formatDateTime() now prints the month name instead of the minutes. This makes the behavior consistent with MySQL. The previous behavior can be restored using setting "formatdatetime_parsedatetime_m_is_month_name = 0"; a usage sketch follows this section. [#47246](https://github.com/ClickHouse/ClickHouse/pull/47246) ([Robert Schulze](https://github.com/rschu1ze)).
* This change makes sense only if you are using the virtual filesystem cache. If `path` in the virtual filesystem cache configuration is not empty and is not an absolute path, then it will be put in `<clickhouse server data directory>/caches/<path_from_cache_config>`. [#48784](https://github.com/ClickHouse/ClickHouse/pull/48784) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Primary/secondary indices and sorting keys with identical expressions are now rejected. This behavior can be disabled using setting `allow_suspicious_indices`. [#48536](https://github.com/ClickHouse/ClickHouse/pull/48536) ([凌涛](https://github.com/lingtaolf)).
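A quick sketch of the `formatDateTime` change above; the literal timestamp is illustrative, while the `%M` semantics and the compatibility setting come from the entry itself:

```sql
-- 23.4 default: '%M' prints the month name, as in MySQL.
SELECT formatDateTime(toDateTime('2023-04-26 12:34:56'), '%M');  -- 'April'

-- Restore the previous behavior ('%M' = minutes) for the session:
SET formatdatetime_parsedatetime_m_is_month_name = 0;
SELECT formatDateTime(toDateTime('2023-04-26 12:34:56'), '%M');  -- '34'
```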
#### New Feature
* Support new aggregate functions `quantileGK`/`quantilesGK`, similar to [approx_percentile](https://spark.apache.org/docs/latest/api/sql/index.html#approx_percentile) in Spark. For the Greenwald-Khanna algorithm, see http://infolab.stanford.edu/~datar/courses/cs361a/papers/quantiles.pdf. [#46428](https://github.com/ClickHouse/ClickHouse/pull/46428) ([李扬](https://github.com/taiyang-li)).
* Add a statement `SHOW COLUMNS` which shows distilled information from `system.columns`. [#48017](https://github.com/ClickHouse/ClickHouse/pull/48017) ([Robert Schulze](https://github.com/rschu1ze)).
* Added `LIGHTWEIGHT` and `PULL` modifiers for the `SYSTEM SYNC REPLICA` query. The `LIGHTWEIGHT` version waits for fetches and drop-ranges only (merges and mutations are ignored). The `PULL` version pulls new entries from ZooKeeper and does not wait for them. Fixes [#47794](https://github.com/ClickHouse/ClickHouse/issues/47794). [#48085](https://github.com/ClickHouse/ClickHouse/pull/48085) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Add `kafkaMurmurHash` function for compatibility with Kafka DefaultPartitioner. Closes [#47834](https://github.com/ClickHouse/ClickHouse/issues/47834). [#48185](https://github.com/ClickHouse/ClickHouse/pull/48185) ([Nikolay Degterinsky](https://github.com/evillique)).
* Allow easily creating a user with the same grants as the current user by using `GRANT CURRENT GRANTS`. [#48262](https://github.com/ClickHouse/ClickHouse/pull/48262) ([pufit](https://github.com/pufit)).
* Add statistical aggregate function `kolmogorovSmirnovTest`. Closes [#48228](https://github.com/ClickHouse/ClickHouse/issues/48228). [#48325](https://github.com/ClickHouse/ClickHouse/pull/48325) ([FFFFFFFHHHHHHH](https://github.com/FFFFFFFHHHHHHH)).
* Added a `lost_part_count` column to the `system.replicas` table. The column value shows the total number of lost parts in the corresponding table. The value is stored in ZooKeeper and can be used for monitoring instead of the non-persistent `ReplicatedDataLoss` profile event. [#48526](https://github.com/ClickHouse/ClickHouse/pull/48526) ([Sergei Trifonov](https://github.com/serxa)).
* Add `soundex` function for compatibility. Closes [#39880](https://github.com/ClickHouse/ClickHouse/issues/39880). [#48567](https://github.com/ClickHouse/ClickHouse/pull/48567) ([FriendLey](https://github.com/FriendLey)).
* Support `Map` type for JSONExtract. [#48629](https://github.com/ClickHouse/ClickHouse/pull/48629) ([李扬](https://github.com/taiyang-li)).
* Add `PrettyJSONEachRow` format to output pretty JSON with newline delimiters and 4-space indents. [#48898](https://github.com/ClickHouse/ClickHouse/pull/48898) ([Kruglov Pavel](https://github.com/Avogar)).
* Add `ParquetMetadata` input format to read Parquet file metadata. [#48911](https://github.com/ClickHouse/ClickHouse/pull/48911) ([Kruglov Pavel](https://github.com/Avogar)).
* Add `extractKeyValuePairs` function to extract key-value pairs from strings. Input strings may contain noise (e.g. log files) and do not need to be 100% in key-value-pair format; the algorithm looks for key-value pairs matching the arguments passed to the function. As of now, the function accepts the following arguments: `data_column` (mandatory), `key_value_pair_delimiter` (defaults to `:`), `pair_delimiters` (defaults to `\space \, \;`) and `quoting_character` (defaults to double quotes). A usage sketch follows this list. [#43606](https://github.com/ClickHouse/ClickHouse/pull/43606) ([Arthur Passos](https://github.com/arthurpassos)).
* Functions replaceOne(), replaceAll(), replaceRegexpOne() and replaceRegexpAll() can now be called with non-const pattern and replacement arguments. [#46589](https://github.com/ClickHouse/ClickHouse/pull/46589) ([Robert Schulze](https://github.com/rschu1ze)).
* Added functions to work with columns of type `Map`: `mapConcat`, `mapSort`, `mapExists`. [#48071](https://github.com/ClickHouse/ClickHouse/pull/48071) ([Anton Popov](https://github.com/CurtizJ)).
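A usage sketch for `extractKeyValuePairs` from the list above, relying on the defaults stated in the entry (`:` as key-value delimiter; space, `,` and `;` as pair delimiters); the sample string and the exact output rendering are illustrative:

```sql
SELECT extractKeyValuePairs('name:neymar, age:31 team:psg') AS kv;
-- kv: {'name':'neymar','age':'31','team':'psg'}
```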
#### Performance Improvement
* Reading files in `Parquet` format is now much faster. IO and decoding are parallelized (controlled by the `max_threads` setting), and only required data ranges are read; a usage sketch follows this section. [#47964](https://github.com/ClickHouse/ClickHouse/pull/47964) ([Michael Kolupaev](https://github.com/al13n321)).
* If we run a mutation with IN (subquery) like this: `ALTER TABLE t UPDATE col='new value' WHERE id IN (SELECT id FROM huge_table)` and the table `t` has multiple parts, then for each part a set for the subquery `SELECT id FROM huge_table` is built in memory. If there are many parts, this might consume a lot of memory (and lead to an OOM) and CPU. The solution is to introduce a short-lived cache of sets that are currently being built by mutation tasks. If another task of the same mutation is executed concurrently, it can look up the set in the cache, wait for it to be built, and reuse it. [#46835](https://github.com/ClickHouse/ClickHouse/pull/46835) ([Alexander Gololobov](https://github.com/davenger)).
* Only check dependencies if necessary when applying `ALTER TABLE` queries. [#48062](https://github.com/ClickHouse/ClickHouse/pull/48062) ([Raúl Marín](https://github.com/Algunenano)).
* Optimize function `mapUpdate`. [#48118](https://github.com/ClickHouse/ClickHouse/pull/48118) ([Anton Popov](https://github.com/CurtizJ)).
* Now an internal query to a local replica is sent explicitly, and data from it is received through the loopback interface. The setting `prefer_localhost_replica` is not respected for parallel replicas. This is needed for better scheduling and makes the code cleaner: the initiator is only responsible for coordinating the reading process and merging results, continuously answering requests while all the secondary queries read the data. Note: using the loopback interface is not as performant, but otherwise some replicas could starve for tasks, which could lead to even slower query execution and not utilizing all possible resources. The initialization of the coordinator is now even more lazy: all incoming requests contain the information about the reading algorithm, and we initialize the coordinator with it when the first request comes. If any replica decides to read with a different algorithm, an exception will be thrown and the query will be aborted. [#48246](https://github.com/ClickHouse/ClickHouse/pull/48246) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Do not build the set for the right side of an `IN` clause with a subquery when it is used only for the analysis of skip indexes and they are disabled by setting (`use_skip_indexes=0`). Previously it might affect the performance of queries. [#48299](https://github.com/ClickHouse/ClickHouse/pull/48299) ([Anton Popov](https://github.com/CurtizJ)).
* Query processing is parallelized right after reading `FROM file(...)`. Related to [#38755](https://github.com/ClickHouse/ClickHouse/issues/38755). [#48525](https://github.com/ClickHouse/ClickHouse/pull/48525) ([Igor Nikonov](https://github.com/devcrafter)).
* Query processing is parallelized right after reading from a data source. Affected data sources are mostly simple or external storages like table functions `url`, `file`. [#48727](https://github.com/ClickHouse/ClickHouse/pull/48727) ([Igor Nikonov](https://github.com/devcrafter)).
* Lowered contention of the ThreadPool mutex (may increase performance for a huge amount of small jobs). [#48750](https://github.com/ClickHouse/ClickHouse/pull/48750) ([Sergei Trifonov](https://github.com/serxa)).
* Reduce memory usage for multiple `ALTER DELETE` mutations. [#48522](https://github.com/ClickHouse/ClickHouse/pull/48522) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Remove the excessive connection attempts if the `skip_unavailable_shards` setting is enabled. [#48771](https://github.com/ClickHouse/ClickHouse/pull/48771) ([Azat Khuzhin](https://github.com/azat)).
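A minimal illustration of the parallelized Parquet/`file()` reading mentioned above; the file name is hypothetical, and `max_threads` is the setting named in the entry:

```sql
-- IO and decoding are parallelized up to max_threads; only required ranges are read.
SELECT count()
FROM file('data.parquet', Parquet)
SETTINGS max_threads = 16;
```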
#### Experimental Feature
* Entries in the query cache are now squashed to max_block_size and compressed. [#45912](https://github.com/ClickHouse/ClickHouse/pull/45912) ([Robert Schulze](https://github.com/rschu1ze)).
* It is now possible to define per-user quotas in the query cache. [#48284](https://github.com/ClickHouse/ClickHouse/pull/48284) ([Robert Schulze](https://github.com/rschu1ze)).
* Some fixes for parallel replicas. [#48433](https://github.com/ClickHouse/ClickHouse/pull/48433) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Implement zero-copy replication (an experimental feature) on encrypted disks. [#48741](https://github.com/ClickHouse/ClickHouse/pull/48741) ([Vitaly Baranov](https://github.com/vitlibar)).
#### Improvement
* Increase default value for `connect_timeout_with_failover_ms` to 1000 ms (because of adding async connections in https://github.com/ClickHouse/ClickHouse/pull/47229). Closes [#5188](https://github.com/ClickHouse/ClickHouse/issues/5188). [#49009](https://github.com/ClickHouse/ClickHouse/pull/49009) ([Kruglov Pavel](https://github.com/Avogar)).
* Several improvements around data lakes: make `Iceberg` work with non-partitioned data; support `Iceberg` format version v2 (previously only v1 was supported); support reading partitioned data for `DeltaLake`/`Hudi`; faster reading of `DeltaLake` metadata by using Delta's checkpoint files; fixed incorrect `Hudi` reads (previously it incorrectly chose which data to read and therefore could correctly read only small tables); made these engines pick up updates of changed data (previously the state was set on table creation); proper testing for `Iceberg`/`DeltaLake`/`Hudi` using Spark. [#47307](https://github.com/ClickHouse/ClickHouse/pull/47307) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add async connection to socket and async writing to socket. Make creating connections and sending query/external tables async across shards. Refactor code with fibers. Closes [#46931](https://github.com/ClickHouse/ClickHouse/issues/46931). We will be able to increase `connect_timeout_with_failover_ms` by default after this PR (https://github.com/ClickHouse/ClickHouse/issues/5188). [#47229](https://github.com/ClickHouse/ClickHouse/pull/47229) ([Kruglov Pavel](https://github.com/Avogar)).
* Support config sections `keeper`/`keeper_server` as an alternative to `zookeeper`. Closes [#34766](https://github.com/ClickHouse/ClickHouse/issues/34766), [#34767](https://github.com/ClickHouse/ClickHouse/issues/34767). [#35113](https://github.com/ClickHouse/ClickHouse/pull/35113) ([李扬](https://github.com/taiyang-li)).
* It is possible to set the _secure_ flag in named_collections for a dictionary with a ClickHouse table source. Addresses [#38450](https://github.com/ClickHouse/ClickHouse/issues/38450). [#46323](https://github.com/ClickHouse/ClickHouse/pull/46323) ([Ilya Golshtein](https://github.com/ilejn)).
* The `bitCount` function now supports `FixedString` and `String` data types. [#49044](https://github.com/ClickHouse/ClickHouse/pull/49044) ([flynn](https://github.com/ucasfl)).
* Added configurable retries for all operations with [Zoo]Keeper for Backup queries. [#47224](https://github.com/ClickHouse/ClickHouse/pull/47224) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Enable `use_environment_credentials` for S3 by default, so the entire provider chain is constructed by default. [#47397](https://github.com/ClickHouse/ClickHouse/pull/47397) ([Antonio Andelic](https://github.com/antonio2368)).
* The JSON_VALUE function is similar to Spark's get_json_object function, which supports getting a value from a JSON string by a path like '$.key', with two differences: 1. Spark's get_json_object returns null when the path does not exist, while JSON_VALUE returns an empty string; 2. Spark's get_json_object can return a complex value, such as a JSON object/array, while JSON_VALUE returns an empty string (see the sketch after this list). [#47494](https://github.com/ClickHouse/ClickHouse/pull/47494) ([KevinyhZou](https://github.com/KevinyhZou)).
* More flexible insert table structure propagation to table functions for `use_structure_from_insertion_table_in_table_functions`. Fixed an issue with name mapping and using virtual columns. The 'auto' setting is no longer needed. [#47962](https://github.com/ClickHouse/ClickHouse/pull/47962) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Do not continue retrying to connect to ZK if the query is killed or over limits. [#47985](https://github.com/ClickHouse/ClickHouse/pull/47985) ([Raúl Marín](https://github.com/Algunenano)).
* Support Enum output/input in `BSONEachRow`, allow all map key types and avoid extra calculations on output. [#48122](https://github.com/ClickHouse/ClickHouse/pull/48122) ([Kruglov Pavel](https://github.com/Avogar)).
* Support more ClickHouse types in `ORC`/`Arrow`/`Parquet` formats: Enum(8|16), (U)Int(128|256), Decimal256 (for ORC), allow reading IPv4 from Int32 values (ORC outputs IPv4 as Int32 and we couldn't read it back), fix reading Nullable(IPv6) from binary data for `ORC`. [#48126](https://github.com/ClickHouse/ClickHouse/pull/48126) ([Kruglov Pavel](https://github.com/Avogar)).
* Add columns `perform_ttl_move_on_insert`, `load_balancing` for table `system.storage_policies`, modify column `volume_type` type to `Enum8`. [#48167](https://github.com/ClickHouse/ClickHouse/pull/48167) ([lizhuoyu5](https://github.com/lzydmxy)).
* Added support for the `BACKUP ALL` command which backs up all tables and databases, including temporary and system ones. [#48189](https://github.com/ClickHouse/ClickHouse/pull/48189) ([Vitaly Baranov](https://github.com/vitlibar)).
* Function mapFromArrays supports `Map` type as an input. [#48207](https://github.com/ClickHouse/ClickHouse/pull/48207) ([李扬](https://github.com/taiyang-li)).
* The output of some SHOW PROCESSLIST is now sorted. [#48241](https://github.com/ClickHouse/ClickHouse/pull/48241) ([Robert Schulze](https://github.com/rschu1ze)).
* Per-query/per-server throttling for remote IO/local IO/BACKUPs (server settings: `max_remote_read_network_bandwidth_for_server`, `max_remote_write_network_bandwidth_for_server`, `max_local_read_bandwidth_for_server`, `max_local_write_bandwidth_for_server`, `max_backup_bandwidth_for_server`, settings: `max_remote_read_network_bandwidth`, `max_remote_write_network_bandwidth`, `max_local_read_bandwidth`, `max_local_write_bandwidth`, `max_backup_bandwidth`). [#48242](https://github.com/ClickHouse/ClickHouse/pull/48242) ([Azat Khuzhin](https://github.com/azat)).
* Support more types in `CapnProto` format: Map, (U)Int(128|256), Decimal(128|256). Allow integer conversions during input/output. [#48257](https://github.com/ClickHouse/ClickHouse/pull/48257) ([Kruglov Pavel](https://github.com/Avogar)).
* Don't throw CURRENT_WRITE_BUFFER_IS_EXHAUSTED for normal behaviour. [#48288](https://github.com/ClickHouse/ClickHouse/pull/48288) ([Raúl Marín](https://github.com/Algunenano)).
* Add new setting `keeper_map_strict_mode` which enforces extra guarantees on operations made on top of `KeeperMap` tables. [#48293](https://github.com/ClickHouse/ClickHouse/pull/48293) ([Antonio Andelic](https://github.com/antonio2368)).
* Check that the primary key type for a simple dictionary is a native unsigned integer type. Added setting `check_dictionary_primary_key` for compatibility (set `check_dictionary_primary_key = false` to disable the check). [#48335](https://github.com/ClickHouse/ClickHouse/pull/48335) ([lizhuoyu5](https://github.com/lzydmxy)).
* Don't replicate mutations for `KeeperMap` because it's unnecessary. [#48354](https://github.com/ClickHouse/ClickHouse/pull/48354) ([Antonio Andelic](https://github.com/antonio2368)).
* Allow writing/reading an unnamed tuple as a nested Message in Protobuf format. Tuple elements and Message fields are matched by position. [#48390](https://github.com/ClickHouse/ClickHouse/pull/48390) ([Kruglov Pavel](https://github.com/Avogar)).
* Support `additional_table_filters` and `additional_result_filter` settings in the new planner. Also, add a documentation entry for `additional_result_filter`. [#48405](https://github.com/ClickHouse/ClickHouse/pull/48405) ([Dmitry Novik](https://github.com/novikd)).
* `parseDateTime` now understands format string '%f' (fractional seconds). [#48420](https://github.com/ClickHouse/ClickHouse/pull/48420) ([Robert Schulze](https://github.com/rschu1ze)).
* Format string "%f" in formatDateTime() now prints "000000" if the formatted value has no fractional seconds; the previous behavior (single zero) can be restored using setting "formatdatetime_f_prints_single_zero = 1". [#48422](https://github.com/ClickHouse/ClickHouse/pull/48422) ([Robert Schulze](https://github.com/rschu1ze)).
* Don't replicate DELETE and TRUNCATE for KeeperMap. [#48434](https://github.com/ClickHouse/ClickHouse/pull/48434) ([Antonio Andelic](https://github.com/antonio2368)).
* Generate valid Decimals and Bools in generateRandom function. [#48436](https://github.com/ClickHouse/ClickHouse/pull/48436) ([Kruglov Pavel](https://github.com/Avogar)).
* Allow trailing commas in expression list of SELECT query, for example `SELECT a, b, c, FROM table`. Closes [#37802](https://github.com/ClickHouse/ClickHouse/issues/37802). [#48438](https://github.com/ClickHouse/ClickHouse/pull/48438) ([Nikolay Degterinsky](https://github.com/evillique)).
* Override `CLICKHOUSE_USER` and `CLICKHOUSE_PASSWORD` environment variables with `--user` and `--password` client parameters. Closes [#38909](https://github.com/ClickHouse/ClickHouse/issues/38909). [#48440](https://github.com/ClickHouse/ClickHouse/pull/48440) ([Nikolay Degterinsky](https://github.com/evillique)).
* Added retries to loading of data parts in `MergeTree` tables in case of retryable errors. [#48442](https://github.com/ClickHouse/ClickHouse/pull/48442) ([Anton Popov](https://github.com/CurtizJ)).
* Add support for `Date`, `Date32`, `DateTime`, `DateTime64` data types to `arrayMin`, `arrayMax`, `arrayDifference` functions. Closes [#21645](https://github.com/ClickHouse/ClickHouse/issues/21645). [#48445](https://github.com/ClickHouse/ClickHouse/pull/48445) ([Nikolay Degterinsky](https://github.com/evillique)).
* Add support for `{server_uuid}` macro. It is useful for identifying replicas in autoscaled clusters when new replicas are constantly added and removed in runtime. This closes [#48554](https://github.com/ClickHouse/ClickHouse/issues/48554). [#48563](https://github.com/ClickHouse/ClickHouse/pull/48563) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The installation script will create a hard link instead of copying if it is possible. [#48578](https://github.com/ClickHouse/ClickHouse/pull/48578) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Support `SHOW TABLE` syntax meaning the same as `SHOW CREATE TABLE`. Closes [#48580](https://github.com/ClickHouse/ClickHouse/issues/48580). [#48591](https://github.com/ClickHouse/ClickHouse/pull/48591) ([flynn](https://github.com/ucasfl)).
* HTTP temporary buffers now support working by evicting data from the virtual filesystem cache. [#48664](https://github.com/ClickHouse/ClickHouse/pull/48664) ([Vladimir C](https://github.com/vdimir)).
* Make schema inference work for `CREATE AS SELECT`. Closes [#47599](https://github.com/ClickHouse/ClickHouse/issues/47599). [#48679](https://github.com/ClickHouse/ClickHouse/pull/48679) ([flynn](https://github.com/ucasfl)).
* Added a `replicated_max_mutations_in_one_entry` setting for `ReplicatedMergeTree` that allows limiting the number of mutation commands per one `MUTATE_PART` entry (default is 10000). [#48731](https://github.com/ClickHouse/ClickHouse/pull/48731) ([Alexander Tokmakov](https://github.com/tavplubix)).
* In AggregateFunction types, don't count unused arena bytes as `read_bytes`. [#48745](https://github.com/ClickHouse/ClickHouse/pull/48745) ([Raúl Marín](https://github.com/Algunenano)).
* Fix some MySQL-related settings not being handled with the MySQL dictionary source + named collection. Closes [#48402](https://github.com/ClickHouse/ClickHouse/issues/48402). [#48759](https://github.com/ClickHouse/ClickHouse/pull/48759) ([Kseniia Sumarokova](https://github.com/kssenii)).
* If a user set `max_single_part_upload_size` to a very large value, it could lead to a crash due to a bug in the AWS S3 SDK. This fixes [#47679](https://github.com/ClickHouse/ClickHouse/issues/47679). [#48816](https://github.com/ClickHouse/ClickHouse/pull/48816) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix data race in `RabbitMQ` ([report](https://pastila.nl/?004f7100/de1505289ab5bb355e67ebe6c7cc8707)), refactor the code. [#48845](https://github.com/ClickHouse/ClickHouse/pull/48845) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add aliases `name` and `part_name` for `system.parts` and `system.part_log`. Closes [#48718](https://github.com/ClickHouse/ClickHouse/issues/48718). [#48850](https://github.com/ClickHouse/ClickHouse/pull/48850) ([sichenzhao](https://github.com/sichenzhao)).
* Functions "arrayDifferenceSupport()", "arrayCumSum()" and "arrayCumSumNonNegative()" now support input arrays of wide integer types (U)Int128/256. [#48866](https://github.com/ClickHouse/ClickHouse/pull/48866) ([cluster](https://github.com/infdahai)).
* Multi-line history in clickhouse-client is now no longer padded. This makes pasting more natural. [#48870](https://github.com/ClickHouse/ClickHouse/pull/48870) ([Joanna Hulboj](https://github.com/jh0x)).
* Implement a slight improvement for the rare case when ClickHouse is run inside LXC and LXCFS is used. LXCFS has an issue: sometimes it returns the error "Transport endpoint is not connected" when reading from a file inside `/proc`. This error was correctly logged into ClickHouse's server log. We additionally work around this issue by reopening the file. This is a minuscule change. [#48922](https://github.com/ClickHouse/ClickHouse/pull/48922) ([Real](https://github.com/RunningXie)).
* Improve memory accounting for prefetches. Randomize prefetch settings in CI. [#48973](https://github.com/ClickHouse/ClickHouse/pull/48973) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Correctly set headers for native copy operations on GCS. [#48981](https://github.com/ClickHouse/ClickHouse/pull/48981) ([Antonio Andelic](https://github.com/antonio2368)).
* Add support for specifying setting names in the command line with dashes instead of underscores, for example, `--max-threads` instead of `--max_threads`. Additionally, support Unicode dash characters like `—` instead of `--` - this is useful when you communicate with a team in another company, and a manager from that team copy-pasted code from MS Word. [#48985](https://github.com/ClickHouse/ClickHouse/pull/48985) ([alekseygolub](https://github.com/alekseygolub)).
* Add fallback to password authentication when authentication with an SSL user certificate has failed. Closes [#48974](https://github.com/ClickHouse/ClickHouse/issues/48974). [#48989](https://github.com/ClickHouse/ClickHouse/pull/48989) ([Nikolay Degterinsky](https://github.com/evillique)).
* Improve the embedded dashboard. Closes [#46671](https://github.com/ClickHouse/ClickHouse/issues/46671). [#49036](https://github.com/ClickHouse/ClickHouse/pull/49036) ([Kevin Zhang](https://github.com/Kinzeng)).
* Add profile events for log messages, so you can easily see the count of log messages by severity. [#49042](https://github.com/ClickHouse/ClickHouse/pull/49042) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* In previous versions, the `LineAsString` format worked inconsistently when the parallel parsing was enabled or not, in presence of DOS or MacOS Classic line breaks. This closes [#49039](https://github.com/ClickHouse/ClickHouse/issues/49039). [#49052](https://github.com/ClickHouse/ClickHouse/pull/49052) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The exception message about the unparsed query parameter will also tell the name of the parameter. Reimplements [#48878](https://github.com/ClickHouse/ClickHouse/issues/48878). Closes [#48772](https://github.com/ClickHouse/ClickHouse/issues/48772). [#49061](https://github.com/ClickHouse/ClickHouse/pull/49061) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
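A sketch of the `JSON_VALUE` behavior described in the list above; the return values in the comments follow the entry's comparison with Spark's `get_json_object`:

```sql
SELECT JSON_VALUE('{"a": {"b": 1}}', '$.a.b');  -- '1'
SELECT JSON_VALUE('{"a": {"b": 1}}', '$.c');    -- '' (Spark's get_json_object returns null here)
SELECT JSON_VALUE('{"a": {"b": 1}}', '$.a');    -- '' (a complex value; Spark returns the JSON object)
```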
#### Build/Testing/Packaging Improvement
* Update time zones. The following were updated: Africa/Cairo, Africa/Casablanca, Africa/El_Aaiun, America/Bogota, America/Cambridge_Bay, America/Ciudad_Juarez, America/Godthab, America/Inuvik, America/Iqaluit, America/Nuuk, America/Ojinaga, America/Pangnirtung, America/Rankin_Inlet, America/Resolute, America/Whitehorse, America/Yellowknife, Asia/Gaza, Asia/Hebron, Asia/Kuala_Lumpur, Asia/Singapore, Canada/Yukon, Egypt, Europe/Kirov, Europe/Volgograd, Singapore. [#48572](https://github.com/ClickHouse/ClickHouse/pull/48572) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Reduce the number of dependencies in the header files to speed up the build. [#47984](https://github.com/ClickHouse/ClickHouse/pull/47984) ([Dmitry Novik](https://github.com/novikd)).
* Randomize compression of marks and indices in tests. [#48286](https://github.com/ClickHouse/ClickHouse/pull/48286) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Bump internal ZSTD from 1.5.4 to 1.5.5. [#46797](https://github.com/ClickHouse/ClickHouse/pull/46797) ([Robert Schulze](https://github.com/rschu1ze)).
* Randomize vertical merges from compact to wide parts in tests. [#48287](https://github.com/ClickHouse/ClickHouse/pull/48287) ([Raúl Marín](https://github.com/Algunenano)).
* Support for CRC32 checksum in HDFS. Fix performance issues. [#48614](https://github.com/ClickHouse/ClickHouse/pull/48614) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove remainders of GCC support. [#48671](https://github.com/ClickHouse/ClickHouse/pull/48671) ([Robert Schulze](https://github.com/rschu1ze)).
* Add CI run with new analyzer infrastructure enabled. [#48719](https://github.com/ClickHouse/ClickHouse/pull/48719) ([Dmitry Novik](https://github.com/novikd)).
#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix system.query_views_log for MVs that are pushed from background threads [#46668](https://github.com/ClickHouse/ClickHouse/pull/46668) ([Azat Khuzhin](https://github.com/azat)).
* Fix several `RENAME COLUMN` bugs [#46946](https://github.com/ClickHouse/ClickHouse/pull/46946) ([alesapin](https://github.com/alesapin)).
* Fix minor highlighting issues in clickhouse-format [#47610](https://github.com/ClickHouse/ClickHouse/pull/47610) ([Natasha Murashkina](https://github.com/murfel)).
* Fix a bug in LLVM's libc++ leading to a crash when uploading parts to S3 whose size is greater than INT_MAX [#47693](https://github.com/ClickHouse/ClickHouse/pull/47693) ([Azat Khuzhin](https://github.com/azat)).
* Fix overflow in the `sparkbar` function [#48121](https://github.com/ClickHouse/ClickHouse/pull/48121) ([Vladimir C](https://github.com/vdimir)).
* Fix race in S3 [#48190](https://github.com/ClickHouse/ClickHouse/pull/48190) ([Anton Popov](https://github.com/CurtizJ)).
* Disable JIT for aggregate functions due to inconsistent behavior [#48195](https://github.com/ClickHouse/ClickHouse/pull/48195) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix alter formatting (minor) [#48289](https://github.com/ClickHouse/ClickHouse/pull/48289) ([Natasha Murashkina](https://github.com/murfel)).
* Fix CPU usage in RabbitMQ (was worsened in 23.2 after [#44404](https://github.com/ClickHouse/ClickHouse/issues/44404)) [#48311](https://github.com/ClickHouse/ClickHouse/pull/48311) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix crash in EXPLAIN PIPELINE for Merge over Distributed [#48320](https://github.com/ClickHouse/ClickHouse/pull/48320) ([Azat Khuzhin](https://github.com/azat)).
* Fix serializing LowCardinality as Arrow dictionary [#48361](https://github.com/ClickHouse/ClickHouse/pull/48361) ([Kruglov Pavel](https://github.com/Avogar)).
* Reset downloader for cache file segment in TemporaryFileStream [#48386](https://github.com/ClickHouse/ClickHouse/pull/48386) ([Vladimir C](https://github.com/vdimir)).
* Fix possible SYSTEM SYNC REPLICA stuck in case of DROP/REPLACE PARTITION [#48391](https://github.com/ClickHouse/ClickHouse/pull/48391) ([Azat Khuzhin](https://github.com/azat)).
* Fix a startup error when loading a distributed table that depends on a dictionary [#48419](https://github.com/ClickHouse/ClickHouse/pull/48419) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Don't check dependencies when renaming system tables automatically [#48431](https://github.com/ClickHouse/ClickHouse/pull/48431) ([Raúl Marín](https://github.com/Algunenano)).
* Update only affected rows in KeeperMap storage [#48435](https://github.com/ClickHouse/ClickHouse/pull/48435) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix possible segfault in the VFS cache [#48469](https://github.com/ClickHouse/ClickHouse/pull/48469) ([Kseniia Sumarokova](https://github.com/kssenii)).
* `toTimeZone` function throws an error when no constant string is provided [#48471](https://github.com/ClickHouse/ClickHouse/pull/48471) ([Jordi Villar](https://github.com/jrdi)).
* Fix logical error with IPv4 in Protobuf, add support for Date32 [#48486](https://github.com/ClickHouse/ClickHouse/pull/48486) ([Kruglov Pavel](https://github.com/Avogar)).
* "changed" flag in system.settings was calculated incorrectly for settings with multiple values [#48516](https://github.com/ClickHouse/ClickHouse/pull/48516) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Fix storage `Memory` with enabled compression [#48517](https://github.com/ClickHouse/ClickHouse/pull/48517) ([Anton Popov](https://github.com/CurtizJ)).
* Fix bracketed-paste mode messing up password input in the event of client reconnection [#48528](https://github.com/ClickHouse/ClickHouse/pull/48528) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix nested map for keys of IP and UUID types [#48556](https://github.com/ClickHouse/ClickHouse/pull/48556) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix an uncaught exception in case of parallel loader for hashed dictionaries [#48571](https://github.com/ClickHouse/ClickHouse/pull/48571) ([Azat Khuzhin](https://github.com/azat)).
* The `groupArray` aggregate function correctly works for empty result over nullable types [#48593](https://github.com/ClickHouse/ClickHouse/pull/48593) ([lgbo](https://github.com/lgbo-ustc)).
* Fix a bug in Keeper where a node was sometimes not created with scheme `auth` in the ACL. [#48595](https://github.com/ClickHouse/ClickHouse/pull/48595) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Allow IPv4 comparison operators with UInt [#48611](https://github.com/ClickHouse/ClickHouse/pull/48611) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix possible error from cache [#48636](https://github.com/ClickHouse/ClickHouse/pull/48636) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Async inserts with empty data will no longer throw an exception. [#48663](https://github.com/ClickHouse/ClickHouse/pull/48663) ([Anton Popov](https://github.com/CurtizJ)).
* Fix table dependencies in case of failed RENAME TABLE [#48683](https://github.com/ClickHouse/ClickHouse/pull/48683) ([Azat Khuzhin](https://github.com/azat)).
* If the primary key has duplicate columns (which is only possible for projections), in previous versions it might lead to a bug [#48838](https://github.com/ClickHouse/ClickHouse/pull/48838) ([Amos Bird](https://github.com/amosbird)).
* Fix for a race condition in ZooKeeper when joining send_thread/receive_thread [#48849](https://github.com/ClickHouse/ClickHouse/pull/48849) ([Alexander Gololobov](https://github.com/davenger)).
* Fix unexpected part name error when trying to drop an ignored detached part with zero-copy replication [#48862](https://github.com/ClickHouse/ClickHouse/pull/48862) ([Michael Lex](https://github.com/mlex)).
* Fix reading a `Date32` Parquet/Arrow column into a non-`Date32` column [#48864](https://github.com/ClickHouse/ClickHouse/pull/48864) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix `UNKNOWN_IDENTIFIER` error while selecting from a table with a row policy and a column with dots [#48976](https://github.com/ClickHouse/ClickHouse/pull/48976) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix aggregation by empty nullable strings [#48999](https://github.com/ClickHouse/ClickHouse/pull/48999) ([LiuNeng](https://github.com/liuneng1994)).
### <a id="233"></a> ClickHouse release 23.3 LTS, 2023-03-30

#### Upgrade Notes
@@ -421,8 +421,11 @@ endif ()
set (CMAKE_POSTFIX_VARIABLE "CMAKE_${CMAKE_BUILD_TYPE_UC}_POSTFIX")

set (CMAKE_POSITION_INDEPENDENT_CODE OFF)
if (OS_LINUX AND NOT (ARCH_AARCH64 OR ARCH_S390X))
    if (NOT SANITIZE)
        set (CMAKE_POSITION_INDEPENDENT_CODE OFF)
    endif()

if (OS_LINUX AND NOT (ARCH_AARCH64 OR ARCH_S390X) AND NOT SANITIZE)
    # Slightly more efficient code can be generated
    # It's disabled for ARM because otherwise ClickHouse cannot run on Android.
    set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -fno-pie")
@@ -235,6 +235,17 @@ ssize_t getrandom(void *buf, size_t buflen, unsigned flags)
    return syscall(SYS_getrandom, buf, buflen, flags);
}

/* Structure for scatter/gather I/O. */
struct iovec
{
    void *iov_base; /* Pointer to data. */
    size_t iov_len; /* Length of data. */
};

ssize_t preadv(int __fd, const struct iovec *__iovec, int __count, __off_t __offset)
{
    return syscall(SYS_preadv, __fd, __iovec, __count, (long)(__offset), (long)(__offset>>32));
}

#include <errno.h>
#include <limits.h>
@@ -33,8 +33,7 @@ if (SANITIZE)
    # RelWithDebInfo, and downgrade optimizations to -O1 but not to -Og, to
    # keep the binary size down.
    # TODO: try compiling with -Og and with ld.gold.
    set (MSAN_FLAGS "-fsanitize=memory -fsanitize-memory-use-after-dtor -fsanitize-memory-track-origins -fno-optimize-sibling-calls -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/msan_suppressions.txt")
    set (MSAN_FLAGS "-fsanitize=memory -fsanitize-memory-use-after-dtor -fsanitize-memory-track-origins -fno-optimize-sibling-calls -fPIC -fpie -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/msan_suppressions.txt")
    set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} ${MSAN_FLAGS}")
    set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} ${MSAN_FLAGS}")
contrib/sysroot (vendored, 2 changes)
@@ -1 +1 @@
Subproject commit f0081b2649b94837855f3bc7d05ef326b100bad8
Subproject commit e0d1b64da666afbfaa6f1ee0487c33f3fd2cd5cb
@@ -1,4 +1,3 @@
# rebuild in #36968
# docker build -t clickhouse/docs-builder .
# nodejs 17 prefers ipv6 and is broken in our environment
FROM node:16-alpine
@@ -18,7 +18,7 @@ SUCCESS_FINISH_SIGNS = ["All tests have finished", "No tests were run"]
RETRIES_SIGN = "Some tests were restarted"


def process_test_log(log_path):
def process_test_log(log_path, broken_tests):
    total = 0
    skipped = 0
    unknown = 0
@@ -62,8 +62,12 @@ def process_test_log(log_path):
            failed += 1
            test_results.append((test_name, "Timeout", test_time, []))
        elif FAIL_SIGN in line:
            failed += 1
            test_results.append((test_name, "FAIL", test_time, []))
            if test_name in broken_tests:
                success += 1
                test_results.append((test_name, "OK", test_time, []))
            else:
                failed += 1
                test_results.append((test_name, "FAIL", test_time, []))
        elif UNKNOWN_SIGN in line:
            unknown += 1
            test_results.append((test_name, "FAIL", test_time, []))
@@ -71,8 +75,21 @@ def process_test_log(log_path):
            skipped += 1
            test_results.append((test_name, "SKIPPED", test_time, []))
        else:
            success += int(OK_SIGN in line)
            test_results.append((test_name, "OK", test_time, []))
            if OK_SIGN in line and test_name in broken_tests:
                failed += 1
                test_results.append(
                    (
                        test_name,
                        "FAIL",
                        test_time,
                        [
                            "Test is expected to fail! Please, update broken_tests.txt!\n"
                        ],
                    )
                )
            else:
                success += int(OK_SIGN in line)
                test_results.append((test_name, "OK", test_time, []))
            test_end = False
    elif (
        len(test_results) > 0 and test_results[-1][1] == "FAIL" and not test_end
@@ -110,7 +127,7 @@ def process_test_log(log_path):
    )


def process_result(result_path):
def process_result(result_path, broken_tests):
    test_results = []
    state = "success"
    description = ""
@@ -134,7 +151,7 @@ def process_result(result_path):
            success_finish,
            retries,
            test_results,
        ) = process_test_log(result_path)
        ) = process_test_log(result_path, broken_tests)
        is_flacky_check = 1 < int(os.environ.get("NUM_TRIES", 1))
        logging.info("Is flaky check: %s", is_flacky_check)
        # If no tests were run (success == 0) it indicates an error (e.g. server did not start or crashed immediately)
@@ -186,9 +203,17 @@ if __name__ == "__main__":
    parser.add_argument("--in-results-dir", default="/test_output/")
    parser.add_argument("--out-results-file", default="/test_output/test_results.tsv")
    parser.add_argument("--out-status-file", default="/test_output/check_status.tsv")
    parser.add_argument("--broken-tests", default="/broken_tests.txt")
    args = parser.parse_args()

    state, description, test_results = process_result(args.in_results_dir)
    broken_tests = list()
    if os.path.exists(args.broken_tests):
        logging.info(f"File {args.broken_tests} with broken tests found")
        with open(args.broken_tests) as f:
            broken_tests = f.read().splitlines()
        logging.info(f"Broken tests in the list: {len(broken_tests)}")

    state, description, test_results = process_result(args.in_results_dir, broken_tests)
    logging.info("Result parsed")
    status = (state, description)
    write_results(args.out_results_file, args.out_status_file, test_results, status)
@@ -13,9 +13,11 @@ Supported platforms:
- AArch64
- Power9 (experimental)

## Normal Build for Development on Ubuntu
## Building on Ubuntu

The following tutorial is based on the Ubuntu Linux system. With appropriate changes, it should also work on any other Linux distribution.
The following tutorial is based on Ubuntu Linux.
With appropriate changes, it should also work on any other Linux distribution.
The minimum recommended Ubuntu version for development is 22.04 LTS.

### Install Prerequisites {#install-prerequisites}
@@ -23,13 +25,11 @@ The following tutorial is based on the Ubuntu Linux system. With appropriate cha
sudo apt-get install git cmake ccache python3 ninja-build yasm gawk
```

Or cmake3 instead of cmake on older systems.
### Install and Use the Clang compiler

### Install the latest clang (recommended)
On Ubuntu/Debian you can use LLVM's automatic installation script, see [here](https://apt.llvm.org/).

On Ubuntu/Debian you can use the automatic installation script (check [official webpage](https://apt.llvm.org/))

```bash
``` bash
sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
```
@@ -40,19 +40,17 @@ sudo apt-get install software-properties-common
sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test
```

For other Linux distributions, check the availability of the [prebuilt packages](https://releases.llvm.org/download.html) or build clang [from sources](https://clang.llvm.org/get_started.html).
For other Linux distributions, check the availability of LLVM's [prebuilt packages](https://releases.llvm.org/download.html).

#### Use the latest clang for Builds
As of April 2023, any version of Clang >= 15 will work.
GCC as a compiler is not supported.
To build with a specific Clang version:

``` bash
export CC=clang-15
export CXX=clang++-15
```

In this example we use version 15, which is the latest as of Sept 2022.

Gcc cannot be used.

### Checkout ClickHouse Sources {#checkout-clickhouse-sources}

``` bash
@@ -70,79 +68,46 @@ git clone --recursive --shallow-submodules https://github.com/ClickHouse/ClickHo
``` bash
cd ClickHouse
mkdir build
cd build
cmake ..
ninja
cmake -S . -B build
cmake --build build  # or: `cd build; ninja`
```

To create an executable, run `ninja clickhouse`.
This will create the `programs/clickhouse` executable, which can be used with `client` or `server` arguments.
To create an executable, run `cmake --build build --target clickhouse` (or: `cd build; ninja clickhouse`).
This will create the executable `build/programs/clickhouse`, which can be used with `client` or `server` arguments.

## How to Build ClickHouse on Any Linux {#how-to-build-clickhouse-on-any-linux}
## Building on Any Linux {#how-to-build-clickhouse-on-any-linux}

The build requires the following components:

- Git (used only to checkout the sources, not needed for the build)
- CMake 3.15 or newer
- CMake 3.20 or newer
- Compiler: Clang 15 or newer
- Linker: lld 15 or newer
- Ninja
- C++ compiler: clang-15 or newer
- Linker: lld
- Yasm
- Gawk

If all the components are installed, you may build in the same way as the steps above.

Example for Ubuntu Eoan:
``` bash
sudo apt update
sudo apt install git cmake ninja-build clang++ python yasm gawk
git clone --recursive https://github.com/ClickHouse/ClickHouse.git
mkdir build && cd build
cmake ../ClickHouse
ninja
```

Example for OpenSUSE Tumbleweed:

``` bash
sudo zypper install git cmake ninja clang-c++ python lld yasm gawk
git clone --recursive https://github.com/ClickHouse/ClickHouse.git
mkdir build && cd build
cmake ../ClickHouse
ninja
mkdir build
cmake -S . -B build
cmake --build build
```

Example for Fedora Rawhide:

``` bash
sudo yum update
sudo yum --nogpg install git cmake make clang python3 ccache yasm gawk
git clone --recursive https://github.com/ClickHouse/ClickHouse.git
mkdir build && cd build
cmake ../ClickHouse
make -j $(nproc)
```

Here is an example of how to build `clang` and all the llvm infrastructure from sources:

```
git clone git@github.com:llvm/llvm-project.git
mkdir llvm-build && cd llvm-build
cmake -DCMAKE_BUILD_TYPE:STRING=Release -DLLVM_ENABLE_PROJECTS=all ../llvm-project/llvm/
make -j16
sudo make install
hash clang
clang --version
```

You can install the older clang like clang-11 from packages and then use it to build the new clang from sources.

Here is an example of how to install the new `cmake` from the official website:

```
wget https://github.com/Kitware/CMake/releases/download/v3.22.2/cmake-3.22.2-linux-x86_64.sh
chmod +x cmake-3.22.2-linux-x86_64.sh
./cmake-3.22.2-linux-x86_64.sh
export PATH=/home/milovidov/work/cmake-3.22.2-linux-x86_64/bin/:${PATH}
hash cmake
mkdir build
cmake -S . -B build
cmake --build build
```

## You Don’t Have to Build ClickHouse {#you-dont-have-to-build-clickhouse}
@@ -119,7 +119,7 @@ When processing a query, the client shows:
1. Progress, which is updated no more than 10 times per second (by default). For quick queries, the progress might not have time to be displayed.
2. The formatted query after parsing, for debugging.
3. The result in the specified format.
4. The number of lines in the result, the time passed, and the average speed of query processing.
4. The number of lines in the result, the time passed, and the average speed of query processing. All data amounts refer to uncompressed data.

You can cancel a long query by pressing Ctrl+C. However, you will still need to wait a little for the server to abort the request. It is not possible to cancel a query at certain stages. If you do not wait and press Ctrl+C a second time, the client will exit.
@@ -88,6 +88,33 @@ If the query was aborted due to an exception or user cancellation, no entry is w
The size of the query cache in bytes, the maximum number of cache entries and the maximum size of individual cache entries (in bytes and in records) can be configured using different [server configuration options](server-configuration-parameters/settings.md#server_configuration_parameters_query-cache).

It is also possible to limit the cache usage of individual users using [settings profiles](settings/settings-profiles.md) and [settings constraints](settings/constraints-on-settings.md). More specifically, you can restrict the maximum amount of memory (in bytes) a user may allocate in the query cache and the maximum number of stored query results. For that, first provide configurations [query_cache_max_size_in_bytes](settings/settings.md#query-cache-max-size-in-bytes) and [query_cache_max_entries](settings/settings.md#query-cache-size-max-items) in a user profile in `users.xml`, then make both settings readonly:

``` xml
<profiles>
    <default>
        <!-- The maximum cache size in bytes for user/profile 'default' -->
        <query_cache_max_size_in_bytes>10000</query_cache_max_size_in_bytes>
        <!-- The maximum number of SELECT query results stored in the cache for user/profile 'default' -->
        <query_cache_max_entries>100</query_cache_max_entries>
        <!-- Make both settings read-only so the user cannot change them -->
        <constraints>
            <query_cache_max_size_in_bytes>
                <readonly/>
            </query_cache_max_size_in_bytes>
            <query_cache_max_entries>
                <readonly/>
            </query_cache_max_entries>
        </constraints>
    </default>
</profiles>
```
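For context, a minimal sketch of how a user constrained as above still uses the cache; `use_query_cache` is the query cache's regular per-query switch (taken from the wider query cache documentation, not from this excerpt):

```sql
-- The result is stored subject to the per-user limits configured above.
SELECT sum(number) FROM numbers(1000000) SETTINGS use_query_cache = true;
```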
To define how long a query must run at least such that its result can be cached, you can use the setting [query_cache_min_query_duration](settings/settings.md#query-cache-min-query-duration). For example, the result of query
@@ -1382,25 +1382,25 @@ If the table does not exist, ClickHouse will create it. If the structure of the

The following settings are available:

- `max_size`: The maximum cache size in bytes. 0 means the query cache is disabled. Default value: `1073741824` (1 GiB).
- `max_size_in_bytes`: The maximum cache size in bytes. 0 means the query cache is disabled. Default value: `1073741824` (1 GiB).
- `max_entries`: The maximum number of `SELECT` query results stored in the cache. Default value: `1024`.
- `max_entry_size`: The maximum size in bytes `SELECT` query results may have to be saved in the cache. Default value: `1048576` (1 MiB).
- `max_entry_rows`: The maximum number of rows `SELECT` query results may have to be saved in the cache. Default value: `30000000` (30 mil).
- `max_entry_size_in_bytes`: The maximum size in bytes `SELECT` query results may have to be saved in the cache. Default value: `1048576` (1 MiB).
- `max_entry_size_in_rows`: The maximum number of rows `SELECT` query results may have to be saved in the cache. Default value: `30000000` (30 mil).

Changed settings take effect immediately.

:::note
Data for the query cache is allocated in DRAM. If memory is scarce, make sure to set a small value for `max_size` or disable the query cache altogether.
Data for the query cache is allocated in DRAM. If memory is scarce, make sure to set a small value for `max_size_in_bytes` or disable the query cache altogether.
:::

**Example**

```xml
<query_cache>
    <max_size>1073741824</max_size>
    <max_size_in_bytes>1073741824</max_size_in_bytes>
    <max_entries>1024</max_entries>
    <max_entry_size>1048576</max_entry_size>
    <max_entry_rows>30000000</max_entry_rows>
    <max_entry_size_in_bytes>1048576</max_entry_size_in_bytes>
    <max_entry_size_in_rows>30000000</max_entry_size_in_rows>
</query_cache>
```
@@ -40,7 +40,7 @@ If the user tries to violate the constraints an exception is thrown and the sett
A few types of constraints are supported: `min`, `max`, `readonly` (with alias `const`) and `changeable_in_readonly`. The `min` and `max` constraints specify upper and lower boundaries for a numeric setting and can be used in combination. The `readonly` or `const` constraint specifies that the user cannot change the corresponding setting at all. The `changeable_in_readonly` constraint type allows the user to change the setting within the `min`/`max` range even if the `readonly` setting is set to 1; otherwise settings are not allowed to be changed in `readonly=1` mode. Note that `changeable_in_readonly` is supported only if `settings_constraints_replace_previous` is enabled:
``` xml
<access_control_improvements>
  <settings_constraints_replace_previous>true<settings_constraints_replace_previous>
  <settings_constraints_replace_previous>true</settings_constraints_replace_previous>
</access_control_improvements>
```
@@ -890,7 +890,7 @@ Write time that processor spent during execution/waiting for data to `system.pro

See also:

- [`system.processors_profile_log`](../../operations/system-tables/processors_profile_log.md#system-processors_profile_log)
- [`system.processors_profile_log`](../../operations/system-tables/processors_profile_log.md)
- [`EXPLAIN PIPELINE`](../../sql-reference/statements/explain.md#explain-pipeline)

## max_insert_block_size {#settings-max_insert_block_size}
@@ -1512,6 +1512,26 @@ Possible values:

Default value: `0`.

## query_cache_max_size_in_bytes {#query-cache-max-size-in-bytes}

The maximum amount of memory (in bytes) the current user may allocate in the query cache. 0 means unlimited.

Possible values:

- Non-negative integer.

Default value: 0 (no restriction).

## query_cache_max_entries {#query-cache-max-entries}

The maximum number of query results the current user may store in the query cache. 0 means unlimited.

Possible values:

- Non-negative integer.

Default value: 0 (no restriction).
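A small sketch for inspecting the two settings documented above from a session; `system.settings` is the standard settings introspection table, and the query itself is illustrative:

```sql
SELECT name, value, changed
FROM system.settings
WHERE name IN ('query_cache_max_size_in_bytes', 'query_cache_max_entries');
```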
## insert_quorum {#settings-insert_quorum}

Enables the quorum writes.
@@ -1,4 +1,4 @@
# system.processors_profile_log {#system-processors_profile_log}
# processors_profile_log

This table contains profiling at the processor level (which you can find in [`EXPLAIN PIPELINE`](../../sql-reference/statements/explain.md#explain-pipeline)).

@@ -73,4 +73,4 @@ Here you can see:

**See Also**

- [`EXPLAIN PIPELINE`](../../sql-reference/statements/explain.md#explain-pipeline)
- [`EXPLAIN PIPELINE`](../../sql-reference/statements/explain.md#explain-pipeline)
@ -646,7 +646,7 @@ SELECT arraySlice([1, 2, NULL, 4, 5], 2, 3) AS res;

Array elements set to `NULL` are handled as normal values.

## arraySort(\[func,\] arr, …)
## arraySort(\[func,\] arr, …) {#array_functions-sort}

Sorts the elements of the `arr` array in ascending order. If the `func` function is specified, sorting order is determined by the result of the `func` function applied to the elements of the array. If `func` accepts multiple arguments, the `arraySort` function is passed several arrays that the arguments of `func` will correspond to. Detailed examples are shown at the end of `arraySort` description.
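For orientation, a minimal sketch of both forms:

```sql
-- Plain ascending sort.
SELECT arraySort([1, 3, 3, 0]);           -- [0,1,3,3]

-- Sort by a key function: negating the key yields descending order.
SELECT arraySort(x -> -x, [1, 3, 3, 0]);  -- [3,3,1,0]
```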
@ -751,7 +751,7 @@ To improve sorting efficiency, the [Schwartzian transform](https://en.wikipedia.

Same as `arraySort` with an additional `limit` argument allowing partial sorting. Returns an array of the same size as the original array where elements in the range `[1..limit]` are sorted in ascending order. The remaining elements `(limit..N]` are in an unspecified order.

## arrayReverseSort(\[func,\] arr, …)
## arrayReverseSort(\[func,\] arr, …) {#array_functions-reverse-sort}

Sorts the elements of the `arr` array in descending order. If the `func` function is specified, `arr` is sorted according to the result of the `func` function applied to the elements of the array, and then the sorted array is reversed. If `func` accepts multiple arguments, the `arrayReverseSort` function is passed several arrays that the arguments of `func` will correspond to. Detailed examples are shown at the end of `arrayReverseSort` description.
@ -69,24 +69,27 @@ Result:

Merges an [Array](../../sql-reference/data-types/array.md) of keys and an [Array](../../sql-reference/data-types/array.md) of values into a [Map(key, value)](../../sql-reference/data-types/map.md). Notice that the second argument can also be a [Map](../../sql-reference/data-types/map.md); in that case it is cast to an Array during execution.

The function is a more convenient alternative to `CAST((key_array, value_array_or_map), 'Map(key_type, value_type)')`. For example, instead of writing `CAST((['aa', 'bb'], [4, 5]), 'Map(String, UInt32)')`, you can write `mapFromArrays(['aa', 'bb'], [4, 5])`.

**Syntax**

```sql
mapFromArrays(keys, values)
```

Alias: `MAP_FROM_ARRAYS(keys, values)`

**Arguments**

- `keys` — Given key array to create a map from. The nested type of array must be: [String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md), [LowCardinality](../../sql-reference/data-types/lowcardinality.md), [FixedString](../../sql-reference/data-types/fixedstring.md), [UUID](../../sql-reference/data-types/uuid.md), [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md), [Date32](../../sql-reference/data-types/date32.md), [Enum](../../sql-reference/data-types/enum.md)
- `values` — Given value array or map to create a map from.

**Returned value**

- A map whose keys and values are constructed from the key array and value array/map.

**Example**

Query:
@ -94,6 +97,7 @@ Query:

```sql
select mapFromArrays(['a', 'b', 'c'], [1, 2, 3])

┌─mapFromArrays(['a', 'b', 'c'], [1, 2, 3])─┐
│ {'a':1,'b':2,'c':3} │
└───────────────────────────────────────────┘
@ -391,25 +395,24 @@ Result:

│ ['eleven','11'] │
│ ['twelve','6.0'] │
└──────────────────┘
```

## mapContainsKeyLike

**Syntax**

```sql
mapContainsKeyLike(map, pattern)
```

**Arguments**

- `map` — Map. [Map](../../sql-reference/data-types/map.md).
- `pattern` — String pattern to match.

**Returned value**

- `1` if `map` contains a key like the specified pattern, `0` if not.

**Example**

Query:
@ -420,34 +423,34 @@ CREATE TABLE test (a Map(String,String)) ENGINE = Memory;

INSERT INTO test VALUES ({'abc':'abc','def':'def'}), ({'hij':'hij','klm':'klm'});

SELECT mapContainsKeyLike(a, 'a%') FROM test;
```

Result:

```
```text
┌─mapContainsKeyLike(a, 'a%')─┐
│ 1 │
│ 0 │
└─────────────────────────────┘
```
## mapExtractKeyLike

**Syntax**

```sql
mapExtractKeyLike(map, pattern)
```

**Arguments**

- `map` — Map. [Map](../../sql-reference/data-types/map.md).
- `pattern` — String pattern to match.

**Returned value**

- A map containing the elements whose keys match the specified pattern. If no elements match the pattern, an empty map is returned.

**Example**

Query:
@ -458,34 +461,34 @@ CREATE TABLE test (a Map(String,String)) ENGINE = Memory;

INSERT INTO test VALUES ({'abc':'abc','def':'def'}), ({'hij':'hij','klm':'klm'});

SELECT mapExtractKeyLike(a, 'a%') FROM test;
```

Result:

```
```text
┌─mapExtractKeyLike(a, 'a%')─┐
│ {'abc':'abc'} │
│ {} │
└────────────────────────────┘
```
## mapApply

**Syntax**

```sql
mapApply(func, map)
```

**Arguments**

- `func` — [Lambda function](../../sql-reference/functions/index.md#higher-order-functions---operator-and-lambdaparams-expr-function).
- `map` — [Map](../../sql-reference/data-types/map.md).

**Returned value**

- Returns a map obtained from the original map by application of `func(map1[i], …, mapN[i])` for each element.

**Example**

Query:
@ -497,36 +500,36 @@ FROM

SELECT map('key1', number, 'key2', number * 2) AS _map
FROM numbers(3)
)
```

Result:

```
```text
┌─r─────────────────────┐
│ {'key1':0,'key2':0} │
│ {'key1':10,'key2':20} │
│ {'key1':20,'key2':40} │
└───────────────────────┘
```
## mapFilter

**Syntax**

```sql
mapFilter(func, map)
```

**Arguments**

- `func` — [Lambda function](../../sql-reference/functions/index.md#higher-order-functions---operator-and-lambdaparams-expr-function).
- `map` — [Map](../../sql-reference/data-types/map.md).

**Returned value**

- Returns a map containing only the elements in `map` for which `func(map1[i], …, mapN[i])` returns something other than 0.

**Example**

Query:
@ -538,27 +541,27 @@ FROM

SELECT map('key1', number, 'key2', number * 2) AS _map
FROM numbers(3)
)
```

Result:

```
```text
┌─r───────────────────┐
│ {'key1':0,'key2':0} │
│ {'key2':2} │
│ {'key1':2,'key2':4} │
└─────────────────────┘
```
## mapUpdate

**Syntax**

```sql
mapUpdate(map1, map2)
```

**Arguments**

- `map1` [Map](../../sql-reference/data-types/map.md).
@ -567,19 +570,166 @@ mapUpdate(map1, map2)

**Returned value**

- Returns `map1` with values updated by the values for the corresponding keys in `map2`.

**Example**

Query:

```sql
SELECT mapUpdate(map('key1', 0, 'key3', 0), map('key1', 10, 'key2', 10)) AS map;
```

Result:

```
```text
┌─map────────────────────────────┐
│ {'key3':0,'key1':10,'key2':10} │
└────────────────────────────────┘
```
## mapConcat

**Syntax**

```sql
mapConcat(maps)
```

**Arguments**

- `maps` – Arbitrary number of arguments of [Map](../../sql-reference/data-types/map.md) type.

**Returned value**

- Returns a map with the concatenated maps passed as arguments. If the same key appears in two or more maps, all of the pairs are added to the result map, but only the first one is accessible via operator `[]`.

**Examples**

Query:

```sql
SELECT mapConcat(map('key1', 1, 'key3', 3), map('key2', 2)) AS map;
```

Result:

```text
┌─map──────────────────────────┐
│ {'key1':1,'key3':3,'key2':2} │
└──────────────────────────────┘
```

Query:

```sql
SELECT mapConcat(map('key1', 1, 'key2', 2), map('key1', 3)) AS map, map['key1'];
```

Result:

```text
┌─map──────────────────────────┬─elem─┐
│ {'key1':1,'key2':2,'key1':3} │ 1 │
└──────────────────────────────┴──────┘
```
## mapExists(\[func,\] map)

Returns 1 if there is at least one key-value pair in `map` for which `func(key, value)` returns something other than 0. Otherwise, it returns 0.

Note that `mapExists` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You can pass a lambda function to it as the first argument.

**Example**

Query:

```sql
SELECT mapExists((k, v) -> (v = 1), map('k1', 1, 'k2', 2)) AS res
```

Result:

```text
┌─res─┐
│   1 │
└─────┘
```
## mapAll(\[func,\] map)

Returns 1 if `func(key, value)` returns something other than 0 for all key-value pairs in `map`. Otherwise, it returns 0.

Note that `mapAll` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You can pass a lambda function to it as the first argument.

**Example**

Query:

```sql
SELECT mapAll((k, v) -> (v = 1), map('k1', 1, 'k2', 2)) AS res
```

Result:

```text
┌─res─┐
│   0 │
└─────┘
```
## mapSort(\[func,\] map)

Sorts the elements of the `map` in ascending order. If the `func` function is specified, the sorting order is determined by the result of the `func` function applied to the keys and values of the map.

**Examples**

``` sql
SELECT mapSort(map('key2', 2, 'key3', 1, 'key1', 3)) AS map;
```

``` text
┌─map──────────────────────────┐
│ {'key1':3,'key2':2,'key3':1} │
└──────────────────────────────┘
```

``` sql
SELECT mapSort((k, v) -> v, map('key2', 2, 'key3', 1, 'key1', 3)) AS map;
```

``` text
┌─map──────────────────────────┐
│ {'key3':1,'key2':2,'key1':3} │
└──────────────────────────────┘
```

For more details see the [reference](../../sql-reference/functions/array-functions.md#array_functions-sort) for the `arraySort` function.
## mapReverseSort(\[func,\] map)

Sorts the elements of the `map` in descending order. If the `func` function is specified, the sorting order is determined by the result of the `func` function applied to the keys and values of the map.

**Examples**

``` sql
SELECT mapReverseSort(map('key2', 2, 'key3', 1, 'key1', 3)) AS map;
```

``` text
┌─map──────────────────────────┐
│ {'key3':1,'key2':2,'key1':3} │
└──────────────────────────────┘
```

``` sql
SELECT mapReverseSort((k, v) -> v, map('key2', 2, 'key3', 1, 'key1', 3)) AS map;
```

``` text
┌─map──────────────────────────┐
│ {'key1':3,'key2':2,'key3':1} │
└──────────────────────────────┘
```

For more details see the [reference](../../sql-reference/functions/array-functions.md#array_functions-reverse-sort) for the `arrayReverseSort` function.
@ -36,6 +36,18 @@ GRANT [ON CLUSTER cluster_name] role [,...] TO {user | another_role | CURRENT_US

The `WITH ADMIN OPTION` clause grants [ADMIN OPTION](#admin-option-privilege) privilege to `user` or `role`.
The `WITH REPLACE OPTION` clause replaces old roles with new roles for the `user` or `role`; if it is not specified, roles are appended.

## Grant Current Grants Syntax

``` sql
GRANT CURRENT GRANTS{(privilege[(column_name [,...])] [,...] ON {db.table|db.*|*.*|table|*}) | ON {db.table|db.*|*.*|table|*}} TO {user | role | CURRENT_USER} [,...] [WITH GRANT OPTION] [WITH REPLACE OPTION]
```

- `privilege` — Type of privilege.
- `role` — ClickHouse user role.
- `user` — ClickHouse user account.

The `CURRENT GRANTS` statement allows you to give all of the specified privileges to the given user or role.
If no privileges are specified, the given user or role receives all available privileges of `CURRENT_USER`.
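For example, a minimal sketch (the user name `jack` is hypothetical) of handing over everything the current user holds:

```sql
-- Grant all privileges of CURRENT_USER on all databases and tables to jack.
GRANT CURRENT GRANTS ON *.* TO jack;
```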
## Usage

To use `GRANT`, your account must have the `GRANT OPTION` privilege. You can grant privileges only inside the scope of your account privileges.
@ -37,6 +37,19 @@ GRANT [ON CLUSTER cluster_name] role [,...] TO {user | another_role | CURRENT_US

`WITH ADMIN OPTION` assigns the [ADMIN OPTION](#admin-option-privilege) privilege to the user or role.
`WITH REPLACE OPTION` replaces all old roles with new roles for the `user` or `role`; if it is not specified, new roles are appended.

## Current Grants Assignment Syntax {#grant-current-grants-syntax}

```sql
GRANT CURRENT GRANTS{(privilege[(column_name [,...])] [,...] ON {db.table|db.*|*.*|table|*}) | ON {db.table|db.*|*.*|table|*}} TO {user | role | CURRENT_USER} [,...] [WITH GRANT OPTION] [WITH REPLACE OPTION]
```

- `privilege` — Type of privilege.
- `role` — ClickHouse user role.
- `user` — ClickHouse user account.

The `CURRENT GRANTS` expression allows you to assign all of the specified privileges that are available for assignment.
If the list of privileges is not specified, the given user or role receives all available privileges of `CURRENT_USER`.

## Usage {#grant-usage}

To use `GRANT`, the user must have the `GRANT OPTION` privilege. A user can grant privileges only within the scope of the privileges assigned to them.
@ -1293,7 +1293,7 @@
|
||||
|
||||
<!-- Path in ZooKeeper to store user-defined SQL functions created by the command CREATE FUNCTION.
|
||||
If not specified they will be stored locally. -->
|
||||
<!-- <user_defined_zookeeper_path>/clickhouse/user_defined<user_defined_zookeeper_path> -->
|
||||
<!-- <user_defined_zookeeper_path>/clickhouse/user_defined</user_defined_zookeeper_path> -->
|
||||
|
||||
<!-- Uncomment if you want data to be compressed 30-100% better.
|
||||
Don't do that if you just started using ClickHouse.
|
||||
@ -1517,10 +1517,10 @@
|
||||
|
||||
<!-- Configuration for the query cache -->
|
||||
<!-- <query_cache> -->
|
||||
<!-- <max_size>1073741824</max_size> -->
|
||||
<!-- <max_size_in_bytes>1073741824</max_size_in_bytes> -->
|
||||
<!-- <max_entries>1024</max_entries> -->
|
||||
<!-- <max_entry_size>1048576</max_entry_size> -->
|
||||
<!-- <max_entry_rows>30000000</max_entry_rows> -->
|
||||
<!-- <max_entry_size_in_bytes>1048576</max_entry_size_in_bytes> -->
|
||||
<!-- <max_entry_size_in_rows>30000000</max_entry_size_in_rows> -->
|
||||
<!-- </query_cache> -->
|
||||
|
||||
<!-- Uncomment if enable merge tree metadata cache -->
|
||||
|
@ -9,6 +9,7 @@

#include <Common/HashTable/HashSet.h>
#include <Common/HashTable/HashMap.h>
#include <Common/SipHash.h>
#include <IO/ReadHelpersArena.h>

namespace DB

@ -4,6 +4,7 @@

#include <IO/WriteHelpers.h>
#include <IO/ReadHelpers.h>
#include <IO/ReadHelpersArena.h>

#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypesNumber.h>

@ -2,7 +2,6 @@

#include <base/sort.h>

#include <Common/Arena.h>
#include <Common/NaNUtils.h>

#include <Columns/ColumnVector.h>

@ -29,6 +28,7 @@

namespace DB
{
struct Settings;
class Arena;

namespace ErrorCodes
{

@ -6,7 +6,6 @@

#include <Columns/ColumnsNumber.h>

#include <Common/ArenaAllocator.h>
#include <Common/assert_cast.h>
#include <base/arithmeticOverflow.h>
#include <base/sort.h>

@ -5,7 +5,6 @@

#include <Columns/ColumnTuple.h>
#include <Common/Exception.h>
#include <Common/assert_cast.h>
#include <Common/ArenaAllocator.h>
#include <Common/PODArray_fwd.h>
#include <base/types.h>
#include <DataTypes/DataTypeNullable.h>

@ -6,7 +6,6 @@

#include <Columns/ColumnVector.h>
#include <Columns/ColumnTuple.h>
#include <Common/assert_cast.h>
#include <Common/ArenaAllocator.h>
#include <Common/PODArray_fwd.h>
#include <base/types.h>
#include <DataTypes/DataTypeArray.h>

@ -14,8 +14,6 @@

#include <DataTypes/DataTypeTuple.h>
#include <DataTypes/DataTypeArray.h>

#include <Common/ArenaAllocator.h>

namespace DB
{
struct Settings;

@ -8,7 +8,6 @@

#include <DataTypes/DataTypeArray.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <Common/ArenaAllocator.h>
#include <base/range.h>
#include <bitset>

@ -2,6 +2,7 @@

#include <IO/WriteHelpers.h>
#include <IO/ReadHelpers.h>
#include <IO/ReadHelpersArena.h>

#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypesNumber.h>

@ -6,7 +6,6 @@

#include <DataTypes/DataTypesNumber.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <Common/ArenaAllocator.h>
#include <Common/assert_cast.h>

#include <AggregateFunctions/AggregateFunctionNull.h>
@ -385,8 +385,7 @@ void ColumnAggregateFunction::updateHashFast(SipHash & hash) const

/// threads, so we can't know the size of these data.
size_t ColumnAggregateFunction::byteSize() const
{
return data.size() * sizeof(data[0])
+ (my_arena ? my_arena->size() : 0);
return data.size() * sizeof(data[0]) + (my_arena ? my_arena->usedBytes() : 0);
}

size_t ColumnAggregateFunction::byteSizeAt(size_t) const

@ -395,11 +394,11 @@ size_t ColumnAggregateFunction::byteSizeAt(size_t) const

return sizeof(data[0]) + func->sizeOfData();
}

/// Like in byteSize(), the size is underestimated.
/// Similar to byteSize() the size is underestimated.
/// In this case it's also overestimated at the same time as it counts all the bytes allocated by the arena, used or not
size_t ColumnAggregateFunction::allocatedBytes() const
{
return data.allocated_bytes()
+ (my_arena ? my_arena->size() : 0);
return data.allocated_bytes() + (my_arena ? my_arena->allocatedBytes() : 0);
}

void ColumnAggregateFunction::protect()
@ -258,12 +258,11 @@ void ColumnFunction::appendArguments(const ColumnsWithTypeAndName & columns)

void ColumnFunction::appendArgument(const ColumnWithTypeAndName & column)
{
const auto & argumnet_types = function->getArgumentTypes();

const auto & argument_types = function->getArgumentTypes();
auto index = captured_columns.size();
if (!is_short_circuit_argument && !column.type->equals(*argumnet_types[index]))
if (!is_short_circuit_argument && !column.type->equals(*argument_types[index]))
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot capture column {} because it has incompatible type: "
"got {}, but {} is expected.", argumnet_types.size(), column.type->getName(), argumnet_types[index]->getName());
"got {}, but {} is expected.", argument_types.size(), column.type->getName(), argument_types[index]->getName());

captured_columns.push_back(column);
}
@ -80,7 +80,8 @@ private:

/// Last contiguous MemoryChunk of memory.
MemoryChunk * head;
size_t size_in_bytes;
size_t allocated_bytes;
size_t used_bytes;
size_t page_size;

static size_t roundUpToPageSize(size_t s, size_t page_size)

@ -119,7 +120,7 @@ private:

void NO_INLINE addMemoryChunk(size_t min_size)
{
head = new MemoryChunk(nextSize(min_size + pad_right), head);
size_in_bytes += head->size();
allocated_bytes += head->size();
}

friend class ArenaAllocator;

@ -127,9 +128,12 @@ private:

public:
explicit Arena(size_t initial_size_ = 4096, size_t growth_factor_ = 2, size_t linear_growth_threshold_ = 128 * 1024 * 1024)
: growth_factor(growth_factor_), linear_growth_threshold(linear_growth_threshold_),
head(new MemoryChunk(initial_size_, nullptr)), size_in_bytes(head->size()),
page_size(static_cast<size_t>(::getPageSize()))
: growth_factor(growth_factor_)
, linear_growth_threshold(linear_growth_threshold_)
, head(new MemoryChunk(initial_size_, nullptr))
, allocated_bytes(head->size())
, used_bytes(0)
, page_size(static_cast<size_t>(::getPageSize()))
{
}

@ -141,6 +145,7 @@ public:

/// Get piece of memory, without alignment.
char * alloc(size_t size)
{
used_bytes += size;
if (unlikely(static_cast<std::ptrdiff_t>(size) > head->end - head->pos))
addMemoryChunk(size);

@ -153,6 +158,7 @@ public:

/// Get piece of memory with alignment
char * alignedAlloc(size_t size, size_t alignment)
{
used_bytes += size;
do
{
void * head_pos = head->pos;

@ -184,6 +190,7 @@ public:

*/
void * rollback(size_t size)
{
used_bytes -= size;
head->pos -= size;
ASAN_POISON_MEMORY_REGION(head->pos, size + pad_right);
return head->pos;

@ -299,11 +306,11 @@ public:

return res;
}

/// Size of MemoryChunks in bytes.
size_t size() const
{
return size_in_bytes;
}
/// Size of all MemoryChunks in bytes.
size_t allocatedBytes() const { return allocated_bytes; }

/// Total space actually used (not counting padding or space unused by caller allocations) in all MemoryChunks in bytes.
size_t usedBytes() const { return used_bytes; }

/// Bad method, don't use it -- the MemoryChunks are not your business, the entire
/// purpose of the arena code is to manage them for you, so if you find
@ -107,10 +107,7 @@ public:

}

/// Size of the allocated pool in bytes
size_t size() const
{
return pool.size();
}
size_t allocatedBytes() const { return pool.allocatedBytes(); }
};

class SynchronizedArenaWithFreeLists : private ArenaWithFreeLists

@ -135,10 +132,10 @@ public:

}

/// Size of the allocated pool in bytes
size_t size() const
size_t allocatedBytes() const
{
std::lock_guard lock{mutex};
return ArenaWithFreeLists::size();
return ArenaWithFreeLists::allocatedBytes();
}
private:
mutable std::mutex mutex;
@ -214,13 +214,19 @@ public:

void setMaxCount(size_t max_count)
{
std::lock_guard lock(mutex);
return cache_policy->setMaxCount(max_count, lock);
cache_policy->setMaxCount(max_count, lock);
}

void setMaxSize(size_t max_size_in_bytes)
{
std::lock_guard lock(mutex);
return cache_policy->setMaxSize(max_size_in_bytes, lock);
cache_policy->setMaxSize(max_size_in_bytes, lock);
}

void setQuotaForUser(const String & user_name, size_t max_size_in_bytes, size_t max_entries)
{
std::lock_guard lock(mutex);
cache_policy->setQuotaForUser(user_name, max_size_in_bytes, max_entries, lock);
}

virtual ~CacheBase() = default;
@ -1,6 +1,7 @@

#pragma once

#include <Common/Exception.h>
#include <Common/ICachePolicyUserQuota.h>

#include <functional>
#include <memory>

@ -38,12 +39,16 @@ public:

MappedPtr mapped;
};

virtual size_t weight(std::lock_guard<std::mutex> & /* cache_lock */) const = 0;
virtual size_t count(std::lock_guard<std::mutex> & /* cache_lock */) const = 0;
virtual size_t maxSize(std::lock_guard<std::mutex>& /* cache_lock */) const = 0;
explicit ICachePolicy(CachePolicyUserQuotaPtr user_quotas_) : user_quotas(std::move(user_quotas_)) {}
virtual ~ICachePolicy() = default;

virtual size_t weight(std::lock_guard<std::mutex> & /*cache_lock*/) const = 0;
virtual size_t count(std::lock_guard<std::mutex> & /*cache_lock*/) const = 0;
virtual size_t maxSize(std::lock_guard<std::mutex>& /*cache_lock*/) const = 0;

virtual void setMaxCount(size_t /*max_count*/, std::lock_guard<std::mutex> & /* cache_lock */) { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Not implemented for cache policy"); }
virtual void setMaxSize(size_t /*max_size_in_bytes*/, std::lock_guard<std::mutex> & /* cache_lock */) { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Not implemented for cache policy"); }
virtual void setQuotaForUser(const String & user_name, size_t max_size_in_bytes, size_t max_entries, std::lock_guard<std::mutex> & /*cache_lock*/) { user_quotas->setQuotaForUser(user_name, max_size_in_bytes, max_entries); }

/// HashFunction usually hashes the entire key and the found key will be equal the provided key. In such cases, use get(). It is also
/// possible to store other, non-hashed data in the key. In that case, the found key is potentially different from the provided key.

@ -51,14 +56,15 @@ public:

virtual MappedPtr get(const Key & key, std::lock_guard<std::mutex> & /* cache_lock */) = 0;
virtual std::optional<KeyMapped> getWithKey(const Key &, std::lock_guard<std::mutex> & /*cache_lock*/) = 0;

virtual void set(const Key & key, const MappedPtr & mapped, std::lock_guard<std::mutex> & /* cache_lock */) = 0;
virtual void set(const Key & key, const MappedPtr & mapped, std::lock_guard<std::mutex> & /*cache_lock*/) = 0;

virtual void remove(const Key & key, std::lock_guard<std::mutex> & /* cache_lock */) = 0;
virtual void remove(const Key & key, std::lock_guard<std::mutex> & /*cache_lock*/) = 0;

virtual void reset(std::lock_guard<std::mutex> & /* cache_lock */) = 0;
virtual void reset(std::lock_guard<std::mutex> & /*cache_lock*/) = 0;
virtual std::vector<KeyMapped> dump() const = 0;

virtual ~ICachePolicy() = default;
protected:
CachePolicyUserQuotaPtr user_quotas;
};

}
43
src/Common/ICachePolicyUserQuota.h
Normal file

@ -0,0 +1,43 @@

#pragma once

#include <base/types.h>

namespace DB
{

/// Per-user quotas for usage of shared caches, used by ICachePolicy.
/// Currently allows to limit
/// - the maximum amount of cache memory a user may consume
/// - the maximum number of items a user can store in the cache
/// Note that caches usually also have global limits which restrict these values at cache level. Per-user quotas have no effect if they
/// exceed the global thresholds.
class ICachePolicyUserQuota
{
public:
/// Register or update the user's quota for the given resource.
virtual void setQuotaForUser(const String & user_name, size_t max_size_in_bytes, size_t max_entries) = 0;

/// Update the actual resource usage for the given user.
virtual void increaseActual(const String & user_name, size_t entry_size_in_bytes) = 0;
virtual void decreaseActual(const String & user_name, size_t entry_size_in_bytes) = 0;

/// Is the user allowed to write a new entry into the cache?
virtual bool approveWrite(const String & user_name, size_t entry_size_in_bytes) const = 0;

virtual ~ICachePolicyUserQuota() = default;
};

using CachePolicyUserQuotaPtr = std::unique_ptr<ICachePolicyUserQuota>;


class NoCachePolicyUserQuota : public ICachePolicyUserQuota
{
public:
void setQuotaForUser(const String & /*user_name*/, size_t /*max_size_in_bytes*/, size_t /*max_entries*/) override {}
void increaseActual(const String & /*user_name*/, size_t /*entry_size_in_bytes*/) override {}
void decreaseActual(const String & /*user_name*/, size_t /*entry_size_in_bytes*/) override {}
bool approveWrite(const String & /*user_name*/, size_t /*entry_size_in_bytes*/) const override { return true; }
};


}
@ -27,7 +27,8 @@ public:

* max_count == 0 means no elements size restrictions.
*/
LRUCachePolicy(size_t max_size_in_bytes_, size_t max_count_, OnWeightLossFunction on_weight_loss_function_)
: max_size_in_bytes(std::max(1uz, max_size_in_bytes_))
: Base(std::make_unique<NoCachePolicyUserQuota>())
, max_size_in_bytes(std::max(1uz, max_size_in_bytes_))
, max_count(max_count_)
, on_weight_loss_function(on_weight_loss_function_)
{
@ -10,6 +10,7 @@

M(InsertQuery, "Same as Query, but only for INSERT queries.") \
M(AsyncInsertQuery, "Same as InsertQuery, but only for asynchronous INSERT queries.") \
M(AsyncInsertBytes, "Data size in bytes of asynchronous INSERT queries.") \
M(AsyncInsertRows, "Number of rows inserted by asynchronous INSERT queries.") \
M(AsyncInsertCacheHits, "Number of times a duplicate hash id has been found in asynchronous INSERT hash id cache.") \
M(FailedQuery, "Number of failed queries.") \
M(FailedSelectQuery, "Same as FailedQuery, but only for SELECT queries.") \
@ -31,7 +31,8 @@ public:

*/
/// TODO: construct from special struct with cache policy parameters (also with max_protected_size).
SLRUCachePolicy(size_t max_size_in_bytes_, size_t max_count_, double size_ratio, OnWeightLossFunction on_weight_loss_function_)
: max_protected_size(static_cast<size_t>(max_size_in_bytes_ * std::min(1.0, size_ratio)))
: Base(std::make_unique<NoCachePolicyUserQuota>())
, max_protected_size(static_cast<size_t>(max_size_in_bytes_ * std::min(1.0, size_ratio)))
, max_size_in_bytes(max_size_in_bytes_)
, max_count(max_count_)
, on_weight_loss_function(on_weight_loss_function_)
@ -3,7 +3,6 @@

#include <base/defines.h>
#include <base/StringRef.h>
#include <Common/HashTable/StringHashMap.h>
#include <Common/Arena.h>
#include <Poco/Util/AbstractConfiguration.h>
#include <mutex>
#include <string>

@ -11,6 +10,7 @@

namespace DB
{
class Arena;

enum TLDType
{
@ -2,11 +2,80 @@

#include <Common/ICachePolicy.h>

#include <limits>
#include <unordered_map>

namespace DB
{

class PerUserTTLCachePolicyUserQuota : public ICachePolicyUserQuota
{
public:
void setQuotaForUser(const String & user_name, size_t max_size_in_bytes, size_t max_entries) override
{
quotas[user_name] = {max_size_in_bytes, max_entries};
}

void increaseActual(const String & user_name, size_t entry_size_in_bytes) override
{
auto & actual_for_user = actual[user_name];
actual_for_user.size_in_bytes += entry_size_in_bytes;
actual_for_user.num_items += 1;
}

void decreaseActual(const String & user_name, size_t entry_size_in_bytes) override
{
chassert(actual.contains(user_name));

chassert(actual[user_name].size_in_bytes >= entry_size_in_bytes);
actual[user_name].size_in_bytes -= entry_size_in_bytes;

chassert(actual[user_name].num_items >= 1);
actual[user_name].num_items -= 1;
}

bool approveWrite(const String & user_name, size_t entry_size_in_bytes) const override
{
auto it_actual = actual.find(user_name);
Resources actual_for_user{.size_in_bytes = 0, .num_items = 0}; /// assume zero actual resource consumption if user isn't found
if (it_actual != actual.end())
actual_for_user = it_actual->second;

auto it_quota = quotas.find(user_name);
Resources quota_for_user{.size_in_bytes = std::numeric_limits<size_t>::max(), .num_items = std::numeric_limits<size_t>::max()}; /// assume no threshold if no quota is found
if (it_quota != quotas.end())
quota_for_user = it_quota->second;

/// Special case: A quota configured as 0 means no threshold
if (quota_for_user.size_in_bytes == 0)
quota_for_user.size_in_bytes = std::numeric_limits<UInt64>::max();
if (quota_for_user.num_items == 0)
quota_for_user.num_items = std::numeric_limits<UInt64>::max();

/// Check size quota
if (actual_for_user.size_in_bytes + entry_size_in_bytes >= quota_for_user.size_in_bytes)
return false;

/// Check items quota
if (actual_for_user.num_items + 1 >= quota_for_user.num_items)
return false;

return true;
}

struct Resources
{
size_t size_in_bytes = 0;
size_t num_items = 0;
};

/// user name --> cache size quota (in bytes) / number of items quota
std::map<String, Resources> quotas;
/// user name --> actual cache usage (in bytes) / number of items
std::map<String, Resources> actual;
};


/// TTLCachePolicy evicts entries for which IsStaleFunction returns true.
/// The cache size (in bytes and number of entries) can be changed at runtime. It is expected to set both sizes explicitly after construction.
template <typename Key, typename Mapped, typename HashFunction, typename WeightFunction, typename IsStaleFunction>
@ -18,8 +87,9 @@ public:

using typename Base::KeyMapped;
using typename Base::OnWeightLossFunction;

TTLCachePolicy()
: max_size_in_bytes(0)
explicit TTLCachePolicy(CachePolicyUserQuotaPtr quotas_)
: Base(std::move(quotas_))
, max_size_in_bytes(0)
, max_count(0)
{
}

@ -61,8 +131,10 @@ public:

auto it = cache.find(key);
if (it == cache.end())
return;
size_in_bytes -= weight_function(*it->second);
size_t sz = weight_function(*it->second);
Base::user_quotas->decreaseActual(it->first.user_name, sz);
cache.erase(it);
size_in_bytes -= sz;
}

MappedPtr get(const Key & key, std::lock_guard<std::mutex> & /* cache_lock */) override

@ -88,35 +160,47 @@ public:

const size_t entry_size_in_bytes = weight_function(*mapped);

/// Checks against per-cache limits
auto sufficient_space_in_cache = [&]()
{
return (size_in_bytes + entry_size_in_bytes <= max_size_in_bytes) && (cache.size() + 1 <= max_count);
};

if (!sufficient_space_in_cache())
/// Checks against per-user limits
auto sufficient_space_in_cache_for_user = [&]()
{
return Base::user_quotas->approveWrite(key.user_name, entry_size_in_bytes);
};

if (!sufficient_space_in_cache() || !sufficient_space_in_cache_for_user())
{
/// Remove stale entries
for (auto it = cache.begin(); it != cache.end();)
if (is_stale_function(it->first))
{
size_in_bytes -= weight_function(*it->second);
size_t sz = weight_function(*it->second);
Base::user_quotas->decreaseActual(it->first.user_name, sz);
it = cache.erase(it);
size_in_bytes -= sz;
}
else
++it;
}

if (sufficient_space_in_cache())
if (sufficient_space_in_cache() && sufficient_space_in_cache_for_user())
{
/// Insert or replace key
if (auto it = cache.find(key); it != cache.end())
{
size_in_bytes -= weight_function(*it->second);
size_t sz = weight_function(*it->second);
Base::user_quotas->decreaseActual(it->first.user_name, sz);
cache.erase(it); // stupid bug: (*) doesn't replace existing entries (likely due to custom hash function), need to erase explicitly
size_in_bytes -= sz;
}

cache[key] = std::move(mapped); // (*)
size_in_bytes += entry_size_in_bytes;
Base::user_quotas->increaseActual(key.user_name, entry_size_in_bytes);
}
}
@ -270,7 +270,7 @@ int main(int argc, char ** argv)

watch.stop();
std::cerr
<< "Insert info arena. Bytes: " << arena.size()
<< "Insert info arena. Bytes: " << arena.allocatedBytes()
<< ", elapsed: " << watch.elapsedSeconds()
<< " (" << data.size() / watch.elapsedSeconds() << " elem/sec.,"
<< " " << sum_strings_size / 1048576.0 / watch.elapsedSeconds() << " MiB/sec.)"

@ -298,7 +298,7 @@ int main(int argc, char ** argv)

watch.stop();
std::cerr
<< "Randomly remove and insert elements. Bytes: " << arena.size()
<< "Randomly remove and insert elements. Bytes: " << arena.allocatedBytes()
<< ", elapsed: " << watch.elapsedSeconds()
<< " (" << data.size() / watch.elapsedSeconds() << " elem/sec.,"
<< " " << bytes / 1048576.0 / watch.elapsedSeconds() << " MiB/sec.)"

@ -331,7 +331,7 @@ int main(int argc, char ** argv)

watch.stop();
std::cerr
<< "Filling cache. Bytes: " << arena.size()
<< "Filling cache. Bytes: " << arena.allocatedBytes()
<< ", elapsed: " << watch.elapsedSeconds()
<< " (" << data.size() / watch.elapsedSeconds() << " elem/sec.,"
<< " " << bytes / 1048576.0 / watch.elapsedSeconds() << " MiB/sec.)"
@ -47,7 +47,7 @@ void setThreadName(const char * name)

#endif
DB::throwFromErrno("Cannot set thread name with prctl(PR_SET_NAME, ...)", DB::ErrorCodes::PTHREAD_ERROR);

memcpy(thread_name, name, 1 + strlen(name));
memcpy(thread_name, name, std::min<size_t>(1 + strlen(name), THREAD_NAME_SIZE - 1));
}

const char * getThreadName()
@ -333,10 +333,7 @@ public:

}
}

uint64_t keyArenaSize() const
{
return arena.size();
}
uint64_t keyArenaSize() const { return arena.allocatedBytes(); }

iterator begin() { return list.begin(); }
const_iterator begin() const { return list.cbegin(); }
@ -129,6 +129,7 @@ class IColumn;

\
M(Bool, allow_suspicious_low_cardinality_types, false, "In CREATE TABLE statement allows specifying LowCardinality modifier for types of small fixed size (8 or less). Enabling this may increase merge times and memory consumption.", 0) \
M(Bool, allow_suspicious_fixed_string_types, false, "In CREATE TABLE statement allows creating columns of type FixedString(n) with n > 256. FixedString with length >= 256 is suspicious and most likely indicates misusage", 0) \
M(Bool, allow_suspicious_indices, false, "Reject primary/secondary indexes and sorting keys with identical expressions", 0) \
M(Bool, compile_expressions, true, "Compile some scalar functions and operators to native code.", 0) \
M(UInt64, min_count_to_compile_expression, 3, "The number of identical expressions before they are JIT-compiled", 0) \
M(Bool, compile_aggregate_expressions, false, "Compile aggregate functions to native code. This feature has a bug and should not be used.", 0) \

@ -565,6 +566,8 @@ class IColumn;

M(Bool, enable_writes_to_query_cache, true, "Enable storing results of SELECT queries in the query cache", 0) \
M(Bool, enable_reads_from_query_cache, true, "Enable reading results of SELECT queries from the query cache", 0) \
M(Bool, query_cache_store_results_of_queries_with_nondeterministic_functions, false, "Store results of queries with non-deterministic functions (e.g. rand(), now()) in the query cache", 0) \
M(UInt64, query_cache_max_size_in_bytes, 0, "The maximum amount of memory (in bytes) the current user may allocate in the query cache. 0 means unlimited. ", 0) \
M(UInt64, query_cache_max_entries, 0, "The maximum number of query results the current user may store in the query cache. 0 means unlimited.", 0) \
M(UInt64, query_cache_min_query_runs, 0, "Minimum number a SELECT query must run before its result is stored in the query cache", 0) \
M(Milliseconds, query_cache_min_query_duration, 0, "Minimum time in milliseconds for a query to run for its result to be stored in the query cache.", 0) \
M(Bool, query_cache_compress_entries, true, "Compress cache entries.", 0) \
@ -80,6 +80,7 @@ namespace SettingsChangesHistory

/// It's used to implement `compatibility` setting (see https://github.com/ClickHouse/ClickHouse/issues/35972)
static std::map<ClickHouseVersion, SettingsChangesHistory::SettingsChanges> settings_changes_history =
{
{"23.4", {{"allow_suspicious_indices", true, false, "If true, index can defined with identical expressions"}}},
{"23.4", {{"connect_timeout_with_failover_ms", 50, 1000, "Increase default connect timeout because of async connect"},
{"connect_timeout_with_failover_secure_ms", 100, 1000, "Increase default secure connect timeout because of async connect"},
{"hedged_connection_timeout_ms", 100, 50, "Start new connection in hedged requests after 50 ms instead of 100 to correspond with previous connect timeout"}}},
@ -128,6 +128,13 @@ bool DataTypeMap::checkKeyType(DataTypePtr key_type)

return true;
}

DataTypePtr DataTypeMap::getNestedTypeWithUnnamedTuple() const
{
const auto & from_array = assert_cast<const DataTypeArray &>(*nested);
const auto & from_tuple = assert_cast<const DataTypeTuple &>(*from_array.getNestedType());
return std::make_shared<DataTypeArray>(std::make_shared<DataTypeTuple>(from_tuple.getElements()));
}

static DataTypePtr create(const ASTPtr & arguments)
{
if (!arguments || arguments->children.size() != 2)

@ -47,6 +47,7 @@ public:

const DataTypePtr & getValueType() const { return value_type; }
DataTypes getKeyValueTypes() const { return {key_type, value_type}; }
const DataTypePtr & getNestedType() const { return nested; }
DataTypePtr getNestedTypeWithUnnamedTuple() const;

SerializationPtr doGetDefaultSerialization() const override;
@ -17,6 +17,7 @@

#include <Databases/PostgreSQL/fetchPostgreSQLTableStructure.h>
#include <Common/quoteString.h>
#include <Common/filesystemHelpers.h>
#include <Common/logger_useful.h>
#include <filesystem>

namespace fs = std::filesystem;

@ -51,6 +52,7 @@ DatabasePostgreSQL::DatabasePostgreSQL(

, configuration(configuration_)
, pool(std::move(pool_))
, cache_tables(cache_tables_)
, log(&Poco::Logger::get("DatabasePostgreSQL(" + dbname_ + ")"))
{
cleaner_task = getContext()->getSchedulePool().createTask("PostgreSQLCleanerTask", [this]{ removeOutdatedTables(); });
cleaner_task->deactivate();

@ -192,7 +194,10 @@ StoragePtr DatabasePostgreSQL::fetchTable(const String & table_name, ContextPtr,

ColumnsDescription{columns_info->columns}, ConstraintsDescription{}, String{}, configuration.schema, configuration.on_conflict);

if (cache_tables)
{
LOG_TEST(log, "Cached table `{}`", table_name);
cached_tables[table_name] = storage;
}

return storage;
}

@ -73,6 +73,7 @@ private:

mutable Tables cached_tables;
std::unordered_set<std::string> detached_or_dropped;
BackgroundSchedulePool::TaskHolder cleaner_task;
Poco::Logger * log;

String getTableNameForLogs(const String & table_name) const;
@ -157,7 +157,7 @@ public:

});
}

return arena.size() + sizeof(Cell) * configuration.max_size_in_cells + attributes_size_in_bytes;
return arena.allocatedBytes() + sizeof(Cell) * configuration.max_size_in_cells + attributes_size_in_bytes;
}

private:

@ -1,6 +1,5 @@

#pragma once

#include <Common/Arena.h>
#include <Common/HashTable/HashMap.h>
#include <Columns/IColumn.h>
#include <Columns/ColumnDecimal.h>

@ -29,6 +28,8 @@ namespace ErrorCodes

extern const int BAD_ARGUMENTS;
}

class Arena;

/** Simple helper for getting default.
* Initialized with default value and default values column.
* If default values column is not null default value is taken from column.

@ -505,7 +505,7 @@ void FlatDictionary::calculateBytesAllocated()

bytes_allocated += hierarchical_index_bytes_allocated;
}

bytes_allocated += string_arena.size();
bytes_allocated += string_arena.allocatedBytes();
}

FlatDictionary::Attribute FlatDictionary::createAttribute(const DictionaryAttribute & dictionary_attribute)

@ -797,7 +797,7 @@ void HashedArrayDictionary<dictionary_key_type>::calculateBytesAllocated()

bytes_allocated += hierarchical_index_bytes_allocated;
}

bytes_allocated += string_arena.size();
bytes_allocated += string_arena.allocatedBytes();
}

template <DictionaryKeyType dictionary_key_type>

@ -1022,7 +1022,7 @@ void HashedDictionary<dictionary_key_type, sparse, sharded>::calculateBytesAlloc

}

for (const auto & arena : string_arenas)
bytes_allocated += arena->size();
bytes_allocated += arena->allocatedBytes();
}

template <DictionaryKeyType dictionary_key_type, bool sparse, bool sharded>

@ -541,7 +541,7 @@ template <>

void IPAddressDictionary::addAttributeSize<String>(const Attribute & attribute)
{
addAttributeSize<StringRef>(attribute);
bytes_allocated += sizeof(Arena) + attribute.string_arena->size();
bytes_allocated += sizeof(Arena) + attribute.string_arena->allocatedBytes();
}

void IPAddressDictionary::calculateBytesAllocated()

@ -5,7 +5,6 @@

#include <variant>
#include <Columns/ColumnDecimal.h>
#include <Columns/ColumnString.h>
#include <Common/Arena.h>
#include <Common/HashTable/HashMap.h>
#include <Columns/ColumnFixedString.h>
#include <Columns/ColumnVector.h>

@ -18,6 +17,8 @@

namespace DB
{
class Arena;

class IPAddressDictionary final : public IDictionary
{
public:

@ -726,7 +726,7 @@ void RangeHashedDictionary<dictionary_key_type>::calculateBytesAllocated()

if (update_field_loaded_block)
bytes_allocated += update_field_loaded_block->allocatedBytes();

bytes_allocated += string_arena.size();
bytes_allocated += string_arena.allocatedBytes();
}

template <DictionaryKeyType dictionary_key_type>

@ -10,7 +10,6 @@

#include <Columns/IColumn.h>
#include <Columns/ColumnString.h>
#include <Common/Arena.h>
#include <Common/Exception.h>
#include <Common/HashTable/Hash.h>
#include <Common/HashTable/HashSet.h>

@ -37,7 +37,6 @@ public:

template <typename Function>
void registerFunction(const std::string & name, Documentation doc = {}, CaseSensitiveness case_sensitiveness = CaseSensitive)
{

if constexpr (std::is_base_of_v<IFunction, Function>)
registerFunction(name, &adaptFunctionToOverloadResolver<Function>, std::move(doc), case_sensitiveness);
else
@ -1,15 +1,18 @@
|
||||
#pragma once
|
||||
|
||||
#include <DataTypes/DataTypesNumber.h>
|
||||
#include <DataTypes/DataTypesDecimal.h>
|
||||
#include <DataTypes/DataTypeFixedString.h>
|
||||
#include <DataTypes/DataTypeInterval.h>
|
||||
#include <DataTypes/Native.h>
|
||||
#include <Columns/ColumnVector.h>
|
||||
#include <Columns/ColumnDecimal.h>
|
||||
#include <Columns/ColumnFixedString.h>
|
||||
#include <Functions/IFunction.h>
|
||||
#include <Columns/ColumnString.h>
|
||||
#include <Columns/ColumnVector.h>
|
||||
#include <Columns/ColumnsNumber.h>
|
||||
#include <DataTypes/DataTypeFixedString.h>
|
||||
#include <DataTypes/DataTypeInterval.h>
|
||||
#include <DataTypes/DataTypeString.h>
|
||||
#include <DataTypes/DataTypesDecimal.h>
|
||||
#include <DataTypes/DataTypesNumber.h>
|
||||
#include <DataTypes/Native.h>
|
||||
#include <Functions/FunctionHelpers.h>
|
||||
#include <Functions/IFunction.h>
|
||||
#include <Functions/IsOperation.h>
|
||||
#include <Functions/castTypeToEither.h>
|
||||
|
||||
@ -30,7 +33,6 @@ namespace ErrorCodes
|
||||
extern const int LOGICAL_ERROR;
|
||||
}
|
||||
|
||||
|
||||
template <typename A, typename Op>
|
||||
struct UnaryOperationImpl
|
||||
{
|
||||
@ -130,6 +132,47 @@ struct FixedStringUnaryOperationImpl
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Op>
|
||||
struct StringUnaryOperationReduceImpl
|
||||
{
|
||||
MULTITARGET_FUNCTION_AVX512BW_AVX512F_AVX2_SSE42(
|
||||
MULTITARGET_FUNCTION_HEADER(static UInt64 NO_INLINE),
|
||||
vectorImpl,
|
||||
MULTITARGET_FUNCTION_BODY((const UInt8 * start, const UInt8 * end) /// NOLINT
|
||||
{
|
||||
UInt64 res = 0;
|
||||
while (start < end)
|
||||
res += Op::apply(*start++);
|
||||
return res;
|
||||
}))
|
||||
|
||||
static UInt64 NO_INLINE vector(const UInt8 * start, const UInt8 * end)
|
||||
{
|
||||
#if USE_MULTITARGET_CODE
|
||||
if (isArchSupported(TargetArch::AVX512BW))
|
||||
{
|
||||
return vectorImplAVX512BW(start, end);
|
||||
}
|
||||
|
||||
if (isArchSupported(TargetArch::AVX512F))
|
||||
{
|
||||
return vectorImplAVX512F(start, end);
|
||||
}
|
||||
|
||||
if (isArchSupported(TargetArch::AVX2))
|
||||
{
|
||||
return vectorImplAVX2(start, end);
|
||||
}
|
||||
|
||||
if (isArchSupported(TargetArch::SSE42))
|
||||
{
|
||||
return vectorImplSSE42(start, end);
|
||||
}
|
||||
#endif
|
||||
|
||||
return vectorImpl(start, end);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename FunctionName>
|
||||
struct FunctionUnaryArithmeticMonotonicity;
|
||||
@ -142,7 +185,8 @@ template <template <typename> class Op, typename Name, bool is_injective>
|
||||
class FunctionUnaryArithmetic : public IFunction
|
||||
{
|
||||
static constexpr bool allow_decimal = IsUnaryOperation<Op>::negate || IsUnaryOperation<Op>::abs || IsUnaryOperation<Op>::sign;
static constexpr bool allow_fixed_string = Op<UInt8>::allow_fixed_string;
static constexpr bool allow_string_or_fixed_string = Op<UInt8>::allow_string_or_fixed_string;
static constexpr bool is_bit_count = IsUnaryOperation<Op>::bit_count;
static constexpr bool is_sign_function = IsUnaryOperation<Op>::sign;

ContextPtr context;
@@ -170,8 +214,8 @@ class FunctionUnaryArithmetic : public IFunction
DataTypeDecimal<Decimal128>,
DataTypeDecimal<Decimal256>,
DataTypeFixedString,
DataTypeInterval
>(type, std::forward<F>(f));
DataTypeString,
DataTypeInterval>(type, std::forward<F>(f));
}

static FunctionOverloadResolverPtr
@@ -204,7 +248,10 @@ public:

size_t getNumberOfArguments() const override { return 1; }
bool isInjective(const ColumnsWithTypeAndName &) const override { return is_injective; }
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override
{
return false;
}

bool useDefaultImplementationForConstants() const override { return true; }

@@ -232,9 +279,33 @@ public:
using DataType = std::decay_t<decltype(type)>;
if constexpr (std::is_same_v<DataTypeFixedString, DataType>)
{
if constexpr (!Op<DataTypeFixedString>::allow_fixed_string)
if constexpr (!allow_string_or_fixed_string)
return false;
result = std::make_shared<DataType>(type.getN());
/// For `bitCount`, when the argument is a FixedString, its return type
/// should be an integer instead of FixedString; the return value is
/// the sum of `bitCount` applied to each character.
else
{
/// UInt16 can hold the bitCount of a FixedString shorter than 8192 bytes,
/// which should be enough for almost all cases, and the setting
/// `allow_suspicious_fixed_string_types` is disabled by default.
if constexpr (is_bit_count)
result = std::make_shared<DataTypeUInt16>();
else
result = std::make_shared<DataType>(type.getN());
}
}
else if constexpr (std::is_same_v<DataTypeString, DataType>)
{
if constexpr (!allow_string_or_fixed_string)
return false;
else
{
if constexpr (is_bit_count)
result = std::make_shared<DataTypeUInt64>();
else
result = std::make_shared<DataType>();
}
}
else if constexpr (std::is_same_v<DataTypeInterval, DataType>)
{
@@ -278,16 +349,80 @@ public:

if constexpr (std::is_same_v<DataTypeFixedString, DataType>)
{
if constexpr (allow_fixed_string)
if constexpr (allow_string_or_fixed_string)
{
if (const auto * col = checkAndGetColumn<ColumnFixedString>(arguments[0].column.get()))
{
auto col_res = ColumnFixedString::create(col->getN());
auto & vec_res = col_res->getChars();
vec_res.resize(col->size() * col->getN());
FixedStringUnaryOperationImpl<Op<UInt8>>::vector(col->getChars(), vec_res);
result_column = std::move(col_res);
return true;
if constexpr (is_bit_count)
{
auto size = col->size();

auto col_res = ColumnUInt16::create(size);
auto & vec_res = col_res->getData();
vec_res.resize(col->size());

const auto & chars = col->getChars();
auto n = col->getN();
for (size_t i = 0; i < size; ++i)
{
vec_res[i] = StringUnaryOperationReduceImpl<Op<UInt8>>::vector(
chars.data() + n * i, chars.data() + n * (i + 1));
}
result_column = std::move(col_res);
return true;
}
else
{
auto col_res = ColumnFixedString::create(col->getN());
auto & vec_res = col_res->getChars();
vec_res.resize(col->size() * col->getN());
FixedStringUnaryOperationImpl<Op<UInt8>>::vector(col->getChars(), vec_res);
result_column = std::move(col_res);
return true;
}
}
}
}
else if constexpr (std::is_same_v<DataTypeString, DataType>)
{
if constexpr (allow_string_or_fixed_string)
{
if (const auto * col = checkAndGetColumn<ColumnString>(arguments[0].column.get()))
{
if constexpr (is_bit_count)
{
auto size = col->size();

auto col_res = ColumnUInt64::create(size);
auto & vec_res = col_res->getData();

const auto & chars = col->getChars();
const auto & offsets = col->getOffsets();
for (size_t i = 0; i < size; ++i)
{
vec_res[i] = StringUnaryOperationReduceImpl<Op<UInt8>>::vector(
chars.data() + offsets[i - 1], chars.data() + offsets[i] - 1);
}
result_column = std::move(col_res);
return true;
}
else
{
auto col_res = ColumnString::create();
auto & vec_res = col_res->getChars();
auto & offset_res = col_res->getOffsets();

const auto & vec_col = col->getChars();
const auto & offset_col = col->getOffsets();

vec_res.resize(vec_col.size());
offset_res.resize(offset_col.size());
memcpy(offset_res.data(), offset_col.data(), offset_res.size() * sizeof(UInt64));

FixedStringUnaryOperationImpl<Op<UInt8>>::vector(vec_col, vec_res);
result_column = std::move(col_res);
return true;
}
}
}
}
@@ -350,7 +485,7 @@ public:
return castType(arguments[0].get(), [&](const auto & type)
{
using DataType = std::decay_t<decltype(type)>;
if constexpr (std::is_same_v<DataTypeFixedString, DataType>)
if constexpr (std::is_same_v<DataTypeFixedString, DataType> || std::is_same_v<DataTypeString, DataType>)
return false;
else
return !IsDataTypeDecimal<DataType> && Op<typename DataType::FieldType>::compilable;
@@ -365,7 +500,7 @@ public:
castType(types[0].get(), [&](const auto & type)
{
using DataType = std::decay_t<decltype(type)>;
if constexpr (std::is_same_v<DataTypeFixedString, DataType>)
if constexpr (std::is_same_v<DataTypeFixedString, DataType> || std::is_same_v<DataTypeString, DataType>)
return false;
else
{
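For orientation, the reduction the new bitCount path performs over a byte range boils down to summing a per-byte popcount. A minimal standalone sketch of that reduction; bitCountOfRange here is an illustrative stand-in, not the actual StringUnaryOperationReduceImpl API:

#include <bit>
#include <cstdint>
#include <iostream>
#include <string>

/// Hypothetical stand-in for the bitCount reduction over a byte range:
/// apply popcount to every byte and sum the results.
static uint64_t bitCountOfRange(const uint8_t * begin, const uint8_t * end)
{
    uint64_t sum = 0;
    for (const uint8_t * p = begin; p != end; ++p)
        sum += std::popcount(static_cast<unsigned>(*p));
    return sum;
}

int main()
{
    std::string s = "ab"; // 'a' = 0x61 (3 set bits), 'b' = 0x62 (3 set bits)
    const auto * data = reinterpret_cast<const uint8_t *>(s.data());
    std::cout << bitCountOfRange(data, data + s.size()) << '\n'; // prints 6
}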
@@ -3098,12 +3098,18 @@ private:
return &ConvertImplGenericFromString<ColumnString>::execute;
}

DataTypePtr from_type_holder;
const auto * from_type = checkAndGetDataType<DataTypeArray>(from_type_untyped.get());
const auto * from_type_map = checkAndGetDataType<DataTypeMap>(from_type_untyped.get());

/// Convert from Map
if (from_type_map)
from_type = checkAndGetDataType<DataTypeArray>(from_type_map->getNestedType().get());
{
/// Recreate the array of unnamed tuples because otherwise it may work
/// unexpectedly while converting to an array of named tuples.
from_type_holder = from_type_map->getNestedTypeWithUnnamedTuple();
from_type = assert_cast<const DataTypeArray *>(from_type_holder.get());
}

if (!from_type)
{
@@ -5,7 +5,9 @@ namespace DB

/// These classes should be present in the DB namespace (they cannot be placed into an anonymous namespace)
template <typename> struct AbsImpl;
template <typename> struct BitCountImpl;
template <typename> struct NegateImpl;
template <typename> struct SignImpl;
template <typename, typename> struct PlusImpl;
template <typename, typename> struct MinusImpl;
template <typename, typename> struct MultiplyImpl;
@@ -22,9 +24,6 @@ template <typename, typename> struct LessOrEqualsOp;
template <typename, typename> struct GreaterOrEqualsOp;
template <typename, typename> struct BitHammingDistanceImpl;

template <typename>
struct SignImpl;

template <template <typename, typename> typename Op1, template <typename, typename> typename Op2>
struct IsSameOperation
{
@@ -37,6 +36,7 @@ struct IsUnaryOperation
static constexpr bool abs = std::is_same_v<Op<Int8>, AbsImpl<Int8>>;
static constexpr bool negate = std::is_same_v<Op<Int8>, NegateImpl<Int8>>;
static constexpr bool sign = std::is_same_v<Op<Int8>, SignImpl<Int8>>;
static constexpr bool bit_count = std::is_same_v<Op<Int8>, BitCountImpl<Int8>>;
};

template <template <typename, typename> typename Op>
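The IsUnaryOperation trait above identifies which concrete op a template-template parameter refers to by instantiating it at one fixed type and comparing the resulting concrete types. A reduced, self-contained sketch of the same trick; the op structs are illustrative stand-ins, not the ClickHouse implementations:

#include <iostream>
#include <type_traits>

// Illustrative stand-ins for the op templates.
template <typename A> struct NegateImpl { static A apply(A a) { return -a; } };
template <typename A> struct AbsImpl { static A apply(A a) { return a < 0 ? -a : a; } };

// Detect which op a template-template parameter is by instantiating it
// at one fixed type and comparing the resulting concrete types.
template <template <typename> typename Op>
struct IsUnaryOperation
{
    static constexpr bool negate = std::is_same_v<Op<signed char>, NegateImpl<signed char>>;
    static constexpr bool abs = std::is_same_v<Op<signed char>, AbsImpl<signed char>>;
};

int main()
{
    std::cout << IsUnaryOperation<NegateImpl>::negate  // 1
              << IsUnaryOperation<NegateImpl>::abs     // 0
              << '\n';
}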
@@ -10,8 +10,7 @@ template <typename A>
struct AbsImpl
{
using ResultType = std::conditional_t<is_decimal<A>, A, typename NumberTraits::ResultOfAbs<A>::Type>;
static const constexpr bool allow_fixed_string = false;
static const constexpr bool allow_string_integer = false;
static constexpr bool allow_string_or_fixed_string = false;

static inline NO_SANITIZE_UNDEFINED ResultType apply(A a)
{
@@ -3,6 +3,10 @@ add_headers_and_sources(clickhouse_functions_array .)
add_library(clickhouse_functions_array OBJECT ${clickhouse_functions_array_sources} ${clickhouse_functions_array_headers})
target_link_libraries(clickhouse_functions_array PRIVATE dbms clickhouse_functions_gatherutils)

if (TARGET ch_contrib::vectorscan)
target_link_libraries(clickhouse_functions_array PRIVATE ch_contrib::vectorscan)
endif()

if (OMIT_HEAVY_DEBUG_SYMBOLS)
target_compile_options(clickhouse_functions_array PRIVATE "-g0")
endif()
@@ -19,6 +19,7 @@
#include <DataTypes/DataTypeLowCardinality.h>
#include <DataTypes/DataTypeMap.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypeTuple.h>

#include <Functions/FunctionHelpers.h>
#include <Functions/IFunction.h>
@@ -41,33 +42,6 @@ namespace ErrorCodes
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
}


template <typename T>
ColumnPtr getOffsetsPtr(const T & column)
{
if constexpr (std::is_same_v<T, ColumnArray>)
{
return column.getOffsetsPtr();
}
else // ColumnMap
{
return column.getNestedColumn().getOffsetsPtr();
}
}

template <typename T>
const IColumn::Offsets & getOffsets(const T & column)
{
if constexpr (std::is_same_v<T, ColumnArray>)
{
return column.getOffsets();
}
else // ColumnMap
{
return column.getNestedColumn().getOffsets();
}
}

/** Higher-order functions for arrays.
 * These functions optionally apply a map (transform) to an array (or multiple arrays of identical size) by a lambda function,
 * and return some result based on that transformation.
@@ -90,10 +64,6 @@ class FunctionArrayMapped : public IFunction
{
public:
static constexpr auto name = Name::name;
static constexpr bool is_argument_type_map = std::is_same_v<typename Impl::data_type, DataTypeMap>;
static constexpr bool is_argument_type_array = std::is_same_v<typename Impl::data_type, DataTypeArray>;
static constexpr auto argument_type_name = is_argument_type_map ? "Map" : "Array";

static constexpr size_t num_fixed_params = []{ if constexpr (requires { Impl::num_fixed_params; }) return Impl::num_fixed_params; else return 0; }();

static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionArrayMapped>(); }
@@ -131,32 +101,56 @@ public:
num_fixed_params + 1,
(num_fixed_params + 1 == 1) ? "" : "s");

size_t nested_types_count = (arguments.size() - num_fixed_params - 1) * (is_argument_type_map ? 2 : 1);
DataTypes nested_types(nested_types_count);
for (size_t i = 0; i < arguments.size() - 1 - num_fixed_params; ++i)
bool is_single_array_argument = arguments.size() == num_fixed_params + 2;
size_t tuple_argument_size = 0;

size_t num_nested_types = arguments.size() - num_fixed_params - 1;
DataTypes nested_types(num_nested_types);

for (size_t i = 0; i < num_nested_types; ++i)
{
const auto * array_type = checkAndGetDataType<typename Impl::data_type>(&*arguments[i + 1 + num_fixed_params]);
const auto * array_type = checkAndGetDataType<DataTypeArray>(&*arguments[i + 1 + num_fixed_params]);
if (!array_type)
throw Exception(
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Argument {} of function {} must be {}. Found {} instead",
"Argument {} of function {} must be Array. Found {} instead",
i + 2 + num_fixed_params,
getName(),
argument_type_name,
arguments[i + 1 + num_fixed_params]->getName());
if constexpr (is_argument_type_map)
{
nested_types[2 * i] = recursiveRemoveLowCardinality(array_type->getKeyType());
nested_types[2 * i + 1] = recursiveRemoveLowCardinality(array_type->getValueType());
}
else if constexpr (is_argument_type_array)
{
nested_types[i] = recursiveRemoveLowCardinality(array_type->getNestedType());
}

if (const auto * tuple_type = checkAndGetDataType<DataTypeTuple>(array_type->getNestedType().get()))
tuple_argument_size = tuple_type->getElements().size();

nested_types[i] = recursiveRemoveLowCardinality(array_type->getNestedType());
}

const DataTypeFunction * function_type = checkAndGetDataType<DataTypeFunction>(arguments[0].get());
if (!function_type || function_type->getArgumentTypes().size() != nested_types.size())
const auto * function_type = checkAndGetDataType<DataTypeFunction>(arguments[0].get());
if (!function_type)
throw Exception(
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"First argument for this overload of {} must be a function with {} arguments, found {} instead",
getName(),
nested_types.size(),
arguments[0]->getName());

size_t num_function_arguments = function_type->getArgumentTypes().size();
if (is_single_array_argument
&& tuple_argument_size
&& tuple_argument_size == num_function_arguments)
{
assert(nested_types.size() == 1);

auto argument_type = nested_types[0];
const auto & tuple_type = assert_cast<const DataTypeTuple &>(*argument_type);

nested_types.clear();
nested_types.reserve(tuple_argument_size);

for (const auto & element : tuple_type.getElements())
nested_types.push_back(element);
}

if (num_function_arguments != nested_types.size())
throw Exception(
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"First argument for this overload of {} must be a function with {} arguments, found {} instead",
@@ -179,11 +173,11 @@ public:
(min_args > 1 ? "s" : ""),
arguments.size());

if ((arguments.size() == 1 + num_fixed_params) && is_argument_type_array)
if (arguments.size() == 1 + num_fixed_params)
{
const auto * data_type = checkAndGetDataType<typename Impl::data_type>(arguments[num_fixed_params].type.get());
const auto * array_type = checkAndGetDataType<DataTypeArray>(arguments[num_fixed_params].type.get());

if (!data_type)
if (!array_type)
throw Exception(
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"The {}{}{} argument for function {} must be array. Found {} instead",
@@ -196,7 +190,7 @@ public:
if constexpr (num_fixed_params)
Impl::checkArguments(getName(), arguments.data());

DataTypePtr nested_type = data_type->getNestedType();
DataTypePtr nested_type = array_type->getNestedType();

if (Impl::needBoolean() && !isUInt8(nested_type))
throw Exception(
@@ -208,10 +202,7 @@ public:
getName(),
arguments[num_fixed_params].type->getName());

if constexpr (is_argument_type_array)
return Impl::getReturnType(nested_type, nested_type);
else
throw DB::Exception(ErrorCodes::LOGICAL_ERROR, "Unreachable code reached");
return Impl::getReturnType(nested_type, nested_type);
}
else
{
@@ -246,26 +237,15 @@ public:
getName(),
return_type->getName());

static_assert(is_argument_type_map || is_argument_type_array, "unsupported type");

if (arguments.size() < 2 + num_fixed_params)
{
throw DB::Exception(ErrorCodes::LOGICAL_ERROR, "Incorrect number of arguments: {}", arguments.size());
}

const auto * first_array_type = checkAndGetDataType<typename Impl::data_type>(arguments[1 + num_fixed_params].type.get());

const auto * first_array_type = checkAndGetDataType<DataTypeArray>(arguments[1 + num_fixed_params].type.get());
if (!first_array_type)
throw DB::Exception(
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Unsupported type {}", arguments[1 + num_fixed_params].type->getName());

if constexpr (is_argument_type_array)
return Impl::getReturnType(return_type, first_array_type->getNestedType());

if constexpr (is_argument_type_map)
return Impl::getReturnType(return_type, first_array_type->getKeyValueTypes());

throw DB::Exception(ErrorCodes::LOGICAL_ERROR, "Unreachable code reached");
return Impl::getReturnType(return_type, first_array_type->getNestedType());
}
}

@@ -274,38 +254,26 @@ public:
if (arguments.size() == 1 + num_fixed_params)
{
ColumnPtr column_array_ptr = arguments[num_fixed_params].column;
const auto * column_array = checkAndGetColumn<typename Impl::column_type>(column_array_ptr.get());
const auto * column_array = checkAndGetColumn<ColumnArray>(column_array_ptr.get());

if (!column_array)
{
const ColumnConst * column_const_array = checkAndGetColumnConst<typename Impl::column_type>(column_array_ptr.get());
const auto * column_const_array = checkAndGetColumnConst<ColumnArray>(column_array_ptr.get());
if (!column_const_array)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN, "Expected {} column, found {}", argument_type_name, column_array_ptr->getName());
ErrorCodes::ILLEGAL_COLUMN, "Expected Array column, found {}", column_array_ptr->getName());

column_array_ptr = column_const_array->convertToFullColumn();
column_array = assert_cast<const typename Impl::column_type *>(column_array_ptr.get());
column_array = assert_cast<const ColumnArray *>(column_array_ptr.get());
}

if constexpr (std::is_same_v<typename Impl::column_type, ColumnMap>)
{
if constexpr (num_fixed_params)
return Impl::execute(
*column_array,
column_array->getNestedColumn().getDataPtr(),
arguments.data());
else
return Impl::execute(*column_array, column_array->getNestedColumn().getDataPtr());
}
if constexpr (num_fixed_params)
return Impl::execute(
*column_array,
column_array->getDataPtr(),
arguments.data());
else
{
if constexpr (num_fixed_params)
return Impl::execute(
*column_array,
column_array->getDataPtr(),
arguments.data());
else
return Impl::execute(*column_array, column_array->getDataPtr());
}
return Impl::execute(*column_array, column_array->getDataPtr());
}
else
{
@@ -315,56 +283,81 @@ public:
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "First argument for function {} must be a function.", getName());

const auto * column_function = typeid_cast<const ColumnFunction *>(column_with_type_and_name.column.get());

if (!column_function)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "First argument for function {} must be a function.", getName());

ColumnPtr offsets_column;
const auto & type_function = assert_cast<const DataTypeFunction &>(*arguments[0].type);
size_t num_function_arguments = type_function.getArgumentTypes().size();

ColumnPtr offsets_column;
ColumnPtr column_first_array_ptr;
const typename Impl::column_type * column_first_array = nullptr;
const ColumnArray * column_first_array = nullptr;

ColumnsWithTypeAndName arrays;
arrays.reserve(arguments.size() - 1);
arrays.reserve(arguments.size() - 1 - num_fixed_params);

bool is_single_array_argument = arguments.size() == num_fixed_params + 2;
for (size_t i = 1 + num_fixed_params; i < arguments.size(); ++i)
{
const auto & array_with_type_and_name = arguments[i];

ColumnPtr column_array_ptr = array_with_type_and_name.column;
const auto * column_array = checkAndGetColumn<typename Impl::column_type>(column_array_ptr.get());
auto column_array_ptr = array_with_type_and_name.column;
const auto * column_array = checkAndGetColumn<ColumnArray>(column_array_ptr.get());

const DataTypePtr & array_type_ptr = array_with_type_and_name.type;
const auto * array_type = checkAndGetDataType<typename Impl::data_type>(array_type_ptr.get());
const auto & array_type_ptr = array_with_type_and_name.type;
const auto * array_type = checkAndGetDataType<DataTypeArray>(array_type_ptr.get());

if (!column_array)
{
const ColumnConst * column_const_array = checkAndGetColumnConst<typename Impl::column_type>(column_array_ptr.get());
const auto * column_const_array = checkAndGetColumnConst<ColumnArray>(column_array_ptr.get());
if (!column_const_array)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN, "Expected {} column, found {}", argument_type_name, column_array_ptr->getName());
ErrorCodes::ILLEGAL_COLUMN, "Expected Array column, found {}", column_array_ptr->getName());

column_array_ptr = recursiveRemoveLowCardinality(column_const_array->convertToFullColumn());
column_array = checkAndGetColumn<typename Impl::column_type>(column_array_ptr.get());
column_array = checkAndGetColumn<ColumnArray>(column_array_ptr.get());
}

if (!array_type)
throw Exception(
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Expected {} type, found {}", argument_type_name, array_type_ptr->getName());
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Expected Array type, found {}", array_type_ptr->getName());

if (!offsets_column)
{
offsets_column = getOffsetsPtr(*column_array);
offsets_column = column_array->getOffsetsPtr();
}
else
{
/// The first condition is an optimization: do not compare the data if the pointers are equal.
if (getOffsetsPtr(*column_array) != offsets_column
&& getOffsets(*column_array) != typeid_cast<const ColumnArray::ColumnOffsets &>(*offsets_column).getData())
if (column_array->getOffsetsPtr() != offsets_column
&& column_array->getOffsets() != typeid_cast<const ColumnArray::ColumnOffsets &>(*offsets_column).getData())
throw Exception(
ErrorCodes::SIZES_OF_ARRAYS_DONT_MATCH,
"{}s passed to {} must have equal size",
argument_type_name,
getName());
"Arrays passed to {} must have equal size", getName());
}

const auto * column_tuple = checkAndGetColumn<ColumnTuple>(&column_array->getData());
if (is_single_array_argument && column_tuple && column_tuple->getColumns().size() == num_function_arguments)
{
const auto & type_tuple = assert_cast<const DataTypeTuple &>(*array_type->getNestedType());
const auto & tuple_names = type_tuple.getElementNames();

size_t tuple_size = column_tuple->getColumns().size();
arrays.reserve(column_tuple->getColumns().size());
for (size_t j = 0; j < tuple_size; ++j)
{
arrays.emplace_back(
column_tuple->getColumnPtr(j),
recursiveRemoveLowCardinality(type_tuple.getElement(j)),
array_with_type_and_name.name + "." + tuple_names[j]);
}
}
else
{
arrays.emplace_back(
column_array->getDataPtr(),
recursiveRemoveLowCardinality(array_type->getNestedType()),
array_with_type_and_name.name);
}

if (i == 1 + num_fixed_params)
@@ -372,24 +365,10 @@ public:
column_first_array_ptr = column_array_ptr;
column_first_array = column_array;
}

if constexpr (is_argument_type_map)
{
arrays.emplace_back(ColumnWithTypeAndName(
column_array->getNestedData().getColumnPtr(0), recursiveRemoveLowCardinality(array_type->getKeyType()), array_with_type_and_name.name+".key"));
arrays.emplace_back(ColumnWithTypeAndName(
column_array->getNestedData().getColumnPtr(1), recursiveRemoveLowCardinality(array_type->getValueType()), array_with_type_and_name.name+".value"));
}
else
{
arrays.emplace_back(ColumnWithTypeAndName(column_array->getDataPtr(),
recursiveRemoveLowCardinality(array_type->getNestedType()),
array_with_type_and_name.name));
}
}

/// Put all the necessary columns, replicated by the array sizes, into the block.
auto replicated_column_function_ptr = IColumn::mutate(column_function->replicate(getOffsets(*column_first_array)));
auto replicated_column_function_ptr = IColumn::mutate(column_function->replicate(column_first_array->getOffsets()));
auto * replicated_column_function = typeid_cast<ColumnFunction *>(replicated_column_function_ptr.get());
replicated_column_function->appendArguments(arrays);
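The new tuple-unpacking behavior in FunctionArrayMapped can be modeled in miniature: when a single array argument holds tuples and the lambda's arity matches the tuple size, the tuple fields are spread into separate lambda arguments. A toy model with std::apply standing in for the column machinery, not the engine code itself:

#include <iostream>
#include <tuple>
#include <vector>

// Toy model: "array of tuples" + n-ary lambda. If the lambda takes as many
// arguments as the tuple has elements, unpack the tuple into them.
template <typename F, typename... Ts>
auto mapOverArray(const std::vector<std::tuple<Ts...>> & arr, F f)
{
    std::vector<decltype(std::apply(f, arr.front()))> result;
    result.reserve(arr.size());
    for (const auto & elem : arr)
        result.push_back(std::apply(f, elem)); // spread tuple fields as args
    return result;
}

int main()
{
    std::vector<std::tuple<int, int>> pairs{{1, 10}, {2, 20}};
    auto sums = mapOverArray(pairs, [](int k, int v) { return k + v; });
    for (int s : sums)
        std::cout << s << ' '; // 11 22
    std::cout << '\n';
}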
src/Functions/array/FunctionsMapMiscellaneous.cpp (new file, 464 lines)
@@ -0,0 +1,464 @@
#include <Columns/ColumnArray.h>
#include <Columns/ColumnFunction.h>
#include <Columns/ColumnMap.h>
#include <Columns/ColumnTuple.h>
#include <Columns/ColumnConst.h>

#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeFunction.h>
#include <DataTypes/DataTypeMap.h>
#include <DataTypes/DataTypeTuple.h>

#include <Functions/FunctionHelpers.h>
#include <Functions/like.h>
#include <Functions/array/arrayConcat.h>
#include <Functions/array/arrayFilter.h>
#include <Functions/array/arrayMap.h>
#include <Functions/array/arraySort.h>
#include <Functions/array/arrayIndex.h>
#include <Functions/array/arrayExists.h>
#include <Functions/array/arrayAll.h>
#include <Functions/identity.h>
#include <Functions/FunctionFactory.h>

#include <base/map.h>

namespace DB
{

namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
}

/** An adapter that allows executing array* functions over Map-typed arguments.
 * E.g. transform mapConcat to arrayConcat.
 *
 * Impl - the implementation of the function that is applied
 * to the internal column of the Map arguments (e.g. 'arrayConcat').
 *
 * Adapter - a struct that determines how to extract the internal array columns
 * from Map arguments and possibly modify other columns.
 */
template <typename Impl, typename Adapter, typename Name>
class FunctionMapToArrayAdapter : public IFunction
{
public:
static constexpr auto name = Name::name;
static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionMapToArrayAdapter>(); }
String getName() const override { return name; }

bool isVariadic() const override { return impl.isVariadic(); }
size_t getNumberOfArguments() const override { return impl.getNumberOfArguments(); }
bool useDefaultImplementationForConstants() const override { return impl.useDefaultImplementationForConstants(); }
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo &) const override { return false; }

void getLambdaArgumentTypes(DataTypes & arguments) const override
{
Adapter::extractNestedTypes(arguments);
impl.getLambdaArgumentTypes(arguments);
}

DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
{
if (arguments.empty())
throw Exception(
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
"Function {} requires at least one argument, passed {}", getName(), arguments.size());

auto nested_arguments = arguments;
Adapter::extractNestedTypesAndColumns(nested_arguments);

constexpr bool impl_has_get_return_type = requires
{
impl.getReturnTypeImpl(nested_arguments);
};

/// If the method is not overloaded in the implementation, call the default
/// implementation from IFunction. Inheritance cannot be used here for a template-parameterized field.
if constexpr (impl_has_get_return_type)
return Adapter::wrapType(impl.getReturnTypeImpl(nested_arguments));
else
return Adapter::wrapType(dynamic_cast<const IFunction &>(impl).getReturnTypeImpl(nested_arguments));
}

ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override
{
auto nested_arguments = arguments;
Adapter::extractNestedTypesAndColumns(nested_arguments);
return Adapter::wrapColumn(impl.executeImpl(nested_arguments, Adapter::extractResultType(result_type), input_rows_count));
}

private:
Impl impl;
};


template <typename Derived, typename Name>
struct MapAdapterBase
{
static void extractNestedTypes(DataTypes & types)
{
bool has_map_column = false;
for (auto & type : types)
{
if (const auto * type_map = typeid_cast<const DataTypeMap *>(type.get()))
{
has_map_column = true;
type = Derived::extractNestedType(*type_map);
}
}

if (!has_map_column)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Function {} requires at least one argument of type Map", Name::name);
}

static void extractNestedTypesAndColumns(ColumnsWithTypeAndName & arguments)
{
bool has_map_column = false;
for (auto & argument : arguments)
{
if (const auto * type_map = typeid_cast<const DataTypeMap *>(argument.type.get()))
{
has_map_column = true;
argument.type = Derived::extractNestedType(*type_map);

if (argument.column)
{
if (const auto * const_map = checkAndGetColumnConstData<ColumnMap>(argument.column.get()))
argument.column = ColumnConst::create(Derived::extractNestedColumn(*const_map), argument.column->size());
else
argument.column = Derived::extractNestedColumn(assert_cast<const ColumnMap &>(*argument.column));
}
}
}

if (!has_map_column)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Function {} requires at least one argument of type Map", Name::name);
}
};

/// Adapter that extracts the nested Array(Tuple(key, value)) from Map columns.
template <typename Name, bool returns_map = true>
struct MapToNestedAdapter : public MapAdapterBase<MapToNestedAdapter<Name, returns_map>, Name>
{
using MapAdapterBase<MapToNestedAdapter, Name>::extractNestedTypes;
using MapAdapterBase<MapToNestedAdapter, Name>::extractNestedTypesAndColumns;

static DataTypePtr extractNestedType(const DataTypeMap & type_map)
{
return type_map.getNestedTypeWithUnnamedTuple();
}

static ColumnPtr extractNestedColumn(const ColumnMap & column_map)
{
return column_map.getNestedColumnPtr();
}

static DataTypePtr extractResultType(const DataTypePtr & result_type)
{
if constexpr (returns_map)
return assert_cast<const DataTypeMap &>(*result_type).getNestedType();
return result_type;
}

static DataTypePtr wrapType(DataTypePtr type)
{
if constexpr (returns_map)
return std::make_shared<DataTypeMap>(std::move(type));
return type;
}

static ColumnPtr wrapColumn(ColumnPtr column)
{
if constexpr (returns_map)
return ColumnMap::create(std::move(column));
return column;
}
};

/// Adapter that extracts the array with keys or values from Map columns.
template <typename Name, size_t position>
struct MapToSubcolumnAdapter : public MapAdapterBase<MapToSubcolumnAdapter<Name, position>, Name>
{
static_assert(position <= 1);
using MapAdapterBase<MapToSubcolumnAdapter, Name>::extractNestedTypes;
using MapAdapterBase<MapToSubcolumnAdapter, Name>::extractNestedTypesAndColumns;

static DataTypePtr extractNestedType(const DataTypeMap & type_map)
{
const auto & array_type = assert_cast<const DataTypeArray &>(*type_map.getNestedType());
const auto & tuple_type = assert_cast<const DataTypeTuple &>(*array_type.getNestedType());
return std::make_shared<DataTypeArray>(tuple_type.getElement(position));
}

static ColumnPtr extractNestedColumn(const ColumnMap & column_map)
{
const auto & array_column = column_map.getNestedColumn();
const auto & tuple_column = column_map.getNestedData();
return ColumnArray::create(tuple_column.getColumnPtr(position), array_column.getOffsetsPtr());
}

static DataTypePtr extractResultType(const DataTypePtr & result_type) { return result_type; }
static DataTypePtr wrapType(DataTypePtr type) { return type; }
static ColumnPtr wrapColumn(ColumnPtr column) { return column; }
};

/// A special function that works like the following:
/// mapKeyLike(pattern, key, value) <=> key LIKE pattern
/// It is used to mimic the lambda: (key, value) -> key LIKE pattern.
class FunctionMapKeyLike : public IFunction
{
public:
String getName() const override { return "mapKeyLike"; }
size_t getNumberOfArguments() const override { return 3; }
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }
bool useDefaultImplementationForNulls() const override { return false; }

DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
{
DataTypes new_arguments{arguments[1], arguments[0]};
return impl.getReturnTypeImpl(new_arguments);
}

ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override
{
ColumnsWithTypeAndName new_arguments{arguments[1], arguments[0]};
return impl.executeImpl(new_arguments, result_type, input_rows_count);
}

private:
FunctionLike impl;
};

/// Adapter for map*KeyLike functions.
/// It extracts the nested Array(Tuple(key, value)) from Map columns
/// and prepares a ColumnFunction as the first argument, which works
/// like the lambda (k, v) -> k LIKE pattern, to pass it to the nested
/// function derived from FunctionArrayMapped.
template <typename Name, bool returns_map>
struct MapKeyLikeAdapter
{
static void checkTypes(const DataTypes & types)
{
if (types.size() != 2)
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
"Number of arguments for function {} doesn't match: passed {}, should be 2",
Name::name, types.size());

const auto * map_type = checkAndGetDataType<DataTypeMap>(types[0].get());
if (!map_type)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "First argument for function {} must be a Map", Name::name);

if (!isStringOrFixedString(types[1]))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Second argument for function {} must be String or FixedString", Name::name);

if (!isStringOrFixedString(map_type->getKeyType()))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Key type of map for function {} must be String or FixedString", Name::name);
}

static void extractNestedTypes(DataTypes & types)
{
checkTypes(types);
const auto & map_type = assert_cast<const DataTypeMap &>(*types[0]);

DataTypes lambda_argument_types{types[1], map_type.getKeyType(), map_type.getValueType()};
auto result_type = FunctionMapKeyLike().getReturnTypeImpl(lambda_argument_types);

DataTypes argument_types{map_type.getKeyType(), map_type.getValueType()};
auto function_type = std::make_shared<DataTypeFunction>(argument_types, result_type);

types = {function_type, types[0]};
MapToNestedAdapter<Name, returns_map>::extractNestedTypes(types);
}

static void extractNestedTypesAndColumns(ColumnsWithTypeAndName & arguments)
{
checkTypes(collections::map<DataTypes>(arguments, [](const auto & elem) { return elem.type; }));

const auto & map_type = assert_cast<const DataTypeMap &>(*arguments[0].type);
const auto & pattern_arg = arguments[1];

ColumnPtr function_column;
auto function = std::make_shared<FunctionMapKeyLike>();

DataTypes lambda_argument_types{pattern_arg.type, map_type.getKeyType(), map_type.getValueType()};
auto result_type = function->getReturnTypeImpl(lambda_argument_types);

DataTypes argument_types{map_type.getKeyType(), map_type.getValueType()};
auto function_type = std::make_shared<DataTypeFunction>(argument_types, result_type);

if (pattern_arg.column)
{
/// Here we create a ColumnFunction with the pattern column already captured.
/// The nested function will append the keys and values columns, and it will work as the desired lambda.
auto function_base = std::make_shared<FunctionToFunctionBaseAdaptor>(function, lambda_argument_types, result_type);
function_column = ColumnFunction::create(pattern_arg.column->size(), std::move(function_base), ColumnsWithTypeAndName{pattern_arg});
}

ColumnWithTypeAndName function_arg{function_column, function_type, "__function_map_key_like"};
arguments = {function_arg, arguments[0]};
MapToNestedAdapter<Name, returns_map>::extractNestedTypesAndColumns(arguments);
}

static DataTypePtr extractResultType(const DataTypePtr & result_type)
{
return MapToNestedAdapter<Name, returns_map>::extractResultType(result_type);
}

static DataTypePtr wrapType(DataTypePtr type)
{
return MapToNestedAdapter<Name, returns_map>::wrapType(std::move(type));
}

static ColumnPtr wrapColumn(ColumnPtr column)
{
return MapToNestedAdapter<Name, returns_map>::wrapColumn(std::move(column));
}
};

struct NameMapConcat { static constexpr auto name = "mapConcat"; };
using FunctionMapConcat = FunctionMapToArrayAdapter<FunctionArrayConcat, MapToNestedAdapter<NameMapConcat>, NameMapConcat>;

struct NameMapKeys { static constexpr auto name = "mapKeys"; };
using FunctionMapKeys = FunctionMapToArrayAdapter<FunctionIdentity, MapToSubcolumnAdapter<NameMapKeys, 0>, NameMapKeys>;

struct NameMapValues { static constexpr auto name = "mapValues"; };
using FunctionMapValues = FunctionMapToArrayAdapter<FunctionIdentity, MapToSubcolumnAdapter<NameMapValues, 1>, NameMapValues>;

struct NameMapContains { static constexpr auto name = "mapContains"; };
using FunctionMapContains = FunctionMapToArrayAdapter<FunctionArrayIndex<HasAction, NameMapContains>, MapToSubcolumnAdapter<NameMapKeys, 0>, NameMapContains>;

struct NameMapFilter { static constexpr auto name = "mapFilter"; };
using FunctionMapFilter = FunctionMapToArrayAdapter<FunctionArrayFilter, MapToNestedAdapter<NameMapFilter>, NameMapFilter>;

struct NameMapApply { static constexpr auto name = "mapApply"; };
using FunctionMapApply = FunctionMapToArrayAdapter<FunctionArrayMap, MapToNestedAdapter<NameMapApply>, NameMapApply>;

struct NameMapExists { static constexpr auto name = "mapExists"; };
using FunctionMapExists = FunctionMapToArrayAdapter<FunctionArrayExists, MapToNestedAdapter<NameMapExists, false>, NameMapExists>;

struct NameMapAll { static constexpr auto name = "mapAll"; };
using FunctionMapAll = FunctionMapToArrayAdapter<FunctionArrayAll, MapToNestedAdapter<NameMapAll, false>, NameMapAll>;

struct NameMapContainsKeyLike { static constexpr auto name = "mapContainsKeyLike"; };
using FunctionMapContainsKeyLike = FunctionMapToArrayAdapter<FunctionArrayExists, MapKeyLikeAdapter<NameMapContainsKeyLike, false>, NameMapContainsKeyLike>;

struct NameMapExtractKeyLike { static constexpr auto name = "mapExtractKeyLike"; };
using FunctionMapExtractKeyLike = FunctionMapToArrayAdapter<FunctionArrayFilter, MapKeyLikeAdapter<NameMapExtractKeyLike, true>, NameMapExtractKeyLike>;

struct NameMapSort { static constexpr auto name = "mapSort"; };
struct NameMapReverseSort { static constexpr auto name = "mapReverseSort"; };
struct NameMapPartialSort { static constexpr auto name = "mapPartialSort"; };
struct NameMapPartialReverseSort { static constexpr auto name = "mapPartialReverseSort"; };

using FunctionMapSort = FunctionMapToArrayAdapter<FunctionArraySort, MapToNestedAdapter<NameMapSort>, NameMapSort>;
using FunctionMapReverseSort = FunctionMapToArrayAdapter<FunctionArrayReverseSort, MapToNestedAdapter<NameMapReverseSort>, NameMapReverseSort>;
using FunctionMapPartialSort = FunctionMapToArrayAdapter<FunctionArrayPartialSort, MapToNestedAdapter<NameMapPartialSort>, NameMapPartialSort>;
using FunctionMapPartialReverseSort = FunctionMapToArrayAdapter<FunctionArrayPartialReverseSort, MapToNestedAdapter<NameMapPartialReverseSort>, NameMapPartialReverseSort>;

REGISTER_FUNCTION(MapMiscellaneous)
{
factory.registerFunction<FunctionMapConcat>(
{
"The same as arrayConcat.",
Documentation::Examples{{"mapConcat", "SELECT mapConcat(map('k1', 'v1'), map('k2', 'v2'))"}},
Documentation::Categories{"Map"},
});

factory.registerFunction<FunctionMapKeys>(
{
"Returns an array with the keys of the map.",
Documentation::Examples{{"mapKeys", "SELECT mapKeys(map('k1', 'v1', 'k2', 'v2'))"}},
Documentation::Categories{"Map"},
});

factory.registerFunction<FunctionMapValues>(
{
"Returns an array with the values of the map.",
Documentation::Examples{{"mapValues", "SELECT mapValues(map('k1', 'v1', 'k2', 'v2'))"}},
Documentation::Categories{"Map"},
});

factory.registerFunction<FunctionMapContains>(
{
"Checks whether the map has the specified key.",
Documentation::Examples{{"mapContains", "SELECT mapContains(map('k1', 'v1', 'k2', 'v2'), 'k1')"}},
Documentation::Categories{"Map"},
});

factory.registerFunction<FunctionMapFilter>(
{
"The same as arrayFilter.",
Documentation::Examples{{"mapFilter", "SELECT mapFilter((k, v) -> v > 1, map('k1', 1, 'k2', 2))"}},
Documentation::Categories{"Map"},
});

factory.registerFunction<FunctionMapApply>(
{
"The same as arrayMap.",
Documentation::Examples{{"mapApply", "SELECT mapApply((k, v) -> (k, v * 2), map('k1', 1, 'k2', 2))"}},
Documentation::Categories{"Map"},
});

factory.registerFunction<FunctionMapExists>(
{
"The same as arrayExists.",
Documentation::Examples{{"mapExists", "SELECT mapExists((k, v) -> v = 1, map('k1', 1, 'k2', 2))"}},
Documentation::Categories{"Map"},
});

factory.registerFunction<FunctionMapAll>(
{
"The same as arrayAll.",
Documentation::Examples{{"mapAll", "SELECT mapAll((k, v) -> v = 1, map('k1', 1, 'k2', 2))"}},
Documentation::Categories{"Map"},
});

factory.registerFunction<FunctionMapSort>(
{
"The same as arraySort.",
Documentation::Examples{{"mapSort", "SELECT mapSort((k, v) -> v, map('k1', 3, 'k2', 1, 'k3', 2))"}},
Documentation::Categories{"Map"},
});

factory.registerFunction<FunctionMapReverseSort>(
{
"The same as arrayReverseSort.",
Documentation::Examples{{"mapReverseSort", "SELECT mapReverseSort((k, v) -> v, map('k1', 3, 'k2', 1, 'k3', 2))"}},
Documentation::Categories{"Map"},
});

factory.registerFunction<FunctionMapPartialSort>(
{
"The same as arrayPartialSort.",
Documentation::Examples{{"mapPartialSort", "SELECT mapPartialSort((k, v) -> v, 2, map('k1', 3, 'k2', 1, 'k3', 2))"}},
Documentation::Categories{"Map"},
});

factory.registerFunction<FunctionMapPartialReverseSort>(
{
"The same as arrayPartialReverseSort.",
Documentation::Examples{{"mapPartialReverseSort", "SELECT mapPartialReverseSort((k, v) -> v, 2, map('k1', 3, 'k2', 1, 'k3', 2))"}},
Documentation::Categories{"Map"},
});

factory.registerFunction<FunctionMapContainsKeyLike>(
{
"Checks whether the map contains a key matching the specified LIKE pattern.",
Documentation::Examples{{"mapContainsKeyLike", "SELECT mapContainsKeyLike(map('k1-1', 1, 'k2-1', 2), 'k1%')"}},
Documentation::Categories{"Map"},
});

factory.registerFunction<FunctionMapExtractKeyLike>(
{
"Returns a map with the elements whose key matches the specified pattern.",
Documentation::Examples{{"mapExtractKeyLike", "SELECT mapExtractKeyLike(map('k1-1', 1, 'k2-1', 2), 'k1%')"}},
Documentation::Categories{"Map"},
});
}

}
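The adapter composition above, stripped to its shape: unwrap Map-flavored input into the Array-flavored form the Impl expects, delegate, and wrap the result back. A freestanding sketch of that round trip, with all names illustrative rather than taken from the engine:

#include <iostream>
#include <map>
#include <utility>
#include <vector>

// Illustrative "array function": reverses a vector of key/value pairs.
struct ReversePairsImpl
{
    static std::vector<std::pair<int, int>> execute(std::vector<std::pair<int, int>> v)
    {
        return {v.rbegin(), v.rend()};
    }
};

// Illustrative adapter: unwrap a map into pairs, run Impl, wrap back into a map.
template <typename Impl>
struct MapToArrayAdapter
{
    static std::map<int, int> execute(const std::map<int, int> & m)
    {
        std::vector<std::pair<int, int>> nested(m.begin(), m.end());
        auto result = Impl::execute(std::move(nested));
        return {result.begin(), result.end()};
    }
};

int main()
{
    std::map<int, int> m{{1, 10}, {2, 20}};
    auto out = MapToArrayAdapter<ReversePairsImpl>::execute(m);
    std::cout << out.size() << '\n'; // 2: same entries, round-tripped through the "array" form
}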
@@ -96,9 +96,6 @@ using ArrayAggregateResult = typename ArrayAggregateResultImpl<ArrayElement, ope
template<AggregateOperation aggregate_operation>
struct ArrayAggregateImpl
{
using column_type = ColumnArray;
using data_type = DataTypeArray;

static bool needBoolean() { return false; }
static bool needExpression() { return false; }
static bool needOneArray() { return false; }
@@ -1,90 +1,67 @@
#include <Columns/ColumnsNumber.h>
#include <DataTypes/DataTypesNumber.h>
#include <Functions/array/arrayAll.h>
#include <Functions/FunctionFactory.h>

#include "FunctionArrayMapped.h"

namespace DB
{

namespace ErrorCodes
{
extern const int ILLEGAL_COLUMN;
}

/** arrayAll(x1,...,xn -> expression, array1,...,arrayn) - is the expression true for all elements of the array.
 * An overload of the form f(array) is available, which works in the same way as f(x -> x, array).
 */
struct ArrayAllImpl
ColumnPtr ArrayAllImpl::execute(const ColumnArray & array, ColumnPtr mapped)
{
using column_type = ColumnArray;
using data_type = DataTypeArray;
const ColumnUInt8 * column_filter = typeid_cast<const ColumnUInt8 *>(&*mapped);

static bool needBoolean() { return true; }
static bool needExpression() { return false; }
static bool needOneArray() { return false; }

static DataTypePtr getReturnType(const DataTypePtr & /*expression_return*/, const DataTypePtr & /*array_element*/)
if (!column_filter)
{
return std::make_shared<DataTypeUInt8>();
const auto * column_filter_const = checkAndGetColumnConst<ColumnUInt8>(&*mapped);

if (!column_filter_const)
throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Unexpected type of filter column");

if (column_filter_const->getValue<UInt8>())
return DataTypeUInt8().createColumnConst(array.size(), 1u);
else
{
const IColumn::Offsets & offsets = array.getOffsets();
auto out_column = ColumnUInt8::create(offsets.size());
ColumnUInt8::Container & out_all = out_column->getData();

size_t pos = 0;
for (size_t i = 0; i < offsets.size(); ++i)
{
out_all[i] = offsets[i] == pos;
pos = offsets[i];
}

return out_column;
}
}

static ColumnPtr execute(const ColumnArray & array, ColumnPtr mapped)
const IColumn::Filter & filter = column_filter->getData();
const IColumn::Offsets & offsets = array.getOffsets();
auto out_column = ColumnUInt8::create(offsets.size());
ColumnUInt8::Container & out_all = out_column->getData();

size_t pos = 0;
for (size_t i = 0; i < offsets.size(); ++i)
{
const ColumnUInt8 * column_filter = typeid_cast<const ColumnUInt8 *>(&*mapped);

if (!column_filter)
UInt8 all = 1;
for (; pos < offsets[i]; ++pos)
{
const auto * column_filter_const = checkAndGetColumnConst<ColumnUInt8>(&*mapped);

if (!column_filter_const)
throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Unexpected type of filter column");

if (column_filter_const->getValue<UInt8>())
return DataTypeUInt8().createColumnConst(array.size(), 1u);
else
if (!filter[pos])
{
const IColumn::Offsets & offsets = array.getOffsets();
auto out_column = ColumnUInt8::create(offsets.size());
ColumnUInt8::Container & out_all = out_column->getData();

size_t pos = 0;
for (size_t i = 0; i < offsets.size(); ++i)
{
out_all[i] = offsets[i] == pos;
pos = offsets[i];
}

return out_column;
all = 0;
pos = offsets[i];
break;
}
}

const IColumn::Filter & filter = column_filter->getData();
const IColumn::Offsets & offsets = array.getOffsets();
auto out_column = ColumnUInt8::create(offsets.size());
ColumnUInt8::Container & out_all = out_column->getData();

size_t pos = 0;
for (size_t i = 0; i < offsets.size(); ++i)
{
UInt8 all = 1;
for (; pos < offsets[i]; ++pos)
{
if (!filter[pos])
{
all = 0;
pos = offsets[i];
break;
}
}
out_all[i] = all;
}

return out_column;
out_all[i] = all;
}
};

struct NameArrayAll { static constexpr auto name = "arrayAll"; };
using FunctionArrayAll = FunctionArrayMapped<ArrayAllImpl, NameArrayAll>;
return out_column;
}

REGISTER_FUNCTION(ArrayAll)
{
@@ -92,5 +69,3 @@ REGISTER_FUNCTION(ArrayAll)
}

}
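The rewritten ArrayAllImpl::execute is an offsets-driven fold over a flat filter column. A condensed standalone model of the same logic, with plain vectors standing in for IColumn:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Toy model of the offsets-based fold: offsets[i] is the end position of row i
// in the flat filter; a row is "all" if no element in its range is zero.
std::vector<uint8_t> arrayAll(const std::vector<uint8_t> & filter,
                              const std::vector<uint64_t> & offsets)
{
    std::vector<uint8_t> out(offsets.size());
    uint64_t pos = 0;
    for (size_t i = 0; i < offsets.size(); ++i)
    {
        uint8_t all = 1;
        for (; pos < offsets[i]; ++pos)
        {
            if (!filter[pos])
            {
                all = 0;
                pos = offsets[i]; // skip the rest of this row
                break;
            }
        }
        out[i] = all;
    }
    return out;
}

int main()
{
    // Two rows: [1,1] and [1,0] -> results 1, 0.
    auto res = arrayAll({1, 1, 1, 0}, {2, 4});
    std::cout << int(res[0]) << ' ' << int(res[1]) << '\n';
}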
src/Functions/array/arrayAll.h (new file, 30 lines)
@@ -0,0 +1,30 @@
#pragma once

#include <Columns/ColumnsNumber.h>
#include <DataTypes/DataTypesNumber.h>
#include "FunctionArrayMapped.h"

namespace DB
{

/** arrayAll(x1,...,xn -> expression, array1,...,arrayn) - is the expression true for all elements of the array.
 * An overload of the form f(array) is available, which works in the same way as f(x -> x, array).
 */
struct ArrayAllImpl
{
static bool needBoolean() { return true; }
static bool needExpression() { return false; }
static bool needOneArray() { return false; }

static DataTypePtr getReturnType(const DataTypePtr & /*expression_return*/, const DataTypePtr & /*array_element*/)
{
return std::make_shared<DataTypeUInt8>();
}

static ColumnPtr execute(const ColumnArray & array, ColumnPtr mapped);
};

struct NameArrayAll { static constexpr auto name = "arrayAll"; };
using FunctionArrayAll = FunctionArrayMapped<ArrayAllImpl, NameArrayAll>;

}
@@ -19,9 +19,6 @@ namespace ErrorCodes

struct ArrayCompactImpl
{
using column_type = ColumnArray;
using data_type = DataTypeArray;

static bool needBoolean() { return false; }
static bool needExpression() { return false; }
static bool needOneArray() { return false; }
@@ -1,4 +1,4 @@
#include <Functions/IFunction.h>
#include <Functions/array/arrayConcat.h>
#include <Functions/FunctionFactory.h>
#include <Functions/GatherUtils/GatherUtils.h>
#include <DataTypes/DataTypeArray.h>
@@ -6,10 +6,8 @@
#include <Interpreters/castColumn.h>
#include <Columns/ColumnArray.h>
#include <Columns/ColumnConst.h>
#include <Common/typeid_cast.h>
#include <base/range.h>


namespace DB
{

@@ -20,84 +18,66 @@ namespace ErrorCodes
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
}


/// arrayConcat(arr1, ...) - concatenate arrays.
class FunctionArrayConcat : public IFunction
DataTypePtr FunctionArrayConcat::getReturnTypeImpl(const DataTypes & arguments) const
{
public:
static constexpr auto name = "arrayConcat";
static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionArrayConcat>(); }
if (arguments.empty())
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} requires at least one argument.", getName());

String getName() const override { return name; }

bool isVariadic() const override { return true; }
size_t getNumberOfArguments() const override { return 0; }
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }

DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
for (auto i : collections::range(0, arguments.size()))
{
if (arguments.empty())
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} requires at least one argument.", getName());

for (auto i : collections::range(0, arguments.size()))
{
const auto * array_type = typeid_cast<const DataTypeArray *>(arguments[i].get());
if (!array_type)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Argument {} for function {} must be an array but it has type {}.",
i, getName(), arguments[i]->getName());
}

return getLeastSupertype(arguments);
const auto * array_type = typeid_cast<const DataTypeArray *>(arguments[i].get());
if (!array_type)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Argument {} for function {} must be an array but it has type {}.",
i, getName(), arguments[i]->getName());
}

ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override
return getLeastSupertype(arguments);
}

ColumnPtr FunctionArrayConcat::executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const
{
if (result_type->onlyNull())
return result_type->createColumnConstWithDefaultValue(input_rows_count);

size_t rows = input_rows_count;
size_t num_args = arguments.size();

Columns preprocessed_columns(num_args);

for (size_t i = 0; i < num_args; ++i)
{
if (result_type->onlyNull())
return result_type->createColumnConstWithDefaultValue(input_rows_count);
const ColumnWithTypeAndName & arg = arguments[i];
ColumnPtr preprocessed_column = arg.column;

size_t rows = input_rows_count;
size_t num_args = arguments.size();
if (!arg.type->equals(*result_type))
preprocessed_column = castColumn(arg, result_type);

Columns preprocessed_columns(num_args);

for (size_t i = 0; i < num_args; ++i)
{
const ColumnWithTypeAndName & arg = arguments[i];
ColumnPtr preprocessed_column = arg.column;

if (!arg.type->equals(*result_type))
preprocessed_column = castColumn(arg, result_type);

preprocessed_columns[i] = std::move(preprocessed_column);
}

std::vector<std::unique_ptr<GatherUtils::IArraySource>> sources;

for (auto & argument_column : preprocessed_columns)
{
bool is_const = false;

if (const auto * argument_column_const = typeid_cast<const ColumnConst *>(argument_column.get()))
{
is_const = true;
argument_column = argument_column_const->getDataColumnPtr();
}

if (const auto * argument_column_array = typeid_cast<const ColumnArray *>(argument_column.get()))
sources.emplace_back(GatherUtils::createArraySource(*argument_column_array, is_const, rows));
else
throw Exception(ErrorCodes::LOGICAL_ERROR, "Arguments for function {} must be arrays.", getName());
}

auto sink = GatherUtils::concat(sources);

return sink;
preprocessed_columns[i] = std::move(preprocessed_column);
}

bool useDefaultImplementationForConstants() const override { return true; }
};
std::vector<std::unique_ptr<GatherUtils::IArraySource>> sources;

for (auto & argument_column : preprocessed_columns)
{
bool is_const = false;

if (const auto * argument_column_const = typeid_cast<const ColumnConst *>(argument_column.get()))
{
is_const = true;
argument_column = argument_column_const->getDataColumnPtr();
}

if (const auto * argument_column_array = typeid_cast<const ColumnArray *>(argument_column.get()))
sources.emplace_back(GatherUtils::createArraySource(*argument_column_array, is_const, rows));
else
throw Exception(ErrorCodes::LOGICAL_ERROR, "Arguments for function {} must be arrays.", getName());
}

auto sink = GatherUtils::concat(sources);

return sink;
}

REGISTER_FUNCTION(ArrayConcat)
{
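FunctionArrayConcat ultimately stitches offset-encoded array columns row by row through GatherUtils; the row-wise concatenation itself reduces to the following simplified model, using flat data plus end offsets rather than the GatherUtils API:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

struct OffsetArray
{
    std::vector<int> data;         // flattened elements of all rows
    std::vector<uint64_t> offsets; // offsets[i] = end of row i in data
};

// Concatenate two columns row by row: row i of the result is
// (row i of a) followed by (row i of b).
OffsetArray concatRows(const OffsetArray & a, const OffsetArray & b)
{
    OffsetArray out;
    uint64_t a_pos = 0, b_pos = 0;
    for (size_t i = 0; i < a.offsets.size(); ++i)
    {
        for (; a_pos < a.offsets[i]; ++a_pos)
            out.data.push_back(a.data[a_pos]);
        for (; b_pos < b.offsets[i]; ++b_pos)
            out.data.push_back(b.data[b_pos]);
        out.offsets.push_back(out.data.size());
    }
    return out;
}

int main()
{
    OffsetArray a{{1, 2, 3}, {2, 3}}; // rows: [1,2], [3]
    OffsetArray b{{4, 5}, {1, 2}};    // rows: [4], [5]
    auto c = concatRows(a, b);
    for (int x : c.data)
        std::cout << x << ' '; // 1 2 4 3 5
    std::cout << '\n';
}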
|
29
src/Functions/array/arrayConcat.h
Normal file
29
src/Functions/array/arrayConcat.h
Normal file
@ -0,0 +1,29 @@
|
||||
#pragma once

#include <Functions/IFunction.h>
#include <Interpreters/Context_fwd.h>

namespace DB
{

/// arrayConcat(arr1, ...) - concatenate arrays.
class FunctionArrayConcat : public IFunction
{
public:
    static constexpr auto name = "arrayConcat";
    static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionArrayConcat>(); }

    String getName() const override { return name; }

    bool isVariadic() const override { return true; }
    size_t getNumberOfArguments() const override { return 0; }
    bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }

    DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override;

    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override;

    bool useDefaultImplementationForConstants() const override { return true; }
};

}
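The pattern here repeats for every function below: the class declaration moves into a new header while the heavy executeImpl definition stays in the .cpp, plausibly so that other translation units can name the function class without dragging in implementation headers such as GatherUtils. For context, the flattened layout that the array sources above iterate, one contiguous element buffer plus cumulative per-row end offsets, can be shown self-contained; ArrayColumn, concatRows and the int64_t element type are illustrative stand-ins, not ClickHouse types:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// One nested-array column in the flattened layout: all elements of all rows
// in one contiguous buffer, plus the cumulative end offset of each row.
struct ArrayColumn
{
    std::vector<int64_t> data;
    std::vector<size_t> offsets; // offsets[i] = end of row i within `data`
};

// Row-wise concatenation of two array columns, the operation that
// GatherUtils::concat performs generically (assumes equal row counts).
ArrayColumn concatRows(const ArrayColumn & a, const ArrayColumn & b)
{
    ArrayColumn out;
    size_t a_pos = 0;
    size_t b_pos = 0;
    for (size_t row = 0; row < a.offsets.size(); ++row)
    {
        for (; a_pos < a.offsets[row]; ++a_pos)
            out.data.push_back(a.data[a_pos]);
        for (; b_pos < b.offsets[row]; ++b_pos)
            out.data.push_back(b.data[b_pos]);
        out.offsets.push_back(out.data.size());
    }
    return out;
}

int main()
{
    ArrayColumn a{{1, 2, 3}, {2, 3}}; // rows: [1, 2] and [3]
    ArrayColumn b{{4, 5}, {1, 2}};    // rows: [4] and [5]
    ArrayColumn c = concatRows(a, b); // rows: [1, 2, 4] and [3, 5]
    for (size_t offset : c.offsets)
        std::cout << offset << ' ';   // prints: 3 5
}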
@@ -17,9 +17,6 @@ namespace ErrorCodes
  */
 struct ArrayCountImpl
 {
-    using column_type = ColumnArray;
-    using data_type = DataTypeArray;
-
     static bool needBoolean() { return true; }
     static bool needExpression() { return false; }
     static bool needOneArray() { return false; }
@@ -18,9 +18,6 @@ namespace ErrorCodes
 
 struct ArrayCumSumImpl
 {
-    using column_type = ColumnArray;
-    using data_type = DataTypeArray;
-
     static bool needBoolean() { return false; }
     static bool needExpression() { return false; }
     static bool needOneArray() { return false; }
@@ -19,9 +19,6 @@ namespace ErrorCodes
  */
 struct ArrayCumSumNonNegativeImpl
 {
-    using column_type = ColumnArray;
-    using data_type = DataTypeArray;
-
     static bool needBoolean() { return false; }
     static bool needExpression() { return false; }
     static bool needOneArray() { return false; }
@@ -21,9 +21,6 @@ namespace ErrorCodes
  */
 struct ArrayDifferenceImpl
 {
-    using column_type = ColumnArray;
-    using data_type = DataTypeArray;
-
     static bool needBoolean() { return false; }
     static bool needExpression() { return false; }
     static bool needOneArray() { return false; }
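Each of these hunks deletes the same pair of aliases from an Impl struct consumed by FunctionArrayMapped (matching hunks for arrayFill, arrayFirstLast, arrayFirstLastIndex and arraySplit appear further down). The aliases evidently became redundant, which suggests the host template now assumes or derives ColumnArray/DataTypeArray itself. A hypothetical sketch of such a default-with-override mechanism, not the actual FunctionArrayMapped code:

#include <type_traits>

// Stand-in for the real ClickHouse type; data_type would get the same treatment.
struct ColumnArray {};

// Default: impls that say nothing get ColumnArray.
template <typename T, typename = void>
struct ColumnTypeOf { using type = ColumnArray; };

// Override: impls that still declare `using column_type = ...;` keep winning.
template <typename T>
struct ColumnTypeOf<T, std::void_t<typename T::column_type>>
{
    using type = typename T::column_type;
};

struct PlainImpl {};                          // relies on the default
struct MapLikeImpl { using column_type = int; }; // overrides explicitly

static_assert(std::is_same_v<ColumnTypeOf<PlainImpl>::type, ColumnArray>, "default applies");
static_assert(std::is_same_v<ColumnTypeOf<MapLikeImpl>::type, int>, "override wins");

int main() {}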
@@ -1,91 +1,67 @@
#include <Columns/ColumnsNumber.h>
#include <DataTypes/DataTypesNumber.h>
#include <Functions/array/arrayExists.h>
#include <Functions/FunctionFactory.h>


namespace DB
{

namespace ErrorCodes
{
    extern const int ILLEGAL_COLUMN;
}

ColumnPtr ArrayExistsImpl::execute(const ColumnArray & array, ColumnPtr mapped)
{
    const ColumnUInt8 * column_filter = typeid_cast<const ColumnUInt8 *>(&*mapped);

    if (!column_filter)
    {
        const auto * column_filter_const = checkAndGetColumnConst<ColumnUInt8>(&*mapped);

        if (!column_filter_const)
            throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Unexpected type of filter column");

        if (column_filter_const->getValue<UInt8>())
        {
            const IColumn::Offsets & offsets = array.getOffsets();
            auto out_column = ColumnUInt8::create(offsets.size());
            ColumnUInt8::Container & out_exists = out_column->getData();

            size_t pos = 0;
            for (size_t i = 0; i < offsets.size(); ++i)
            {
                out_exists[i] = offsets[i] - pos > 0;
                pos = offsets[i];
            }

            return out_column;
        }
        else
            return DataTypeUInt8().createColumnConst(array.size(), 0u);
    }

    const IColumn::Filter & filter = column_filter->getData();
    const IColumn::Offsets & offsets = array.getOffsets();
    auto out_column = ColumnUInt8::create(offsets.size());
    ColumnUInt8::Container & out_exists = out_column->getData();

    size_t pos = 0;
    for (size_t i = 0; i < offsets.size(); ++i)
    {
        UInt8 exists = 0;
        for (; pos < offsets[i]; ++pos)
        {
            if (filter[pos])
            {
                exists = 1;
                pos = offsets[i];
                break;
            }
        }
        out_exists[i] = exists;
    }

    return out_column;
}

REGISTER_FUNCTION(ArrayExists)
{

@@ -93,5 +69,3 @@ REGISTER_FUNCTION(ArrayExists)
 }
 
 }
31
src/Functions/array/arrayExists.h
Normal file
@@ -0,0 +1,31 @@
#pragma once

#include <Columns/ColumnsNumber.h>
#include <DataTypes/DataTypesNumber.h>
#include "FunctionArrayMapped.h"


namespace DB
{

/** arrayExists(x1,...,xn -> expression, array1,...,arrayn) - is the expression true for at least one array element.
  * An overload of the form f(array) is available, which works in the same way as f(x -> x, array).
  */
struct ArrayExistsImpl
{
    static bool needBoolean() { return true; }
    static bool needExpression() { return false; }
    static bool needOneArray() { return false; }

    static DataTypePtr getReturnType(const DataTypePtr & /*expression_return*/, const DataTypePtr & /*array_element*/)
    {
        return std::make_shared<DataTypeUInt8>();
    }

    static ColumnPtr execute(const ColumnArray & array, ColumnPtr mapped);
};

struct NameArrayExists { static constexpr auto name = "arrayExists"; };
using FunctionArrayExists = FunctionArrayMapped<ArrayExistsImpl, NameArrayExists>;

}
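The non-constant branch of ArrayExistsImpl::execute, declared above and defined in the .cpp, scans each row's slice of the flattened filter column and stops at the first set flag. The same loop in a self-contained form (plain std::vector stand-ins instead of ClickHouse columns):

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// `filter` holds one flag per flattened element, `offsets` holds the
// cumulative end of each row; the result is one flag per row.
std::vector<uint8_t> arrayExists(const std::vector<uint8_t> & filter, const std::vector<size_t> & offsets)
{
    std::vector<uint8_t> out(offsets.size());
    size_t pos = 0;
    for (size_t i = 0; i < offsets.size(); ++i)
    {
        uint8_t exists = 0;
        for (; pos < offsets[i]; ++pos)
        {
            if (filter[pos])
            {
                exists = 1;
                pos = offsets[i]; // early exit: skip the rest of this row
                break;
            }
        }
        out[i] = exists;
    }
    return out;
}

int main()
{
    // Two rows, [0, 1] and [0, 0] -> exists flags 1 and 0.
    for (uint8_t flag : arrayExists({0, 1, 0, 0}, {2, 4}))
        std::cout << int(flag) << ' ';
}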
@@ -20,9 +20,6 @@ namespace ErrorCodes
 template <bool reverse>
 struct ArrayFillImpl
 {
-    using column_type = ColumnArray;
-    using data_type = DataTypeArray;
-
     static bool needBoolean() { return true; }
     static bool needExpression() { return true; }
     static bool needOneArray() { return false; }
@@ -1,78 +1,54 @@
#include <Columns/ColumnsNumber.h>
#include <DataTypes/DataTypesNumber.h>
#include <Functions/array/arrayFilter.h>
#include <Functions/FunctionFactory.h>


namespace DB
{

namespace ErrorCodes
{
    extern const int ILLEGAL_COLUMN;
}

ColumnPtr ArrayFilterImpl::execute(const ColumnArray & array, ColumnPtr mapped)
{
    const ColumnUInt8 * column_filter = typeid_cast<const ColumnUInt8 *>(&*mapped);

    if (!column_filter)
    {
        const auto * column_filter_const = checkAndGetColumnConst<ColumnUInt8>(&*mapped);

        if (!column_filter_const)
            throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Unexpected type of filter column");

        if (column_filter_const->getValue<UInt8>())
            return array.clone();
        else
            return ColumnArray::create(
                array.getDataPtr()->cloneEmpty(),
                ColumnArray::ColumnOffsets::create(array.size(), 0));
    }

    const IColumn::Filter & filter = column_filter->getData();
    ColumnPtr filtered = array.getData().filter(filter, -1);

    const IColumn::Offsets & in_offsets = array.getOffsets();
    auto column_offsets = ColumnArray::ColumnOffsets::create(in_offsets.size());
    IColumn::Offsets & out_offsets = column_offsets->getData();

    size_t in_pos = 0;
    size_t out_pos = 0;
    for (size_t i = 0; i < in_offsets.size(); ++i)
    {
        for (; in_pos < in_offsets[i]; ++in_pos)
        {
            if (filter[in_pos])
                ++out_pos;
        }
        out_offsets[i] = out_pos;
    }

    return ColumnArray::create(filtered, std::move(column_offsets));
}

REGISTER_FUNCTION(ArrayFilter)
{

@@ -80,5 +56,3 @@ REGISTER_FUNCTION(ArrayFilter)
 }
 
 }
31
src/Functions/array/arrayFilter.h
Normal file
@@ -0,0 +1,31 @@
#pragma once

#include <Columns/ColumnsNumber.h>
#include <DataTypes/DataTypesNumber.h>
#include "FunctionArrayMapped.h"


namespace DB
{

/** arrayFilter(x -> predicate, array) - leave in the array only the elements for which the expression is true.
  */
struct ArrayFilterImpl
{
    static bool needBoolean() { return true; }
    static bool needExpression() { return true; }
    static bool needOneArray() { return false; }

    static DataTypePtr getReturnType(const DataTypePtr & /*expression_return*/, const DataTypePtr & array_element)
    {
        return std::make_shared<DataTypeArray>(array_element);
    }

    /// If there are several arrays, the first one is passed here.
    static ColumnPtr execute(const ColumnArray & array, ColumnPtr mapped);
};

struct NameArrayFilter { static constexpr auto name = "arrayFilter"; };
using FunctionArrayFilter = FunctionArrayMapped<ArrayFilterImpl, NameArrayFilter>;

}
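ArrayFilterImpl::execute filters the flat element data in a single pass and then rebuilds the per-row offsets as a running count of surviving elements. A self-contained version of that offsets recomputation (stand-in types, not ClickHouse columns):

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

struct ArrayColumn
{
    std::vector<int64_t> data;
    std::vector<size_t> offsets;
};

// Keep only the elements whose filter flag is set; each row's new end offset
// is the running count of kept elements.
ArrayColumn arrayFilter(const ArrayColumn & in, const std::vector<uint8_t> & filter)
{
    ArrayColumn out;
    out.offsets.resize(in.offsets.size());
    size_t in_pos = 0;
    size_t out_pos = 0;
    for (size_t i = 0; i < in.offsets.size(); ++i)
    {
        for (; in_pos < in.offsets[i]; ++in_pos)
        {
            if (filter[in_pos])
            {
                out.data.push_back(in.data[in_pos]);
                ++out_pos;
            }
        }
        out.offsets[i] = out_pos;
    }
    return out;
}

int main()
{
    // Rows [10, 20] and [30]; keep 20 and 30 -> rows [20] and [30].
    ArrayColumn filtered = arrayFilter({{10, 20, 30}, {2, 3}}, {0, 1, 1});
    for (int64_t v : filtered.data)
        std::cout << v << ' '; // prints: 20 30
}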
@@ -28,9 +28,6 @@ enum class ArrayFirstLastElementNotExistsStrategy : uint8_t
 template <ArrayFirstLastStrategy strategy, ArrayFirstLastElementNotExistsStrategy element_not_exists_strategy>
 struct ArrayFirstLastImpl
 {
-    using column_type = ColumnArray;
-    using data_type = DataTypeArray;
-
     static bool needBoolean() { return false; }
     static bool needExpression() { return true; }
     static bool needOneArray() { return false; }
@@ -21,9 +21,6 @@ enum class ArrayFirstLastIndexStrategy
 template <ArrayFirstLastIndexStrategy strategy>
 struct ArrayFirstLastIndexImpl
 {
-    using column_type = ColumnArray;
-    using data_type = DataTypeArray;
-
     static bool needBoolean() { return false; }
     static bool needExpression() { return true; }
     static bool needOneArray() { return false; }
@@ -1,44 +1,12 @@
#include <Functions/array/arrayMap.h>
#include <Functions/FunctionFactory.h>

namespace DB
{

REGISTER_FUNCTION(ArrayMap)
{
    factory.registerFunction<FunctionArrayMap>();
}

}
33
src/Functions/array/arrayMap.h
Normal file
@@ -0,0 +1,33 @@
#pragma once
#include "FunctionArrayMapped.h"


namespace DB
{

/** arrayMap(x1, ..., xn -> expression, array1, ..., arrayn) - apply the expression to each element of the array (or set of parallel arrays).
  */
struct ArrayMapImpl
{
    /// true if the expression (for an overload of f(expression, arrays)) or an array (for f(array)) should be boolean.
    static bool needBoolean() { return false; }
    /// true if the f(array) overload is unavailable.
    static bool needExpression() { return true; }
    /// true if the array must be exactly one.
    static bool needOneArray() { return false; }

    static DataTypePtr getReturnType(const DataTypePtr & expression_return, const DataTypePtr & /*array_element*/)
    {
        return std::make_shared<DataTypeArray>(expression_return);
    }

    static ColumnPtr execute(const ColumnArray & array, ColumnPtr mapped)
    {
        return ColumnArray::create(mapped->convertToFullColumnIfConst(), array.getOffsetsPtr());
    }
};

struct NameArrayMap { static constexpr auto name = "arrayMap"; };
using FunctionArrayMap = FunctionArrayMapped<ArrayMapImpl, NameArrayMap>;

}
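ArrayMapImpl::execute can stay a one-liner because the lambda has already been evaluated over the flattened elements by the time it runs; all that remains is to reattach the input column's offsets. A self-contained sketch of the idea (stand-in types):

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

struct ArrayColumn
{
    std::vector<int64_t> data;
    std::vector<size_t> offsets;
};

// Apply f elementwise over the flat data; row boundaries are untouched.
template <typename F>
ArrayColumn arrayMap(const ArrayColumn & in, F f)
{
    ArrayColumn out;
    out.data.reserve(in.data.size());
    for (int64_t v : in.data)
        out.data.push_back(f(v));
    out.offsets = in.offsets; // reuse the input offsets unchanged
    return out;
}

int main()
{
    // Rows [1, 2] and [3] -> rows [2, 4] and [6].
    ArrayColumn doubled = arrayMap({{1, 2, 3}, {2, 3}}, [](int64_t v) { return v * 2; });
    for (int64_t v : doubled.data)
        std::cout << v << ' '; // prints: 2 4 6
}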
@@ -1,140 +1,85 @@
#include <Functions/array/arraySort.h>
#include <Functions/FunctionFactory.h>
#include <base/sort.h>

namespace DB
{

namespace
{

template <bool positive>
struct Less
{
    const IColumn & column;

    explicit Less(const IColumn & column_) : column(column_) { }

    bool operator()(size_t lhs, size_t rhs) const
    {
        if constexpr (positive)
            return column.compareAt(lhs, rhs, column, 1) < 0;
        else
            return column.compareAt(lhs, rhs, column, -1) > 0;
    }
};

}

template <bool positive, bool is_partial>
ColumnPtr ArraySortImpl<positive, is_partial>::execute(
    const ColumnArray & array,
    ColumnPtr mapped,
    const ColumnWithTypeAndName * fixed_arguments)
{
    [[maybe_unused]] const auto limit = [&]() -> size_t
    {
        if constexpr (is_partial)
        {
            if (!fixed_arguments)
                throw Exception(
                    ErrorCodes::LOGICAL_ERROR,
                    "Expected fixed arguments to get the limit for partial array sort"
                );
            return fixed_arguments[0].column.get()->getUInt(0);
        }
        return 0;
    }();

    const ColumnArray::Offsets & offsets = array.getOffsets();

    size_t size = offsets.size();
    size_t nested_size = array.getData().size();
    IColumn::Permutation permutation(nested_size);

    for (size_t i = 0; i < nested_size; ++i)
        permutation[i] = i;

    ColumnArray::Offset current_offset = 0;
    for (size_t i = 0; i < size; ++i)
    {
        auto next_offset = offsets[i];
        if constexpr (is_partial)
        {
            if (limit)
            {
                const auto effective_limit = std::min<size_t>(limit, next_offset - current_offset);
                ::partial_sort(&permutation[current_offset], &permutation[current_offset + effective_limit], &permutation[next_offset], Less<positive>(*mapped));
            }
            else
                ::sort(&permutation[current_offset], &permutation[next_offset], Less<positive>(*mapped));
        }
        else
            ::sort(&permutation[current_offset], &permutation[next_offset], Less<positive>(*mapped));
        current_offset = next_offset;
    }

    return ColumnArray::create(array.getData().permute(permutation, 0), array.getOffsetsPtr());
}

REGISTER_FUNCTION(ArraySort)
{
79
src/Functions/array/arraySort.h
Normal file
@@ -0,0 +1,79 @@
#pragma once

#include "FunctionArrayMapped.h"
#include <base/sort.h>

namespace DB
{

namespace ErrorCodes
{
    extern const int ILLEGAL_TYPE_OF_ARGUMENT;
    extern const int LOGICAL_ERROR;
}


/** Sort arrays, by values of its elements, or by values of corresponding elements of calculated expression (known as "schwartzsort").
  */
template <bool positive, bool is_partial>
struct ArraySortImpl
{
    static constexpr auto num_fixed_params = is_partial;

    static bool needBoolean() { return false; }
    static bool needExpression() { return false; }
    static bool needOneArray() { return false; }

    static DataTypePtr getReturnType(const DataTypePtr & /*expression_return*/, const DataTypePtr & array_element)
    {
        return std::make_shared<DataTypeArray>(array_element);
    }

    static void checkArguments(
        const String & name,
        const ColumnWithTypeAndName * fixed_arguments)
        requires(num_fixed_params)
    {
        if (!fixed_arguments)
            throw Exception(
                ErrorCodes::LOGICAL_ERROR,
                "Expected fixed arguments to get the limit for partial array sort");

        WhichDataType which(fixed_arguments[0].type.get());
        if (!which.isUInt() && !which.isInt())
            throw Exception(
                ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
                "Illegal type {} of limit argument of function {} (must be UInt or Int)",
                fixed_arguments[0].type->getName(),
                name);
    }

    static ColumnPtr execute(
        const ColumnArray & array,
        ColumnPtr mapped,
        const ColumnWithTypeAndName * fixed_arguments [[maybe_unused]] = nullptr);
};

struct NameArraySort
{
    static constexpr auto name = "arraySort";
};
struct NameArrayReverseSort
{
    static constexpr auto name = "arrayReverseSort";
};
struct NameArrayPartialSort
{
    static constexpr auto name = "arrayPartialSort";
};
struct NameArrayPartialReverseSort
{
    static constexpr auto name = "arrayPartialReverseSort";
};

using FunctionArraySort = FunctionArrayMapped<ArraySortImpl<true, false>, NameArraySort>;
using FunctionArrayReverseSort = FunctionArrayMapped<ArraySortImpl<false, false>, NameArrayReverseSort>;
using FunctionArrayPartialSort = FunctionArrayMapped<ArraySortImpl<true, true>, NameArrayPartialSort>;
using FunctionArrayPartialReverseSort = FunctionArrayMapped<ArraySortImpl<false, true>, NameArrayPartialReverseSort>;

}
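ArraySortImpl::execute implements the "schwartzsort" mentioned in the comment: it sorts a permutation of element indices per row, using the mapped expression column as the key, and moves the element data only once at the end; the partial variants order just the first limit positions of each row. A self-contained sketch with stand-in types:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

struct ArrayColumn
{
    std::vector<int64_t> data;
    std::vector<size_t> offsets;
};

// Sort each row's indices by a key column, then permute the elements once.
// A non-zero `limit` orders only the first `limit` positions per row,
// mirroring arrayPartialSort.
ArrayColumn arraySortByKeys(const ArrayColumn & in, const std::vector<int64_t> & keys, size_t limit = 0)
{
    std::vector<size_t> permutation(in.data.size());
    for (size_t i = 0; i < permutation.size(); ++i)
        permutation[i] = i;

    auto less = [&](size_t lhs, size_t rhs) { return keys[lhs] < keys[rhs]; };

    size_t current = 0;
    for (size_t end : in.offsets)
    {
        if (limit && limit < end - current)
            std::partial_sort(permutation.begin() + current,
                              permutation.begin() + current + limit,
                              permutation.begin() + end, less);
        else
            std::sort(permutation.begin() + current, permutation.begin() + end, less);
        current = end;
    }

    ArrayColumn out{{}, in.offsets};
    for (size_t index : permutation)
        out.data.push_back(in.data[index]);
    return out;
}

int main()
{
    // One row [30, 10, 20], sorted by its own values -> [10, 20, 30].
    ArrayColumn sorted = arraySortByKeys({{30, 10, 20}, {3}}, {30, 10, 20});
    for (int64_t v : sorted.data)
        std::cout << v << ' ';
}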
@@ -15,9 +15,6 @@ namespace ErrorCodes
 template <bool reverse>
 struct ArraySplitImpl
 {
-    using column_type = ColumnArray;
-    using data_type = DataTypeArray;
-
     static bool needBoolean() { return true; }
     static bool needExpression() { return true; }
     static bool needOneArray() { return false; }
@@ -16,8 +16,8 @@ template <typename A, typename B>
 struct BitAndImpl
 {
     using ResultType = typename NumberTraits::ResultOfBit<A, B>::Type;
-    static constexpr const bool allow_fixed_string = true;
-    static const constexpr bool allow_string_integer = false;
+    static constexpr bool allow_fixed_string = true;
+    static constexpr bool allow_string_integer = false;
 
     template <typename Result = ResultType>
     static inline Result apply(A a, B b)
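This hunk and the remaining ones below are trait cleanups on the arithmetic Impl structs: the redundant const next to constexpr is dropped in most places, and several structs replace the allow_fixed_string/allow_string_integer pair with a single allow_string_or_fixed_string flag. The qualifier change is purely cosmetic, since constexpr on a variable already implies const; a minimal demonstration:

#include <type_traits>

// `constexpr bool` and `const constexpr bool` declare the exact same type.
struct Traits
{
    static constexpr bool allow_fixed_string = true;
    static const constexpr bool allow_string_integer = false; // legacy spelling
};

static_assert(std::is_same_v<decltype(Traits::allow_fixed_string),
                             decltype(Traits::allow_string_integer)>,
              "both members have type const bool");

int main() {}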
@@ -6,15 +6,11 @@
 namespace DB
 {
 
-namespace
-{
-
 template <typename A>
 struct BitCountImpl
 {
     using ResultType = UInt8;
-    static constexpr bool allow_fixed_string = false;
-    static const constexpr bool allow_string_integer = false;
+    static constexpr bool allow_string_or_fixed_string = true;
 
     static inline ResultType apply(A a)
     {

@@ -41,8 +37,6 @@ struct BitCountImpl
 struct NameBitCount { static constexpr auto name = "bitCount"; };
 using FunctionBitCount = FunctionUnaryArithmetic<BitCountImpl, NameBitCount, false /* is injective */>;
 
-}
-
 /// The function has no ranges of monotonicity.
 template <> struct FunctionUnaryArithmeticMonotonicity<NameBitCount>
 {
@@ -8,8 +8,8 @@ template <typename A, typename B>
 struct BitHammingDistanceImpl
 {
     using ResultType = UInt8;
-    static const constexpr bool allow_fixed_string = false;
-    static const constexpr bool allow_string_integer = false;
+    static constexpr bool allow_fixed_string = false;
+    static constexpr bool allow_string_integer = false;
 
     template <typename Result = ResultType>
     static inline NO_SANITIZE_UNDEFINED Result apply(A a, B b)
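For reference, bitHammingDistance(a, b) counts the bit positions where its two integer arguments differ, i.e. popcount(a XOR b). A self-contained illustration of the semantics, not the ClickHouse implementation:

#include <bitset>
#include <cstdint>
#include <iostream>

// Number of differing bit positions between a and b.
uint8_t bitHammingDistance(uint64_t a, uint64_t b)
{
    return static_cast<uint8_t>(std::bitset<64>(a ^ b).count());
}

int main()
{
    // 111 ^ 121 = 22 = 0b10110, which has 3 set bits.
    std::cout << int(bitHammingDistance(111, 121)) << '\n'; // prints: 3
}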
@@ -17,8 +17,7 @@ template <typename A>
 struct BitNotImpl
 {
     using ResultType = typename NumberTraits::ResultOfBitNot<A>::Type;
-    static const constexpr bool allow_fixed_string = true;
-    static const constexpr bool allow_string_integer = false;
+    static constexpr bool allow_string_or_fixed_string = true;
 
     static inline ResultType NO_SANITIZE_UNDEFINED apply(A a)
     {
@@ -15,8 +15,8 @@ template <typename A, typename B>
 struct BitOrImpl
 {
     using ResultType = typename NumberTraits::ResultOfBit<A, B>::Type;
-    static constexpr const bool allow_fixed_string = true;
-    static const constexpr bool allow_string_integer = false;
+    static constexpr bool allow_fixed_string = true;
+    static constexpr bool allow_string_integer = false;
 
     template <typename Result = ResultType>
     static inline Result apply(A a, B b)
@@ -19,8 +19,7 @@ template <typename A>
 struct BitSwapLastTwoImpl
 {
     using ResultType = UInt8;
-    static constexpr const bool allow_fixed_string = false;
-    static const constexpr bool allow_string_integer = false;
+    static constexpr const bool allow_string_or_fixed_string = false;
 
     static inline ResultType NO_SANITIZE_UNDEFINED apply([[maybe_unused]] A a)
     {
@@ -19,8 +19,7 @@ template <typename A>
 struct BitWrapperFuncImpl
 {
     using ResultType = UInt8;
-    static constexpr const bool allow_fixed_string = false;
-    static const constexpr bool allow_string_integer = false;
+    static constexpr const bool allow_string_or_fixed_string = false;
 
     static inline ResultType NO_SANITIZE_UNDEFINED apply(A a [[maybe_unused]])
     {
@@ -17,8 +17,7 @@ struct FactorialImpl
 {
     using ResultType = UInt64;
     static const constexpr bool allow_decimal = false;
-    static const constexpr bool allow_fixed_string = false;
-    static const constexpr bool allow_string_integer = false;
+    static const constexpr bool allow_string_or_fixed_string = false;
 
     static inline NO_SANITIZE_UNDEFINED ResultType apply(A a)
     {
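FactorialImpl fixes ResultType to UInt64, which bounds the usable domain: 20! = 2432902008176640000 is the largest factorial representable in 64 bits, so an implementation with this result type has to reject or cap larger arguments. A self-contained illustration (argument validation is out of scope here):

#include <cstdint>
#include <iostream>

// Plain iterative factorial; only valid for n <= 20 with a 64-bit result.
uint64_t factorial(unsigned n)
{
    uint64_t result = 1;
    for (unsigned i = 2; i <= n; ++i)
        result *= i;
    return result;
}

int main()
{
    std::cout << factorial(20) << '\n'; // prints: 2432902008176640000
}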
@@ -1,38 +1,8 @@
#include <Functions/identity.h>
#include <Functions/FunctionFactory.h>

namespace DB
{

REGISTER_FUNCTION(Identity)
{
31
src/Functions/identity.h
Normal file
@@ -0,0 +1,31 @@
#pragma once
#include <Functions/IFunction.h>
#include <Interpreters/Context_fwd.h>


namespace DB
{

class FunctionIdentity : public IFunction
{
public:
    static constexpr auto name = "identity";
    static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionIdentity>(); }

    String getName() const override { return name; }
    size_t getNumberOfArguments() const override { return 1; }
    bool isSuitableForConstantFolding() const override { return false; }
    bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }

    DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
    {
        return arguments.front();
    }

    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override
    {
        return arguments.front().column;
    }
};

}
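identity returns its argument column untouched; the interesting parts are the overrides above, isSuitableForConstantFolding() = false in particular, which keep a call from being collapsed to a constant during analysis. In ClickHouse it is commonly used in tests to shield an expression from such optimizations. A trivial stand-in for the contract:

#include <iostream>

// Minimal stand-in for the identity contract: the input comes back unchanged.
// In the class above the interesting part is not the body but the overrides,
// which keep a call like identity(42) alive in the expression tree instead of
// letting it fold to the literal 42.
template <typename T>
const T & identity(const T & value)
{
    return value;
}

int main()
{
    std::cout << identity(42) << '\n'; // prints: 42
}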