Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-14 19:45:11 +00:00)

Merge branch 'master' into cgroup_v2

Commit 88ce78a7be

.github/workflows/pull_request.yml (vendored, 35 changed lines)

@@ -1308,6 +1308,40 @@ jobs:
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH"
  FunctionalStatelessTestReleaseAnalyzer:
    needs: [BuilderDebRelease]
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/stateless_analyzer
          REPORTS_PATH=${{runner.temp}}/reports_dir
          CHECK_NAME=Stateless tests (release, analyzer)
          REPO_COPY=${{runner.temp}}/stateless_analyzer/ClickHouse
          KILL_TIMEOUT=10800
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3
        with:
          path: ${{ env.REPORTS_PATH }}
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
      - name: Functional test
        run: |
          sudo rm -fr "$TEMP_PATH"
          mkdir -p "$TEMP_PATH"
          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
          cd "$REPO_COPY/tests/ci"
          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
      - name: Cleanup
        if: always()
        run: |
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH"
  FunctionalStatelessTestReleaseS3_0:
    needs: [BuilderDebRelease]
    runs-on: [self-hosted, func-tester]
@@ -4755,6 +4789,7 @@ jobs:
      - FunctionalStatelessTestReleaseDatabaseReplicated2
      - FunctionalStatelessTestReleaseDatabaseReplicated3
      - FunctionalStatelessTestReleaseWideParts
      - FunctionalStatelessTestReleaseAnalyzer
      - FunctionalStatelessTestAarch64
      - FunctionalStatelessTestAsan0
      - FunctionalStatelessTestAsan1

CHANGELOG.md (150 changed lines)

@@ -1,4 +1,5 @@
### Table of Contents
**[ClickHouse release v23.4, 2023-04-26](#234)**<br/>
**[ClickHouse release v23.3 LTS, 2023-03-30](#233)**<br/>
**[ClickHouse release v23.2, 2023-02-23](#232)**<br/>
**[ClickHouse release v23.1, 2023-01-25](#231)**<br/>

@@ -6,6 +7,155 @@

# 2023 Changelog

### <a id="234"></a> ClickHouse release 23.4 LTS, 2023-04-26

#### Backward Incompatible Change
* Formatter '%M' in function formatDateTime() now prints the month name instead of the minutes. This makes the behavior consistent with MySQL. The previous behavior can be restored using setting "formatdatetime_parsedatetime_m_is_month_name = 0". [#47246](https://github.com/ClickHouse/ClickHouse/pull/47246) ([Robert Schulze](https://github.com/rschu1ze)).
* This change makes sense only if you are using the virtual filesystem cache. If `path` in the virtual filesystem cache configuration is not empty and is not an absolute path, then it will be put in `<clickhouse server data directory>/caches/<path_from_cache_config>`. [#48784](https://github.com/ClickHouse/ClickHouse/pull/48784) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Primary/secondary indices and sorting keys with identical expressions are now rejected. This behavior can be disabled using setting `allow_suspicious_indices`. [#48536](https://github.com/ClickHouse/ClickHouse/pull/48536) ([凌涛](https://github.com/lingtaolf)).

#### New Feature
* Support new aggregate functions `quantileGK`/`quantilesGK`, similar to [approx_percentile](https://spark.apache.org/docs/latest/api/sql/index.html#approx_percentile) in Spark, based on the Greenwald-Khanna algorithm (see http://infolab.stanford.edu/~datar/courses/cs361a/papers/quantiles.pdf). [#46428](https://github.com/ClickHouse/ClickHouse/pull/46428) ([李扬](https://github.com/taiyang-li)).
* Add a statement `SHOW COLUMNS` which shows distilled information from `system.columns`. [#48017](https://github.com/ClickHouse/ClickHouse/pull/48017) ([Robert Schulze](https://github.com/rschu1ze)).
* Added `LIGHTWEIGHT` and `PULL` modifiers for the `SYSTEM SYNC REPLICA` query. The `LIGHTWEIGHT` version waits only for fetches and drop-ranges (merges and mutations are ignored). The `PULL` version pulls new entries from ZooKeeper and does not wait for them. Fixes [#47794](https://github.com/ClickHouse/ClickHouse/issues/47794). [#48085](https://github.com/ClickHouse/ClickHouse/pull/48085) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Add `kafkaMurmurHash` function for compatibility with Kafka's DefaultPartitioner. Closes [#47834](https://github.com/ClickHouse/ClickHouse/issues/47834). [#48185](https://github.com/ClickHouse/ClickHouse/pull/48185) ([Nikolay Degterinsky](https://github.com/evillique)).
* Allow easily creating a user with the same grants as the current user by using `GRANT CURRENT GRANTS`. [#48262](https://github.com/ClickHouse/ClickHouse/pull/48262) ([pufit](https://github.com/pufit)).
* Add statistical aggregate function `kolmogorovSmirnovTest`. Closes [#48228](https://github.com/ClickHouse/ClickHouse/issues/48228). [#48325](https://github.com/ClickHouse/ClickHouse/pull/48325) ([FFFFFFFHHHHHHH](https://github.com/FFFFFFFHHHHHHH)).
* Added a `lost_part_count` column to the `system.replicas` table. The column value shows the total number of lost parts in the corresponding table. The value is stored in ZooKeeper and can be used instead of the non-persistent `ReplicatedDataLoss` profile event for monitoring. [#48526](https://github.com/ClickHouse/ClickHouse/pull/48526) ([Sergei Trifonov](https://github.com/serxa)).
* Add `soundex` function for compatibility. Closes [#39880](https://github.com/ClickHouse/ClickHouse/issues/39880). [#48567](https://github.com/ClickHouse/ClickHouse/pull/48567) ([FriendLey](https://github.com/FriendLey)).
* Support `Map` type for JSONExtract. [#48629](https://github.com/ClickHouse/ClickHouse/pull/48629) ([李扬](https://github.com/taiyang-li)).
* Add `PrettyJSONEachRow` format to output pretty JSON with newline delimiters and 4-space indents. [#48898](https://github.com/ClickHouse/ClickHouse/pull/48898) ([Kruglov Pavel](https://github.com/Avogar)).
* Add `ParquetMetadata` input format to read Parquet file metadata. [#48911](https://github.com/ClickHouse/ClickHouse/pull/48911) ([Kruglov Pavel](https://github.com/Avogar)).
* Add `extractKeyValuePairs` function to extract key-value pairs from strings. Input strings might contain noise (e.g. log files), i.e. they do not need to be 100% formatted as key-value pairs; the algorithm looks for key-value pairs matching the arguments passed to the function. As of now, the function accepts the following arguments: `data_column` (mandatory), `key_value_pair_delimiter` (defaults to `:`), `pair_delimiters` (defaults to `\space \, \;`) and `quoting_character` (defaults to double quotes). [#43606](https://github.com/ClickHouse/ClickHouse/pull/43606) ([Arthur Passos](https://github.com/arthurpassos)).
* Functions `replaceOne()`, `replaceAll()`, `replaceRegexpOne()` and `replaceRegexpAll()` can now be called with non-const pattern and replacement arguments. [#46589](https://github.com/ClickHouse/ClickHouse/pull/46589) ([Robert Schulze](https://github.com/rschu1ze)).
* Added functions to work with columns of type `Map`: `mapConcat`, `mapSort`, `mapExists`. [#48071](https://github.com/ClickHouse/ClickHouse/pull/48071) ([Anton Popov](https://github.com/CurtizJ)).

#### Performance Improvement
* Reading files in `Parquet` format is now much faster. IO and decoding are parallelized (controlled by the `max_threads` setting), and only required data ranges are read. [#47964](https://github.com/ClickHouse/ClickHouse/pull/47964) ([Michael Kolupaev](https://github.com/al13n321)).
* If we run a mutation with IN (subquery), like `ALTER TABLE t UPDATE col='new value' WHERE id IN (SELECT id FROM huge_table)`, and the table `t` has multiple parts, then for each part a set for the subquery `SELECT id FROM huge_table` is built in memory. If there are many parts, this might consume a lot of memory (and lead to an OOM) and CPU. The solution is to introduce a short-lived cache of sets that are currently being built by mutation tasks. If another task of the same mutation is executed concurrently, it can look up the set in the cache, wait for it to be built, and reuse it. [#46835](https://github.com/ClickHouse/ClickHouse/pull/46835) ([Alexander Gololobov](https://github.com/davenger)).
* Only check dependencies if necessary when applying `ALTER TABLE` queries. [#48062](https://github.com/ClickHouse/ClickHouse/pull/48062) ([Raúl Marín](https://github.com/Algunenano)).
* Optimize function `mapUpdate`. [#48118](https://github.com/ClickHouse/ClickHouse/pull/48118) ([Anton Popov](https://github.com/CurtizJ)).
* Now an internal query to a local replica is sent explicitly, and data from it is received through the loopback interface. The setting `prefer_localhost_replica` is not respected for parallel replicas. This is needed for better scheduling and makes the code cleaner: the initiator is only responsible for coordinating the reading process and merging results, continuously answering requests, while all the secondary queries read the data. Note: using the loopback interface is not as performant, but otherwise some replicas could starve for tasks, which could lead to even slower query execution and not utilizing all possible resources. The initialization of the coordinator is now even more lazy. All incoming requests contain information about the reading algorithm, and we initialize the coordinator with it when the first request comes. If any replica decides to read with a different algorithm, an exception is thrown and the query is aborted. [#48246](https://github.com/ClickHouse/ClickHouse/pull/48246) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Do not build a set for the right side of an `IN` clause with a subquery when it is used only for the analysis of skip indexes and they are disabled by setting (`use_skip_indexes=0`). Previously it might have affected query performance. [#48299](https://github.com/ClickHouse/ClickHouse/pull/48299) ([Anton Popov](https://github.com/CurtizJ)).
* Query processing is parallelized right after reading `FROM file(...)`. Related to [#38755](https://github.com/ClickHouse/ClickHouse/issues/38755). [#48525](https://github.com/ClickHouse/ClickHouse/pull/48525) ([Igor Nikonov](https://github.com/devcrafter)).
* Query processing is parallelized right after reading from a data source. Affected data sources are mostly simple or external storages like the table functions `url` and `file`. [#48727](https://github.com/ClickHouse/ClickHouse/pull/48727) ([Igor Nikonov](https://github.com/devcrafter)).
* Lowered contention of the ThreadPool mutex (may increase performance for a huge number of small jobs). [#48750](https://github.com/ClickHouse/ClickHouse/pull/48750) ([Sergei Trifonov](https://github.com/serxa)).
* Reduce memory usage for multiple `ALTER DELETE` mutations. [#48522](https://github.com/ClickHouse/ClickHouse/pull/48522) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Remove excessive connection attempts if the `skip_unavailable_shards` setting is enabled. [#48771](https://github.com/ClickHouse/ClickHouse/pull/48771) ([Azat Khuzhin](https://github.com/azat)).

#### Experimental Feature
* Entries in the query cache are now squashed to max_block_size and compressed. [#45912](https://github.com/ClickHouse/ClickHouse/pull/45912) ([Robert Schulze](https://github.com/rschu1ze)).
* It is now possible to define per-user quotas in the query cache. [#48284](https://github.com/ClickHouse/ClickHouse/pull/48284) ([Robert Schulze](https://github.com/rschu1ze)).
* Some fixes for parallel replicas. [#48433](https://github.com/ClickHouse/ClickHouse/pull/48433) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Implement zero-copy replication (an experimental feature) on encrypted disks. [#48741](https://github.com/ClickHouse/ClickHouse/pull/48741) ([Vitaly Baranov](https://github.com/vitlibar)).

#### Improvement
* Increase default value for `connect_timeout_with_failover_ms` to 1000 ms (because of adding async connections in https://github.com/ClickHouse/ClickHouse/pull/47229). Closes [#5188](https://github.com/ClickHouse/ClickHouse/issues/5188). [#49009](https://github.com/ClickHouse/ClickHouse/pull/49009) ([Kruglov Pavel](https://github.com/Avogar)).
* Several improvements around data lakes: make `Iceberg` work with non-partitioned data; support `Iceberg` format version v2 (previously only v1 was supported); support reading partitioned data for `DeltaLake`/`Hudi`; faster reading of `DeltaLake` metadata by using Delta's checkpoint files; fixed incorrect `Hudi` reads (previously it incorrectly chose which data to read and therefore could read correctly only small tables); made these engines pick up updates of changed data (previously the state was set on table creation); proper testing for `Iceberg`/`DeltaLake`/`Hudi` using Spark. [#47307](https://github.com/ClickHouse/ClickHouse/pull/47307) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add async connection to socket and async writing to socket. Make creating connections and sending query/external tables async across shards. Refactor code with fibers. Closes [#46931](https://github.com/ClickHouse/ClickHouse/issues/46931). We will be able to increase `connect_timeout_with_failover_ms` by default after this PR (https://github.com/ClickHouse/ClickHouse/issues/5188). [#47229](https://github.com/ClickHouse/ClickHouse/pull/47229) ([Kruglov Pavel](https://github.com/Avogar)).
* Support config sections `keeper`/`keeper_server` as an alternative to `zookeeper`. Close [#34766](https://github.com/ClickHouse/ClickHouse/issues/34766), [#34767](https://github.com/ClickHouse/ClickHouse/issues/34767). [#35113](https://github.com/ClickHouse/ClickHouse/pull/35113) ([李扬](https://github.com/taiyang-li)).
* It is possible to set the _secure_ flag in named_collections for a dictionary with a ClickHouse table source. Addresses [#38450](https://github.com/ClickHouse/ClickHouse/issues/38450). [#46323](https://github.com/ClickHouse/ClickHouse/pull/46323) ([Ilya Golshtein](https://github.com/ilejn)).
* The `bitCount` function supports `FixedString` and `String` data types. [#49044](https://github.com/ClickHouse/ClickHouse/pull/49044) ([flynn](https://github.com/ucasfl)).
* Added configurable retries for all operations with [Zoo]Keeper for Backup queries. [#47224](https://github.com/ClickHouse/ClickHouse/pull/47224) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Enable `use_environment_credentials` for S3 by default, so the entire provider chain is constructed by default. [#47397](https://github.com/ClickHouse/ClickHouse/pull/47397) ([Antonio Andelic](https://github.com/antonio2368)).
* Currently, the `JSON_VALUE` function is similar to Spark's `get_json_object` function, which supports getting a value from a JSON string by a path like '$.key'. There are still differences: 1. Spark's `get_json_object` returns null when the path does not exist, while `JSON_VALUE` returns an empty string; 2. Spark's `get_json_object` returns complex type values, such as JSON object/array values, while `JSON_VALUE` returns an empty string. [#47494](https://github.com/ClickHouse/ClickHouse/pull/47494) ([KevinyhZou](https://github.com/KevinyhZou)).
* More flexible propagation of the insert table structure to table functions for `use_structure_from_insertion_table_in_table_functions`. Fixed an issue with name mapping and using virtual columns. No more need for the 'auto' setting. [#47962](https://github.com/ClickHouse/ClickHouse/pull/47962) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Do not continue retrying to connect to ZK if the query is killed or over limits. [#47985](https://github.com/ClickHouse/ClickHouse/pull/47985) ([Raúl Marín](https://github.com/Algunenano)).
* Support Enum output/input in `BSONEachRow`, allow all map key types and avoid extra calculations on output. [#48122](https://github.com/ClickHouse/ClickHouse/pull/48122) ([Kruglov Pavel](https://github.com/Avogar)).
* Support more ClickHouse types in `ORC`/`Arrow`/`Parquet` formats: Enum(8|16), (U)Int(128|256), Decimal256 (for ORC), allow reading IPv4 from Int32 values (ORC outputs IPv4 as Int32 and we couldn't read it back), fix reading Nullable(IPv6) from binary data for `ORC`. [#48126](https://github.com/ClickHouse/ClickHouse/pull/48126) ([Kruglov Pavel](https://github.com/Avogar)).
* Add columns `perform_ttl_move_on_insert` and `load_balancing` to table `system.storage_policies`; change the type of column `volume_type` to `Enum8`. [#48167](https://github.com/ClickHouse/ClickHouse/pull/48167) ([lizhuoyu5](https://github.com/lzydmxy)).
* Added support for the `BACKUP ALL` command which backs up all tables and databases, including temporary and system ones. [#48189](https://github.com/ClickHouse/ClickHouse/pull/48189) ([Vitaly Baranov](https://github.com/vitlibar)).
* Function `mapFromArrays` supports `Map` type as an input. [#48207](https://github.com/ClickHouse/ClickHouse/pull/48207) ([李扬](https://github.com/taiyang-li)).
* The output of some `SHOW PROCESSLIST` is now sorted. [#48241](https://github.com/ClickHouse/ClickHouse/pull/48241) ([Robert Schulze](https://github.com/rschu1ze)).
* Per-query/per-server throttling for remote IO/local IO/BACKUPs (server settings: `max_remote_read_network_bandwidth_for_server`, `max_remote_write_network_bandwidth_for_server`, `max_local_read_bandwidth_for_server`, `max_local_write_bandwidth_for_server`, `max_backup_bandwidth_for_server`; query settings: `max_remote_read_network_bandwidth`, `max_remote_write_network_bandwidth`, `max_local_read_bandwidth`, `max_local_write_bandwidth`, `max_backup_bandwidth`). [#48242](https://github.com/ClickHouse/ClickHouse/pull/48242) ([Azat Khuzhin](https://github.com/azat)).
* Support more types in `CapnProto` format: Map, (U)Int(128|256), Decimal(128|256). Allow integer conversions during input/output. [#48257](https://github.com/ClickHouse/ClickHouse/pull/48257) ([Kruglov Pavel](https://github.com/Avogar)).
* Don't throw `CURRENT_WRITE_BUFFER_IS_EXHAUSTED` for normal behaviour. [#48288](https://github.com/ClickHouse/ClickHouse/pull/48288) ([Raúl Marín](https://github.com/Algunenano)).
* Add new setting `keeper_map_strict_mode` which enforces extra guarantees on operations made on top of `KeeperMap` tables. [#48293](https://github.com/ClickHouse/ClickHouse/pull/48293) ([Antonio Andelic](https://github.com/antonio2368)).
* Check that the primary key type for simple dictionaries is a native unsigned integer type. Added setting `check_dictionary_primary_key` for compatibility (set `check_dictionary_primary_key = false` to disable the check). [#48335](https://github.com/ClickHouse/ClickHouse/pull/48335) ([lizhuoyu5](https://github.com/lzydmxy)).
* Don't replicate mutations for `KeeperMap` because it's unnecessary. [#48354](https://github.com/ClickHouse/ClickHouse/pull/48354) ([Antonio Andelic](https://github.com/antonio2368)).
* Allow writing/reading unnamed tuples as nested Messages in Protobuf format. Tuple elements and Message fields are matched by position. [#48390](https://github.com/ClickHouse/ClickHouse/pull/48390) ([Kruglov Pavel](https://github.com/Avogar)).
* Support `additional_table_filters` and `additional_result_filter` settings in the new planner. Also, add a documentation entry for `additional_result_filter`. [#48405](https://github.com/ClickHouse/ClickHouse/pull/48405) ([Dmitry Novik](https://github.com/novikd)).
* `parseDateTime` now understands format string '%f' (fractional seconds). [#48420](https://github.com/ClickHouse/ClickHouse/pull/48420) ([Robert Schulze](https://github.com/rschu1ze)).
* Format string "%f" in formatDateTime() now prints "000000" if the formatted value has no fractional seconds; the previous behavior (a single zero) can be restored using setting "formatdatetime_f_prints_single_zero = 1". [#48422](https://github.com/ClickHouse/ClickHouse/pull/48422) ([Robert Schulze](https://github.com/rschu1ze)).
* Don't replicate DELETE and TRUNCATE for KeeperMap. [#48434](https://github.com/ClickHouse/ClickHouse/pull/48434) ([Antonio Andelic](https://github.com/antonio2368)).
* Generate valid Decimals and Bools in the generateRandom function. [#48436](https://github.com/ClickHouse/ClickHouse/pull/48436) ([Kruglov Pavel](https://github.com/Avogar)).
* Allow trailing commas in the expression list of a SELECT query, for example `SELECT a, b, c, FROM table`. Closes [#37802](https://github.com/ClickHouse/ClickHouse/issues/37802). [#48438](https://github.com/ClickHouse/ClickHouse/pull/48438) ([Nikolay Degterinsky](https://github.com/evillique)).
* Override `CLICKHOUSE_USER` and `CLICKHOUSE_PASSWORD` environment variables with `--user` and `--password` client parameters. Closes [#38909](https://github.com/ClickHouse/ClickHouse/issues/38909). [#48440](https://github.com/ClickHouse/ClickHouse/pull/48440) ([Nikolay Degterinsky](https://github.com/evillique)).
* Added retries to loading of data parts in `MergeTree` tables in case of retryable errors. [#48442](https://github.com/ClickHouse/ClickHouse/pull/48442) ([Anton Popov](https://github.com/CurtizJ)).
* Add support for `Date`, `Date32`, `DateTime`, `DateTime64` data types to `arrayMin`, `arrayMax`, `arrayDifference` functions. Closes [#21645](https://github.com/ClickHouse/ClickHouse/issues/21645). [#48445](https://github.com/ClickHouse/ClickHouse/pull/48445) ([Nikolay Degterinsky](https://github.com/evillique)).
* Add support for the `{server_uuid}` macro. It is useful for identifying replicas in autoscaled clusters when new replicas are constantly added and removed at runtime. This closes [#48554](https://github.com/ClickHouse/ClickHouse/issues/48554). [#48563](https://github.com/ClickHouse/ClickHouse/pull/48563) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The installation script will create a hard link instead of copying if it is possible. [#48578](https://github.com/ClickHouse/ClickHouse/pull/48578) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Support `SHOW TABLE` syntax meaning the same as `SHOW CREATE TABLE`. Closes [#48580](https://github.com/ClickHouse/ClickHouse/issues/48580). [#48591](https://github.com/ClickHouse/ClickHouse/pull/48591) ([flynn](https://github.com/ucasfl)).
* HTTP temporary buffers can now work by evicting data from the virtual filesystem cache. [#48664](https://github.com/ClickHouse/ClickHouse/pull/48664) ([Vladimir C](https://github.com/vdimir)).
* Make schema inference work for `CREATE AS SELECT`. Closes [#47599](https://github.com/ClickHouse/ClickHouse/issues/47599). [#48679](https://github.com/ClickHouse/ClickHouse/pull/48679) ([flynn](https://github.com/ucasfl)).
* Added a `replicated_max_mutations_in_one_entry` setting for `ReplicatedMergeTree` that allows limiting the number of mutation commands per one `MUTATE_PART` entry (default is 10000). [#48731](https://github.com/ClickHouse/ClickHouse/pull/48731) ([Alexander Tokmakov](https://github.com/tavplubix)).
* In AggregateFunction types, don't count unused arena bytes as `read_bytes`. [#48745](https://github.com/ClickHouse/ClickHouse/pull/48745) ([Raúl Marín](https://github.com/Algunenano)).
* Fix some MySQL-related settings not being handled with the MySQL dictionary source + named collection. Closes [#48402](https://github.com/ClickHouse/ClickHouse/issues/48402). [#48759](https://github.com/ClickHouse/ClickHouse/pull/48759) ([Kseniia Sumarokova](https://github.com/kssenii)).
* If a user sets `max_single_part_upload_size` to a very large value, it can lead to a crash due to a bug in the AWS S3 SDK. This fixes [#47679](https://github.com/ClickHouse/ClickHouse/issues/47679). [#48816](https://github.com/ClickHouse/ClickHouse/pull/48816) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix data race in `RabbitMQ` ([report](https://pastila.nl/?004f7100/de1505289ab5bb355e67ebe6c7cc8707)), refactor the code. [#48845](https://github.com/ClickHouse/ClickHouse/pull/48845) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add aliases `name` and `part_name` for `system.parts` and `system.part_log`. Closes [#48718](https://github.com/ClickHouse/ClickHouse/issues/48718). [#48850](https://github.com/ClickHouse/ClickHouse/pull/48850) ([sichenzhao](https://github.com/sichenzhao)).
* Functions `arrayDifference()`, `arrayCumSum()` and `arrayCumSumNonNegative()` now support input arrays of wide integer types (U)Int128/256. [#48866](https://github.com/ClickHouse/ClickHouse/pull/48866) ([cluster](https://github.com/infdahai)).
* Multi-line history in clickhouse-client is now no longer padded. This makes pasting more natural. [#48870](https://github.com/ClickHouse/ClickHouse/pull/48870) ([Joanna Hulboj](https://github.com/jh0x)).
* Implement a slight improvement for the rare case when ClickHouse is run inside LXC and LXCFS is used. LXCFS has an issue: sometimes it returns an error "Transport endpoint is not connected" on reading from a file inside `/proc`. This error was correctly logged into ClickHouse's server log. We have additionally worked around this issue by reopening the file. This is a minuscule change. [#48922](https://github.com/ClickHouse/ClickHouse/pull/48922) ([Real](https://github.com/RunningXie)).
* Improve memory accounting for prefetches. Randomise prefetch settings in CI. [#48973](https://github.com/ClickHouse/ClickHouse/pull/48973) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Correctly set headers for native copy operations on GCS. [#48981](https://github.com/ClickHouse/ClickHouse/pull/48981) ([Antonio Andelic](https://github.com/antonio2368)).
* Add support for specifying setting names in the command line with dashes instead of underscores, for example, `--max-threads` instead of `--max_threads`. Additionally, support Unicode dash characters like `—` instead of `--`. This is useful when you communicate with a team in another company, and a manager from that team copy-pasted the code from MS Word. [#48985](https://github.com/ClickHouse/ClickHouse/pull/48985) ([alekseygolub](https://github.com/alekseygolub)).
* Add fallback to password authentication when authentication with an SSL user certificate has failed. Closes [#48974](https://github.com/ClickHouse/ClickHouse/issues/48974). [#48989](https://github.com/ClickHouse/ClickHouse/pull/48989) ([Nikolay Degterinsky](https://github.com/evillique)).
* Improve the embedded dashboard. Close [#46671](https://github.com/ClickHouse/ClickHouse/issues/46671). [#49036](https://github.com/ClickHouse/ClickHouse/pull/49036) ([Kevin Zhang](https://github.com/Kinzeng)).
* Add profile events for log messages, so you can easily see the count of log messages by severity. [#49042](https://github.com/ClickHouse/ClickHouse/pull/49042) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* In previous versions, the `LineAsString` format worked inconsistently depending on whether parallel parsing was enabled, in the presence of DOS or MacOS Classic line breaks. This closes [#49039](https://github.com/ClickHouse/ClickHouse/issues/49039). [#49052](https://github.com/ClickHouse/ClickHouse/pull/49052) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The exception message about an unparsed query parameter will also tell the name of the parameter. Reimplements [#48878](https://github.com/ClickHouse/ClickHouse/issues/48878). Closes [#48772](https://github.com/ClickHouse/ClickHouse/issues/48772). [#49061](https://github.com/ClickHouse/ClickHouse/pull/49061) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### Build/Testing/Packaging Improvement
* Update time zones. The following were updated: Africa/Cairo, Africa/Casablanca, Africa/El_Aaiun, America/Bogota, America/Cambridge_Bay, America/Ciudad_Juarez, America/Godthab, America/Inuvik, America/Iqaluit, America/Nuuk, America/Ojinaga, America/Pangnirtung, America/Rankin_Inlet, America/Resolute, America/Whitehorse, America/Yellowknife, Asia/Gaza, Asia/Hebron, Asia/Kuala_Lumpur, Asia/Singapore, Canada/Yukon, Egypt, Europe/Kirov, Europe/Volgograd, Singapore. [#48572](https://github.com/ClickHouse/ClickHouse/pull/48572) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Reduce the number of dependencies in the header files to speed up the build. [#47984](https://github.com/ClickHouse/ClickHouse/pull/47984) ([Dmitry Novik](https://github.com/novikd)).
* Randomize compression of marks and indices in tests. [#48286](https://github.com/ClickHouse/ClickHouse/pull/48286) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Bump internal ZSTD from 1.5.4 to 1.5.5. [#46797](https://github.com/ClickHouse/ClickHouse/pull/46797) ([Robert Schulze](https://github.com/rschu1ze)).
* Randomize vertical merges from compact to wide parts in tests. [#48287](https://github.com/ClickHouse/ClickHouse/pull/48287) ([Raúl Marín](https://github.com/Algunenano)).
* Support for CRC32 checksum in HDFS. Fix performance issues. [#48614](https://github.com/ClickHouse/ClickHouse/pull/48614) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove remainders of GCC support. [#48671](https://github.com/ClickHouse/ClickHouse/pull/48671) ([Robert Schulze](https://github.com/rschu1ze)).
* Add a CI run with the new analyzer infrastructure enabled. [#48719](https://github.com/ClickHouse/ClickHouse/pull/48719) ([Dmitry Novik](https://github.com/novikd)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix `system.query_views_log` for MVs that are pushed from background threads [#46668](https://github.com/ClickHouse/ClickHouse/pull/46668) ([Azat Khuzhin](https://github.com/azat)).
* Fix several `RENAME COLUMN` bugs [#46946](https://github.com/ClickHouse/ClickHouse/pull/46946) ([alesapin](https://github.com/alesapin)).
* Fix minor highlighting issues in clickhouse-format [#47610](https://github.com/ClickHouse/ClickHouse/pull/47610) ([Natasha Murashkina](https://github.com/murfel)).
* Fix a bug in LLVM's libc++ leading to a crash when uploading parts to S3 whose size is greater than INT_MAX [#47693](https://github.com/ClickHouse/ClickHouse/pull/47693) ([Azat Khuzhin](https://github.com/azat)).
* Fix overflow in the `sparkbar` function [#48121](https://github.com/ClickHouse/ClickHouse/pull/48121) ([Vladimir C](https://github.com/vdimir)).
* Fix race in S3 [#48190](https://github.com/ClickHouse/ClickHouse/pull/48190) ([Anton Popov](https://github.com/CurtizJ)).
* Disable JIT for aggregate functions due to inconsistent behavior [#48195](https://github.com/ClickHouse/ClickHouse/pull/48195) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix alter formatting (minor) [#48289](https://github.com/ClickHouse/ClickHouse/pull/48289) ([Natasha Murashkina](https://github.com/murfel)).
* Fix CPU usage in RabbitMQ (worsened in 23.2 after [#44404](https://github.com/ClickHouse/ClickHouse/issues/44404)) [#48311](https://github.com/ClickHouse/ClickHouse/pull/48311) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix crash in EXPLAIN PIPELINE for Merge over Distributed [#48320](https://github.com/ClickHouse/ClickHouse/pull/48320) ([Azat Khuzhin](https://github.com/azat)).
* Fix serializing LowCardinality as Arrow dictionary [#48361](https://github.com/ClickHouse/ClickHouse/pull/48361) ([Kruglov Pavel](https://github.com/Avogar)).
* Reset downloader for cache file segment in TemporaryFileStream [#48386](https://github.com/ClickHouse/ClickHouse/pull/48386) ([Vladimir C](https://github.com/vdimir)).
* Fix possible SYSTEM SYNC REPLICA stuck in case of DROP/REPLACE PARTITION [#48391](https://github.com/ClickHouse/ClickHouse/pull/48391) ([Azat Khuzhin](https://github.com/azat)).
* Fix a startup error when loading a distributed table that depends on a dictionary [#48419](https://github.com/ClickHouse/ClickHouse/pull/48419) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Don't check dependencies when renaming system tables automatically [#48431](https://github.com/ClickHouse/ClickHouse/pull/48431) ([Raúl Marín](https://github.com/Algunenano)).
* Update only affected rows in KeeperMap storage [#48435](https://github.com/ClickHouse/ClickHouse/pull/48435) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix possible segfault in the VFS cache [#48469](https://github.com/ClickHouse/ClickHouse/pull/48469) ([Kseniia Sumarokova](https://github.com/kssenii)).
* `toTimeZone` function throws an error when no constant string is provided [#48471](https://github.com/ClickHouse/ClickHouse/pull/48471) ([Jordi Villar](https://github.com/jrdi)).
* Fix logical error with IPv4 in Protobuf, add support for Date32 [#48486](https://github.com/ClickHouse/ClickHouse/pull/48486) ([Kruglov Pavel](https://github.com/Avogar)).
* The "changed" flag in system.settings was calculated incorrectly for settings with multiple values [#48516](https://github.com/ClickHouse/ClickHouse/pull/48516) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Fix storage `Memory` with enabled compression [#48517](https://github.com/ClickHouse/ClickHouse/pull/48517) ([Anton Popov](https://github.com/CurtizJ)).
* Fix bracketed-paste mode messing up password input in the event of client reconnection [#48528](https://github.com/ClickHouse/ClickHouse/pull/48528) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix nested map for keys of IP and UUID types [#48556](https://github.com/ClickHouse/ClickHouse/pull/48556) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix an uncaught exception in case of parallel loader for hashed dictionaries [#48571](https://github.com/ClickHouse/ClickHouse/pull/48571) ([Azat Khuzhin](https://github.com/azat)).
* The `groupArray` aggregate function now works correctly for an empty result over nullable types [#48593](https://github.com/ClickHouse/ClickHouse/pull/48593) ([lgbo](https://github.com/lgbo-ustc)).
* Fix a bug in Keeper where sometimes a node was not created with scheme `auth` in its ACL [#48595](https://github.com/ClickHouse/ClickHouse/pull/48595) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Allow IPv4 comparison operators with UInt [#48611](https://github.com/ClickHouse/ClickHouse/pull/48611) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix possible error from cache [#48636](https://github.com/ClickHouse/ClickHouse/pull/48636) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Async inserts with empty data will no longer throw an exception [#48663](https://github.com/ClickHouse/ClickHouse/pull/48663) ([Anton Popov](https://github.com/CurtizJ)).
* Fix table dependencies in case of failed RENAME TABLE [#48683](https://github.com/ClickHouse/ClickHouse/pull/48683) ([Azat Khuzhin](https://github.com/azat)).
* If the primary key has duplicate columns (which is only possible for projections), in previous versions it might lead to a bug [#48838](https://github.com/ClickHouse/ClickHouse/pull/48838) ([Amos Bird](https://github.com/amosbird)).
* Fix a race condition in ZooKeeper when joining send_thread/receive_thread [#48849](https://github.com/ClickHouse/ClickHouse/pull/48849) ([Alexander Gololobov](https://github.com/davenger)).
* Fix unexpected part name error when trying to drop an ignored detached part with zero-copy replication [#48862](https://github.com/ClickHouse/ClickHouse/pull/48862) ([Michael Lex](https://github.com/mlex)).
* Fix reading a `Date32` Parquet/Arrow column into a non-`Date32` column [#48864](https://github.com/ClickHouse/ClickHouse/pull/48864) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix `UNKNOWN_IDENTIFIER` error while selecting from a table with a row policy and a column with dots [#48976](https://github.com/ClickHouse/ClickHouse/pull/48976) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix aggregation by empty nullable strings [#48999](https://github.com/ClickHouse/ClickHouse/pull/48999) ([LiuNeng](https://github.com/liuneng1994)).


### <a id="233"></a> ClickHouse release 23.3 LTS, 2023-03-30

#### Upgrade Notes

@ -421,8 +421,11 @@ endif ()
|
||||
|
||||
set (CMAKE_POSTFIX_VARIABLE "CMAKE_${CMAKE_BUILD_TYPE_UC}_POSTFIX")
|
||||
|
||||
set (CMAKE_POSITION_INDEPENDENT_CODE OFF)
|
||||
if (OS_LINUX AND NOT (ARCH_AARCH64 OR ARCH_S390X))
|
||||
if (NOT SANITIZE)
|
||||
set (CMAKE_POSITION_INDEPENDENT_CODE OFF)
|
||||
endif()
|
||||
|
||||
if (OS_LINUX AND NOT (ARCH_AARCH64 OR ARCH_S390X) AND NOT SANITIZE)
|
||||
# Slightly more efficient code can be generated
|
||||
# It's disabled for ARM because otherwise ClickHouse cannot run on Android.
|
||||
set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -fno-pie")
|
||||
|
@@ -34,10 +34,52 @@
  * If no such characters, returns nullptr.
  */

struct SearchSymbols
{
    static constexpr auto BUFFER_SIZE = 16;

    SearchSymbols() = default;

    explicit SearchSymbols(std::string in)
        : str(std::move(in))
    {
#if defined(__SSE4_2__)
        if (str.size() > BUFFER_SIZE)
        {
            throw std::runtime_error("SearchSymbols can contain at most " + std::to_string(BUFFER_SIZE) + " symbols and " + std::to_string(str.size()) + " was provided\n");
        }

        char tmp_safety_buffer[BUFFER_SIZE] = {0};

        memcpy(tmp_safety_buffer, str.data(), str.size());

        simd_vector = _mm_loadu_si128(reinterpret_cast<const __m128i *>(tmp_safety_buffer));
#endif
    }

#if defined(__SSE4_2__)
    __m128i simd_vector;
#endif
    std::string str;
};

namespace detail
{
template <char ...chars> constexpr bool is_in(char x) { return ((x == chars) || ...); } // NOLINT(misc-redundant-expression)

static bool is_in(char c, const char * symbols, size_t num_chars)
{
    for (size_t i = 0u; i < num_chars; ++i)
    {
        if (c == symbols[i])
        {
            return true;
        }
    }

    return false;
}

#if defined(__SSE2__)
template <char s0>
inline __m128i mm_is_in(__m128i bytes)
@@ -53,6 +95,43 @@ inline __m128i mm_is_in(__m128i bytes)
    __m128i eq = mm_is_in<s1, tail...>(bytes);
    return _mm_or_si128(eq0, eq);
}

inline __m128i mm_is_in(__m128i bytes, const char * symbols, size_t num_chars)
{
    __m128i accumulator = _mm_setzero_si128();
    for (size_t i = 0; i < num_chars; ++i)
    {
        __m128i eq = _mm_cmpeq_epi8(bytes, _mm_set1_epi8(symbols[i]));
        accumulator = _mm_or_si128(accumulator, eq);
    }

    return accumulator;
}

inline std::array<__m128i, 16u> mm_is_in_prepare(const char * symbols, size_t num_chars)
{
    std::array<__m128i, 16u> result {};

    for (size_t i = 0; i < num_chars; ++i)
    {
        result[i] = _mm_set1_epi8(symbols[i]);
    }

    return result;
}

inline __m128i mm_is_in_execute(__m128i bytes, const std::array<__m128i, 16u> & needles)
{
    __m128i accumulator = _mm_setzero_si128();

    for (const auto & needle : needles)
    {
        __m128i eq = _mm_cmpeq_epi8(bytes, needle);
        accumulator = _mm_or_si128(accumulator, eq);
    }

    return accumulator;
}
#endif

template <bool positive>
@@ -99,6 +178,32 @@ inline const char * find_first_symbols_sse2(const char * const begin, const char * const end)
    return return_mode == ReturnMode::End ? end : nullptr;
}

template <bool positive, ReturnMode return_mode>
inline const char * find_first_symbols_sse2(const char * const begin, const char * const end, const char * symbols, size_t num_chars)
{
    const char * pos = begin;

#if defined(__SSE2__)
    const auto needles = mm_is_in_prepare(symbols, num_chars);
    for (; pos + 15 < end; pos += 16)
    {
        __m128i bytes = _mm_loadu_si128(reinterpret_cast<const __m128i *>(pos));

        __m128i eq = mm_is_in_execute(bytes, needles);

        uint16_t bit_mask = maybe_negate<positive>(uint16_t(_mm_movemask_epi8(eq)));
        if (bit_mask)
            return pos + __builtin_ctz(bit_mask);
    }
#endif

    for (; pos < end; ++pos)
        if (maybe_negate<positive>(is_in(*pos, symbols, num_chars)))
            return pos;

    return return_mode == ReturnMode::End ? end : nullptr;
}


template <bool positive, ReturnMode return_mode, char... symbols>
inline const char * find_last_symbols_sse2(const char * const begin, const char * const end)
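
The SSE2 path above scans 16 bytes per iteration: `mm_is_in_execute` leaves 0xFF in every byte lane that matched one of the symbols, `_mm_movemask_epi8` compresses those lanes into a 16-bit mask (`maybe_negate` flips it for the "not in set" variants), and `__builtin_ctz` turns the lowest set bit into the offset of the first hit within the block. A minimal standalone sketch of just that mask-and-count step (illustrative only, not part of the header):

```cpp
#include <cstdint>
#include <cstdio>
#include <emmintrin.h> // SSE2 intrinsics

int main()
{
    // A 16-byte block in which we look for the first ',' (sits at offset 3).
    const char buf[16] = {'a', 'b', 'c', ',', 'd', 'e', 'f', 'g',
                          'h', 'i', 'j', 'k', ',', 'm', 'n', 'o'};

    __m128i bytes = _mm_loadu_si128(reinterpret_cast<const __m128i *>(buf));
    __m128i eq = _mm_cmpeq_epi8(bytes, _mm_set1_epi8(','));          // 0xFF where byte == ','
    auto bit_mask = static_cast<std::uint16_t>(_mm_movemask_epi8(eq)); // bit i set <=> lane i matched

    if (bit_mask)
        std::printf("first match at offset %d\n", __builtin_ctz(bit_mask)); // prints 3
    return 0;
}
```
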
@@ -179,6 +284,41 @@ inline const char * find_first_symbols_sse42(const char * const begin, const char * const end)
    return return_mode == ReturnMode::End ? end : nullptr;
}

template <bool positive, ReturnMode return_mode>
inline const char * find_first_symbols_sse42(const char * const begin, const char * const end, const SearchSymbols & symbols)
{
    const char * pos = begin;

    const auto num_chars = symbols.str.size();

#if defined(__SSE4_2__)
    constexpr int mode = _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT;

    const __m128i set = symbols.simd_vector;

    for (; pos + 15 < end; pos += 16)
    {
        __m128i bytes = _mm_loadu_si128(reinterpret_cast<const __m128i *>(pos));

        if constexpr (positive)
        {
            if (_mm_cmpestrc(set, num_chars, bytes, 16, mode))
                return pos + _mm_cmpestri(set, num_chars, bytes, 16, mode);
        }
        else
        {
            if (_mm_cmpestrc(set, num_chars, bytes, 16, mode | _SIDD_NEGATIVE_POLARITY))
                return pos + _mm_cmpestri(set, num_chars, bytes, 16, mode | _SIDD_NEGATIVE_POLARITY);
        }
    }
#endif

    for (; pos < end; ++pos)
        if (maybe_negate<positive>(is_in(*pos, symbols.str.data(), num_chars)))
            return pos;

    return return_mode == ReturnMode::End ? end : nullptr;
}

/// NOTE No SSE 4.2 implementation for find_last_symbols_or_null. Not worth to do.

@@ -194,6 +334,17 @@ inline const char * find_first_symbols_dispatch(const char * begin, const char * end)
    return find_first_symbols_sse2<positive, return_mode, symbols...>(begin, end);
}

template <bool positive, ReturnMode return_mode>
inline const char * find_first_symbols_dispatch(const std::string_view haystack, const SearchSymbols & symbols)
{
#if defined(__SSE4_2__)
    if (symbols.str.size() >= 5)
        return find_first_symbols_sse42<positive, return_mode>(haystack.begin(), haystack.end(), symbols);
    else
#endif
    return find_first_symbols_sse2<positive, return_mode>(haystack.begin(), haystack.end(), symbols.str.data(), symbols.str.size());
}

}

@@ -211,6 +362,11 @@ inline char * find_first_symbols(char * begin, char * end)
    return const_cast<char *>(detail::find_first_symbols_dispatch<true, detail::ReturnMode::End, symbols...>(begin, end));
}

inline const char * find_first_symbols(std::string_view haystack, const SearchSymbols & symbols)
{
    return detail::find_first_symbols_dispatch<true, detail::ReturnMode::End>(haystack, symbols);
}

template <char... symbols>
inline const char * find_first_not_symbols(const char * begin, const char * end)
{

@@ -223,6 +379,11 @@ inline char * find_first_not_symbols(char * begin, char * end)
    return const_cast<char *>(detail::find_first_symbols_dispatch<false, detail::ReturnMode::End, symbols...>(begin, end));
}

inline const char * find_first_not_symbols(std::string_view haystack, const SearchSymbols & symbols)
{
    return detail::find_first_symbols_dispatch<false, detail::ReturnMode::End>(haystack, symbols);
}

template <char... symbols>
inline const char * find_first_symbols_or_null(const char * begin, const char * end)
{

@@ -235,6 +396,11 @@ inline char * find_first_symbols_or_null(char * begin, char * end)
    return const_cast<char *>(detail::find_first_symbols_dispatch<true, detail::ReturnMode::Nullptr, symbols...>(begin, end));
}

inline const char * find_first_symbols_or_null(std::string_view haystack, const SearchSymbols & symbols)
{
    return detail::find_first_symbols_dispatch<true, detail::ReturnMode::Nullptr>(haystack, symbols);
}

template <char... symbols>
inline const char * find_first_not_symbols_or_null(const char * begin, const char * end)
{

@@ -247,6 +413,10 @@ inline char * find_first_not_symbols_or_null(char * begin, char * end)
    return const_cast<char *>(detail::find_first_symbols_dispatch<false, detail::ReturnMode::Nullptr, symbols...>(begin, end));
}

inline const char * find_first_not_symbols_or_null(std::string_view haystack, const SearchSymbols & symbols)
{
    return detail::find_first_symbols_dispatch<false, detail::ReturnMode::Nullptr>(haystack, symbols);
}

template <char... symbols>
inline const char * find_last_symbols_or_null(const char * begin, const char * end)
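
Taken together, these hunks add a runtime-configurable counterpart to the existing compile-time `find_first_symbols<...>` API: a `SearchSymbols` set is built once and reused across calls, and the dispatcher picks the `pcmpestri`-based SSE4.2 path only for sets of five or more symbols. A hedged usage sketch (the include path is an assumption based on this file's location in the ClickHouse tree):

```cpp
#include <base/find_symbols.h> // assumed include path for this header
#include <iostream>
#include <string_view>

int main()
{
    constexpr std::string_view s = "key=value;next";

    /// Compile-time variant: the symbol set is baked into the template.
    const char * eq = find_first_symbols<'=', ';'>(s.data(), s.data() + s.size());

    /// Runtime variant added by this commit: the set is packed into a
    /// SearchSymbols object once and may come from user input or config.
    const SearchSymbols delims(";=,");
    const char * first = find_first_symbols(s, delims);

    /// Both variants return the end pointer when nothing is found (ReturnMode::End).
    std::cout << (eq - s.data()) << ' ' << (first - s.data()) << '\n'; // prints "3 3"
}
```
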
@@ -5,44 +5,6 @@
#include <bit>


inline void reverseMemcpy(void * dst, const void * src, size_t size)
{
    uint8_t * uint_dst = reinterpret_cast<uint8_t *>(dst);
    const uint8_t * uint_src = reinterpret_cast<const uint8_t *>(src);

    uint_dst += size;
    while (size)
    {
        --uint_dst;
        *uint_dst = *uint_src;
        ++uint_src;
        --size;
    }
}

template <typename T>
inline T unalignedLoadLE(const void * address)
{
    T res {};
    if constexpr (std::endian::native == std::endian::little)
        memcpy(&res, address, sizeof(res));
    else
        reverseMemcpy(&res, address, sizeof(res));
    return res;
}


template <typename T>
inline void unalignedStoreLE(void * address,
    const typename std::enable_if<true, T>::type & src)
{
    static_assert(std::is_trivially_copyable_v<T>);
    if constexpr (std::endian::native == std::endian::little)
        memcpy(address, &src, sizeof(src));
    else
        reverseMemcpy(address, &src, sizeof(src));
}

template <typename T>
inline T unalignedLoad(const void * address)
{
@@ -62,3 +24,70 @@ inline void unalignedStore(void * address,
    static_assert(std::is_trivially_copyable_v<T>);
    memcpy(address, &src, sizeof(src));
}


inline void reverseMemcpy(void * dst, const void * src, size_t size)
{
    uint8_t * uint_dst = reinterpret_cast<uint8_t *>(dst);
    const uint8_t * uint_src = reinterpret_cast<const uint8_t *>(src);

    uint_dst += size;
    while (size)
    {
        --uint_dst;
        *uint_dst = *uint_src;
        ++uint_src;
        --size;
    }
}

template <std::endian endian, typename T>
inline T unalignedLoadEndian(const void * address)
{
    T res {};
    if constexpr (std::endian::native == endian)
        memcpy(&res, address, sizeof(res));
    else
        reverseMemcpy(&res, address, sizeof(res));
    return res;
}


template <std::endian endian, typename T>
inline void unalignedStoreEndian(void * address, T & src)
{
    static_assert(std::is_trivially_copyable_v<T>);
    if constexpr (std::endian::native == endian)
        memcpy(address, &src, sizeof(src));
    else
        reverseMemcpy(address, &src, sizeof(src));
}


template <typename T>
inline T unalignedLoadLittleEndian(const void * address)
{
    return unalignedLoadEndian<std::endian::little, T>(address);
}


template <typename T>
inline void unalignedStoreLittleEndian(void * address,
    const typename std::enable_if<true, T>::type & src)
{
    unalignedStoreEndian<std::endian::little>(address, src);
}

template <typename T>
inline T unalignedLoadBigEndian(const void * address)
{
    return unalignedLoadEndian<std::endian::big, T>(address);
}


template <typename T>
inline void unalignedStoreBigEndian(void * address,
    const typename std::enable_if<true, T>::type & src)
{
    unalignedStoreEndian<std::endian::big>(address, src);
}
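
The old `unalignedLoadLE`/`unalignedStoreLE` pair (removed in the first hunk) is generalized here into `unalignedLoadEndian`/`unalignedStoreEndian`, parameterized by `std::endian`, with thin little- and big-endian wrappers: `memcpy` when the host byte order already matches, byte-reversing copy otherwise. A self-contained sketch of the load path, with the two helpers copied locally so it compiles on its own (illustrative, not the actual header):

```cpp
#include <bit>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Local copies of the helpers above, for illustration.
inline void reverseMemcpy(void * dst, const void * src, std::size_t size)
{
    std::uint8_t * d = reinterpret_cast<std::uint8_t *>(dst) + size;
    const std::uint8_t * s = reinterpret_cast<const std::uint8_t *>(src);
    while (size--)
        *--d = *s++; // copy while reversing byte order
}

template <std::endian endian, typename T>
inline T unalignedLoadEndian(const void * address)
{
    T res{};
    if constexpr (std::endian::native == endian)
        std::memcpy(&res, address, sizeof(res));      // byte order already matches
    else
        reverseMemcpy(&res, address, sizeof(res));    // swap while copying
    return res;
}

int main()
{
    // 0x01020304 serialized as big-endian bytes at an odd (unaligned) offset.
    const unsigned char buf[5] = {0x00, 0x01, 0x02, 0x03, 0x04};
    std::uint32_t v = unalignedLoadEndian<std::endian::big, std::uint32_t>(buf + 1);
    std::printf("0x%08x\n", static_cast<unsigned>(v)); // 0x01020304 on any host byte order
}
```
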
@@ -235,6 +235,17 @@ ssize_t getrandom(void *buf, size_t buflen, unsigned flags)
    return syscall(SYS_getrandom, buf, buflen, flags);
}

/* Structure for scatter/gather I/O. */
struct iovec
{
    void *iov_base; /* Pointer to data. */
    size_t iov_len; /* Length of data. */
};

ssize_t preadv(int __fd, const struct iovec *__iovec, int __count, __off_t __offset)
{
    return syscall(SYS_preadv, __fd, __iovec, __count, (long)(__offset), (long)(__offset>>32));
}

#include <errno.h>
#include <limits.h>
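
The `preadv` shim forwards to the raw syscall, splitting the 64-bit file offset into low and high halves as the kernel's syscall ABI expects. A hedged usage sketch of the call it provides (standard POSIX `preadv` semantics; `/etc/hostname` is just an arbitrary readable file chosen for illustration):

```cpp
#include <fcntl.h>
#include <sys/uio.h>
#include <unistd.h>
#include <cstdio>

int main()
{
    int fd = open("/etc/hostname", O_RDONLY); // any readable file works
    if (fd < 0)
        return 1;

    char head[4], tail[16];
    iovec vec[2] = {{head, sizeof(head)}, {tail, sizeof(tail)}};

    // Scatter-read up to 20 bytes starting at offset 0 into two buffers with a
    // single system call; the explicit offset leaves the fd position untouched.
    // On glibc systems this resolves to libc's preadv; in a static musl-style
    // build the compatibility shim above supplies the symbol.
    ssize_t n = preadv(fd, vec, 2, 0);
    std::printf("read %zd bytes\n", n);
    close(fd);
}
```
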
@@ -1,37 +0,0 @@
//
// AutoTransaction.h
//
// Library: Data
// Package: DataCore
// Module:  AutoTransaction
//
// Forward header for the Transaction class.
//
// Copyright (c) 2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//


#ifndef Data_AutoTransaction_INCLUDED
#define Data_AutoTransaction_INCLUDED


#include "Poco/Data/Transaction.h"


namespace Poco
{
namespace Data
{


    typedef Transaction AutoTransaction;


}
} // namespace Poco::Data


#endif // Data_AutoTransaction_INCLUDED

@@ -1,54 +0,0 @@
//
// DynamicLOB.h
//
// Library: Data
// Package: DataCore
// Module:  DynamicLOB
//
// Definition of the Poco::Dynamic::Var LOB cast operators.
//
// Copyright (c) 2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//


#ifndef Data_DynamicLOB_INCLUDED
#define Data_DynamicLOB_INCLUDED


#include "Poco/Data/Data.h"
#include "Poco/Data/LOB.h"
#include "Poco/Dynamic/Var.h"


namespace Poco
{
namespace Data
{

    template <typename T>
    class LOB;
    typedef LOB<unsigned char> BLOB;
    typedef LOB<char> CLOB;

}
} // namespace Poco::Data


namespace Poco
{
namespace Dynamic
{

    template <>
    Data_API Var::operator Poco::Data::CLOB() const;
    template <>
    Data_API Var::operator Poco::Data::BLOB() const;

}
} // namespace Poco::Dynamic


#endif // Data_DynamicLOB_INCLUDED

@@ -1,149 +0,0 @@
//
// LOBStream.h
//
// Library: Data
// Package: DataCore
// Module:  LOBStream
//
// Definition of the LOBStream class.
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//


#ifndef Data_LOBStream_INCLUDED
#define Data_LOBStream_INCLUDED


#include <istream>
#include <ostream>
#include "Poco/Data/LOB.h"
#include "Poco/Foundation.h"
#include "Poco/UnbufferedStreamBuf.h"


namespace Poco
{
namespace Data
{


    template <typename T>
    class LOBStreamBuf : public BasicUnbufferedStreamBuf<T, std::char_traits<T>>
    /// This is the streambuf class used for reading from and writing to a LOB.
    {
    public:
        LOBStreamBuf(LOB<T> & lob) : _lob(lob), _it(_lob.begin())
        /// Creates LOBStreamBuf.
        {
        }

        ~LOBStreamBuf()
        /// Destroys LOBStreamBuf.
        {
        }

    protected:
        typedef std::char_traits<T> TraitsType;
        typedef BasicUnbufferedStreamBuf<T, TraitsType> BaseType;

        typename BaseType::int_type readFromDevice()
        {
            if (_it != _lob.end())
                return BaseType::charToInt(*_it++);
            else
                return -1;
        }

        typename BaseType::int_type writeToDevice(T c)
        {
            _lob.appendRaw(&c, 1);
            return 1;
        }

    private:
        LOB<T> & _lob;
        typename LOB<T>::Iterator _it;
    };


    template <typename T>
    class LOBIOS : public virtual std::ios
    /// The base class for LOBInputStream and
    /// LOBOutputStream.
    ///
    /// This class is needed to ensure the correct initialization
    /// order of the stream buffer and base classes.
    {
    public:
        LOBIOS(LOB<T> & lob, openmode mode) : _buf(lob)
        /// Creates the LOBIOS with the given LOB.
        {
            poco_ios_init(&_buf);
        }

        ~LOBIOS()
        /// Destroys the LOBIOS.
        {
        }

        LOBStreamBuf<T> * rdbuf()
        /// Returns a pointer to the internal LOBStreamBuf.
        {
            return &_buf;
        }

    protected:
        LOBStreamBuf<T> _buf;
    };


    template <typename T>
    class LOBOutputStream : public LOBIOS<T>, public std::basic_ostream<T, std::char_traits<T>>
    /// An output stream for writing to a LOB.
    {
    public:
        LOBOutputStream(LOB<T> & lob) : LOBIOS<T>(lob, std::ios::out), std::ostream(LOBIOS<T>::rdbuf())
        /// Creates the LOBOutputStream with the given LOB.
        {
        }

        ~LOBOutputStream()
        /// Destroys the LOBOutputStream.
        {
        }
    };


    template <typename T>
    class LOBInputStream : public LOBIOS<T>, public std::basic_istream<T, std::char_traits<T>>
    /// An input stream for reading from a LOB.
    {
    public:
        LOBInputStream(LOB<T> & lob) : LOBIOS<T>(lob, std::ios::in), std::istream(LOBIOS<T>::rdbuf())
        /// Creates the LOBInputStream with the given LOB.
        {
        }

        ~LOBInputStream()
        /// Destroys the LOBInputStream.
        {
        }
    };


    typedef LOBOutputStream<unsigned char> BLOBOutputStream;
    typedef LOBOutputStream<char> CLOBOutputStream;

    typedef LOBInputStream<unsigned char> BLOBInputStream;
    typedef LOBInputStream<char> CLOBInputStream;

}
} // namespace Poco::Data


#endif // Data_LOBStream_INCLUDED

@ -1,74 +0,0 @@
//
// DynamicLOB.cpp
//
// Library: Data
// Package: DataCore
// Module: DynamicLOB
//
// Copyright (c) 2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//


#ifdef __GNUC__
// TODO: determine g++ version able to do the right thing without these specializations

#include "Poco/Data/DynamicLOB.h"
#include "Poco/Data/LOB.h"
#include "Poco/Dynamic/Var.h"


namespace Poco {
namespace Dynamic {


using Poco::Data::CLOB;
using Poco::Data::BLOB;


template <>
Var::operator CLOB () const
{
    VarHolder* pHolder = content();

    if (!pHolder)
        throw InvalidAccessException("Can not convert empty value.");

    if (typeid(CLOB) == pHolder->type())
        return extract<CLOB>();
    else
    {
        std::string result;
        pHolder->convert(result);
        return CLOB(result);
    }
}


template <>
Var::operator BLOB () const
{
    VarHolder* pHolder = content();

    if (!pHolder)
        throw InvalidAccessException("Can not convert empty value.");

    if (typeid(BLOB) == pHolder->type())
        return extract<BLOB>();
    else
    {
        std::string result;
        pHolder->convert(result);
        return BLOB(reinterpret_cast<const unsigned char*>(result.data()),
            result.size());
    }
}


} } // namespace Poco::Dynamic


#endif // __GNUC__
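An illustrative sketch of what the specializations above enable (hypothetical snippet, not from the commit):

#include <string>
#include "Poco/Data/DynamicLOB.h"
#include "Poco/Data/LOB.h"
#include "Poco/Dynamic/Var.h"

int main()
{
    Poco::Dynamic::Var v(std::string("hello"));
    Poco::Data::CLOB clob = v; // invokes Var::operator CLOB(): converts the held string
    return clob.size() == 5 ? 0 : 1;
}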
@ -86,7 +86,6 @@ set (SRCS
     src/LoggingFactory.cpp
     src/LoggingRegistry.cpp
     src/LogStream.cpp
-    src/Manifest.cpp
     src/MD5Engine.cpp
     src/MemoryPool.cpp
     src/MemoryStream.cpp
@ -107,7 +106,6 @@ set (SRCS
     src/PatternFormatter.cpp
     src/Pipe.cpp
     src/PipeImpl.cpp
-    src/PipeStream.cpp
     src/PriorityNotificationQueue.cpp
     src/Process.cpp
     src/PurgeStrategy.cpp
@ -1,92 +0,0 @@
//
// ClassLibrary.h
//
// Library: Foundation
// Package: SharedLibrary
// Module: ClassLoader
//
// Definitions for class libraries.
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//


#ifndef Foundation_ClassLibrary_INCLUDED
#define Foundation_ClassLibrary_INCLUDED


#include <typeinfo>
#include "Poco/Foundation.h"
#include "Poco/Manifest.h"


# define POCO_LIBRARY_API


//
// the entry points for every class library
//
extern "C" {
bool POCO_LIBRARY_API pocoBuildManifest(Poco::ManifestBase * pManifest);
void POCO_LIBRARY_API pocoInitializeLibrary();
void POCO_LIBRARY_API pocoUninitializeLibrary();
}


//
// additional support for named manifests
//
#define POCO_DECLARE_NAMED_MANIFEST(name) \
    extern "C" { \
    bool POCO_LIBRARY_API POCO_JOIN(pocoBuildManifest, name)(Poco::ManifestBase * pManifest); \
    }


//
// Macros to automatically implement pocoBuildManifest
//
// usage:
//
// POCO_BEGIN_MANIFEST(MyBaseClass)
//     POCO_EXPORT_CLASS(MyFirstClass)
//     POCO_EXPORT_CLASS(MySecondClass)
//     ...
// POCO_END_MANIFEST
//
#define POCO_BEGIN_MANIFEST_IMPL(fnName, base) \
    bool fnName(Poco::ManifestBase * pManifest_) \
    { \
        typedef base _Base; \
        typedef Poco::Manifest<_Base> _Manifest; \
        std::string requiredType(typeid(_Manifest).name()); \
        std::string actualType(pManifest_->className()); \
        if (requiredType == actualType) \
        { \
            Poco::Manifest<_Base> * pManifest = static_cast<_Manifest *>(pManifest_);


#define POCO_BEGIN_MANIFEST(base) POCO_BEGIN_MANIFEST_IMPL(pocoBuildManifest, base)


#define POCO_BEGIN_NAMED_MANIFEST(name, base) \
    POCO_DECLARE_NAMED_MANIFEST(name) \
    POCO_BEGIN_MANIFEST_IMPL(POCO_JOIN(pocoBuildManifest, name), base)


#define POCO_END_MANIFEST \
    return true; \
    } \
    else return false; \
    }


#define POCO_EXPORT_CLASS(cls) pManifest->insert(new Poco::MetaObject<cls, _Base>(#cls));


#define POCO_EXPORT_SINGLETON(cls) pManifest->insert(new Poco::MetaSingleton<cls, _Base>(#cls));


#endif // Foundation_ClassLibrary_INCLUDED
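A hypothetical plugin library illustrating the manifest macros above (all class names are placeholders, not from the commit):

#include "Poco/ClassLibrary.h"

class AbstractPlugin
{
public:
    virtual ~AbstractPlugin() { }
    virtual const char * name() const = 0;
};

class HelloPlugin : public AbstractPlugin
{
public:
    const char * name() const override { return "HelloPlugin"; }
};

// Expands to the pocoBuildManifest() entry point that a ClassLoader looks up.
POCO_BEGIN_MANIFEST(AbstractPlugin)
    POCO_EXPORT_CLASS(HelloPlugin)
POCO_END_MANIFEST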
@ -1,355 +0,0 @@
//
// ClassLoader.h
//
// Library: Foundation
// Package: SharedLibrary
// Module: ClassLoader
//
// Definition of the ClassLoader class.
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//


#ifndef Foundation_ClassLoader_INCLUDED
#define Foundation_ClassLoader_INCLUDED


#include <map>
#include "Poco/Exception.h"
#include "Poco/Foundation.h"
#include "Poco/Manifest.h"
#include "Poco/MetaObject.h"
#include "Poco/Mutex.h"
#include "Poco/SharedLibrary.h"


namespace Poco
{


template <class Base>
class ClassLoader
/// The ClassLoader loads C++ classes from shared libraries
/// at runtime. It must be instantiated with a root class
/// of the loadable classes.
/// For a class to be loadable from a library, the library
/// must provide a Manifest of all the classes it contains.
/// The Manifest for a shared library can be easily built
/// with the help of the macros in the header file
/// "Foundation/ClassLibrary.h".
///
/// Starting with POCO release 1.3, a class library can
/// export multiple manifests. In addition to the default
/// (unnamed) manifest, multiple named manifests can
/// be exported, each having a different base class.
///
/// There is one important restriction: one instance of
/// ClassLoader can only load one manifest from a class
/// library.
{
public:
    typedef AbstractMetaObject<Base> Meta;
    typedef Manifest<Base> Manif;
    typedef void (*InitializeLibraryFunc)();
    typedef void (*UninitializeLibraryFunc)();
    typedef bool (*BuildManifestFunc)(ManifestBase *);

    struct LibraryInfo
    {
        SharedLibrary * pLibrary;
        const Manif * pManifest;
        int refCount;
    };
    typedef std::map<std::string, LibraryInfo> LibraryMap;

    class Iterator
    /// The ClassLoader's very own iterator class.
    {
    public:
        typedef std::pair<std::string, const Manif *> Pair;

        Iterator(const typename LibraryMap::const_iterator & it) { _it = it; }
        Iterator(const Iterator & it) { _it = it._it; }
        ~Iterator() { }
        Iterator & operator=(const Iterator & it)
        {
            _it = it._it;
            return *this;
        }
        inline bool operator==(const Iterator & it) const { return _it == it._it; }
        inline bool operator!=(const Iterator & it) const { return _it != it._it; }
        Iterator & operator++() // prefix
        {
            ++_it;
            return *this;
        }
        Iterator operator++(int) // postfix
        {
            Iterator result(_it);
            ++_it;
            return result;
        }
        inline const Pair * operator*() const
        {
            _pair.first = _it->first;
            _pair.second = _it->second.pManifest;
            return &_pair;
        }
        inline const Pair * operator->() const
        {
            _pair.first = _it->first;
            _pair.second = _it->second.pManifest;
            return &_pair;
        }

    private:
        typename LibraryMap::const_iterator _it;
        mutable Pair _pair;
    };

    ClassLoader()
    /// Creates the ClassLoader.
    {
    }

    virtual ~ClassLoader()
    /// Destroys the ClassLoader.
    {
        for (typename LibraryMap::const_iterator it = _map.begin(); it != _map.end(); ++it)
        {
            delete it->second.pLibrary;
            delete it->second.pManifest;
        }
    }

    void loadLibrary(const std::string & path, const std::string & manifest)
    /// Loads a library from the given path, using the given manifest.
    /// Does nothing if the library is already loaded.
    /// Throws a LibraryLoadException if the library
    /// cannot be loaded or does not have a Manifest.
    /// If the library exports a function named "pocoInitializeLibrary",
    /// this function is executed.
    /// If called multiple times for the same library,
    /// the number of calls to unloadLibrary() must be the same
    /// for the library to become unloaded.
    {
        FastMutex::ScopedLock lock(_mutex);

        typename LibraryMap::iterator it = _map.find(path);
        if (it == _map.end())
        {
            LibraryInfo li;
            li.pLibrary = 0;
            li.pManifest = 0;
            li.refCount = 1;
            try
            {
                li.pLibrary = new SharedLibrary(path);
                li.pManifest = new Manif();
                std::string pocoBuildManifestSymbol("pocoBuildManifest");
                pocoBuildManifestSymbol.append(manifest);
                if (li.pLibrary->hasSymbol("pocoInitializeLibrary"))
                {
                    InitializeLibraryFunc initializeLibrary = (InitializeLibraryFunc)li.pLibrary->getSymbol("pocoInitializeLibrary");
                    initializeLibrary();
                }
                if (li.pLibrary->hasSymbol(pocoBuildManifestSymbol))
                {
                    BuildManifestFunc buildManifest = (BuildManifestFunc)li.pLibrary->getSymbol(pocoBuildManifestSymbol);
                    if (buildManifest(const_cast<Manif *>(li.pManifest)))
                        _map[path] = li;
                    else
                        throw LibraryLoadException(std::string("Manifest class mismatch in ") + path, manifest);
                }
                else
                    throw LibraryLoadException(std::string("No manifest in ") + path, manifest);
            }
            catch (...)
            {
                delete li.pLibrary;
                delete li.pManifest;
                throw;
            }
        }
        else
        {
            ++it->second.refCount;
        }
    }

    void loadLibrary(const std::string & path)
    /// Loads a library from the given path. Does nothing
    /// if the library is already loaded.
    /// Throws a LibraryLoadException if the library
    /// cannot be loaded or does not have a Manifest.
    /// If the library exports a function named "pocoInitializeLibrary",
    /// this function is executed.
    /// If called multiple times for the same library,
    /// the number of calls to unloadLibrary() must be the same
    /// for the library to become unloaded.
    ///
    /// Equivalent to loadLibrary(path, "").
    {
        loadLibrary(path, "");
    }

    void unloadLibrary(const std::string & path)
    /// Unloads the given library.
    /// Be extremely cautious when unloading shared libraries.
    /// If objects from the library are still referenced somewhere,
    /// a total crash is very likely.
    /// If the library exports a function named "pocoUninitializeLibrary",
    /// this function is executed before it is unloaded.
    /// If loadLibrary() has been called multiple times for the same
    /// library, the number of calls to unloadLibrary() must be the same
    /// for the library to become unloaded.
    {
        FastMutex::ScopedLock lock(_mutex);

        typename LibraryMap::iterator it = _map.find(path);
        if (it != _map.end())
        {
            if (--it->second.refCount == 0)
            {
                if (it->second.pLibrary->hasSymbol("pocoUninitializeLibrary"))
                {
                    UninitializeLibraryFunc uninitializeLibrary
                        = (UninitializeLibraryFunc)it->second.pLibrary->getSymbol("pocoUninitializeLibrary");
                    uninitializeLibrary();
                }
                delete it->second.pManifest;
                it->second.pLibrary->unload();
                delete it->second.pLibrary;
                _map.erase(it);
            }
        }
        else
            throw NotFoundException(path);
    }

    const Meta * findClass(const std::string & className) const
    /// Returns a pointer to the MetaObject for the given
    /// class, or a null pointer if the class is not known.
    {
        FastMutex::ScopedLock lock(_mutex);

        for (typename LibraryMap::const_iterator it = _map.begin(); it != _map.end(); ++it)
        {
            const Manif * pManif = it->second.pManifest;
            typename Manif::Iterator itm = pManif->find(className);
            if (itm != pManif->end())
                return *itm;
        }
        return 0;
    }

    const Meta & classFor(const std::string & className) const
    /// Returns a reference to the MetaObject for the given
    /// class. Throws a NotFoundException if the class
    /// is not known.
    {
        const Meta * pMeta = findClass(className);
        if (pMeta)
            return *pMeta;
        else
            throw NotFoundException(className);
    }

    Base * create(const std::string & className) const
    /// Creates an instance of the given class.
    /// Throws a NotFoundException if the class
    /// is not known.
    {
        return classFor(className).create();
    }

    Base & instance(const std::string & className) const
    /// Returns a reference to the sole instance of
    /// the given class. The class must be a singleton,
    /// otherwise an InvalidAccessException will be thrown.
    /// Throws a NotFoundException if the class
    /// is not known.
    {
        return classFor(className).instance();
    }

    bool canCreate(const std::string & className) const
    /// Returns true if create() can create new instances
    /// of the class.
    {
        return classFor(className).canCreate();
    }

    void destroy(const std::string & className, Base * pObject) const
    /// Destroys the object pObject points to.
    /// Does nothing if object is not found.
    {
        classFor(className).destroy(pObject);
    }

    bool isAutoDelete(const std::string & className, Base * pObject) const
    /// Returns true if the object is automatically
    /// deleted by its meta object.
    {
        return classFor(className).isAutoDelete(pObject);
    }

    const Manif * findManifest(const std::string & path) const
    /// Returns a pointer to the Manifest for the given
    /// library, or a null pointer if the library has not been loaded.
    {
        FastMutex::ScopedLock lock(_mutex);

        typename LibraryMap::const_iterator it = _map.find(path);
        if (it != _map.end())
            return it->second.pManifest;
        else
            return 0;
    }

    const Manif & manifestFor(const std::string & path) const
    /// Returns a reference to the Manifest for the given library.
    /// Throws a NotFoundException if the library has not been loaded.
    {
        const Manif * pManif = findManifest(path);
        if (pManif)
            return *pManif;
        else
            throw NotFoundException(path);
    }

    bool isLibraryLoaded(const std::string & path) const
    /// Returns true if the library with the given name
    /// has already been loaded.
    {
        return findManifest(path) != 0;
    }

    Iterator begin() const
    {
        FastMutex::ScopedLock lock(_mutex);

        return Iterator(_map.begin());
    }

    Iterator end() const
    {
        FastMutex::ScopedLock lock(_mutex);

        return Iterator(_map.end());
    }

private:
    LibraryMap _map;
    mutable FastMutex _mutex;
};


} // namespace Poco


#endif // Foundation_ClassLoader_INCLUDED
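A host-side sketch of loading the hypothetical plugin library from the ClassLibrary.h example above (paths and class names are placeholders; in practice the base class would live in a shared header):

#include <iostream>
#include <string>
#include "Poco/ClassLoader.h"
#include "Poco/SharedLibrary.h"

class AbstractPlugin
{
public:
    virtual ~AbstractPlugin() { }
    virtual const char * name() const = 0;
};

int main()
{
    Poco::ClassLoader<AbstractPlugin> loader;
    std::string path("HelloPluginLib");
    path.append(Poco::SharedLibrary::suffix()); // ".so", ".dll" or ".dylib"

    loader.loadLibrary(path);                          // runs pocoInitializeLibrary() if exported
    AbstractPlugin * p = loader.create("HelloPlugin"); // instantiated via its MetaObject
    std::cout << p->name() << std::endl;
    loader.destroy("HelloPlugin", p);                  // deletes through the meta object
    loader.unloadLibrary(path);
    return 0;
}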
@ -1,152 +0,0 @@
//
// Manifest.h
//
// Library: Foundation
// Package: SharedLibrary
// Module: ClassLoader
//
// Definition of the Manifest class.
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//


#ifndef Foundation_Manifest_INCLUDED
#define Foundation_Manifest_INCLUDED


#include <map>
#include <typeinfo>
#include "Poco/Foundation.h"
#include "Poco/MetaObject.h"


namespace Poco
{


class Foundation_API ManifestBase
/// ManifestBase is a common base class for
/// all instantiations of Manifest.
{
public:
    ManifestBase();
    virtual ~ManifestBase();

    virtual const char * className() const = 0;
    /// Returns the type name of the manifest's class.
};


template <class B>
class Manifest : public ManifestBase
/// A Manifest maintains a list of all classes
/// contained in a dynamically loadable class
/// library.
/// Internally, the information is held
/// in a map. An iterator is provided to
/// iterate over all the classes in a Manifest.
{
public:
    typedef AbstractMetaObject<B> Meta;
    typedef std::map<std::string, const Meta *> MetaMap;

    class Iterator
    /// The Manifest's very own iterator class.
    {
    public:
        Iterator(const typename MetaMap::const_iterator & it) { _it = it; }
        Iterator(const Iterator & it) { _it = it._it; }
        ~Iterator() { }
        Iterator & operator=(const Iterator & it)
        {
            _it = it._it;
            return *this;
        }
        inline bool operator==(const Iterator & it) const { return _it == it._it; }
        inline bool operator!=(const Iterator & it) const { return _it != it._it; }
        Iterator & operator++() // prefix
        {
            ++_it;
            return *this;
        }
        Iterator operator++(int) // postfix
        {
            Iterator result(_it);
            ++_it;
            return result;
        }
        inline const Meta * operator*() const { return _it->second; }
        inline const Meta * operator->() const { return _it->second; }

    private:
        typename MetaMap::const_iterator _it;
    };

    Manifest()
    /// Creates an empty Manifest.
    {
    }

    virtual ~Manifest()
    /// Destroys the Manifest.
    {
        clear();
    }

    Iterator find(const std::string & className) const
    /// Returns an iterator pointing to the MetaObject
    /// for the given class. If the MetaObject cannot
    /// be found, the iterator points to end().
    {
        return Iterator(_metaMap.find(className));
    }

    Iterator begin() const { return Iterator(_metaMap.begin()); }

    Iterator end() const { return Iterator(_metaMap.end()); }

    bool insert(const Meta * pMeta)
    /// Inserts a MetaObject. Returns true if insertion
    /// was successful, false if a class with the same
    /// name already exists.
    {
        return _metaMap.insert(typename MetaMap::value_type(pMeta->name(), pMeta)).second;
    }

    void clear()
    /// Removes all MetaObjects from the manifest.
    {
        for (typename MetaMap::iterator it = _metaMap.begin(); it != _metaMap.end(); ++it)
        {
            delete it->second;
        }
        _metaMap.clear();
    }

    int size() const
    /// Returns the number of MetaObjects in the Manifest.
    {
        return int(_metaMap.size());
    }

    bool empty() const
    /// Returns true iff the Manifest does not contain any MetaObjects.
    {
        return _metaMap.empty();
    }

    const char * className() const { return typeid(*this).name(); }

private:
    MetaMap _metaMap;
};


} // namespace Poco


#endif // Foundation_Manifest_INCLUDED
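A sketch of enumerating every class a loaded library exports through its Manifest (generic over the loader's base class; complements the loader sketch above):

#include <iostream>
#include <string>
#include "Poco/ClassLoader.h"

template <class Base>
void dumpManifest(const Poco::ClassLoader<Base> & loader, const std::string & path)
{
    typedef Poco::Manifest<Base> Manif;
    const Manif & manifest = loader.manifestFor(path); // throws NotFoundException if not loaded
    std::cout << path << " exports " << manifest.size() << " classes:" << std::endl;
    for (typename Manif::Iterator it = manifest.begin(); it != manifest.end(); ++it)
        std::cout << "  " << it->name() << std::endl;  // MetaObject name == exported class name
}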
@ -1,121 +0,0 @@
//
// PipeStream.h
//
// Library: Foundation
// Package: Processes
// Module: PipeStream
//
// Definition of the PipeStream class.
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//


#ifndef Foundation_PipeStream_INCLUDED
#define Foundation_PipeStream_INCLUDED


#include <istream>
#include <ostream>
#include "Poco/BufferedStreamBuf.h"
#include "Poco/Foundation.h"
#include "Poco/Pipe.h"


namespace Poco
{


class Foundation_API PipeStreamBuf : public BufferedStreamBuf
/// This is the streambuf class used for reading from and writing to a Pipe.
{
public:
    typedef BufferedStreamBuf::openmode openmode;

    PipeStreamBuf(const Pipe & pipe, openmode mode);
    /// Creates a PipeStreamBuf with the given Pipe.

    ~PipeStreamBuf();
    /// Destroys the PipeStreamBuf.

    void close();
    /// Closes the pipe.

protected:
    int readFromDevice(char * buffer, std::streamsize length);
    int writeToDevice(const char * buffer, std::streamsize length);

private:
    enum
    {
        STREAM_BUFFER_SIZE = 1024
    };

    Pipe _pipe;
};


class Foundation_API PipeIOS : public virtual std::ios
/// The base class for PipeInputStream and
/// PipeOutputStream.
///
/// This class is needed to ensure the correct initialization
/// order of the stream buffer and base classes.
{
public:
    PipeIOS(const Pipe & pipe, openmode mode);
    /// Creates the PipeIOS with the given Pipe.

    ~PipeIOS();
    /// Destroys the PipeIOS.
    ///
    /// Flushes the buffer, but does not close the pipe.

    PipeStreamBuf * rdbuf();
    /// Returns a pointer to the internal PipeStreamBuf.

    void close();
    /// Flushes the stream and closes the pipe.

protected:
    PipeStreamBuf _buf;
};


class Foundation_API PipeOutputStream : public PipeIOS, public std::ostream
/// An output stream for writing to a Pipe.
{
public:
    PipeOutputStream(const Pipe & pipe);
    /// Creates the PipeOutputStream with the given Pipe.

    ~PipeOutputStream();
    /// Destroys the PipeOutputStream.
    ///
    /// Flushes the buffer, but does not close the pipe.
};


class Foundation_API PipeInputStream : public PipeIOS, public std::istream
/// An input stream for reading from a Pipe.
///
/// Using formatted input from a PipeInputStream
/// is not recommended, due to the read-ahead behavior of
/// istream with formatted reads.
{
public:
    PipeInputStream(const Pipe & pipe);
    /// Creates the PipeInputStream with the given Pipe.

    ~PipeInputStream();
    /// Destroys the PipeInputStream.
};


} // namespace Poco


#endif // Foundation_PipeStream_INCLUDED
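A minimal sketch of the usual Pipe/PipeInputStream pairing with Poco::Process (illustrative, assuming the standard Poco Process API):

#include <iostream>
#include "Poco/Pipe.h"
#include "Poco/PipeStream.h"
#include "Poco/Process.h"
#include "Poco/StreamCopier.h"

int main()
{
    Poco::Pipe outPipe;
    Poco::Process::Args args;
    args.push_back("-l");
    Poco::ProcessHandle ph = Poco::Process::launch("ls", args, 0, &outPipe, 0);

    Poco::PipeInputStream istr(outPipe);             // unformatted reads are preferred
    Poco::StreamCopier::copyStream(istr, std::cout); // drain the child's stdout
    return ph.wait();
}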
@ -1,31 +0,0 @@
//
// Manifest.cpp
//
// Library: Foundation
// Package: SharedLibrary
// Module: ClassLoader
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//


#include "Poco/Manifest.h"


namespace Poco {


ManifestBase::ManifestBase()
{
}


ManifestBase::~ManifestBase()
{
}


} // namespace Poco
@ -1,127 +0,0 @@
//
// PipeStream.cpp
//
// Library: Foundation
// Package: Processes
// Module: PipeStream
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//


#include "Poco/PipeStream.h"


namespace Poco {


//
// PipeStreamBuf
//


PipeStreamBuf::PipeStreamBuf(const Pipe& pipe, openmode mode):
    BufferedStreamBuf(STREAM_BUFFER_SIZE, mode),
    _pipe(pipe)
{
}


PipeStreamBuf::~PipeStreamBuf()
{
}


int PipeStreamBuf::readFromDevice(char* buffer, std::streamsize length)
{
    return _pipe.readBytes(buffer, (int) length);
}


int PipeStreamBuf::writeToDevice(const char* buffer, std::streamsize length)
{
    return _pipe.writeBytes(buffer, (int) length);
}


void PipeStreamBuf::close()
{
    _pipe.close(Pipe::CLOSE_BOTH);
}


//
// PipeIOS
//


PipeIOS::PipeIOS(const Pipe& pipe, openmode mode):
    _buf(pipe, mode)
{
    poco_ios_init(&_buf);
}


PipeIOS::~PipeIOS()
{
    try
    {
        _buf.sync();
    }
    catch (...)
    {
    }
}


PipeStreamBuf* PipeIOS::rdbuf()
{
    return &_buf;
}


void PipeIOS::close()
{
    _buf.sync();
    _buf.close();
}


//
// PipeOutputStream
//


PipeOutputStream::PipeOutputStream(const Pipe& pipe):
    PipeIOS(pipe, std::ios::out),
    std::ostream(&_buf)
{
}


PipeOutputStream::~PipeOutputStream()
{
}


//
// PipeInputStream
//


PipeInputStream::PipeInputStream(const Pipe& pipe):
    PipeIOS(pipe, std::ios::in),
    std::istream(&_buf)
{
}


PipeInputStream::~PipeInputStream()
{
}


} // namespace Poco
@ -1,109 +0,0 @@
//
// SMTPChannel.h
//
// Library: Net
// Package: Logging
// Module: SMTPChannel
//
// Definition of the SMTPChannel class.
//
// Copyright (c) 2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//


#ifndef Net_SMTPChannel_INCLUDED
#define Net_SMTPChannel_INCLUDED


#include "Poco/Channel.h"
#include "Poco/Net/Net.h"
#include "Poco/String.h"


namespace Poco
{
namespace Net
{


class Net_API SMTPChannel : public Poco::Channel
/// This Channel implements SMTP (email) logging.
{
public:
    SMTPChannel();
    /// Creates an SMTPChannel.

    SMTPChannel(const std::string & mailhost, const std::string & sender, const std::string & recipient);
    /// Creates an SMTPChannel with the given target mailhost, sender, and recipient.

    void open();
    /// Opens the SMTPChannel.

    void close();
    /// Closes the SMTPChannel.

    void log(const Message & msg);
    /// Sends the message's text to the recipient.

    void setProperty(const std::string & name, const std::string & value);
    /// Sets the property with the given value.
    ///
    /// The following properties are supported:
    ///     * mailhost:   The SMTP server. Default is "localhost".
    ///     * sender:     The sender address.
    ///     * recipient:  The recipient address.
    ///     * local:      If true, local time is used. Default is true.
    ///     * attachment: Filename of the file to attach.
    ///     * type:       Content type of the file to attach.
    ///     * delete:     Boolean value indicating whether to delete
    ///                   the attachment file after sending.
    ///     * throw:      Boolean value indicating whether to throw
    ///                   an exception upon failure.

    std::string getProperty(const std::string & name) const;
    /// Returns the value of the property with the given name.

    static void registerChannel();
    /// Registers the channel with the global LoggingFactory.

    static const std::string PROP_MAILHOST;
    static const std::string PROP_SENDER;
    static const std::string PROP_RECIPIENT;
    static const std::string PROP_LOCAL;
    static const std::string PROP_ATTACHMENT;
    static const std::string PROP_TYPE;
    static const std::string PROP_DELETE;
    static const std::string PROP_THROW;

protected:
    ~SMTPChannel();

private:
    bool isTrue(const std::string & value) const;

    std::string _mailHost;
    std::string _sender;
    std::string _recipient;
    bool _local;
    std::string _attachment;
    std::string _type;
    bool _delete;
    bool _throw;
};


inline bool SMTPChannel::isTrue(const std::string & value) const
{
    return (
        (0 == icompare(value, "true")) || (0 == icompare(value, "t")) || (0 == icompare(value, "yes")) || (0 == icompare(value, "y")));
}


}
} // namespace Poco::Net


#endif // Net_SMTPChannel_INCLUDED
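A hypothetical wiring of the SMTPChannel into a Poco logger (the host and addresses are placeholders):

#include "Poco/AutoPtr.h"
#include "Poco/Logger.h"
#include "Poco/Net/SMTPChannel.h"

void setupMailLogging()
{
    Poco::AutoPtr<Poco::Net::SMTPChannel> pChannel(
        new Poco::Net::SMTPChannel("mail.example.com", "app@example.com", "ops@example.com"));
    pChannel->setProperty("throw", "false"); // swallow SMTP failures instead of throwing
    Poco::Logger::root().setChannel(pChannel);
    Poco::Logger::root().error("disk almost full"); // delivered as an email
}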
@ -1,210 +0,0 @@
//
// SMTPChannel.cpp
//
// Library: Net
// Package: Logging
// Module: SMTPChannel
//
// Copyright (c) 2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//


#include "Poco/Net/SMTPChannel.h"
#include "Poco/Net/MailMessage.h"
#include "Poco/Net/MailRecipient.h"
#include "Poco/Net/SMTPClientSession.h"
#include "Poco/Net/StringPartSource.h"
#include "Poco/Message.h"
#include "Poco/DateTimeFormatter.h"
#include "Poco/DateTimeFormat.h"
#include "Poco/LocalDateTime.h"
#include "Poco/LoggingFactory.h"
#include "Poco/Instantiator.h"
#include "Poco/NumberFormatter.h"
#include "Poco/FileStream.h"
#include "Poco/File.h"
#include "Poco/Environment.h"


namespace Poco {
namespace Net {


const std::string SMTPChannel::PROP_MAILHOST("mailhost");
const std::string SMTPChannel::PROP_SENDER("sender");
const std::string SMTPChannel::PROP_RECIPIENT("recipient");
const std::string SMTPChannel::PROP_LOCAL("local");
const std::string SMTPChannel::PROP_ATTACHMENT("attachment");
const std::string SMTPChannel::PROP_TYPE("type");
const std::string SMTPChannel::PROP_DELETE("delete");
const std::string SMTPChannel::PROP_THROW("throw");


SMTPChannel::SMTPChannel():
    _mailHost("localhost"),
    _local(true),
    _type("text/plain"),
    _delete(false),
    _throw(false)
{
}


SMTPChannel::SMTPChannel(const std::string& mailhost, const std::string& sender, const std::string& recipient):
    _mailHost(mailhost),
    _sender(sender),
    _recipient(recipient),
    _local(true),
    _type("text/plain"),
    _delete(false),
    _throw(false)
{
}


SMTPChannel::~SMTPChannel()
{
    try
    {
        close();
    }
    catch (...)
    {
        poco_unexpected();
    }
}


void SMTPChannel::open()
{
}


void SMTPChannel::close()
{
}


void SMTPChannel::log(const Message& msg)
{
    try
    {
        MailMessage message;
        message.setSender(_sender);
        message.addRecipient(MailRecipient(MailRecipient::PRIMARY_RECIPIENT, _recipient));
        message.setSubject("Log Message from " + _sender);
        std::stringstream content;
        content << "Log Message\r\n"
            << "===========\r\n\r\n"
            << "Host: " << Environment::nodeName() << "\r\n"
            << "Logger: " << msg.getSource() << "\r\n";

        if (_local)
        {
            DateTime dt(msg.getTime());
            content << "Timestamp: " << DateTimeFormatter::format(LocalDateTime(dt), DateTimeFormat::RFC822_FORMAT) << "\r\n";
        }
        else
            content << "Timestamp: " << DateTimeFormatter::format(msg.getTime(), DateTimeFormat::RFC822_FORMAT) << "\r\n";

        content << "Priority: " << NumberFormatter::format(msg.getPriority()) << "\r\n"
            << "Process ID: " << NumberFormatter::format(msg.getPid()) << "\r\n"
            << "Thread: " << msg.getThread() << " (ID: " << msg.getTid() << ")\r\n"
            << "Message text: " << msg.getText() << "\r\n\r\n";

        message.addContent(new StringPartSource(content.str()));

        if (!_attachment.empty())
        {
            {
                Poco::FileInputStream fis(_attachment, std::ios::in | std::ios::binary | std::ios::ate);
                if (fis.good())
                {
                    typedef std::allocator<std::string::value_type>::size_type SST;

                    std::streamoff size = fis.tellg();
                    poco_assert (std::numeric_limits<unsigned int>::max() >= size);
                    poco_assert (std::numeric_limits<SST>::max() >= size);
                    char* pMem = new char [static_cast<unsigned int>(size)];
                    fis.seekg(std::ios::beg);
                    fis.read(pMem, size);
                    message.addAttachment(_attachment,
                        new StringPartSource(std::string(pMem, static_cast<SST>(size)),
                            _type,
                            _attachment));

                    delete [] pMem;
                }
            }
            if (_delete) File(_attachment).remove();
        }

        SMTPClientSession session(_mailHost);
        session.login();
        session.sendMessage(message);
        session.close();
    }
    catch (Exception&)
    {
        if (_throw) throw;
    }
}


void SMTPChannel::setProperty(const std::string& name, const std::string& value)
{
    if (name == PROP_MAILHOST)
        _mailHost = value;
    else if (name == PROP_SENDER)
        _sender = value;
    else if (name == PROP_RECIPIENT)
        _recipient = value;
    else if (name == PROP_LOCAL)
        _local = isTrue(value);
    else if (name == PROP_ATTACHMENT)
        _attachment = value;
    else if (name == PROP_TYPE)
        _type = value;
    else if (name == PROP_DELETE)
        _delete = isTrue(value);
    else if (name == PROP_THROW)
        _throw = isTrue(value);
    else
        Channel::setProperty(name, value);
}


std::string SMTPChannel::getProperty(const std::string& name) const
{
    if (name == PROP_MAILHOST)
        return _mailHost;
    else if (name == PROP_SENDER)
        return _sender;
    else if (name == PROP_RECIPIENT)
        return _recipient;
    else if (name == PROP_LOCAL)
        return _local ? "true" : "false";
    else if (name == PROP_ATTACHMENT)
        return _attachment;
    else if (name == PROP_TYPE)
        return _type;
    else if (name == PROP_DELETE)
        return _delete ? "true" : "false";
    else if (name == PROP_THROW)
        return _throw ? "true" : "false";
    else
        return Channel::getProperty(name);
}


void SMTPChannel::registerChannel()
{
    Poco::LoggingFactory::defaultFactory().registerChannelClass("SMTPChannel",
        new Poco::Instantiator<SMTPChannel, Poco::Channel>);
}


} } // namespace Poco::Net
@ -1,97 +0,0 @@
//
// ConfigurationMapper.h
//
// Library: Util
// Package: Configuration
// Module: ConfigurationMapper
//
// Definition of the ConfigurationMapper class.
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//


#ifndef Util_ConfigurationMapper_INCLUDED
#define Util_ConfigurationMapper_INCLUDED


#include "Poco/Util/AbstractConfiguration.h"
#include "Poco/Util/Util.h"


namespace Poco
{
namespace Util
{


class Util_API ConfigurationMapper : public AbstractConfiguration
/// This configuration maps a property hierarchy into another
/// hierarchy.
///
/// For example, given a configuration with the following properties:
///     config.value1
///     config.value2
///     config.sub.value1
///     config.sub.value2
/// and a ConfigurationView with fromPrefix == "config" and toPrefix == "root.conf", then
/// the above properties will be available via the mapper as
///     root.conf.value1
///     root.conf.value2
///     root.conf.sub.value1
///     root.conf.sub.value2
///
/// FromPrefix can be empty, in which case, and given toPrefix == "root",
/// the properties will be available as
///     root.config.value1
///     root.config.value2
///     root.config.sub.value1
///     root.config.sub.value2
///
/// This is equivalent to the functionality of the ConfigurationView class.
///
/// Similarly, toPrefix can also be empty. Given fromPrefix == "config" and
/// toPrefix == "", the properties will be available as
///     value1
///     value2
///     sub.value1
///     sub.value2
///
/// If both fromPrefix and toPrefix are empty, no mapping is performed.
///
/// A ConfigurationMapper is most useful in combination with a
/// LayeredConfiguration.
{
public:
    ConfigurationMapper(const std::string & fromPrefix, const std::string & toPrefix, AbstractConfiguration * pConfig);
    /// Creates the ConfigurationMapper. The ConfigurationMapper does not take
    /// ownership of the passed configuration.

protected:
    bool getRaw(const std::string & key, std::string & value) const;
    void setRaw(const std::string & key, const std::string & value);
    void enumerate(const std::string & key, Keys & range) const;
    void removeRaw(const std::string & key);

    std::string translateKey(const std::string & key) const;

    ~ConfigurationMapper();

private:
    ConfigurationMapper(const ConfigurationMapper &);
    ConfigurationMapper & operator=(const ConfigurationMapper &);

    std::string _fromPrefix;
    std::string _toPrefix;
    AbstractConfiguration * _pConfig;
};


}
} // namespace Poco::Util


#endif // Util_ConfigurationMapper_INCLUDED
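A small sketch of the mapping described above (hypothetical keys; assumes Poco's MapConfiguration as a backing store):

#include <iostream>
#include "Poco/AutoPtr.h"
#include "Poco/Util/ConfigurationMapper.h"
#include "Poco/Util/MapConfiguration.h"

int main()
{
    Poco::AutoPtr<Poco::Util::MapConfiguration> pConf(new Poco::Util::MapConfiguration);
    pConf->setString("config.sub.value1", "42");

    // Expose "config.*" under "root.conf.*"; the mapper does not take ownership,
    // it duplicate()s and later release()s the wrapped configuration instead.
    Poco::AutoPtr<Poco::Util::ConfigurationMapper> pMapper(
        new Poco::Util::ConfigurationMapper("config", "root.conf", pConf));
    std::cout << pMapper->getString("root.conf.sub.value1") << std::endl; // prints 42
    return 0;
}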
@ -1,75 +0,0 @@
//
// WinRegistryConfiguration.h
//
// Library: Util
// Package: Windows
// Module: WinRegistryConfiguration
//
// Definition of the WinRegistryConfiguration class.
//
// Copyright (c) 2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//


#ifndef Util_WinRegistryConfiguration_INCLUDED
#define Util_WinRegistryConfiguration_INCLUDED


#include "Poco/String.h"
#include "Poco/Util/AbstractConfiguration.h"
#include "Poco/Util/Util.h"


namespace Poco
{
namespace Util
{


class Util_API WinRegistryConfiguration : public AbstractConfiguration
/// An implementation of AbstractConfiguration that stores configuration data
/// in the Windows registry.
///
/// Removing a key is not supported. An attempt to remove a key results
/// in a NotImplementedException being thrown.
{
public:
    WinRegistryConfiguration(const std::string & rootPath, REGSAM extraSam = 0);
    /// Creates the WinRegistryConfiguration.
    /// The rootPath must start with one of the root key names
    /// like HKEY_CLASSES_ROOT, e.g. HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services.
    /// All further keys are relative to the root path and can be
    /// dot separated, e.g. the path MyService.ServiceName will be converted to
    /// HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\MyService\ServiceName.
    /// The extraSam parameter will be passed along to WinRegistryKey, to control
    /// registry virtualization, for example.

protected:
    ~WinRegistryConfiguration();
    /// Destroys the WinRegistryConfiguration.

    bool getRaw(const std::string & key, std::string & value) const;
    void setRaw(const std::string & key, const std::string & value);
    void enumerate(const std::string & key, Keys & range) const;
    void removeRaw(const std::string & key);

    std::string convertToRegFormat(const std::string & key, std::string & keyName) const;
    /// Takes a key in the format A.B.C and converts it to
    /// registry format A\B\C; the last entry is the keyName, the rest is returned as the path.

    friend class WinConfigurationTest;

private:
    std::string _rootPath;
    REGSAM _extraSam;
};


}
} // namespace Poco::Util


#endif // Util_WinRegistryConfiguration_INCLUDED
@ -1,199 +0,0 @@
//
// WinRegistryKey.h
//
// Library: Util
// Package: Windows
// Module: WinRegistryKey
//
// Definition of the WinRegistryKey class.
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//


#ifndef Util_WinRegistryKey_INCLUDED
#define Util_WinRegistryKey_INCLUDED


#include <vector>
#include "Poco/UnWindows.h"
#include "Poco/Util/Util.h"


namespace Poco
{
namespace Util
{


class Util_API WinRegistryKey
/// This class implements a convenient interface to the
/// Windows Registry.
///
/// This class is only available on Windows platforms.
{
public:
    typedef std::vector<std::string> Keys;
    typedef std::vector<std::string> Values;

    enum Type
    {
        REGT_NONE = 0,
        REGT_STRING = 1,
        REGT_STRING_EXPAND = 2,
        REGT_BINARY = 3,
        REGT_DWORD = 4,
        REGT_DWORD_BIG_ENDIAN = 5,
        REGT_LINK = 6,
        REGT_MULTI_STRING = 7,
        REGT_RESOURCE_LIST = 8,
        REGT_FULL_RESOURCE_DESCRIPTOR = 9,
        REGT_RESOURCE_REQUIREMENTS_LIST = 10,
        REGT_QWORD = 11
    };

    WinRegistryKey(const std::string & key, bool readOnly = false, REGSAM extraSam = 0);
    /// Creates the WinRegistryKey.
    ///
    /// The key must start with one of the root key names
    /// like HKEY_CLASSES_ROOT, e.g. HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services.
    ///
    /// If readOnly is true, then only read access to the registry
    /// is available and any attempt to write to the registry will
    /// result in an exception.
    ///
    /// extraSam is used to pass extra flags (in addition to KEY_READ and KEY_WRITE)
    /// to the samDesired argument of RegOpenKeyEx() or RegCreateKeyEx().

    WinRegistryKey(HKEY hRootKey, const std::string & subKey, bool readOnly = false, REGSAM extraSam = 0);
    /// Creates the WinRegistryKey.
    ///
    /// If readOnly is true, then only read access to the registry
    /// is available and any attempt to write to the registry will
    /// result in an exception.
    ///
    /// extraSam is used to pass extra flags (in addition to KEY_READ and KEY_WRITE)
    /// to the samDesired argument of RegOpenKeyEx() or RegCreateKeyEx().

    ~WinRegistryKey();
    /// Destroys the WinRegistryKey.

    void setString(const std::string & name, const std::string & value);
    /// Sets the string value (REG_SZ) with the given name.
    /// An empty name denotes the default value.

    std::string getString(const std::string & name);
    /// Returns the string value (REG_SZ) with the given name.
    /// An empty name denotes the default value.
    ///
    /// Throws a NotFoundException if the value does not exist.

    void setStringExpand(const std::string & name, const std::string & value);
    /// Sets the expandable string value (REG_EXPAND_SZ) with the given name.
    /// An empty name denotes the default value.

    std::string getStringExpand(const std::string & name);
    /// Returns the string value (REG_EXPAND_SZ) with the given name.
    /// An empty name denotes the default value.
    /// All references to environment variables (%VAR%) in the string
    /// are expanded.
    ///
    /// Throws a NotFoundException if the value does not exist.

    void setBinary(const std::string & name, const std::vector<char> & value);
    /// Sets the binary value (REG_BINARY) with the given name.
    /// An empty name denotes the default value.

    std::vector<char> getBinary(const std::string & name);
    /// Returns the binary value (REG_BINARY) with the given name.
    /// An empty name denotes the default value.
    ///
    /// Throws a NotFoundException if the value does not exist.

    void setInt(const std::string & name, int value);
    /// Sets the numeric (REG_DWORD) value with the given name.
    /// An empty name denotes the default value.

    int getInt(const std::string & name);
    /// Returns the numeric value (REG_DWORD) with the given name.
    /// An empty name denotes the default value.
    ///
    /// Throws a NotFoundException if the value does not exist.

    void setInt64(const std::string & name, Poco::Int64 value);
    /// Sets the numeric (REG_QWORD) value with the given name.
    /// An empty name denotes the default value.

    Poco::Int64 getInt64(const std::string & name);
    /// Returns the numeric value (REG_QWORD) with the given name.
    /// An empty name denotes the default value.
    ///
    /// Throws a NotFoundException if the value does not exist.

    void deleteValue(const std::string & name);
    /// Deletes the value with the given name.
    ///
    /// Throws a NotFoundException if the value does not exist.

    void deleteKey();
    /// Recursively deletes the key and all subkeys.

    bool exists();
    /// Returns true iff the key exists.

    Type type(const std::string & name);
    /// Returns the type of the key value.

    bool exists(const std::string & name);
    /// Returns true iff the given value exists under that key.

    void subKeys(Keys & keys);
    /// Appends all subKey names to keys.

    void values(Values & vals);
    /// Appends all value names to vals.

    bool isReadOnly() const;
    /// Returns true iff the key has been opened for read-only access only.

protected:
    void open();
    void close();
    std::string key() const;
    std::string key(const std::string & valueName) const;
    HKEY handle();
    void handleSetError(const std::string & name);
    static HKEY handleFor(const std::string & rootKey);

private:
    WinRegistryKey();
    WinRegistryKey(const WinRegistryKey &);
    WinRegistryKey & operator=(const WinRegistryKey &);

    HKEY _hRootKey;
    std::string _subKey;
    HKEY _hKey;
    bool _readOnly;
    REGSAM _extraSam;
};


//
// inlines
//
inline bool WinRegistryKey::isReadOnly() const
{
    return _readOnly;
}


}
} // namespace Poco::Util


#endif // Util_WinRegistryKey_INCLUDED
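A Windows-only usage sketch for WinRegistryKey (key and value names are placeholders):

#include <iostream>
#include "Poco/Util/WinRegistryKey.h"

int main()
{
    Poco::Util::WinRegistryKey key("HKEY_CURRENT_USER\\Software\\MyApp");
    key.setString("greeting", "hello"); // creates the key on first write
    key.setInt("launchCount", 1);

    if (key.exists("greeting"))
        std::cout << key.getString("greeting") << std::endl;
    return 0;
}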
@ -1,140 +0,0 @@
//
// WinService.h
//
// Library: Util
// Package: Windows
// Module: WinService
//
// Definition of the WinService class.
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//


#ifndef Util_WinService_INCLUDED
#define Util_WinService_INCLUDED


#include "Poco/UnWindows.h"
#include "Poco/Util/Util.h"


# define POCO_LPQUERY_SERVICE_CONFIG LPQUERY_SERVICE_CONFIGA


namespace Poco
{
namespace Util
{


class Util_API WinService
/// This class provides an object-oriented interface to
/// the Windows Service Control Manager for registering,
/// unregistering, configuring, starting and stopping
/// services.
///
/// This class is only available on Windows platforms.
{
public:
    enum Startup
    {
        SVC_AUTO_START,
        SVC_MANUAL_START,
        SVC_DISABLED
    };

    WinService(const std::string & name);
    /// Creates the WinService, using the given service name.

    ~WinService();
    /// Destroys the WinService.

    const std::string & name() const;
    /// Returns the service name.

    std::string displayName() const;
    /// Returns the service's display name.

    std::string path() const;
    /// Returns the path to the service executable.
    ///
    /// Throws a NotFoundException if the service has not been registered.

    void registerService(const std::string & path, const std::string & displayName);
    /// Creates a Windows service with the executable specified by path
    /// and the given displayName.
    ///
    /// Throws an ExistsException if the service has already been registered.

    void registerService(const std::string & path);
    /// Creates a Windows service with the executable specified by path.
    /// The service name is used as the display name.
    ///
    /// Throws an ExistsException if the service has already been registered.

    void unregisterService();
    /// Deletes the Windows service.
    ///
    /// Throws a NotFoundException if the service has not been registered.

    bool isRegistered() const;
    /// Returns true if the service has been registered with the Service Control Manager.

    bool isRunning() const;
    /// Returns true if the service is currently running.

    void start();
    /// Starts the service.
    /// Does nothing if the service is already running.
    ///
    /// Throws a NotFoundException if the service has not been registered.

    void stop();
    /// Stops the service.
    /// Does nothing if the service is not running.
    ///
    /// Throws a NotFoundException if the service has not been registered.

    void setStartup(Startup startup);
    /// Sets the startup mode for the service.

    Startup getStartup() const;
    /// Returns the startup mode for the service.

    void setDescription(const std::string & description);
    /// Sets the service description in the registry.

    std::string getDescription() const;
    /// Returns the service description from the registry.

    static const int STARTUP_TIMEOUT;

protected:
    static const std::string REGISTRY_KEY;
    static const std::string REGISTRY_DESCRIPTION;

private:
    void open() const;
    bool tryOpen() const;
    void close() const;
    POCO_LPQUERY_SERVICE_CONFIG config() const;

    WinService();
    WinService(const WinService &);
    WinService & operator=(const WinService &);

    std::string _name;
    SC_HANDLE _scmHandle;
    mutable SC_HANDLE _svcHandle;
};


}
} // namespace Poco::Util


#endif // Util_WinService_INCLUDED
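A Windows-only sketch of registering and starting a service via WinService ("MyService" and the path are placeholders):

#include "Poco/Util/WinService.h"

void installAndStart()
{
    Poco::Util::WinService service("MyService");
    if (!service.isRegistered())
        service.registerService("C:\\Program Files\\MyApp\\myservice.exe", "My Service");
    service.setStartup(Poco::Util::WinService::SVC_AUTO_START);
    if (!service.isRunning())
        service.start();
}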
@ -1,101 +0,0 @@
//
// ConfigurationMapper.cpp
//
// Library: Util
// Package: Configuration
// Module: ConfigurationMapper
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//


#include "Poco/Util/ConfigurationMapper.h"


namespace Poco {
namespace Util {


ConfigurationMapper::ConfigurationMapper(const std::string& fromPrefix, const std::string& toPrefix, AbstractConfiguration* pConfig):
    _fromPrefix(fromPrefix),
    _toPrefix(toPrefix),
    _pConfig(pConfig)
{
    poco_check_ptr (pConfig);

    if (!_fromPrefix.empty()) _fromPrefix += '.';
    if (!_toPrefix.empty()) _toPrefix += '.';

    _pConfig->duplicate();
}


ConfigurationMapper::~ConfigurationMapper()
{
    _pConfig->release();
}


bool ConfigurationMapper::getRaw(const std::string& key, std::string& value) const
{
    std::string translatedKey = translateKey(key);
    return _pConfig->getRaw(translatedKey, value);
}


void ConfigurationMapper::setRaw(const std::string& key, const std::string& value)
{
    std::string translatedKey = translateKey(key);
    _pConfig->setRaw(translatedKey, value);
}


void ConfigurationMapper::enumerate(const std::string& key, Keys& range) const
{
    std::string cKey(key);
    if (!cKey.empty()) cKey += '.';
    std::string::size_type keyLen = cKey.length();
    if (keyLen < _toPrefix.length())
    {
        if (_toPrefix.compare(0, keyLen, cKey) == 0)
        {
            std::string::size_type pos = _toPrefix.find_first_of('.', keyLen);
            poco_assert_dbg(pos != std::string::npos);
            range.push_back(_toPrefix.substr(keyLen, pos - keyLen));
        }
    }
    else
    {
        std::string translatedKey;
        if (cKey == _toPrefix)
        {
            translatedKey = _fromPrefix;
            if (!translatedKey.empty())
                translatedKey.resize(translatedKey.length() - 1);
        }
        else translatedKey = translateKey(key);
        _pConfig->enumerate(translatedKey, range);
    }
}


void ConfigurationMapper::removeRaw(const std::string& key)
{
    std::string translatedKey = translateKey(key);
    _pConfig->remove(translatedKey);
}


std::string ConfigurationMapper::translateKey(const std::string& key) const
{
    std::string result(key);
    if (result.compare(0, _toPrefix.size(), _toPrefix) == 0)
        result.replace(0, _toPrefix.size(), _fromPrefix);
    return result;
}


} } // namespace Poco::Util
@ -33,8 +33,7 @@ if (SANITIZE)
         # RelWithDebInfo, and downgrade optimizations to -O1 but not to -Og, to
         # keep the binary size down.
         # TODO: try compiling with -Og and with ld.gold.
-        set (MSAN_FLAGS "-fsanitize=memory -fsanitize-memory-use-after-dtor -fsanitize-memory-track-origins -fno-optimize-sibling-calls -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/msan_suppressions.txt")
+        set (MSAN_FLAGS "-fsanitize=memory -fsanitize-memory-use-after-dtor -fsanitize-memory-track-origins -fno-optimize-sibling-calls -fPIC -fpie -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/msan_suppressions.txt")
         set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} ${MSAN_FLAGS}")
         set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} ${MSAN_FLAGS}")
2
contrib/sysroot
vendored
@ -1 +1 @@
-Subproject commit f0081b2649b94837855f3bc7d05ef326b100bad8
+Subproject commit e0d1b64da666afbfaa6f1ee0487c33f3fd2cd5cb
@ -1,4 +1,3 @@
# rebuild in #36968
# docker build -t clickhouse/docs-builder .
# nodejs 17 prefers ipv6 and is broken in our environment
FROM node:16-alpine
@ -18,7 +18,7 @@ SUCCESS_FINISH_SIGNS = ["All tests have finished", "No tests were run"]
RETRIES_SIGN = "Some tests were restarted"


def process_test_log(log_path):
def process_test_log(log_path, broken_tests):
total = 0
skipped = 0
unknown = 0
@ -62,8 +62,12 @@ def process_test_log(log_path):
failed += 1
test_results.append((test_name, "Timeout", test_time, []))
elif FAIL_SIGN in line:
failed += 1
test_results.append((test_name, "FAIL", test_time, []))
if test_name in broken_tests:
success += 1
test_results.append((test_name, "OK", test_time, []))
else:
failed += 1
test_results.append((test_name, "FAIL", test_time, []))
elif UNKNOWN_SIGN in line:
unknown += 1
test_results.append((test_name, "FAIL", test_time, []))
@ -71,8 +75,21 @@ def process_test_log(log_path):
skipped += 1
test_results.append((test_name, "SKIPPED", test_time, []))
else:
success += int(OK_SIGN in line)
test_results.append((test_name, "OK", test_time, []))
if OK_SIGN in line and test_name in broken_tests:
failed += 1
test_results.append(
(
test_name,
"FAIL",
test_time,
[
"Test is expected to fail! Please, update broken_tests.txt!\n"
],
)
)
else:
success += int(OK_SIGN in line)
test_results.append((test_name, "OK", test_time, []))
test_end = False
elif (
len(test_results) > 0 and test_results[-1][1] == "FAIL" and not test_end
@ -110,7 +127,7 @@ def process_test_log(log_path):
)


def process_result(result_path):
def process_result(result_path, broken_tests):
test_results = []
state = "success"
description = ""
@ -134,7 +151,7 @@ def process_result(result_path):
success_finish,
retries,
test_results,
) = process_test_log(result_path)
) = process_test_log(result_path, broken_tests)
is_flacky_check = 1 < int(os.environ.get("NUM_TRIES", 1))
logging.info("Is flaky check: %s", is_flacky_check)
# If no tests were run (success == 0) it indicates an error (e.g. server did not start or crashed immediately)
@ -186,9 +203,17 @@ if __name__ == "__main__":
parser.add_argument("--in-results-dir", default="/test_output/")
parser.add_argument("--out-results-file", default="/test_output/test_results.tsv")
parser.add_argument("--out-status-file", default="/test_output/check_status.tsv")
parser.add_argument("--broken-tests", default="/broken_tests.txt")
args = parser.parse_args()

state, description, test_results = process_result(args.in_results_dir)
broken_tests = list()
if os.path.exists(args.broken_tests):
logging.info(f"File {args.broken_tests} with broken tests found")
with open(args.broken_tests) as f:
broken_tests = f.read().splitlines()
logging.info(f"Broken tests in the list: {len(broken_tests)}")

state, description, test_results = process_result(args.in_results_dir, broken_tests)
logging.info("Result parsed")
status = (state, description)
write_results(args.out_results_file, args.out_status_file, test_results, status)
29
docs/changelogs/v23.2.6.34-stable.md
Normal file
@ -0,0 +1,29 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.2.6.34-stable (570190045b0) FIXME as compared to v23.2.5.46-stable (b50faecbb12)

#### Improvement
* Backported in [#48709](https://github.com/ClickHouse/ClickHouse/issues/48709): Formatter '%M' in function formatDateTime() now prints the month name instead of the minutes. This makes the behavior consistent with MySQL. The previous behavior can be restored using setting "formatdatetime_parsedatetime_m_is_month_name = 0". [#47246](https://github.com/ClickHouse/ClickHouse/pull/47246) ([Robert Schulze](https://github.com/rschu1ze)).

#### Build/Testing/Packaging Improvement
* Backported in [#48587](https://github.com/ClickHouse/ClickHouse/issues/48587): Update time zones. The following were updated: Africa/Cairo, Africa/Casablanca, Africa/El_Aaiun, America/Bogota, America/Cambridge_Bay, America/Ciudad_Juarez, America/Godthab, America/Inuvik, America/Iqaluit, America/Nuuk, America/Ojinaga, America/Pangnirtung, America/Rankin_Inlet, America/Resolute, America/Whitehorse, America/Yellowknife, Asia/Gaza, Asia/Hebron, Asia/Kuala_Lumpur, Asia/Singapore, Canada/Yukon, Egypt, Europe/Kirov, Europe/Volgograd, Singapore. [#48572](https://github.com/ClickHouse/ClickHouse/pull/48572) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#48959](https://github.com/ClickHouse/ClickHouse/issues/48959): After the recent update, the `dockerd` requires `--tlsverify=false` together with the http port explicitly. [#48924](https://github.com/ClickHouse/ClickHouse/pull/48924) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix race in grace hash join with limit [#47153](https://github.com/ClickHouse/ClickHouse/pull/47153) ([Vladimir C](https://github.com/vdimir)).
* Fix explain graph with projection [#47473](https://github.com/ClickHouse/ClickHouse/pull/47473) ([flynn](https://github.com/ucasfl)).
* Fix crash in polygonsSymDifferenceCartesian [#47702](https://github.com/ClickHouse/ClickHouse/pull/47702) ([pufit](https://github.com/pufit)).
* Remove a feature [#48195](https://github.com/ClickHouse/ClickHouse/pull/48195) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix cpu usage in rabbitmq (was worsened in 23.2 after [#44404](https://github.com/ClickHouse/ClickHouse/issues/44404)) [#48311](https://github.com/ClickHouse/ClickHouse/pull/48311) ([Kseniia Sumarokova](https://github.com/kssenii)).
* ClickHouse startup error when loading a distributed table that depends on a dictionary [#48419](https://github.com/ClickHouse/ClickHouse/pull/48419) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Fix possible segfault in cache [#48469](https://github.com/ClickHouse/ClickHouse/pull/48469) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix nested map for keys of IP and UUID types [#48556](https://github.com/ClickHouse/ClickHouse/pull/48556) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix bug in Keeper when a node is not created with scheme `auth` in ACL sometimes. [#48595](https://github.com/ClickHouse/ClickHouse/pull/48595) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Fix IPv4 comparable with UInt [#48611](https://github.com/ClickHouse/ClickHouse/pull/48611) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
@ -13,9 +13,11 @@ Supported platforms:
- AArch64
- Power9 (experimental)

## Normal Build for Development on Ubuntu
## Building on Ubuntu

The following tutorial is based on the Ubuntu Linux system. With appropriate changes, it should also work on any other Linux distribution.
The following tutorial is based on Ubuntu Linux.
With appropriate changes, it should also work on any other Linux distribution.
The minimum recommended Ubuntu version for development is 22.04 LTS.

### Install Prerequisites {#install-prerequisites}

@ -23,13 +25,11 @@ The following tutorial is based on the Ubuntu Linux system. With appropriate cha
sudo apt-get install git cmake ccache python3 ninja-build yasm gawk
```

Or cmake3 instead of cmake on older systems.
### Install and Use the Clang compiler

### Install the latest clang (recommended)
On Ubuntu/Debian you can use LLVM's automatic installation script, see [here](https://apt.llvm.org/).

On Ubuntu/Debian you can use the automatic installation script (check [official webpage](https://apt.llvm.org/))

```bash
``` bash
sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
```

@ -40,19 +40,17 @@ sudo apt-get install software-properties-common
sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test
```

For other Linux distribution - check the availability of the [prebuild packages](https://releases.llvm.org/download.html) or build clang [from sources](https://clang.llvm.org/get_started.html).
For other Linux distribution - check the availability of LLVM's [prebuild packages](https://releases.llvm.org/download.html).

#### Use the latest clang for Builds
As of April 2023, any version of Clang >= 15 will work.
GCC as a compiler is not supported.
To build with a specific Clang version:

``` bash
export CC=clang-15
export CXX=clang++-15
```

In this example we use version 15 that is the latest as of Sept 2022.

Gcc cannot be used.

### Checkout ClickHouse Sources {#checkout-clickhouse-sources}

``` bash
@ -70,79 +68,46 @@ git clone --recursive --shallow-submodules https://github.com/ClickHouse/ClickHo
``` bash
cd ClickHouse
mkdir build
cd build
cmake ..
ninja
cmake -S . -B build
cmake --build build  # or: `cd build; ninja`
```

To create an executable, run `ninja clickhouse`.
This will create the `programs/clickhouse` executable, which can be used with `client` or `server` arguments.
To create an executable, run `cmake --build build --target clickhouse` (or: `cd build; ninja clickhouse`).
This will create executable `build/programs/clickhouse` which can be used with `client` or `server` arguments.

## How to Build ClickHouse on Any Linux {#how-to-build-clickhouse-on-any-linux}
## Building on Any Linux {#how-to-build-clickhouse-on-any-linux}

The build requires the following components:

- Git (is used only to checkout the sources, it’s not needed for the build)
- CMake 3.15 or newer
- Git (used to checkout the sources, not needed for the build)
- CMake 3.20 or newer
- Compiler: Clang 15 or newer
- Linker: lld 15 or newer
- Ninja
- C++ compiler: clang-15 or newer
- Linker: lld
- Yasm
- Gawk

If all the components are installed, you may build in the same way as the steps above.

Example for Ubuntu Eoan:
``` bash
sudo apt update
sudo apt install git cmake ninja-build clang++ python yasm gawk
git clone --recursive https://github.com/ClickHouse/ClickHouse.git
mkdir build && cd build
cmake ../ClickHouse
ninja
```

Example for OpenSUSE Tumbleweed:

``` bash
sudo zypper install git cmake ninja clang-c++ python lld yasm gawk
git clone --recursive https://github.com/ClickHouse/ClickHouse.git
mkdir build && cd build
cmake ../ClickHouse
ninja
mkdir build
cmake -S . -B build
cmake --build build
```

Example for Fedora Rawhide:

``` bash
sudo yum update
sudo yum --nogpg install git cmake make clang python3 ccache yasm gawk
git clone --recursive https://github.com/ClickHouse/ClickHouse.git
mkdir build && cd build
cmake ../ClickHouse
make -j $(nproc)
```

Here is an example of how to build `clang` and all the llvm infrastructure from sources:

```
git clone git@github.com:llvm/llvm-project.git
mkdir llvm-build && cd llvm-build
cmake -DCMAKE_BUILD_TYPE:STRING=Release -DLLVM_ENABLE_PROJECTS=all ../llvm-project/llvm/
make -j16
sudo make install
hash clang
clang --version
```

You can install the older clang like clang-11 from packages and then use it to build the new clang from sources.

Here is an example of how to install the new `cmake` from the official website:

```
wget https://github.com/Kitware/CMake/releases/download/v3.22.2/cmake-3.22.2-linux-x86_64.sh
chmod +x cmake-3.22.2-linux-x86_64.sh
./cmake-3.22.2-linux-x86_64.sh
export PATH=/home/milovidov/work/cmake-3.22.2-linux-x86_64/bin/:${PATH}
hash cmake
mkdir build
cmake -S . -B build
cmake --build build
```

## You Don’t Have to Build ClickHouse {#you-dont-have-to-build-clickhouse}
@ -119,7 +119,7 @@ When processing a query, the client shows:
1. Progress, which is updated no more than 10 times per second (by default). For quick queries, the progress might not have time to be displayed.
2. The formatted query after parsing, for debugging.
3. The result in the specified format.
4. The number of lines in the result, the time passed, and the average speed of query processing.
4. The number of lines in the result, the time passed, and the average speed of query processing. All data amounts refer to uncompressed data.

You can cancel a long query by pressing Ctrl+C. However, you will still need to wait a little for the server to abort the request. It is not possible to cancel a query at certain stages. If you do not wait and press Ctrl+C a second time, the client will exit.
@ -88,6 +88,33 @@ If the query was aborted due to an exception or user cancellation, no entry is w
The size of the query cache in bytes, the maximum number of cache entries and the maximum size of individual cache entries (in bytes and in
records) can be configured using different [server configuration options](server-configuration-parameters/settings.md#server_configuration_parameters_query-cache).

It is also possible to limit the cache usage of individual users using [settings profiles](settings/settings-profiles.md) and [settings
constraints](settings/constraints-on-settings.md). More specifically, you can restrict the maximum amount of memory (in bytes) a user may
allocate in the query cache and the maximum number of stored query results. For that, first provide configurations
[query_cache_max_size_in_bytes](settings/settings.md#query-cache-max-size-in-bytes) and
[query_cache_max_entries](settings/settings.md#query-cache-size-max-items) in a user profile in `users.xml`, then make both settings
readonly:

``` xml
<profiles>
    <default>
        <!-- The maximum cache size in bytes for user/profile 'default' -->
        <query_cache_max_size_in_bytes>10000</query_cache_max_size_in_bytes>
        <!-- The maximum number of SELECT query results stored in the cache for user/profile 'default' -->
        <query_cache_max_entries>100</query_cache_max_entries>
        <!-- Make both settings read-only so the user cannot change them -->
        <constraints>
            <query_cache_max_size_in_bytes>
                <readonly/>
            </query_cache_max_size_in_bytes>
            <query_cache_max_entries>
                <readonly/>
            </query_cache_max_entries>
        </constraints>
    </default>
</profiles>
```

To define how long a query must run at least such that its result can be cached, you can use setting
[query_cache_min_query_duration](settings/settings.md#query-cache-min-query-duration). For example, the result of query
@ -1382,25 +1382,25 @@ If the table does not exist, ClickHouse will create it. If the structure of the

The following settings are available:

- `max_size`: The maximum cache size in bytes. 0 means the query cache is disabled. Default value: `1073741824` (1 GiB).
- `max_size_in_bytes`: The maximum cache size in bytes. 0 means the query cache is disabled. Default value: `1073741824` (1 GiB).
- `max_entries`: The maximum number of `SELECT` query results stored in the cache. Default value: `1024`.
- `max_entry_size`: The maximum size in bytes `SELECT` query results may have to be saved in the cache. Default value: `1048576` (1 MiB).
- `max_entry_rows`: The maximum number of rows `SELECT` query results may have to be saved in the cache. Default value: `30000000` (30 mil).
- `max_entry_size_in_bytes`: The maximum size in bytes `SELECT` query results may have to be saved in the cache. Default value: `1048576` (1 MiB).
- `max_entry_size_in_rows`: The maximum number of rows `SELECT` query results may have to be saved in the cache. Default value: `30000000` (30 mil).

Changed settings take effect immediately.

:::note
Data for the query cache is allocated in DRAM. If memory is scarce, make sure to set a small value for `max_size` or disable the query cache altogether.
Data for the query cache is allocated in DRAM. If memory is scarce, make sure to set a small value for `max_size_in_bytes` or disable the query cache altogether.
:::

**Example**

```xml
<query_cache>
    <max_size>1073741824</max_size>
    <max_size_in_bytes>1073741824</max_size_in_bytes>
    <max_entries>1024</max_entries>
    <max_entry_size>1048576</max_entry_size>
    <max_entry_rows>30000000</max_entry_rows>
    <max_entry_size_in_bytes>1048576</max_entry_size_in_bytes>
    <max_entry_size_in_rows>30000000</max_entry_size_in_rows>
</query_cache>
```
@ -40,7 +40,7 @@ If the user tries to violate the constraints an exception is thrown and the sett
Several types of constraints are supported: `min`, `max`, `readonly` (with alias `const`) and `changeable_in_readonly`. The `min` and `max` constraints specify upper and lower boundaries for a numeric setting and can be used in combination. The `readonly` or `const` constraint specifies that the user cannot change the corresponding setting at all. The `changeable_in_readonly` constraint type allows the user to change the setting within the `min`/`max` range even if the `readonly` setting is set to 1; otherwise settings are not allowed to be changed in `readonly=1` mode. Note that `changeable_in_readonly` is supported only if `settings_constraints_replace_previous` is enabled:
``` xml
<access_control_improvements>
  <settings_constraints_replace_previous>true<settings_constraints_replace_previous>
  <settings_constraints_replace_previous>true</settings_constraints_replace_previous>
</access_control_improvements>
```
@ -890,7 +890,7 @@ Write time that processor spent during execution/waiting for data to `system.pro

See also:

- [`system.processors_profile_log`](../../operations/system-tables/processors_profile_log.md#system-processors_profile_log)
- [`system.processors_profile_log`](../../operations/system-tables/processors_profile_log.md)
- [`EXPLAIN PIPELINE`](../../sql-reference/statements/explain.md#explain-pipeline)

## max_insert_block_size {#settings-max_insert_block_size}
@ -1512,6 +1512,26 @@ Possible values:

Default value: `0`.

## query_cache_max_size_in_bytes {#query-cache-max-size-in-bytes}

The maximum amount of memory (in bytes) the current user may allocate in the query cache. 0 means unlimited.

Possible values:

- Non-negative integer.

Default value: 0 (no restriction).

## query_cache_max_entries {#query-cache-max-entries}

The maximum number of query results the current user may store in the query cache. 0 means unlimited.

Possible values:

- Non-negative integer.

Default value: 0 (no restriction).

## insert_quorum {#settings-insert_quorum}

Enables the quorum writes.
@ -32,6 +32,589 @@ SELECT * FROM system.asynchronous_metrics LIMIT 10
|
||||
└─────────────────────────────────────────┴────────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Metric descriptions
|
||||
|
||||
|
||||
### AsynchronousHeavyMetricsCalculationTimeSpent
|
||||
|
||||
Time in seconds spent for calculation of asynchronous heavy (tables related) metrics (this is the overhead of asynchronous metrics).
|
||||
|
||||
### AsynchronousHeavyMetricsUpdateInterval
|
||||
|
||||
Heavy (tables related) metrics update interval
|
||||
|
||||
### AsynchronousMetricsCalculationTimeSpent
|
||||
|
||||
Time in seconds spent for calculation of asynchronous metrics (this is the overhead of asynchronous metrics).
|
||||
|
||||
### AsynchronousMetricsUpdateInterval
|
||||
|
||||
Metrics update interval
|
||||
|
||||
### BlockActiveTime_*name*
|
||||
|
||||
Time in seconds the block device had the IO requests queued. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server. Source: `/sys/block`. See https://www.kernel.org/doc/Documentation/block/stat.txt
|
||||
|
||||
### BlockDiscardBytes_*name*
|
||||
|
||||
Number of discarded bytes on the block device. These operations are relevant for SSD. Discard operations are not used by ClickHouse, but can be used by other processes on the system. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server. Source: `/sys/block`. See https://www.kernel.org/doc/Documentation/block/stat.txt
|
||||
|
||||
### BlockDiscardMerges_*name*
|
||||
|
||||
Number of discard operations requested from the block device and merged together by the OS IO scheduler. These operations are relevant for SSD. Discard operations are not used by ClickHouse, but can be used by other processes on the system. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server. Source: `/sys/block`. See https://www.kernel.org/doc/Documentation/block/stat.txt
|
||||
|
||||
### BlockDiscardOps_*name*
|
||||
|
||||
Number of discard operations requested from the block device. These operations are relevant for SSD. Discard operations are not used by ClickHouse, but can be used by other processes on the system. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server. Source: `/sys/block`. See https://www.kernel.org/doc/Documentation/block/stat.txt
|
||||
|
||||
### BlockDiscardTime_*name*
|
||||
|
||||
Time in seconds spend in discard operations requested from the block device, summed across all the operations. These operations are relevant for SSD. Discard operations are not used by ClickHouse, but can be used by other processes on the system. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server. Source: `/sys/block`. See https://www.kernel.org/doc/Documentation/block/stat.txt
|
||||
|
||||
### BlockInFlightOps_*name*
|
||||
|
||||
This value counts the number of I/O requests that have been issued to the device driver but have not yet completed. It does not include IO requests that are in the queue but not yet issued to the device driver. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server. Source: `/sys/block`. See https://www.kernel.org/doc/Documentation/block/stat.txt
|
||||
|
||||
### BlockQueueTime_*name*
|
||||
|
||||
This value counts the number of milliseconds that IO requests have waited on this block device. If there are multiple IO requests waiting, this value will increase as the product of the number of milliseconds times the number of requests waiting. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server. Source: `/sys/block`. See https://www.kernel.org/doc/Documentation/block/stat.txt
|
||||
|
||||
### BlockReadBytes_*name*
|
||||
|
||||
Number of bytes read from the block device. It can be lower than the number of bytes read from the filesystem due to the usage of the OS page cache, that saves IO. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server. Source: `/sys/block`. See https://www.kernel.org/doc/Documentation/block/stat.txt
|
||||
|
||||
### BlockReadMerges_*name*
|
||||
|
||||
Number of read operations requested from the block device and merged together by the OS IO scheduler. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server. Source: `/sys/block`. See https://www.kernel.org/doc/Documentation/block/stat.txt
|
||||
|
||||
### BlockReadOps_*name*
|
||||
|
||||
Number of read operations requested from the block device. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server. Source: `/sys/block`. See https://www.kernel.org/doc/Documentation/block/stat.txt
|
||||
|
||||
### BlockReadTime_*name*
|
||||
|
||||
Time in seconds spend in read operations requested from the block device, summed across all the operations. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server. Source: `/sys/block`. See https://www.kernel.org/doc/Documentation/block/stat.txt
|
||||
|
||||
### BlockWriteBytes_*name*
|
||||
|
||||
Number of bytes written to the block device. It can be lower than the number of bytes written to the filesystem due to the usage of the OS page cache, that saves IO. A write to the block device may happen later than the corresponding write to the filesystem due to write-through caching. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server. Source: `/sys/block`. See https://www.kernel.org/doc/Documentation/block/stat.txt
|
||||
|
||||
### BlockWriteMerges_*name*
|
||||
|
||||
Number of write operations requested from the block device and merged together by the OS IO scheduler. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server. Source: `/sys/block`. See https://www.kernel.org/doc/Documentation/block/stat.txt
|
||||
|
||||
### BlockWriteOps_*name*
|
||||
|
||||
Number of write operations requested from the block device. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server. Source: `/sys/block`. See https://www.kernel.org/doc/Documentation/block/stat.txt
|
||||
|
||||
### BlockWriteTime_*name*
|
||||
|
||||
Time in seconds spend in write operations requested from the block device, summed across all the operations. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server. Source: `/sys/block`. See https://www.kernel.org/doc/Documentation/block/stat.txt
|
||||
|
||||
### CPUFrequencyMHz_*name*
|
||||
|
||||
The current frequency of the CPU, in MHz. Most of the modern CPUs adjust the frequency dynamically for power saving and Turbo Boosting.
|
||||
|
||||
### CompiledExpressionCacheBytes
|
||||
|
||||
Total bytes used for the cache of JIT-compiled code.
|
||||
|
||||
### CompiledExpressionCacheCount
|
||||
|
||||
Total entries in the cache of JIT-compiled code.
|
||||
|
||||
### DiskAvailable_*name*
|
||||
|
||||
Available bytes on the disk (virtual filesystem). Remote filesystems can show a large value like 16 EiB.
|
||||
|
||||
### DiskTotal_*name*
|
||||
|
||||
The total size in bytes of the disk (virtual filesystem). Remote filesystems can show a large value like 16 EiB.
|
||||
|
||||
### DiskUnreserved_*name*
|
||||
|
||||
Available bytes on the disk (virtual filesystem) without the reservations for merges, fetches, and moves. Remote filesystems can show a large value like 16 EiB.
|
||||
|
||||
### DiskUsed_*name*
|
||||
|
||||
Used bytes on the disk (virtual filesystem). Remote filesystems not always provide this information.
|
||||
|
||||
### FilesystemCacheBytes
|
||||
|
||||
Total bytes in the `cache` virtual filesystem. This cache is hold on disk.
|
||||
|
||||
### FilesystemCacheFiles
|
||||
|
||||
Total number of cached file segments in the `cache` virtual filesystem. This cache is hold on disk.
|
||||
|
||||
### FilesystemLogsPathAvailableBytes
|
||||
|
||||
Available bytes on the volume where ClickHouse logs path is mounted. If this value approaches zero, you should tune the log rotation in the configuration file.
|
||||
|
||||
### FilesystemLogsPathAvailableINodes
|
||||
|
||||
The number of available inodes on the volume where ClickHouse logs path is mounted.
|
||||
|
||||
### FilesystemLogsPathTotalBytes
|
||||
|
||||
The size of the volume where ClickHouse logs path is mounted, in bytes. It's recommended to have at least 10 GB for logs.
|
||||
|
||||
### FilesystemLogsPathTotalINodes
|
||||
|
||||
The total number of inodes on the volume where ClickHouse logs path is mounted.
|
||||
|
||||
### FilesystemLogsPathUsedBytes
|
||||
|
||||
Used bytes on the volume where ClickHouse logs path is mounted.
|
||||
|
||||
### FilesystemLogsPathUsedINodes
|
||||
|
||||
The number of used inodes on the volume where ClickHouse logs path is mounted.
|
||||
|
||||
### FilesystemMainPathAvailableBytes
|
||||
|
||||
Available bytes on the volume where the main ClickHouse path is mounted.
|
||||
|
||||
### FilesystemMainPathAvailableINodes
|
||||
|
||||
The number of available inodes on the volume where the main ClickHouse path is mounted. If it is close to zero, it indicates a misconfiguration, and you will get 'no space left on device' even when the disk is not full.
|
||||
|
||||
### FilesystemMainPathTotalBytes
|
||||
|
||||
The size of the volume where the main ClickHouse path is mounted, in bytes.
|
||||
|
||||
### FilesystemMainPathTotalINodes
|
||||
|
||||
The total number of inodes on the volume where the main ClickHouse path is mounted. If it is less than 25 million, it indicates a misconfiguration.
|
||||
|
||||
### FilesystemMainPathUsedBytes
|
||||
|
||||
Used bytes on the volume where the main ClickHouse path is mounted.
|
||||
|
||||
### FilesystemMainPathUsedINodes
|
||||
|
||||
The number of used inodes on the volume where the main ClickHouse path is mounted. This value mostly corresponds to the number of files.
|
||||
|
||||
### HTTPThreads
|
||||
|
||||
Number of threads in the server of the HTTP interface (without TLS).
|
||||
|
||||
### InterserverThreads
|
||||
|
||||
Number of threads in the server of the replicas communication protocol (without TLS).
|
||||
|
||||
### Jitter
|
||||
|
||||
The difference in time the thread for calculation of the asynchronous metrics was scheduled to wake up and the time it was in fact, woken up. A proxy-indicator of overall system latency and responsiveness.
|
||||
|
||||
### LoadAverage_*N*
|
||||
|
||||
The whole system load, averaged with exponential smoothing over 1 minute. The load represents the number of threads across all the processes (the scheduling entities of the OS kernel), that are currently running by CPU or waiting for IO, or ready to run but not being scheduled at this point of time. This number includes all the processes, not only clickhouse-server. The number can be greater than the number of CPU cores, if the system is overloaded, and many processes are ready to run but waiting for CPU or IO.
|
||||
|
||||
### MMapCacheCells
|
||||
|
||||
The number of files opened with `mmap` (mapped in memory). This is used for queries with the setting `local_filesystem_read_method` set to `mmap`. The files opened with `mmap` are kept in the cache to avoid costly TLB flushes.
|
||||
|
||||
### MarkCacheBytes
|
||||
|
||||
Total size of mark cache in bytes
|
||||
|
||||
### MarkCacheFiles
|
||||
|
||||
Total number of mark files cached in the mark cache
|
||||
|
||||
### MaxPartCountForPartition
|
||||
|
||||
Maximum number of parts per partition across all partitions of all tables of MergeTree family. Values larger than 300 indicates misconfiguration, overload, or massive data loading.
|
||||
|
||||
### MemoryCode
|
||||
|
||||
The amount of virtual memory mapped for the pages of machine code of the server process, in bytes.
|
||||
|
||||
### MemoryDataAndStack
|
||||
|
||||
The amount of virtual memory mapped for the use of stack and for the allocated memory, in bytes. It is unspecified whether it includes the per-thread stacks and most of the allocated memory, that is allocated with the 'mmap' system call. This metric exists only for completeness reasons. I recommend to use the `MemoryResident` metric for monitoring.
|
||||
|
||||
### MemoryResident
|
||||
|
||||
The amount of physical memory used by the server process, in bytes.
|
||||
|
||||
### MemoryShared
|
||||
|
||||
The amount of memory used by the server process, that is also shared by another processes, in bytes. ClickHouse does not use shared memory, but some memory can be labeled by OS as shared for its own reasons. This metric does not make a lot of sense to watch, and it exists only for completeness reasons.
|
||||
|
||||
### MemoryVirtual
|
||||
|
||||
The size of the virtual address space allocated by the server process, in bytes. The size of the virtual address space is usually much greater than the physical memory consumption, and should not be used as an estimate for the memory consumption. The large values of this metric are totally normal, and makes only technical sense.
|
||||
|
||||
### MySQLThreads
|
||||
|
||||
Number of threads in the server of the MySQL compatibility protocol.
|
||||
|
||||
### NetworkReceiveBytes_*name*
|
||||
|
||||
Number of bytes received via the network interface. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server.
|
||||
|
||||
### NetworkReceiveDrop_*name*
|
||||
|
||||
Number of bytes a packet was dropped while received via the network interface. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server.
|
||||
|
||||
### NetworkReceiveErrors_*name*
|
||||
|
||||
Number of times error happened receiving via the network interface. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server.
|
||||
|
||||
### NetworkReceivePackets_*name*
|
||||
|
||||
Number of network packets received via the network interface. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server.
|
||||
|
||||
### NetworkSendBytes_*name*
|
||||
|
||||
Number of bytes sent via the network interface. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server.
|
||||
|
||||
### NetworkSendDrop_*name*
|
||||
|
||||
Number of times a packed was dropped while sending via the network interface. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server.
|
||||
|
||||
### NetworkSendErrors_*name*
|
||||
|
||||
Number of times error (e.g. TCP retransmit) happened while sending via the network interface. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server.
|
||||
|
||||
### NetworkSendPackets_*name*
|
||||
|
||||
Number of network packets sent via the network interface. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server.
|
||||
|
||||
### NumberOfDatabases
|
||||
|
||||
Total number of databases on the server.
|
||||
|
||||
### NumberOfDetachedByUserParts
|
||||
|
||||
The total number of parts detached from MergeTree tables by users with the `ALTER TABLE DETACH` query (as opposed to unexpected, broken or ignored parts). The server does not care about detached parts and they can be removed.
|
||||
|
||||
### NumberOfDetachedParts
|
||||
|
||||
The total number of parts detached from MergeTree tables. A part can be detached by a user with the `ALTER TABLE DETACH` query or by the server itself it the part is broken, unexpected or unneeded. The server does not care about detached parts and they can be removed.
|
||||
|
||||
### NumberOfTables
|
||||
|
||||
Total number of tables summed across the databases on the server, excluding the databases that cannot contain MergeTree tables. The excluded database engines are those who generate the set of tables on the fly, like `Lazy`, `MySQL`, `PostgreSQL`, `SQlite`.
|
||||
|
||||
### OSContextSwitches
|
||||
|
||||
The number of context switches that the system underwent on the host machine. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server.
|
||||
|
||||
### OSGuestNiceTime
|
||||
|
||||
The ratio of time spent running a virtual CPU for guest operating systems under the control of the Linux kernel, when a guest was set to a higher priority (See `man procfs`). This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server. This metric is irrelevant for ClickHouse, but still exists for completeness. The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores].
|
||||
|
||||
### OSGuestNiceTimeCPU_*N*
|
||||
|
||||
The ratio of time spent running a virtual CPU for guest operating systems under the control of the Linux kernel, when a guest was set to a higher priority (See `man procfs`). This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server. This metric is irrelevant for ClickHouse, but still exists for completeness. The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores].
|
||||
|
||||
### OSGuestNiceTimeNormalized
|
||||
|
||||
The value is similar to `OSGuestNiceTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores. This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric.
|
||||
|
||||
### OSGuestTime
|
||||
|
||||
The ratio of time spent running a virtual CPU for guest operating systems under the control of the Linux kernel (See `man procfs`). This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server. This metric is irrelevant for ClickHouse, but still exists for completeness. The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores].
|
||||
|
||||
### OSGuestTimeCPU_*N*
|
||||
|
||||
The ratio of time spent running a virtual CPU for guest operating systems under the control of the Linux kernel (See `man procfs`). This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server. This metric is irrelevant for ClickHouse, but still exists for completeness. The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores].
|
||||
|
||||
### OSGuestTimeNormalized
|
||||
|
||||
The value is similar to `OSGuestTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores. This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric.
|
||||
|
||||
### OSIOWaitTime
|
||||
|
||||
The ratio of time the CPU core was not running the code but when the OS kernel did not run any other process on this CPU as the processes were waiting for IO. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server. The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores].
|
||||
|
||||
### OSIOWaitTimeCPU_*N*
|
||||
|
||||
The ratio of time the CPU core was not running the code but when the OS kernel did not run any other process on this CPU as the processes were waiting for IO. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server. The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores].
|
||||
|
||||
### OSIOWaitTimeNormalized
|
||||
|
||||
The value is similar to `OSIOWaitTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores. This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric.
|
||||
|
||||
### OSIdleTime
|
||||
|
||||
The ratio of time the CPU core was idle (not even ready to run a process waiting for IO) from the OS kernel standpoint. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server. This does not include the time when the CPU was under-utilized due to the reasons internal to the CPU (memory loads, pipeline stalls, branch mispredictions, running another SMT core). The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores].
|
||||
|
||||
### OSIdleTimeCPU_*N*
|
||||
|
||||
The ratio of time the CPU core was idle (not even ready to run a process waiting for IO) from the OS kernel standpoint. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server. This does not include the time when the CPU was under-utilized due to the reasons internal to the CPU (memory loads, pipeline stalls, branch mispredictions, running another SMT core). The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores].
|
||||
|
||||
### OSIdleTimeNormalized
|
||||
|
||||
The value is similar to `OSIdleTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores. This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric.
|
||||
|
||||
### OSInterrupts
|
||||
|
||||
The number of interrupts on the host machine. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server.
|
||||
|
||||
### OSIrqTime
|
||||
|
||||
The ratio of time spent for running hardware interrupt requests on the CPU. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server. A high number of this metric may indicate hardware misconfiguration or a very high network load. The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores].
|
||||
|
||||
### OSIrqTimeCPU_*N*
|
||||
|
||||
The ratio of time spent for running hardware interrupt requests on the CPU. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server. A high number of this metric may indicate hardware misconfiguration or a very high network load. The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores].
|
||||
|
||||
### OSIrqTimeNormalized
|
||||
|
||||
The value is similar to `OSIrqTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores. This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric.
|
||||
|
||||
### OSMemoryAvailable
|
||||
|
||||
The amount of memory available to be used by programs, in bytes. This is very similar to the `OSMemoryFreePlusCached` metric. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server.
|
||||
|
||||
### OSMemoryBuffers
|
||||
|
||||
The amount of memory used by OS kernel buffers, in bytes. This should be typically small, and large values may indicate a misconfiguration of the OS. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server.
|
||||
|
||||
### OSMemoryCached
|
||||
|
||||
The amount of memory used by the OS page cache, in bytes. Typically, almost all available memory is used by the OS page cache - high values of this metric are normal and expected. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server.
|
||||
|
||||
### OSMemoryFreePlusCached
|
||||
|
||||
The amount of free memory plus OS page cache memory on the host system, in bytes. This memory is available to be used by programs. The value should be very similar to `OSMemoryAvailable`. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server.
|
||||
|
||||
### OSMemoryFreeWithoutCached
|
||||
|
||||
The amount of free memory on the host system, in bytes. This does not include the memory used by the OS page cache memory, in bytes. The page cache memory is also available for usage by programs, so the value of this metric can be confusing. See the `OSMemoryAvailable` metric instead. For convenience we also provide the `OSMemoryFreePlusCached` metric, that should be somewhat similar to OSMemoryAvailable. See also https://www.linuxatemyram.com/. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server.
|
||||
|
||||
### OSMemoryTotal
|
||||
|
||||
The total amount of memory on the host system, in bytes.
|
||||
|
||||
### OSNiceTime
|
||||
|
||||
The ratio of time the CPU core was running userspace code with higher priority. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server. The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores].
|
||||
|
||||
### OSNiceTimeCPU_*N*
|
||||
|
||||
The ratio of time the CPU core was running userspace code with higher priority. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server. The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores].
|
||||
|
||||
### OSNiceTimeNormalized
|
||||
|
||||
The value is similar to `OSNiceTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores. This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric.
|
||||
|
||||
### OSOpenFiles
|
||||
|
||||
The total number of opened files on the host machine. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server.
|
||||
|
||||
### OSProcessesBlocked
|
||||
|
||||
Number of threads blocked waiting for I/O to complete (`man procfs`). This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server.
|
||||
|
||||
### OSProcessesCreated
|
||||
|
||||
The number of processes created. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server.
|
||||
|
||||
### OSProcessesRunning
|
||||
|
||||
The number of runnable (running or ready to run) threads by the operating system. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server.
|
||||
|
||||
### OSSoftIrqTime
|
||||
|
||||
The ratio of time spent for running software interrupt requests on the CPU. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server. A high number of this metric may indicate inefficient software running on the system. The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores].
|
||||
|
||||
### OSSoftIrqTimeCPU_*N*
|
||||
|
||||
The ratio of time spent for running software interrupt requests on the CPU. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server. A high number of this metric may indicate inefficient software running on the system. The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores].
|
||||
|
||||
### OSSoftIrqTimeNormalized
|
||||
|
||||
The value is similar to `OSSoftIrqTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores. This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric.
|
||||
|
||||
### OSStealTime
|
||||
|
||||
The ratio of time spent in other operating systems by the CPU when running in a virtualized environment. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server. Not every virtualized environments present this metric, and most of them don't. The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores].
|
||||
|
||||
### OSStealTimeCPU_*N*
|
||||
|
||||
The ratio of time spent in other operating systems by the CPU when running in a virtualized environment. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server. Not every virtualized environments present this metric, and most of them don't. The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores].
|
||||
|
||||
### OSStealTimeNormalized
|
||||
|
||||
The value is similar to `OSStealTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores. This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric.
|
||||
|
||||
### OSSystemTime
|
||||
|
||||
The ratio of time the CPU core was running OS kernel (system) code. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server. The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores].
|
||||
|
||||
### OSSystemTimeCPU_*N*
|
||||
|
||||
The ratio of time the CPU core was running OS kernel (system) code. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server. The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores].
|
||||
|
||||
### OSSystemTimeNormalized
|
||||
|
||||
The value is similar to `OSSystemTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores. This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric.
|
||||
|
||||
### OSThreadsRunnable
|
||||
|
||||
The total number of 'runnable' threads, as the OS kernel scheduler seeing it.
|
||||
|
||||
### OSThreadsTotal
|
||||
|
||||
The total number of threads, as the OS kernel scheduler seeing it.
|
||||
|
||||
### OSUptime
|
||||
|
||||
The uptime of the host server (the machine where ClickHouse is running), in seconds.
|
||||
|
||||
### OSUserTime
|
||||
|
||||
The ratio of time the CPU core was running userspace code. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server. This includes also the time when the CPU was under-utilized due to the reasons internal to the CPU (memory loads, pipeline stalls, branch mispredictions, running another SMT core). The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores].
|
||||
|
||||
### OSUserTimeCPU_*N*
|
||||
|
||||
The ratio of time the CPU core was running userspace code. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server. This includes also the time when the CPU was under-utilized due to the reasons internal to the CPU (memory loads, pipeline stalls, branch mispredictions, running another SMT core). The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores].
|
||||
|
||||
### OSUserTimeNormalized
|
||||
|
||||
The value is similar to `OSUserTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores. This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric.
|
||||
|
||||
### PostgreSQLThreads
|
||||
|
||||
Number of threads in the server of the PostgreSQL compatibility protocol.
|
||||
|
||||
### ReplicasMaxAbsoluteDelay
|
||||
|
||||
Maximum difference in seconds between the most fresh replicated part and the most fresh data part still to be replicated, across Replicated tables. A very high value indicates a replica with no data.
|
||||
|
||||
### ReplicasMaxInsertsInQueue
|
||||
|
||||
Maximum number of INSERT operations in the queue (still to be replicated) across Replicated tables.
|
||||
|
||||
### ReplicasMaxMergesInQueue
|
||||
|
||||
Maximum number of merge operations in the queue (still to be applied) across Replicated tables.
|
||||
|
||||
### ReplicasMaxQueueSize
|
||||
|
||||
Maximum queue size (in the number of operations like get, merge) across Replicated tables.
|
||||
|
||||
### ReplicasMaxRelativeDelay
|
||||
|
||||
Maximum difference between the replica delay and the delay of the most up-to-date replica of the same table, across Replicated tables.
|
||||
|
||||
### ReplicasSumInsertsInQueue
|
||||
|
||||
Sum of INSERT operations in the queue (still to be replicated) across Replicated tables.
|
||||
|
||||
### ReplicasSumMergesInQueue
|
||||
|
||||
Sum of merge operations in the queue (still to be applied) across Replicated tables.
|
||||
|
||||
### ReplicasSumQueueSize
|
||||
|
||||
Sum queue size (in the number of operations like get, merge) across Replicated tables.
|
||||
|
||||
### TCPThreads
|
||||
|
||||
Number of threads in the server of the TCP protocol (without TLS).
|
||||
|
||||
### Temperature_*N*
|
||||
|
||||
The temperature of the corresponding device in ℃. A sensor can return an unrealistic value. Source: `/sys/class/thermal`
|
||||
|
||||
### Temperature_*name*
|
||||
|
||||
The temperature reported by the corresponding hardware monitor and the corresponding sensor in ℃. A sensor can return an unrealistic value. Source: `/sys/class/hwmon`
|
||||
|
||||
### TotalBytesOfMergeTreeTables
|
||||
|
||||
Total amount of bytes (compressed, including data and indices) stored in all tables of MergeTree family.
|
||||
|
||||
### TotalPartsOfMergeTreeTables
|
||||
|
||||
Total amount of data parts in all tables of MergeTree family. Numbers larger than 10 000 will negatively affect the server startup time and it may indicate unreasonable choice of the partition key.
### TotalRowsOfMergeTreeTables

Total amount of rows (records) stored in all tables of the MergeTree family.

### UncompressedCacheBytes

Total size of the uncompressed cache in bytes. The uncompressed cache does not usually improve performance and should be mostly avoided.

### UncompressedCacheCells

Total number of entries in the uncompressed cache. Each entry represents a decompressed block of data. The uncompressed cache does not usually improve performance and should be mostly avoided.

### Uptime

The server uptime in seconds. It includes the time spent for server initialization before accepting connections.

### jemalloc.active

An internal metric of the low-level memory allocator (jemalloc). See https://jemalloc.net/jemalloc.3.html

### jemalloc.allocated

An internal metric of the low-level memory allocator (jemalloc). See https://jemalloc.net/jemalloc.3.html

### jemalloc.arenas.all.dirty_purged

An internal metric of the low-level memory allocator (jemalloc). See https://jemalloc.net/jemalloc.3.html

### jemalloc.arenas.all.muzzy_purged

An internal metric of the low-level memory allocator (jemalloc). See https://jemalloc.net/jemalloc.3.html

### jemalloc.arenas.all.pactive

An internal metric of the low-level memory allocator (jemalloc). See https://jemalloc.net/jemalloc.3.html

### jemalloc.arenas.all.pdirty

An internal metric of the low-level memory allocator (jemalloc). See https://jemalloc.net/jemalloc.3.html

### jemalloc.arenas.all.pmuzzy

An internal metric of the low-level memory allocator (jemalloc). See https://jemalloc.net/jemalloc.3.html

### jemalloc.background_thread.num_runs

An internal metric of the low-level memory allocator (jemalloc). See https://jemalloc.net/jemalloc.3.html

### jemalloc.background_thread.num_threads

An internal metric of the low-level memory allocator (jemalloc). See https://jemalloc.net/jemalloc.3.html

### jemalloc.background_thread.run_intervals

An internal metric of the low-level memory allocator (jemalloc). See https://jemalloc.net/jemalloc.3.html

### jemalloc.epoch

An internal incremental update number of the statistics of jemalloc (Jason Evans' memory allocator), used in all other `jemalloc` metrics.

### jemalloc.mapped

An internal metric of the low-level memory allocator (jemalloc). See https://jemalloc.net/jemalloc.3.html

### jemalloc.metadata

An internal metric of the low-level memory allocator (jemalloc). See https://jemalloc.net/jemalloc.3.html

### jemalloc.metadata_thp

An internal metric of the low-level memory allocator (jemalloc). See https://jemalloc.net/jemalloc.3.html

### jemalloc.resident

An internal metric of the low-level memory allocator (jemalloc). See https://jemalloc.net/jemalloc.3.html

### jemalloc.retained

An internal metric of the low-level memory allocator (jemalloc). See https://jemalloc.net/jemalloc.3.html

**See Also**

- [Monitoring](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring.
@ -34,6 +34,720 @@ SELECT * FROM system.metrics LIMIT 10
└──────────────────────────────────────┴───────┴────────────────────────────────────────────────────────────────────────┘
```

## Metric descriptions

### AggregatorThreads

Number of threads in the Aggregator thread pool.

### AggregatorThreadsActive

Number of threads in the Aggregator thread pool running a task.

### AsyncInsertCacheSize

Number of async insert hash ids in the cache.

### AsynchronousInsertThreads

Number of threads in the AsynchronousInsert thread pool.

### AsynchronousInsertThreadsActive

Number of threads in the AsynchronousInsert thread pool running a task.

### AsynchronousReadWait

Number of threads waiting for asynchronous reads.

### BackgroundBufferFlushSchedulePoolSize

Limit on the number of tasks in BackgroundBufferFlushSchedulePool.

### BackgroundBufferFlushSchedulePoolTask

Number of active tasks in BackgroundBufferFlushSchedulePool. This pool is used for periodic Buffer flushes.

### BackgroundCommonPoolSize

Limit on the number of tasks in an associated background pool.

### BackgroundCommonPoolTask

Number of active tasks in an associated background pool.

### BackgroundDistributedSchedulePoolSize

Limit on the number of tasks in BackgroundDistributedSchedulePool.

### BackgroundDistributedSchedulePoolTask

Number of active tasks in BackgroundDistributedSchedulePool. This pool is used for distributed sends that are done in the background.

### BackgroundFetchesPoolSize

Limit on the number of simultaneous fetches in an associated background pool.

### BackgroundFetchesPoolTask

Number of active fetches in an associated background pool.

### BackgroundMergesAndMutationsPoolSize

Limit on the number of active merges and mutations in an associated background pool.

### BackgroundMergesAndMutationsPoolTask

Number of active merges and mutations in an associated background pool.

### BackgroundMessageBrokerSchedulePoolSize

Limit on the number of tasks in BackgroundProcessingPool for message streaming.

### BackgroundMessageBrokerSchedulePoolTask

Number of active tasks in BackgroundProcessingPool for message streaming.

### BackgroundMovePoolSize

Limit on the number of tasks in BackgroundProcessingPool for moves.

### BackgroundMovePoolTask

Number of active tasks in BackgroundProcessingPool for moves.

### BackgroundSchedulePoolSize

Limit on the number of tasks in BackgroundSchedulePool. This pool is used for periodic ReplicatedMergeTree tasks, like cleaning old data parts, altering data parts, replica re-initialization, etc.

### BackgroundSchedulePoolTask

Number of active tasks in BackgroundSchedulePool. This pool is used for periodic ReplicatedMergeTree tasks, like cleaning old data parts, altering data parts, replica re-initialization, etc.

### BackupsIOThreads

Number of threads in the BackupsIO thread pool.

### BackupsIOThreadsActive

Number of threads in the BackupsIO thread pool running a task.

### BackupsThreads

Number of threads in the thread pool for BACKUP.

### BackupsThreadsActive

Number of threads in the thread pool for BACKUP running a task.

### BrokenDistributedFilesToInsert

Number of files for asynchronous insertion into Distributed tables that have been marked as broken. This metric starts from 0 on server start. The number of files for every shard is summed.

### CacheDetachedFileSegments

Number of existing detached cache file segments.

### CacheDictionaryThreads

Number of threads in the CacheDictionary thread pool.

### CacheDictionaryThreadsActive

Number of threads in the CacheDictionary thread pool running a task.

### CacheDictionaryUpdateQueueBatches

Number of 'batches' (a set of keys) in the update queue in CacheDictionaries.

### CacheDictionaryUpdateQueueKeys

Exact number of keys in the update queue in CacheDictionaries.

### CacheFileSegments

Number of existing cache file segments.

### ContextLockWait

Number of threads waiting for a lock in Context. This is a global lock.

### DDLWorkerThreads

Number of threads in the DDLWorker thread pool for ON CLUSTER queries.

### DDLWorkerThreadsActive

Number of threads in the DDLWorker thread pool for ON CLUSTER queries running a task.

### DatabaseCatalogThreads

Number of threads in the DatabaseCatalog thread pool.

### DatabaseCatalogThreadsActive

Number of threads in the DatabaseCatalog thread pool running a task.

### DatabaseOnDiskThreads

Number of threads in the DatabaseOnDisk thread pool.

### DatabaseOnDiskThreadsActive

Number of threads in the DatabaseOnDisk thread pool running a task.

### DatabaseOrdinaryThreads

Number of threads in the Ordinary database thread pool.

### DatabaseOrdinaryThreadsActive

Number of threads in the Ordinary database thread pool running a task.

### DelayedInserts

Number of INSERT queries that are throttled due to a high number of active data parts for a partition in a MergeTree table.

### DestroyAggregatesThreads

Number of threads in the thread pool for destroying aggregate states.

### DestroyAggregatesThreadsActive

Number of threads in the thread pool for destroying aggregate states running a task.

### DictCacheRequests

Number of requests in flight to data sources of dictionaries of cache type.

### DiskObjectStorageAsyncThreads

Number of threads in the async thread pool for DiskObjectStorage.

### DiskObjectStorageAsyncThreadsActive

Number of threads in the async thread pool for DiskObjectStorage running a task.

### DiskSpaceReservedForMerge

Disk space reserved for currently running background merges. It is slightly more than the total size of currently merging parts.

### DistributedFilesToInsert

Number of pending files to process for asynchronous insertion into Distributed tables. The number of files for every shard is summed.

### DistributedSend

Number of connections to remote servers sending data that was INSERTed into Distributed tables. Both synchronous and asynchronous modes.

### EphemeralNode

Number of ephemeral nodes held in ZooKeeper.

### FilesystemCacheElements

Filesystem cache elements (file segments).

### FilesystemCacheReadBuffers

Number of active cache buffers.

### FilesystemCacheSize

Filesystem cache size in bytes.

### GlobalThread

Number of threads in the global thread pool.

### GlobalThreadActive

Number of threads in the global thread pool running a task.

### HTTPConnection

Number of connections to the HTTP server.

### HashedDictionaryThreads

Number of threads in the HashedDictionary thread pool.

### HashedDictionaryThreadsActive

Number of threads in the HashedDictionary thread pool running a task.

### IOPrefetchThreads

Number of threads in the IO prefetch thread pool.

### IOPrefetchThreadsActive

Number of threads in the IO prefetch thread pool running a task.

### IOThreads

Number of threads in the IO thread pool.

### IOThreadsActive

Number of threads in the IO thread pool running a task.

### IOUringInFlightEvents

Number of io_uring SQEs in flight.

### IOUringPendingEvents

Number of io_uring SQEs waiting to be submitted.

### IOWriterThreads

Number of threads in the IO writer thread pool.

### IOWriterThreadsActive

Number of threads in the IO writer thread pool running a task.

### InterserverConnection

Number of connections from other replicas to fetch parts.

### KafkaAssignedPartitions

Number of partitions Kafka tables are currently assigned to.

### KafkaBackgroundReads

Number of background reads currently working (populating materialized views from Kafka).

### KafkaConsumers

Number of active Kafka consumers.

### KafkaConsumersInUse

Number of consumers which are currently used by direct or background reads.

### KafkaConsumersWithAssignment

Number of active Kafka consumers which have some partitions assigned.

### KafkaLibrdkafkaThreads

Number of active librdkafka threads.

### KafkaProducers

Number of active Kafka producers created.

### KafkaWrites

Number of currently running inserts to Kafka.

### KeeperAliveConnections

Number of alive connections.

### KeeperOutstandingRequets

Number of outstanding requests.

### LocalThread

Number of threads in local thread pools. The threads in local thread pools are taken from the global thread pool.

### LocalThreadActive

Number of threads in local thread pools running a task.
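Thread-pool metrics pair naturally with their `*Active` counterparts, so pool saturation can be checked in one query (a sketch; the metric names are taken from this page):

```sql
SELECT metric, value
FROM system.metrics
WHERE metric IN ('GlobalThread', 'GlobalThreadActive', 'LocalThread', 'LocalThreadActive')
```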
### MMappedAllocBytes

Sum of bytes of mmapped allocations.

### MMappedAllocs

Total number of mmapped allocations.

### MMappedFileBytes

Sum size of mmapped file regions.

### MMappedFiles

Total number of mmapped files.

### MarksLoaderThreads

Number of threads in the thread pool for loading marks.

### MarksLoaderThreadsActive

Number of threads in the thread pool for loading marks running a task.

### MaxDDLEntryID

Max processed DDL entry of DDLWorker.

### MaxPushedDDLEntryID

Max DDL entry of DDLWorker that was pushed to ZooKeeper.

### MemoryTracking

Total amount of memory (bytes) allocated by the server.

### Merge

Number of executing background merges.

### MergeTreeAllRangesAnnouncementsSent

The current number of announcements being sent in flight from the remote server to the initiator server about the set of data parts (for MergeTree tables). Measured on the remote server side.

### MergeTreeBackgroundExecutorThreads

Number of threads in the MergeTreeBackgroundExecutor thread pool.

### MergeTreeBackgroundExecutorThreadsActive

Number of threads in the MergeTreeBackgroundExecutor thread pool running a task.

### MergeTreeDataSelectExecutorThreads

Number of threads in the MergeTreeDataSelectExecutor thread pool.

### MergeTreeDataSelectExecutorThreadsActive

Number of threads in the MergeTreeDataSelectExecutor thread pool running a task.

### MergeTreePartsCleanerThreads

Number of threads in the MergeTree parts cleaner thread pool.

### MergeTreePartsCleanerThreadsActive

Number of threads in the MergeTree parts cleaner thread pool running a task.

### MergeTreePartsLoaderThreads

Number of threads in the MergeTree parts loader thread pool.

### MergeTreePartsLoaderThreadsActive

Number of threads in the MergeTree parts loader thread pool running a task.

### MergeTreeReadTaskRequestsSent

The current number of callback requests in flight from the remote server back to the initiator server to choose the read task (for MergeTree tables). Measured on the remote server side.

### Move

Number of currently executing moves.

### MySQLConnection

Number of client connections using the MySQL protocol.

### NetworkReceive

Number of threads receiving data from the network. Only ClickHouse-related network interaction is included, not that of 3rd party libraries.

### NetworkSend

Number of threads sending data to the network. Only ClickHouse-related network interaction is included, not that of 3rd party libraries.

### OpenFileForRead

Number of files open for reading.

### OpenFileForWrite

Number of files open for writing.

### ParallelFormattingOutputFormatThreads

Number of threads in the ParallelFormattingOutputFormatThreads thread pool.

### ParallelFormattingOutputFormatThreadsActive

Number of threads in the ParallelFormattingOutputFormatThreads thread pool running a task.

### ParallelParsingInputFormatThreads

Number of threads in the ParallelParsingInputFormat thread pool.

### ParallelParsingInputFormatThreadsActive

Number of threads in the ParallelParsingInputFormat thread pool running a task.

### PartMutation

Number of mutations (ALTER DELETE/UPDATE).

### PartsActive

Active data part, used by current and upcoming SELECTs.

### PartsCommitted

Deprecated. See PartsActive.

### PartsCompact

Compact parts.

### PartsDeleteOnDestroy

Part was moved to another disk and should be deleted in its own destructor.

### PartsDeleting

Inactive data part with an identity refcounter; it is currently being deleted by a cleaner.

### PartsInMemory

In-memory parts.

### PartsOutdated

Inactive data part that can be used only by current SELECTs; it can be deleted after the SELECTs finish.

### PartsPreActive

The part is in data_parts, but not used for SELECTs.

### PartsPreCommitted

Deprecated. See PartsPreActive.

### PartsTemporary

The part is being generated now; it is not in the data_parts list.

### PartsWide

Wide parts.

### PendingAsyncInsert

Number of asynchronous inserts that are waiting for flush.

### PostgreSQLConnection

Number of client connections using the PostgreSQL protocol.

### Query

Number of executing queries.

### QueryPreempted

Number of queries that are stopped and waiting due to the 'priority' setting.

### QueryThread

Number of query processing threads.

### RWLockActiveReaders

Number of threads holding a read lock in a table RWLock.

### RWLockActiveWriters

Number of threads holding a write lock in a table RWLock.

### RWLockWaitingReaders

Number of threads waiting for a read on a table RWLock.

### RWLockWaitingWriters

Number of threads waiting for a write on a table RWLock.

### Read

Number of read (read, pread, io_getevents, etc.) syscalls in flight.

### ReadTaskRequestsSent

The current number of callback requests in flight from the remote server back to the initiator server to choose the read task (for the s3Cluster table function and similar). Measured on the remote server side.

### ReadonlyReplica

Number of Replicated tables that are currently in a readonly state due to re-initialization after a ZooKeeper session loss or due to startup without ZooKeeper configured.

### RemoteRead

Number of reads with a remote reader in flight.

### ReplicatedChecks

Number of data parts being checked for consistency.

### ReplicatedFetch

Number of data parts being fetched from a replica.

### ReplicatedSend

Number of data parts being sent to replicas.

### RestartReplicaThreads

Number of threads in the RESTART REPLICA thread pool.

### RestartReplicaThreadsActive

Number of threads in the RESTART REPLICA thread pool running a task.

### RestoreThreads

Number of threads in the thread pool for RESTORE.

### RestoreThreadsActive

Number of threads in the thread pool for RESTORE running a task.

### Revision

Revision of the server. It is a number incremented for every release or release candidate except patch releases.
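The same number should be returned by the `revision()` function (a sketch):

```sql
SELECT revision()
```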
### S3Requests

S3 requests.

### SendExternalTables

Number of connections that are sending data for external tables to remote servers. External tables are used to implement GLOBAL IN and GLOBAL JOIN operators with distributed subqueries.

### SendScalars

Number of connections that are sending data for scalars to remote servers.

### StartupSystemTablesThreads

Number of threads in the StartupSystemTables thread pool.

### StartupSystemTablesThreadsActive

Number of threads in the StartupSystemTables thread pool running a task.

### StorageBufferBytes

Number of bytes in buffers of Buffer tables.

### StorageBufferRows

Number of rows in buffers of Buffer tables.

### StorageDistributedThreads

Number of threads in the StorageDistributed thread pool.

### StorageDistributedThreadsActive

Number of threads in the StorageDistributed thread pool running a task.

### StorageHiveThreads

Number of threads in the StorageHive thread pool.

### StorageHiveThreadsActive

Number of threads in the StorageHive thread pool running a task.

### StorageS3Threads

Number of threads in the StorageS3 thread pool.

### StorageS3ThreadsActive

Number of threads in the StorageS3 thread pool running a task.

### SystemReplicasThreads

Number of threads in the system.replicas thread pool.

### SystemReplicasThreadsActive

Number of threads in the system.replicas thread pool running a task.

### TCPConnection

Number of connections to the TCP server (clients with the native interface); also includes server-to-server connections for distributed queries.

### TablesLoaderThreads

Number of threads in the tables loader thread pool.

### TablesLoaderThreadsActive

Number of threads in the tables loader thread pool running a task.

### TablesToDropQueueSize

Number of dropped tables that are waiting for background data removal.

### TemporaryFilesForAggregation

Number of temporary files created for external aggregation.

### TemporaryFilesForJoin

Number of temporary files created for JOIN.

### TemporaryFilesForSort

Number of temporary files created for external sorting.

### TemporaryFilesUnknown

Number of temporary files created without a known purpose.

### ThreadPoolFSReaderThreads

Number of threads in the thread pool for local_filesystem_read_method=threadpool.

### ThreadPoolFSReaderThreadsActive

Number of threads in the thread pool for local_filesystem_read_method=threadpool running a task.

### ThreadPoolRemoteFSReaderThreads

Number of threads in the thread pool for remote_filesystem_read_method=threadpool.

### ThreadPoolRemoteFSReaderThreadsActive

Number of threads in the thread pool for remote_filesystem_read_method=threadpool running a task.

### ThreadsInOvercommitTracker

Number of waiting threads inside OvercommitTracker.

### TotalTemporaryFiles

Number of temporary files created.

### VersionInteger

Version of the server in a single integer number in base-1000. For example, version 11.22.33 is translated to 11022033.
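As a worked example of the base-1000 encoding, each version component occupies three decimal digits:

```sql
SELECT 11 * 1000 * 1000 + 22 * 1000 + 33 AS version_integer -- 11022033
```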
### Write

Number of write (write, pwrite, io_getevents, etc.) syscalls in flight.

### ZooKeeperRequest

Number of requests to ZooKeeper in flight.

### ZooKeeperSession

Number of sessions (connections) to ZooKeeper. Should be no more than one, because using more than one connection to ZooKeeper may lead to bugs due to the lack of linearizability (stale reads) that ZooKeeper's consistency model allows.
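This invariant can be checked on a running server (a sketch):

```sql
SELECT value FROM system.metrics WHERE metric = 'ZooKeeperSession'
```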
### ZooKeeperWatch

Number of watches (event subscriptions) in ZooKeeper.

**See Also**

- [system.asynchronous_metrics](../../operations/system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics) — Contains periodically calculated metrics.
@ -1,4 +1,4 @@

# processors_profile_log

This table contains profiling on the processor level (processors can be found in [`EXPLAIN PIPELINE`](../../sql-reference/statements/explain.md#explain-pipeline)).
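For instance, the slowest processors of recent queries can be listed like this (a sketch; `elapsed_us` is the processor's execution time in microseconds):

```sql
SELECT name, elapsed_us, input_rows, output_rows
FROM system.processors_profile_log
ORDER BY elapsed_us DESC
LIMIT 10
```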
@ -73,4 +73,4 @@ Here you can see:

**See Also**

- [`EXPLAIN PIPELINE`](../../sql-reference/statements/explain.md#explain-pipeline)
@ -646,7 +646,7 @@ SELECT arraySlice([1, 2, NULL, 4, 5], 2, 3) AS res;

Array elements set to `NULL` are handled as normal values.

## arraySort(\[func,\] arr, …) {#array_functions-sort}

Sorts the elements of the `arr` array in ascending order. If the `func` function is specified, sorting order is determined by the result of the `func` function applied to the elements of the array. If `func` accepts multiple arguments, the `arraySort` function is passed several arrays that the arguments of `func` will correspond to. Detailed examples are shown at the end of the `arraySort` description.

@ -751,7 +751,7 @@ To improve sorting efficiency, the [Schwartzian transform](https://en.wikipedia.

Same as `arraySort` with an additional `limit` argument allowing partial sorting. Returns an array of the same size as the original array where elements in the range `[1..limit]` are sorted in ascending order. The remaining elements `(limit..N]` are left in unspecified order.

## arrayReverseSort(\[func,\] arr, …) {#array_functions-reverse-sort}

Sorts the elements of the `arr` array in descending order. If the `func` function is specified, `arr` is sorted according to the result of the `func` function applied to the elements of the array, and then the sorted array is reversed. If `func` accepts multiple arguments, the `arrayReverseSort` function is passed several arrays that the arguments of `func` will correspond to. Detailed examples are shown at the end of the `arrayReverseSort` description.

@ -1215,3 +1215,96 @@ Result:

│ A240 │
└──────────────────┘
```
## extractKeyValuePairs

Extracts key-value pairs from any string. The string does not need to be 100% structured in a key-value pair format;
it can contain noise (e.g. log files). The key-value pair format to be interpreted should be specified via function arguments.

A key-value pair consists of a key followed by a `key_value_delimiter` and a value. Quoted keys and values are also supported. Key-value pairs must be separated by pair delimiters.

**Syntax**

``` sql
extractKeyValuePairs(data, [key_value_delimiter], [pair_delimiters], [quoting_character])
```

**Arguments**

- `data` - String to extract key-value pairs from. [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).
- `key_value_delimiter` - Character to be used as the delimiter between the key and the value. Defaults to `:`. [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).
- `pair_delimiters` - Set of characters to be used as delimiters between pairs. Defaults to `\space`, `,` and `;`. [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).
- `quoting_character` - Character to be used as the quoting character. Defaults to `"`. [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).

**Returned values**

- The extracted key-value pairs in a Map(String, String).

**Examples**

Query:

**Simple case**

``` sql
arthur :) select extractKeyValuePairs('name:neymar, age:31 team:psg,nationality:brazil') as kv

SELECT extractKeyValuePairs('name:neymar, age:31 team:psg,nationality:brazil') as kv

Query id: f9e0ca6f-3178-4ee2-aa2c-a5517abb9cee

┌─kv──────────────────────────────────────────────────────────────────────┐
│ {'name':'neymar','age':'31','team':'psg','nationality':'brazil'} │
└─────────────────────────────────────────────────────────────────────────┘
```

**Single quote as quoting character**

``` sql
arthur :) select extractKeyValuePairs('name:\'neymar\';\'age\':31;team:psg;nationality:brazil,last_key:last_value', ':', ';,', '\'') as kv

SELECT extractKeyValuePairs('name:\'neymar\';\'age\':31;team:psg;nationality:brazil,last_key:last_value', ':', ';,', '\'') as kv

Query id: 0e22bf6b-9844-414a-99dc-32bf647abd5e

┌─kv───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
│ {'name':'neymar','age':'31','team':'psg','nationality':'brazil','last_key':'last_value'} │
└──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```

**Escape sequences without escape sequence support**

``` sql
arthur :) select extractKeyValuePairs('age:a\\x0A\\n\\0') as kv

SELECT extractKeyValuePairs('age:a\\x0A\\n\\0') AS kv

Query id: e9fd26ee-b41f-4a11-b17f-25af6fd5d356

┌─kv─────────────────────┐
│ {'age':'a\\x0A\\n\\0'} │
└────────────────────────┘
```

## extractKeyValuePairsWithEscaping

Same as `extractKeyValuePairs` but with escaping support.

Supported escape sequences: `\x`, `\N`, `\a`, `\b`, `\e`, `\f`, `\n`, `\r`, `\t`, `\v` and `\0`.
Non-standard escape sequences are returned as-is (including the backslash) unless they are one of the following:
`\\`, `'`, `"`, `backtick`, `/`, `=` or ASCII control characters (c <= 31).

This function satisfies the use case where pre-escaping and post-escaping are not suitable. For instance, consider the following
input string: `a: "aaaa\"bbb"`. The expected output is: `a: aaaa\"bbb`.
- Pre-escaping: Pre-escaping it will output `a: "aaaa"bbb"` and `extractKeyValuePairs` will then output `a: aaaa`.
- Post-escaping: `extractKeyValuePairs` will output `a: aaaa\` and post-escaping will keep it as-is.

Leading escape sequences will be skipped in keys and will be considered invalid for values.

**Escape sequences with escape sequence support turned on**

``` sql
arthur :) select extractKeyValuePairsWithEscaping('age:a\\x0A\\n\\0') as kv

SELECT extractKeyValuePairsWithEscaping('age:a\\x0A\\n\\0') AS kv

Query id: 44c114f0-5658-4c75-ab87-4574de3a1645

┌─kv────────────────┐
│ {'age':'a\n\n\0'} │
└───────────────────┘
```
@ -69,24 +69,27 @@ Result:

Merges an [Array](../../sql-reference/data-types/array.md) of keys and an [Array](../../sql-reference/data-types/array.md) of values into a [Map(key, value)](../../sql-reference/data-types/map.md). Notice that the second argument could also be a [Map](../../sql-reference/data-types/map.md); in that case it is cast to an Array during execution.

The function is a more convenient alternative to `CAST((key_array, value_array_or_map), 'Map(key_type, value_type)')`. For example, instead of writing `CAST((['aa', 'bb'], [4, 5]), 'Map(String, UInt32)')`, you can write `mapFromArrays(['aa', 'bb'], [4, 5])`.

**Syntax**

```sql
mapFromArrays(keys, values)
```

Alias: `MAP_FROM_ARRAYS(keys, values)`

**Arguments**

- `keys` — Given key array to create a map from. The nested type of array must be: [String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md), [LowCardinality](../../sql-reference/data-types/lowcardinality.md), [FixedString](../../sql-reference/data-types/fixedstring.md), [UUID](../../sql-reference/data-types/uuid.md), [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md), [Date32](../../sql-reference/data-types/date32.md), [Enum](../../sql-reference/data-types/enum.md)
- `values` - Given value array or map to create a map from.

**Returned value**

- A map whose keys and values are constructed from the key array and value array/map.

**Example**

Query:

@ -94,6 +97,7 @@ Query:

```sql
select mapFromArrays(['a', 'b', 'c'], [1, 2, 3])
```

```text
┌─mapFromArrays(['a', 'b', 'c'], [1, 2, 3])─┐
│ {'a':1,'b':2,'c':3} │
└───────────────────────────────────────────┘
```
@ -391,25 +395,24 @@ Result:

│ ['eleven','11'] │
│ ['twelve','6.0'] │
└──────────────────┘
```

## mapContainsKeyLike

**Syntax**

```sql
mapContainsKeyLike(map, pattern)
```

**Arguments**

- `map` — Map. [Map](../../sql-reference/data-types/map.md).
- `pattern` - String pattern to match.

**Returned value**

- `1` if `map` contains a key like the specified pattern, `0` if not.

**Example**

Query:

@ -420,34 +423,34 @@ CREATE TABLE test (a Map(String,String)) ENGINE = Memory;

INSERT INTO test VALUES ({'abc':'abc','def':'def'}), ({'hij':'hij','klm':'klm'});

SELECT mapContainsKeyLike(a, 'a%') FROM test;
```

Result:

```text
┌─mapContainsKeyLike(a, 'a%')─┐
│ 1 │
│ 0 │
└─────────────────────────────┘
```

## mapExtractKeyLike
**Syntax**

```sql
mapExtractKeyLike(map, pattern)
```

**Arguments**

- `map` — Map. [Map](../../sql-reference/data-types/map.md).
- `pattern` - String pattern to match.

**Returned value**

- A map containing only the elements whose key matches the specified pattern. If no elements match the pattern, an empty map is returned.

**Example**

Query:

@ -458,34 +461,34 @@ CREATE TABLE test (a Map(String,String)) ENGINE = Memory;

INSERT INTO test VALUES ({'abc':'abc','def':'def'}), ({'hij':'hij','klm':'klm'});

SELECT mapExtractKeyLike(a, 'a%') FROM test;
```

Result:

```text
┌─mapExtractKeyLike(a, 'a%')─┐
│ {'abc':'abc'} │
│ {} │
└────────────────────────────┘
```

## mapApply
**Syntax**

```sql
mapApply(func, map)
```

**Arguments**

- `func` - [Lambda function](../../sql-reference/functions/index.md#higher-order-functions---operator-and-lambdaparams-expr-function).
- `map` — [Map](../../sql-reference/data-types/map.md).

**Returned value**

- Returns a map obtained from the original map by application of `func(map1[i], …, mapN[i])` for each element.

**Example**

Query:

@ -497,36 +500,36 @@ FROM

    SELECT map('key1', number, 'key2', number * 2) AS _map
    FROM numbers(3)
)
```

Result:

```text
┌─r─────────────────────┐
│ {'key1':0,'key2':0} │
│ {'key1':10,'key2':20} │
│ {'key1':20,'key2':40} │
└───────────────────────┘
```

## mapFilter
**Syntax**

```sql
mapFilter(func, map)
```

**Arguments**

- `func` - [Lambda function](../../sql-reference/functions/index.md#higher-order-functions---operator-and-lambdaparams-expr-function).
- `map` — [Map](../../sql-reference/data-types/map.md).

**Returned value**

- Returns a map containing only the elements in `map` for which `func(map1[i], …, mapN[i])` returns something other than 0.

**Example**

Query:

@ -538,27 +541,27 @@ FROM

    SELECT map('key1', number, 'key2', number * 2) AS _map
    FROM numbers(3)
)
```

Result:

```text
┌─r───────────────────┐
│ {'key1':0,'key2':0} │
│ {'key2':2} │
│ {'key1':2,'key2':4} │
└─────────────────────┘
```

## mapUpdate
**Syntax**

```sql
mapUpdate(map1, map2)
```

**Arguments**

- `map1` [Map](../../sql-reference/data-types/map.md).

@ -567,19 +570,166 @@ mapUpdate(map1, map2)

**Returned value**

- Returns `map1` with values updated by the values for the corresponding keys in `map2`.

**Example**

Query:

```sql
SELECT mapUpdate(map('key1', 0, 'key3', 0), map('key1', 10, 'key2', 10)) AS map;
```

Result:

```text
┌─map────────────────────────────┐
│ {'key3':0,'key1':10,'key2':10} │
└────────────────────────────────┘
```
## mapConcat

**Syntax**

```sql
mapConcat(maps)
```

**Arguments**

- `maps` – Arbitrary number of arguments of [Map](../../sql-reference/data-types/map.md) type.

**Returned value**

- Returns a map with concatenated maps passed as arguments. If there are same keys in two or more maps, all of them are added to the result map, but only the first one is accessible via operator `[]`.

**Examples**

Query:

```sql
SELECT mapConcat(map('key1', 1, 'key3', 3), map('key2', 2)) AS map;
```

Result:

```text
┌─map──────────────────────────┐
│ {'key1':1,'key3':3,'key2':2} │
└──────────────────────────────┘
```

Query:

```sql
SELECT mapConcat(map('key1', 1, 'key2', 2), map('key1', 3)) AS map, map['key1'];
```

Result:

```text
┌─map──────────────────────────┬─elem─┐
│ {'key1':1,'key2':2,'key1':3} │ 1 │
└──────────────────────────────┴──────┘
```

## mapExists(\[func,\] map)

Returns 1 if there is at least one key-value pair in `map` for which `func(key, value)` returns something other than 0. Otherwise, it returns 0.

Note that `mapExists` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You can pass a lambda function to it as the first argument.

**Example**

Query:

```sql
SELECT mapExists((k, v) -> (v = 1), map('k1', 1, 'k2', 2)) AS res
```

Result:

```text
┌─res─┐
│ 1 │
└─────┘
```

## mapAll(\[func,\] map)

Returns 1 if `func(key, value)` returns something other than 0 for all key-value pairs in `map`. Otherwise, it returns 0.

Note that `mapAll` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You can pass a lambda function to it as the first argument.

**Example**

Query:

```sql
SELECT mapAll((k, v) -> (v = 1), map('k1', 1, 'k2', 2)) AS res
```

Result:

```text
┌─res─┐
│ 0 │
└─────┘
```

## mapSort(\[func,\] map)

Sorts the elements of the `map` in ascending order. If the `func` function is specified, the sorting order is determined by the result of the `func` function applied to the keys and values of the map.

**Examples**

``` sql
SELECT mapSort(map('key2', 2, 'key3', 1, 'key1', 3)) AS map;
```

``` text
┌─map──────────────────────────┐
│ {'key1':3,'key2':2,'key3':1} │
└──────────────────────────────┘
```

``` sql
SELECT mapSort((k, v) -> v, map('key2', 2, 'key3', 1, 'key1', 3)) AS map;
```

``` text
┌─map──────────────────────────┐
│ {'key3':1,'key2':2,'key1':3} │
└──────────────────────────────┘
```

For more details see the [reference](../../sql-reference/functions/array-functions.md#array_functions-sort) for the `arraySort` function.

## mapReverseSort(\[func,\] map)

Sorts the elements of the `map` in descending order. If the `func` function is specified, the sorting order is determined by the result of the `func` function applied to the keys and values of the map.

**Examples**

``` sql
SELECT mapReverseSort(map('key2', 2, 'key3', 1, 'key1', 3)) AS map;
```

``` text
┌─map──────────────────────────┐
│ {'key3':1,'key2':2,'key1':3} │
└──────────────────────────────┘
```

``` sql
SELECT mapReverseSort((k, v) -> v, map('key2', 2, 'key3', 1, 'key1', 3)) AS map;
```

``` text
┌─map──────────────────────────┐
│ {'key1':3,'key2':2,'key3':1} │
└──────────────────────────────┘
```

For more details see the [reference](../../sql-reference/functions/array-functions.md#array_functions-reverse-sort) for the `arrayReverseSort` function.
@ -36,6 +36,18 @@ GRANT [ON CLUSTER cluster_name] role [,...] TO {user | another_role | CURRENT_US

The `WITH ADMIN OPTION` clause grants [ADMIN OPTION](#admin-option-privilege) privilege to `user` or `role`.
The `WITH REPLACE OPTION` clause replaces old roles with the new roles for the `user` or `role`; if it is not specified, roles are appended.

## Grant Current Grants Syntax

``` sql
GRANT CURRENT GRANTS{(privilege[(column_name [,...])] [,...] ON {db.table|db.*|*.*|table|*}) | ON {db.table|db.*|*.*|table|*}} TO {user | role | CURRENT_USER} [,...] [WITH GRANT OPTION] [WITH REPLACE OPTION]
```

- `privilege` — Type of privilege.
- `role` — ClickHouse user role.
- `user` — ClickHouse user account.

Using the `CURRENT GRANTS` statement allows you to give all specified privileges to the given user or role.
If none of the privileges were specified, then the given user or role will receive all available privileges of `CURRENT_USER`.
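For example (a sketch; `john` is a hypothetical user), to hand over all of the current user's grants:

``` sql
GRANT CURRENT GRANTS ON *.* TO john;
```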
## Usage

To use `GRANT`, your account must have the `GRANT OPTION` privilege. You can grant privileges only inside the scope of your account privileges.
@ -37,6 +37,19 @@ GRANT [ON CLUSTER cluster_name] role [,...] TO {user | another_role | CURRENT_US

`WITH ADMIN OPTION` assigns the [ADMIN OPTION](#admin-option-privilege) privilege to the user or role.
`WITH REPLACE OPTION` replaces all old roles with the new roles for the `user` or `role`; if it is not specified, the new roles are appended.

## Grant Current Grants Syntax {#grant-current-grants-syntax}

```sql
GRANT CURRENT GRANTS{(privilege[(column_name [,...])] [,...] ON {db.table|db.*|*.*|table|*}) | ON {db.table|db.*|*.*|table|*}} TO {user | role | CURRENT_USER} [,...] [WITH GRANT OPTION] [WITH REPLACE OPTION]
```

- `privilege` — Type of privilege.
- `role` — ClickHouse user role.
- `user` — ClickHouse user account.

Using the `CURRENT GRANTS` expression allows you to assign all the specified privileges that are available for granting.
If the list of privileges is not given, the specified user or role will receive all available privileges of `CURRENT_USER`.

## Usage {#grant-usage}

To use `GRANT`, the user must have the `GRANT OPTION` privilege. A user can grant privileges only within the scope of the privileges assigned to them.
@ -25,7 +25,6 @@

#include <Common/formatReadable.h>
#include <Common/DNSResolver.h>
#include <Common/CurrentThread.h>
#include <Common/escapeForFileName.h>
#include <Common/getNumberOfPhysicalCPUCores.h>
#include <Common/ThreadStatus.h>
#include <Client/Connection.h>

@ -4,9 +4,11 @@

#include "TaskCluster.h"

#include <Parsers/ASTFunction.h>
#include <Common/escapeForFileName.h>

#include <boost/algorithm/string/join.hpp>

namespace DB
{
namespace ErrorCodes

@ -1293,7 +1293,7 @@

<!-- Path in ZooKeeper to store user-defined SQL functions created by the command CREATE FUNCTION.
     If not specified they will be stored locally. -->
<!-- <user_defined_zookeeper_path>/clickhouse/user_defined</user_defined_zookeeper_path> -->

<!-- Uncomment if you want data to be compressed 30-100% better.
     Don't do that if you just started using ClickHouse.

@ -1517,10 +1517,10 @@

<!-- Configuration for the query cache -->
<!-- <query_cache> -->
<!--     <max_size_in_bytes>1073741824</max_size_in_bytes> -->
<!--     <max_entries>1024</max_entries> -->
<!--     <max_entry_size_in_bytes>1048576</max_entry_size_in_bytes> -->
<!--     <max_entry_size_in_rows>30000000</max_entry_size_in_rows> -->
<!-- </query_cache> -->

<!-- Uncomment if enable merge tree metadata cache -->

@ -14,9 +14,9 @@

#include <Common/ZooKeeper/KeeperException.h>
#include <Common/ZooKeeper/Types.h>
#include <Common/ZooKeeper/ZooKeeper.h>
#include <Common/setThreadName.h>
#include <Common/ThreadPool.h>
#include <Common/escapeForFileName.h>
#include <base/range.h>
#include <base/sleep.h>
#include <boost/range/algorithm_ext/erase.hpp>
@ -9,6 +9,7 @@

#include <Common/HashTable/HashSet.h>
#include <Common/HashTable/HashMap.h>
#include <Common/SipHash.h>
#include <IO/ReadHelpersArena.h>

namespace DB

@ -4,6 +4,7 @@

#include <IO/WriteHelpers.h>
#include <IO/ReadHelpers.h>
#include <IO/ReadHelpersArena.h>

#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypesNumber.h>

@ -2,7 +2,6 @@

#include <base/sort.h>

#include <Common/Arena.h>
#include <Common/NaNUtils.h>

#include <Columns/ColumnVector.h>

@ -29,6 +28,7 @@

namespace DB
{
struct Settings;
class Arena;

namespace ErrorCodes
{

@ -6,7 +6,6 @@

#include <Columns/ColumnsNumber.h>

#include <Common/ArenaAllocator.h>
#include <Common/assert_cast.h>
#include <base/arithmeticOverflow.h>
#include <base/sort.h>

@ -5,7 +5,6 @@

#include <Columns/ColumnTuple.h>
#include <Common/Exception.h>
#include <Common/assert_cast.h>
#include <Common/ArenaAllocator.h>
#include <Common/PODArray_fwd.h>
#include <base/types.h>
#include <DataTypes/DataTypeNullable.h>

@ -6,7 +6,6 @@

#include <Columns/ColumnVector.h>
#include <Columns/ColumnTuple.h>
#include <Common/assert_cast.h>
#include <Common/ArenaAllocator.h>
#include <Common/PODArray_fwd.h>
#include <base/types.h>
#include <DataTypes/DataTypeArray.h>

@ -14,8 +14,6 @@

#include <DataTypes/DataTypeTuple.h>
#include <DataTypes/DataTypeArray.h>

#include <Common/ArenaAllocator.h>

namespace DB
{
struct Settings;

@ -8,7 +8,6 @@

#include <DataTypes/DataTypeArray.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <Common/ArenaAllocator.h>
#include <base/range.h>
#include <bitset>

@ -2,6 +2,7 @@

#include <IO/WriteHelpers.h>
#include <IO/ReadHelpers.h>
#include <IO/ReadHelpersArena.h>

#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypesNumber.h>

@ -6,7 +6,6 @@

#include <DataTypes/DataTypesNumber.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <Common/ArenaAllocator.h>
#include <Common/assert_cast.h>

#include <AggregateFunctions/AggregateFunctionNull.h>

@ -5,9 +5,9 @@

#include <Access/Common/AccessEntityType.h>
#include <Backups/BackupCoordinationReplicatedAccess.h>
#include <Backups/BackupCoordinationStage.h>
#include <Common/ZooKeeper/Common.h>
#include <Common/ZooKeeper/KeeperException.h>
#include <Common/escapeForFileName.h>
#include <Functions/UserDefined/UserDefinedSQLObjectType.h>
#include <IO/ReadBufferFromString.h>
#include <IO/ReadHelpers.h>

@ -1,10 +1,11 @@

#include <Backups/BackupCoordinationRemote.h>
#include <Backups/BackupCoordinationStage.h>
#include <Backups/RestoreCoordinationRemote.h>
#include <Backups/BackupCoordinationStageSync.h>
#include <Functions/UserDefined/UserDefinedSQLObjectType.h>
#include <Common/ZooKeeper/KeeperException.h>
#include <Common/escapeForFileName.h>

namespace DB
{

@ -19,8 +19,8 @@

#include <Databases/IDatabase.h>
#include <Databases/DDLDependencyVisitor.h>
#include <Storages/IStorage.h>
#include <Common/escapeForFileName.h>
#include <Common/quoteString.h>
#include <base/insertAtEnd.h>
#include <boost/algorithm/string/join.hpp>
#include <filesystem>
@ -385,8 +385,7 @@ void ColumnAggregateFunction::updateHashFast(SipHash & hash) const

/// threads, so we can't know the size of these data.
size_t ColumnAggregateFunction::byteSize() const
{
    return data.size() * sizeof(data[0]) + (my_arena ? my_arena->usedBytes() : 0);
}

size_t ColumnAggregateFunction::byteSizeAt(size_t) const

@ -395,11 +394,11 @@ size_t ColumnAggregateFunction::byteSizeAt(size_t) const

    return sizeof(data[0]) + func->sizeOfData();
}

/// Similar to byteSize(), the size is underestimated.
/// In this case it's also overestimated at the same time as it counts all the bytes allocated by the arena, used or not
size_t ColumnAggregateFunction::allocatedBytes() const
{
    return data.allocated_bytes() + (my_arena ? my_arena->allocatedBytes() : 0);
}

void ColumnAggregateFunction::protect()

@ -258,12 +258,11 @@ void ColumnFunction::appendArguments(const ColumnsWithTypeAndName & columns)

void ColumnFunction::appendArgument(const ColumnWithTypeAndName & column)
{
    const auto & argument_types = function->getArgumentTypes();
    auto index = captured_columns.size();
    if (!is_short_circuit_argument && !column.type->equals(*argument_types[index]))
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot capture column {} because it has incompatible type: "
            "got {}, but {} is expected.", argument_types.size(), column.type->getName(), argument_types[index]->getName());

    captured_columns.push_back(column);
}
@ -80,7 +80,8 @@ private:

/// Last contiguous MemoryChunk of memory.
MemoryChunk * head;
size_t size_in_bytes;
size_t allocated_bytes;
size_t used_bytes;
size_t page_size;

static size_t roundUpToPageSize(size_t s, size_t page_size)
@ -119,7 +120,7 @@ private:
void NO_INLINE addMemoryChunk(size_t min_size)
{
head = new MemoryChunk(nextSize(min_size + pad_right), head);
size_in_bytes += head->size();
allocated_bytes += head->size();
}

friend class ArenaAllocator;
@ -127,9 +128,12 @@ private:

public:
explicit Arena(size_t initial_size_ = 4096, size_t growth_factor_ = 2, size_t linear_growth_threshold_ = 128 * 1024 * 1024)
: growth_factor(growth_factor_), linear_growth_threshold(linear_growth_threshold_),
head(new MemoryChunk(initial_size_, nullptr)), size_in_bytes(head->size()),
page_size(static_cast<size_t>(::getPageSize()))
: growth_factor(growth_factor_)
, linear_growth_threshold(linear_growth_threshold_)
, head(new MemoryChunk(initial_size_, nullptr))
, allocated_bytes(head->size())
, used_bytes(0)
, page_size(static_cast<size_t>(::getPageSize()))
{
}

@ -141,6 +145,7 @@ public:
/// Get piece of memory, without alignment.
char * alloc(size_t size)
{
used_bytes += size;
if (unlikely(static_cast<std::ptrdiff_t>(size) > head->end - head->pos))
addMemoryChunk(size);

@ -153,6 +158,7 @@ public:
/// Get piece of memory with alignment
char * alignedAlloc(size_t size, size_t alignment)
{
used_bytes += size;
do
{
void * head_pos = head->pos;
@ -184,6 +190,7 @@ public:
*/
void * rollback(size_t size)
{
used_bytes -= size;
head->pos -= size;
ASAN_POISON_MEMORY_REGION(head->pos, size + pad_right);
return head->pos;
@ -299,11 +306,11 @@ public:
return res;
}

/// Size of MemoryChunks in bytes.
size_t size() const
{
return size_in_bytes;
}
/// Size of all MemoryChunks in bytes.
size_t allocatedBytes() const { return allocated_bytes; }

/// Total space actually used (not counting padding or space unused by caller allocations) in all MemoryChunks in bytes.
size_t usedBytes() const { return used_bytes; }

/// Bad method, don't use it -- the MemoryChunks are not your business, the entire
/// purpose of the arena code is to manage them for you, so if you find
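
The hunks above show why used_bytes must be maintained on every path: alloc() and alignedAlloc() add, rollback() subtracts. A hedged, minimal illustration of that invariant (not the real Arena):

#include <cassert>
#include <cstddef>
#include <vector>

/// Sketch: every path that hands out or reclaims memory updates used_bytes.
struct MiniArena
{
    std::vector<char> buf = std::vector<char>(1024);
    size_t used_bytes = 0;

    char * alloc(size_t n) { used_bytes += n; return buf.data() + used_bytes - n; }
    void rollback(size_t n) { used_bytes -= n; } /// undoes exactly what alloc() recorded
};

int main()
{
    MiniArena a;
    a.alloc(64);
    a.alloc(16);
    a.rollback(16); /// undo the last allocation
    assert(a.used_bytes == 64);
}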
@ -107,10 +107,7 @@ public:
}

/// Size of the allocated pool in bytes
size_t size() const
{
return pool.size();
}
size_t allocatedBytes() const { return pool.allocatedBytes(); }
};

class SynchronizedArenaWithFreeLists : private ArenaWithFreeLists
@ -135,10 +132,10 @@ public:
}

/// Size of the allocated pool in bytes
size_t size() const
size_t allocatedBytes() const
{
std::lock_guard lock{mutex};
return ArenaWithFreeLists::size();
return ArenaWithFreeLists::allocatedBytes();
}
private:
mutable std::mutex mutex;
@ -214,13 +214,19 @@ public:
void setMaxCount(size_t max_count)
{
std::lock_guard lock(mutex);
return cache_policy->setMaxCount(max_count, lock);
cache_policy->setMaxCount(max_count, lock);
}

void setMaxSize(size_t max_size_in_bytes)
{
std::lock_guard lock(mutex);
return cache_policy->setMaxSize(max_size_in_bytes, lock);
cache_policy->setMaxSize(max_size_in_bytes, lock);
}

void setQuotaForUser(const String & user_name, size_t max_size_in_bytes, size_t max_entries)
{
std::lock_guard lock(mutex);
cache_policy->setQuotaForUser(user_name, max_size_in_bytes, max_entries, lock);
}

virtual ~CacheBase() = default;
@ -221,7 +221,7 @@ inline UInt32 updateWeakHash32(const DB::UInt8 * pos, size_t size, DB::UInt32 up
const auto * end = pos + size;
while (pos + 8 <= end)
{
auto word = unalignedLoadLE<UInt64>(pos);
auto word = unalignedLoadLittleEndian<UInt64>(pos);
updated_value = static_cast<UInt32>(intHashCRC32(word, updated_value));

pos += 8;
@ -233,7 +233,7 @@ inline UInt32 updateWeakHash32(const DB::UInt8 * pos, size_t size, DB::UInt32 up
/// Let's assume the string was 'abcdefghXYZ', so its tail is 'XYZ'.
DB::UInt8 tail_size = end - pos;
/// Load tailing 8 bytes. Word is 'defghXYZ'.
auto word = unalignedLoadLE<UInt64>(end - 8);
auto word = unalignedLoadLittleEndian<UInt64>(end - 8);
/// Prepare mask which will set other 5 bytes to 0. It is 0xFFFFFFFFFFFFFFFF << (5 * 8) = 0xFFFFFF0000000000.
/// word & mask = '\0\0\0\0\0XYZ' (bytes are reversed because of little endianness)
word &= (~UInt64(0)) << DB::UInt8(8 * (8 - tail_size));
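
To make the tail masking concrete: for tail_size = 3 the shift is 8 * (8 - 3) = 40 bits, so the mask keeps only the top three bytes of the little-endian word, which hold the last three characters of the string. A quick check:

#include <cstdint>
#include <cstdio>

int main()
{
    uint8_t tail_size = 3;
    uint64_t mask = (~uint64_t(0)) << uint8_t(8 * (8 - tail_size));
    std::printf("%016llx\n", (unsigned long long) mask); /// ffffff0000000000
}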
@ -1,6 +1,7 @@
#pragma once

#include <Common/Exception.h>
#include <Common/ICachePolicyUserQuota.h>

#include <functional>
#include <memory>
@ -38,12 +39,16 @@ public:
MappedPtr mapped;
};

virtual size_t weight(std::lock_guard<std::mutex> & /* cache_lock */) const = 0;
virtual size_t count(std::lock_guard<std::mutex> & /* cache_lock */) const = 0;
virtual size_t maxSize(std::lock_guard<std::mutex>& /* cache_lock */) const = 0;
explicit ICachePolicy(CachePolicyUserQuotaPtr user_quotas_) : user_quotas(std::move(user_quotas_)) {}
virtual ~ICachePolicy() = default;

virtual size_t weight(std::lock_guard<std::mutex> & /*cache_lock*/) const = 0;
virtual size_t count(std::lock_guard<std::mutex> & /*cache_lock*/) const = 0;
virtual size_t maxSize(std::lock_guard<std::mutex>& /*cache_lock*/) const = 0;

virtual void setMaxCount(size_t /*max_count*/, std::lock_guard<std::mutex> & /* cache_lock */) { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Not implemented for cache policy"); }
virtual void setMaxSize(size_t /*max_size_in_bytes*/, std::lock_guard<std::mutex> & /* cache_lock */) { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Not implemented for cache policy"); }
virtual void setQuotaForUser(const String & user_name, size_t max_size_in_bytes, size_t max_entries, std::lock_guard<std::mutex> & /*cache_lock*/) { user_quotas->setQuotaForUser(user_name, max_size_in_bytes, max_entries); }

/// HashFunction usually hashes the entire key and the found key will be equal the provided key. In such cases, use get(). It is also
/// possible to store other, non-hashed data in the key. In that case, the found key is potentially different from the provided key.
@ -51,14 +56,15 @@ public:
virtual MappedPtr get(const Key & key, std::lock_guard<std::mutex> & /* cache_lock */) = 0;
virtual std::optional<KeyMapped> getWithKey(const Key &, std::lock_guard<std::mutex> & /*cache_lock*/) = 0;

virtual void set(const Key & key, const MappedPtr & mapped, std::lock_guard<std::mutex> & /* cache_lock */) = 0;
virtual void set(const Key & key, const MappedPtr & mapped, std::lock_guard<std::mutex> & /*cache_lock*/) = 0;

virtual void remove(const Key & key, std::lock_guard<std::mutex> & /* cache_lock */) = 0;
virtual void remove(const Key & key, std::lock_guard<std::mutex> & /*cache_lock*/) = 0;

virtual void reset(std::lock_guard<std::mutex> & /* cache_lock */) = 0;
virtual void reset(std::lock_guard<std::mutex> & /*cache_lock*/) = 0;
virtual std::vector<KeyMapped> dump() const = 0;

virtual ~ICachePolicy() = default;
protected:
CachePolicyUserQuotaPtr user_quotas;
};

}
43
src/Common/ICachePolicyUserQuota.h
Normal file
@ -0,0 +1,43 @@
#pragma once

#include <base/types.h>

namespace DB
{

/// Per-user quotas for usage of shared caches, used by ICachePolicy.
/// Currently allows to limit
/// - the maximum amount of cache memory a user may consume
/// - the maximum number of items a user can store in the cache
/// Note that caches usually also have global limits which restrict these values at cache level. Per-user quotas have no effect if they
/// exceed the global thresholds.
class ICachePolicyUserQuota
{
public:
/// Register or update the user's quota for the given resource.
virtual void setQuotaForUser(const String & user_name, size_t max_size_in_bytes, size_t max_entries) = 0;

/// Update the actual resource usage for the given user.
virtual void increaseActual(const String & user_name, size_t entry_size_in_bytes) = 0;
virtual void decreaseActual(const String & user_name, size_t entry_size_in_bytes) = 0;

/// Is the user allowed to write a new entry into the cache?
virtual bool approveWrite(const String & user_name, size_t entry_size_in_bytes) const = 0;

virtual ~ICachePolicyUserQuota() = default;
};

using CachePolicyUserQuotaPtr = std::unique_ptr<ICachePolicyUserQuota>;


class NoCachePolicyUserQuota : public ICachePolicyUserQuota
{
public:
void setQuotaForUser(const String & /*user_name*/, size_t /*max_size_in_bytes*/, size_t /*max_entries*/) override {}
void increaseActual(const String & /*user_name*/, size_t /*entry_size_in_bytes*/) override {}
void decreaseActual(const String & /*user_name*/, size_t /*entry_size_in_bytes*/) override {}
bool approveWrite(const String & /*user_name*/, size_t /*entry_size_in_bytes*/) const override { return true; }
};


}
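
The contract implied by this interface: a cache policy asks approveWrite() before inserting an entry for a user, then keeps the accounting current with increaseActual()/decreaseActual(). A minimal stand-in that follows the same semantics (illustrative only, not the ClickHouse classes):

#include <cstdint>
#include <cstdio>
#include <map>
#include <string>

struct TinyQuota
{
    std::map<std::string, size_t> limit; /// 0 or absent = unlimited
    std::map<std::string, size_t> used;

    bool approveWrite(const std::string & user, size_t bytes) const
    {
        auto l = limit.find(user);
        size_t max = (l == limit.end() || l->second == 0) ? SIZE_MAX : l->second;
        auto u = used.find(user);
        size_t cur = (u == used.end()) ? 0 : u->second;
        return cur + bytes < max;
    }
    void increaseActual(const std::string & user, size_t bytes) { used[user] += bytes; }
};

int main()
{
    TinyQuota q;
    q.limit["alice"] = 100;
    std::printf("%d\n", q.approveWrite("alice", 50)); /// 1: within quota
    q.increaseActual("alice", 50);
    std::printf("%d\n", q.approveWrite("alice", 60)); /// 0: 50 + 60 >= 100
}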
@ -27,7 +27,8 @@ public:
* max_count == 0 means no elements size restrictions.
*/
LRUCachePolicy(size_t max_size_in_bytes_, size_t max_count_, OnWeightLossFunction on_weight_loss_function_)
: max_size_in_bytes(std::max(1uz, max_size_in_bytes_))
: Base(std::make_unique<NoCachePolicyUserQuota>())
, max_size_in_bytes(std::max(1uz, max_size_in_bytes_))
, max_count(max_count_)
, on_weight_loss_function(on_weight_loss_function_)
{
@ -10,6 +10,7 @@
M(InsertQuery, "Same as Query, but only for INSERT queries.") \
M(AsyncInsertQuery, "Same as InsertQuery, but only for asynchronous INSERT queries.") \
M(AsyncInsertBytes, "Data size in bytes of asynchronous INSERT queries.") \
M(AsyncInsertRows, "Number of rows inserted by asynchronous INSERT queries.") \
M(AsyncInsertCacheHits, "Number of times a duplicate hash id has been found in asynchronous INSERT hash id cache.") \
M(FailedQuery, "Number of failed queries.") \
M(FailedSelectQuery, "Same as FailedQuery, but only for SELECT queries.") \
@ -31,7 +31,8 @@ public:
*/
/// TODO: construct from special struct with cache policy parameters (also with max_protected_size).
SLRUCachePolicy(size_t max_size_in_bytes_, size_t max_count_, double size_ratio, OnWeightLossFunction on_weight_loss_function_)
: max_protected_size(static_cast<size_t>(max_size_in_bytes_ * std::min(1.0, size_ratio)))
: Base(std::make_unique<NoCachePolicyUserQuota>())
, max_protected_size(static_cast<size_t>(max_size_in_bytes_ * std::min(1.0, size_ratio)))
, max_size_in_bytes(max_size_in_bytes_)
, max_count(max_count_)
, on_weight_loss_function(on_weight_loss_function_)
@ -13,6 +13,7 @@
* (~ 700 MB/sec, 15 million strings per second)
*/

#include <bit>
#include <string>
#include <type_traits>
#include <Core/Defines.h>
@ -21,6 +22,7 @@
#include <base/unaligned.h>
#include <Common/Exception.h>


namespace DB
{
namespace ErrorCodes
@ -29,15 +31,13 @@ namespace ErrorCodes
}
}

#define ROTL(x, b) static_cast<UInt64>(((x) << (b)) | ((x) >> (64 - (b))))

#define SIPROUND \
do \
{ \
v0 += v1; v1 = ROTL(v1, 13); v1 ^= v0; v0 = ROTL(v0, 32); \
v2 += v3; v3 = ROTL(v3, 16); v3 ^= v2; \
v0 += v3; v3 = ROTL(v3, 21); v3 ^= v0; \
v2 += v1; v1 = ROTL(v1, 17); v1 ^= v2; v2 = ROTL(v2, 32); \
v0 += v1; v1 = std::rotl(v1, 13); v1 ^= v0; v0 = std::rotl(v0, 32); \
v2 += v3; v3 = std::rotl(v3, 16); v3 ^= v2; \
v0 += v3; v3 = std::rotl(v3, 21); v3 ^= v0; \
v2 += v1; v1 = std::rotl(v1, 17); v1 ^= v2; v2 = std::rotl(v2, 32); \
} while(0)

/// Define macro CURRENT_BYTES_IDX for building index used in current_bytes array
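
The SIPROUND change swaps the hand-rolled ROTL macro for C++20 std::rotl from <bit>, which compiles to the same rotate instruction. A small equivalence check:

#include <bit>
#include <cassert>
#include <cstdint>

#define ROTL(x, b) static_cast<uint64_t>(((x) << (b)) | ((x) >> (64 - (b))))

int main()
{
    uint64_t v = 0x0123456789ABCDEFULL;
    assert(ROTL(v, 13) == std::rotl(v, 13));
    assert(ROTL(v, 32) == std::rotl(v, 32));
}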
@ -136,7 +136,7 @@ public:

while (data + 8 <= end)
{
current_word = unalignedLoadLE<UInt64>(data);
current_word = unalignedLoadLittleEndian<UInt64>(data);

v3 ^= current_word;
SIPROUND;
@ -242,14 +242,16 @@ public:
SIPROUND;
SIPROUND;
auto hi = v0 ^ v1 ^ v2 ^ v3;

if constexpr (std::endian::native == std::endian::big)
{
lo = __builtin_bswap64(lo);
hi = __builtin_bswap64(hi);
lo = std::byteswap(lo);
hi = std::byteswap(hi);
auto tmp = hi;
hi = lo;
lo = tmp;
}

UInt128 res = hi;
res <<= 64;
res |= lo;
@ -3,7 +3,6 @@
#include <base/defines.h>
#include <base/StringRef.h>
#include <Common/HashTable/StringHashMap.h>
#include <Common/Arena.h>
#include <Poco/Util/AbstractConfiguration.h>
#include <mutex>
#include <string>
@ -11,6 +10,7 @@

namespace DB
{
class Arena;

enum TLDType
{
@ -2,11 +2,80 @@

#include <Common/ICachePolicy.h>

#include <limits>
#include <unordered_map>

namespace DB
{

class PerUserTTLCachePolicyUserQuota : public ICachePolicyUserQuota
{
public:
void setQuotaForUser(const String & user_name, size_t max_size_in_bytes, size_t max_entries) override
{
quotas[user_name] = {max_size_in_bytes, max_entries};
}

void increaseActual(const String & user_name, size_t entry_size_in_bytes) override
{
auto & actual_for_user = actual[user_name];
actual_for_user.size_in_bytes += entry_size_in_bytes;
actual_for_user.num_items += 1;
}

void decreaseActual(const String & user_name, size_t entry_size_in_bytes) override
{
chassert(actual.contains(user_name));

chassert(actual[user_name].size_in_bytes >= entry_size_in_bytes);
actual[user_name].size_in_bytes -= entry_size_in_bytes;

chassert(actual[user_name].num_items >= 1);
actual[user_name].num_items -= 1;
}

bool approveWrite(const String & user_name, size_t entry_size_in_bytes) const override
{
auto it_actual = actual.find(user_name);
Resources actual_for_user{.size_in_bytes = 0, .num_items = 0}; /// assume zero actual resource consumption if user isn't found
if (it_actual != actual.end())
actual_for_user = it_actual->second;

auto it_quota = quotas.find(user_name);
Resources quota_for_user{.size_in_bytes = std::numeric_limits<size_t>::max(), .num_items = std::numeric_limits<size_t>::max()}; /// assume no threshold if no quota is found
if (it_quota != quotas.end())
quota_for_user = it_quota->second;

/// Special case: A quota configured as 0 means no threshold
if (quota_for_user.size_in_bytes == 0)
quota_for_user.size_in_bytes = std::numeric_limits<UInt64>::max();
if (quota_for_user.num_items == 0)
quota_for_user.num_items = std::numeric_limits<UInt64>::max();

/// Check size quota
if (actual_for_user.size_in_bytes + entry_size_in_bytes >= quota_for_user.size_in_bytes)
return false;

/// Check items quota
if (actual_for_user.num_items + 1 >= quota_for_user.num_items)
return false;

return true;
}

struct Resources
{
size_t size_in_bytes = 0;
size_t num_items = 0;
};

/// user name --> cache size quota (in bytes) / number of items quota
std::map<String, Resources> quotas;
/// user name --> actual cache usage (in bytes) / number of items
std::map<String, Resources> actual;
};
/// TTLCachePolicy evicts entries for which IsStaleFunction returns true.
/// The cache size (in bytes and number of entries) can be changed at runtime. It is expected to set both sizes explicitly after construction.
template <typename Key, typename Mapped, typename HashFunction, typename WeightFunction, typename IsStaleFunction>
@ -18,8 +87,9 @@ public:
using typename Base::KeyMapped;
using typename Base::OnWeightLossFunction;

TTLCachePolicy()
: max_size_in_bytes(0)
explicit TTLCachePolicy(CachePolicyUserQuotaPtr quotas_)
: Base(std::move(quotas_))
, max_size_in_bytes(0)
, max_count(0)
{
}
@ -61,8 +131,10 @@ public:
auto it = cache.find(key);
if (it == cache.end())
return;
size_in_bytes -= weight_function(*it->second);
size_t sz = weight_function(*it->second);
Base::user_quotas->decreaseActual(it->first.user_name, sz);
cache.erase(it);
size_in_bytes -= sz;
}

MappedPtr get(const Key & key, std::lock_guard<std::mutex> & /* cache_lock */) override
@ -88,35 +160,47 @@ public:

const size_t entry_size_in_bytes = weight_function(*mapped);

/// Checks against per-cache limits
auto sufficient_space_in_cache = [&]()
{
return (size_in_bytes + entry_size_in_bytes <= max_size_in_bytes) && (cache.size() + 1 <= max_count);
};

if (!sufficient_space_in_cache())
/// Checks against per-user limits
auto sufficient_space_in_cache_for_user = [&]()
{
return Base::user_quotas->approveWrite(key.user_name, entry_size_in_bytes);
};

if (!sufficient_space_in_cache() || !sufficient_space_in_cache_for_user())
{
/// Remove stale entries
for (auto it = cache.begin(); it != cache.end();)
if (is_stale_function(it->first))
{
size_in_bytes -= weight_function(*it->second);
size_t sz = weight_function(*it->second);
Base::user_quotas->decreaseActual(it->first.user_name, sz);
it = cache.erase(it);
size_in_bytes -= sz;
}
else
++it;
}

if (sufficient_space_in_cache())
if (sufficient_space_in_cache() && sufficient_space_in_cache_for_user())
{
/// Insert or replace key
if (auto it = cache.find(key); it != cache.end())
{
size_in_bytes -= weight_function(*it->second);
size_t sz = weight_function(*it->second);
Base::user_quotas->decreaseActual(it->first.user_name, sz);
cache.erase(it); // stupid bug: (*) doesn't replace existing entries (likely due to custom hash function), need to erase explicitly
size_in_bytes -= sz;
}

cache[key] = std::move(mapped); // (*)
size_in_bytes += entry_size_in_bytes;
Base::user_quotas->increaseActual(key.user_name, entry_size_in_bytes);
}
}
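
Reviewer note on the control flow above: insertion now has two independent admission gates, the global cache limits and the per-user quota; stale entries are evicted once if either gate fails, and both gates are re-checked before the write. A condensed sketch of that flow (names and numbers illustrative):

#include <cstddef>
#include <cstdio>

/// Condensed admission flow: both gates must pass, before and after eviction.
bool globalGate(size_t total, size_t entry, size_t max_bytes, size_t count, size_t max_count)
{
    return total + entry <= max_bytes && count + 1 <= max_count;
}

int main()
{
    size_t total = 900, entry = 200, max_bytes = 1000, count = 3, max_count = 10;
    bool user_gate = true; /// stand-in for user_quotas->approveWrite(...)
    if (!globalGate(total, entry, max_bytes, count, max_count) || !user_gate)
    {
        total -= 300; /// pretend stale entries worth 300 bytes were evicted
        count -= 1;
    }
    if (globalGate(total, entry, max_bytes, count, max_count) && user_gate)
        std::puts("insert approved after eviction"); /// 600 + 200 <= 1000
}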
@ -1,48 +1,14 @@
#include <Common/ZooKeeper/ZooKeeperIO.h>


namespace Coordination
{

void write(size_t x, WriteBuffer & out)
{
x = __builtin_bswap64(x);
writeBinary(x, out);
}

#ifdef OS_DARWIN
void write(uint64_t x, WriteBuffer & out)
{
x = __builtin_bswap64(x);
writeBinary(x, out);
}
#endif

void write(int64_t x, WriteBuffer & out)
{
x = __builtin_bswap64(x);
writeBinary(x, out);
}
void write(int32_t x, WriteBuffer & out)
{
x = __builtin_bswap32(x);
writeBinary(x, out);
}

void write(uint8_t x, WriteBuffer & out)
{
writeBinary(x, out);
}

void write(OpNum x, WriteBuffer & out)
{
write(static_cast<int32_t>(x), out);
}

void write(bool x, WriteBuffer & out)
{
writeBinary(x, out);
}

void write(const std::string & s, WriteBuffer & out)
{
write(static_cast<int32_t>(s.size()), out);
@ -76,37 +42,6 @@ void write(const Error & x, WriteBuffer & out)
write(static_cast<int32_t>(x), out);
}

#ifdef OS_DARWIN
void read(uint64_t & x, ReadBuffer & in)
{
readBinary(x, in);
x = __builtin_bswap64(x);
}
#endif

void read(size_t & x, ReadBuffer & in)
{
readBinary(x, in);
x = __builtin_bswap64(x);
}

void read(int64_t & x, ReadBuffer & in)
{
readBinary(x, in);
x = __builtin_bswap64(x);
}

void read(uint8_t & x, ReadBuffer & in)
{
readBinary(x, in);
}

void read(int32_t & x, ReadBuffer & in)
{
readBinary(x, in);
x = __builtin_bswap32(x);
}

void read(OpNum & x, ReadBuffer & in)
{
int32_t raw_op_num;
@ -114,16 +49,6 @@ void read(OpNum & x, ReadBuffer & in)
x = getOpNum(raw_op_num);
}

void read(bool & x, ReadBuffer & in)
{
readBinary(x, in);
}

void read(int8_t & x, ReadBuffer & in)
{
readBinary(x, in);
}

void read(std::string & s, ReadBuffer & in)
{
int32_t size = 0;
@ -1,4 +1,5 @@
#pragma once

#include <IO/WriteHelpers.h>
#include <IO/ReadHelpers.h>
#include <IO/Operators.h>
@ -8,23 +9,20 @@
#include <vector>
#include <array>


namespace Coordination
{

using namespace DB;

void write(size_t x, WriteBuffer & out);
template <typename T>
requires is_arithmetic_v<T>
void write(T x, WriteBuffer & out)
{
writeBinaryBigEndian(x, out);
}

/// uint64_t != size_t on darwin
#ifdef OS_DARWIN
void write(uint64_t x, WriteBuffer & out);
#endif

void write(int64_t x, WriteBuffer & out);
void write(int32_t x, WriteBuffer & out);
void write(uint8_t x, WriteBuffer & out);
void write(OpNum x, WriteBuffer & out);
void write(bool x, WriteBuffer & out);
void write(const std::string & s, WriteBuffer & out);
void write(const ACL & acl, WriteBuffer & out);
void write(const Stat & stat, WriteBuffer & out);
@ -45,16 +43,14 @@ void write(const std::vector<T> & arr, WriteBuffer & out)
write(elem, out);
}

void read(size_t & x, ReadBuffer & in);
#ifdef OS_DARWIN
void read(uint64_t & x, ReadBuffer & in);
#endif
void read(int64_t & x, ReadBuffer & in);
void read(int32_t & x, ReadBuffer & in);
void read(uint8_t & x, ReadBuffer & in);
template <typename T>
requires is_arithmetic_v<T>
void read(T & x, ReadBuffer & in)
{
readBinaryBigEndian(x, in);
}

void read(OpNum & x, ReadBuffer & in);
void read(bool & x, ReadBuffer & in);
void read(int8_t & x, ReadBuffer & in);
void read(std::string & s, ReadBuffer & in);
void read(ACL & acl, ReadBuffer & in);
void read(Stat & stat, ReadBuffer & in);
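
The idea of this header change: two constrained templates replace the per-type read/write overloads, with writeBinaryBigEndian/readBinaryBigEndian doing the byte-order work. A self-contained sketch of the same pattern (std::byteswap is C++23; none of this is the ClickHouse API):

#include <bit>
#include <concepts>
#include <cstdint>
#include <cstdio>
#include <cstring>

template <std::integral T>
void writeBigEndian(T x, unsigned char * out)
{
    if constexpr (std::endian::native == std::endian::little)
        x = std::byteswap(x); /// flip to network byte order on little-endian hosts
    std::memcpy(out, &x, sizeof(x));
}

int main()
{
    unsigned char buf[4];
    writeBigEndian<uint32_t>(0x0A0B0C0D, buf);
    std::printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]); /// 0a 0b 0c 0d
}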
@ -270,7 +270,7 @@ int main(int argc, char ** argv)

watch.stop();
std::cerr
<< "Insert into arena. Bytes: " << arena.size()
<< "Insert into arena. Bytes: " << arena.allocatedBytes()
<< ", elapsed: " << watch.elapsedSeconds()
<< " (" << data.size() / watch.elapsedSeconds() << " elem/sec.,"
<< " " << sum_strings_size / 1048576.0 / watch.elapsedSeconds() << " MiB/sec.)"
@ -298,7 +298,7 @@ int main(int argc, char ** argv)

watch.stop();
std::cerr
<< "Randomly remove and insert elements. Bytes: " << arena.size()
<< "Randomly remove and insert elements. Bytes: " << arena.allocatedBytes()
<< ", elapsed: " << watch.elapsedSeconds()
<< " (" << data.size() / watch.elapsedSeconds() << " elem/sec.,"
<< " " << bytes / 1048576.0 / watch.elapsedSeconds() << " MiB/sec.)"
@ -331,7 +331,7 @@ int main(int argc, char ** argv)

watch.stop();
std::cerr
<< "Filling cache. Bytes: " << arena.size()
<< "Filling cache. Bytes: " << arena.allocatedBytes()
<< ", elapsed: " << watch.elapsedSeconds()
<< " (" << data.size() / watch.elapsedSeconds() << " elem/sec.,"
<< " " << bytes / 1048576.0 / watch.elapsedSeconds() << " MiB/sec.)"
@ -47,7 +47,7 @@ void setThreadName(const char * name)
#endif
DB::throwFromErrno("Cannot set thread name with prctl(PR_SET_NAME, ...)", DB::ErrorCodes::PTHREAD_ERROR);

memcpy(thread_name, name, 1 + strlen(name));
memcpy(thread_name, name, std::min<size_t>(1 + strlen(name), THREAD_NAME_SIZE - 1));
}

const char * getThreadName()
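
Why the std::min matters: Linux caps prctl(PR_SET_NAME) names at 16 bytes including the terminator, so copying 1 + strlen(name) blindly can overrun the destination buffer for long names. A small demonstration of the truncating copy (buffer size illustrative):

#include <algorithm>
#include <cstdio>
#include <cstring>

int main()
{
    constexpr size_t THREAD_NAME_SIZE = 16;
    char thread_name[THREAD_NAME_SIZE]{};
    const char * name = "a-very-long-thread-name-indeed";
    std::memcpy(thread_name, name, std::min<size_t>(1 + std::strlen(name), THREAD_NAME_SIZE - 1));
    thread_name[THREAD_NAME_SIZE - 1] = '\0'; /// keep it terminated after a truncating copy
    std::puts(thread_name); /// "a-very-long-thr"
}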
@ -13,9 +13,17 @@ void test_find_first_not(const std::string & haystack, std::size_t expected_pos)
ASSERT_EQ(begin + expected_pos, find_first_not_symbols<symbols...>(begin, end));
}

void test_find_first_not(const std::string & haystack, const std::string & symbols, const std::size_t expected_pos)
{
const char * begin = haystack.data();

ASSERT_EQ(begin + expected_pos, find_first_not_symbols(haystack, SearchSymbols(symbols)));
}


TEST(FindSymbols, SimpleTest)
{
std::string s = "Hello, world! Goodbye...";
const std::string s = "Hello, world! Goodbye...";
const char * begin = s.data();
const char * end = s.data() + s.size();

@ -26,6 +34,9 @@ TEST(FindSymbols, SimpleTest)
ASSERT_EQ(find_first_symbols<'H'>(begin, end), begin);
ASSERT_EQ((find_first_symbols<'a', 'e'>(begin, end)), begin + 1);

ASSERT_EQ((find_first_symbols<'a', 'e', 'w', 'x', 'z'>(begin, end)), begin + 1);
ASSERT_EQ((find_first_symbols<'p', 'q', 's', 'x', 'z'>(begin, end)), end);

ASSERT_EQ(find_last_symbols_or_null<'a'>(begin, end), nullptr);
ASSERT_EQ(find_last_symbols_or_null<'e'>(begin, end), end - 4);
ASSERT_EQ(find_last_symbols_or_null<'.'>(begin, end), end - 1);
@ -46,6 +57,90 @@ TEST(FindSymbols, SimpleTest)
}
}

TEST(FindSymbols, RunTimeNeedle)
{
auto test_haystack = [](const auto & haystack, const auto & unfindable_needle) {
#define TEST_HAYSTACK_AND_NEEDLE(haystack_, needle_) \
do { \
const auto & h = haystack_; \
const auto & n = needle_; \
EXPECT_EQ( \
std::find_first_of(h.data(), h.data() + h.size(), n.data(), n.data() + n.size()), \
find_first_symbols(h, SearchSymbols(n)) \
) << "haystack: \"" << h << "\" (" << static_cast<const void*>(h.data()) << ")" \
<< ", needle: \"" << n << "\""; \
} \
while (false)

// can't find needle
TEST_HAYSTACK_AND_NEEDLE(haystack, unfindable_needle);

#define TEST_WITH_MODIFIED_NEEDLE(haystack, in_needle, needle_update_statement) \
do \
{ \
std::string needle = (in_needle); \
(needle_update_statement); \
TEST_HAYSTACK_AND_NEEDLE(haystack, needle); \
} \
while (false)

// findable symbol is at beginning of the needle
// Can find at first pos of haystack
TEST_WITH_MODIFIED_NEEDLE(haystack, unfindable_needle, needle.front() = haystack.front());
// Can find at last pos of haystack
TEST_WITH_MODIFIED_NEEDLE(haystack, unfindable_needle, needle.front() = haystack.back());
// Can find in the middle of haystack
TEST_WITH_MODIFIED_NEEDLE(haystack, unfindable_needle, needle.front() = haystack[haystack.size() / 2]);

// findable symbol is at end of the needle
// Can find at first pos of haystack
TEST_WITH_MODIFIED_NEEDLE(haystack, unfindable_needle, needle.back() = haystack.front());
// Can find at last pos of haystack
TEST_WITH_MODIFIED_NEEDLE(haystack, unfindable_needle, needle.back() = haystack.back());
// Can find in the middle of haystack
TEST_WITH_MODIFIED_NEEDLE(haystack, unfindable_needle, needle.back() = haystack[haystack.size() / 2]);

// findable symbol is in the middle of the needle
// Can find at first pos of haystack
TEST_WITH_MODIFIED_NEEDLE(haystack, unfindable_needle, needle[needle.size() / 2] = haystack.front());
// Can find at last pos of haystack
TEST_WITH_MODIFIED_NEEDLE(haystack, unfindable_needle, needle[needle.size() / 2] = haystack.back());
// Can find in the middle of haystack
TEST_WITH_MODIFIED_NEEDLE(haystack, unfindable_needle, needle[needle.size() / 2] = haystack[haystack.size() / 2]);

#undef TEST_WITH_MODIFIED_NEEDLE
#undef TEST_HAYSTACK_AND_NEEDLE
};

// there are 4 major groups of cases:
// haystack < 16 bytes, haystack > 16 bytes
// needle < 5 bytes, needle >= 5 bytes

// First and last symbols of haystack should be unique
const std::string long_haystack = "Hello, world! Goodbye...?";
const std::string short_haystack = "Hello, world!";

// In sync with find_first_symbols_dispatch code: long needles receive special treatment.
// as of now "long" means >= 5
const std::string unfindable_long_needle = "0123456789ABCDEF";
const std::string unfindable_short_needle = "0123";

{
SCOPED_TRACE("Long haystack");
test_haystack(long_haystack, unfindable_long_needle);
test_haystack(long_haystack, unfindable_short_needle);
}

{
SCOPED_TRACE("Short haystack");
test_haystack(short_haystack, unfindable_long_needle);
test_haystack(short_haystack, unfindable_short_needle);
}

// Assert big haystack is not accepted and exception is thrown
ASSERT_ANY_THROW(find_first_symbols(long_haystack, SearchSymbols("ABCDEFIJKLMNOPQRSTUVWXYZacfghijkmnpqstuvxz")));
}

TEST(FindNotSymbols, AllSymbolsPresent)
{
std::string str_with_17_bytes = "hello world hello";
@ -64,6 +159,12 @@ TEST(FindNotSymbols, AllSymbolsPresent)
test_find_first_not<'h', 'e', 'l', 'o', 'w', 'r', 'd', ' '>(str_with_15_bytes, str_with_15_bytes.size());
test_find_first_not<'h', 'e', 'l', 'o', 'w', 'r', 'd', ' '>(str_with_16_bytes, str_with_16_bytes.size());
test_find_first_not<'h', 'e', 'l', 'o', 'w', 'r', 'd', ' '>(str_with_17_bytes, str_with_17_bytes.size());

const auto * symbols = "helowrd ";

test_find_first_not(str_with_15_bytes, symbols, str_with_15_bytes.size());
test_find_first_not(str_with_16_bytes, symbols, str_with_16_bytes.size());
test_find_first_not(str_with_17_bytes, symbols, str_with_17_bytes.size());
}

TEST(FindNotSymbols, NoSymbolsMatch)
@ -72,24 +173,28 @@ TEST(FindNotSymbols, NoSymbolsMatch)

// begin should be returned since the first character of the string does not match any of the below symbols
test_find_first_not<'h', 'i', 'j'>(s, 0u);
test_find_first_not(s, "hij", 0u);
}

TEST(FindNotSymbols, ExtraSymbols)
{
std::string s = "hello_world_hello";
test_find_first_not<'h', 'e', 'l', 'o', ' '>(s, 5u);
test_find_first_not(s, "helo ", 5u);
}

TEST(FindNotSymbols, EmptyString)
{
std::string s;
test_find_first_not<'h', 'e', 'l', 'o', 'w', 'r', 'd', ' '>(s, s.size());
test_find_first_not(s, "helowrd ", s.size());
}

TEST(FindNotSymbols, SingleChar)
{
std::string s = "a";
test_find_first_not<'a'>(s, s.size());
test_find_first_not(s, "a", s.size());
}

TEST(FindNotSymbols, NullCharacter)
@ -99,4 +204,5 @@ TEST(FindNotSymbols, NullCharacter)
// to \0.
std::string s("abcdefg\0x", 9u);
test_find_first_not<'a', 'b', 'c', 'd', 'e', 'f', 'g'>(s, 7u);
test_find_first_not(s, "abcdefg", 7u);
}
@ -10,6 +10,7 @@
#include <Compression/ICompressionCodec.h>
#include <Compression/CompressionFactory.h>
#include <IO/ReadBuffer.h>
#include <IO/ReadBufferFromMemory.h>
#include <IO/BufferWithOwnMemory.h>
#include <Compression/CompressionInfo.h>
#include <IO/WriteHelpers.h>
@ -191,7 +192,11 @@ size_t CompressedReadBufferBase::readCompressedData(size_t & size_decompressed,

if (!disable_checksum)
{
Checksum & checksum = *reinterpret_cast<Checksum *>(own_compressed_buffer.data());
Checksum checksum;
ReadBufferFromMemory checksum_in(own_compressed_buffer.data(), sizeof(checksum));
readBinaryLittleEndian(checksum.first, checksum_in);
readBinaryLittleEndian(checksum.second, checksum_in);

validateChecksum(compressed_buffer, size_compressed_without_checksum, checksum);
}

@ -231,7 +236,11 @@ size_t CompressedReadBufferBase::readCompressedDataBlockForAsynchronous(size_t &

if (!disable_checksum)
{
Checksum & checksum = *reinterpret_cast<Checksum *>(own_compressed_buffer.data());
Checksum checksum;
ReadBufferFromMemory checksum_in(own_compressed_buffer.data(), sizeof(checksum));
readBinaryLittleEndian(checksum.first, checksum_in);
readBinaryLittleEndian(checksum.second, checksum_in);

validateChecksum(compressed_buffer, size_compressed_without_checksum, checksum);
}

@ -319,5 +328,4 @@ CompressedReadBufferBase::CompressedReadBufferBase(ReadBuffer * in, bool allow_d

CompressedReadBufferBase::~CompressedReadBufferBase() = default; /// Proper destruction of unique_ptr of forward-declared type.


}
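
The fix replaces a reinterpret_cast of the buffer with explicit little-endian reads of the two 64-bit checksum halves, which also works on big-endian machines. A portable sketch of such a read (helper name illustrative):

#include <cstdint>
#include <cstdio>

/// Decode a 64-bit little-endian value byte by byte, independent of host order.
uint64_t loadLE64(const unsigned char * p)
{
    uint64_t x = 0;
    for (int i = 7; i >= 0; --i)
        x = (x << 8) | p[i]; /// byte 0 ends up least significant
    return x;
}

int main()
{
    unsigned char buf[16] = {1, 0, 0, 0, 0, 0, 0, 0,   2, 0, 0, 0, 0, 0, 0, 0};
    std::printf("%llu %llu\n",
                (unsigned long long) loadLE64(buf),      /// 1: checksum.first
                (unsigned long long) loadLE64(buf + 8)); /// 2: checksum.second
}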
@ -5,19 +5,15 @@
#include <base/unaligned.h>
#include <base/defines.h>

#include <IO/WriteHelpers.h>

#include <Compression/CompressionFactory.h>
#include "CompressedWriteBuffer.h"
#include <Compression/CompressedWriteBuffer.h>


namespace DB
{

namespace ErrorCodes
{
}

static constexpr auto CHECKSUM_SIZE{sizeof(CityHash_v1_0_2::uint128)};

void CompressedWriteBuffer::nextImpl()
{
if (!offset())
@ -29,21 +25,23 @@ void CompressedWriteBuffer::nextImpl()

/** During compression we need buffer with capacity >= compressed_reserve_size + CHECKSUM_SIZE.
*
* If output buffer has necessary capacity, we can compress data directly in output buffer.
* If output buffer has necessary capacity, we can compress data directly into the output buffer.
* Then we can write checksum at the output buffer begin.
*
* If output buffer does not have necessary capacity. Compress data in temporary buffer.
* Then we can write checksum and temporary buffer in output buffer.
* If output buffer does not have necessary capacity, compress data into a temporary buffer.
* Then we can write the checksum and copy the temporary buffer into the output buffer.
*/
if (out.available() >= compressed_reserve_size + CHECKSUM_SIZE)
if (out.available() >= compressed_reserve_size + sizeof(CityHash_v1_0_2::uint128))
{
char * out_checksum_ptr = out.position();
char * out_compressed_ptr = out.position() + CHECKSUM_SIZE;
char * out_compressed_ptr = out.position() + sizeof(CityHash_v1_0_2::uint128);
UInt32 compressed_size = codec->compress(working_buffer.begin(), decompressed_size, out_compressed_ptr);

CityHash_v1_0_2::uint128 checksum = CityHash_v1_0_2::CityHash128(out_compressed_ptr, compressed_size);
memcpy(out_checksum_ptr, reinterpret_cast<const char *>(&checksum), CHECKSUM_SIZE);
out.position() += CHECKSUM_SIZE + compressed_size;

writeBinaryLittleEndian(checksum.first, out);
writeBinaryLittleEndian(checksum.second, out);

out.position() += compressed_size;
}
else
{
@ -51,7 +49,10 @@ void CompressedWriteBuffer::nextImpl()
UInt32 compressed_size = codec->compress(working_buffer.begin(), decompressed_size, compressed_buffer.data());

CityHash_v1_0_2::uint128 checksum = CityHash_v1_0_2::CityHash128(compressed_buffer.data(), compressed_size);
out.write(reinterpret_cast<const char *>(&checksum), CHECKSUM_SIZE);

writeBinaryLittleEndian(checksum.first, out);
writeBinaryLittleEndian(checksum.second, out);

out.write(compressed_buffer.data(), compressed_size);
}
}
@ -293,7 +293,7 @@ UInt32 compressDataForType(const char * source, UInt32 source_size, char * dest)
const char * dest_start = dest;

const UInt32 items_count = source_size / sizeof(ValueType);
unalignedStoreLE<UInt32>(dest, items_count);
unalignedStoreLittleEndian<UInt32>(dest, items_count);
dest += sizeof(items_count);

ValueType prev_value{};
@ -301,8 +301,8 @@ UInt32 compressDataForType(const char * source, UInt32 source_size, char * dest)

if (source < source_end)
{
prev_value = unalignedLoadLE<ValueType>(source);
unalignedStoreLE<ValueType>(dest, prev_value);
prev_value = unalignedLoadLittleEndian<ValueType>(source);
unalignedStoreLittleEndian<ValueType>(dest, prev_value);

source += sizeof(prev_value);
dest += sizeof(prev_value);
@ -310,10 +310,10 @@ UInt32 compressDataForType(const char * source, UInt32 source_size, char * dest)

if (source < source_end)
{
const ValueType curr_value = unalignedLoadLE<ValueType>(source);
const ValueType curr_value = unalignedLoadLittleEndian<ValueType>(source);

prev_delta = curr_value - prev_value;
unalignedStoreLE<UnsignedDeltaType>(dest, prev_delta);
unalignedStoreLittleEndian<UnsignedDeltaType>(dest, prev_delta);

source += sizeof(curr_value);
dest += sizeof(prev_delta);
@ -325,7 +325,7 @@ UInt32 compressDataForType(const char * source, UInt32 source_size, char * dest)
int item = 2;
for (; source < source_end; source += sizeof(ValueType), ++item)
{
const ValueType curr_value = unalignedLoadLE<ValueType>(source);
const ValueType curr_value = unalignedLoadLittleEndian<ValueType>(source);

const UnsignedDeltaType delta = curr_value - prev_value;
const UnsignedDeltaType double_delta = delta - prev_delta;
@ -369,7 +369,7 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest,
if (source + sizeof(UInt32) > source_end)
return;

const UInt32 items_count = unalignedLoadLE<UInt32>(source);
const UInt32 items_count = unalignedLoadLittleEndian<UInt32>(source);
source += sizeof(items_count);

ValueType prev_value{};
@ -379,10 +379,10 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest,
if (source + sizeof(ValueType) > source_end || items_count < 1)
return;

prev_value = unalignedLoadLE<ValueType>(source);
prev_value = unalignedLoadLittleEndian<ValueType>(source);
if (dest + sizeof(prev_value) > output_end)
throw Exception(ErrorCodes::CANNOT_DECOMPRESS, "Cannot decompress the data");
unalignedStoreLE<ValueType>(dest, prev_value);
unalignedStoreLittleEndian<ValueType>(dest, prev_value);

source += sizeof(prev_value);
dest += sizeof(prev_value);
@ -391,11 +391,11 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest,
if (source + sizeof(UnsignedDeltaType) > source_end || items_count < 2)
return;

prev_delta = unalignedLoadLE<UnsignedDeltaType>(source);
prev_delta = unalignedLoadLittleEndian<UnsignedDeltaType>(source);
prev_value = prev_value + static_cast<ValueType>(prev_delta);
if (dest + sizeof(prev_value) > output_end)
throw Exception(ErrorCodes::CANNOT_DECOMPRESS, "Cannot decompress the data");
unalignedStoreLE<ValueType>(dest, prev_value);
unalignedStoreLittleEndian<ValueType>(dest, prev_value);

source += sizeof(prev_delta);
dest += sizeof(prev_value);
@ -428,7 +428,7 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest,
const ValueType curr_value = prev_value + delta;
if (dest + sizeof(curr_value) > output_end)
throw Exception(ErrorCodes::CANNOT_DECOMPRESS, "Cannot decompress the data");
unalignedStoreLE<ValueType>(dest, curr_value);
unalignedStoreLittleEndian<ValueType>(dest, curr_value);
dest += sizeof(curr_value);

prev_delta = curr_value - prev_value;
@ -205,7 +205,7 @@ UInt32 compressDataForType(const char * source, UInt32 source_size, char * dest,

const UInt32 items_count = source_size / sizeof(T);

unalignedStoreLE<UInt32>(dest, items_count);
unalignedStoreLittleEndian<UInt32>(dest, items_count);
dest += sizeof(items_count);

T prev_value = 0;
@ -214,8 +214,8 @@ UInt32 compressDataForType(const char * source, UInt32 source_size, char * dest,

if (source < source_end)
{
prev_value = unalignedLoadLE<T>(source);
unalignedStoreLE<T>(dest, prev_value);
prev_value = unalignedLoadLittleEndian<T>(source);
unalignedStoreLittleEndian<T>(dest, prev_value);

source += sizeof(prev_value);
dest += sizeof(prev_value);
@ -229,7 +229,7 @@ UInt32 compressDataForType(const char * source, UInt32 source_size, char * dest,

while (source < source_end)
{
const T curr_value = unalignedLoadLE<T>(source);
const T curr_value = unalignedLoadLittleEndian<T>(source);
source += sizeof(curr_value);

const auto xored_data = curr_value ^ prev_value;
@ -271,7 +271,7 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest)
if (source + sizeof(UInt32) > source_end)
return;

const UInt32 items_count = unalignedLoadLE<UInt32>(source);
const UInt32 items_count = unalignedLoadLittleEndian<UInt32>(source);
source += sizeof(items_count);

T prev_value = 0;
@ -280,8 +280,8 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest)
if (source + sizeof(T) > source_end || items_count < 1)
return;

prev_value = unalignedLoadLE<T>(source);
unalignedStoreLE<T>(dest, prev_value);
prev_value = unalignedLoadLittleEndian<T>(source);
unalignedStoreLittleEndian<T>(dest, prev_value);

source += sizeof(prev_value);
dest += sizeof(prev_value);
@ -326,7 +326,7 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest)
}
// else: 0b0 prefix - use prev_value

unalignedStoreLE<T>(dest, curr_value);
unalignedStoreLittleEndian<T>(dest, curr_value);
dest += sizeof(curr_value);

prev_xored_info = curr_xored_info;
@ -326,7 +326,7 @@ void load(const char * src, T * buf, UInt32 tail = 64)
/// as little-endian types on big-endian machine (s390x, etc).
for (UInt32 i = 0; i < tail; ++i)
{
buf[i] = unalignedLoadLE<T>(src + i * sizeof(T));
buf[i] = unalignedLoadLittleEndian<T>(src + i * sizeof(T));
}
}
}
|
@ -86,8 +86,8 @@ UInt32 ICompressionCodec::compress(const char * source, UInt32 source_size, char
|
||||
UInt8 header_size = getHeaderSize();
|
||||
/// Write data from header_size
|
||||
UInt32 compressed_bytes_written = doCompressData(source, source_size, &dest[header_size]);
|
||||
unalignedStoreLE<UInt32>(&dest[1], compressed_bytes_written + header_size);
|
||||
unalignedStoreLE<UInt32>(&dest[5], source_size);
|
||||
unalignedStoreLittleEndian<UInt32>(&dest[1], compressed_bytes_written + header_size);
|
||||
unalignedStoreLittleEndian<UInt32>(&dest[5], source_size);
|
||||
return header_size + compressed_bytes_written;
|
||||
}
|
||||
|
||||
@ -114,7 +114,7 @@ UInt32 ICompressionCodec::decompress(const char * source, UInt32 source_size, ch
|
||||
|
||||
UInt32 ICompressionCodec::readCompressedBlockSize(const char * source)
|
||||
{
|
||||
UInt32 compressed_block_size = unalignedLoadLE<UInt32>(&source[1]);
|
||||
UInt32 compressed_block_size = unalignedLoadLittleEndian<UInt32>(&source[1]);
|
||||
if (compressed_block_size == 0)
|
||||
throw Exception(ErrorCodes::CORRUPTED_DATA, "Can't decompress data: header is corrupt with compressed block size 0");
|
||||
return compressed_block_size;
|
||||
@ -123,7 +123,7 @@ UInt32 ICompressionCodec::readCompressedBlockSize(const char * source)
|
||||
|
||||
UInt32 ICompressionCodec::readDecompressedBlockSize(const char * source)
|
||||
{
|
||||
UInt32 decompressed_block_size = unalignedLoadLE<UInt32>(&source[5]);
|
||||
UInt32 decompressed_block_size = unalignedLoadLittleEndian<UInt32>(&source[5]);
|
||||
if (decompressed_block_size == 0)
|
||||
throw Exception(ErrorCodes::CORRUPTED_DATA, "Can't decompress data: header is corrupt with decompressed block size 0");
|
||||
return decompressed_block_size;
|
||||
|
@ -172,7 +172,7 @@ private:
|
||||
throw std::runtime_error("No more data to read");
|
||||
}
|
||||
|
||||
current_value = unalignedLoadLE<T>(data);
|
||||
current_value = unalignedLoadLittleEndian<T>(data);
|
||||
data = reinterpret_cast<const char *>(data) + sizeof(T);
|
||||
}
|
||||
};
|
||||
@ -368,7 +368,7 @@ CodecTestSequence makeSeq(Args && ... args)
|
||||
char * write_pos = data.data();
|
||||
for (const auto & v : vals)
|
||||
{
|
||||
unalignedStoreLE<T>(write_pos, v);
|
||||
unalignedStoreLittleEndian<T>(write_pos, v);
|
||||
write_pos += sizeof(v);
|
||||
}
|
||||
|
||||
@ -390,7 +390,7 @@ CodecTestSequence generateSeq(Generator gen, const char* gen_name, B Begin = 0,
|
||||
{
|
||||
const T v = static_cast<T>(gen(i));
|
||||
|
||||
unalignedStoreLE<T>(write_pos, v);
|
||||
unalignedStoreLittleEndian<T>(write_pos, v);
|
||||
write_pos += sizeof(v);
|
||||
}
|
||||
|
||||
@ -1297,9 +1297,9 @@ TEST(LZ4Test, DecompressMalformedInput)
|
||||
|
||||
DB::Memory<> memory;
|
||||
memory.resize(ICompressionCodec::getHeaderSize() + uncompressed_size + LZ4::ADDITIONAL_BYTES_AT_END_OF_BUFFER);
|
||||
unalignedStoreLE<uint8_t>(memory.data(), static_cast<uint8_t>(CompressionMethodByte::LZ4));
|
||||
unalignedStoreLE<uint32_t>(&memory[1], source_size);
|
||||
unalignedStoreLE<uint32_t>(&memory[5], uncompressed_size);
|
||||
unalignedStoreLittleEndian<uint8_t>(memory.data(), static_cast<uint8_t>(CompressionMethodByte::LZ4));
|
||||
unalignedStoreLittleEndian<uint32_t>(&memory[1], source_size);
|
||||
unalignedStoreLittleEndian<uint32_t>(&memory[5], uncompressed_size);
|
||||
|
||||
auto codec = CompressionCodecFactory::instance().get("LZ4", {});
|
||||
ASSERT_THROW(codec->decompress(source, source_size, memory.data()), Exception);
|
||||
|
@ -14,6 +14,8 @@
#include <IO/Operators.h>

#include <unistd.h>
#include <bit>


namespace DB
{
@ -34,7 +36,7 @@ int32_t IFourLetterCommand::code()

String IFourLetterCommand::toName(int32_t code)
{
int reverted_code = __builtin_bswap32(code);
int reverted_code = std::byteswap(code);
return String(reinterpret_cast<char *>(&reverted_code), 4);
}

@ -42,7 +44,7 @@ int32_t IFourLetterCommand::toCode(const String & name)
{
int32_t res = *reinterpret_cast<const int32_t *>(name.data());
/// keep consistent with Coordination::read method by changing big endian to little endian.
return __builtin_bswap32(res);
return std::byteswap(res);
}

IFourLetterCommand::~IFourLetterCommand() = default;
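
std::byteswap (C++23) replaces the GCC builtin here; the four ASCII letters of a command pack into an int32 whose byte order must be flipped to match the big-endian wire format. A round-trip sketch:

#include <bit>
#include <cstdint>
#include <cstdio>
#include <cstring>

int main()
{
    const char name[4] = {'r', 'u', 'o', 'k'};
    int32_t code;
    std::memcpy(&code, name, 4);
    int32_t wire = std::byteswap(code); /// big-endian on-wire representation
    int32_t host = std::byteswap(wire); /// flipping twice restores the original
    char back[5] = {};
    std::memcpy(back, &host, 4);
    std::puts(back); /// ruok
}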
@ -333,10 +333,7 @@ public:
}
}

uint64_t keyArenaSize() const
{
return arena.size();
}
uint64_t keyArenaSize() const { return arena.allocatedBytes(); }

iterator begin() { return list.begin(); }
const_iterator begin() const { return list.cbegin(); }
@ -129,6 +129,7 @@ class IColumn;
\
M(Bool, allow_suspicious_low_cardinality_types, false, "In CREATE TABLE statement allows specifying LowCardinality modifier for types of small fixed size (8 or less). Enabling this may increase merge times and memory consumption.", 0) \
M(Bool, allow_suspicious_fixed_string_types, false, "In CREATE TABLE statement allows creating columns of type FixedString(n) with n > 256. FixedString with length >= 256 is suspicious and most likely indicates misusage", 0) \
M(Bool, allow_suspicious_indices, false, "Reject primary/secondary indexes and sorting keys with identical expressions", 0) \
M(Bool, compile_expressions, true, "Compile some scalar functions and operators to native code.", 0) \
M(UInt64, min_count_to_compile_expression, 3, "The number of identical expressions before they are JIT-compiled", 0) \
M(Bool, compile_aggregate_expressions, false, "Compile aggregate functions to native code. This feature has a bug and should not be used.", 0) \
@ -565,6 +566,8 @@ class IColumn;
M(Bool, enable_writes_to_query_cache, true, "Enable storing results of SELECT queries in the query cache", 0) \
M(Bool, enable_reads_from_query_cache, true, "Enable reading results of SELECT queries from the query cache", 0) \
M(Bool, query_cache_store_results_of_queries_with_nondeterministic_functions, false, "Store results of queries with non-deterministic functions (e.g. rand(), now()) in the query cache", 0) \
M(UInt64, query_cache_max_size_in_bytes, 0, "The maximum amount of memory (in bytes) the current user may allocate in the query cache. 0 means unlimited.", 0) \
M(UInt64, query_cache_max_entries, 0, "The maximum number of query results the current user may store in the query cache. 0 means unlimited.", 0) \
M(UInt64, query_cache_min_query_runs, 0, "Minimum number of times a SELECT query must run before its result is stored in the query cache", 0) \
M(Milliseconds, query_cache_min_query_duration, 0, "Minimum time in milliseconds for a query to run for its result to be stored in the query cache.", 0) \
M(Bool, query_cache_compress_entries, true, "Compress cache entries.", 0) \
@ -80,6 +80,7 @@ namespace SettingsChangesHistory
/// It's used to implement `compatibility` setting (see https://github.com/ClickHouse/ClickHouse/issues/35972)
static std::map<ClickHouseVersion, SettingsChangesHistory::SettingsChanges> settings_changes_history =
{
{"23.4", {{"allow_suspicious_indices", true, false, "If true, index can be defined with identical expressions"}}},
{"23.4", {{"connect_timeout_with_failover_ms", 50, 1000, "Increase default connect timeout because of async connect"},
{"connect_timeout_with_failover_secure_ms", 100, 1000, "Increase default secure connect timeout because of async connect"},
{"hedged_connection_timeout_ms", 100, 50, "Start new connection in hedged requests after 50 ms instead of 100 to correspond with previous connect timeout"}}},
@ -128,6 +128,13 @@ bool DataTypeMap::checkKeyType(DataTypePtr key_type)
return true;
}

DataTypePtr DataTypeMap::getNestedTypeWithUnnamedTuple() const
{
const auto & from_array = assert_cast<const DataTypeArray &>(*nested);
const auto & from_tuple = assert_cast<const DataTypeTuple &>(*from_array.getNestedType());
return std::make_shared<DataTypeArray>(std::make_shared<DataTypeTuple>(from_tuple.getElements()));
}

static DataTypePtr create(const ASTPtr & arguments)
{
if (!arguments || arguments->children.size() != 2)
@ -47,6 +47,7 @@ public:
const DataTypePtr & getValueType() const { return value_type; }
DataTypes getKeyValueTypes() const { return {key_type, value_type}; }
const DataTypePtr & getNestedType() const { return nested; }
DataTypePtr getNestedTypeWithUnnamedTuple() const;

SerializationPtr doGetDefaultSerialization() const override;
@ -17,6 +17,7 @@
#include <Databases/PostgreSQL/fetchPostgreSQLTableStructure.h>
#include <Common/quoteString.h>
#include <Common/filesystemHelpers.h>
#include <Common/logger_useful.h>
#include <filesystem>

namespace fs = std::filesystem;
@ -51,6 +52,7 @@ DatabasePostgreSQL::DatabasePostgreSQL(
, configuration(configuration_)
, pool(std::move(pool_))
, cache_tables(cache_tables_)
, log(&Poco::Logger::get("DatabasePostgreSQL(" + dbname_ + ")"))
{
cleaner_task = getContext()->getSchedulePool().createTask("PostgreSQLCleanerTask", [this]{ removeOutdatedTables(); });
cleaner_task->deactivate();
@ -192,7 +194,10 @@ StoragePtr DatabasePostgreSQL::fetchTable(const String & table_name, ContextPtr,
ColumnsDescription{columns_info->columns}, ConstraintsDescription{}, String{}, configuration.schema, configuration.on_conflict);

if (cache_tables)
{
LOG_TEST(log, "Cached table `{}`", table_name);
cached_tables[table_name] = storage;
}

return storage;
}
@ -73,6 +73,7 @@ private:
mutable Tables cached_tables;
std::unordered_set<std::string> detached_or_dropped;
BackgroundSchedulePool::TaskHolder cleaner_task;
Poco::Logger * log;

String getTableNameForLogs(const String & table_name) const;
@ -157,7 +157,7 @@ public:
});
}

return arena.size() + sizeof(Cell) * configuration.max_size_in_cells + attributes_size_in_bytes;
return arena.allocatedBytes() + sizeof(Cell) * configuration.max_size_in_cells + attributes_size_in_bytes;
}

private:
@ -1,6 +1,5 @@
#pragma once

#include <Common/Arena.h>
#include <Common/HashTable/HashMap.h>
#include <Columns/IColumn.h>
#include <Columns/ColumnDecimal.h>
@ -29,6 +28,8 @@ namespace ErrorCodes
extern const int BAD_ARGUMENTS;
}

class Arena;

/** Simple helper for getting default.
* Initialized with default value and default values column.
* If default values column is not null default value is taken from column.
@ -505,7 +505,7 @@ void FlatDictionary::calculateBytesAllocated()
bytes_allocated += hierarchical_index_bytes_allocated;
}

bytes_allocated += string_arena.size();
bytes_allocated += string_arena.allocatedBytes();
}

FlatDictionary::Attribute FlatDictionary::createAttribute(const DictionaryAttribute & dictionary_attribute)
@ -797,7 +797,7 @@ void HashedArrayDictionary<dictionary_key_type>::calculateBytesAllocated()
bytes_allocated += hierarchical_index_bytes_allocated;
}

bytes_allocated += string_arena.size();
bytes_allocated += string_arena.allocatedBytes();
}

template <DictionaryKeyType dictionary_key_type>
@ -1022,7 +1022,7 @@ void HashedDictionary<dictionary_key_type, sparse, sharded>::calculateBytesAlloc
}

for (const auto & arena : string_arenas)
bytes_allocated += arena->size();
bytes_allocated += arena->allocatedBytes();
}

template <DictionaryKeyType dictionary_key_type, bool sparse, bool sharded>
@ -541,7 +541,7 @@ template <>
void IPAddressDictionary::addAttributeSize<String>(const Attribute & attribute)
{
addAttributeSize<StringRef>(attribute);
bytes_allocated += sizeof(Arena) + attribute.string_arena->size();
bytes_allocated += sizeof(Arena) + attribute.string_arena->allocatedBytes();
}

void IPAddressDictionary::calculateBytesAllocated()
Some files were not shown because too many files have changed in this diff.