diff --git a/.clang-format b/.clang-format
index d8f273702c8..2da3911dced 100644
--- a/.clang-format
+++ b/.clang-format
@@ -21,7 +21,6 @@ ConstructorInitializerAllOnOneLineOrOnePerLine: true
ExperimentalAutoDetectBinPacking: true
UseTab: Never
TabWidth: 4
-IndentWidth: 4
Standard: Cpp11
PointerAlignment: Middle
MaxEmptyLinesToKeep: 2
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 449abc9484d..4b201802cae 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -7,11 +7,11 @@ tests/ci/run_check.py
### Changelog category (leave one):
- New Feature
- Improvement
-- Bug Fix (user-visible misbehavior in an official stable release)
- Performance Improvement
- Backward Incompatible Change
- Build/Testing/Packaging Improvement
- Documentation (changelog entry is not required)
+- Bug Fix (user-visible misbehavior in an official stable release)
- Not for changelog (changelog entry is not required)
diff --git a/.github/workflows/backport_branches.yml b/.github/workflows/backport_branches.yml
index 0d81a7b303c..d69168b01ee 100644
--- a/.github/workflows/backport_branches.yml
+++ b/.github/workflows/backport_branches.yml
@@ -349,6 +349,13 @@ jobs:
with:
clear-repository: true
submodules: true
+ - name: Apply sparse checkout for contrib # in order to check that it doesn't break build
+ run: |
+ rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
+ git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
+ "$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
+ du -hs "$GITHUB_WORKSPACE/contrib" ||:
+ find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml
index ecd5b85d320..1182481c897 100644
--- a/.github/workflows/master.yml
+++ b/.github/workflows/master.yml
@@ -487,6 +487,13 @@ jobs:
with:
clear-repository: true
submodules: true
+ - name: Apply sparse checkout for contrib # in order to check that it doesn't break build
+ run: |
+ rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
+ git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
+ "$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
+ du -hs "$GITHUB_WORKSPACE/contrib" ||:
+ find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml
index f6d6d192f48..85d865252ad 100644
--- a/.github/workflows/nightly.yml
+++ b/.github/workflows/nightly.yml
@@ -118,9 +118,11 @@ jobs:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
SonarCloud:
+ # TODO: Remove if: whenever SonarCloud supports c++23
+ if: ${{ false }}
runs-on: [self-hosted, builder]
env:
- SONAR_SCANNER_VERSION: 4.7.0.2747
+ SONAR_SCANNER_VERSION: 4.8.0.2856
SONAR_SERVER_URL: "https://sonarcloud.io"
BUILD_WRAPPER_OUT_DIR: build_wrapper_output_directory # Directory where build-wrapper output will be placed
CC: clang-15
@@ -173,4 +175,4 @@ jobs:
--define sonar.cfamily.build-wrapper-output="${{ env.BUILD_WRAPPER_OUT_DIR }}" \
--define sonar.projectKey="ClickHouse_ClickHouse" \
--define sonar.organization="clickhouse-java" \
- --define sonar.exclusions="**/*.java,**/*.ts,**/*.js,**/*.css,**/*.sql"
+ --define sonar.exclusions="**/*.java,**/*.ts,**/*.js,**/*.css,**/*.sql" \
diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml
index ab0cbbb7ec1..506ed451b6d 100644
--- a/.github/workflows/pull_request.yml
+++ b/.github/workflows/pull_request.yml
@@ -550,6 +550,13 @@ jobs:
with:
clear-repository: true
submodules: true
+ - name: Apply sparse checkout for contrib # in order to check that it doesn't break build
+ run: |
+ rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
+ git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
+ "$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
+ du -hs "$GITHUB_WORKSPACE/contrib" ||:
+ find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
@@ -1301,6 +1308,40 @@ jobs:
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
+ FunctionalStatelessTestReleaseAnalyzer:
+ needs: [BuilderDebRelease]
+ runs-on: [self-hosted, func-tester]
+ steps:
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ TEMP_PATH=${{runner.temp}}/stateless_analyzer
+ REPORTS_PATH=${{runner.temp}}/reports_dir
+ CHECK_NAME=Stateless tests (release, analyzer)
+ REPO_COPY=${{runner.temp}}/stateless_analyzer/ClickHouse
+ KILL_TIMEOUT=10800
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v3
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ with:
+ clear-repository: true
+ - name: Functional test
+ run: |
+ sudo rm -fr "$TEMP_PATH"
+ mkdir -p "$TEMP_PATH"
+ cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+ cd "$REPO_COPY/tests/ci"
+ python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+ - name: Cleanup
+ if: always()
+ run: |
+ docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+ docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+ sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestReleaseS3_0:
needs: [BuilderDebRelease]
runs-on: [self-hosted, func-tester]
@@ -4748,6 +4789,7 @@ jobs:
- FunctionalStatelessTestReleaseDatabaseReplicated2
- FunctionalStatelessTestReleaseDatabaseReplicated3
- FunctionalStatelessTestReleaseWideParts
+ - FunctionalStatelessTestReleaseAnalyzer
- FunctionalStatelessTestAarch64
- FunctionalStatelessTestAsan0
- FunctionalStatelessTestAsan1
@@ -4839,3 +4881,41 @@ jobs:
cd "$GITHUB_WORKSPACE/tests/ci"
python3 finish_check.py
python3 merge_pr.py --check-approved
+##############################################################################################
+########################### SQLLOGIC TEST ###################################################
+##############################################################################################
+ SQLLogicTestRelease:
+ needs: [BuilderDebRelease]
+ runs-on: [self-hosted, func-tester]
+ steps:
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ TEMP_PATH=${{runner.temp}}/sqllogic_debug
+ REPORTS_PATH=${{runner.temp}}/reports_dir
+ CHECK_NAME=Sqllogic test (release)
+ REPO_COPY=${{runner.temp}}/sqllogic_debug/ClickHouse
+ KILL_TIMEOUT=10800
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v2
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ - name: Clear repository
+ run: |
+ sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
+ - name: Check out repository code
+ uses: actions/checkout@v2
+ - name: Sqllogic test
+ run: |
+ sudo rm -fr "$TEMP_PATH"
+ mkdir -p "$TEMP_PATH"
+ cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+ cd "$REPO_COPY/tests/ci"
+ python3 sqllogic_test.py "$CHECK_NAME" "$KILL_TIMEOUT"
+ - name: Cleanup
+ if: always()
+ run: |
+ docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+ docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+ sudo rm -fr "$TEMP_PATH"
diff --git a/.github/workflows/release_branches.yml b/.github/workflows/release_branches.yml
index 1282dbef50b..21284815583 100644
--- a/.github/workflows/release_branches.yml
+++ b/.github/workflows/release_branches.yml
@@ -406,6 +406,13 @@ jobs:
with:
clear-repository: true
submodules: true
+ - name: Apply sparse checkout for contrib # in order to check that it doesn't break build
+ run: |
+ rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
+ git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
+ "$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
+ du -hs "$GITHUB_WORKSPACE/contrib" ||:
+ find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
diff --git a/.gitmodules b/.gitmodules
index ca55281e643..e4d63a34118 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -335,3 +335,6 @@
[submodule "contrib/liburing"]
path = contrib/liburing
url = https://github.com/axboe/liburing
+[submodule "contrib/isa-l"]
+ path = contrib/isa-l
+ url = https://github.com/ClickHouse/isa-l.git
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 47320208f02..e2505856d0c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,4 +1,5 @@
### Table of Contents
+**[ClickHouse release v23.4, 2023-04-26](#234)**
**[ClickHouse release v23.3 LTS, 2023-03-30](#233)**
**[ClickHouse release v23.2, 2023-02-23](#232)**
**[ClickHouse release v23.1, 2023-01-25](#231)**
@@ -6,6 +7,153 @@
# 2023 Changelog
+### ClickHouse release 23.4, 2023-04-26
+
+#### Backward Incompatible Change
+* Formatter '%M' in function formatDateTime() now prints the month name instead of the minutes. This makes the behavior consistent with MySQL. The previous behavior can be restored using setting "formatdatetime_parsedatetime_m_is_month_name = 0". [#47246](https://github.com/ClickHouse/ClickHouse/pull/47246) ([Robert Schulze](https://github.com/rschu1ze)).
+* This change makes sense only if you are using the virtual filesystem cache. If `path` in the virtual filesystem cache configuration is not empty and is not an absolute path, then it will be put in `/caches/`. [#48784](https://github.com/ClickHouse/ClickHouse/pull/48784) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Primary/secondary indices and sorting keys with identical expressions are now rejected. This behavior can be disabled using setting `allow_suspicious_indices`. [#48536](https://github.com/ClickHouse/ClickHouse/pull/48536) ([凌涛](https://github.com/lingtaolf)).
+
+#### New Feature
+* Support new aggregate functions `quantileGK`/`quantilesGK`, similar to [approx_percentile](https://spark.apache.org/docs/latest/api/sql/index.html#approx_percentile) in Spark. For the Greenwald-Khanna algorithm, see http://infolab.stanford.edu/~datar/courses/cs361a/papers/quantiles.pdf. [#46428](https://github.com/ClickHouse/ClickHouse/pull/46428) ([李扬](https://github.com/taiyang-li)).
+* Add a statement `SHOW COLUMNS` which shows distilled information from system.columns. [#48017](https://github.com/ClickHouse/ClickHouse/pull/48017) ([Robert Schulze](https://github.com/rschu1ze)).
+* Added `LIGHTWEIGHT` and `PULL` modifiers for `SYSTEM SYNC REPLICA` query. `LIGHTWEIGHT` version waits for fetches and drop-ranges only (merges and mutations are ignored). `PULL` version pulls new entries from ZooKeeper and does not wait for them. Fixes [#47794](https://github.com/ClickHouse/ClickHouse/issues/47794). [#48085](https://github.com/ClickHouse/ClickHouse/pull/48085) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* Add `kafkaMurmurHash` function for compatibility with Kafka DefaultPartitioner. Closes [#47834](https://github.com/ClickHouse/ClickHouse/issues/47834). [#48185](https://github.com/ClickHouse/ClickHouse/pull/48185) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Allow to easily create a user with the same grants as the current user by using `GRANT CURRENT GRANTS`. [#48262](https://github.com/ClickHouse/ClickHouse/pull/48262) ([pufit](https://github.com/pufit)).
+* Add statistical aggregate function `kolmogorovSmirnovTest`. Close [#48228](https://github.com/ClickHouse/ClickHouse/issues/48228). [#48325](https://github.com/ClickHouse/ClickHouse/pull/48325) ([FFFFFFFHHHHHHH](https://github.com/FFFFFFFHHHHHHH)).
+* Added a `lost_part_count` column to the `system.replicas` table. The column value shows the total number of lost parts in the corresponding table. Value is stored in zookeeper and can be used instead of not persistent `ReplicatedDataLoss` profile event for monitoring. [#48526](https://github.com/ClickHouse/ClickHouse/pull/48526) ([Sergei Trifonov](https://github.com/serxa)).
+* Add `soundex` function for compatibility. Closes [#39880](https://github.com/ClickHouse/ClickHouse/issues/39880). [#48567](https://github.com/ClickHouse/ClickHouse/pull/48567) ([FriendLey](https://github.com/FriendLey)).
+* Support `Map` type for JSONExtract. [#48629](https://github.com/ClickHouse/ClickHouse/pull/48629) ([李扬](https://github.com/taiyang-li)).
+* Add `PrettyJSONEachRow` format to output pretty JSON with new line delimiters and 4 space indents. [#48898](https://github.com/ClickHouse/ClickHouse/pull/48898) ([Kruglov Pavel](https://github.com/Avogar)).
+* Add `ParquetMetadata` input format to read Parquet file metadata. [#48911](https://github.com/ClickHouse/ClickHouse/pull/48911) ([Kruglov Pavel](https://github.com/Avogar)).
+* Add `extractKeyValuePairs` function to extract key-value pairs from strings. Input strings might contain noise (e.g. log files) and do not need to be 100% in key-value-pair format; the algorithm will look for key-value pairs matching the arguments passed to the function. As of now, the function accepts the following arguments: `data_column` (mandatory), `key_value_pair_delimiter` (defaults to `:`), `pair_delimiters` (defaults to `\space \, \;`) and `quoting_character` (defaults to double quotes). [#43606](https://github.com/ClickHouse/ClickHouse/pull/43606) ([Arthur Passos](https://github.com/arthurpassos)).
+* Functions replaceOne(), replaceAll(), replaceRegexpOne() and replaceRegexpAll() can now be called with non-const pattern and replacement arguments. [#46589](https://github.com/ClickHouse/ClickHouse/pull/46589) ([Robert Schulze](https://github.com/rschu1ze)).
+* Added functions to work with columns of type `Map`: `mapConcat`, `mapSort`, `mapExists`. [#48071](https://github.com/ClickHouse/ClickHouse/pull/48071) ([Anton Popov](https://github.com/CurtizJ)).
+
+#### Performance Improvement
+* Reading files in `Parquet` format is now much faster. IO and decoding are parallelized (controlled by `max_threads` setting), and only required data ranges are read. [#47964](https://github.com/ClickHouse/ClickHouse/pull/47964) ([Michael Kolupaev](https://github.com/al13n321)).
+* If we run a mutation with IN (subquery) like `ALTER TABLE t UPDATE col='new value' WHERE id IN (SELECT id FROM huge_table)` and the table `t` has multiple parts, then for each part a set for the subquery `SELECT id FROM huge_table` is built in memory. If there are many parts, this might consume a lot of memory (and lead to an OOM) and CPU. The solution is to introduce a short-lived cache of sets that are currently being built by mutation tasks. If another task of the same mutation is executed concurrently, it can look up the set in the cache, wait for it to be built, and reuse it. [#46835](https://github.com/ClickHouse/ClickHouse/pull/46835) ([Alexander Gololobov](https://github.com/davenger)).
+* Only check dependencies if necessary when applying `ALTER TABLE` queries. [#48062](https://github.com/ClickHouse/ClickHouse/pull/48062) ([Raúl Marín](https://github.com/Algunenano)).
+* Optimize function `mapUpdate`. [#48118](https://github.com/ClickHouse/ClickHouse/pull/48118) ([Anton Popov](https://github.com/CurtizJ)).
+* An internal query to the local replica is now sent explicitly and its data is received through the loopback interface. The setting `prefer_localhost_replica` is not respected for parallel replicas. This is needed for better scheduling and makes the code cleaner: the initiator is only responsible for coordinating the reading process and merging results, continuously answering requests while all the secondary queries read the data. Note: using the loopback interface is not as performant, but otherwise some replicas could starve for tasks, which could lead to even slower query execution and not utilizing all possible resources. The initialization of the coordinator is now even lazier. All incoming requests contain the information about the reading algorithm, and the coordinator is initialized with it when the first request comes. If any replica decides to read with a different algorithm, an exception will be thrown and the query will be aborted. [#48246](https://github.com/ClickHouse/ClickHouse/pull/48246) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Do not build set for the right side of `IN` clause with subquery when it is used only for analysis of skip indexes, and they are disabled by setting (`use_skip_indexes=0`). Previously it might affect the performance of queries. [#48299](https://github.com/ClickHouse/ClickHouse/pull/48299) ([Anton Popov](https://github.com/CurtizJ)).
+* Query processing is parallelized right after reading `FROM file(...)`. Related to [#38755](https://github.com/ClickHouse/ClickHouse/issues/38755). [#48525](https://github.com/ClickHouse/ClickHouse/pull/48525) ([Igor Nikonov](https://github.com/devcrafter)). Query processing is parallelized right after reading from any data source. Affected data sources are mostly simple or external storages like table functions `url`, `file`. [#48727](https://github.com/ClickHouse/ClickHouse/pull/48727) ([Igor Nikonov](https://github.com/devcrafter)). This is controlled by the setting `parallelize_output_from_storages` which is not enabled by default.
+* Lowered contention of ThreadPool mutex (may increase performance for a huge amount of small jobs). [#48750](https://github.com/ClickHouse/ClickHouse/pull/48750) ([Sergei Trifonov](https://github.com/serxa)).
+* Reduce memory usage for multiple `ALTER DELETE` mutations. [#48522](https://github.com/ClickHouse/ClickHouse/pull/48522) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Remove the excessive connection attempts if the `skip_unavailable_shards` setting is enabled. [#48771](https://github.com/ClickHouse/ClickHouse/pull/48771) ([Azat Khuzhin](https://github.com/azat)).
+
+#### Experimental Feature
+* Entries in the query cache are now squashed to max_block_size and compressed. [#45912](https://github.com/ClickHouse/ClickHouse/pull/45912) ([Robert Schulze](https://github.com/rschu1ze)).
+* It is now possible to define per-user quotas in the query cache. [#48284](https://github.com/ClickHouse/ClickHouse/pull/48284) ([Robert Schulze](https://github.com/rschu1ze)).
+* Some fixes for parallel replicas [#48433](https://github.com/ClickHouse/ClickHouse/pull/48433) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Implement zero-copy-replication (an experimental feature) on encrypted disks. [#48741](https://github.com/ClickHouse/ClickHouse/pull/48741) ([Vitaly Baranov](https://github.com/vitlibar)).
+
+#### Improvement
+* Increase default value for `connect_timeout_with_failover_ms` to 1000 ms (because of adding async connections in https://github.com/ClickHouse/ClickHouse/pull/47229) . Closes [#5188](https://github.com/ClickHouse/ClickHouse/issues/5188). [#49009](https://github.com/ClickHouse/ClickHouse/pull/49009) ([Kruglov Pavel](https://github.com/Avogar)).
+* Several improvements around data lakes: - Make `Iceberg` work with non-partitioned data. - Support `Iceberg` format version v2 (previously only v1 was supported). - Support reading partitioned data for `DeltaLake`/`Hudi`. - Faster reading of `DeltaLake` metadata by using Delta's checkpoint files. - Fixed incorrect `Hudi` reads: previously it incorrectly chose which data to read and therefore could only read small tables correctly. - Made these engines pick up updates of changed data (previously the state was set on table creation). - Added proper testing for `Iceberg`/`DeltaLake`/`Hudi` using Spark. [#47307](https://github.com/ClickHouse/ClickHouse/pull/47307) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Add async connection to socket and async writing to socket. Make creating connections and sending query/external tables async across shards. Refactor code with fibers. Closes [#46931](https://github.com/ClickHouse/ClickHouse/issues/46931). We will be able to increase `connect_timeout_with_failover_ms` by default after this PR (https://github.com/ClickHouse/ClickHouse/issues/5188). [#47229](https://github.com/ClickHouse/ClickHouse/pull/47229) ([Kruglov Pavel](https://github.com/Avogar)).
+* Support config sections `keeper`/`keeper_server` as an alternative to `zookeeper`. Close [#34766](https://github.com/ClickHouse/ClickHouse/issues/34766) , [#34767](https://github.com/ClickHouse/ClickHouse/issues/34767). [#35113](https://github.com/ClickHouse/ClickHouse/pull/35113) ([李扬](https://github.com/taiyang-li)).
+* It is possible to set _secure_ flag in named_collections for a dictionary with a ClickHouse table source. Addresses [#38450](https://github.com/ClickHouse/ClickHouse/issues/38450) . [#46323](https://github.com/ClickHouse/ClickHouse/pull/46323) ([Ilya Golshtein](https://github.com/ilejn)).
+* The `bitCount` function supports the `FixedString` and `String` data types. [#49044](https://github.com/ClickHouse/ClickHouse/pull/49044) ([flynn](https://github.com/ucasfl)).
+* Added configurable retries for all operations with [Zoo]Keeper for Backup queries. [#47224](https://github.com/ClickHouse/ClickHouse/pull/47224) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Enable `use_environment_credentials` for S3 by default, so the entire provider chain is constructed by default. [#47397](https://github.com/ClickHouse/ClickHouse/pull/47397) ([Antonio Andelic](https://github.com/antonio2368)).
+* Currently, the JSON_VALUE function is similar to Spark's get_json_object function, which supports getting a value from a JSON string by a path like '$.key'. There are still some differences: 1. Spark's get_json_object returns null if the path does not exist, while JSON_VALUE returns an empty string; 2. Spark's get_json_object returns complex values, such as JSON object/array values, while JSON_VALUE returns an empty string. [#47494](https://github.com/ClickHouse/ClickHouse/pull/47494) ([KevinyhZou](https://github.com/KevinyhZou)).
+* More flexible propagation of the insert table structure to table functions with `use_structure_from_insertion_table_in_table_functions`. Fixed an issue with name mapping and using virtual columns. No more need for the 'auto' setting. [#47962](https://github.com/ClickHouse/ClickHouse/pull/47962) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
+* Do not continue retrying to connect to Keeper if the query is killed or over limits. [#47985](https://github.com/ClickHouse/ClickHouse/pull/47985) ([Raúl Marín](https://github.com/Algunenano)).
+* Support Enum output/input in `BSONEachRow`, allow all map key types and avoid extra calculations on output. [#48122](https://github.com/ClickHouse/ClickHouse/pull/48122) ([Kruglov Pavel](https://github.com/Avogar)).
+* Support more ClickHouse types in `ORC`/`Arrow`/`Parquet` formats: Enum(8|16), (U)Int(128|256), Decimal256 (for ORC), allow reading IPv4 from Int32 values (ORC outputs IPv4 as Int32, and we couldn't read it back), fix reading Nullable(IPv6) from binary data for `ORC`. [#48126](https://github.com/ClickHouse/ClickHouse/pull/48126) ([Kruglov Pavel](https://github.com/Avogar)).
+* Add columns `perform_ttl_move_on_insert`, `load_balancing` for table `system.storage_policies`, modify column `volume_type` type to `Enum8`. [#48167](https://github.com/ClickHouse/ClickHouse/pull/48167) ([lizhuoyu5](https://github.com/lzydmxy)).
+* Added support for the `BACKUP ALL` command which backs up all tables and databases, including temporary and system ones. [#48189](https://github.com/ClickHouse/ClickHouse/pull/48189) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Function mapFromArrays supports `Map` type as an input. [#48207](https://github.com/ClickHouse/ClickHouse/pull/48207) ([李扬](https://github.com/taiyang-li)).
+* The output of some SHOW PROCESSLIST is now sorted. [#48241](https://github.com/ClickHouse/ClickHouse/pull/48241) ([Robert Schulze](https://github.com/rschu1ze)).
+* Per-query/per-server throttling for remote IO/local IO/BACKUPs (server settings: `max_remote_read_network_bandwidth_for_server`, `max_remote_write_network_bandwidth_for_server`, `max_local_read_bandwidth_for_server`, `max_local_write_bandwidth_for_server`, `max_backup_bandwidth_for_server`, settings: `max_remote_read_network_bandwidth`, `max_remote_write_network_bandwidth`, `max_local_read_bandwidth`, `max_local_write_bandwidth`, `max_backup_bandwidth`). [#48242](https://github.com/ClickHouse/ClickHouse/pull/48242) ([Azat Khuzhin](https://github.com/azat)).
+* Support more types in `CapnProto` format: Map, (U)Int(128|256), Decimal(128|256). Allow integer conversions during input/output. [#48257](https://github.com/ClickHouse/ClickHouse/pull/48257) ([Kruglov Pavel](https://github.com/Avogar)).
+* Don't throw CURRENT_WRITE_BUFFER_IS_EXHAUSTED for normal behaviour. [#48288](https://github.com/ClickHouse/ClickHouse/pull/48288) ([Raúl Marín](https://github.com/Algunenano)).
+* Add new setting `keeper_map_strict_mode` which enforces extra guarantees on operations made on top of `KeeperMap` tables. [#48293](https://github.com/ClickHouse/ClickHouse/pull/48293) ([Antonio Andelic](https://github.com/antonio2368)).
+* Check that the primary key type for a simple dictionary is a native unsigned integer type. Added setting `check_dictionary_primary_key` for compatibility (set `check_dictionary_primary_key = false` to disable the check). [#48335](https://github.com/ClickHouse/ClickHouse/pull/48335) ([lizhuoyu5](https://github.com/lzydmxy)).
+* Don't replicate mutations for `KeeperMap` because it's unnecessary. [#48354](https://github.com/ClickHouse/ClickHouse/pull/48354) ([Antonio Andelic](https://github.com/antonio2368)).
+* Allow to write/read unnamed tuple as nested Message in Protobuf format. Tuple elements and Message fields are matched by position. [#48390](https://github.com/ClickHouse/ClickHouse/pull/48390) ([Kruglov Pavel](https://github.com/Avogar)).
+* Support `additional_table_filters` and `additional_result_filter` settings in the new planner. Also, add a documentation entry for `additional_result_filter`. [#48405](https://github.com/ClickHouse/ClickHouse/pull/48405) ([Dmitry Novik](https://github.com/novikd)).
+* `parseDateTime` now understands format string '%f' (fractional seconds). [#48420](https://github.com/ClickHouse/ClickHouse/pull/48420) ([Robert Schulze](https://github.com/rschu1ze)).
+* Format string "%f" in formatDateTime() now prints "000000" if the formatted value has no fractional seconds, the previous behavior (single zero) can be restored using setting "formatdatetime_f_prints_single_zero = 1". [#48422](https://github.com/ClickHouse/ClickHouse/pull/48422) ([Robert Schulze](https://github.com/rschu1ze)).
+* Don't replicate DELETE and TRUNCATE for KeeperMap. [#48434](https://github.com/ClickHouse/ClickHouse/pull/48434) ([Antonio Andelic](https://github.com/antonio2368)).
+* Generate valid Decimals and Bools in generateRandom function. [#48436](https://github.com/ClickHouse/ClickHouse/pull/48436) ([Kruglov Pavel](https://github.com/Avogar)).
+* Allow trailing commas in expression list of SELECT query, for example `SELECT a, b, c, FROM table`. Closes [#37802](https://github.com/ClickHouse/ClickHouse/issues/37802). [#48438](https://github.com/ClickHouse/ClickHouse/pull/48438) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Override `CLICKHOUSE_USER` and `CLICKHOUSE_PASSWORD` environment variables with `--user` and `--password` client parameters. Closes [#38909](https://github.com/ClickHouse/ClickHouse/issues/38909). [#48440](https://github.com/ClickHouse/ClickHouse/pull/48440) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Added retries to loading of data parts in `MergeTree` tables in case of retryable errors. [#48442](https://github.com/ClickHouse/ClickHouse/pull/48442) ([Anton Popov](https://github.com/CurtizJ)).
+* Add support for `Date`, `Date32`, `DateTime`, `DateTime64` data types to `arrayMin`, `arrayMax`, `arrayDifference` functions. Closes [#21645](https://github.com/ClickHouse/ClickHouse/issues/21645). [#48445](https://github.com/ClickHouse/ClickHouse/pull/48445) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Add support for `{server_uuid}` macro. It is useful for identifying replicas in autoscaled clusters when new replicas are constantly added and removed in runtime. This closes [#48554](https://github.com/ClickHouse/ClickHouse/issues/48554). [#48563](https://github.com/ClickHouse/ClickHouse/pull/48563) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* The installation script will create a hard link instead of copying if it is possible. [#48578](https://github.com/ClickHouse/ClickHouse/pull/48578) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Support `SHOW TABLE` syntax meaning the same as `SHOW CREATE TABLE`. Closes [#48580](https://github.com/ClickHouse/ClickHouse/issues/48580). [#48591](https://github.com/ClickHouse/ClickHouse/pull/48591) ([flynn](https://github.com/ucasfl)).
+* HTTP temporary buffers now support working by evicting data from the virtual filesystem cache. [#48664](https://github.com/ClickHouse/ClickHouse/pull/48664) ([Vladimir C](https://github.com/vdimir)).
+* Make schema inference work for `CREATE AS SELECT`. Closes [#47599](https://github.com/ClickHouse/ClickHouse/issues/47599). [#48679](https://github.com/ClickHouse/ClickHouse/pull/48679) ([flynn](https://github.com/ucasfl)).
+* Added a `replicated_max_mutations_in_one_entry` setting for `ReplicatedMergeTree` that allows limiting the number of mutation commands per one `MUTATE_PART` entry (default is 10000). [#48731](https://github.com/ClickHouse/ClickHouse/pull/48731) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* In AggregateFunction types, don't count unused arena bytes as `read_bytes`. [#48745](https://github.com/ClickHouse/ClickHouse/pull/48745) ([Raúl Marín](https://github.com/Algunenano)).
+* Fix some MySQL-related settings not being handled with the MySQL dictionary source + named collection. Closes [#48402](https://github.com/ClickHouse/ClickHouse/issues/48402). [#48759](https://github.com/ClickHouse/ClickHouse/pull/48759) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* If a user set `max_single_part_upload_size` to a very large value, it can lead to a crash due to a bug in the AWS S3 SDK. This fixes [#47679](https://github.com/ClickHouse/ClickHouse/issues/47679). [#48816](https://github.com/ClickHouse/ClickHouse/pull/48816) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Fix data race in `RabbitMQ` ([report](https://pastila.nl/?004f7100/de1505289ab5bb355e67ebe6c7cc8707)), refactor the code. [#48845](https://github.com/ClickHouse/ClickHouse/pull/48845) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Add aliases `name` and `part_name` for `system.parts` and `system.part_log`. Closes [#48718](https://github.com/ClickHouse/ClickHouse/issues/48718). [#48850](https://github.com/ClickHouse/ClickHouse/pull/48850) ([sichenzhao](https://github.com/sichenzhao)).
+* Functions "arrayDifference()", "arrayCumSum()" and "arrayCumSumNonNegative()" now support input arrays of wide integer types (U)Int128/256. [#48866](https://github.com/ClickHouse/ClickHouse/pull/48866) ([cluster](https://github.com/infdahai)).
+* Multi-line history in clickhouse-client is now no longer padded. This makes pasting more natural. [#48870](https://github.com/ClickHouse/ClickHouse/pull/48870) ([Joanna Hulboj](https://github.com/jh0x)).
+* Implement a slight improvement for the rare case when ClickHouse is run inside LXC and LXCFS is used. LXCFS has an issue: sometimes it returns the error "Transport endpoint is not connected" on reading from a file inside `/proc`. This error was correctly logged into ClickHouse's server log. We have additionally worked around this issue by reopening the file. This is a minuscule change. [#48922](https://github.com/ClickHouse/ClickHouse/pull/48922) ([Real](https://github.com/RunningXie)).
+* Improve memory accounting for prefetches. Randomise prefetch settings in CI. [#48973](https://github.com/ClickHouse/ClickHouse/pull/48973) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Correctly set headers for native copy operations on GCS. [#48981](https://github.com/ClickHouse/ClickHouse/pull/48981) ([Antonio Andelic](https://github.com/antonio2368)).
+* Add support for specifying setting names in the command line with dashes instead of underscores, for example, `--max-threads` instead of `--max_threads`. Additionally, support Unicode dash characters like `—` instead of `--` - this is useful when you communicate with a team in another company, and a manager from that team copy-pasted code from MS Word. [#48985](https://github.com/ClickHouse/ClickHouse/pull/48985) ([alekseygolub](https://github.com/alekseygolub)).
+* Add fallback to password authentication when authentication with SSL user certificate has failed. Closes [#48974](https://github.com/ClickHouse/ClickHouse/issues/48974). [#48989](https://github.com/ClickHouse/ClickHouse/pull/48989) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Improve the embedded dashboard. Close [#46671](https://github.com/ClickHouse/ClickHouse/issues/46671). [#49036](https://github.com/ClickHouse/ClickHouse/pull/49036) ([Kevin Zhang](https://github.com/Kinzeng)).
+* Add profile events for log messages, so you can easily see the count of log messages by severity. [#49042](https://github.com/ClickHouse/ClickHouse/pull/49042) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* In previous versions, the `LineAsString` format worked inconsistently when the parallel parsing was enabled or not, in presence of DOS or macOS Classic line breaks. This closes [#49039](https://github.com/ClickHouse/ClickHouse/issues/49039). [#49052](https://github.com/ClickHouse/ClickHouse/pull/49052) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* The exception message about the unparsed query parameter will also tell about the name of the parameter. Reimplement [#48878](https://github.com/ClickHouse/ClickHouse/issues/48878). Close [#48772](https://github.com/ClickHouse/ClickHouse/issues/48772). [#49061](https://github.com/ClickHouse/ClickHouse/pull/49061) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+
+#### Build/Testing/Packaging Improvement
+* Update time zones. The following were updated: Africa/Cairo, Africa/Casablanca, Africa/El_Aaiun, America/Bogota, America/Cambridge_Bay, America/Ciudad_Juarez, America/Godthab, America/Inuvik, America/Iqaluit, America/Nuuk, America/Ojinaga, America/Pangnirtung, America/Rankin_Inlet, America/Resolute, America/Whitehorse, America/Yellowknife, Asia/Gaza, Asia/Hebron, Asia/Kuala_Lumpur, Asia/Singapore, Canada/Yukon, Egypt, Europe/Kirov, Europe/Volgograd, Singapore. [#48572](https://github.com/ClickHouse/ClickHouse/pull/48572) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Reduce the number of dependencies in the header files to speed up the build. [#47984](https://github.com/ClickHouse/ClickHouse/pull/47984) ([Dmitry Novik](https://github.com/novikd)).
+* Randomize compression of marks and indices in tests. [#48286](https://github.com/ClickHouse/ClickHouse/pull/48286) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Bump internal ZSTD from 1.5.4 to 1.5.5. [#46797](https://github.com/ClickHouse/ClickHouse/pull/46797) ([Robert Schulze](https://github.com/rschu1ze)).
+* Randomize vertical merges from compact to wide parts in tests. [#48287](https://github.com/ClickHouse/ClickHouse/pull/48287) ([Raúl Marín](https://github.com/Algunenano)).
+* Support for CRC32 checksum in HDFS. Fix performance issues. [#48614](https://github.com/ClickHouse/ClickHouse/pull/48614) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Remove remainders of GCC support. [#48671](https://github.com/ClickHouse/ClickHouse/pull/48671) ([Robert Schulze](https://github.com/rschu1ze)).
+* Add CI run with new analyzer infrastructure enabled. [#48719](https://github.com/ClickHouse/ClickHouse/pull/48719) ([Dmitry Novik](https://github.com/novikd)).
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+
+* Fix system.query_views_log for MVs that are pushed from background threads [#46668](https://github.com/ClickHouse/ClickHouse/pull/46668) ([Azat Khuzhin](https://github.com/azat)).
+* Fix several `RENAME COLUMN` bugs [#46946](https://github.com/ClickHouse/ClickHouse/pull/46946) ([alesapin](https://github.com/alesapin)).
+* Fix minor hiliting issues in clickhouse-format [#47610](https://github.com/ClickHouse/ClickHouse/pull/47610) ([Natasha Murashkina](https://github.com/murfel)).
+* Fix a bug in LLVM's libc++ leading to a crash for uploading parts to S3 which size is greater than INT_MAX [#47693](https://github.com/ClickHouse/ClickHouse/pull/47693) ([Azat Khuzhin](https://github.com/azat)).
+* Fix overflow in the `sparkbar` function [#48121](https://github.com/ClickHouse/ClickHouse/pull/48121) ([Vladimir C](https://github.com/vdimir)).
+* Fix race in S3 [#48190](https://github.com/ClickHouse/ClickHouse/pull/48190) ([Anton Popov](https://github.com/CurtizJ)).
+* Disable JIT for aggregate functions due to inconsistent behavior [#48195](https://github.com/ClickHouse/ClickHouse/pull/48195) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Fix alter formatting (minor) [#48289](https://github.com/ClickHouse/ClickHouse/pull/48289) ([Natasha Murashkina](https://github.com/murfel)).
+* Fix CPU usage in RabbitMQ (was worsened in 23.2 after [#44404](https://github.com/ClickHouse/ClickHouse/issues/44404)) [#48311](https://github.com/ClickHouse/ClickHouse/pull/48311) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Fix crash in EXPLAIN PIPELINE for Merge over Distributed [#48320](https://github.com/ClickHouse/ClickHouse/pull/48320) ([Azat Khuzhin](https://github.com/azat)).
+* Fix serializing LowCardinality as Arrow dictionary [#48361](https://github.com/ClickHouse/ClickHouse/pull/48361) ([Kruglov Pavel](https://github.com/Avogar)).
+* Reset downloader for cache file segment in TemporaryFileStream [#48386](https://github.com/ClickHouse/ClickHouse/pull/48386) ([Vladimir C](https://github.com/vdimir)).
+* Fix possible SYSTEM SYNC REPLICA stuck in case of DROP/REPLACE PARTITION [#48391](https://github.com/ClickHouse/ClickHouse/pull/48391) ([Azat Khuzhin](https://github.com/azat)).
+* Fix a startup error when loading a distributed table that depends on a dictionary [#48419](https://github.com/ClickHouse/ClickHouse/pull/48419) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
+* Don't check dependencies when renaming system tables automatically [#48431](https://github.com/ClickHouse/ClickHouse/pull/48431) ([Raúl Marín](https://github.com/Algunenano)).
+* Update only affected rows in KeeperMap storage [#48435](https://github.com/ClickHouse/ClickHouse/pull/48435) ([Antonio Andelic](https://github.com/antonio2368)).
+* Fix possible segfault in the VFS cache [#48469](https://github.com/ClickHouse/ClickHouse/pull/48469) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* `toTimeZone` function throws an error when no constant string is provided [#48471](https://github.com/ClickHouse/ClickHouse/pull/48471) ([Jordi Villar](https://github.com/jrdi)).
+* Fix logical error with IPv4 in Protobuf, add support for Date32 [#48486](https://github.com/ClickHouse/ClickHouse/pull/48486) ([Kruglov Pavel](https://github.com/Avogar)).
+* "changed" flag in system.settings was calculated incorrectly for settings with multiple values [#48516](https://github.com/ClickHouse/ClickHouse/pull/48516) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
+* Fix storage `Memory` with enabled compression [#48517](https://github.com/ClickHouse/ClickHouse/pull/48517) ([Anton Popov](https://github.com/CurtizJ)).
+* Fix bracketed-paste mode messing up password input in the event of client reconnection [#48528](https://github.com/ClickHouse/ClickHouse/pull/48528) ([Michael Kolupaev](https://github.com/al13n321)).
+* Fix nested map for keys of IP and UUID types [#48556](https://github.com/ClickHouse/ClickHouse/pull/48556) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
+* Fix an uncaught exception in case of parallel loader for hashed dictionaries [#48571](https://github.com/ClickHouse/ClickHouse/pull/48571) ([Azat Khuzhin](https://github.com/azat)).
+* The `groupArray` aggregate function correctly works for empty result over nullable types [#48593](https://github.com/ClickHouse/ClickHouse/pull/48593) ([lgbo](https://github.com/lgbo-ustc)).
+* Fix a bug in Keeper where a node was sometimes not created with scheme `auth` in the ACL. [#48595](https://github.com/ClickHouse/ClickHouse/pull/48595) ([Aleksei Filatov](https://github.com/aalexfvk)).
+* Allow IPv4 comparison operators with UInt [#48611](https://github.com/ClickHouse/ClickHouse/pull/48611) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
+* Fix possible error from cache [#48636](https://github.com/ClickHouse/ClickHouse/pull/48636) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Async inserts with empty data will no longer throw exception. [#48663](https://github.com/ClickHouse/ClickHouse/pull/48663) ([Anton Popov](https://github.com/CurtizJ)).
+* Fix table dependencies in case of failed RENAME TABLE [#48683](https://github.com/ClickHouse/ClickHouse/pull/48683) ([Azat Khuzhin](https://github.com/azat)).
+* If the primary key has duplicate columns (which is only possible for projections), in previous versions it might lead to a bug [#48838](https://github.com/ClickHouse/ClickHouse/pull/48838) ([Amos Bird](https://github.com/amosbird)).
+* Fix for a race condition in ZooKeeper when joining send_thread/receive_thread [#48849](https://github.com/ClickHouse/ClickHouse/pull/48849) ([Alexander Gololobov](https://github.com/davenger)).
+* Fix unexpected part name error when trying to drop an ignored detached part with zero-copy replication [#48862](https://github.com/ClickHouse/ClickHouse/pull/48862) ([Michael Lex](https://github.com/mlex)).
+* Fix reading `Date32` Parquet/Arrow column into not a `Date32` column [#48864](https://github.com/ClickHouse/ClickHouse/pull/48864) ([Kruglov Pavel](https://github.com/Avogar)).
+* Fix `UNKNOWN_IDENTIFIER` error while selecting from table with row policy and column with dots [#48976](https://github.com/ClickHouse/ClickHouse/pull/48976) ([Kruglov Pavel](https://github.com/Avogar)).
+* Fix aggregation by empty nullable strings [#48999](https://github.com/ClickHouse/ClickHouse/pull/48999) ([LiuNeng](https://github.com/liuneng1994)).
+
### ClickHouse release 23.3 LTS, 2023-03-30
#### Upgrade Notes
diff --git a/CMakeLists.txt b/CMakeLists.txt
index ce615c11f2b..0554403cce5 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -58,7 +58,7 @@ if (ENABLE_CHECK_HEAVY_BUILDS)
set (RLIMIT_CPU 1000)
# gcc10/gcc10/clang -fsanitize=memory is too heavy
- if (SANITIZE STREQUAL "memory" OR COMPILER_GCC)
+ if (SANITIZE STREQUAL "memory")
set (RLIMIT_DATA 10000000000) # 10G
endif()
@@ -286,48 +286,31 @@ set (CMAKE_C_STANDARD 11)
set (CMAKE_C_EXTENSIONS ON) # required by most contribs written in C
set (CMAKE_C_STANDARD_REQUIRED ON)
-if (COMPILER_GCC OR COMPILER_CLANG)
- # Enable C++14 sized global deallocation functions. It should be enabled by setting -std=c++14 but I'm not sure.
- # See https://reviews.llvm.org/D112921
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsized-deallocation")
-endif ()
-
-# falign-functions=32 prevents from random performance regressions with the code change. Thus, providing more stable
-# benchmarks.
-if (COMPILER_GCC OR COMPILER_CLANG)
- set(COMPILER_FLAGS "${COMPILER_FLAGS} -falign-functions=32")
-endif ()
-
-if (ARCH_AMD64)
- # align branches within a 32-Byte boundary to avoid the potential performance loss when code layout change,
- # which makes benchmark results more stable.
- set(BRANCHES_WITHIN_32B_BOUNDARIES "-mbranches-within-32B-boundaries")
- if (COMPILER_GCC)
- # gcc is in assembler, need to add "-Wa," prefix
- set(BRANCHES_WITHIN_32B_BOUNDARIES "-Wa,${BRANCHES_WITHIN_32B_BOUNDARIES}")
- endif()
-
- set(COMPILER_FLAGS "${COMPILER_FLAGS} ${BRANCHES_WITHIN_32B_BOUNDARIES}")
-endif()
-
-if (COMPILER_GCC)
- set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fcoroutines")
-endif ()
-
# Compiler-specific coverage flags e.g. -fcoverage-mapping for gcc
option(WITH_COVERAGE "Profile the resulting binary/binaries" OFF)
-if (WITH_COVERAGE AND COMPILER_CLANG)
- set(COMPILER_FLAGS "${COMPILER_FLAGS} -fprofile-instr-generate -fcoverage-mapping")
- # If we want to disable coverage for specific translation units
- set(WITHOUT_COVERAGE "-fno-profile-instr-generate -fno-coverage-mapping")
-endif()
+if (COMPILER_CLANG)
+ # Enable C++14 sized global deallocation functions. It should be enabled by setting -std=c++14 but I'm not sure.
+ # See https://reviews.llvm.org/D112921
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsized-deallocation")
-if (WITH_COVERAGE AND COMPILER_GCC)
- set(COMPILER_FLAGS "${COMPILER_FLAGS} -fprofile-arcs -ftest-coverage")
- set(COVERAGE_OPTION "-lgcov")
- set(WITHOUT_COVERAGE "-fno-profile-arcs -fno-test-coverage")
-endif()
+ # falign-functions=32 prevents from random performance regressions with the code change. Thus, providing more stable
+ # benchmarks.
+ set(COMPILER_FLAGS "${COMPILER_FLAGS} -falign-functions=32")
+
+ if (ARCH_AMD64)
+ # align branches within a 32-Byte boundary to avoid the potential performance loss when code layout change,
+ # which makes benchmark results more stable.
+ set(BRANCHES_WITHIN_32B_BOUNDARIES "-mbranches-within-32B-boundaries")
+ set(COMPILER_FLAGS "${COMPILER_FLAGS} ${BRANCHES_WITHIN_32B_BOUNDARIES}")
+ endif()
+
+ if (WITH_COVERAGE)
+ set(COMPILER_FLAGS "${COMPILER_FLAGS} -fprofile-instr-generate -fcoverage-mapping")
+ # If we want to disable coverage for specific translation units
+ set(WITHOUT_COVERAGE "-fno-profile-instr-generate -fno-coverage-mapping")
+ endif()
+endif ()
set (COMPILER_FLAGS "${COMPILER_FLAGS}")
@@ -410,7 +393,11 @@ else()
endif ()
option (ENABLE_GWP_ASAN "Enable Gwp-Asan" ON)
-if (NOT OS_LINUX AND NOT OS_ANDROID)
+# We use mmap for allocations more heavily in debug builds,
+# but GWP-ASan also wants to use mmap frequently,
+# and due to a large number of memory mappings,
+# it does not work together well.
+if ((NOT OS_LINUX AND NOT OS_ANDROID) OR (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG"))
set(ENABLE_GWP_ASAN OFF)
endif ()
@@ -434,8 +421,11 @@ endif ()
set (CMAKE_POSTFIX_VARIABLE "CMAKE_${CMAKE_BUILD_TYPE_UC}_POSTFIX")
-set (CMAKE_POSITION_INDEPENDENT_CODE OFF)
-if (OS_LINUX AND NOT (ARCH_AARCH64 OR ARCH_S390X))
+if (NOT SANITIZE)
+ set (CMAKE_POSITION_INDEPENDENT_CODE OFF)
+endif()
+
+if (OS_LINUX AND NOT (ARCH_AARCH64 OR ARCH_S390X) AND NOT SANITIZE)
# Slightly more efficient code can be generated
# It's disabled for ARM because otherwise ClickHouse cannot run on Android.
set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -fno-pie")
diff --git a/README.md b/README.md
index 61d840ecd34..c82c64cfd22 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-[![ClickHouse — open source distributed column-oriented DBMS](https://github.com/ClickHouse/clickhouse-presentations/raw/master/images/logo-400x240.png)](https://clickhouse.com)
+[](https://clickhouse.com?utm_source=github)
ClickHouse® is an open-source column-oriented database management system that allows generating analytical data reports in real-time.
@@ -21,10 +21,11 @@ curl https://clickhouse.com/ | sh
* [Contacts](https://clickhouse.com/company/contact) can help to get your questions answered if there are any.
## Upcoming Events
-* [**ClickHouse Meetup in Austin**](https://www.meetup.com/clickhouse-austin-user-group/events/291486654/) - Mar 30 - The first ClickHouse Meetup in Austin is happening soon! Interested in speaking, let us know!
-* [**v23.3 Release Webinar**](https://clickhouse.com/company/events/v23-3-release-webinar?utm_source=github&utm_medium=social&utm_campaign=release-webinar-2023-02) - Mar 30 - 23.3 is rapidly approaching. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release.
+* [**ClickHouse Spring Meetup in Manhattan**](https://www.meetup.com/clickhouse-new-york-user-group/events/292517734) - April 26 - It's spring, and it's time to meet again in the city! Talks include: "Building a domain specific query language on top of Clickhouse", "A Galaxy of Information", "Our Journey to ClickHouse Cloud from Redshift", and a ClickHouse update!
+* [**v23.4 Release Webinar**](https://clickhouse.com/company/events/v23-4-release-webinar?utm_source=github&utm_medium=social&utm_campaign=release-webinar-2023-04) - April 26 - 23.4 is rapidly approaching. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release.
+* [**ClickHouse Meetup in Berlin**](https://www.meetup.com/clickhouse-berlin-user-group/events/292892466) - May 16 - Save the date! ClickHouse is coming back to Berlin. We’re excited to announce an upcoming ClickHouse Meetup that you won’t want to miss. Join us as we gather together to discuss the latest in the world of ClickHouse and share user stories.
## Recent Recordings
-* **FOSDEM 2023**: In the "Fast and Streaming Data" room Alexey gave a talk entitled "Building Analytical Apps With ClickHouse" that looks at the landscape of data tools, an interesting data set, and how you can interact with data quickly. Check out the recording on **[YouTube](https://www.youtube.com/watch?v=JlcI2Vfz_uk)**.
-* **Recording available**: [**v23.2 Release Webinar**](https://www.youtube.com/watch?v=2o0vRMMIrkY) NTILE Window Function support, Partition Key for GROUP By, io_uring, Apache Iceberg support, Dynamic Disks, integrations updates! Watch it now!
+* **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible, recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Currently featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments".
+* **Recording available**: [**v23.3 Release Webinar**](https://www.youtube.com/watch?v=ISaGUjvBNao) UNDROP TABLE, server settings introspection, nested dynamic disks, MySQL compatibility, parseDateTime, Lightweight Deletes, Parallel Replicas, integrations updates, and so much more! Watch it now!
* **All release webinar recordings**: [YouTube playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3jAlSy1JxyP8zluvXaN3nxU)
diff --git a/SECURITY.md b/SECURITY.md
index 566a1820834..44a122956b4 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -13,9 +13,10 @@ The following versions of ClickHouse server are currently being supported with s
| Version | Supported |
|:-|:-|
+| 23.4 | ✔️ |
| 23.3 | ✔️ |
| 23.2 | ✔️ |
-| 23.1 | ✔️ |
+| 23.1 | ❌ |
| 22.12 | ❌ |
| 22.11 | ❌ |
| 22.10 | ❌ |
diff --git a/base/base/Decimal.h b/base/base/Decimal.h
index 22cb577b1b2..2405ba9ca0d 100644
--- a/base/base/Decimal.h
+++ b/base/base/Decimal.h
@@ -1,5 +1,6 @@
#pragma once
#include
+#include <base/Decimal_fwd.h>
#if !defined(NO_SANITIZE_UNDEFINED)
#if defined(__clang__)
using Decimal64 = Decimal<Int64>;
using Decimal128 = Decimal<Int128>;
using Decimal256 = Decimal<Int256>;
-template <typename T>
-concept is_decimal =
-    std::is_same_v<T, Decimal32>
-    || std::is_same_v<T, Decimal64>
-    || std::is_same_v<T, Decimal128>
-    || std::is_same_v<T, Decimal256>
-    || std::is_same_v<T, DateTime64>;
-
-template <typename T>
-concept is_over_big_int =
-    std::is_same_v<T, Int128>
-    || std::is_same_v<T, UInt128>
-    || std::is_same_v<T, Int256>
-    || std::is_same_v<T, UInt256>
-    || std::is_same_v<T, Decimal128>
-    || std::is_same_v<T, Decimal256>;
-
template <typename T> struct NativeTypeT { using Type = T; };
template <is_decimal T> struct NativeTypeT<T> { using Type = typename T::NativeType; };
template <typename T> using NativeType = typename NativeTypeT<T>::Type;
diff --git a/base/base/Decimal_fwd.h b/base/base/Decimal_fwd.h
new file mode 100644
index 00000000000..589d6224917
--- /dev/null
+++ b/base/base/Decimal_fwd.h
@@ -0,0 +1,46 @@
+#pragma once
+
+#include
+
+namespace wide
+{
+
+template <size_t Bits, typename Signed>
+class integer;
+
+}
+
+using Int128 = wide::integer<128, signed>;
+using UInt128 = wide::integer<128, unsigned>;
+using Int256 = wide::integer<256, signed>;
+using UInt256 = wide::integer<256, unsigned>;
+
+namespace DB
+{
+
+template <typename T> struct Decimal;
+
+using Decimal32 = Decimal<Int32>;
+using Decimal64 = Decimal<Int64>;
+using Decimal128 = Decimal<Int128>;
+using Decimal256 = Decimal<Int256>;
+
+class DateTime64;
+
+template <typename T>
+concept is_decimal =
+    std::is_same_v<T, Decimal32>
+    || std::is_same_v<T, Decimal64>
+    || std::is_same_v<T, Decimal128>
+    || std::is_same_v<T, Decimal256>
+    || std::is_same_v<T, DateTime64>;
+
+template <typename T>
+concept is_over_big_int =
+    std::is_same_v<T, Int128>
+    || std::is_same_v<T, UInt128>
+    || std::is_same_v<T, Int256>
+    || std::is_same_v<T, UInt256>
+    || std::is_same_v<T, Decimal128>
+    || std::is_same_v<T, Decimal256>;
+}
diff --git a/base/base/IPv4andIPv6.h b/base/base/IPv4andIPv6.h
index 0e97d83b07e..7b745ec7b84 100644
--- a/base/base/IPv4andIPv6.h
+++ b/base/base/IPv4andIPv6.h
@@ -51,3 +51,15 @@ namespace DB
};
}
+
+namespace std
+{
+ template <>
+    struct hash<DB::IPv6>
+ {
+ size_t operator()(const DB::IPv6 & x) const
+ {
+            return std::hash<DB::IPv6::UnderlyingType>()(x.toUnderType());
+ }
+ };
+}
diff --git a/base/base/argsToConfig.cpp b/base/base/argsToConfig.cpp
index d7983779d2d..faa1462218d 100644
--- a/base/base/argsToConfig.cpp
+++ b/base/base/argsToConfig.cpp
@@ -3,13 +3,29 @@
#include
#include
-
-void argsToConfig(const Poco::Util::Application::ArgVec & argv, Poco::Util::LayeredConfiguration & config, int priority)
+void argsToConfig(const Poco::Util::Application::ArgVec & argv,
+ Poco::Util::LayeredConfiguration & config,
+ int priority,
+                  const std::unordered_set<std::string> * alias_names)
{
/// Parsing all args and converting to config layer
/// Test: -- --1=1 --1=2 --3 5 7 8 -9 10 -11=12 14= 15== --16==17 --=18 --19= --20 21 22 --23 --24 25 --26 -27 28 ---29=30 -- ----31 32 --33 3-4
Poco::AutoPtr map_config = new Poco::Util::MapConfiguration;
std::string key;
+
+ auto add_arg = [&map_config, &alias_names](const std::string & k, const std::string & v)
+ {
+ map_config->setString(k, v);
+
+ if (alias_names && !alias_names->contains(k))
+ {
+ std::string alias_key = k;
+ std::replace(alias_key.begin(), alias_key.end(), '-', '_');
+ if (alias_names->contains(alias_key))
+ map_config->setString(alias_key, v);
+ }
+ };
+
for (const auto & arg : argv)
{
auto key_start = arg.find_first_not_of('-');
@@ -19,7 +35,7 @@ void argsToConfig(const Poco::Util::Application::ArgVec & argv, Poco::Util::Laye
// old saved '--key', will set to some true value "1"
if (!key.empty() && pos_minus != std::string::npos && pos_minus < key_start)
{
- map_config->setString(key, "1");
+ add_arg(key, "1");
key = "";
}
@@ -29,7 +45,7 @@ void argsToConfig(const Poco::Util::Application::ArgVec & argv, Poco::Util::Laye
{
if (pos_minus == std::string::npos || pos_minus > key_start)
{
- map_config->setString(key, arg);
+ add_arg(key, arg);
}
key = "";
}
@@ -55,7 +71,7 @@ void argsToConfig(const Poco::Util::Application::ArgVec & argv, Poco::Util::Laye
if (arg.size() > pos_eq)
value = arg.substr(pos_eq + 1);
- map_config->setString(key, value);
+ add_arg(key, value);
key = "";
}
diff --git a/base/base/argsToConfig.h b/base/base/argsToConfig.h
index 9b7b44b7b7f..ef34a8a2145 100644
--- a/base/base/argsToConfig.h
+++ b/base/base/argsToConfig.h
@@ -1,6 +1,8 @@
#pragma once
#include <Poco/Util/Application.h>
+#include <string>
+#include <unordered_set>
namespace Poco::Util
{
@@ -8,4 +10,7 @@ class LayeredConfiguration; // NOLINT(cppcoreguidelines-virtual-class-destructor
}
/// Import extra command line arguments to configuration. These are command line arguments after --.
-void argsToConfig(const Poco::Util::Application::ArgVec & argv, Poco::Util::LayeredConfiguration & config, int priority);
+void argsToConfig(const Poco::Util::Application::ArgVec & argv,
+ Poco::Util::LayeredConfiguration & config,
+ int priority,
+                  const std::unordered_set<std::string> * registered_alias_names = nullptr);
diff --git a/base/base/find_symbols.h b/base/base/find_symbols.h
index fe5d3bbadab..a8747ecc9b7 100644
--- a/base/base/find_symbols.h
+++ b/base/base/find_symbols.h
@@ -34,10 +34,52 @@
* If no such characters, returns nullptr.
*/
+struct SearchSymbols
+{
+ static constexpr auto BUFFER_SIZE = 16;
+
+ SearchSymbols() = default;
+
+ explicit SearchSymbols(std::string in)
+ : str(std::move(in))
+ {
+#if defined(__SSE4_2__)
+ if (str.size() > BUFFER_SIZE)
+ {
+ throw std::runtime_error("SearchSymbols can contain at most " + std::to_string(BUFFER_SIZE) + " symbols and " + std::to_string(str.size()) + " was provided\n");
+ }
+
+ char tmp_safety_buffer[BUFFER_SIZE] = {0};
+
+ memcpy(tmp_safety_buffer, str.data(), str.size());
+
+        simd_vector = _mm_loadu_si128(reinterpret_cast<const __m128i *>(tmp_safety_buffer));
+#endif
+ }
+
+#if defined(__SSE4_2__)
+ __m128i simd_vector;
+#endif
+ std::string str;
+};
+
namespace detail
{
template <char ...chars> constexpr bool is_in(char x) { return ((x == chars) || ...); } // NOLINT(misc-redundant-expression)
+static bool is_in(char c, const char * symbols, size_t num_chars)
+{
+ for (size_t i = 0u; i < num_chars; ++i)
+ {
+ if (c == symbols[i])
+ {
+ return true;
+ }
+ }
+
+ return false;
+}
+
#if defined(__SSE2__)
template <char s0>
inline __m128i mm_is_in(__m128i bytes)
@@ -53,6 +95,43 @@ inline __m128i mm_is_in(__m128i bytes)
__m128i eq = mm_is_in<s1, tail...>(bytes);
return _mm_or_si128(eq0, eq);
}
+
+inline __m128i mm_is_in(__m128i bytes, const char * symbols, size_t num_chars)
+{
+ __m128i accumulator = _mm_setzero_si128();
+ for (size_t i = 0; i < num_chars; ++i)
+ {
+ __m128i eq = _mm_cmpeq_epi8(bytes, _mm_set1_epi8(symbols[i]));
+ accumulator = _mm_or_si128(accumulator, eq);
+ }
+
+ return accumulator;
+}
+
+inline std::array<__m128i, 16u> mm_is_in_prepare(const char * symbols, size_t num_chars)
+{
+ std::array<__m128i, 16u> result {};
+
+ for (size_t i = 0; i < num_chars; ++i)
+ {
+ result[i] = _mm_set1_epi8(symbols[i]);
+ }
+
+ return result;
+}
+
+inline __m128i mm_is_in_execute(__m128i bytes, const std::array<__m128i, 16u> & needles)
+{
+ __m128i accumulator = _mm_setzero_si128();
+
+ for (const auto & needle : needles)
+ {
+ __m128i eq = _mm_cmpeq_epi8(bytes, needle);
+ accumulator = _mm_or_si128(accumulator, eq);
+ }
+
+ return accumulator;
+}
#endif
template <bool positive>
@@ -99,6 +178,32 @@ inline const char * find_first_symbols_sse2(const char * const begin, const char
return return_mode == ReturnMode::End ? end : nullptr;
}
+template <bool positive, ReturnMode return_mode>
+inline const char * find_first_symbols_sse2(const char * const begin, const char * const end, const char * symbols, size_t num_chars)
+{
+ const char * pos = begin;
+
+#if defined(__SSE2__)
+ const auto needles = mm_is_in_prepare(symbols, num_chars);
+ for (; pos + 15 < end; pos += 16)
+ {
+ __m128i bytes = _mm_loadu_si128(reinterpret_cast<const __m128i *>(pos));
+
+ __m128i eq = mm_is_in_execute(bytes, needles);
+
+ uint16_t bit_mask = maybe_negate<positive>(uint16_t(_mm_movemask_epi8(eq)));
+ if (bit_mask)
+ return pos + __builtin_ctz(bit_mask);
+ }
+#endif
+
+ for (; pos < end; ++pos)
+ if (maybe_negate<positive>(is_in(*pos, symbols, num_chars)))
+ return pos;
+
+ return return_mode == ReturnMode::End ? end : nullptr;
+}
+
template <bool positive, ReturnMode return_mode, char... symbols>
inline const char * find_last_symbols_sse2(const char * const begin, const char * const end)
@@ -179,6 +284,41 @@ inline const char * find_first_symbols_sse42(const char * const begin, const cha
return return_mode == ReturnMode::End ? end : nullptr;
}
+template <bool positive, ReturnMode return_mode>
+inline const char * find_first_symbols_sse42(const char * const begin, const char * const end, const SearchSymbols & symbols)
+{
+ const char * pos = begin;
+
+ const auto num_chars = symbols.str.size();
+
+#if defined(__SSE4_2__)
+ constexpr int mode = _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT;
+
+ const __m128i set = symbols.simd_vector;
+
+ for (; pos + 15 < end; pos += 16)
+ {
+ __m128i bytes = _mm_loadu_si128(reinterpret_cast<const __m128i *>(pos));
+
+ if constexpr (positive)
+ {
+ if (_mm_cmpestrc(set, num_chars, bytes, 16, mode))
+ return pos + _mm_cmpestri(set, num_chars, bytes, 16, mode);
+ }
+ else
+ {
+ if (_mm_cmpestrc(set, num_chars, bytes, 16, mode | _SIDD_NEGATIVE_POLARITY))
+ return pos + _mm_cmpestri(set, num_chars, bytes, 16, mode | _SIDD_NEGATIVE_POLARITY);
+ }
+ }
+#endif
+
+ for (; pos < end; ++pos)
+ if (maybe_negate<positive>(is_in(*pos, symbols.str.data(), num_chars)))
+ return pos;
+
+ return return_mode == ReturnMode::End ? end : nullptr;
+}
/// NOTE No SSE 4.2 implementation for find_last_symbols_or_null. Not worth to do.
@@ -194,6 +334,17 @@ inline const char * find_first_symbols_dispatch(const char * begin, const char *
return find_first_symbols_sse2<positive, return_mode, symbols...>(begin, end);
}
+template <bool positive, ReturnMode return_mode>
+inline const char * find_first_symbols_dispatch(const std::string_view haystack, const SearchSymbols & symbols)
+{
+#if defined(__SSE4_2__)
+ if (symbols.str.size() >= 5)
+ return find_first_symbols_sse42<positive, return_mode>(haystack.begin(), haystack.end(), symbols);
+ else
+#endif
+ return find_first_symbols_sse2<positive, return_mode>(haystack.begin(), haystack.end(), symbols.str.data(), symbols.str.size());
+}
+
}
@@ -211,6 +362,11 @@ inline char * find_first_symbols(char * begin, char * end)
return const_cast<char *>(detail::find_first_symbols_dispatch<true, detail::ReturnMode::End, symbols...>(begin, end));
}
+inline const char * find_first_symbols(std::string_view haystack, const SearchSymbols & symbols)
+{
+ return detail::find_first_symbols_dispatch<true, detail::ReturnMode::End>(haystack, symbols);
+}
+
template <char... symbols>
inline const char * find_first_not_symbols(const char * begin, const char * end)
{
@@ -223,6 +379,11 @@ inline char * find_first_not_symbols(char * begin, char * end)
return const_cast<char *>(detail::find_first_symbols_dispatch<false, detail::ReturnMode::End, symbols...>(begin, end));
}
+inline const char * find_first_not_symbols(std::string_view haystack, const SearchSymbols & symbols)
+{
+ return detail::find_first_symbols_dispatch<false, detail::ReturnMode::End>(haystack, symbols);
+}
+
template <char... symbols>
inline const char * find_first_symbols_or_null(const char * begin, const char * end)
{
@@ -235,6 +396,11 @@ inline char * find_first_symbols_or_null(char * begin, char * end)
return const_cast<char *>(detail::find_first_symbols_dispatch<true, detail::ReturnMode::Nullptr, symbols...>(begin, end));
}
+inline const char * find_first_symbols_or_null(std::string_view haystack, const SearchSymbols & symbols)
+{
+ return detail::find_first_symbols_dispatch<true, detail::ReturnMode::Nullptr>(haystack, symbols);
+}
+
template <char... symbols>
inline const char * find_first_not_symbols_or_null(const char * begin, const char * end)
{
@@ -247,6 +413,10 @@ inline char * find_first_not_symbols_or_null(char * begin, char * end)
return const_cast<char *>(detail::find_first_symbols_dispatch<false, detail::ReturnMode::Nullptr, symbols...>(begin, end));
}
+inline const char * find_first_not_symbols_or_null(std::string_view haystack, const SearchSymbols & symbols)
+{
+ return detail::find_first_symbols_dispatch<false, detail::ReturnMode::Nullptr>(haystack, symbols);
+}
template <char... symbols>
inline const char * find_last_symbols_or_null(const char * begin, const char * end)
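
A short usage sketch of the runtime-symbol overloads added above (the include path and the delimiter set are illustrative assumptions): build a SearchSymbols once and reuse it; with SSE4.2 and five or more symbols the dispatch takes the PCMPESTRI path, otherwise the SSE2/scalar path.

#include <string_view>
#include <base/find_symbols.h>  // assumed include path

// At most 16 symbols are allowed; the constructor throws beyond that when built with SSE4.2.
static const SearchSymbols delimiters(",;\t\n ");

const char * next_delimiter(std::string_view haystack)
{
    // Returns haystack.end() when none of the symbols occur, like the compile-time overloads.
    return find_first_symbols(haystack, delimiters);
}
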
diff --git a/base/base/unaligned.h b/base/base/unaligned.h
index fcaaa38f2fe..3ab25c803bb 100644
--- a/base/base/unaligned.h
+++ b/base/base/unaligned.h
@@ -5,44 +5,6 @@
#include
-inline void reverseMemcpy(void * dst, const void * src, size_t size)
-{
- uint8_t * uint_dst = reinterpret_cast<uint8_t *>(dst);
- const uint8_t * uint_src = reinterpret_cast<const uint8_t *>(src);
-
- uint_dst += size;
- while (size)
- {
- --uint_dst;
- *uint_dst = *uint_src;
- ++uint_src;
- --size;
- }
-}
-
-template <typename T>
-inline T unalignedLoadLE(const void * address)
-{
- T res {};
- if constexpr (std::endian::native == std::endian::little)
- memcpy(&res, address, sizeof(res));
- else
- reverseMemcpy(&res, address, sizeof(res));
- return res;
-}
-
-
-template <typename T>
-inline void unalignedStoreLE(void * address,
- const typename std::enable_if<std::is_trivially_copyable_v<T>, T>::type & src)
-{
- static_assert(std::is_trivially_copyable_v<T>);
- if constexpr (std::endian::native == std::endian::little)
- memcpy(address, &src, sizeof(src));
- else
- reverseMemcpy(address, &src, sizeof(src));
-}
-
template <typename T>
inline T unalignedLoad(const void * address)
{
@@ -62,3 +24,70 @@ inline void unalignedStore(void * address,
static_assert(std::is_trivially_copyable_v<T>);
memcpy(address, &src, sizeof(src));
}
+
+
+inline void reverseMemcpy(void * dst, const void * src, size_t size)
+{
+ uint8_t * uint_dst = reinterpret_cast<uint8_t *>(dst);
+ const uint8_t * uint_src = reinterpret_cast<const uint8_t *>(src);
+
+ uint_dst += size;
+ while (size)
+ {
+ --uint_dst;
+ *uint_dst = *uint_src;
+ ++uint_src;
+ --size;
+ }
+}
+
+template <std::endian endian, typename T>
+inline T unalignedLoadEndian(const void * address)
+{
+ T res {};
+ if constexpr (std::endian::native == endian)
+ memcpy(&res, address, sizeof(res));
+ else
+ reverseMemcpy(&res, address, sizeof(res));
+ return res;
+}
+
+
+template <std::endian endian, typename T>
+inline void unalignedStoreEndian(void * address, T & src)
+{
+ static_assert(std::is_trivially_copyable_v);
+ if constexpr (std::endian::native == endian)
+ memcpy(address, &src, sizeof(src));
+ else
+ reverseMemcpy(address, &src, sizeof(src));
+}
+
+
+template <typename T>
+inline T unalignedLoadLittleEndian(const void * address)
+{
+ return unalignedLoadEndian<std::endian::little, T>(address);
+}
+
+
+template <typename T>
+inline void unalignedStoreLittleEndian(void * address,
+ const typename std::enable_if<std::is_trivially_copyable_v<T>, T>::type & src)
+{
+ unalignedStoreEndian<std::endian::little>(address, src);
+}
+
+template <typename T>
+inline T unalignedLoadBigEndian(const void * address)
+{
+ return unalignedLoadEndian<std::endian::big, T>(address);
+}
+
+
+template <typename T>
+inline void unalignedStoreBigEndian(void * address,
+ const typename std::enable_if<std::is_trivially_copyable_v<T>, T>::type & src)
+{
+ unalignedStoreEndian<std::endian::big>(address, src);
+}
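
A small round-trip sketch of the renamed endian helpers (buffer and value are illustrative): unalignedStoreLittleEndian always writes the bytes in little-endian order and unalignedLoadLittleEndian reads them back the same way, so the pair is byte-order independent; the BigEndian pair mirrors this.

#include <cstdint>
#include <base/unaligned.h>  // assumed include path

void little_endian_round_trip()
{
    char buf[sizeof(uint32_t)];
    const uint32_t original = 0x11223344;

    // T is not deducible from the enable_if parameter, so it is given explicitly.
    unalignedStoreLittleEndian<uint32_t>(buf, original);
    uint32_t restored = unalignedLoadLittleEndian<uint32_t>(buf);

    (void)restored;  // restored == original on both little- and big-endian hosts
}
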
diff --git a/base/base/wide_integer_impl.h b/base/base/wide_integer_impl.h
index 7cdb527f9cf..ed4570d5e3f 100644
--- a/base/base/wide_integer_impl.h
+++ b/base/base/wide_integer_impl.h
@@ -155,13 +155,13 @@ struct common_type<wide::integer<Bits, Signed>, Arithmetic>
std::is_floating_point_v<Arithmetic>,
Arithmetic,
std::conditional_t<
- sizeof(Arithmetic) < Bits * sizeof(long),
+ sizeof(Arithmetic) * 8 < Bits,
wide::integer<Bits, Signed>,
std::conditional_t<
- Bits * sizeof(long) < sizeof(Arithmetic),
+ Bits < sizeof(Arithmetic) * 8,
Arithmetic,
std::conditional_t<
- Bits * sizeof(long) == sizeof(Arithmetic) && (std::is_same_v<Signed, signed> || std::is_signed_v<Arithmetic>),
+ Bits == sizeof(Arithmetic) * 8 && (std::is_same_v<Signed, signed> || std::is_signed_v<Arithmetic>),
Arithmetic,
wide::integer<Bits, Signed>>>>>;
};
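
The common_type change above is a units correction: Bits is already a bit count, so it is now compared against sizeof(Arithmetic) * 8 (also bits) instead of being mixed with Bits * sizeof(long). A hedged illustration of the intended selection (types and numbers are examples only):

#include <cstddef>
#include <cstdint>

// With Arithmetic = std::int64_t and Bits = 256:
constexpr std::size_t arithmetic_bits = sizeof(std::int64_t) * 8;  // 64
static_assert(arithmetic_bits < 256);  // so the 256-bit wide integer is expected as the common type
// The old comparison, sizeof(Arithmetic) < Bits * sizeof(long), put a byte count on one side and an
// inflated quantity on the other, so it effectively always preferred the wide integer.
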
diff --git a/base/glibc-compatibility/glibc-compatibility.c b/base/glibc-compatibility/glibc-compatibility.c
index 7e8ea5051d7..49bb81a58be 100644
--- a/base/glibc-compatibility/glibc-compatibility.c
+++ b/base/glibc-compatibility/glibc-compatibility.c
@@ -235,6 +235,17 @@ ssize_t getrandom(void *buf, size_t buflen, unsigned flags)
return syscall(SYS_getrandom, buf, buflen, flags);
}
+/* Structure for scatter/gather I/O. */
+struct iovec
+{
+ void *iov_base; /* Pointer to data. */
+ size_t iov_len; /* Length of data. */
+};
+
+ssize_t preadv(int __fd, const struct iovec *__iovec, int __count, __off_t __offset)
+{
+ return syscall(SYS_preadv, __fd, __iovec, __count, (long)(__offset), (long)(__offset>>32));
+}
#include
#include
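
A brief, illustrative caller of the preadv shim above (the file descriptor, buffer sizes, and offset are assumptions); it relies on the struct iovec definition that the shim introduces:

/* Scatter-read two buffers starting at byte offset 4096 of an already-open descriptor.
   The shim forwards the 64-bit offset to SYS_preadv as two long halves. */
static ssize_t read_two_buffers(int fd)
{
    char a[64];
    char b[64];
    struct iovec vec[2] = {{a, sizeof(a)}, {b, sizeof(b)}};
    return preadv(fd, vec, 2, 4096);
}
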
diff --git a/base/poco/Crypto/include/Poco/Crypto/ECKeyImpl.h b/base/poco/Crypto/include/Poco/Crypto/ECKeyImpl.h
index 2a72861a84e..d051ef1b768 100644
--- a/base/poco/Crypto/include/Poco/Crypto/ECKeyImpl.h
+++ b/base/poco/Crypto/include/Poco/Crypto/ECKeyImpl.h
@@ -90,20 +90,6 @@ namespace Crypto
std::string groupName() const;
/// Returns the EC key group name.
- void save(const std::string & publicKeyFile, const std::string & privateKeyFile = "", const std::string & privateKeyPassphrase = "")
- const;
- /// Exports the public and private keys to the given files.
- ///
- /// If an empty filename is specified, the corresponding key
- /// is not exported.
-
- void
- save(std::ostream * pPublicKeyStream, std::ostream * pPrivateKeyStream = 0, const std::string & privateKeyPassphrase = "") const;
- /// Exports the public and private key to the given streams.
- ///
- /// If a null pointer is passed for a stream, the corresponding
- /// key is not exported.
-
static std::string getCurveName(int nid = -1);
/// Returns elliptical curve name corresponding to
/// the given nid; if nid is not found, returns
@@ -150,22 +136,6 @@ namespace Crypto
{
return OBJ_nid2sn(groupId());
}
-
-
- inline void
- ECKeyImpl::save(const std::string & publicKeyFile, const std::string & privateKeyFile, const std::string & privateKeyPassphrase) const
- {
- EVPPKey(_pEC).save(publicKeyFile, privateKeyFile, privateKeyPassphrase);
- }
-
-
- inline void
- ECKeyImpl::save(std::ostream * pPublicKeyStream, std::ostream * pPrivateKeyStream, const std::string & privateKeyPassphrase) const
- {
- EVPPKey(_pEC).save(pPublicKeyStream, pPrivateKeyStream, privateKeyPassphrase);
- }
-
-
}
} // namespace Poco::Crypto
diff --git a/base/poco/Crypto/include/Poco/Crypto/KeyPair.h b/base/poco/Crypto/include/Poco/Crypto/KeyPair.h
index 36adbec6a4d..291a0f8b749 100644
--- a/base/poco/Crypto/include/Poco/Crypto/KeyPair.h
+++ b/base/poco/Crypto/include/Poco/Crypto/KeyPair.h
@@ -56,24 +56,6 @@ namespace Crypto
virtual int size() const;
/// Returns the RSA modulus size.
- virtual void save(
- const std::string & publicKeyPairFile,
- const std::string & privateKeyPairFile = "",
- const std::string & privateKeyPairPassphrase = "") const;
- /// Exports the public and private keys to the given files.
- ///
- /// If an empty filename is specified, the corresponding key
- /// is not exported.
-
- virtual void save(
- std::ostream * pPublicKeyPairStream,
- std::ostream * pPrivateKeyPairStream = 0,
- const std::string & privateKeyPairPassphrase = "") const;
- /// Exports the public and private key to the given streams.
- ///
- /// If a null pointer is passed for a stream, the corresponding
- /// key is not exported.
-
KeyPairImpl::Ptr impl() const;
/// Returns the impl object.
@@ -97,21 +79,6 @@ namespace Crypto
return _pImpl->size();
}
-
- inline void
- KeyPair::save(const std::string & publicKeyFile, const std::string & privateKeyFile, const std::string & privateKeyPassphrase) const
- {
- _pImpl->save(publicKeyFile, privateKeyFile, privateKeyPassphrase);
- }
-
-
- inline void
- KeyPair::save(std::ostream * pPublicKeyStream, std::ostream * pPrivateKeyStream, const std::string & privateKeyPassphrase) const
- {
- _pImpl->save(pPublicKeyStream, pPrivateKeyStream, privateKeyPassphrase);
- }
-
-
inline const std::string & KeyPair::name() const
{
return _pImpl->name();
diff --git a/base/poco/Crypto/include/Poco/Crypto/KeyPairImpl.h b/base/poco/Crypto/include/Poco/Crypto/KeyPairImpl.h
index 155efd20b9c..ecafbef0241 100644
--- a/base/poco/Crypto/include/Poco/Crypto/KeyPairImpl.h
+++ b/base/poco/Crypto/include/Poco/Crypto/KeyPairImpl.h
@@ -55,22 +55,6 @@ namespace Crypto
virtual int size() const = 0;
/// Returns the key size.
- virtual void save(
- const std::string & publicKeyFile,
- const std::string & privateKeyFile = "",
- const std::string & privateKeyPassphrase = "") const = 0;
- /// Exports the public and private keys to the given files.
- ///
- /// If an empty filename is specified, the corresponding key
- /// is not exported.
-
- virtual void save(
- std::ostream * pPublicKeyStream, std::ostream * pPrivateKeyStream = 0, const std::string & privateKeyPassphrase = "") const = 0;
- /// Exports the public and private key to the given streams.
- ///
- /// If a null pointer is passed for a stream, the corresponding
- /// key is not exported.
-
const std::string & name() const;
/// Returns key pair name
diff --git a/base/poco/Crypto/include/Poco/Crypto/RSAKeyImpl.h b/base/poco/Crypto/include/Poco/Crypto/RSAKeyImpl.h
index 4ccbb324c06..010c68bacd7 100644
--- a/base/poco/Crypto/include/Poco/Crypto/RSAKeyImpl.h
+++ b/base/poco/Crypto/include/Poco/Crypto/RSAKeyImpl.h
@@ -96,20 +96,6 @@ namespace Crypto
ByteVec decryptionExponent() const;
/// Returns the RSA decryption exponent.
- void save(const std::string & publicKeyFile, const std::string & privateKeyFile = "", const std::string & privateKeyPassphrase = "")
- const;
- /// Exports the public and private keys to the given files.
- ///
- /// If an empty filename is specified, the corresponding key
- /// is not exported.
-
- void
- save(std::ostream * pPublicKeyStream, std::ostream * pPrivateKeyStream = 0, const std::string & privateKeyPassphrase = "") const;
- /// Exports the public and private key to the given streams.
- ///
- /// If a null pointer is passed for a stream, the corresponding
- /// key is not exported.
-
private:
RSAKeyImpl();
@@ -139,4 +125,4 @@ namespace Crypto
} // namespace Poco::Crypto
-#endif // Crypto_RSAKeyImplImpl_INCLUDED
\ No newline at end of file
+#endif // Crypto_RSAKeyImplImpl_INCLUDED
diff --git a/base/poco/Crypto/src/RSAKeyImpl.cpp b/base/poco/Crypto/src/RSAKeyImpl.cpp
index eb6e758343a..229a3bce828 100644
--- a/base/poco/Crypto/src/RSAKeyImpl.cpp
+++ b/base/poco/Crypto/src/RSAKeyImpl.cpp
@@ -269,103 +269,6 @@ RSAKeyImpl::ByteVec RSAKeyImpl::decryptionExponent() const
}
-void RSAKeyImpl::save(const std::string& publicKeyFile,
- const std::string& privateKeyFile,
- const std::string& privateKeyPassphrase) const
-{
- if (!publicKeyFile.empty())
- {
- BIO* bio = BIO_new(BIO_s_file());
- if (!bio) throw Poco::IOException("Cannot create BIO for writing public key file", publicKeyFile);
- try
- {
- if (BIO_write_filename(bio, const_cast(publicKeyFile.c_str())))
- {
- if (!PEM_write_bio_RSAPublicKey(bio, _pRSA))
- throw Poco::WriteFileException("Failed to write public key to file", publicKeyFile);
- }
- else throw Poco::CreateFileException("Cannot create public key file");
- }
- catch (...)
- {
- BIO_free(bio);
- throw;
- }
- BIO_free(bio);
- }
-
- if (!privateKeyFile.empty())
- {
- BIO* bio = BIO_new(BIO_s_file());
- if (!bio) throw Poco::IOException("Cannot create BIO for writing private key file", privateKeyFile);
- try
- {
- if (BIO_write_filename(bio, const_cast(privateKeyFile.c_str())))
- {
- int rc = 0;
- if (privateKeyPassphrase.empty())
- rc = PEM_write_bio_RSAPrivateKey(bio, _pRSA, 0, 0, 0, 0, 0);
- else
- rc = PEM_write_bio_RSAPrivateKey(bio, _pRSA, EVP_des_ede3_cbc(),
- reinterpret_cast(const_cast(privateKeyPassphrase.c_str())),
- static_cast(privateKeyPassphrase.length()), 0, 0);
- if (!rc) throw Poco::FileException("Failed to write private key to file", privateKeyFile);
- }
- else throw Poco::CreateFileException("Cannot create private key file", privateKeyFile);
- }
- catch (...)
- {
- BIO_free(bio);
- throw;
- }
- BIO_free(bio);
- }
-}
-
-
-void RSAKeyImpl::save(std::ostream* pPublicKeyStream,
- std::ostream* pPrivateKeyStream,
- const std::string& privateKeyPassphrase) const
-{
- if (pPublicKeyStream)
- {
- BIO* bio = BIO_new(BIO_s_mem());
- if (!bio) throw Poco::IOException("Cannot create BIO for writing public key");
- if (!PEM_write_bio_RSAPublicKey(bio, _pRSA))
- {
- BIO_free(bio);
- throw Poco::WriteFileException("Failed to write public key to stream");
- }
- char* pData;
- long size = BIO_get_mem_data(bio, &pData);
- pPublicKeyStream->write(pData, static_cast(size));
- BIO_free(bio);
- }
-
- if (pPrivateKeyStream)
- {
- BIO* bio = BIO_new(BIO_s_mem());
- if (!bio) throw Poco::IOException("Cannot create BIO for writing public key");
- int rc = 0;
- if (privateKeyPassphrase.empty())
- rc = PEM_write_bio_RSAPrivateKey(bio, _pRSA, 0, 0, 0, 0, 0);
- else
- rc = PEM_write_bio_RSAPrivateKey(bio, _pRSA, EVP_des_ede3_cbc(),
- reinterpret_cast(const_cast(privateKeyPassphrase.c_str())),
- static_cast(privateKeyPassphrase.length()), 0, 0);
- if (!rc)
- {
- BIO_free(bio);
- throw Poco::FileException("Failed to write private key to stream");
- }
- char* pData;
- long size = BIO_get_mem_data(bio, &pData);
- pPrivateKeyStream->write(pData, static_cast(size));
- BIO_free(bio);
- }
-}
-
-
RSAKeyImpl::ByteVec RSAKeyImpl::convertToByteVec(const BIGNUM* bn)
{
int numBytes = BN_num_bytes(bn);
@@ -383,4 +286,4 @@ RSAKeyImpl::ByteVec RSAKeyImpl::convertToByteVec(const BIGNUM* bn)
}
-} } // namespace Poco::Crypto
\ No newline at end of file
+} } // namespace Poco::Crypto
diff --git a/base/poco/Data/ODBC/include/Poco/Data/ODBC/Unicode_WIN32.h b/base/poco/Data/ODBC/include/Poco/Data/ODBC/Unicode_WIN32.h
deleted file mode 100644
index 06af853e443..00000000000
--- a/base/poco/Data/ODBC/include/Poco/Data/ODBC/Unicode_WIN32.h
+++ /dev/null
@@ -1,62 +0,0 @@
-//
-// Unicode.h
-//
-// Library: Data/ODBC
-// Package: ODBC
-// Module: Unicode
-//
-// Definition of Unicode_WIN32.
-//
-// Copyright (c) 2006, Applied Informatics Software Engineering GmbH.
-// and Contributors.
-//
-// SPDX-License-Identifier: BSL-1.0
-//
-
-
-#ifndef Data_ODBC_Unicode_WIN32_INCLUDED
-#define Data_ODBC_Unicode_WIN32_INCLUDED
-
-
-namespace Poco
-{
-namespace Data
-{
- namespace ODBC
- {
-
-
- inline void makeUTF16(SQLCHAR * pSQLChar, SQLINTEGER length, std::wstring & target)
- /// Utility function for conversion from UTF-8 to UTF-16
- {
- int len = length;
- if (SQL_NTS == len)
- len = (int)std::strlen((const char *)pSQLChar);
-
- UnicodeConverter::toUTF16((const char *)pSQLChar, len, target);
- }
-
-
- inline void makeUTF8(Poco::Buffer & buffer, SQLINTEGER length, SQLPOINTER pTarget, SQLINTEGER targetLength)
- /// Utility function for conversion from UTF-16 to UTF-8. Length is in bytes.
- {
- if (buffer.sizeBytes() < length)
- throw InvalidArgumentException("Specified length exceeds available length.");
- else if ((length % 2) != 0)
- throw InvalidArgumentException("Length must be an even number.");
-
- length /= sizeof(wchar_t);
- std::string result;
- UnicodeConverter::toUTF8(buffer.begin(), length, result);
-
- std::memset(pTarget, 0, targetLength);
- std::strncpy((char *)pTarget, result.c_str(), result.size() < targetLength ? result.size() : targetLength);
- }
-
-
- }
-}
-} // namespace Poco::Data::ODBC
-
-
-#endif // Data_ODBC_Unicode_WIN32_INCLUDED
diff --git a/base/poco/Data/ODBC/src/Unicode_WIN32.cpp b/base/poco/Data/ODBC/src/Unicode_WIN32.cpp
deleted file mode 100644
index fe637e49b3d..00000000000
--- a/base/poco/Data/ODBC/src/Unicode_WIN32.cpp
+++ /dev/null
@@ -1,761 +0,0 @@
-//
-// Unicode.cpp
-//
-// Library: Data/ODBC
-// Package: ODBC
-// Module: Unicode
-//
-// Copyright (c) 2006, Applied Informatics Software Engineering GmbH.
-// and Contributors.
-//
-// SPDX-License-Identifier: BSL-1.0
-//
-
-
-#include "Poco/Data/ODBC/ODBC.h"
-#include "Poco/Data/ODBC/Utility.h"
-#include "Poco/Data/ODBC/Unicode_WIN32.h"
-#include "Poco/Buffer.h"
-#include "Poco/Exception.h"
-
-
-using Poco::Buffer;
-using Poco::InvalidArgumentException;
-using Poco::NotImplementedException;
-
-
-namespace Poco {
-namespace Data {
-namespace ODBC {
-
-
-SQLRETURN SQLColAttribute(SQLHSTMT hstmt,
- SQLUSMALLINT iCol,
- SQLUSMALLINT iField,
- SQLPOINTER pCharAttr,
- SQLSMALLINT cbCharAttrMax,
- SQLSMALLINT* pcbCharAttr,
- NumAttrPtrType pNumAttr)
-{
- if (isString(pCharAttr, cbCharAttrMax))
- {
- Buffer buffer(stringLength(pCharAttr, cbCharAttrMax));
-
- SQLRETURN rc = SQLColAttributeW(hstmt,
- iCol,
- iField,
- buffer.begin(),
- (SQLSMALLINT) buffer.sizeBytes(),
- pcbCharAttr,
- pNumAttr);
-
- makeUTF8(buffer, *pcbCharAttr, pCharAttr, cbCharAttrMax);
- return rc;
- }
-
- return SQLColAttributeW(hstmt,
- iCol,
- iField,
- pCharAttr,
- cbCharAttrMax,
- pcbCharAttr,
- pNumAttr);
-}
-
-
-SQLRETURN SQLColAttributes(SQLHSTMT hstmt,
- SQLUSMALLINT icol,
- SQLUSMALLINT fDescType,
- SQLPOINTER rgbDesc,
- SQLSMALLINT cbDescMax,
- SQLSMALLINT* pcbDesc,
- SQLLEN* pfDesc)
-{
- return SQLColAttribute(hstmt,
- icol,
- fDescType,
- rgbDesc,
- cbDescMax,
- pcbDesc,
- pfDesc);
-}
-
-
-SQLRETURN SQLConnect(SQLHDBC hdbc,
- SQLCHAR* szDSN,
- SQLSMALLINT cbDSN,
- SQLCHAR* szUID,
- SQLSMALLINT cbUID,
- SQLCHAR* szAuthStr,
- SQLSMALLINT cbAuthStr)
-{
- std::wstring sqlDSN;
- makeUTF16(szDSN, cbDSN, sqlDSN);
-
- std::wstring sqlUID;
- makeUTF16(szUID, cbUID, sqlUID);
-
- std::wstring sqlPWD;
- makeUTF16(szAuthStr, cbAuthStr, sqlPWD);
-
- return SQLConnectW(hdbc,
- (SQLWCHAR*) sqlDSN.c_str(),
- (SQLSMALLINT) sqlDSN.size(),
- (SQLWCHAR*) sqlUID.c_str(),
- (SQLSMALLINT) sqlUID.size(),
- (SQLWCHAR*) sqlPWD.c_str(),
- (SQLSMALLINT) sqlPWD.size());
-}
-
-
-SQLRETURN SQLDescribeCol(SQLHSTMT hstmt,
- SQLUSMALLINT icol,
- SQLCHAR* szColName,
- SQLSMALLINT cbColNameMax,
- SQLSMALLINT* pcbColName,
- SQLSMALLINT* pfSqlType,
- SQLULEN* pcbColDef,
- SQLSMALLINT* pibScale,
- SQLSMALLINT* pfNullable)
-{
- Buffer buffer(cbColNameMax);
- SQLRETURN rc = SQLDescribeColW(hstmt,
- icol,
- (SQLWCHAR*) buffer.begin(),
- (SQLSMALLINT) buffer.size(),
- pcbColName,
- pfSqlType,
- pcbColDef,
- pibScale,
- pfNullable);
-
- makeUTF8(buffer, *pcbColName * sizeof(wchar_t), szColName, cbColNameMax);
- return rc;
-}
-
-
-SQLRETURN SQLError(SQLHENV henv,
- SQLHDBC hdbc,
- SQLHSTMT hstmt,
- SQLCHAR* szSqlState,
- SQLINTEGER* pfNativeError,
- SQLCHAR* szErrorMsg,
- SQLSMALLINT cbErrorMsgMax,
- SQLSMALLINT* pcbErrorMsg)
-{
- throw NotImplementedException("SQLError is obsolete. "
- "Use SQLGetDiagRec instead.");
-}
-
-
-SQLRETURN SQLExecDirect(SQLHSTMT hstmt,
- SQLCHAR* szSqlStr,
- SQLINTEGER cbSqlStr)
-{
- std::wstring sqlStr;
- makeUTF16(szSqlStr, cbSqlStr, sqlStr);
-
- return SQLExecDirectW(hstmt,
- (SQLWCHAR*) sqlStr.c_str(),
- (SQLINTEGER) sqlStr.size());
-}
-
-
-SQLRETURN SQLGetConnectAttr(SQLHDBC hdbc,
- SQLINTEGER fAttribute,
- SQLPOINTER rgbValue,
- SQLINTEGER cbValueMax,
- SQLINTEGER* pcbValue)
-{
- if (isString(rgbValue, cbValueMax))
- {
- Buffer buffer(stringLength(rgbValue, cbValueMax));
-
- SQLRETURN rc = SQLGetConnectAttrW(hdbc,
- fAttribute,
- buffer.begin(),
- (SQLINTEGER) buffer.sizeBytes(),
- pcbValue);
-
- makeUTF8(buffer, *pcbValue, rgbValue, cbValueMax);
- return rc;
- }
-
-
- return SQLGetConnectAttrW(hdbc,
- fAttribute,
- rgbValue,
- cbValueMax,
- pcbValue);
-}
-
-
-SQLRETURN SQLGetCursorName(SQLHSTMT hstmt,
- SQLCHAR* szCursor,
- SQLSMALLINT cbCursorMax,
- SQLSMALLINT* pcbCursor)
-{
- throw NotImplementedException("Not implemented");
-}
-
-
-SQLRETURN SQLSetDescField(SQLHDESC hdesc,
- SQLSMALLINT iRecord,
- SQLSMALLINT iField,
- SQLPOINTER rgbValue,
- SQLINTEGER cbValueMax)
-{
- if (isString(rgbValue, cbValueMax))
- {
- std::wstring str;
- makeUTF16((SQLCHAR*) rgbValue, cbValueMax, str);
-
- SQLRETURN rc = SQLSetDescFieldW(hdesc,
- iRecord,
- iField,
- (SQLPOINTER) str.c_str(),
- (SQLINTEGER) str.size() * sizeof(std::wstring::value_type));
-
- return rc;
- }
-
- return SQLSetDescFieldW(hdesc,
- iRecord,
- iField,
- rgbValue,
- cbValueMax);
-}
-
-
-SQLRETURN SQLGetDescField(SQLHDESC hdesc,
- SQLSMALLINT iRecord,
- SQLSMALLINT iField,
- SQLPOINTER rgbValue,
- SQLINTEGER cbValueMax,
- SQLINTEGER* pcbValue)
-{
- if (isString(rgbValue, cbValueMax))
- {
- Buffer buffer(stringLength(rgbValue, cbValueMax));
-
- SQLRETURN rc = SQLGetDescFieldW(hdesc,
- iRecord,
- iField,
- buffer.begin(),
- (SQLINTEGER) buffer.sizeBytes(),
- pcbValue);
-
- makeUTF8(buffer, *pcbValue, rgbValue, cbValueMax);
- return rc;
- }
-
- return SQLGetDescFieldW(hdesc,
- iRecord,
- iField,
- rgbValue,
- cbValueMax,
- pcbValue);
-}
-
-
-SQLRETURN SQLGetDescRec(SQLHDESC hdesc,
- SQLSMALLINT iRecord,
- SQLCHAR* szName,
- SQLSMALLINT cbNameMax,
- SQLSMALLINT* pcbName,
- SQLSMALLINT* pfType,
- SQLSMALLINT* pfSubType,
- SQLLEN* pLength,
- SQLSMALLINT* pPrecision,
- SQLSMALLINT* pScale,
- SQLSMALLINT* pNullable)
-{
- throw NotImplementedException();
-}
-
-
-SQLRETURN SQLGetDiagField(SQLSMALLINT fHandleType,
- SQLHANDLE handle,
- SQLSMALLINT iRecord,
- SQLSMALLINT fDiagField,
- SQLPOINTER rgbDiagInfo,
- SQLSMALLINT cbDiagInfoMax,
- SQLSMALLINT* pcbDiagInfo)
-{
- if (isString(rgbDiagInfo, cbDiagInfoMax))
- {
- Buffer buffer(stringLength(rgbDiagInfo, cbDiagInfoMax));
-
- SQLRETURN rc = SQLGetDiagFieldW(fHandleType,
- handle,
- iRecord,
- fDiagField,
- buffer.begin(),
- (SQLSMALLINT) buffer.sizeBytes(),
- pcbDiagInfo);
-
- makeUTF8(buffer, *pcbDiagInfo, rgbDiagInfo, cbDiagInfoMax);
- return rc;
- }
-
- return SQLGetDiagFieldW(fHandleType,
- handle,
- iRecord,
- fDiagField,
- rgbDiagInfo,
- cbDiagInfoMax,
- pcbDiagInfo);
-}
-
-
-SQLRETURN SQLGetDiagRec(SQLSMALLINT fHandleType,
- SQLHANDLE handle,
- SQLSMALLINT iRecord,
- SQLCHAR* szSqlState,
- SQLINTEGER* pfNativeError,
- SQLCHAR* szErrorMsg,
- SQLSMALLINT cbErrorMsgMax,
- SQLSMALLINT* pcbErrorMsg)
-{
- const SQLINTEGER stateLen = SQL_SQLSTATE_SIZE + 1;
- Buffer bufState(stateLen);
- Buffer bufErr(cbErrorMsgMax);
-
- SQLRETURN rc = SQLGetDiagRecW(fHandleType,
- handle,
- iRecord,
- bufState.begin(),
- pfNativeError,
- bufErr.begin(),
- (SQLSMALLINT) bufErr.size(),
- pcbErrorMsg);
-
- makeUTF8(bufState, stateLen * sizeof(wchar_t), szSqlState, stateLen);
- makeUTF8(bufErr, *pcbErrorMsg * sizeof(wchar_t), szErrorMsg, cbErrorMsgMax);
-
- return rc;
-}
-
-
-SQLRETURN SQLPrepare(SQLHSTMT hstmt,
- SQLCHAR* szSqlStr,
- SQLINTEGER cbSqlStr)
-{
- std::wstring sqlStr;
- makeUTF16(szSqlStr, cbSqlStr, sqlStr);
-
- return SQLPrepareW(hstmt,
- (SQLWCHAR*) sqlStr.c_str(),
- (SQLINTEGER) sqlStr.size());
-}
-
-
-SQLRETURN SQLSetConnectAttr(SQLHDBC hdbc,
- SQLINTEGER fAttribute,
- SQLPOINTER rgbValue,
- SQLINTEGER cbValue)
-{
- if (isString(rgbValue, cbValue))
- {
- std::wstring str;
- makeUTF16((SQLCHAR*) rgbValue, cbValue, str);
-
- return SQLSetConnectAttrW(hdbc,
- fAttribute,
- (SQLWCHAR*) str.c_str(),
- (SQLINTEGER) str.size() * sizeof(std::wstring::value_type));
- }
-
- return SQLSetConnectAttrW(hdbc,
- fAttribute,
- rgbValue,
- cbValue);
-}
-
-
-SQLRETURN SQLSetCursorName(SQLHSTMT hstmt,
- SQLCHAR* szCursor,
- SQLSMALLINT cbCursor)
-{
- throw NotImplementedException("Not implemented");
-}
-
-
-SQLRETURN SQLSetStmtAttr(SQLHSTMT hstmt,
- SQLINTEGER fAttribute,
- SQLPOINTER rgbValue,
- SQLINTEGER cbValueMax)
-{
- if (isString(rgbValue, cbValueMax))
- {
- std::wstring str;
- makeUTF16((SQLCHAR*) rgbValue, cbValueMax, str);
-
- return SQLSetStmtAttrW(hstmt,
- fAttribute,
- (SQLPOINTER) str.c_str(),
- (SQLINTEGER) str.size());
- }
-
- return SQLSetStmtAttrW(hstmt,
- fAttribute,
- rgbValue,
- cbValueMax);
-}
-
-
-SQLRETURN SQLGetStmtAttr(SQLHSTMT hstmt,
- SQLINTEGER fAttribute,
- SQLPOINTER rgbValue,
- SQLINTEGER cbValueMax,
- SQLINTEGER* pcbValue)
-{
- if (isString(rgbValue, cbValueMax))
- {
- Buffer buffer(stringLength(rgbValue, cbValueMax));
-
- return SQLGetStmtAttrW(hstmt,
- fAttribute,
- (SQLPOINTER) buffer.begin(),
- (SQLINTEGER) buffer.sizeBytes(),
- pcbValue);
- }
-
- return SQLGetStmtAttrW(hstmt,
- fAttribute,
- rgbValue,
- cbValueMax,
- pcbValue);
-}
-
-
-SQLRETURN SQLColumns(SQLHSTMT hstmt,
- SQLCHAR* szCatalogName,
- SQLSMALLINT cbCatalogName,
- SQLCHAR* szSchemaName,
- SQLSMALLINT cbSchemaName,
- SQLCHAR* szTableName,
- SQLSMALLINT cbTableName,
- SQLCHAR* szColumnName,
- SQLSMALLINT cbColumnName)
-{
- throw NotImplementedException();
-}
-
-
-SQLRETURN SQLGetConnectOption(SQLHDBC hdbc,
- SQLUSMALLINT fOption,
- SQLPOINTER pvParam)
-{
- throw NotImplementedException();
-}
-
-
-SQLRETURN SQLGetInfo(SQLHDBC hdbc,
- SQLUSMALLINT fInfoType,
- SQLPOINTER rgbInfoValue,
- SQLSMALLINT cbInfoValueMax,
- SQLSMALLINT* pcbInfoValue)
-{
- if (cbInfoValueMax)
- {
- Buffer buffer(cbInfoValueMax);
-
- SQLRETURN rc = SQLGetInfoW(hdbc,
- fInfoType,
- (SQLPOINTER) buffer.begin(),
- (SQLSMALLINT) buffer.sizeBytes(),
- pcbInfoValue);
-
- makeUTF8(buffer, *pcbInfoValue, rgbInfoValue, cbInfoValueMax);
-
- return rc;
- }
-
- return SQLGetInfoW(hdbc,
- fInfoType,
- rgbInfoValue,
- cbInfoValueMax,
- pcbInfoValue);
-}
-
-
-SQLRETURN SQLGetTypeInfo(SQLHSTMT StatementHandle, SQLSMALLINT DataType)
-{
- return SQLGetTypeInfoW(StatementHandle, DataType);
-}
-
-
-SQLRETURN SQLSetConnectOption(SQLHDBC hdbc,
- SQLUSMALLINT fOption,
- SQLULEN vParam)
-{
- throw NotImplementedException();
-}
-
-
-SQLRETURN SQLSpecialColumns(SQLHSTMT hstmt,
- SQLUSMALLINT fColType,
- SQLCHAR* szCatalogName,
- SQLSMALLINT cbCatalogName,
- SQLCHAR* szSchemaName,
- SQLSMALLINT cbSchemaName,
- SQLCHAR* szTableName,
- SQLSMALLINT cbTableName,
- SQLUSMALLINT fScope,
- SQLUSMALLINT fNullable)
-{
- throw NotImplementedException();
-}
-
-
-SQLRETURN SQLStatistics(SQLHSTMT hstmt,
- SQLCHAR* szCatalogName,
- SQLSMALLINT cbCatalogName,
- SQLCHAR* szSchemaName,
- SQLSMALLINT cbSchemaName,
- SQLCHAR* szTableName,
- SQLSMALLINT cbTableName,
- SQLUSMALLINT fUnique,
- SQLUSMALLINT fAccuracy)
-{
- throw NotImplementedException();
-}
-
-
-SQLRETURN SQLTables(SQLHSTMT hstmt,
- SQLCHAR* szCatalogName,
- SQLSMALLINT cbCatalogName,
- SQLCHAR* szSchemaName,
- SQLSMALLINT cbSchemaName,
- SQLCHAR* szTableName,
- SQLSMALLINT cbTableName,
- SQLCHAR* szTableType,
- SQLSMALLINT cbTableType)
-{
- throw NotImplementedException();
-}
-
-
-SQLRETURN SQLDataSources(SQLHENV henv,
- SQLUSMALLINT fDirection,
- SQLCHAR* szDSN,
- SQLSMALLINT cbDSNMax,
- SQLSMALLINT* pcbDSN,
- SQLCHAR* szDesc,
- SQLSMALLINT cbDescMax,
- SQLSMALLINT* pcbDesc)
-{
- Buffer bufDSN(cbDSNMax);
- Buffer bufDesc(cbDescMax);
-
- SQLRETURN rc = SQLDataSourcesW(henv,
- fDirection,
- bufDSN.begin(),
- (SQLSMALLINT) bufDSN.size(),
- pcbDSN,
- bufDesc.begin(),
- (SQLSMALLINT) bufDesc.size(),
- pcbDesc);
-
- makeUTF8(bufDSN, *pcbDSN * sizeof(wchar_t), szDSN, cbDSNMax);
- makeUTF8(bufDesc, *pcbDesc * sizeof(wchar_t), szDesc, cbDescMax);
-
- return rc;
-}
-
-
-SQLRETURN SQLDriverConnect(SQLHDBC hdbc,
- SQLHWND hwnd,
- SQLCHAR* szConnStrIn,
- SQLSMALLINT cbConnStrIn,
- SQLCHAR* szConnStrOut,
- SQLSMALLINT cbConnStrOutMax,
- SQLSMALLINT* pcbConnStrOut,
- SQLUSMALLINT fDriverCompletion)
-{
- std::wstring connStrIn;
- int len = cbConnStrIn;
- if (SQL_NTS == len)
- len = (int) std::strlen((const char*) szConnStrIn);
-
- Poco::UnicodeConverter::toUTF16((const char *) szConnStrIn, len, connStrIn);
-
- Buffer bufOut(cbConnStrOutMax);
- SQLRETURN rc = SQLDriverConnectW(hdbc,
- hwnd,
- (SQLWCHAR*) connStrIn.c_str(),
- (SQLSMALLINT) connStrIn.size(),
- bufOut.begin(),
- (SQLSMALLINT) bufOut.size(),
- pcbConnStrOut,
- fDriverCompletion);
-
- if (!Utility::isError(rc))
- makeUTF8(bufOut, *pcbConnStrOut * sizeof(wchar_t), szConnStrOut, cbConnStrOutMax);
-
- return rc;
-}
-
-
-SQLRETURN SQLBrowseConnect(SQLHDBC hdbc,
- SQLCHAR* szConnStrIn,
- SQLSMALLINT cbConnStrIn,
- SQLCHAR* szConnStrOut,
- SQLSMALLINT cbConnStrOutMax,
- SQLSMALLINT* pcbConnStrOut)
-{
- std::wstring str;
- makeUTF16(szConnStrIn, cbConnStrIn, str);
-
- Buffer bufConnStrOut(cbConnStrOutMax);
-
- SQLRETURN rc = SQLBrowseConnectW(hdbc,
- (SQLWCHAR*) str.c_str(),
- (SQLSMALLINT) str.size(),
- bufConnStrOut.begin(),
- (SQLSMALLINT) bufConnStrOut.size(),
- pcbConnStrOut);
-
- makeUTF8(bufConnStrOut, *pcbConnStrOut * sizeof(wchar_t), szConnStrOut, cbConnStrOutMax);
-
- return rc;
-}
-
-
-SQLRETURN SQLColumnPrivileges(SQLHSTMT hstmt,
- SQLCHAR* szCatalogName,
- SQLSMALLINT cbCatalogName,
- SQLCHAR* szSchemaName,
- SQLSMALLINT cbSchemaName,
- SQLCHAR* szTableName,
- SQLSMALLINT cbTableName,
- SQLCHAR* szColumnName,
- SQLSMALLINT cbColumnName)
-{
- throw NotImplementedException();
-}
-
-
-SQLRETURN SQLForeignKeys(SQLHSTMT hstmt,
- SQLCHAR* szPkCatalogName,
- SQLSMALLINT cbPkCatalogName,
- SQLCHAR* szPkSchemaName,
- SQLSMALLINT cbPkSchemaName,
- SQLCHAR* szPkTableName,
- SQLSMALLINT cbPkTableName,
- SQLCHAR* szFkCatalogName,
- SQLSMALLINT cbFkCatalogName,
- SQLCHAR* szFkSchemaName,
- SQLSMALLINT cbFkSchemaName,
- SQLCHAR* szFkTableName,
- SQLSMALLINT cbFkTableName)
-{
- throw NotImplementedException();
-}
-
-
-SQLRETURN SQLNativeSql(SQLHDBC hdbc,
- SQLCHAR* szSqlStrIn,
- SQLINTEGER cbSqlStrIn,
- SQLCHAR* szSqlStr,
- SQLINTEGER cbSqlStrMax,
- SQLINTEGER* pcbSqlStr)
-{
- std::wstring str;
- makeUTF16(szSqlStrIn, cbSqlStrIn, str);
-
- Buffer bufSQLOut(cbSqlStrMax);
-
- SQLRETURN rc = SQLNativeSqlW(hdbc,
- (SQLWCHAR*) str.c_str(),
- (SQLINTEGER) str.size(),
- bufSQLOut.begin(),
- (SQLINTEGER) bufSQLOut.size(),
- pcbSqlStr);
-
- makeUTF8(bufSQLOut, *pcbSqlStr * sizeof(wchar_t), szSqlStr, cbSqlStrMax);
-
- return rc;
-}
-
-
-SQLRETURN SQLPrimaryKeys(SQLHSTMT hstmt,
- SQLCHAR* szCatalogName,
- SQLSMALLINT cbCatalogName,
- SQLCHAR* szSchemaName,
- SQLSMALLINT cbSchemaName,
- SQLCHAR* szTableName,
- SQLSMALLINT cbTableName)
-{
- throw NotImplementedException();
-}
-
-
-SQLRETURN SQLProcedureColumns(SQLHSTMT hstmt,
- SQLCHAR* szCatalogName,
- SQLSMALLINT cbCatalogName,
- SQLCHAR* szSchemaName,
- SQLSMALLINT cbSchemaName,
- SQLCHAR* szProcName,
- SQLSMALLINT cbProcName,
- SQLCHAR* szColumnName,
- SQLSMALLINT cbColumnName)
-{
- throw NotImplementedException();
-}
-
-
-SQLRETURN SQLProcedures(SQLHSTMT hstmt,
- SQLCHAR* szCatalogName,
- SQLSMALLINT cbCatalogName,
- SQLCHAR* szSchemaName,
- SQLSMALLINT cbSchemaName,
- SQLCHAR* szProcName,
- SQLSMALLINT cbProcName)
-{
- throw NotImplementedException();
-}
-
-
-SQLRETURN SQLTablePrivileges(SQLHSTMT hstmt,
- SQLCHAR* szCatalogName,
- SQLSMALLINT cbCatalogName,
- SQLCHAR* szSchemaName,
- SQLSMALLINT cbSchemaName,
- SQLCHAR* szTableName,
- SQLSMALLINT cbTableName)
-{
- throw NotImplementedException();
-}
-
-
-SQLRETURN SQLDrivers(SQLHENV henv,
- SQLUSMALLINT fDirection,
- SQLCHAR* szDriverDesc,
- SQLSMALLINT cbDriverDescMax,
- SQLSMALLINT* pcbDriverDesc,
- SQLCHAR* szDriverAttributes,
- SQLSMALLINT cbDrvrAttrMax,
- SQLSMALLINT* pcbDrvrAttr)
-{
- Buffer bufDriverDesc(cbDriverDescMax);
- Buffer bufDriverAttr(cbDrvrAttrMax);
-
- SQLRETURN rc = SQLDriversW(henv,
- fDirection,
- bufDriverDesc.begin(),
- (SQLSMALLINT) bufDriverDesc.size(),
- pcbDriverDesc,
- bufDriverAttr.begin(),
- (SQLSMALLINT) bufDriverAttr.size(),
- pcbDrvrAttr);
-
- makeUTF8(bufDriverDesc, *pcbDriverDesc * sizeof(wchar_t), szDriverDesc, cbDriverDescMax);
- makeUTF8(bufDriverAttr, *pcbDrvrAttr * sizeof(wchar_t), szDriverAttributes, cbDrvrAttrMax);
-
- return rc;
-}
-
-
-} } } // namespace Poco::Data::ODBC
diff --git a/base/poco/Data/include/Poco/Data/AutoTransaction.h b/base/poco/Data/include/Poco/Data/AutoTransaction.h
deleted file mode 100644
index a222bd27afe..00000000000
--- a/base/poco/Data/include/Poco/Data/AutoTransaction.h
+++ /dev/null
@@ -1,37 +0,0 @@
-//
-// AutoTransaction.h
-//
-// Library: Data
-// Package: DataCore
-// Module: AutoTransaction
-//
-// Forward header for the Transaction class.
-//
-// Copyright (c) 2006, Applied Informatics Software Engineering GmbH.
-// and Contributors.
-//
-// SPDX-License-Identifier: BSL-1.0
-//
-
-
-#ifndef Data_AutoTransaction_INCLUDED
-#define Data_AutoTransaction_INCLUDED
-
-
-#include "Poco/Data/Transaction.h"
-
-
-namespace Poco
-{
-namespace Data
-{
-
-
- typedef Transaction AutoTransaction;
-
-
-}
-} // namespace Poco::Data
-
-
-#endif // Data_AutoTransaction_INCLUDED
diff --git a/base/poco/Data/include/Poco/Data/DynamicLOB.h b/base/poco/Data/include/Poco/Data/DynamicLOB.h
deleted file mode 100644
index 749b269ffac..00000000000
--- a/base/poco/Data/include/Poco/Data/DynamicLOB.h
+++ /dev/null
@@ -1,54 +0,0 @@
-//
-// DynamicLOB.h
-//
-// Library: Data
-// Package: DataCore
-// Module: DynamicLOB
-//
-// Definition of the Poco::Dynamic::Var LOB cast operators.
-//
-// Copyright (c) 2006, Applied Informatics Software Engineering GmbH.
-// and Contributors.
-//
-// SPDX-License-Identifier: BSL-1.0
-//
-
-
-#ifndef Data_DynamicLOB_INCLUDED
-#define Data_DynamicLOB_INCLUDED
-
-
-#include "Poco/Data/Data.h"
-#include "Poco/Data/LOB.h"
-#include "Poco/Dynamic/Var.h"
-
-
-namespace Poco
-{
-namespace Data
-{
-
- template
- class LOB;
- typedef LOB BLOB;
- typedef LOB CLOB;
-
-}
-} // namespace Poco::Data
-
-
-namespace Poco
-{
-namespace Dynamic
-{
-
- template <>
- Data_API Var::operator Poco::Data::CLOB() const;
- template <>
- Data_API Var::operator Poco::Data::BLOB() const;
-
-}
-} // namespace Poco::Dynamic
-
-
-#endif // Data_DynamicLOB_INCLUDED
diff --git a/base/poco/Data/include/Poco/Data/LOBStream.h b/base/poco/Data/include/Poco/Data/LOBStream.h
deleted file mode 100644
index 23346224c0f..00000000000
--- a/base/poco/Data/include/Poco/Data/LOBStream.h
+++ /dev/null
@@ -1,149 +0,0 @@
-//
-// LOBStream.h
-//
-// Library: Data
-// Package: DataCore
-// Module: LOBStream
-//
-// Definition of the LOBStream class.
-//
-// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
-// and Contributors.
-//
-// SPDX-License-Identifier: BSL-1.0
-//
-
-
-#ifndef Data_LOBStream_INCLUDED
-#define Data_LOBStream_INCLUDED
-
-
-#include
-#include
-#include "Poco/Data/LOB.h"
-#include "Poco/Foundation.h"
-#include "Poco/UnbufferedStreamBuf.h"
-
-
-namespace Poco
-{
-namespace Data
-{
-
-
- template
- class LOBStreamBuf : public BasicUnbufferedStreamBuf>
- /// This is the streambuf class used for reading from and writing to a LOB.
- {
- public:
- LOBStreamBuf(LOB & lob) : _lob(lob), _it(_lob.begin())
- /// Creates LOBStreamBuf.
- {
- }
-
-
- ~LOBStreamBuf()
- /// Destroys LOBStreamBuf.
- {
- }
-
- protected:
- typedef std::char_traits TraitsType;
- typedef BasicUnbufferedStreamBuf BaseType;
-
- typename BaseType::int_type readFromDevice()
- {
- if (_it != _lob.end())
- return BaseType::charToInt(*_it++);
- else
- return -1;
- }
-
- typename BaseType::int_type writeToDevice(T c)
- {
- _lob.appendRaw(&c, 1);
- return 1;
- }
-
- private:
- LOB & _lob;
- typename LOB::Iterator _it;
- };
-
-
- template
- class LOBIOS : public virtual std::ios
- /// The base class for LOBInputStream and
- /// LOBOutputStream.
- ///
- /// This class is needed to ensure the correct initialization
- /// order of the stream buffer and base classes.
- {
- public:
- LOBIOS(LOB & lob, openmode mode) : _buf(lob)
- /// Creates the LOBIOS with the given LOB.
- {
- poco_ios_init(&_buf);
- }
-
- ~LOBIOS()
- /// Destroys the LOBIOS.
- {
- }
-
- LOBStreamBuf * rdbuf()
- /// Returns a pointer to the internal LOBStreamBuf.
- {
- return &_buf;
- }
-
- protected:
- LOBStreamBuf _buf;
- };
-
-
- template
- class LOBOutputStream : public LOBIOS, public std::basic_ostream>
- /// An output stream for writing to a LOB.
- {
- public:
- LOBOutputStream(LOB & lob) : LOBIOS(lob, std::ios::out), std::ostream(LOBIOS::rdbuf())
- /// Creates the LOBOutputStream with the given LOB.
- {
- }
-
- ~LOBOutputStream()
- /// Destroys the LOBOutputStream.
- {
- }
- };
-
-
- template
- class LOBInputStream : public LOBIOS, public std::basic_istream>
- /// An input stream for reading from a LOB.
- {
- public:
- LOBInputStream(LOB & lob) : LOBIOS(lob, std::ios::in), std::istream(LOBIOS::rdbuf())
- /// Creates the LOBInputStream with the given LOB.
- {
- }
-
- ~LOBInputStream()
- /// Destroys the LOBInputStream.
- {
- }
- };
-
-
- typedef LOBOutputStream BLOBOutputStream;
- typedef LOBOutputStream CLOBOutputStream;
-
- typedef LOBInputStream BLOBInputStream;
- typedef LOBInputStream CLOBInputStream;
-
-}
-} // namespace Poco::Data
-
-
-#endif // Data_LOBStream_INCLUDED
diff --git a/base/poco/Data/src/DynamicLOB.cpp b/base/poco/Data/src/DynamicLOB.cpp
deleted file mode 100644
index 5dfe3df8574..00000000000
--- a/base/poco/Data/src/DynamicLOB.cpp
+++ /dev/null
@@ -1,74 +0,0 @@
-//
-// DynamicLOB.cpp
-//
-// Library: Data
-// Package: DataCore
-// Module: DynamicLOB
-//
-// Copyright (c) 2006, Applied Informatics Software Engineering GmbH.
-// and Contributors.
-//
-// SPDX-License-Identifier: BSL-1.0
-//
-
-
-#ifdef __GNUC__
-// TODO: determine g++ version able to do the right thing without these specializations
-
-#include "Poco/Data/DynamicLOB.h"
-#include "Poco/Data/LOB.h"
-#include "Poco/Dynamic/Var.h"
-
-
-namespace Poco {
-namespace Dynamic {
-
-
-using Poco::Data::CLOB;
-using Poco::Data::BLOB;
-
-
-template <>
-Var::operator CLOB () const
-{
- VarHolder* pHolder = content();
-
- if (!pHolder)
- throw InvalidAccessException("Can not convert empty value.");
-
- if (typeid(CLOB) == pHolder->type())
- return extract();
- else
- {
- std::string result;
- pHolder->convert(result);
- return CLOB(result);
- }
-}
-
-
-template <>
-Var::operator BLOB () const
-{
- VarHolder* pHolder = content();
-
- if (!pHolder)
- throw InvalidAccessException("Can not convert empty value.");
-
- if (typeid(BLOB) == pHolder->type())
- return extract();
- else
- {
- std::string result;
- pHolder->convert(result);
- return BLOB(reinterpret_cast(result.data()),
- result.size());
- }
-}
-
-
-} } // namespace Poco::Data
-
-
-#endif // __GNUC__
-
diff --git a/base/poco/Foundation/CMakeLists.txt b/base/poco/Foundation/CMakeLists.txt
index dceb18e68cc..358f49ed055 100644
--- a/base/poco/Foundation/CMakeLists.txt
+++ b/base/poco/Foundation/CMakeLists.txt
@@ -31,8 +31,6 @@ set (SRCS
src/ASCIIEncoding.cpp
src/AsyncChannel.cpp
src/AtomicCounter.cpp
- src/Base32Decoder.cpp
- src/Base32Encoder.cpp
src/Base64Decoder.cpp
src/Base64Encoder.cpp
src/BinaryReader.cpp
@@ -81,9 +79,6 @@ set (SRCS
src/HexBinaryEncoder.cpp
src/InflatingStream.cpp
src/JSONString.cpp
- src/Latin1Encoding.cpp
- src/Latin2Encoding.cpp
- src/Latin9Encoding.cpp
src/LineEndingConverter.cpp
src/LocalDateTime.cpp
src/LogFile.cpp
@@ -91,8 +86,6 @@ set (SRCS
src/LoggingFactory.cpp
src/LoggingRegistry.cpp
src/LogStream.cpp
- src/Manifest.cpp
- src/MD4Engine.cpp
src/MD5Engine.cpp
src/MemoryPool.cpp
src/MemoryStream.cpp
@@ -113,7 +106,6 @@ set (SRCS
src/PatternFormatter.cpp
src/Pipe.cpp
src/PipeImpl.cpp
- src/PipeStream.cpp
src/PriorityNotificationQueue.cpp
src/Process.cpp
src/PurgeStrategy.cpp
@@ -136,10 +128,8 @@ set (SRCS
src/StreamChannel.cpp
src/StreamConverter.cpp
src/StreamCopier.cpp
- src/StreamTokenizer.cpp
src/String.cpp
src/StringTokenizer.cpp
- src/SynchronizedObject.cpp
src/SyslogChannel.cpp
src/Task.cpp
src/TaskManager.cpp
@@ -175,9 +165,6 @@ set (SRCS
src/VarHolder.cpp
src/VarIterator.cpp
src/Void.cpp
- src/Windows1250Encoding.cpp
- src/Windows1251Encoding.cpp
- src/Windows1252Encoding.cpp
)
add_library (_poco_foundation ${SRCS})
@@ -233,7 +220,8 @@ target_link_libraries (_poco_foundation
PRIVATE
Poco::Foundation::PCRE
ch_contrib::zlib
- ch_contrib::lz4)
+ ch_contrib::lz4
+ ch_contrib::double_conversion)
if(OS_DARWIN AND ARCH_AARCH64)
target_compile_definitions (_poco_foundation
diff --git a/base/poco/Foundation/include/Poco/Base32Decoder.h b/base/poco/Foundation/include/Poco/Base32Decoder.h
deleted file mode 100644
index 96b6f013db8..00000000000
--- a/base/poco/Foundation/include/Poco/Base32Decoder.h
+++ /dev/null
@@ -1,105 +0,0 @@
-//
-// Base32Decoder.h
-//
-// Library: Foundation
-// Package: Streams
-// Module: Base32
-//
-// Definition of class Base32Decoder.
-//
-// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
-// and Contributors.
-//
-// SPDX-License-Identifier: BSL-1.0
-//
-
-
-#ifndef Foundation_Base32Decoder_INCLUDED
-#define Foundation_Base32Decoder_INCLUDED
-
-
-#include
-#include "Poco/Foundation.h"
-#include "Poco/UnbufferedStreamBuf.h"
-
-
-namespace Poco
-{
-
-
-class Foundation_API Base32DecoderBuf : public UnbufferedStreamBuf
-/// This streambuf base32-decodes all data read
-/// from the istream connected to it.
-///
-/// Note: For performance reasons, the characters
-/// are read directly from the given istream's
-/// underlying streambuf, so the state
-/// of the istream will not reflect that of
-/// its streambuf.
-{
-public:
- Base32DecoderBuf(std::istream & istr);
- ~Base32DecoderBuf();
-
-private:
- int readFromDevice();
- int readOne();
-
- unsigned char _group[8];
- int _groupLength;
- int _groupIndex;
- std::streambuf & _buf;
-
- static unsigned char IN_ENCODING[256];
- static bool IN_ENCODING_INIT;
-
-private:
- Base32DecoderBuf(const Base32DecoderBuf &);
- Base32DecoderBuf & operator=(const Base32DecoderBuf &);
-};
-
-
-class Foundation_API Base32DecoderIOS : public virtual std::ios
-/// The base class for Base32Decoder.
-///
-/// This class is needed to ensure the correct initialization
-/// order of the stream buffer and base classes.
-{
-public:
- Base32DecoderIOS(std::istream & istr);
- ~Base32DecoderIOS();
- Base32DecoderBuf * rdbuf();
-
-protected:
- Base32DecoderBuf _buf;
-
-private:
- Base32DecoderIOS(const Base32DecoderIOS &);
- Base32DecoderIOS & operator=(const Base32DecoderIOS &);
-};
-
-
-class Foundation_API Base32Decoder : public Base32DecoderIOS, public std::istream
-/// This istream base32-decodes all data
-/// read from the istream connected to it.
-///
-/// Note: For performance reasons, the characters
-/// are read directly from the given istream's
-/// underlying streambuf, so the state
-/// of the istream will not reflect that of
-/// its streambuf.
-{
-public:
- Base32Decoder(std::istream & istr);
- ~Base32Decoder();
-
-private:
- Base32Decoder(const Base32Decoder &);
- Base32Decoder & operator=(const Base32Decoder &);
-};
-
-
-} // namespace Poco
-
-
-#endif // Foundation_Base32Decoder_INCLUDED
diff --git a/base/poco/Foundation/include/Poco/Base32Encoder.h b/base/poco/Foundation/include/Poco/Base32Encoder.h
deleted file mode 100644
index ced0dd6f3bb..00000000000
--- a/base/poco/Foundation/include/Poco/Base32Encoder.h
+++ /dev/null
@@ -1,111 +0,0 @@
-//
-// Base32Encoder.h
-//
-// Library: Foundation
-// Package: Streams
-// Module: Base32
-//
-// Definition of class Base32Encoder.
-//
-// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
-// and Contributors.
-//
-// SPDX-License-Identifier: BSL-1.0
-//
-
-
-#ifndef Foundation_Base32Encoder_INCLUDED
-#define Foundation_Base32Encoder_INCLUDED
-
-
-#include
-#include "Poco/Foundation.h"
-#include "Poco/UnbufferedStreamBuf.h"
-
-
-namespace Poco
-{
-
-
-class Foundation_API Base32EncoderBuf : public UnbufferedStreamBuf
-/// This streambuf base32-encodes all data written
-/// to it and forwards it to a connected
-/// ostream.
-///
-/// Note: The characters are directly written
-/// to the ostream's streambuf, thus bypassing
-/// the ostream. The ostream's state is therefore
-/// not updated to match the buffer's state.
-{
-public:
- Base32EncoderBuf(std::ostream & ostr, bool padding = true);
- ~Base32EncoderBuf();
-
- int close();
- /// Closes the stream buffer.
-
-private:
- int writeToDevice(char c);
-
- unsigned char _group[5];
- int _groupLength;
- std::streambuf & _buf;
- bool _doPadding;
-
- static const unsigned char OUT_ENCODING[32];
-
- friend class Base32DecoderBuf;
-
- Base32EncoderBuf(const Base32EncoderBuf &);
- Base32EncoderBuf & operator=(const Base32EncoderBuf &);
-};
-
-
-class Foundation_API Base32EncoderIOS : public virtual std::ios
-/// The base class for Base32Encoder.
-///
-/// This class is needed to ensure the correct initialization
-/// order of the stream buffer and base classes.
-{
-public:
- Base32EncoderIOS(std::ostream & ostr, bool padding = true);
- ~Base32EncoderIOS();
- int close();
- Base32EncoderBuf * rdbuf();
-
-protected:
- Base32EncoderBuf _buf;
-
-private:
- Base32EncoderIOS(const Base32EncoderIOS &);
- Base32EncoderIOS & operator=(const Base32EncoderIOS &);
-};
-
-
-class Foundation_API Base32Encoder : public Base32EncoderIOS, public std::ostream
-/// This ostream base32-encodes all data
-/// written to it and forwards it to
-/// a connected ostream.
-/// Always call close() when done
-/// writing data, to ensure proper
-/// completion of the encoding operation.
-///
-/// Note: The characters are directly written
-/// to the ostream's streambuf, thus bypassing
-/// the ostream. The ostream's state is therefore
-/// not updated to match the buffer's state.
-{
-public:
- Base32Encoder(std::ostream & ostr, bool padding = true);
- ~Base32Encoder();
-
-private:
- Base32Encoder(const Base32Encoder &);
- Base32Encoder & operator=(const Base32Encoder &);
-};
-
-
-} // namespace Poco
-
-
-#endif // Foundation_Base32Encoder_INCLUDED
diff --git a/base/poco/Foundation/include/Poco/ClassLibrary.h b/base/poco/Foundation/include/Poco/ClassLibrary.h
deleted file mode 100644
index deb43f26297..00000000000
--- a/base/poco/Foundation/include/Poco/ClassLibrary.h
+++ /dev/null
@@ -1,92 +0,0 @@
-//
-// ClassLibrary.h
-//
-// Library: Foundation
-// Package: SharedLibrary
-// Module: ClassLoader
-//
-// Definitions for class libraries.
-//
-// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
-// and Contributors.
-//
-// SPDX-License-Identifier: BSL-1.0
-//
-
-
-#ifndef Foundation_ClassLibrary_INCLUDED
-#define Foundation_ClassLibrary_INCLUDED
-
-
-#include
-#include "Poco/Foundation.h"
-#include "Poco/Manifest.h"
-
-
-# define POCO_LIBRARY_API
-
-
-//
-// the entry points for every class library
-//
-extern "C" {
-bool POCO_LIBRARY_API pocoBuildManifest(Poco::ManifestBase * pManifest);
-void POCO_LIBRARY_API pocoInitializeLibrary();
-void POCO_LIBRARY_API pocoUninitializeLibrary();
-}
-
-
-//
-// additional support for named manifests
-//
-#define POCO_DECLARE_NAMED_MANIFEST(name) \
- extern "C" { \
- bool POCO_LIBRARY_API POCO_JOIN(pocoBuildManifest, name)(Poco::ManifestBase * pManifest); \
- }
-
-
-//
-// Macros to automatically implement pocoBuildManifest
-//
-// usage:
-//
-// POCO_BEGIN_MANIFEST(MyBaseClass)
-// POCO_EXPORT_CLASS(MyFirstClass)
-// POCO_EXPORT_CLASS(MySecondClass)
-// ...
-// POCO_END_MANIFEST
-//
-#define POCO_BEGIN_MANIFEST_IMPL(fnName, base) \
- bool fnName(Poco::ManifestBase * pManifest_) \
- { \
- typedef base _Base; \
- typedef Poco::Manifest<_Base> _Manifest; \
- std::string requiredType(typeid(_Manifest).name()); \
- std::string actualType(pManifest_->className()); \
- if (requiredType == actualType) \
- { \
- Poco::Manifest<_Base> * pManifest = static_cast<_Manifest *>(pManifest_);
-
-
-#define POCO_BEGIN_MANIFEST(base) POCO_BEGIN_MANIFEST_IMPL(pocoBuildManifest, base)
-
-
-#define POCO_BEGIN_NAMED_MANIFEST(name, base) \
- POCO_DECLARE_NAMED_MANIFEST(name) \
- POCO_BEGIN_MANIFEST_IMPL(POCO_JOIN(pocoBuildManifest, name), base)
-
-
-#define POCO_END_MANIFEST \
- return true; \
- } \
- else return false; \
- }
-
-
-#define POCO_EXPORT_CLASS(cls) pManifest->insert(new Poco::MetaObject(#cls));
-
-
-#define POCO_EXPORT_SINGLETON(cls) pManifest->insert(new Poco::MetaSingleton(#cls));
-
-
-#endif // Foundation_ClassLibrary_INCLUDED
diff --git a/base/poco/Foundation/include/Poco/ClassLoader.h b/base/poco/Foundation/include/Poco/ClassLoader.h
deleted file mode 100644
index 6752a6e7ecd..00000000000
--- a/base/poco/Foundation/include/Poco/ClassLoader.h
+++ /dev/null
@@ -1,355 +0,0 @@
-//
-// ClassLoader.h
-//
-// Library: Foundation
-// Package: SharedLibrary
-// Module: ClassLoader
-//
-// Definition of the ClassLoader class.
-//
-// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
-// and Contributors.
-//
-// SPDX-License-Identifier: BSL-1.0
-//
-
-
-#ifndef Foundation_ClassLoader_INCLUDED
-#define Foundation_ClassLoader_INCLUDED
-
-
-#include