Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-21 23:21:59 +00:00)

Merge branch 'master' into attaching_parts

This commit is contained in: dc468a8aa3

.github/PULL_REQUEST_TEMPLATE.md (vendored): 3 changes
@@ -59,6 +59,9 @@ At a minimum, the following information should be added (but add more as needed)
- [ ] <!---ci_exclude_tsan|msan|ubsan|coverage--> Exclude: All with TSAN, MSAN, UBSAN, Coverage
- [ ] <!---ci_exclude_aarch64|release|debug--> Exclude: All with aarch64, release, debug
---
- [ ] <!---ci_include_fuzzer--> Run only fuzzers related jobs (libFuzzer fuzzers, AST fuzzers, etc.)
- [ ] <!---ci_exclude_ast--> Exclude: AST fuzzers
---
- [ ] <!---do_not_test--> Do not test
- [ ] <!---woolen_wolfdog--> Woolen Wolfdog
- [ ] <!---upload_all--> Upload binaries for special builds

.github/actions/clean/action.yml (vendored): 20 changes
@@ -1,11 +1,23 @@
 name: Clean runner
 description: Clean the runner's temp path on ending
+inputs:
+  images:
+    description: clean docker images
+    default: false
+    type: boolean
 runs:
   using: "composite"
   steps:
-    - name: Clean
+    - name: Clean Temp
       shell: bash
       run: |
-        docker ps --quiet | xargs --no-run-if-empty docker kill ||:
-        docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
         sudo rm -fr "${{runner.temp}}"
+    - name: Clean Docker Containers
+      shell: bash
+      run: |
+        docker rm -vf $(docker ps -aq) ||:
+    - name: Clean Docker Images
+      if: ${{ inputs.images }}
+      shell: bash
+      run: |
+        docker rmi -f $(docker images -aq) ||:

.github/actions/debug/action.yml (vendored, new file): 18 changes
@@ -0,0 +1,18 @@
+name: DebugInfo
+description: Prints workflow debug info
+
+runs:
+  using: "composite"
+  steps:
+    - name: Print envs
+      shell: bash
+      run: |
+        echo "::group::Envs"
+        env
+        echo "::endgroup::"
+    - name: Print Event.json
+      shell: bash
+      run: |
+        echo "::group::Event.json"
+        python3 -m json.tool "$GITHUB_EVENT_PATH"
+        echo "::endgroup::"

.github/workflows/auto_releases.yml (vendored, new file): 99 changes
@@ -0,0 +1,99 @@
+name: AutoReleases
+
+env:
+  PYTHONUNBUFFERED: 1
+
+concurrency:
+  group: autoreleases
+
+on:
+  # schedule:
+  #   - cron: '0 9 * * *'
+  workflow_dispatch:
+    inputs:
+      dry-run:
+        description: 'Dry run'
+        required: false
+        default: false
+        type: boolean
+
+jobs:
+  AutoReleaseInfo:
+    runs-on: [self-hosted, release-maker]
+    outputs:
+      data: ${{ steps.info.outputs.AUTO_RELEASE_PARAMS }}
+      dry_run: ${{ steps.info.outputs.DRY_RUN }}
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
+          ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
+          RCSK
+          EOF
+          echo "DRY_RUN=true" >> "$GITHUB_ENV"
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          fetch-depth: 0 # full history needed
+      - name: Debug Info
+        uses: ./.github/actions/debug
+      - name: Prepare Info
+        id: info
+        run: |
+          cd "$GITHUB_WORKSPACE/tests/ci"
+          python3 auto_release.py --prepare
+          echo "::group::Auto Release Info"
+          python3 -m json.tool /tmp/autorelease_info.json
+          echo "::endgroup::"
+          {
+            echo 'AUTO_RELEASE_PARAMS<<EOF'
+            cat /tmp/autorelease_params.json
+            echo 'EOF'
+          } >> "$GITHUB_OUTPUT"
+          if [[ "${{ github.event_name }}" == "schedule" ]]; then
+            echo "DRY_RUN=true" >> "$GITHUB_OUTPUT"
+          else
+            echo "DRY_RUN=${{ github.event.inputs.dry-run }}" >> "$GITHUB_OUTPUT"
+          fi
+      - name: Post Release Branch statuses
+        run: |
+          cd "$GITHUB_WORKSPACE/tests/ci"
+          python3 auto_release.py --post-status
+      - name: Clean up
+        uses: ./.github/actions/clean
+
+  Releases:
+    needs: AutoReleaseInfo
+    strategy:
+      matrix:
+        release_params: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases }}
+      max-parallel: 1
+    name: Release ${{ matrix.release_params.release_branch }}
+    uses: ./.github/workflows/create_release.yml
+    with:
+      ref: ${{ matrix.release_params.commit_sha }}
+      type: patch
+      dry-run: ${{ fromJson(needs.AutoReleaseInfo.outputs.dry_run) }}
+    secrets:
+      ROBOT_CLICKHOUSE_COMMIT_TOKEN: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
+
+  CleanUp:
+    needs: [Releases]
+    runs-on: [self-hosted, release-maker]
+    steps:
+      - uses: ./.github/actions/clean
+        with:
+          images: true
+
+# PostSlackMessage:
+#   needs: [Releases]
+#   runs-on: [self-hosted, release-maker]
+#   if: ${{ !cancelled() }}
+#   steps:
+#     - name: Check out repository code
+#       uses: ClickHouse/checkout@v1
+#     - name: Post
+#       run: |
+#         cd "$GITHUB_WORKSPACE/tests/ci"
+#         python3 auto_release.py --post-auto-release-complete --wf-status ${{ job.status }}

.github/workflows/create_release.yml (vendored): 26 changes
@@ -2,6 +2,7 @@ name: CreateRelease
 concurrency:
   group: release
 'on':
   workflow_dispatch:
     inputs:

@@ -26,6 +27,28 @@ concurrency:
        required: false
        default: false
        type: boolean
+  workflow_call:
+    inputs:
+      ref:
+        description: 'Git reference (branch or commit sha) from which to create the release'
+        required: true
+        type: string
+      type:
+        description: 'The type of release: "new" for a new release or "patch" for a patch release'
+        required: true
+        type: string
+      only-repo:
+        description: 'Run only repos updates including docker (repo-recovery, tests)'
+        required: false
+        default: false
+        type: boolean
+      dry-run:
+        description: 'Dry run'
+        required: false
+        default: false
+        type: boolean
+    secrets:
+      ROBOT_CLICKHOUSE_COMMIT_TOKEN:

 jobs:
   CreateRelease:

@@ -101,6 +124,7 @@ jobs:
           --volume=".:/wd" --workdir="/wd" \
           clickhouse/style-test \
           ./tests/ci/changelog.py -v --debug-helpers \
           --gh-user-or-token ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }} \
+          --jobs=5 \
           --output="./docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }}
         git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md

@@ -129,9 +153,9 @@ jobs:
       if: ${{ inputs.type == 'patch' && ! inputs.only-repo }}
       shell: bash
       run: |
-        python3 ./tests/ci/create_release.py --set-progress-completed
         git reset --hard HEAD
         git checkout "$GITHUB_REF_NAME"
+        python3 ./tests/ci/create_release.py --set-progress-completed
     - name: Create GH Release
       if: ${{ inputs.type == 'patch' && ! inputs.only-repo }}
       shell: bash

.github/workflows/jepsen.yml (vendored): 2 changes
@@ -67,7 +67,7 @@ jobs:
        if: ${{ !cancelled() }}
        run: |
          export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
-         cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
+         cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
          ${{ toJson(needs) }}
          EOF
          python3 ./tests/ci/ci_buddy.py --check-wf-status

.github/workflows/release_branches.yml (vendored): 4 changes
@@ -130,6 +130,7 @@ jobs:
     with:
       build_name: package_debug
       data: ${{ needs.RunConfig.outputs.data }}
+      force: true
   BuilderBinDarwin:
     needs: [RunConfig, BuildDockers]
     if: ${{ !failure() && !cancelled() }}

@@ -482,7 +483,7 @@ jobs:
       if: ${{ !failure() }}
       run: |
         # update overall ci report
-        python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+        python3 ./tests/ci/finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
     - name: Check Workflow results
       if: ${{ !cancelled() }}
       run: |

@@ -490,5 +491,4 @@ jobs:
         cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
         ${{ toJson(needs) }}
         EOF
-
         python3 ./tests/ci/ci_buddy.py --check-wf-status

.gitmodules (vendored): 10 changes
@@ -108,7 +108,7 @@
 	url = https://github.com/ClickHouse/icudata
 [submodule "contrib/icu"]
 	path = contrib/icu
-	url = https://github.com/unicode-org/icu
+	url = https://github.com/ClickHouse/icu
 [submodule "contrib/flatbuffers"]
 	path = contrib/flatbuffers
 	url = https://github.com/ClickHouse/flatbuffers

@@ -230,9 +230,6 @@
 [submodule "contrib/minizip-ng"]
 	path = contrib/minizip-ng
 	url = https://github.com/zlib-ng/minizip-ng
-[submodule "contrib/annoy"]
-	path = contrib/annoy
-	url = https://github.com/ClickHouse/annoy
 [submodule "contrib/qpl"]
 	path = contrib/qpl
 	url = https://github.com/intel/qpl

@@ -341,16 +338,13 @@
 	url = https://github.com/graphitemaster/incbin.git
 [submodule "contrib/usearch"]
 	path = contrib/usearch
-	url = https://github.com/unum-cloud/usearch.git
+	url = https://github.com/ClickHouse/usearch.git
 [submodule "contrib/SimSIMD"]
 	path = contrib/SimSIMD
 	url = https://github.com/ashvardanian/SimSIMD.git
-[submodule "contrib/FP16"]
-	path = contrib/FP16
-	url = https://github.com/Maratyszcza/FP16.git
 [submodule "contrib/robin-map"]
 	path = contrib/robin-map
 	url = https://github.com/Tessil/robin-map.git
 [submodule "contrib/aklomp-base64"]
 	path = contrib/aklomp-base64
 	url = https://github.com/aklomp/base64.git
CHANGELOG.md: 147 changes

@@ -1,4 +1,5 @@
 ### Table of Contents
+**[ClickHouse release v24.8 LTS, 2024-08-20](#248)**<br/>
 **[ClickHouse release v24.7, 2024-07-30](#247)**<br/>
 **[ClickHouse release v24.6, 2024-07-01](#246)**<br/>
 **[ClickHouse release v24.5, 2024-05-30](#245)**<br/>

@@ -10,6 +11,152 @@

# 2024 Changelog

### <a id="248"></a> ClickHouse release 24.8 LTS, 2024-08-20

#### Backward Incompatible Change
* `clickhouse-client` and `clickhouse-local` now default to multi-query mode (instead of single-query mode). As an example, `clickhouse-client -q "SELECT 1; SELECT 2"` now works, whereas users previously had to add `--multiquery` (or `-n`). The `--multiquery`/`-n` switch became obsolete. INSERT queries in multi-query statements are treated specially based on their FORMAT clause: if the FORMAT is `VALUES` (the most common case), the end of the INSERT statement is represented by a trailing semicolon `;` at the end of the query. For all other FORMATs (e.g. `CSV` or `JSONEachRow`), the end of the INSERT statement is represented by two newlines `\n\n` at the end of the query. [#63898](https://github.com/ClickHouse/ClickHouse/pull/63898) ([FFish](https://github.com/wxybear)).
* In previous versions, it was possible to use an alternative syntax for `LowCardinality` data types by appending `WithDictionary` to the name of the data type. It was an initial working implementation, and it was never documented or exposed to the public. Now, it is deprecated. If you have used this syntax, you have to ALTER your tables and rename the data types to `LowCardinality`. [#66842](https://github.com/ClickHouse/ClickHouse/pull/66842) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix logical errors with storage `Buffer` used with a distributed destination table. It's a backward incompatible change: queries using `Buffer` with a distributed destination table may stop working if the table appears more than once in the query (e.g., in a self-join). [#67015](https://github.com/ClickHouse/ClickHouse/pull/67015) ([vdimir](https://github.com/vdimir)).
* In previous versions, calling functions for random distributions based on the Gamma function (such as Chi-Squared, Student, Fisher) with negative arguments close to zero led to a long computation or an infinite loop. In the new version, calling these functions with zero or negative arguments will produce an exception. This closes [#67297](https://github.com/ClickHouse/ClickHouse/issues/67297). [#67326](https://github.com/ClickHouse/ClickHouse/pull/67326) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The system table `text_log` is enabled by default. This is fully compatible with previous versions, but you may notice subtly increased disk usage on the local disk (this system table takes a tiny amount of disk space). [#67428](https://github.com/ClickHouse/ClickHouse/pull/67428) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* In previous versions, `arrayWithConstant` could be slow if asked to generate very large arrays. In the new version, it is limited to 1 GB per array. This closes [#32754](https://github.com/ClickHouse/ClickHouse/issues/32754). [#67741](https://github.com/ClickHouse/ClickHouse/pull/67741) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix REPLACE modifier formatting (forbid omitting brackets). [#67774](https://github.com/ClickHouse/ClickHouse/pull/67774) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#68349](https://github.com/ClickHouse/ClickHouse/issues/68349): Reimplement the `Dynamic` type. Now, when the limit of dynamic data types is reached, new types are no longer cast to String but are stored in a special data structure in binary format with a binary-encoded data type. Any type ever inserted into a `Dynamic` column can now be read from it as a subcolumn. [#68132](https://github.com/ClickHouse/ClickHouse/pull/68132) ([Kruglov Pavel](https://github.com/Avogar)).
#### New Feature
* Added a new `MergeTree` setting `deduplicate_merge_projection_mode` to control projections during merges (for specific engines) and the `OPTIMIZE DEDUPLICATE` query. Supported options: `throw` (throw an exception in case the projection is not fully supported for the `*MergeTree` engine), `drop` (remove the projection during merge if it can't be merged itself consistently) and `rebuild` (rebuild the projection from scratch, which is a heavy operation). See the SQL sketch after this list. [#66672](https://github.com/ClickHouse/ClickHouse/pull/66672) ([jsc0218](https://github.com/jsc0218)).
* Add an `_etag` virtual column for the S3 table engine. Fixes [#65312](https://github.com/ClickHouse/ClickHouse/issues/65312). [#65386](https://github.com/ClickHouse/ClickHouse/pull/65386) ([skyoct](https://github.com/skyoct)).
* Added a tagging (namespace) mechanism for the query cache. The same queries with different tags are considered different by the query cache. Example: `SELECT 1 SETTINGS use_query_cache = 1, query_cache_tag = 'abc'` and `SELECT 1 SETTINGS use_query_cache = 1, query_cache_tag = 'def'` now create different query cache entries. [#68235](https://github.com/ClickHouse/ClickHouse/pull/68235) ([sakulali](https://github.com/sakulali)).
* Support more variants of JOIN strictness (`LEFT/RIGHT SEMI/ANTI/ANY JOIN`) with inequality conditions which involve columns from both the left and right tables, e.g. `t1.y < t2.y` (see the setting `allow_experimental_join_condition`). [#64281](https://github.com/ClickHouse/ClickHouse/pull/64281) ([lgbo](https://github.com/lgbo-ustc)).
* Interpret Hive-style partitioning for different engines (`File`, `URL`, `S3`, `AzureBlobStorage`, `HDFS`). Hive-style partitioning organizes data into partitioned sub-directories, making it efficient to query and manage large datasets. Currently, it only creates virtual columns with the appropriate name and data. A follow-up PR will introduce the appropriate data filtering (performance speedup). [#65997](https://github.com/ClickHouse/ClickHouse/pull/65997) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Add function `printf` for Spark compatibility (but you can use the existing `format` function). [#66257](https://github.com/ClickHouse/ClickHouse/pull/66257) ([李扬](https://github.com/taiyang-li)).
* Add options `restore_replace_external_engines_to_null` and `restore_replace_external_table_functions_to_null` to replace external engines and table functions with the `Null` engine, which can be useful for testing. It should work for RESTORE and explicit table creation. [#66536](https://github.com/ClickHouse/ClickHouse/pull/66536) ([Ilya Yatsishin](https://github.com/qoega)).
* Added support for reading `MULTILINESTRING` geometry in `WKT` format using function `readWKTLineString`. [#67647](https://github.com/ClickHouse/ClickHouse/pull/67647) ([Jacob Reckhard](https://github.com/jacobrec)).
* Add a new table function `fuzzQuery`. This function allows the modification of a given query string with random variations. Example: `SELECT query FROM fuzzQuery('SELECT 1') LIMIT 5;`. [#67655](https://github.com/ClickHouse/ClickHouse/pull/67655) ([pufit](https://github.com/pufit)).
* Add a query `ALTER TABLE ... DROP DETACHED PARTITION ALL` to drop all detached partitions (also shown in the sketch after this list). [#67885](https://github.com/ClickHouse/ClickHouse/pull/67885) ([Duc Canh Le](https://github.com/canhld94)).
* Add the `rows_before_aggregation_at_least` statistic to the query response when a new setting, `rows_before_aggregation`, is enabled. This statistic represents the number of rows read before aggregation. In the context of a distributed query, when using the `group by` or `max` aggregation function without a `limit`, `rows_before_aggregation_at_least` can reflect the number of rows hit by the query. [#66084](https://github.com/ClickHouse/ClickHouse/pull/66084) ([morning-color](https://github.com/morning-color)).
* Support the `OPTIMIZE` query on `Join` tables to reduce their memory footprint. [#67883](https://github.com/ClickHouse/ClickHouse/pull/67883) ([Duc Canh Le](https://github.com/canhld94)).
* Allow running a query instantly in the Play UI if you add `&run=1` to the URL. [#66457](https://github.com/ClickHouse/ClickHouse/pull/66457) ([Aleksandr Musorin](https://github.com/AVMusorin)).
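
A minimal SQL sketch of two of the new capabilities referenced above: the `deduplicate_merge_projection_mode` MergeTree setting and `DROP DETACHED PARTITION ALL`. The table and projection names are hypothetical, and `allow_drop_detached` is the pre-existing guard setting required for dropping detached partitions:

```sql
-- Hypothetical table: a ReplacingMergeTree whose merges can drop duplicate rows,
-- which a projection cannot reflect consistently; 'rebuild' recomputes the
-- projection instead of throwing (the default 'throw' behavior).
CREATE TABLE orders
(
    id UInt64,
    amount UInt64,
    PROJECTION by_amount (SELECT id, amount ORDER BY amount)
)
ENGINE = ReplacingMergeTree
ORDER BY id
SETTINGS deduplicate_merge_projection_mode = 'rebuild';

OPTIMIZE TABLE orders FINAL DEDUPLICATE;

-- New in 24.8: drop every detached partition of a table in one statement.
ALTER TABLE orders DROP DETACHED PARTITION ALL SETTINGS allow_drop_detached = 1;
```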
#### Experimental Feature
* Implement a new `JSON` data type (see the sketch after this list). [#66444](https://github.com/ClickHouse/ClickHouse/pull/66444) ([Kruglov Pavel](https://github.com/Avogar)).
* Add the new `TimeSeries` table engine. [#64183](https://github.com/ClickHouse/ClickHouse/pull/64183) ([Vitaly Baranov](https://github.com/vitlibar)).
* Add a new experimental `Kafka` storage engine that stores offsets in Keeper instead of relying on committing them to Kafka. It makes the commit to ClickHouse tables atomic with regard to consumption from the queue. [#57625](https://github.com/ClickHouse/ClickHouse/pull/57625) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Use an adaptive read task size calculation method (adaptive meaning it depends on read column sizes) for parallel replicas. [#60377](https://github.com/ClickHouse/ClickHouse/pull/60377) ([Nikita Taranov](https://github.com/nickitat)).
* Added statistics type `count_min` (count-min sketches) which provides selectivity estimations for equality predicates like `col = 'val'`. Supported data types are string, date, datetime and numeric types. [#65521](https://github.com/ClickHouse/ClickHouse/pull/65521) ([JackyWoo](https://github.com/JackyWoo)).
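
A minimal sketch of the new `JSON` type described above. This assumes the type is gated behind the experimental setting named below and that JSON paths are read back as subcolumns, as the entry states; the table name is hypothetical:

```sql
-- Assumed gate for the experimental type in 24.8.
SET allow_experimental_json_type = 1;

CREATE TABLE events (data JSON) ENGINE = MergeTree ORDER BY tuple();

INSERT INTO events VALUES ('{"user": {"id": 42}, "tags": ["a", "b"]}');

-- JSON paths are addressable as subcolumns.
SELECT data.user.id FROM events;
```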
#### Performance Improvement
* Setting `optimize_functions_to_subcolumns` is enabled by default. [#68053](https://github.com/ClickHouse/ClickHouse/pull/68053) ([Anton Popov](https://github.com/CurtizJ)).
* Store the `plain_rewritable` disk directory metadata in a `__meta` layout, separately from the merge tree data in the object storage. Move the `plain_rewritable` disk to a flat directory structure. [#65751](https://github.com/ClickHouse/ClickHouse/pull/65751) ([Julia Kartseva](https://github.com/jkartseva)).
* Improve column squashing (an operation happening in INSERT queries) for `String`/`Array`/`Map`/`Variant`/`Dynamic` types by reserving the required memory in advance for all subcolumns. [#67043](https://github.com/ClickHouse/ClickHouse/pull/67043) ([Kruglov Pavel](https://github.com/Avogar)).
* Speed up `SYSTEM FLUSH LOGS` and flush logs on shutdown. [#67472](https://github.com/ClickHouse/ClickHouse/pull/67472) ([Sema Checherinda](https://github.com/CheSema)).
* Improved the overall performance of merges by reducing the overhead of the scheduling steps of merges. [#68016](https://github.com/ClickHouse/ClickHouse/pull/68016) ([Anton Popov](https://github.com/CurtizJ)).
* Speed up table removal for the `DROP DATABASE` query; the default value of `database_catalog_drop_table_concurrency` was increased to 16. [#67228](https://github.com/ClickHouse/ClickHouse/pull/67228) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Avoid allocating too much capacity for array columns while writing ORC; performance improves by 15% for an `Array` column. [#67879](https://github.com/ClickHouse/ClickHouse/pull/67879) ([李扬](https://github.com/taiyang-li)).
* Speed up mutations for non-replicated MergeTree significantly. [#66911](https://github.com/ClickHouse/ClickHouse/pull/66911) [#66909](https://github.com/ClickHouse/ClickHouse/pull/66909) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
#### Improvement
* Setting `allow_experimental_analyzer` is renamed to `enable_analyzer`. The old name is preserved in the form of an alias. This signifies that the Analyzer is no longer in beta and is fully promoted to production. [#66438](https://github.com/ClickHouse/ClickHouse/pull/66438) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Improve schema inference of date-times. Now `DateTime64` is used only when the date-time has a fractional part; otherwise, regular `DateTime` is used. Inference of `Date`/`DateTime` is stricter now, especially when `date_time_input_format='best_effort'`, to avoid inferring date-times from strings in corner cases. [#68382](https://github.com/ClickHouse/ClickHouse/pull/68382) ([Kruglov Pavel](https://github.com/Avogar)).
* The ClickHouse server now supports a new setting `max_keep_alive_requests`. For keep-alive HTTP connections to the server, it works in tandem with `keep_alive_timeout`: if the idle timeout has not expired but more than `max_keep_alive_requests` requests have already been made through the given connection, it will be closed by the server. [#61793](https://github.com/ClickHouse/ClickHouse/pull/61793) ([Nikita Taranov](https://github.com/nickitat)).
* Various improvements in the advanced dashboard. This closes [#67697](https://github.com/ClickHouse/ClickHouse/issues/67697). This closes [#63407](https://github.com/ClickHouse/ClickHouse/issues/63407). This closes [#51129](https://github.com/ClickHouse/ClickHouse/issues/51129). This closes [#61204](https://github.com/ClickHouse/ClickHouse/issues/61204). [#67701](https://github.com/ClickHouse/ClickHouse/pull/67701) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Do not require a grant for REMOTE when creating a Distributed table: a grant for the Distributed engine is enough. [#65419](https://github.com/ClickHouse/ClickHouse/pull/65419) ([jsc0218](https://github.com/jsc0218)).
* Do not pass logs for Keeper explicitly in the Docker image to allow overriding. [#65564](https://github.com/ClickHouse/ClickHouse/pull/65564) ([Azat Khuzhin](https://github.com/azat)).
* Introduced `use_same_password_for_base_backup` settings for `BACKUP` and `RESTORE` queries, which allow creating and restoring incremental backups to/from password-protected archives. [#66214](https://github.com/ClickHouse/ClickHouse/pull/66214) ([Samuele](https://github.com/sguerrini97)).
* Ignore `async_load_databases` for the `ATTACH` query (previously it was possible for ATTACH to return before the tables had been attached). [#66240](https://github.com/ClickHouse/ClickHouse/pull/66240) ([Azat Khuzhin](https://github.com/azat)).
* Added logs and metrics for rejected connections (where there are not enough resources). [#66410](https://github.com/ClickHouse/ClickHouse/pull/66410) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Support a proper `UUID` type for the MongoDB engine. [#66671](https://github.com/ClickHouse/ClickHouse/pull/66671) ([Azat Khuzhin](https://github.com/azat)).
* Add replication lag and recovery time metrics. [#66703](https://github.com/ClickHouse/ClickHouse/pull/66703) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
* Add the `DiskS3NoSuchKeyErrors` metric. [#66704](https://github.com/ClickHouse/ClickHouse/pull/66704) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
* Ensure the `COMMENT` clause works for all table engines. [#66832](https://github.com/ClickHouse/ClickHouse/pull/66832) ([Joe Lynch](https://github.com/joelynch)).
* Function `mapFromArrays` now accepts `Map(K, V)` as its first argument, for example: `SELECT mapFromArrays(map('a', 4, 'b', 4), ['aa', 'bb'])` now works and returns `{('a',4):'aa',('b',4):'bb'}`. Also, if the first argument is an Array, it can now also be of type `Array(Nullable(T))` or `Array(LowCardinality(Nullable(T)))` as long as the actual array values are not `NULL`. [#67103](https://github.com/ClickHouse/ClickHouse/pull/67103) ([李扬](https://github.com/taiyang-li)).
* Read configuration for `clickhouse-local` from `~/.clickhouse-local`. [#67135](https://github.com/ClickHouse/ClickHouse/pull/67135) ([Azat Khuzhin](https://github.com/azat)).
* Rename setting `input_format_orc_read_use_writer_time_zone` to `input_format_orc_reader_timezone` and allow the user to set the reader timezone. [#67175](https://github.com/ClickHouse/ClickHouse/pull/67175) ([kevinyhzou](https://github.com/KevinyhZou)).
* Decrease the level of the `Socket is not connected` error when an HTTP connection is immediately reset by the peer after connecting; closes [#34218](https://github.com/ClickHouse/ClickHouse/issues/34218). [#67177](https://github.com/ClickHouse/ClickHouse/pull/67177) ([vdimir](https://github.com/vdimir)).
* Add the ability to load dashboards for `system.dashboards` from config (once set, they override the default dashboards preset). [#67232](https://github.com/ClickHouse/ClickHouse/pull/67232) ([Azat Khuzhin](https://github.com/azat)).
* The window functions in SQL are traditionally in snake case. ClickHouse uses `camelCase`, so new aliases `denseRank()` and `percentRank()` have been created. These new functions can be called exactly like the original `dense_rank()` and `percent_rank()` functions. Both snake case and camelCase syntaxes remain usable (see the sketch after this list). A new test for each of the functions has been added as well. This closes [#67042](https://github.com/ClickHouse/ClickHouse/issues/67042). [#67334](https://github.com/ClickHouse/ClickHouse/pull/67334) ([Peter Nguyen](https://github.com/petern48)).
* Autodetect the configuration file format if it is not `.xml`, `.yml` or `.yaml`. If the file begins with `<`, it might be XML; otherwise, it might be YAML. It is useful when providing a configuration file from a pipe: `clickhouse-server --config-file <(echo "hello: world")`. [#67391](https://github.com/ClickHouse/ClickHouse/pull/67391) ([sakulali](https://github.com/sakulali)).
* Functions `formatDateTime` and `formatDateTimeInJodaSyntax` now treat their format parameter as optional. If it is not specified, format strings `%Y-%m-%d %H:%i:%s` and `yyyy-MM-dd HH:mm:ss` are assumed. Example: `SELECT parseDateTime('2021-01-04 23:12:34')` now returns DateTime value `2021-01-04 23:12:34` (previously, this threw an exception). [#67399](https://github.com/ClickHouse/ClickHouse/pull/67399) ([Robert Schulze](https://github.com/rschu1ze)).
* Automatically retry Keeper requests in KeeperMap if they happen because of timeout or connection loss. [#67448](https://github.com/ClickHouse/ClickHouse/pull/67448) ([Antonio Andelic](https://github.com/antonio2368)).
* Add `-no-pie` to Aarch64 Linux builds to allow proper introspection and symbolizing of stacktraces after a ClickHouse restart. [#67916](https://github.com/ClickHouse/ClickHouse/pull/67916) ([filimonov](https://github.com/filimonov)).
* Added profile events for merges and mutations for better introspection. [#68015](https://github.com/ClickHouse/ClickHouse/pull/68015) ([Anton Popov](https://github.com/CurtizJ)).
* Fix settings and `current_database` in `system.processes` for async BACKUP/RESTORE. [#68163](https://github.com/ClickHouse/ClickHouse/pull/68163) ([Azat Khuzhin](https://github.com/azat)).
* Remove unnecessary logs for non-replicated `MergeTree`. [#68238](https://github.com/ClickHouse/ClickHouse/pull/68238) ([Daniil Ivanik](https://github.com/divanik)).
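
A small sketch of the camelCase window-function aliases mentioned above; `numbers()` is the standard table function, and the column aliases are illustrative:

```sql
SELECT
    number,
    denseRank()   OVER (ORDER BY intDiv(number, 2)) AS dr,
    percentRank() OVER (ORDER BY number)            AS pr
FROM numbers(5);
```

Both aliases behave exactly like `dense_rank()` and `percent_rank()`, so existing snake_case queries keep working unchanged.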
#### Build/Testing/Packaging Improvement
* The integration tests flaky check will now run each test case multiple times to find more issues in tests and make them more reliable. It uses the `pytest-repeat` library to run a test case multiple times in the same environment. It is important to clean up tables and other entities at the end of a test case for it to pass. Repeating works much faster than several pytest runs, as it starts the necessary containers only once. [#66986](https://github.com/ClickHouse/ClickHouse/pull/66986) ([Ilya Yatsishin](https://github.com/qoega)).
* Unblock the usage of CLion with ClickHouse. In previous versions, CLion froze for a minute on every keypress. This closes [#66994](https://github.com/ClickHouse/ClickHouse/issues/66994). [#66995](https://github.com/ClickHouse/ClickHouse/pull/66995) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* getauxval: avoid a crash under a sanitizer re-exec due to high ASLR entropy in newer Linux kernels. [#67081](https://github.com/ClickHouse/ClickHouse/pull/67081) ([Raúl Marín](https://github.com/Algunenano)).
* Some parts of client code are extracted to a single file and the highest possible optimization level is applied to them even for debug builds. This closes: [#65745](https://github.com/ClickHouse/ClickHouse/issues/65745). [#67215](https://github.com/ClickHouse/ClickHouse/pull/67215) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
#### Bug Fix
* Only relevant to the experimental Variant data type. Fix crash with Variant + AggregateFunction type. [#67122](https://github.com/ClickHouse/ClickHouse/pull/67122) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix crash in DistributedAsyncInsert when the connection is empty. [#67219](https://github.com/ClickHouse/ClickHouse/pull/67219) ([Pablo Marcos](https://github.com/pamarcos)).
* Fix crash of `uniq` and `uniqTheta` with a `tuple()` argument. Closes [#67303](https://github.com/ClickHouse/ClickHouse/issues/67303). [#67306](https://github.com/ClickHouse/ClickHouse/pull/67306) ([flynn](https://github.com/ucasfl)).
* Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid traversal of unresolved table function arguments in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)).
* Fix potential stack overflow in the `JSONMergePatch` function. Renamed this function from `jsonMergePatch` to `JSONMergePatch` because the previous name was wrong. The previous name is still kept for compatibility. Improved diagnostics of errors in the function. This closes [#67304](https://github.com/ClickHouse/ClickHouse/issues/67304). [#67756](https://github.com/ClickHouse/ClickHouse/pull/67756) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fixed a NULL pointer dereference, triggered by a specially crafted query, that crashed the server via `hopEnd`, `hopStart`, `tumbleEnd`, and `tumbleStart`. [#68098](https://github.com/ClickHouse/ClickHouse/pull/68098) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Fixed `Not-ready Set` in some system tables when filtering using subqueries. [#66018](https://github.com/ClickHouse/ClickHouse/pull/66018) ([Michael Kolupaev](https://github.com/al13n321)).
* Fixed reading of subcolumns after the `ALTER ADD COLUMN` query. [#66243](https://github.com/ClickHouse/ClickHouse/pull/66243) ([Anton Popov](https://github.com/CurtizJ)).
* Fix boolean literals in queries sent to an external database (for engines like `PostgreSQL`). [#66282](https://github.com/ClickHouse/ClickHouse/pull/66282) ([vdimir](https://github.com/vdimir)).
* Fix formatting of queries with an aliased JOIN ON expression, e.g. `... JOIN t2 ON (x = y) AS e ORDER BY x` should be formatted as `... JOIN t2 ON ((x = y) AS e) ORDER BY x`. [#66312](https://github.com/ClickHouse/ClickHouse/pull/66312) ([vdimir](https://github.com/vdimir)).
* Fix `cluster()` for inter-server secret (preserve the initial user as before). [#66364](https://github.com/ClickHouse/ClickHouse/pull/66364) ([Azat Khuzhin](https://github.com/azat)).
* Fix a possible runtime error while converting an Array field with nulls to Array(Variant). [#66727](https://github.com/ClickHouse/ClickHouse/pull/66727) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix an occasional deadlock in `Context::getDDLWorker`. [#66843](https://github.com/ClickHouse/ClickHouse/pull/66843) ([Alexander Gololobov](https://github.com/davenger)).
* Fix creating a KeeperMap table after an incomplete drop. [#66865](https://github.com/ClickHouse/ClickHouse/pull/66865) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix a broken part error while restoring to an `s3_plain_rewritable` disk. [#66881](https://github.com/ClickHouse/ClickHouse/pull/66881) ([Vitaly Baranov](https://github.com/vitlibar)).
* In rare cases ClickHouse could consider parts as broken because of some unexpected projections on disk. Now it's fixed. [#66898](https://github.com/ClickHouse/ClickHouse/pull/66898) ([alesapin](https://github.com/alesapin)).
* Fix invalid format detection in schema inference that could lead to the logical error `Format {} doesn't support schema inference`. [#66899](https://github.com/ClickHouse/ClickHouse/pull/66899) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix a possible deadlock on query cancel with parallel replicas. [#66905](https://github.com/ClickHouse/ClickHouse/pull/66905) ([Nikita Taranov](https://github.com/nickitat)).
* Forbid `CREATE ... AS SELECT` even when `database_replicated_allow_heavy_create` is set. It was unconditionally forbidden in 23.12 and accidentally allowed under the setting in unreleased 24.7. [#66980](https://github.com/ClickHouse/ClickHouse/pull/66980) ([vdimir](https://github.com/vdimir)).
* Reading from the `numbers` table function could wrongly throw an exception when the `max_rows_to_read` limit was set. This closes [#66992](https://github.com/ClickHouse/ClickHouse/issues/66992). [#66996](https://github.com/ClickHouse/ClickHouse/pull/66996) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add proper type conversion to the `lagInFrame` and `leadInFrame` window functions - fixes an msan test. [#67091](https://github.com/ClickHouse/ClickHouse/pull/67091) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* TRUNCATE DATABASE used to stop replication as if it were a DROP DATABASE query; it's fixed. [#67129](https://github.com/ClickHouse/ClickHouse/pull/67129) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Use a separate client context in `clickhouse-local`. [#67133](https://github.com/ClickHouse/ClickHouse/pull/67133) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix the error `Cannot convert column because it is non constant in source stream but must be constant in result.` for a query that reads from a `Merge` table over a `Distributed` table with one shard. [#67146](https://github.com/ClickHouse/ClickHouse/pull/67146) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Correct the behavior of `ORDER BY all` with disabled `enable_order_by_all` and parallel replicas (distributed queries as well). [#67153](https://github.com/ClickHouse/ClickHouse/pull/67153) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix wrong usage of `input_format_max_bytes_to_read_for_schema_inference` in the schema cache. [#67157](https://github.com/ClickHouse/ClickHouse/pull/67157) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix a memory leak in count distinct when an exception is issued during group by with a single nullable key. [#67171](https://github.com/ClickHouse/ClickHouse/pull/67171) ([Jet He](https://github.com/compasses)).
* Fix an error in the optimization which converts OUTER JOIN to INNER JOIN. This closes [#67156](https://github.com/ClickHouse/ClickHouse/issues/67156). This closes [#66447](https://github.com/ClickHouse/ClickHouse/issues/66447). The bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/62907. [#67178](https://github.com/ClickHouse/ClickHouse/pull/67178) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix the error `Conversion from AggregateFunction(name, Type) to AggregateFunction(name, Nullable(Type)) is not supported`. The bug was caused by the `optimize_rewrite_aggregate_function_with_if` optimization. Fixes [#67112](https://github.com/ClickHouse/ClickHouse/issues/67112). [#67229](https://github.com/ClickHouse/ClickHouse/pull/67229) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix a hung query when using an empty tuple as the left-hand side of the IN function. [#67295](https://github.com/ClickHouse/ClickHouse/pull/67295) ([Duc Canh Le](https://github.com/canhld94)).
* It was possible to create very deeply nested JSON data that triggered a stack overflow while skipping unknown fields. This closes [#67292](https://github.com/ClickHouse/ClickHouse/issues/67292). [#67324](https://github.com/ClickHouse/ClickHouse/pull/67324) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix attaching a ReplicatedMergeTree table after an exception during startup. [#67360](https://github.com/ClickHouse/ClickHouse/pull/67360) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix a segfault caused by incorrectly detaching from the thread group in `Aggregator`. [#67385](https://github.com/ClickHouse/ClickHouse/pull/67385) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix one more case when a non-deterministic function is specified in the PK. [#67395](https://github.com/ClickHouse/ClickHouse/pull/67395) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed the `bloom_filter` index breaking queries with mildly weird conditions like `(k=2)=(k=2)` or `has([1,2,3], k)`. [#67423](https://github.com/ClickHouse/ClickHouse/pull/67423) ([Michael Kolupaev](https://github.com/al13n321)).
* Correctly parse a file name/URI containing `::` if it's not an archive. [#67433](https://github.com/ClickHouse/ClickHouse/pull/67433) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix waiting for tasks in `~WriteBufferFromS3` in case the WriteBuffer was cancelled. [#67459](https://github.com/ClickHouse/ClickHouse/pull/67459) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Protect temporary part directories from being removed during RESTORE. [#67491](https://github.com/ClickHouse/ClickHouse/pull/67491) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix execution of nested short-circuit functions. [#67520](https://github.com/ClickHouse/ClickHouse/pull/67520) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix `Logical error: Expected the argument №N of type T to have X rows, but it has 0`. The error could happen in a remote query with a constant expression in `GROUP BY` (with the new analyzer). [#67536](https://github.com/ClickHouse/ClickHouse/pull/67536) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix join on tuple with NULLs: some queries with the new analyzer and `NULL` inside the tuple in the `JOIN ON` section returned incorrect results. [#67538](https://github.com/ClickHouse/ClickHouse/pull/67538) ([vdimir](https://github.com/vdimir)).
* Fix a redundant reschedule of `FileCache::freeSpaceRatioKeepingThreadFunc()` in the case of a full non-evictable cache. [#67540](https://github.com/ClickHouse/ClickHouse/pull/67540) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix inserting into stream-like engines (Kafka, RabbitMQ, NATS) through the HTTP interface. [#67554](https://github.com/ClickHouse/ClickHouse/pull/67554) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Fix for function `toStartOfWeek`, which returned the wrong result with a small `DateTime64` value. [#67558](https://github.com/ClickHouse/ClickHouse/pull/67558) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Fix creation of a view with a recursive CTE. [#67587](https://github.com/ClickHouse/ClickHouse/pull/67587) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix `Logical error: 'file_offset_of_buffer_end <= read_until_position'` in the filesystem cache. Closes [#57508](https://github.com/ClickHouse/ClickHouse/issues/57508). [#67623](https://github.com/ClickHouse/ClickHouse/pull/67623) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fixes [#62282](https://github.com/ClickHouse/ClickHouse/issues/62282). Removed the call to `convertFieldToString()` and added datatype-specific serialization code. Parameterized view substitution was broken for multiple datatypes when the parameter value was a function or expression returning a datatype instance. [#67654](https://github.com/ClickHouse/ClickHouse/pull/67654) ([Shankar](https://github.com/shiyer7474)).
* Fix crash on `percent_rank`. `percent_rank`'s default frame type is changed to `range unbounded preceding and unbounded following`. `IWindowFunction`'s default window frame is considered, and now window functions without a window frame definition in SQL can be put into different `WindowTransformer`s properly. [#67661](https://github.com/ClickHouse/ClickHouse/pull/67661) ([lgbo](https://github.com/lgbo-ustc)).
* Fix reloading SQL UDFs with UNION. Previously, restarting the server could make a UDF invalid. [#67665](https://github.com/ClickHouse/ClickHouse/pull/67665) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix the possible logical error "Unexpected return type from if" with the experimental Variant type and the enabled setting `use_variant_as_common_type` in function if with Tuples and Maps. [#67687](https://github.com/ClickHouse/ClickHouse/pull/67687) ([Kruglov Pavel](https://github.com/Avogar)).
* Due to a bug in the Linux kernel, a query can hang in `TimerDescriptor::drain`. This closes [#37686](https://github.com/ClickHouse/ClickHouse/issues/37686). [#67702](https://github.com/ClickHouse/ClickHouse/pull/67702) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix completion of the `RESTORE ON CLUSTER` command. [#67720](https://github.com/ClickHouse/ClickHouse/pull/67720) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix a dictionary hang in case of CANNOT_SCHEDULE_TASK while loading. [#67751](https://github.com/ClickHouse/ClickHouse/pull/67751) ([Azat Khuzhin](https://github.com/azat)).
* Queries like `SELECT count() FROM t WHERE cast(c = 1 or c = 9999 AS Bool) SETTINGS use_skip_indexes=1` with bloom filter indexes on `c` now work correctly. [#67781](https://github.com/ClickHouse/ClickHouse/pull/67781) ([jsc0218](https://github.com/jsc0218)).
* Fix a wrong aggregation result in some queries with aggregation without keys and filter; closes [#67419](https://github.com/ClickHouse/ClickHouse/issues/67419). [#67804](https://github.com/ClickHouse/ClickHouse/pull/67804) ([vdimir](https://github.com/vdimir)).
* Validate experimental/suspicious data types in ALTER ADD/MODIFY COLUMN. [#67911](https://github.com/ClickHouse/ClickHouse/pull/67911) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix DateTime64 parsing after constant folding in distributed queries; closes [#66773](https://github.com/ClickHouse/ClickHouse/issues/66773). [#67920](https://github.com/ClickHouse/ClickHouse/pull/67920) ([vdimir](https://github.com/vdimir)).
* Fix a wrong `count()` result when there is a non-deterministic function in the predicate. [#67922](https://github.com/ClickHouse/ClickHouse/pull/67922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Fixed the calculation of the maximum thread soft limit in containerized environments where the usable CPU count is limited. [#67963](https://github.com/ClickHouse/ClickHouse/pull/67963) ([Robert Schulze](https://github.com/rschu1ze)).
* Now ClickHouse doesn't consider a part as broken if a projection doesn't exist on disk but exists in `checksums.txt`. [#68003](https://github.com/ClickHouse/ClickHouse/pull/68003) ([alesapin](https://github.com/alesapin)).
* Fixed the skipping of untouched parts in mutations with the new analyzer. Previously, with the analyzer enabled, data in a part could be rewritten by a mutation even if the mutation did not affect that part according to the predicate. [#68052](https://github.com/ClickHouse/ClickHouse/pull/68052) ([Anton Popov](https://github.com/CurtizJ)).
* Removes an incorrect optimization to remove sorting in subqueries that use `OFFSET`. Fixes [#67906](https://github.com/ClickHouse/ClickHouse/issues/67906). [#68099](https://github.com/ClickHouse/ClickHouse/pull/68099) ([Graham Campbell](https://github.com/GrahamCampbell)).
* Attempt to fix `Block structure mismatch in AggregatingStep stream: different types` for the aggregate projection optimization. [#68107](https://github.com/ClickHouse/ClickHouse/pull/68107) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Try to fix a PostgreSQL crash when a query is cancelled. [#68288](https://github.com/ClickHouse/ClickHouse/pull/68288) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix the missing sync replica mode in the query `SYSTEM SYNC REPLICA`. [#68326](https://github.com/ClickHouse/ClickHouse/pull/68326) ([Duc Canh Le](https://github.com/canhld94)).


### <a id="247"></a> ClickHouse release 24.7, 2024-07-30

#### Backward Incompatible Change
CMakeLists.txt

@@ -187,14 +187,6 @@ else ()
     set(NO_WHOLE_ARCHIVE --no-whole-archive)
 endif ()

-if (NOT CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE")
-    # Can be lld or ld-lld or lld-13 or /path/to/lld.
-    if (LINKER_NAME MATCHES "lld")
-        set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--gdb-index")
-        message (STATUS "Adding .gdb-index via --gdb-index linker option.")
-    endif ()
-endif()
-
 if (NOT (SANITIZE_COVERAGE OR WITH_COVERAGE)
     AND (CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE"
          OR CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO"

@@ -330,17 +322,21 @@ if (DISABLE_OMIT_FRAME_POINTER)
     set (CMAKE_ASM_FLAGS_ADD "${CMAKE_ASM_FLAGS_ADD} -fno-omit-frame-pointer -mno-omit-leaf-frame-pointer")
 endif()

+# Before you start hating your debugger because it refuses to show variables ('<optimized out>'), try building with -DDEBUG_O_LEVEL="0"
+# https://stackoverflow.com/questions/63386189/whats-the-difference-between-a-compilers-o0-option-and-og-option/63386263#63386263
+set(DEBUG_O_LEVEL "g" CACHE STRING "The -Ox level used for debug builds")
+
 set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMPILER_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
 set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
-set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -Og ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
+set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O${DEBUG_O_LEVEL} ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")

 set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMPILER_FLAGS} ${CMAKE_C_FLAGS_ADD}")
 set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}")
-set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -Og ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}")
+set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O${DEBUG_O_LEVEL} ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}")

 set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} ${COMPILER_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
 set (CMAKE_ASM_FLAGS_RELWITHDEBINFO "${CMAKE_ASM_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
-set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -Og ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
+set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -O${DEBUG_O_LEVEL} ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")

 if (OS_DARWIN)
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")

@@ -402,7 +398,7 @@ if ((NOT OS_LINUX AND NOT OS_ANDROID) OR (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
     set(ENABLE_GWP_ASAN OFF)
 endif ()

-option (ENABLE_FIU "Enable Fiu" ON)
+option (ENABLE_LIBFIU "Enable libfiu" ON)

 option(WERROR "Enable -Werror compiler option" ON)

@@ -428,12 +424,17 @@ if (NOT SANITIZE)
     set (CMAKE_POSITION_INDEPENDENT_CODE OFF)
 endif()

-if (OS_LINUX AND NOT (ARCH_AARCH64 OR ARCH_S390X) AND NOT SANITIZE)
-    # Slightly more efficient code can be generated
-    # It's disabled for ARM because otherwise ClickHouse cannot run on Android.
+if (NOT OS_ANDROID AND OS_LINUX AND NOT ARCH_S390X AND NOT SANITIZE)
+    # Using '-no-pie' builds executables with fixed addresses, resulting in slightly more efficient code
+    # and keeping binary addresses constant even with ASLR enabled.
+    # Disabled on Android as it requires PIE: https://source.android.com/docs/security/enhancements#android-5
+    # Disabled on IBM S390X due to build issues with 'no-pie'
+    # Disabled with sanitizers to avoid issues with maximum relocation size: https://github.com/ClickHouse/ClickHouse/pull/49145
     set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -fno-pie")
     set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -fno-pie")
     set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -no-pie -Wl,-no-pie")
+else ()
+    message (WARNING "ClickHouse is built as PIE, system.trace_log will contain invalid addresses after server restart.")
 endif ()

 if (ENABLE_TESTS)

@@ -604,7 +605,9 @@ if (NATIVE_BUILD_TARGETS
 execute_process(
     COMMAND ${CMAKE_COMMAND} -E make_directory "${NATIVE_BUILD_DIR}"
-    COMMAND_ECHO STDOUT)
+    COMMAND_ECHO STDOUT
+    COMMAND_ERROR_IS_FATAL ANY
+)

 execute_process(
     COMMAND ${CMAKE_COMMAND}

@@ -616,9 +619,13 @@ if (NATIVE_BUILD_TARGETS
     "-DENABLE_CLICKHOUSE_SELF_EXTRACTING=${ENABLE_CLICKHOUSE_SELF_EXTRACTING}"
     ${PROJECT_SOURCE_DIR}
     WORKING_DIRECTORY "${NATIVE_BUILD_DIR}"
-    COMMAND_ECHO STDOUT)
+    COMMAND_ECHO STDOUT
+    COMMAND_ERROR_IS_FATAL ANY
+)

 execute_process(
     COMMAND ${CMAKE_COMMAND} --build "${NATIVE_BUILD_DIR}" --target ${NATIVE_BUILD_TARGETS}
-    COMMAND_ECHO STDOUT)
+    COMMAND_ECHO STDOUT
+    COMMAND_ERROR_IS_FATAL ANY
+)
 endif ()
PreLoad.cmake

@@ -51,8 +51,14 @@ if (NOT "$ENV{CFLAGS}" STREQUAL ""
 endif()

 # Default toolchain - this is needed to avoid dependency on OS files.
-execute_process(COMMAND uname -s OUTPUT_VARIABLE OS)
-execute_process(COMMAND uname -m OUTPUT_VARIABLE ARCH)
+execute_process(COMMAND uname -s
+    OUTPUT_VARIABLE OS
+    COMMAND_ERROR_IS_FATAL ANY
+)
+execute_process(COMMAND uname -m
+    OUTPUT_VARIABLE ARCH
+    COMMAND_ERROR_IS_FATAL ANY
+)

 # By default, prefer clang on Linux
 # But note, that you still may change the compiler with -DCMAKE_C_COMPILER/-DCMAKE_CXX_COMPILER.

README.md: 30 changes
@@ -34,17 +34,41 @@ curl https://clickhouse.com/ | sh

 Every month we get together with the community (users, contributors, customers, those interested in learning more about ClickHouse) to discuss what is coming in the latest release. If you are interested in sharing what you've built on ClickHouse, let us know.

-* [v24.8 Community Call](https://clickhouse.com/company/events/v24-8-community-release-call) - August 29
+* [v24.9 Community Call](https://clickhouse.com/company/events/v24-9-community-release-call) - September 26

 ## Upcoming Events

 Keep an eye out for upcoming meetups and events around the world. Somewhere else you want us to be? Please feel free to reach out to tyler `<at>` clickhouse `<dot>` com. You can also peruse [ClickHouse Events](https://clickhouse.com/company/news-events) for a list of all upcoming trainings, meetups, speaking engagements, etc.

-* MORE COMING SOON!
+The following upcoming meetups are featuring creator of ClickHouse & CTO, Alexey Milovidov:
+
+* [ClickHouse Guangzhou User Group Meetup](https://mp.weixin.qq.com/s/GSvo-7xUoVzCsuUvlLTpCw) - August 25
+* [San Francisco Meetup (Cloudflare)](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/302540575) - September 5
+* [Raleigh Meetup (Deutsche Bank)](https://www.meetup.com/triangletechtalks/events/302723486/) - September 9
+* [New York Meetup (Rokt)](https://www.meetup.com/clickhouse-new-york-user-group/events/302575342) - September 10
+* [Chicago Meetup (Jump Capital)](https://lu.ma/43tvmrfw) - September 12
+
+Other upcoming meetups
+* [Seattle Meetup (Statsig)](https://www.meetup.com/clickhouse-seattle-user-group/events/302518075/) - August 27
+* [Melbourne Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302732666/) - August 27
+* [Sydney Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302862966/) - September 5
+* [Zurich Meetup](https://www.meetup.com/clickhouse-switzerland-meetup-group/events/302267429/) - September 5
+* [Toronto Meetup (Shopify)](https://www.meetup.com/clickhouse-toronto-user-group/events/301490855/) - September 10
+* [Austin Meetup](https://www.meetup.com/clickhouse-austin-user-group/events/302558689/) - September 17
+* [London Meetup](https://www.meetup.com/clickhouse-london-user-group/events/302977267) - September 17
+* [Tel Aviv Meetup](https://www.meetup.com/clickhouse-meetup-israel/events/303095121) - September 22
+* [Madrid Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096564/) - October 22
+* [Barcelona Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096876/) - October 29
+* [Oslo Meetup](https://www.meetup.com/open-source-real-time-data-warehouse-real-time-analytics/events/302938622) - October 31
+* [Ghent Meetup](https://www.meetup.com/clickhouse-belgium-user-group/events/303049405/) - November 19
+* [Dubai Meetup](https://www.meetup.com/clickhouse-dubai-meetup-group/events/303096989/) - November 21
+* [Paris Meetup](https://www.meetup.com/clickhouse-france-user-group/events/303096434) - November 26

 ## Recent Recordings
 * **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Current featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"
-* **Recording available**: [**v24.4 Release Call**](https://www.youtube.com/watch?v=dtUqgcfOGmE) All the features of 24.4, one convenient video! Watch it now!
+* **Recording available**: [**v24.8 LTS Release Call**](https://www.youtube.com/watch?v=AeLmp2jc51k) All the features of 24.8 LTS, one convenient video! Watch it now!

 ## Interested in joining ClickHouse and making it your full-time job?

SECURITY.md: 16 changes
@@ -14,25 +14,15 @@ The following versions of ClickHouse server are currently supported with security updates:

 | Version | Supported |
 |:-|:-|
+| 24.8 | ✔️ |
 | 24.7 | ✔️ |
 | 24.6 | ✔️ |
-| 24.5 | ✔️ |
+| 24.5 | ❌ |
 | 24.4 | ❌ |
 | 24.3 | ✔️ |
 | 24.2 | ❌ |
 | 24.1 | ❌ |
-| 23.12 | ❌ |
-| 23.11 | ❌ |
-| 23.10 | ❌ |
-| 23.9 | ❌ |
-| 23.8 | ✔️ |
-| 23.7 | ❌ |
-| 23.6 | ❌ |
-| 23.5 | ❌ |
-| 23.4 | ❌ |
-| 23.3 | ❌ |
-| 23.2 | ❌ |
-| 23.1 | ❌ |
+| 23.* | ❌ |
 | 22.* | ❌ |
 | 21.* | ❌ |
 | 20.* | ❌ |
@ -1,4 +1,4 @@
add_compile_options($<$<OR:$<COMPILE_LANGUAGE:C>,$<COMPILE_LANGUAGE:CXX>>:${COVERAGE_FLAGS}>)
add_compile_options("$<$<OR:$<COMPILE_LANGUAGE:C>,$<COMPILE_LANGUAGE:CXX>>:${COVERAGE_FLAGS}>")

if (USE_CLANG_TIDY)
    set (CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_PATH}")
@ -8,6 +8,8 @@ endif ()
# when instantiated from JSON.cpp. Try again when libcxx(abi) and Clang are upgraded to 16.
set (CMAKE_CXX_STANDARD 20)

configure_file(GitHash.cpp.in GitHash.generated.cpp)

set (SRCS
    argsToConfig.cpp
    cgroupsv2.cpp
@ -33,6 +35,7 @@ set (SRCS
    safeExit.cpp
    throwError.cpp
    Numa.cpp
    GitHash.generated.cpp
)

add_library (common ${SRCS})
@ -27,27 +27,6 @@ bool cgroupsV2Enabled()
#endif
}

bool cgroupsV2MemoryControllerEnabled()
{
#if defined(OS_LINUX)
    chassert(cgroupsV2Enabled());
    /// According to https://docs.kernel.org/admin-guide/cgroup-v2.html, file "cgroup.controllers" defines which controllers are available
    /// for the current + child cgroups. The set of available controllers can be restricted from level to level using file
    /// "cgroups.subtree_control". It is therefore sufficient to check the bottom-most nested "cgroup.controllers" file.
    fs::path cgroup_dir = cgroupV2PathOfProcess();
    if (cgroup_dir.empty())
        return false;
    std::ifstream controllers_file(cgroup_dir / "cgroup.controllers");
    if (!controllers_file.is_open())
        return false;
    std::string controllers;
    std::getline(controllers_file, controllers);
    return controllers.find("memory") != std::string::npos;
#else
    return false;
#endif
}

fs::path cgroupV2PathOfProcess()
{
#if defined(OS_LINUX)
@ -71,3 +50,28 @@ fs::path cgroupV2PathOfProcess()
    return {};
#endif
}

std::optional<std::string> getCgroupsV2PathContainingFile([[maybe_unused]] std::string_view file_name)
{
#if defined(OS_LINUX)
    if (!cgroupsV2Enabled())
        return {};

    fs::path current_cgroup = cgroupV2PathOfProcess();
    if (current_cgroup.empty())
        return {};

    /// Return the bottom-most nested file. If there is no such file at the current
    /// level, try again at the parent level as settings are inherited.
    while (current_cgroup != default_cgroups_mount.parent_path())
    {
        const auto path = current_cgroup / file_name;
        if (fs::exists(path))
            return {current_cgroup};
        current_cgroup = current_cgroup.parent_path();
    }
    return {};
#else
    return {};
#endif
}
@ -1,6 +1,7 @@
#pragma once

#include <filesystem>
#include <string_view>

#if defined(OS_LINUX)
/// I think it is possible to mount the cgroups hierarchy somewhere else (e.g. when in containers).
@ -11,11 +12,11 @@ static inline const std::filesystem::path default_cgroups_mount = "/sys/fs/cgrou
/// Is cgroups v2 enabled on the system?
bool cgroupsV2Enabled();

/// Is the memory controller of cgroups v2 enabled on the system?
/// Assumes that cgroupsV2Enabled() is enabled.
bool cgroupsV2MemoryControllerEnabled();

/// Detects which cgroup v2 the process belongs to and returns the filesystem path to the cgroup.
/// Returns an empty path if the cgroup cannot be determined.
/// Assumes that cgroupsV2Enabled() is enabled.
std::filesystem::path cgroupV2PathOfProcess();

/// Returns the most nested cgroup dir containing the specified file.
/// If cgroups v2 is not enabled - returns an empty optional.
std::optional<std::string> getCgroupsV2PathContainingFile([[maybe_unused]] std::string_view file_name);
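To make the lookup semantics concrete, here is a minimal usage sketch — not part of the diff; it assumes the header above is on the include path and a Linux host with cgroups v2 mounted — that resolves the cgroup directory defining "memory.max" and reads the limit:

    #include <filesystem>
    #include <fstream>
    #include <iostream>
    #include <optional>
    #include <string>
    // #include "cgroupsv2.h" — hypothetical include of the header shown above

    int main()
    {
        // Walks up from the process's cgroup until a "memory.max" file is found.
        if (std::optional<std::string> dir = getCgroupsV2PathContainingFile("memory.max"))
        {
            std::ifstream limit_file(std::filesystem::path(*dir) / "memory.max");
            std::string limit;
            std::getline(limit_file, limit);
            std::cout << "memory limit: " << limit << '\n'; // "max" means unlimited
        }
        else
            std::cout << "cgroups v2 unavailable or no limit set\n";
    }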
@ -19,9 +19,6 @@ std::optional<uint64_t> getCgroupsV2MemoryLimit()
    if (!cgroupsV2Enabled())
        return {};

    if (!cgroupsV2MemoryControllerEnabled())
        return {};

    std::filesystem::path current_cgroup = cgroupV2PathOfProcess();
    if (current_cgroup.empty())
        return {};
@ -66,13 +66,11 @@ TRAP(gethostbyname)
TRAP(gethostbyname2)
TRAP(gethostent)
TRAP(getlogin)
TRAP(getmntent)
TRAP(getnetbyaddr)
TRAP(getnetbyname)
TRAP(getnetent)
TRAP(getnetgrent)
TRAP(getnetgrent_r)
TRAP(getopt)
TRAP(getopt_long)
TRAP(getopt_long_only)
TRAP(getpass)
@ -133,7 +131,6 @@ TRAP(nrand48)
TRAP(__ppc_get_timebase_freq)
TRAP(ptsname)
TRAP(putchar_unlocked)
TRAP(putenv)
TRAP(pututline)
TRAP(pututxline)
TRAP(putwchar_unlocked)
@ -148,7 +145,6 @@ TRAP(sethostent)
TRAP(sethostid)
TRAP(setkey)
//TRAP(setlocale) // Used by replxx at startup
TRAP(setlogmask)
TRAP(setnetent)
TRAP(setnetgrent)
TRAP(setprotoent)
@ -203,7 +199,6 @@ TRAP(lgammal)
TRAP(nftw)
TRAP(nl_langinfo)
TRAP(putc_unlocked)
TRAP(rand)
/** In the current POSIX.1 specification (POSIX.1-2008), readdir() is not required to be thread-safe. However, in modern
 * implementations (including the glibc implementation), concurrent calls to readdir() that specify different directory streams
 * are thread-safe. In cases where multiple threads must read from the same directory stream, using readdir() with external
@ -288,4 +283,14 @@ TRAP(tss_get)
TRAP(tss_set)
TRAP(tss_delete)

#ifndef USE_MUSL
/// These produce duplicate symbol errors when statically linking with musl.
/// Maybe we can remove them from the musl fork.
TRAP(getopt)
TRAP(putenv)
TRAP(setlogmask)
TRAP(rand)
TRAP(getmntent)
#endif

#endif
@ -21,6 +21,7 @@
#include "Poco/Exception.h"
#include "Poco/Foundation.h"
#include "Poco/Mutex.h"
#include "Poco/Message.h"


namespace Poco
@ -78,6 +79,10 @@ public:
    ///
    /// The default implementation just breaks into the debugger.

    virtual void logMessageImpl(Message::Priority priority, const std::string & msg) {}
    /// Write a message to the log
    /// Useful for logging from Poco

    static void handle(const Exception & exc);
    /// Invokes the currently registered ErrorHandler.

@ -87,6 +92,9 @@ public:
    static void handle();
    /// Invokes the currently registered ErrorHandler.

    static void logMessage(Message::Priority priority, const std::string & msg);
    /// Invokes the currently registered ErrorHandler to log a message.

    static ErrorHandler * set(ErrorHandler * pHandler);
    /// Registers the given handler as the current error handler.
    ///
@ -8,7 +8,7 @@
// Copyright (c) 2005-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//


@ -35,79 +35,91 @@ ErrorHandler::~ErrorHandler()

void ErrorHandler::exception(const Exception& exc)
{
    poco_debugger_msg(exc.what());
}


void ErrorHandler::exception(const std::exception& exc)
{
    poco_debugger_msg(exc.what());
}


void ErrorHandler::exception()
{
    poco_debugger_msg("unknown exception");
}


void ErrorHandler::handle(const Exception& exc)
{
    FastMutex::ScopedLock lock(_mutex);
    try
    {
        _pHandler->exception(exc);
    }
    catch (...)
    {
    }
}


void ErrorHandler::handle(const std::exception& exc)
{
    FastMutex::ScopedLock lock(_mutex);
    try
    {
        _pHandler->exception(exc);
    }
    catch (...)
    {
    }
}


void ErrorHandler::handle()
{
    FastMutex::ScopedLock lock(_mutex);
    try
    {
        _pHandler->exception();
    }
    catch (...)
    {
    }
}


void ErrorHandler::logMessage(Message::Priority priority, const std::string & msg)
{
    FastMutex::ScopedLock lock(_mutex);
    try
    {
        _pHandler->logMessageImpl(priority, msg);
    }
    catch (...)
    {
    }
}


ErrorHandler* ErrorHandler::set(ErrorHandler* pHandler)
{
    poco_check_ptr(pHandler);

    FastMutex::ScopedLock lock(_mutex);
    ErrorHandler* pOld = _pHandler;
    _pHandler = pHandler;
    return pOld;
}


ErrorHandler* ErrorHandler::defaultHandler()
{
    // NOTE: Since this is called to initialize the static _pHandler
    // variable, sh has to be a local static, otherwise we run
    // into static initialization order issues.
    static SingletonHolder<ErrorHandler> sh;
    return sh.get();
}
@ -48,25 +48,17 @@ std::string PathImpl::currentImpl()
std::string PathImpl::homeImpl()
{
    std::string path;
#if defined(_POSIX_C_SOURCE) || defined(_BSD_SOURCE) || defined(_POSIX_C_SOURCE)
    size_t buf_size = 1024; // Same as glibc use for getpwuid
    std::vector<char> buf(buf_size);
    struct passwd res;
    struct passwd* pwd = nullptr;

    getpwuid_r(getuid(), &res, buf.data(), buf_size, &pwd);
#else
    struct passwd* pwd = getpwuid(getuid());
#endif
    if (pwd)
        path = pwd->pw_dir;
    else
    {
#if defined(_POSIX_C_SOURCE) || defined(_BSD_SOURCE) || defined(_POSIX_C_SOURCE)
        getpwuid_r(getuid(), &res, buf.data(), buf_size, &pwd);
#else
        pwd = getpwuid(geteuid());
#endif
        if (pwd)
            path = pwd->pw_dir;
        else
@ -82,7 +74,7 @@ std::string PathImpl::configHomeImpl()
{
    std::string path = PathImpl::homeImpl();
    std::string::size_type n = path.size();
    if (n > 0 && path[n - 1] == '/')
#if POCO_OS == POCO_OS_MAC_OS_X
        path.append("Library/Preferences/");
#else
@ -97,7 +89,7 @@ std::string PathImpl::dataHomeImpl()
{
    std::string path = PathImpl::homeImpl();
    std::string::size_type n = path.size();
    if (n > 0 && path[n - 1] == '/')
#if POCO_OS == POCO_OS_MAC_OS_X
        path.append("Library/Application Support/");
#else
@ -112,7 +104,7 @@ std::string PathImpl::cacheHomeImpl()
{
    std::string path = PathImpl::homeImpl();
    std::string::size_type n = path.size();
    if (n > 0 && path[n - 1] == '/')
#if POCO_OS == POCO_OS_MAC_OS_X
        path.append("Library/Caches/");
#else
@ -127,7 +119,7 @@ std::string PathImpl::tempHomeImpl()
{
    std::string path = PathImpl::homeImpl();
    std::string::size_type n = path.size();
    if (n > 0 && path[n - 1] == '/')
#if POCO_OS == POCO_OS_MAC_OS_X
        path.append("Library/Caches/");
#else
@ -159,7 +151,7 @@ std::string PathImpl::tempImpl()
std::string PathImpl::configImpl()
{
    std::string path;

#if POCO_OS == POCO_OS_MAC_OS_X
    path = "/Library/Preferences/";
#else
@ -18,7 +18,9 @@
#define Net_HTTPResponse_INCLUDED


#include <map>
#include <vector>

#include "Poco/Net/HTTPCookie.h"
#include "Poco/Net/HTTPMessage.h"
#include "Poco/Net/Net.h"
@ -180,6 +182,8 @@ namespace Net
        /// May throw an exception in case of a malformed
        /// Set-Cookie header.

        void getHeaders(std::map<std::string, std::string> & headers) const;

        void write(std::ostream & ostr) const;
        /// Writes the HTTP response to the given
        /// output stream.
@ -58,6 +58,10 @@ namespace Net

        void setKeepAliveTimeout(Poco::Timespan keepAliveTimeout);

        size_t getKeepAliveTimeout() const { return _keepAliveTimeout.totalSeconds(); }

        size_t getMaxKeepAliveRequests() const { return _maxKeepAliveRequests; }

    private:
        bool _firstRequest;
        Poco::Timespan _keepAliveTimeout;
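To put the new getters in context, a small configuration sketch — illustrative only; the parameter values are made up, and the "0 disables / negative means unlimited" reading is inferred from hasMoreRequests() further below:

    #include <Poco/Net/HTTPServerParams.h>

    // Hypothetical wiring of keep-alive parameters for a server session.
    Poco::Net::HTTPServerParams::Ptr params = new Poco::Net::HTTPServerParams;
    params->setKeepAlive(true);
    params->setKeepAliveTimeout(Poco::Timespan(10, 0)); // 10 s to wait for the next request
    params->setMaxKeepAliveRequests(100);               // per hasMoreRequests(): 0 disables,
                                                        // a negative value appears unlimited

    // Later, on a session constructed from these params:
    //     session.getKeepAliveTimeout();      // -> 10 (whole seconds, as size_t)
    //     session.getMaxKeepAliveRequests();  // -> remaining request budget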
@ -19,6 +19,8 @@


#include <ios>
#include <memory>
#include <functional>
#include "Poco/Any.h"
#include "Poco/Buffer.h"
#include "Poco/Exception.h"
@ -33,6 +35,27 @@ namespace Net
{


    class IHTTPSessionDataHooks
    /// Interface to control the stream of data bytes being sent or received through the socket by HTTPSession
    /// It allows one to monitor, throttle and schedule data streams with syscall granularity
    {
    public:
        virtual ~IHTTPSessionDataHooks() = default;

        virtual void atStart(int bytes) = 0;
        /// Called before sending/receiving data `bytes` to/from socket.

        virtual void atFinish(int bytes) = 0;
        /// Called when sending/receiving of data `bytes` is successfully finished.

        virtual void atFail() = 0;
        /// If an error occurred during send/receive `fail()` is called instead of `finish()`.
    };


    using HTTPSessionDataHooksPtr = std::shared_ptr<IHTTPSessionDataHooks>;


    class Net_API HTTPSession
    /// HTTPSession implements basic HTTP session management
    /// for both HTTP clients and HTTP servers.
@ -73,6 +96,12 @@ namespace Net
        Poco::Timespan getReceiveTimeout() const;
        /// Returns receive timeout for the HTTP session.

        void setSendDataHooks(const HTTPSessionDataHooksPtr & sendDataHooks = {});
        /// Sets data hooks that will be called on every send to the socket.

        void setReceiveDataHooks(const HTTPSessionDataHooksPtr & receiveDataHooks = {});
        /// Sets data hooks that will be called on every receive from the socket.

        bool connected() const;
        /// Returns true if the underlying socket is connected.

@ -211,6 +240,10 @@ namespace Net
        Poco::Exception * _pException;
        Poco::Any _data;

        // Data hooks
        HTTPSessionDataHooksPtr _sendDataHooks;
        HTTPSessionDataHooksPtr _receiveDataHooks;

        friend class HTTPStreamBuf;
        friend class HTTPHeaderStreamBuf;
        friend class HTTPFixedLengthStreamBuf;
@ -246,6 +279,16 @@ namespace Net
        return _receiveTimeout;
    }

    inline void HTTPSession::setSendDataHooks(const HTTPSessionDataHooksPtr & sendDataHooks)
    {
        _sendDataHooks = sendDataHooks;
    }

    inline void HTTPSession::setReceiveDataHooks(const HTTPSessionDataHooksPtr & receiveDataHooks)
    {
        _receiveDataHooks = receiveDataHooks;
    }

    inline StreamSocket & HTTPSession::socket()
    {
        return _socket;
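For illustration only (not part of the diff), a hook implementation could look like the sketch below; `ByteCountingHooks` is a hypothetical name, and the interface is the one declared above:

    #include <atomic>
    #include <cstdint>

    class ByteCountingHooks : public Poco::Net::IHTTPSessionDataHooks
    {
    public:
        void atStart(int bytes) override { attempted += bytes; }     // about to hit the socket
        void atFinish(int bytes) override { transferred += bytes; }  // syscall succeeded
        void atFail() override { ++failures; }                       // syscall threw

        std::atomic<int64_t> attempted{0};
        std::atomic<int64_t> transferred{0};
        std::atomic<int64_t> failures{0};
    };

    // Hypothetical wiring on some HTTPSession-derived object `session`:
    //     auto hooks = std::make_shared<ByteCountingHooks>();
    //     session.setSendDataHooks(hooks);
    //     session.setReceiveDataHooks(hooks);

A throttling or scheduling hook would follow the same shape, blocking inside atStart() until its budget allows the next syscall.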
@ -209,6 +209,15 @@ void HTTPResponse::getCookies(std::vector<HTTPCookie>& cookies) const
    }
}

void HTTPResponse::getHeaders(std::map<std::string, std::string> & headers) const
{
    headers.clear();
    for (const auto & it : *this)
    {
        headers.emplace(it.first, it.second);
    }
}


void HTTPResponse::write(std::ostream& ostr) const
{
@ -19,11 +19,11 @@ namespace Poco {
namespace Net {


HTTPServerSession::HTTPServerSession(const StreamSocket& socket, HTTPServerParams::Ptr pParams):
    HTTPSession(socket, pParams->getKeepAlive()),
    _firstRequest(true),
    _keepAliveTimeout(pParams->getKeepAliveTimeout()),
    _maxKeepAliveRequests(pParams->getMaxKeepAliveRequests())
HTTPServerSession::HTTPServerSession(const StreamSocket & socket, HTTPServerParams::Ptr pParams)
    : HTTPSession(socket, pParams->getKeepAlive())
    , _firstRequest(true)
    , _keepAliveTimeout(pParams->getKeepAliveTimeout())
    , _maxKeepAliveRequests(pParams->getMaxKeepAliveRequests())
{
    setTimeout(pParams->getTimeout());
}
@ -52,11 +52,12 @@ bool HTTPServerSession::hasMoreRequests()
    }
    else if (_maxKeepAliveRequests != 0 && getKeepAlive())
    {
        if (_maxKeepAliveRequests > 0)
            --_maxKeepAliveRequests;
        return buffered() > 0 || socket().poll(_keepAliveTimeout, Socket::SELECT_READ);
    }
    else return false;
        if (_maxKeepAliveRequests > 0)
            --_maxKeepAliveRequests;
        return buffered() > 0 || socket().poll(_keepAliveTimeout, Socket::SELECT_READ);
    }
    else
        return false;
}
@ -128,14 +128,14 @@ int HTTPSession::get()
{
    if (_pCurrent == _pEnd)
        refill();

    if (_pCurrent < _pEnd)
        return *_pCurrent++;
    else
        return std::char_traits<char>::eof();
}


int HTTPSession::peek()
{
    if (_pCurrent == _pEnd)
@ -147,7 +147,7 @@ int HTTPSession::peek()
        return std::char_traits<char>::eof();
}


int HTTPSession::read(char* buffer, std::streamsize length)
{
    if (_pCurrent < _pEnd)
@ -166,10 +166,17 @@ int HTTPSession::write(const char* buffer, std::streamsize length)
{
    try
    {
        return _socket.sendBytes(buffer, (int) length);
        if (_sendDataHooks)
            _sendDataHooks->atStart((int) length);
        int result = _socket.sendBytes(buffer, (int) length);
        if (_sendDataHooks)
            _sendDataHooks->atFinish(result);
        return result;
    }
    catch (Poco::Exception& exc)
    {
        if (_sendDataHooks)
            _sendDataHooks->atFail();
        setException(exc);
        throw;
    }
@ -180,10 +187,17 @@ int HTTPSession::receive(char* buffer, int length)
{
    try
    {
        return _socket.receiveBytes(buffer, length);
        if (_receiveDataHooks)
            _receiveDataHooks->atStart(length);
        int result = _socket.receiveBytes(buffer, length);
        if (_receiveDataHooks)
            _receiveDataHooks->atFinish(result);
        return result;
    }
    catch (Poco::Exception& exc)
    {
        if (_receiveDataHooks)
            _receiveDataHooks->atFail();
        setException(exc);
        throw;
    }
@ -17,6 +17,7 @@
#include "Poco/Net/StreamSocketImpl.h"
#include "Poco/NumberFormatter.h"
#include "Poco/Timestamp.h"
#include "Poco/ErrorHandler.h"
#include <string.h> // FD_SET needs memset on some platforms, so we can't use <cstring>


@ -62,7 +63,7 @@ bool checkIsBrokenTimeout()

SocketImpl::SocketImpl():
    _sockfd(POCO_INVALID_SOCKET),
    _blocking(true),
    _isBrokenTimeout(checkIsBrokenTimeout())
{
}
@ -81,7 +82,7 @@ SocketImpl::~SocketImpl()
    close();
}


SocketImpl* SocketImpl::acceptConnection(SocketAddress& clientAddr)
{
    if (_sockfd == POCO_INVALID_SOCKET) throw InvalidSocketException();
@ -117,7 +118,7 @@ void SocketImpl::connect(const SocketAddress& address)
        rc = ::connect(_sockfd, address.addr(), address.length());
    }
    while (rc != 0 && lastError() == POCO_EINTR);
    if (rc != 0)
    {
        int err = lastError();
        error(err, address.toString());
@ -204,7 +205,7 @@ void SocketImpl::bind6(const SocketAddress& address, bool reuseAddress, bool reu
#if defined(POCO_HAVE_IPv6)
    if (address.family() != SocketAddress::IPv6)
        throw Poco::InvalidArgumentException("SocketAddress must be an IPv6 address");

    if (_sockfd == POCO_INVALID_SOCKET)
    {
        init(address.af());
@ -225,11 +226,11 @@ void SocketImpl::bind6(const SocketAddress& address, bool reuseAddress, bool reu
#endif
}


void SocketImpl::listen(int backlog)
{
    if (_sockfd == POCO_INVALID_SOCKET) throw InvalidSocketException();

    int rc = ::listen(_sockfd, backlog);
    if (rc != 0) error();
}
@ -253,7 +254,7 @@ void SocketImpl::shutdownReceive()
    if (rc != 0) error();
}


void SocketImpl::shutdownSend()
{
    if (_sockfd == POCO_INVALID_SOCKET) throw InvalidSocketException();
@ -262,7 +263,7 @@ void SocketImpl::shutdownSend()
    if (rc != 0) error();
}


void SocketImpl::shutdown()
{
    if (_sockfd == POCO_INVALID_SOCKET) throw InvalidSocketException();
@ -317,7 +318,7 @@ int SocketImpl::receiveBytes(void* buffer, int length, int flags)
            throw TimeoutException();
        }
    }

    int rc;
    do
    {
@ -325,7 +326,7 @@ int SocketImpl::receiveBytes(void* buffer, int length, int flags)
        rc = ::recv(_sockfd, reinterpret_cast<char*>(buffer), length, flags);
    }
    while (blocking && rc < 0 && lastError() == POCO_EINTR);
    if (rc < 0)
    {
        int err = lastError();
        if ((err == POCO_EAGAIN || err == POCO_EWOULDBLOCK) && !blocking)
@ -363,7 +364,7 @@ int SocketImpl::receiveFrom(void* buffer, int length, SocketAddress& address, in
            throw TimeoutException();
        }
    }

    sockaddr_storage abuffer;
    struct sockaddr* pSA = reinterpret_cast<struct sockaddr*>(&abuffer);
    poco_socklen_t saLen = sizeof(abuffer);
@ -450,7 +451,7 @@ bool SocketImpl::pollImpl(Poco::Timespan& remainingTime, int mode)
    }
    while (rc < 0 && lastError() == POCO_EINTR);
    if (rc < 0) error();
    return rc > 0;

#else

@ -493,7 +494,7 @@ bool SocketImpl::pollImpl(Poco::Timespan& remainingTime, int mode)
    }
    while (rc < 0 && errorCode == POCO_EINTR);
    if (rc < 0) error(errorCode);
    return rc > 0;

#endif // POCO_HAVE_FD_POLL
}
@ -503,13 +504,13 @@ bool SocketImpl::poll(const Poco::Timespan& timeout, int mode)
    Poco::Timespan remainingTime(timeout);
    return pollImpl(remainingTime, mode);
}


void SocketImpl::setSendBufferSize(int size)
{
    setOption(SOL_SOCKET, SO_SNDBUF, size);
}


int SocketImpl::getSendBufferSize()
{
    int result;
@ -523,7 +524,7 @@ void SocketImpl::setReceiveBufferSize(int size)
    setOption(SOL_SOCKET, SO_RCVBUF, size);
}


int SocketImpl::getReceiveBufferSize()
{
    int result;
@ -569,7 +570,7 @@ Poco::Timespan SocketImpl::getReceiveTimeout()
    return result;
}


SocketAddress SocketImpl::address()
{
    if (_sockfd == POCO_INVALID_SOCKET) throw InvalidSocketException();
@ -580,7 +581,7 @@ SocketAddress SocketImpl::address()
    int rc = ::getsockname(_sockfd, pSA, &saLen);
    if (rc == 0)
        return SocketAddress(pSA, saLen);
    else
        error();
    return SocketAddress();
}
@ -8,7 +8,7 @@
// Copyright (c) 2005-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//


@ -44,190 +44,194 @@ TCPServerConnectionFilter::~TCPServerConnectionFilter()


TCPServer::TCPServer(TCPServerConnectionFactory::Ptr pFactory, Poco::UInt16 portNumber, TCPServerParams::Ptr pParams):
    _socket(ServerSocket(portNumber)),
    _thread(threadName(_socket)),
    _stopped(true)
{
    Poco::ThreadPool& pool = Poco::ThreadPool::defaultPool();
    if (pParams)
    {
        int toAdd = pParams->getMaxThreads() - pool.capacity();
        if (toAdd > 0) pool.addCapacity(toAdd);
    }
    _pDispatcher = new TCPServerDispatcher(pFactory, pool, pParams);
}


TCPServer::TCPServer(TCPServerConnectionFactory::Ptr pFactory, const ServerSocket& socket, TCPServerParams::Ptr pParams):
    _socket(socket),
    _thread(threadName(socket)),
    _stopped(true)
{
    Poco::ThreadPool& pool = Poco::ThreadPool::defaultPool();
    if (pParams)
    {
        int toAdd = pParams->getMaxThreads() - pool.capacity();
        if (toAdd > 0) pool.addCapacity(toAdd);
    }
    _pDispatcher = new TCPServerDispatcher(pFactory, pool, pParams);
}


TCPServer::TCPServer(TCPServerConnectionFactory::Ptr pFactory, Poco::ThreadPool& threadPool, const ServerSocket& socket, TCPServerParams::Ptr pParams):
    _socket(socket),
    _pDispatcher(new TCPServerDispatcher(pFactory, threadPool, pParams)),
    _thread(threadName(socket)),
    _stopped(true)
{
}


TCPServer::~TCPServer()
{
    try
    {
        stop();
        _pDispatcher->release();
    }
    catch (...)
    {
        poco_unexpected();
    }
}


const TCPServerParams& TCPServer::params() const
{
    return _pDispatcher->params();
}


void TCPServer::start()
{
    poco_assert (_stopped);

    _stopped = false;
    _thread.start(*this);
}


void TCPServer::stop()
{
    if (!_stopped)
    {
        _stopped = true;
        _thread.join();
        _pDispatcher->stop();
    }
}


void TCPServer::run()
{
    while (!_stopped)
    {
        Poco::Timespan timeout(250000);
        try
        {
            if (_socket.poll(timeout, Socket::SELECT_READ))
            {
                try
                {
                    StreamSocket ss = _socket.acceptConnection();

                    if (!_pConnectionFilter || _pConnectionFilter->accept(ss))
                    {
                        // enable nodelay per default: OSX really needs that
#if defined(POCO_OS_FAMILY_UNIX)
                        if (ss.address().family() != AddressFamily::UNIX_LOCAL)
#endif
                        {
                            ss.setNoDelay(true);
                        }
                        _pDispatcher->enqueue(ss);
                    }
                    else
                    {
                        ErrorHandler::logMessage(Message::PRIO_WARNING, "Filtered out connection from " + ss.peerAddress().toString());
                    }
                }
                catch (Poco::Exception& exc)
                {
                    ErrorHandler::handle(exc);
                }
                catch (std::exception& exc)
                {
                    ErrorHandler::handle(exc);
                }
                catch (...)
                {
                    ErrorHandler::handle();
                }
            }
        }
        catch (Poco::Exception& exc)
        {
            ErrorHandler::handle(exc);
            // possibly a resource issue since poll() failed;
            // give some time to recover before trying again
            Poco::Thread::sleep(50);
        }
    }
}


int TCPServer::currentThreads() const
{
    return _pDispatcher->currentThreads();
}


int TCPServer::maxThreads() const
{
    return _pDispatcher->maxThreads();
}


int TCPServer::totalConnections() const
{
    return _pDispatcher->totalConnections();
}


int TCPServer::currentConnections() const
{
    return _pDispatcher->currentConnections();
}


int TCPServer::maxConcurrentConnections() const
{
    return _pDispatcher->maxConcurrentConnections();
}


int TCPServer::queuedConnections() const
{
    return _pDispatcher->queuedConnections();
}


int TCPServer::refusedConnections() const
{
    return _pDispatcher->refusedConnections();
}


void TCPServer::setConnectionFilter(const TCPServerConnectionFilter::Ptr& pConnectionFilter)
{
    poco_assert (_stopped);

    _pConnectionFilter = pConnectionFilter;
}


std::string TCPServer::threadName(const ServerSocket& socket)
{
    std::string name("TCPServer: ");
    name.append(socket.address().toString());
    return name;
}
@ -8,7 +8,7 @@
// Copyright (c) 2005-2007, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//


@ -33,44 +33,44 @@ namespace Net {
class TCPConnectionNotification: public Notification
{
public:
    TCPConnectionNotification(const StreamSocket& socket):
        _socket(socket)
    {
    }

    ~TCPConnectionNotification()
    {
    }

    const StreamSocket& socket() const
    {
        return _socket;
    }

private:
    StreamSocket _socket;
};


TCPServerDispatcher::TCPServerDispatcher(TCPServerConnectionFactory::Ptr pFactory, Poco::ThreadPool& threadPool, TCPServerParams::Ptr pParams):
    _rc(1),
    _pParams(pParams),
    _currentThreads(0),
    _totalConnections(0),
    _currentConnections(0),
    _maxConcurrentConnections(0),
    _refusedConnections(0),
    _stopped(false),
    _pConnectionFactory(pFactory),
    _threadPool(threadPool)
{
    poco_check_ptr (pFactory);

    if (!_pParams)
        _pParams = new TCPServerParams;

    if (_pParams->getMaxThreads() == 0)
        _pParams->setMaxThreads(threadPool.capacity());
}


@ -81,161 +81,184 @@ TCPServerDispatcher::~TCPServerDispatcher()

void TCPServerDispatcher::duplicate()
{
    ++_rc;
}


void TCPServerDispatcher::release()
{
    if (--_rc == 0) delete this;
}


void TCPServerDispatcher::run()
{
    AutoPtr<TCPServerDispatcher> guard(this); // ensure object stays alive

    int idleTime = (int) _pParams->getThreadIdleTime().totalMilliseconds();

    for (;;)
    {
        try
        {
            AutoPtr<Notification> pNf = _queue.waitDequeueNotification(idleTime);
            if (pNf && !_stopped)
            {
                TCPConnectionNotification* pCNf = dynamic_cast<TCPConnectionNotification*>(pNf.get());
                if (pCNf)
                {
                    beginConnection();
                    if (!_stopped)
                    {
                        std::unique_ptr<TCPServerConnection> pConnection(_pConnectionFactory->createConnection(pCNf->socket()));
                        poco_check_ptr(pConnection.get());
                        pConnection->start();
                    }
                    /// endConnection() should be called after destroying TCPServerConnection,
                    /// otherwise currentConnections() could become zero while some connections are still alive.
                    endConnection();
                }
            }
        }
        catch (Poco::Exception &exc) { ErrorHandler::handle(exc); }
        catch (std::exception &exc) { ErrorHandler::handle(exc); }
        catch (...) { ErrorHandler::handle(); }
        FastMutex::ScopedLock lock(_mutex);
        if (_stopped || (_currentThreads > 1 && _queue.empty()))
        {
            --_currentThreads;
            break;
        }
    }
}


namespace
{
    static const std::string threadName("TCPServerConnection");
}


void TCPServerDispatcher::enqueue(const StreamSocket& socket)
{
    FastMutex::ScopedLock lock(_mutex);

    ErrorHandler::logMessage(Message::PRIO_TEST, "Queue size: " + std::to_string(_queue.size()) +
        ", current threads: " + std::to_string(_currentThreads) +
        ", threads in pool: " + std::to_string(_threadPool.allocated()) +
        ", current connections: " + std::to_string(_currentConnections));

    if (_queue.size() < _pParams->getMaxQueued())
    {
        /// NOTE: the condition below is wrong.
        /// Since the thread pool is shared between multiple servers/TCPServerDispatchers,
        /// _currentThreads < _pParams->getMaxThreads() will be true when the pool is actually saturated.
        /// As a result, queue is useless and connections never wait in queue.
        /// Instead, we (mistakenly) think that we can create a thread for this connection, but we fail to create it
        /// and the connection get rejected.
        /// We could check _currentThreads < _threadPool.allocated() to make it work,
        /// but it's not clear if we want to make it work
        /// because it may be better to reject connection immediately if we don't have resources to handle it.
        if (!_queue.hasIdleThreads() && _currentThreads < _pParams->getMaxThreads())
        {
            try
            {
                this->duplicate();
                _threadPool.startWithPriority(_pParams->getThreadPriority(), *this, threadName);
                ++_currentThreads;
            }
            catch (Poco::Exception& exc)
            {
                ErrorHandler::logMessage(Message::PRIO_WARNING, "Got an exception while starting thread for connection from " +
                    socket.peerAddress().toString());
                ErrorHandler::handle(exc);
                this->release();
                ++_refusedConnections;
                std::cerr << "Got exception while starting thread for connection. Error code: "
                    << exc.code() << ", message: '" << exc.displayText() << "'" << std::endl;
                return;
            }
        }
        else if (!_queue.hasIdleThreads())
        {
            ErrorHandler::logMessage(Message::PRIO_TRACE, "Don't have idle threads, adding connection from " +
                socket.peerAddress().toString() + " to the queue, size: " + std::to_string(_queue.size()));
        }
        _queue.enqueueNotification(new TCPConnectionNotification(socket));
    }
    else
    {
        ErrorHandler::logMessage(Message::PRIO_WARNING, "Refusing connection from " + socket.peerAddress().toString() +
            ", reached max queue size " + std::to_string(_pParams->getMaxQueued()));
        ++_refusedConnections;
    }
}
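For what it is worth, a sketch of the alternative check that the NOTE above suggests — an assumption on my part, not something this diff implements:

    // Hypothetical variant of the saturation check in enqueue(), following the
    // NOTE's own suggestion. _threadPool.allocated() counts threads already
    // handed out from the shared pool, so with several dispatchers sharing one
    // pool the dispatcher would stop trying to start a new worker once the pool
    // is exhausted, and let the connection wait in the queue instead of failing
    // startWithPriority() and refusing it.
    bool may_start_worker = !_queue.hasIdleThreads()
        && _currentThreads < _threadPool.allocated();

As the NOTE itself says, whether queueing is preferable to immediate rejection under resource exhaustion is a policy question, which is presumably why the diff only documents the issue.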

void TCPServerDispatcher::stop()
{
    _stopped = true;
    _queue.clear();
    _queue.wakeUpAll();
}


int TCPServerDispatcher::currentThreads() const
{
    return _currentThreads;
}

int TCPServerDispatcher::maxThreads() const
{
    FastMutex::ScopedLock lock(_mutex);

    return _threadPool.capacity();
}


int TCPServerDispatcher::totalConnections() const
{
    return _totalConnections;
}


int TCPServerDispatcher::currentConnections() const
{
    return _currentConnections;
}


int TCPServerDispatcher::maxConcurrentConnections() const
{
    return _maxConcurrentConnections;
}


int TCPServerDispatcher::queuedConnections() const
{
    return _queue.size();
}


int TCPServerDispatcher::refusedConnections() const
{
    return _refusedConnections;
}


void TCPServerDispatcher::beginConnection()
{
    FastMutex::ScopedLock lock(_mutex);

    ++_totalConnections;
    ++_currentConnections;
    if (_currentConnections > _maxConcurrentConnections)
        _maxConcurrentConnections.store(_currentConnections);
}


void TCPServerDispatcher::endConnection()
{
    --_currentConnections;
}
@ -311,6 +311,14 @@ int SecureSocketImpl::sendBytes(const void* buffer, int length, int flags)
    while (mustRetry(rc, remaining_time));
    if (rc <= 0)
    {
        // At this stage we can still have a last, not yet received SSL message containing an SSL error,
        // so make a read to force SSL to process a possible SSL error
        if (SSL_get_error(_pSSL, rc) == SSL_ERROR_SYSCALL && SocketImpl::lastError() == POCO_ECONNRESET)
        {
            char c = 0;
            SSL_read(_pSSL, &c, 1);
        }

        rc = handleError(rc);
        if (rc == 0) throw SSLConnectionUnexpectedlyClosedException();
    }
@ -18,4 +18,4 @@ target_compile_options (_poco_util
        -Wno-zero-as-null-pointer-constant
)
target_include_directories (_poco_util SYSTEM PUBLIC "include")
target_link_libraries (_poco_util PUBLIC Poco::JSON Poco::XML)
target_link_libraries (_poco_util PUBLIC Poco::JSON Poco::XML Poco::Net)
@ -241,6 +241,20 @@ namespace Util
        /// If the value contains references to other properties (${<property>}), these
        /// are expanded.

        std::string getHost(const std::string & key) const;
        /// Returns the string value of the host property with the given name.
        /// Throws a NotFoundException if the key does not exist.
        /// Throws a SyntaxException if the property is not a valid host (IP address or domain).
        /// If the value contains references to other properties (${<property>}), these
        /// are expanded.

        std::string getHost(const std::string & key, const std::string & defaultValue) const;
        /// If a property with the given key exists, returns the host property's string value,
        /// otherwise returns the given default value.
        /// Throws a SyntaxException if the property is not a valid host (IP address or domain).
        /// If the value contains references to other properties (${<property>}), these
        /// are expanded.

        virtual void setString(const std::string & key, const std::string & value);
        /// Sets the property with the given key to the given value.
        /// An already existing value for the key is overwritten.
@ -339,12 +353,35 @@ namespace Util
        static bool parseBool(const std::string & value);
        void setRawWithEvent(const std::string & key, std::string value);

        static void checkHostValidity(const std::string & value);
        /// Throws a SyntaxException if the value is not a valid host (IP address or domain).

        virtual ~AbstractConfiguration();

    private:
        std::string internalExpand(const std::string & value) const;
        std::string uncheckedExpand(const std::string & value) const;

        static bool isValidIPv4Address(const std::string & value);
        /// An IPv4 address is considered valid if it is "0.0.0.0" or one of those
        /// defined by inet_aton() or inet_addr()

        static bool isValidIPv6Address(const std::string & value);
        /// An IPv6 address is considered valid if it is "::" or one of those
        /// defined by inet_pton() with AF_INET6 flag
        /// (in this case it may have scope id and may be surrounded by '[', ']')

        static bool isValidDomainName(const std::string & value);
        /// <domain> ::= <subdomain> [ "." ]
        /// <subdomain> ::= <label> | <subdomain> "." <label>
        /// <label> ::= <letter> [ [ <ldh-str> ] <let-dig> ]
        /// <ldh-str> ::= <let-dig-hyp> | <let-dig-hyp> <ldh-str>
        /// <let-dig-hyp> ::= <let-dig> | "-"
        /// <let-dig> ::= <letter> | <digit>
        /// <letter> ::= any one of the 52 alphabetic characters A through Z in
        /// upper case and a through z in lower case
        /// <digit> ::= any one of the ten digits 0 through 9

        AbstractConfiguration(const AbstractConfiguration &);
        AbstractConfiguration & operator=(const AbstractConfiguration &);
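A brief usage sketch of the new accessor — illustrative only; the configuration keys and values are made up, and MapConfiguration is just a convenient concrete subclass:

    #include <Poco/AutoPtr.h>
    #include <Poco/Util/MapConfiguration.h>
    #include <iostream>

    int main()
    {
        Poco::AutoPtr<Poco::Util::MapConfiguration> config = new Poco::Util::MapConfiguration;
        config->setString("server.host", "db1.example.com");
        config->setString("server.bad_host", "not a host!");

        std::cout << config->getHost("server.host") << '\n';       // ok: valid domain name
        std::cout << config->getHost("listen", "0.0.0.0") << '\n'; // missing key: the default
                                                                   // value is validated too
        try
        {
            config->getHost("server.bad_host");                    // spaces and '!' fail validation
        }
        catch (const Poco::SyntaxException & e)
        {
            std::cerr << e.displayText() << '\n';
        }
    }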
@ -18,6 +18,7 @@
#include "Poco/NumberParser.h"
#include "Poco/NumberFormatter.h"
#include "Poco/String.h"
#include "Poco/Net/IPAddressImpl.h"


using Poco::Mutex;
@ -263,6 +264,41 @@ bool AbstractConfiguration::getBool(const std::string& key, bool defaultValue) c
}


std::string AbstractConfiguration::getHost(const std::string& key) const
{
    Mutex::ScopedLock lock(_mutex);

    std::string value;
    if (getRaw(key, value))
    {
        std::string expandedValue = internalExpand(value);
        checkHostValidity(expandedValue);
        return expandedValue;
    }
    else
        throw NotFoundException(key);
}


std::string AbstractConfiguration::getHost(const std::string& key, const std::string& defaultValue) const
{
    Mutex::ScopedLock lock(_mutex);

    std::string value;
    if (getRaw(key, value))
    {
        std::string expandedValue = internalExpand(value);
        checkHostValidity(expandedValue);
        return expandedValue;
    }
    else
    {
        checkHostValidity(defaultValue);
        return defaultValue;
    }
}


void AbstractConfiguration::setString(const std::string& key, const std::string& value)
{
    setRawWithEvent(key, value);
@ -529,4 +565,68 @@ void AbstractConfiguration::setRawWithEvent(const std::string& key, std::string
}


void AbstractConfiguration::checkHostValidity(const std::string& value)
{
    if (!isValidIPv4Address(value) && !isValidIPv6Address(value) && !isValidDomainName(value))
    {
        throw SyntaxException("Property is not a valid host name", value);
    }
}


bool AbstractConfiguration::isValidIPv4Address(const std::string& value)
{
    using Poco::Net::Impl::IPv4AddressImpl;
    IPv4AddressImpl empty4 = IPv4AddressImpl();

    IPv4AddressImpl ipAddress = IPv4AddressImpl::parse(value);
    return ipAddress != empty4 || value == "0.0.0.0";
}


bool AbstractConfiguration::isValidIPv6Address(const std::string& value)
{
#if defined(POCO_HAVE_IPv6)
    using Poco::Net::Impl::IPv6AddressImpl;
    IPv6AddressImpl empty6 = IPv6AddressImpl();

    IPv6AddressImpl ipAddress = IPv6AddressImpl::parse(value);
    return ipAddress != empty6 || value == "::";
#else
    return false;
#endif
}


bool AbstractConfiguration::isValidDomainName(const std::string& value)
{
    if (value.empty() || value == "." || value.length() > 253)
        return false;
    int labelLength = 0;
    char oldChar = 0;

    for (char ch : value)
    {
        if (ch == '.')
        {
            if (labelLength == 0 || labelLength > 63 || oldChar == '-')
                return false;
            labelLength = 0;
        }
        else if (isalnum(ch) || ch == '-')
        {
            if (labelLength == 0 && (ch == '-' || isdigit(ch)))
                return false;
            ++labelLength;
        }
        else
        {
            return false;
        }
        oldChar = ch;
    }
    return oldChar == '.' || (labelLength > 0 && labelLength <= 63 && oldChar != '-');
}


} } // namespace Poco::Util
@ -2,11 +2,11 @@

# NOTE: VERSION_REVISION has nothing common with DBMS_TCP_PROTOCOL_VERSION,
# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
SET(VERSION_REVISION 54489)
SET(VERSION_REVISION 54490)
SET(VERSION_MAJOR 24)
SET(VERSION_MINOR 8)
SET(VERSION_MINOR 9)
SET(VERSION_PATCH 1)
SET(VERSION_GITHASH 3f8b27d7accd2b5ec4afe7d0dd459115323304af)
SET(VERSION_DESCRIBE v24.8.1.1-testing)
SET(VERSION_STRING 24.8.1.1)
SET(VERSION_GITHASH e02b434d2fc0c4fbee29ca675deab7474d274608)
SET(VERSION_DESCRIBE v24.9.1.1-testing)
SET(VERSION_STRING 24.9.1.1)
# end of autochange
@ -9,10 +9,18 @@ endif ()
file(GLOB bprefix "/usr/local/llvm${COMPILER_VERSION_MAJOR}/lib/clang/${COMPILER_VERSION_MAJOR}/lib/${system_processor}-portbld-freebsd*/")
message(STATUS "-Bprefix: ${bprefix}")

execute_process(COMMAND ${CMAKE_CXX_COMPILER} -Bprefix=${bprefix} --print-file-name=libclang_rt.builtins-${system_processor}.a OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE)
execute_process(COMMAND
    ${CMAKE_CXX_COMPILER} -Bprefix=${bprefix} --print-file-name=libclang_rt.builtins-${system_processor}.a
    OUTPUT_VARIABLE BUILTINS_LIBRARY
    COMMAND_ERROR_IS_FATAL ANY
    OUTPUT_STRIP_TRAILING_WHITESPACE)
# --print-file-name simply prints what you passed in case nothing was resolved, so let's try one other possible option
if (BUILTINS_LIBRARY STREQUAL "libclang_rt.builtins-${system_processor}.a")
    execute_process(COMMAND ${CMAKE_CXX_COMPILER} -Bprefix=${bprefix} --print-file-name=libclang_rt.builtins.a OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE)
    execute_process(COMMAND
        ${CMAKE_CXX_COMPILER} -Bprefix=${bprefix} --print-file-name=libclang_rt.builtins.a
        OUTPUT_VARIABLE BUILTINS_LIBRARY
        COMMAND_ERROR_IS_FATAL ANY
        OUTPUT_STRIP_TRAILING_WHITESPACE)
endif()
if (BUILTINS_LIBRARY STREQUAL "libclang_rt.builtins.a")
    message(FATAL_ERROR "libclang_rt.builtins had not been found")
@ -8,4 +8,7 @@ set (CMAKE_CXX_COMPILER_TARGET "x86_64-pc-freebsd11")
set (CMAKE_ASM_COMPILER_TARGET "x86_64-pc-freebsd11")
set (CMAKE_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/freebsd-x86_64")

# dprintf is used in a patched version of replxx
add_compile_definitions(_WITH_DPRINTF)

set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake
@ -42,19 +42,9 @@ endif ()
# But use 2 parallel jobs, since:
# - this is what llvm does
# - and I've verified that lld-11 does not use all available CPU time (in peak) while linking one binary
if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND ENABLE_THINLTO)
    if (ARCH_AARCH64)
        # aarch64 builds often start to fail with OOMs (reason not yet clear), for now let's limit the concurrency
        message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 1.")
        set (PARALLEL_LINK_JOBS 1)
        if (LINKER_NAME MATCHES "lld")
            math(EXPR LTO_JOBS ${NUMBER_OF_LOGICAL_CORES}/4)
            set (CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} -Wl,--thinlto-jobs=${LTO_JOBS}")
        endif()
    elseif (PARALLEL_LINK_JOBS GREATER 2)
        message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 2.")
        set (PARALLEL_LINK_JOBS 2)
    endif ()
if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND ENABLE_THINLTO AND PARALLEL_LINK_JOBS GREATER 2)
    message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 2.")
    set (PARALLEL_LINK_JOBS 2)
endif()

message(STATUS "Building sub-tree with ${PARALLEL_COMPILE_JOBS} compile jobs and ${PARALLEL_LINK_JOBS} linker jobs (system: ${NUMBER_OF_LOGICAL_CORES} cores, ${TOTAL_PHYSICAL_MEMORY} MB RAM, 'OFF' means the native core count).")
@ -5,7 +5,11 @@ set (DEFAULT_LIBS "-nodefaultlibs")

# We need builtins from Clang's RT even without libcxx - for ubsan+int128.
# See https://bugs.llvm.org/show_bug.cgi?id=16404
execute_process (COMMAND ${CMAKE_CXX_COMPILER} --target=${CMAKE_CXX_COMPILER_TARGET} --print-libgcc-file-name --rtlib=compiler-rt OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE)
execute_process (COMMAND
    ${CMAKE_CXX_COMPILER} --target=${CMAKE_CXX_COMPILER_TARGET} --print-libgcc-file-name --rtlib=compiler-rt
    OUTPUT_VARIABLE BUILTINS_LIBRARY
    COMMAND_ERROR_IS_FATAL ANY
    OUTPUT_STRIP_TRAILING_WHITESPACE)

# Apparently, in clang-19, the UBSan support library for C++ was moved out into ubsan_standalone_cxx.a, so we have to include both.
if (SANITIZE STREQUAL undefined)
@ -57,8 +57,8 @@ option(WITH_COVERAGE "Instrumentation for code coverage with default implementat

if (WITH_COVERAGE)
message (STATUS "Enabled instrumentation for code coverage")
set(COVERAGE_FLAGS "SHELL:-fprofile-instr-generate -fcoverage-mapping")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fprofile-instr-generate -fcoverage-mapping")
set (COVERAGE_FLAGS -fprofile-instr-generate -fcoverage-mapping)
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fprofile-instr-generate -fcoverage-mapping")
endif()

option (SANITIZE_COVERAGE "Instrumentation for code coverage with custom callbacks" OFF)
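The coverage change swaps a single "SHELL:"-prefixed string for a plain CMake list. A sketch of how such a list-valued variable is typically consumed per target (the target name here is hypothetical):

    set(COVERAGE_FLAGS -fprofile-instr-generate -fcoverage-mapping)
    add_executable(coverage_demo main.cpp)
    target_compile_options(coverage_demo PRIVATE ${COVERAGE_FLAGS})
    target_link_options(coverage_demo PRIVATE ${COVERAGE_FLAGS})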
@ -5,7 +5,11 @@ if (NOT CMAKE_CXX_COMPILER_ID MATCHES "Clang")
endif ()

# Print details to output
execute_process(COMMAND ${CMAKE_CXX_COMPILER} --version OUTPUT_VARIABLE COMPILER_SELF_IDENTIFICATION OUTPUT_STRIP_TRAILING_WHITESPACE)
execute_process(COMMAND ${CMAKE_CXX_COMPILER} --version
OUTPUT_VARIABLE COMPILER_SELF_IDENTIFICATION
COMMAND_ERROR_IS_FATAL ANY
OUTPUT_STRIP_TRAILING_WHITESPACE
)
message (STATUS "Using compiler:\n${COMPILER_SELF_IDENTIFICATION}")

# Require minimum compiler versions
@ -90,7 +90,10 @@ endfunction()

# Function get_cmake_properties returns list of all properties that cmake supports
function(get_cmake_properties outvar)
execute_process(COMMAND cmake --help-property-list OUTPUT_VARIABLE cmake_properties)
execute_process(COMMAND cmake --help-property-list
OUTPUT_VARIABLE cmake_properties
COMMAND_ERROR_IS_FATAL ANY
)
# Convert command output into a CMake list
string(REGEX REPLACE ";" "\\\\;" cmake_properties "${cmake_properties}")
string(REGEX REPLACE "\n" ";" cmake_properties "${cmake_properties}")
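The two string(REGEX REPLACE ...) calls first escape semicolons that occur inside property names, then turn newlines into semicolons, i.e. into CMake list separators. Hypothetical usage, assuming the remainder of the function (not shown in this hunk) returns the list through ${outvar} in PARENT_SCOPE:

    get_cmake_properties(ALL_CMAKE_PROPERTIES)
    list(LENGTH ALL_CMAKE_PROPERTIES PROPERTY_COUNT)
    message(STATUS "This CMake supports ${PROPERTY_COUNT} properties")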
10
contrib/CMakeLists.txt
vendored
@ -71,7 +71,6 @@ add_contrib (zlib-ng-cmake zlib-ng)
add_contrib (bzip2-cmake bzip2)
add_contrib (minizip-ng-cmake minizip-ng)
add_contrib (snappy-cmake snappy)
add_contrib (rocksdb-cmake rocksdb)
add_contrib (thrift-cmake thrift)
# parquet/arrow/orc
add_contrib (arrow-cmake arrow) # requires: snappy, thrift, double-conversion
@ -148,6 +147,7 @@ add_contrib (hive-metastore-cmake hive-metastore) # requires: thrift, avro, arro
add_contrib (cppkafka-cmake cppkafka)
add_contrib (libpqxx-cmake libpqxx)
add_contrib (libpq-cmake libpq)
add_contrib (rocksdb-cmake rocksdb) # requires: jemalloc, snappy, zlib, lz4, zstd, liburing
add_contrib (nuraft-cmake NuRaft)
add_contrib (fast_float-cmake fast_float)
add_contrib (idna-cmake idna)
@ -179,7 +179,7 @@ else()
message(STATUS "Not using QPL")
endif ()

if (OS_LINUX AND ARCH_AMD64)
if (OS_LINUX AND ARCH_AMD64 AND NOT NO_SSE3_OR_HIGHER)
option (ENABLE_QATLIB "Enable Intel® QuickAssist Technology Library (QATlib)" ${ENABLE_LIBRARIES})
elseif(ENABLE_QATLIB)
message (${RECONFIGURE_MESSAGE_LEVEL} "QATLib is only supported on x86_64")
@ -205,14 +205,12 @@ add_contrib (morton-nd-cmake morton-nd)
if (ARCH_S390X)
add_contrib(crc32-s390x-cmake crc32-s390x)
endif()
add_contrib (annoy-cmake annoy)

option(ENABLE_USEARCH "Enable USearch (Approximate Neighborhood Search, HNSW) support" ${ENABLE_LIBRARIES})
option(ENABLE_USEARCH "Enable USearch" ${ENABLE_LIBRARIES})
if (ENABLE_USEARCH)
add_contrib (FP16-cmake FP16)
add_contrib (robin-map-cmake robin-map)
add_contrib (SimSIMD-cmake SimSIMD)
add_contrib (usearch-cmake usearch) # requires: FP16, robin-map, SimdSIMD
add_contrib (usearch-cmake usearch) # requires: FP16, SimdSIMD
else ()
message(STATUS "Not using USearch")
endif ()
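The USearch block above shows the option-guarded contrib pattern this file is built from: bundled dependencies are registered before their dependents, and the whole group is gated on one option. A schematic sketch with hypothetical names:

    option(ENABLE_EXAMPLE_LIB "Enable example contrib library" ${ENABLE_LIBRARIES})
    if (ENABLE_EXAMPLE_LIB)
        add_contrib (example-dep-cmake example-dep)    # bundled dependency first
        add_contrib (example-lib-cmake example-lib)    # requires: example-dep
    else ()
        message(STATUS "Not using example-lib")
    endif ()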
@ -27,7 +27,7 @@ if (ENABLE_QAT_OUT_OF_TREE_BUILD)
${QAT_AL_INCLUDE_DIR}
${QAT_USDM_INCLUDE_DIR}
${ZSTD_LIBRARY_DIR})
target_compile_definitions(_qatzstd_plugin PRIVATE -DDEBUGLEVEL=0 PUBLIC -DENABLE_ZSTD_QAT_CODEC)
target_compile_definitions(_qatzstd_plugin PRIVATE -DDEBUGLEVEL=0)
add_library (ch_contrib::qatzstd_plugin ALIAS _qatzstd_plugin)
else () # In-tree build
message(STATUS "Intel QATZSTD in-tree build")
@ -78,7 +78,7 @@ else () # In-tree build
${QAT_USDM_INCLUDE_DIR}
${ZSTD_LIBRARY_DIR}
${LIBQAT_HEADER_DIR})
target_compile_definitions(_qatzstd_plugin PRIVATE -DDEBUGLEVEL=0 PUBLIC -DENABLE_ZSTD_QAT_CODEC -DINTREE)
target_compile_definitions(_qatzstd_plugin PRIVATE -DDEBUGLEVEL=0 PUBLIC -DINTREE)
target_include_directories(_qatzstd_plugin SYSTEM PUBLIC $<BUILD_INTERFACE:${QATZSTD_SRC_DIR}> $<INSTALL_INTERFACE:include>)
add_library (ch_contrib::qatzstd_plugin ALIAS _qatzstd_plugin)
endif ()
2
contrib/SimSIMD
vendored
@ -1 +1 @@
Subproject commit de2cb75b9e9e3389d5e1e51fd9f8ed151f3c17cf
Subproject commit 91a76d1ac519b3b9dc8957734a3dabd985f00c26
1
contrib/annoy
vendored
@ -1 +0,0 @@
Subproject commit f2ac8e7b48f9a9cf676d3b58286e5455aba8e956
@ -1,24 +0,0 @@
option(ENABLE_ANNOY "Enable Annoy index support" ${ENABLE_LIBRARIES})

# Annoy index should be disabled with undefined sanitizer. Because of memory storage optimizations
# (https://github.com/ClickHouse/annoy/blob/9d8a603a4cd252448589e84c9846f94368d5a289/src/annoylib.h#L442-L463)
# UBSan fails and leads to crash. Similar issue is already opened in Annoy repo
# https://github.com/spotify/annoy/issues/456
# Problem with alignment can lead to errors like
# (https://stackoverflow.com/questions/46790550/c-undefined-behavior-strict-aliasing-rule-or-incorrect-alignment)
# or will lead to crash on arm https://developer.arm.com/documentation/ka003038/latest
# These issues should be resolved before annoy becomes non-experimental (--> setting "allow_experimental_annoy_index")
if ((NOT ENABLE_ANNOY) OR (SANITIZE STREQUAL "undefined") OR (ARCH_AARCH64))
message (STATUS "Not using annoy")
return()
endif()

set(ANNOY_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/annoy")
set(ANNOY_SOURCE_DIR "${ANNOY_PROJECT_DIR}/src")

add_library(_annoy INTERFACE)
target_include_directories(_annoy SYSTEM INTERFACE ${ANNOY_SOURCE_DIR})

add_library(ch_contrib::annoy ALIAS _annoy)
target_compile_definitions(_annoy INTERFACE ENABLE_ANNOY)
target_compile_definitions(_annoy INTERFACE ANNOYLIB_MULTITHREADED_BUILD)
2
contrib/aws
vendored
@ -1 +1 @@
Subproject commit 1c2946bfcb7f1e3ae0a858de0b59d4f1a7b4ccaf
Subproject commit d5450d76abda556ce145ddabe7e0cc6a7644ec59
2
contrib/aws-crt-cpp
vendored
@ -1 +1 @@
Subproject commit f532d6abc0d2b0d8b5d6fe9e7c51eaedbe4afbd0
Subproject commit e5aa45cacfdcda7719ead38760e7c61076f5745f
@ -37,7 +37,9 @@ message(STATUS "Packaging with tzdata version: ${TZDATA_VERSION}")
execute_process(COMMAND
bash -c "cd ${TZDIR} && find * -type f -and ! -name '*.tab' -and ! -name 'localtime' | LC_ALL=C sort | paste -sd ';' -"
OUTPUT_STRIP_TRAILING_WHITESPACE
OUTPUT_VARIABLE TIMEZONES)
OUTPUT_VARIABLE TIMEZONES
COMMAND_ERROR_IS_FATAL ANY
)

file(APPEND ${TIMEZONES_FILE} "// autogenerated by ClickHouse/contrib/cctz-cmake/CMakeLists.txt\n")
file(APPEND ${TIMEZONES_FILE} "#include <incbin.h>\n")
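The captured TIMEZONES variable is a semicolon-separated CMake list of every tzdata file. A rough sketch of turning such a list into embedded resources in the generated header (variable names follow the hunk; the INCBIN line is illustrative, not the verbatim generator):

    set(COUNTER 0)
    foreach(TIMEZONE ${TIMEZONES})
        file(APPEND ${TIMEZONES_FILE} "INCBIN(resource_timezone${COUNTER}, \"${TZDIR}/${TIMEZONE}\");\n")
        math(EXPR COUNTER "${COUNTER} + 1")
    endforeach()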
@ -359,7 +359,9 @@ else ()

execute_process(
COMMAND mkdir -p ${PROTOC_BUILD_DIR}
COMMAND_ECHO STDOUT)
COMMAND_ECHO STDOUT
COMMAND_ERROR_IS_FATAL ANY
)

execute_process(
COMMAND ${CMAKE_COMMAND}
@ -375,11 +377,15 @@ else ()
"-DABSL_ENABLE_INSTALL=0"
"${protobuf_source_dir}"
WORKING_DIRECTORY "${PROTOC_BUILD_DIR}"
COMMAND_ECHO STDOUT)
COMMAND_ECHO STDOUT
COMMAND_ERROR_IS_FATAL ANY
)

execute_process(
COMMAND ${CMAKE_COMMAND} --build "${PROTOC_BUILD_DIR}"
COMMAND_ECHO STDOUT)
COMMAND_ECHO STDOUT
COMMAND_ERROR_IS_FATAL ANY
)
endif ()

add_executable(protoc IMPORTED GLOBAL)
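The calls above form the usual host-tool bootstrap for cross builds: create a scratch directory, configure the tool for the host, then build it, letting any failure abort the outer configure step. Condensed sketch (a Release build type is assumed here for brevity):

    execute_process(COMMAND ${CMAKE_COMMAND} -E make_directory "${PROTOC_BUILD_DIR}"
        COMMAND_ERROR_IS_FATAL ANY)
    execute_process(COMMAND ${CMAKE_COMMAND} "-DCMAKE_BUILD_TYPE=Release" "${protobuf_source_dir}"
        WORKING_DIRECTORY "${PROTOC_BUILD_DIR}"
        COMMAND_ECHO STDOUT
        COMMAND_ERROR_IS_FATAL ANY)
    execute_process(COMMAND ${CMAKE_COMMAND} --build "${PROTOC_BUILD_DIR}"
        COMMAND_ECHO STDOUT
        COMMAND_ERROR_IS_FATAL ANY)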
@ -51,8 +51,9 @@ if (NOT CMAKE_HOST_SYSTEM_NAME STREQUAL CMAKE_SYSTEM_NAME
set(OPENSSL_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/openssl-cmake")

execute_process(
COMMAND mkdir -p ${OPENSSL_BUILD_DIR}
COMMAND_ECHO STDOUT
COMMAND mkdir -p ${OPENSSL_BUILD_DIR}
COMMAND_ECHO STDOUT
COMMAND_ERROR_IS_FATAL ANY
)

if (CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "amd64|x86_64")
@ -89,15 +90,21 @@ if (NOT CMAKE_HOST_SYSTEM_NAME STREQUAL CMAKE_SYSTEM_NAME
"-DClickHouse_SOURCE_DIR=${ClickHouse_SOURCE_DIR}"
"${OPENSSL_SOURCE_DIR}"
WORKING_DIRECTORY "${OPENSSL_BUILD_DIR}"
COMMAND_ECHO STDOUT)
COMMAND_ECHO STDOUT
COMMAND_ERROR_IS_FATAL ANY
)

execute_process(
COMMAND ${CMAKE_COMMAND} --build "${OPENSSL_BUILD_DIR}"
COMMAND_ECHO STDOUT)
COMMAND_ECHO STDOUT
COMMAND_ERROR_IS_FATAL ANY
)

execute_process(
COMMAND ${CMAKE_COMMAND} --install "${OPENSSL_BUILD_DIR}"
COMMAND_ECHO STDOUT)
COMMAND_ECHO STDOUT
COMMAND_ERROR_IS_FATAL ANY
)

# It's not important on which file we depend, we just want to specify right order
add_library(openssl_for_grpc STATIC IMPORTED GLOBAL)
@ -108,8 +115,9 @@ if (NOT CMAKE_HOST_SYSTEM_NAME STREQUAL CMAKE_SYSTEM_NAME
set (GRPC_CPP_PLUGIN_BUILD_DIR "${_gRPC_BINARY_DIR}/build")

execute_process(
COMMAND mkdir -p ${GRPC_CPP_PLUGIN_BUILD_DIR}
COMMAND_ECHO STDOUT
COMMAND mkdir -p ${GRPC_CPP_PLUGIN_BUILD_DIR}
COMMAND_ECHO STDOUT
COMMAND_ERROR_IS_FATAL ANY
)

set(abseil_source_dir "${ClickHouse_SOURCE_DIR}/contrib/abseil-cpp")
@ -140,11 +148,15 @@ if (NOT CMAKE_HOST_SYSTEM_NAME STREQUAL CMAKE_SYSTEM_NAME
"-DgRPC_SSL_PROVIDER=package"
"${_gRPC_SOURCE_DIR}"
WORKING_DIRECTORY "${GRPC_CPP_PLUGIN_BUILD_DIR}"
COMMAND_ECHO STDOUT)
COMMAND_ECHO STDOUT
COMMAND_ERROR_IS_FATAL ANY
)

execute_process(
COMMAND ${CMAKE_COMMAND} --build "${GRPC_CPP_PLUGIN_BUILD_DIR}"
COMMAND_ECHO STDOUT)
COMMAND_ECHO STDOUT
COMMAND_ERROR_IS_FATAL ANY
)

add_executable(grpc_cpp_plugin IMPORTED GLOBAL)
set_target_properties (grpc_cpp_plugin PROPERTIES IMPORTED_LOCATION "${GRPC_CPP_PLUGIN_BUILD_DIR}/grpc_cpp_plugin")
2
contrib/icu
vendored
@ -1 +1 @@
Subproject commit 7750081bda4b3bc1768ae03849ec70f67ea10625
Subproject commit 4216173eeeb39c1d4caaa54a68860e800412d273
2
contrib/libfiu
vendored
@ -1 +1 @@
Subproject commit b85edbde4cf974b1b40d27828a56f0505f4e2ee5
Subproject commit a1290d8cd3d7b4541d6c976e0a54f572ac03f2a3
@ -1,20 +1,21 @@
if (NOT ENABLE_FIU)
message (STATUS "Not using fiu")
if (NOT ENABLE_LIBFIU)
message (STATUS "Not using libfiu")
return ()
endif ()

set(FIU_DIR "${ClickHouse_SOURCE_DIR}/contrib/libfiu/")
set(LIBFIU_DIR "${ClickHouse_SOURCE_DIR}/contrib/libfiu/")

set(FIU_SOURCES
${FIU_DIR}/libfiu/fiu.c
${FIU_DIR}/libfiu/fiu-rc.c
${FIU_DIR}/libfiu/backtrace.c
${FIU_DIR}/libfiu/wtable.c
set(LIBFIU_SOURCES
${LIBFIU_DIR}/libfiu/fiu.c
${LIBFIU_DIR}/libfiu/fiu-rc.c
${LIBFIU_DIR}/libfiu/backtrace.c
${LIBFIU_DIR}/libfiu/wtable.c
)

set(FIU_HEADERS "${FIU_DIR}/libfiu")
set(LIBFIU_HEADERS "${LIBFIU_DIR}/libfiu")

add_library(_fiu ${FIU_SOURCES})
target_compile_definitions(_fiu PUBLIC DUMMY_BACKTRACE)
target_include_directories(_fiu PUBLIC ${FIU_HEADERS})
add_library(ch_contrib::fiu ALIAS _fiu)
add_library(_libfiu ${LIBFIU_SOURCES})
target_compile_definitions(_libfiu PUBLIC DUMMY_BACKTRACE)
target_compile_definitions(_libfiu PUBLIC FIU_ENABLE)
target_include_directories(_libfiu PUBLIC ${LIBFIU_HEADERS})
add_library(ch_contrib::libfiu ALIAS _libfiu)
2
contrib/libprotobuf-mutator
vendored
@ -1 +1 @@
Subproject commit 1f95f8083066f5b38fd2db172e7e7f9aa7c49d2d
Subproject commit b922c8ab9004ef9944982e4f165e2747b13223fa
2
contrib/librdkafka
vendored
@ -1 +1 @@
Subproject commit 2d2aab6f5b79db1cfca15d7bf0dee75d00d82082
Subproject commit 39d4ed49ccf3406e2bf825d5d7b0903b5a290782
2
contrib/libunwind
vendored
@ -1 +1 @@
Subproject commit a89d904befea07814628c6ce0b44083c4e149c62
Subproject commit 601db0b0e03018c01710470a37703b618f9cf08b
@ -140,6 +140,12 @@ if (CMAKE_CROSSCOMPILING)
message (STATUS "CROSS COMPILING SET LLVM HOST TRIPLE ${LLVM_HOST_TRIPLE}")
endif()

# llvm-project/llvm/cmake/config-ix.cmake does a weird thing: it defines _LARGEFILE64_SOURCE,
# then checks if lseek64() function exists, then undefines _LARGEFILE64_SOURCE.
# Then the actual code that uses this function *doesn't* define _LARGEFILE64_SOURCE, so lseek64()
# may not exist and compilation fails. This happens with musl.
add_compile_definitions("_LARGEFILE64_SOURCE")

add_subdirectory ("${LLVM_SOURCE_DIR}" "${LLVM_BINARY_DIR}")

set_directory_properties (PROPERTIES
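The comment block above describes a probing quirk: config-ix.cmake defines _LARGEFILE64_SOURCE only for the duration of its lseek64() check, so code compiled later without the define can fail on musl. A hypothetical reconstruction of such a check, to make the failure mode concrete:

    include(CheckSymbolExists)
    list(APPEND CMAKE_REQUIRED_DEFINITIONS -D_LARGEFILE64_SOURCE)
    check_symbol_exists(lseek64 "unistd.h" HAVE_LSEEK64)
    list(REMOVE_ITEM CMAKE_REQUIRED_DEFINITIONS -D_LARGEFILE64_SOURCE)  # the define does not outlive the check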
2
contrib/qpl
vendored
@ -1 +1 @@
Subproject commit d4715e0e79896b85612158e135ee1a85f3b3e04d
Subproject commit c2ced94c53c1ee22191201a59878e9280bc9b9b8
@ -4,7 +4,6 @@ set (QPL_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/qpl")
set (QPL_SRC_DIR "${ClickHouse_SOURCE_DIR}/contrib/qpl/sources")
set (QPL_BINARY_DIR "${ClickHouse_BINARY_DIR}/build/contrib/qpl")
set (EFFICIENT_WAIT OFF)
set (BLOCK_ON_FAULT ON)
set (LOG_HW_INIT OFF)
set (SANITIZE_MEMORY OFF)
set (SANITIZE_THREADS OFF)
@ -16,16 +15,20 @@ function(GetLibraryVersion _content _outputVar)
SET(${_outputVar} ${CMAKE_MATCH_1} PARENT_SCOPE)
endfunction()

set (QPL_VERSION 1.2.0)
set (QPL_VERSION 1.6.0)

message(STATUS "Intel QPL version: ${QPL_VERSION}")

# There are 5 source subdirectories under $QPL_SRC_DIR: isal, c_api, core-sw, middle-layer, c_api.
# Generate 8 library targets: middle_layer_lib, isal, isal_asm, qplcore_px, qplcore_avx512, qplcore_sw_dispatcher, core_iaa, middle_layer_lib.
# There are 5 source subdirectories under $QPL_SRC_DIR: c_api, core-iaa, core-sw, middle-layer and isal.
# Generate 8 library targets: qpl_c_api, core_iaa, qplcore_px, qplcore_avx512, qplcore_sw_dispatcher, middle_layer_lib, isal and isal_asm,
# which are then combined into static or shared qpl.
# Output ch_contrib::qpl by linking with 8 library targets.

# The qpl submodule comes with its own version of isal. It contains code which does not exist in upstream isal. It would be nice to link
# only upstream isal (ch_contrib::isal) but at this point we can't.
# Note, QPL has integrated a customized version of ISA-L to meet specific needs.
# This version has been significantly modified and there are no plans to maintain compatibility with the upstream version
# or upgrade the current copy.

## cmake/CompileOptions.cmake and automatic wrappers generation

# ==========================================================================
# Copyright (C) 2022 Intel Corporation
@ -442,6 +445,7 @@ function(generate_unpack_kernel_arrays current_directory PLATFORMS_LIST)
endforeach()
endfunction()

# [SUBDIR]isal

enable_language(ASM_NASM)

@ -479,7 +483,6 @@ set(ISAL_ASM_SRC ${QPL_SRC_DIR}/isal/igzip/igzip_body.asm
${QPL_SRC_DIR}/isal/igzip/igzip_set_long_icf_fg_04.asm
${QPL_SRC_DIR}/isal/igzip/igzip_set_long_icf_fg_06.asm
${QPL_SRC_DIR}/isal/igzip/igzip_multibinary.asm
${QPL_SRC_DIR}/isal/igzip/stdmac.asm
${QPL_SRC_DIR}/isal/crc/crc_multibinary.asm
${QPL_SRC_DIR}/isal/crc/crc32_gzip_refl_by8.asm
${QPL_SRC_DIR}/isal/crc/crc32_gzip_refl_by8_02.asm
@ -505,7 +508,6 @@ set_property(GLOBAL APPEND PROPERTY QPL_LIB_DEPS
# Setting external and internal interfaces for ISA-L library
target_include_directories(isal
PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/isal/include>
PRIVATE ${QPL_SRC_DIR}/isal/include
PUBLIC ${QPL_SRC_DIR}/isal/igzip)

set_target_properties(isal PROPERTIES
@ -617,12 +619,9 @@ target_compile_options(qplcore_sw_dispatcher

# [SUBDIR]core-iaa
file(GLOB HW_PATH_SRC ${QPL_SRC_DIR}/core-iaa/sources/aecs/*.c
${QPL_SRC_DIR}/core-iaa/sources/aecs/*.cpp
${QPL_SRC_DIR}/core-iaa/sources/driver_loader/*.c
${QPL_SRC_DIR}/core-iaa/sources/driver_loader/*.cpp
${QPL_SRC_DIR}/core-iaa/sources/descriptors/*.c
${QPL_SRC_DIR}/core-iaa/sources/descriptors/*.cpp
${QPL_SRC_DIR}/core-iaa/sources/bit_rev.c)
${QPL_SRC_DIR}/core-iaa/sources/*.c)

# Create library
add_library(core_iaa OBJECT ${HW_PATH_SRC})
@ -634,31 +633,27 @@ target_include_directories(core_iaa
PRIVATE ${UUID_DIR}
PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/core-iaa/include>
PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/core-iaa/sources/include>
PRIVATE $<BUILD_INTERFACE:${QPL_PROJECT_DIR}/include> # status.h in own_checkers.h
PRIVATE $<BUILD_INTERFACE:${QPL_PROJECT_DIR}/sources/c_api> # own_checkers.h
PRIVATE $<BUILD_INTERFACE:${QPL_PROJECT_DIR}/include> # status.h in own_checkers.h
PRIVATE $<TARGET_PROPERTY:qpl_c_api,INTERFACE_INCLUDE_DIRECTORIES> # for own_checkers.h
PRIVATE $<TARGET_PROPERTY:qplcore_sw_dispatcher,INTERFACE_INCLUDE_DIRECTORIES>)

target_compile_features(core_iaa PRIVATE c_std_11)

target_compile_definitions(core_iaa PRIVATE QPL_BADARG_CHECK
PRIVATE $<$<BOOL:${BLOCK_ON_FAULT}>: BLOCK_ON_FAULT_ENABLED>
PRIVATE $<$<BOOL:${LOG_HW_INIT}>:LOG_HW_INIT>
PRIVATE $<$<BOOL:${DYNAMIC_LOADING_LIBACCEL_CONFIG}>:DYNAMIC_LOADING_LIBACCEL_CONFIG>)

# [SUBDIR]middle-layer
file(GLOB MIDDLE_LAYER_SRC
${QPL_SRC_DIR}/middle-layer/analytics/*.cpp
${QPL_SRC_DIR}/middle-layer/c_wrapper/*.cpp
${QPL_SRC_DIR}/middle-layer/checksum/*.cpp
${QPL_SRC_DIR}/middle-layer/accelerator/*.cpp
${QPL_SRC_DIR}/middle-layer/analytics/*.cpp
${QPL_SRC_DIR}/middle-layer/common/*.cpp
${QPL_SRC_DIR}/middle-layer/compression/*.cpp
${QPL_SRC_DIR}/middle-layer/compression/*/*.cpp
${QPL_SRC_DIR}/middle-layer/compression/*/*/*.cpp
${QPL_SRC_DIR}/middle-layer/dispatcher/*.cpp
${QPL_SRC_DIR}/middle-layer/other/*.cpp
${QPL_SRC_DIR}/middle-layer/util/*.cpp
${QPL_SRC_DIR}/middle-layer/inflate/*.cpp
${QPL_SRC_DIR}/core-iaa/sources/accelerator/*.cpp) # todo
${QPL_SRC_DIR}/middle-layer/util/*.cpp)

add_library(middle_layer_lib OBJECT
${MIDDLE_LAYER_SRC})
@ -667,6 +662,7 @@ set_property(GLOBAL APPEND PROPERTY QPL_LIB_DEPS
$<TARGET_OBJECTS:middle_layer_lib>)

target_compile_options(middle_layer_lib
PRIVATE $<$<C_COMPILER_ID:GNU,Clang>:$<$<CONFIG:Release>:-O3;-U_FORTIFY_SOURCE;-D_FORTIFY_SOURCE=2>>
PRIVATE ${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS})

target_compile_definitions(middle_layer_lib
@ -682,6 +678,7 @@ target_include_directories(middle_layer_lib
PRIVATE ${UUID_DIR}
PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/middle-layer>
PUBLIC $<TARGET_PROPERTY:_qpl,INTERFACE_INCLUDE_DIRECTORIES>
PRIVATE $<TARGET_PROPERTY:qpl_c_api,INTERFACE_INCLUDE_DIRECTORIES>
PUBLIC $<TARGET_PROPERTY:qplcore_sw_dispatcher,INTERFACE_INCLUDE_DIRECTORIES>
PUBLIC $<TARGET_PROPERTY:isal,INTERFACE_INCLUDE_DIRECTORIES>
PUBLIC $<TARGET_PROPERTY:core_iaa,INTERFACE_INCLUDE_DIRECTORIES>)
@ -689,31 +686,50 @@ target_include_directories(middle_layer_lib
target_compile_definitions(middle_layer_lib PUBLIC -DQPL_LIB)

# [SUBDIR]c_api
file(GLOB_RECURSE QPL_C_API_SRC
${QPL_SRC_DIR}/c_api/*.c
${QPL_SRC_DIR}/c_api/*.cpp)
file(GLOB QPL_C_API_SRC
${QPL_SRC_DIR}/c_api/compression_operations/*.c
${QPL_SRC_DIR}/c_api/compression_operations/*.cpp
${QPL_SRC_DIR}/c_api/filter_operations/*.cpp
${QPL_SRC_DIR}/c_api/legacy_hw_path/*.c
${QPL_SRC_DIR}/c_api/legacy_hw_path/*.cpp
${QPL_SRC_DIR}/c_api/other_operations/*.cpp
${QPL_SRC_DIR}/c_api/serialization/*.cpp
${QPL_SRC_DIR}/c_api/*.cpp)

add_library(qpl_c_api OBJECT ${QPL_C_API_SRC})

target_include_directories(qpl_c_api
PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/c_api/>
PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/include/> $<INSTALL_INTERFACE:include>
PRIVATE $<TARGET_PROPERTY:middle_layer_lib,INTERFACE_INCLUDE_DIRECTORIES>)

set_target_properties(qpl_c_api PROPERTIES
$<$<C_COMPILER_ID:GNU,Clang>:C_STANDARD 17
CXX_STANDARD 17)

target_compile_options(qpl_c_api
PRIVATE $<$<C_COMPILER_ID:GNU,Clang>:$<$<CONFIG:Release>:-O3;-U_FORTIFY_SOURCE;-D_FORTIFY_SOURCE=2>>
PRIVATE $<$<COMPILE_LANG_AND_ID:CXX,GNU,Clang>:${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS}>)

target_compile_definitions(qpl_c_api
PUBLIC -DQPL_BADARG_CHECK # own_checkers.h
PUBLIC -DQPL_LIB # needed for middle_layer_lib
PUBLIC $<$<BOOL:${LOG_HW_INIT}>:LOG_HW_INIT>) # needed for middle_layer_lib

set_property(GLOBAL APPEND PROPERTY QPL_LIB_DEPS
$<TARGET_OBJECTS:qpl_c_api>)

# Final _qpl target

get_property(LIB_DEPS GLOBAL PROPERTY QPL_LIB_DEPS)

add_library(_qpl STATIC ${QPL_C_API_SRC} ${LIB_DEPS})
add_library(_qpl STATIC ${LIB_DEPS})

target_include_directories(_qpl
PUBLIC $<BUILD_INTERFACE:${QPL_PROJECT_DIR}/include/> $<INSTALL_INTERFACE:include>
PRIVATE $<TARGET_PROPERTY:middle_layer_lib,INTERFACE_INCLUDE_DIRECTORIES>
PRIVATE $<BUILD_INTERFACE:${QPL_SRC_DIR}/c_api>)

target_compile_options(_qpl
PRIVATE ${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS})

target_compile_definitions(_qpl
PRIVATE -DQPL_LIB
PRIVATE -DQPL_BADARG_CHECK
PRIVATE $<$<BOOL:${DYNAMIC_LOADING_LIBACCEL_CONFIG}>:DYNAMIC_LOADING_LIBACCEL_CONFIG>
PUBLIC -DENABLE_QPL_COMPRESSION)
PUBLIC $<BUILD_INTERFACE:${QPL_PROJECT_DIR}/include/> $<INSTALL_INTERFACE:include>)

target_link_libraries(_qpl
PRIVATE ch_contrib::accel-config
PRIVATE ch_contrib::isal)
PRIVATE ch_contrib::accel-config)

target_include_directories(_qpl SYSTEM BEFORE
PUBLIC "${QPL_PROJECT_DIR}/include"
2
contrib/replxx
vendored
@ -1 +1 @@
Subproject commit 5d04501f93a4fb7f0bb8b73b8f614bc986f9e25b
Subproject commit 711c18e7f4d951255aa8b0851e5a55d5a5fb0ddb
1
contrib/robin-map
vendored
@ -1 +0,0 @@
Subproject commit 851a59e0e3063ee0e23089062090a73fd3de482d
@ -1 +0,0 @@
# See contrib/usearch-cmake/CMakeLists.txt
2
contrib/rocksdb
vendored
@ -1 +1 @@
Subproject commit 49ce8a1064dd1ad89117899839bf136365e49e79
Subproject commit 5f003e4a22d2e48e37c98d9620241237cd30dd24
@ -5,36 +5,38 @@ if (NOT ENABLE_ROCKSDB OR NO_SSE3_OR_HIGHER) # assumes SSE4.2 and PCLMUL
return()
endif()

# not in original build system, otherwise xxHash.cc fails to compile with ClickHouse C++23 default
set (CMAKE_CXX_STANDARD 20)

# Always disable jemalloc for rocksdb by default because it introduces non-standard jemalloc APIs
option(WITH_JEMALLOC "build with JeMalloc" OFF)

option(WITH_LIBURING "build with liburing" OFF) # TODO could try to enable this conditionally, depending on ClickHouse's ENABLE_LIBURING

# ClickHouse cannot be compiled without snappy, lz4, zlib, zstd
option(WITH_SNAPPY "build with SNAPPY" ON)
option(WITH_LZ4 "build with lz4" ON)
option(WITH_ZLIB "build with zlib" ON)
option(WITH_ZSTD "build with zstd" ON)

if(WITH_SNAPPY)
if (ENABLE_JEMALLOC AND OS_LINUX) # gives compile errors with jemalloc enabled for rocksdb on non-Linux
add_definitions(-DROCKSDB_JEMALLOC -DJEMALLOC_NO_DEMANGLE)
list (APPEND THIRDPARTY_LIBS ch_contrib::jemalloc)
endif ()

if (ENABLE_LIBURING)
add_definitions(-DROCKSDB_IOURING_PRESENT)
list (APPEND THIRDPARTY_LIBS ch_contrib::liburing)
endif ()

if (WITH_SNAPPY)
add_definitions(-DSNAPPY)
list(APPEND THIRDPARTY_LIBS ch_contrib::snappy)
endif()

if(WITH_ZLIB)
if (WITH_ZLIB)
add_definitions(-DZLIB)
list(APPEND THIRDPARTY_LIBS ch_contrib::zlib)
endif()

if(WITH_LZ4)
if (WITH_LZ4)
add_definitions(-DLZ4)
list(APPEND THIRDPARTY_LIBS ch_contrib::lz4)
endif()

if(WITH_ZSTD)
if (WITH_ZSTD)
add_definitions(-DZSTD)
list(APPEND THIRDPARTY_LIBS ch_contrib::zstd)
endif()
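The block above follows the usual collect-then-link wiring for this contrib: each optional dependency contributes a preprocessor define plus an entry in THIRDPARTY_LIBS, and everything is linked in one place at the end. A schematic sketch condensed from the hunks in this file:

    if (ENABLE_LIBURING)
        add_definitions(-DROCKSDB_IOURING_PRESENT)  # feature flag seen by the RocksDB sources
        list (APPEND THIRDPARTY_LIBS ch_contrib::liburing)
    endif ()
    # ... snappy, zlib, lz4, zstd, jemalloc accumulate the same way ...
    add_library(_rocksdb ${SOURCES})
    target_link_libraries(_rocksdb PRIVATE ${THIRDPARTY_LIBS} ${SYSTEM_LIBS})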
@ -88,6 +90,7 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/cache/sharded_cache.cc
${ROCKSDB_SOURCE_DIR}/cache/tiered_secondary_cache.cc
${ROCKSDB_SOURCE_DIR}/db/arena_wrapped_db_iter.cc
${ROCKSDB_SOURCE_DIR}/db/attribute_group_iterator_impl.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_contents.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_fetcher.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_addition.cc
@ -104,6 +107,7 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/db/blob/prefetch_buffer_collection.cc
${ROCKSDB_SOURCE_DIR}/db/builder.cc
${ROCKSDB_SOURCE_DIR}/db/c.cc
${ROCKSDB_SOURCE_DIR}/db/coalescing_iterator.cc
${ROCKSDB_SOURCE_DIR}/db/column_family.cc
${ROCKSDB_SOURCE_DIR}/db/compaction/compaction.cc
${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_iterator.cc
@ -124,6 +128,7 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_write.cc
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_compaction_flush.cc
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_files.cc
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_follower.cc
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_open.cc
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_debug.cc
${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_experimental.cc
@ -181,6 +186,7 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/env/env_encryption.cc
${ROCKSDB_SOURCE_DIR}/env/file_system.cc
${ROCKSDB_SOURCE_DIR}/env/file_system_tracer.cc
${ROCKSDB_SOURCE_DIR}/env/fs_on_demand.cc
${ROCKSDB_SOURCE_DIR}/env/fs_remap.cc
${ROCKSDB_SOURCE_DIR}/env/mock_env.cc
${ROCKSDB_SOURCE_DIR}/env/unique_id_gen.cc
@ -368,6 +374,7 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/volatile_tier_impl.cc
${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/cache_simulator.cc
${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/sim_cache.cc
${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_for_tiering_collector.cc
${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_on_deletion_collector.cc
${ROCKSDB_SOURCE_DIR}/utilities/trace/file_trace_reader_writer.cc
${ROCKSDB_SOURCE_DIR}/utilities/trace/replayer_impl.cc
@ -388,6 +395,7 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn_db.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn_db.cc
${ROCKSDB_SOURCE_DIR}/utilities/types_util.cc
${ROCKSDB_SOURCE_DIR}/utilities/ttl/db_ttl_impl.cc
${ROCKSDB_SOURCE_DIR}/utilities/wal_filter.cc
${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index.cc
@ -418,14 +426,18 @@ if(HAS_ARMV8_CRC)
endif(HAS_ARMV8_CRC)

list(APPEND SOURCES
"${ROCKSDB_SOURCE_DIR}/port/port_posix.cc"
"${ROCKSDB_SOURCE_DIR}/env/env_posix.cc"
"${ROCKSDB_SOURCE_DIR}/env/fs_posix.cc"
"${ROCKSDB_SOURCE_DIR}/env/io_posix.cc")
${ROCKSDB_SOURCE_DIR}/port/port_posix.cc
${ROCKSDB_SOURCE_DIR}/env/env_posix.cc
${ROCKSDB_SOURCE_DIR}/env/fs_posix.cc
${ROCKSDB_SOURCE_DIR}/env/io_posix.cc)

add_library(_rocksdb ${SOURCES})
add_library(ch_contrib::rocksdb ALIAS _rocksdb)
target_link_libraries(_rocksdb PRIVATE ${THIRDPARTY_LIBS} ${SYSTEM_LIBS})

# Not in the native build system but useful anyways:
# Make all functions in xxHash.h inline. Beneficial for performance: https://github.com/Cyan4973/xxHash/tree/v0.8.2#build-modifiers
target_compile_definitions (_rocksdb PRIVATE XXH_INLINE_ALL)

# SYSTEM is required to overcome some issues
target_include_directories(_rocksdb SYSTEM BEFORE INTERFACE "${ROCKSDB_SOURCE_DIR}/include")
2
contrib/sysroot
vendored
@ -1 +1 @@
Subproject commit cc385041b226d1fc28ead14dbab5d40a5f821dd8
Subproject commit 5be834147d5b5dd77ca2b821f356982029320513
2
contrib/usearch
vendored
@ -1 +1 @@
Subproject commit 955c6f9c11adfd89c912e0d1643d160b4e9e543f
Subproject commit 7a8967cb442b08ca20c3dd781414378e65957d37
@ -1,17 +1,22 @@
set(USEARCH_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/usearch")
set(USEARCH_SOURCE_DIR "${USEARCH_PROJECT_DIR}/include")

set(FP16_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/FP16")
set(ROBIN_MAP_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/robin-map")
set(SIMSIMD_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/SimSIMD-map")
set(SIMSIMD_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/SimSIMD")
set(USEARCH_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/usearch")

add_library(_usearch INTERFACE)

target_include_directories(_usearch SYSTEM INTERFACE
${FP16_PROJECT_DIR}/include
${ROBIN_MAP_PROJECT_DIR}/include
${SIMSIMD_PROJECT_DIR}/include
${USEARCH_SOURCE_DIR})
${USEARCH_PROJECT_DIR}/include)

target_compile_definitions(_usearch INTERFACE USEARCH_USE_FP16LIB)

# target_compile_definitions(_usearch INTERFACE USEARCH_USE_SIMSIMD)
# ^^ simsimd is not enabled at the moment. Reasons:
# - Vectorization is important for raw scans but not so much for HNSW. We use usearch only for HNSW.
# - Simsimd does compile-time dispatch (choice of SIMD kernels determined by capabilities of the build machine) or dynamic dispatch (SIMD
# kernels chosen at runtime based on cpuid instruction). Since current builds are limited to SSE 4.2 (x86) and NEON (ARM), the speedup of
# the former would be moderate compared to AVX-512 / SVE. The latter is at the moment too fragile with respect to portability across x86
# and ARM machines ... certain combinations of quantizations / distance functions / SIMD instructions are not implemented at the moment.

add_library(ch_contrib::usearch ALIAS _usearch)
target_compile_definitions(_usearch INTERFACE ENABLE_USEARCH)
2
contrib/zlib-ng
vendored
@ -1 +1 @@
Subproject commit 50f0eae1a411764cd6d1e85b3ce471438acd3c1c
Subproject commit a2fbeffdc30a8b0ce6d54ee31208e2688eac4c9f
@ -14,6 +14,8 @@ add_definitions(-DHAVE_VISIBILITY_HIDDEN)
add_definitions(-DHAVE_VISIBILITY_INTERNAL)
add_definitions(-DHAVE_BUILTIN_CTZ)
add_definitions(-DHAVE_BUILTIN_CTZLL)
add_definitions(-DHAVE_ATTRIBUTE_ALIGNED)
add_definitions(-DHAVE_POSIX_MEMALIGN)

set(ZLIB_ARCH_SRCS)
set(ZLIB_ARCH_HDRS)
@ -24,67 +26,74 @@ if(ARCH_AARCH64)
set(ARCHDIR "${SOURCE_DIR}/arch/arm")

add_definitions(-DARM_FEATURES)
add_definitions(-DHAVE_SYS_AUXV_H)
add_definitions(-DARM_AUXV_HAS_CRC32 -DARM_ASM_HWCAP)
add_definitions(-DARM_AUXV_HAS_NEON)
add_definitions(-DARM_ACLE_CRC_HASH)
add_definitions(-DARM_NEON_ADLER32 -DARM_NEON_CHUNKSET -DARM_NEON_SLIDEHASH)
add_definitions(-DARM_ACLE)
add_definitions(-DHAVE_ARM_ACLE_H)
add_definitions(-DARM_NEON)
add_definitions(-DARM_NEON_HASLD4)

list(APPEND ZLIB_ARCH_HDRS ${ARCHDIR}/arm.h)
list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/armfeature.c)
list(APPEND ZLIB_ARCH_HDRS ${ARCHDIR}/arm_features.h)
list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/arm_features.c)
set(ACLE_SRCS ${ARCHDIR}/crc32_acle.c ${ARCHDIR}/insert_string_acle.c)
list(APPEND ZLIB_ARCH_SRCS ${ACLE_SRCS})
set(NEON_SRCS ${ARCHDIR}/adler32_neon.c ${ARCHDIR}/chunkset_neon.c ${ARCHDIR}/slide_neon.c)
set(NEON_SRCS ${ARCHDIR}/adler32_neon.c ${ARCHDIR}/chunkset_neon.c
${ARCHDIR}/compare256_neon.c ${ARCHDIR}/slide_hash_neon.c)
list(APPEND ZLIB_ARCH_SRCS ${NEON_SRCS})

elseif(ARCH_PPC64LE)
set(ARCHDIR "${SOURCE_DIR}/arch/power")

add_definitions(-DPOWER8)
add_definitions(-DPOWER_FEATURES)
add_definitions(-DPOWER8_VSX_ADLER32)
add_definitions(-DPOWER8_VSX_SLIDEHASH)
add_definitions(-DHAVE_SYS_AUXV_H)

list(APPEND ZLIB_ARCH_HDRS ${ARCHDIR}/power.h)
list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/power.c)
set(POWER8_SRCS ${ARCHDIR}/adler32_power8.c ${ARCHDIR}/slide_hash_power8.c)
if(POWER9)
add_definitions(-DPOWER9)
else()
add_definitions(-DPOWER8)
add_definitions(-DPOWER8_VSX)
add_definitions(-DPOWER8_VSX_CRC32)
endif()

list(APPEND ZLIB_ARCH_HDRS ${ARCHDIR}/power_features.h)
list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/power_features.c)
set(POWER8_SRCS ${ARCHDIR}/adler32_power8.c ${ARCHDIR}/chunkset_power8.c ${ARCHDIR}/slide_hash_power8.c)
list(APPEND POWER8_SRCS ${ARCHDIR}/crc32_power8.c)
list(APPEND ZLIB_ARCH_SRCS ${POWER8_SRCS})

elseif(ARCH_AMD64)
set(ARCHDIR "${SOURCE_DIR}/arch/x86")

add_definitions(-DX86_FEATURES)
list(APPEND ZLIB_ARCH_HDRS ${ARCHDIR}/x86.h)
list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/x86.c)
list(APPEND ZLIB_ARCH_HDRS ${ARCHDIR}/x86_features.h)
list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/x86_features.c)
if(ENABLE_AVX2)
add_definitions(-DX86_AVX2 -DX86_AVX2_ADLER32 -DX86_AVX_CHUNKSET)
set(AVX2_SRCS ${ARCHDIR}/slide_avx.c)
list(APPEND AVX2_SRCS ${ARCHDIR}/chunkset_avx.c)
list(APPEND AVX2_SRCS ${ARCHDIR}/compare258_avx.c)
list(APPEND AVX2_SRCS ${ARCHDIR}/adler32_avx.c)
add_definitions(-DX86_AVX2)
set(AVX2_SRCS ${ARCHDIR}/slide_hash_avx2.c)
list(APPEND AVX2_SRCS ${ARCHDIR}/chunkset_avx2.c)
list(APPEND AVX2_SRCS ${ARCHDIR}/compare256_avx2.c)
list(APPEND AVX2_SRCS ${ARCHDIR}/adler32_avx2.c)
list(APPEND ZLIB_ARCH_SRCS ${AVX2_SRCS})
endif()
if(ENABLE_SSE42)
add_definitions(-DX86_SSE42_CRC_HASH)
set(SSE42_SRCS ${ARCHDIR}/insert_string_sse.c)
list(APPEND ZLIB_ARCH_SRCS ${SSE42_SRCS})
add_definitions(-DX86_SSE42_CRC_INTRIN)
add_definitions(-DX86_SSE42_CMP_STR)
set(SSE42_SRCS ${ARCHDIR}/compare258_sse.c)
add_definitions(-DX86_SSE42)
set(SSE42_SRCS ${ARCHDIR}/adler32_sse42.c ${ARCHDIR}/insert_string_sse42.c)
list(APPEND ZLIB_ARCH_SRCS ${SSE42_SRCS})
endif()
if(ENABLE_SSSE3)
add_definitions(-DX86_SSSE3 -DX86_SSSE3_ADLER32)
set(SSSE3_SRCS ${ARCHDIR}/adler32_ssse3.c)
add_definitions(-DX86_SSSE3)
set(SSSE3_SRCS ${ARCHDIR}/adler32_ssse3.c ${ARCHDIR}/chunkset_ssse3.c)
list(APPEND ZLIB_ARCH_SRCS ${SSSE3_SRCS})
endif()
if(ENABLE_PCLMULQDQ)
add_definitions(-DX86_PCLMULQDQ_CRC)
set(PCLMULQDQ_SRCS ${ARCHDIR}/crc_folding.c)
set(PCLMULQDQ_SRCS ${ARCHDIR}/crc32_pclmulqdq.c)
list(APPEND ZLIB_ARCH_SRCS ${PCLMULQDQ_SRCS})
endif()

add_definitions(-DX86_SSE2 -DX86_SSE2_CHUNKSET -DX86_SSE2_SLIDEHASH)
set(SSE2_SRCS ${ARCHDIR}/chunkset_sse.c ${ARCHDIR}/slide_sse.c)
add_definitions(-DX86_SSE2)
set(SSE2_SRCS ${ARCHDIR}/chunkset_sse2.c ${ARCHDIR}/compare256_sse2.c ${ARCHDIR}/slide_hash_sse2.c)
list(APPEND ZLIB_ARCH_SRCS ${SSE2_SRCS})
add_definitions(-DX86_NOCHECK_SSE2)
endif ()
@ -106,39 +115,45 @@ generate_cmakein(${SOURCE_DIR}/zconf.h.in ${CMAKE_CURRENT_BINARY_DIR}/zconf.h.cm

set(ZLIB_SRCS
${SOURCE_DIR}/adler32.c
${SOURCE_DIR}/adler32_fold.c
${SOURCE_DIR}/chunkset.c
${SOURCE_DIR}/compare258.c
${SOURCE_DIR}/compare256.c
${SOURCE_DIR}/compress.c
${SOURCE_DIR}/crc32.c
${SOURCE_DIR}/crc32_comb.c
${SOURCE_DIR}/cpu_features.c
${SOURCE_DIR}/crc32_braid.c
${SOURCE_DIR}/crc32_braid_comb.c
${SOURCE_DIR}/crc32_fold.c
${SOURCE_DIR}/deflate.c
${SOURCE_DIR}/deflate_fast.c
${SOURCE_DIR}/deflate_huff.c
${SOURCE_DIR}/deflate_medium.c
${SOURCE_DIR}/deflate_quick.c
${SOURCE_DIR}/deflate_rle.c
${SOURCE_DIR}/deflate_slow.c
${SOURCE_DIR}/deflate_stored.c
${SOURCE_DIR}/functable.c
${SOURCE_DIR}/infback.c
${SOURCE_DIR}/inffast.c
${SOURCE_DIR}/inflate.c
${SOURCE_DIR}/inftrees.c
${SOURCE_DIR}/insert_string.c
${SOURCE_DIR}/insert_string_roll.c
${SOURCE_DIR}/slide_hash.c
${SOURCE_DIR}/trees.c
${SOURCE_DIR}/uncompr.c
${SOURCE_DIR}/zutil.c
)

set(ZLIB_GZFILE_SRCS
${SOURCE_DIR}/gzlib.c
${SOURCE_DIR}/gzread.c
${CMAKE_CURRENT_BINARY_DIR}/gzread.c
${SOURCE_DIR}/gzwrite.c
)

set(ZLIB_ALL_SRCS ${ZLIB_SRCS} ${ZLIB_ARCH_SRCS})
set(ZLIB_ALL_SRCS ${ZLIB_SRCS} ${ZLIB_ARCH_SRCS} ${ZLIB_GZFILE_SRCS})

add_library(_zlib ${ZLIB_ALL_SRCS})
add_library(ch_contrib::zlib ALIAS _zlib)

# https://github.com/zlib-ng/zlib-ng/pull/733
# This is disabled by default
add_compile_definitions(Z_TLS=__thread)

if(HAVE_UNISTD_H)
SET(ZCONF_UNISTD_LINE "#if 1 /* was set to #if 1 by configure/cmake/etc */")
else()
@ -153,6 +168,9 @@ endif()
set(ZLIB_PC ${CMAKE_CURRENT_BINARY_DIR}/zlib.pc)
configure_file(${SOURCE_DIR}/zlib.pc.cmakein ${ZLIB_PC} @ONLY)
configure_file(${CMAKE_CURRENT_BINARY_DIR}/zconf.h.cmakein ${CMAKE_CURRENT_BINARY_DIR}/zconf.h @ONLY)
configure_file(${SOURCE_DIR}/zlib.h.in ${CMAKE_CURRENT_BINARY_DIR}/zlib.h @ONLY)
configure_file(${SOURCE_DIR}/zlib_name_mangling.h.in ${CMAKE_CURRENT_BINARY_DIR}/zlib_name_mangling.h @ONLY)
configure_file(${SOURCE_DIR}/gzread.c.in ${CMAKE_CURRENT_BINARY_DIR}/gzread.c @ONLY)

# We should use same defines when including zlib.h as used when zlib compiled
target_compile_definitions (_zlib PUBLIC ZLIB_COMPAT WITH_GZFILEOP)
@ -47,8 +47,7 @@
"docker/test/stateful": {
"name": "clickhouse/stateful-test",
"dependent": [
"docker/test/stress",
"docker/test/upgrade"
"docker/test/stress"
]
},
"docker/test/unit": {
@ -59,10 +58,6 @@
"name": "clickhouse/stress-test",
"dependent": []
},
"docker/test/upgrade": {
"name": "clickhouse/upgrade-check",
"dependent": []
},
"docker/test/integration/runner": {
"name": "clickhouse/integration-tests-runner",
"dependent": []
@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="24.7.2.13"
ARG VERSION="24.8.3.59"
ARG PACKAGES="clickhouse-keeper"
ARG DIRECT_DOWNLOAD_URLS=""

@ -40,8 +40,6 @@ fi

DATA_DIR="${CLICKHOUSE_DATA_DIR:-/var/lib/clickhouse}"
LOG_DIR="${LOG_DIR:-/var/log/clickhouse-keeper}"
LOG_PATH="${LOG_DIR}/clickhouse-keeper.log"
ERROR_LOG_PATH="${LOG_DIR}/clickhouse-keeper.err.log"
COORDINATION_DIR="${DATA_DIR}/coordination"
COORDINATION_LOG_DIR="${DATA_DIR}/coordination/log"
COORDINATION_SNAPSHOT_DIR="${DATA_DIR}/coordination/snapshots"
@ -84,7 +82,7 @@ if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then

# There is a config file. It is already tested with gosu (if it is readable by keeper user)
if [ -f "$KEEPER_CONFIG" ]; then
exec $gosu /usr/bin/clickhouse-keeper --config-file="$KEEPER_CONFIG" --log-file="$LOG_PATH" --errorlog-file="$ERROR_LOG_PATH" "$@"
exec $gosu /usr/bin/clickhouse-keeper --config-file="$KEEPER_CONFIG" "$@"
fi

# There is no config file. Will use embedded one
@ -108,7 +108,8 @@ if [ -n "$MAKE_DEB" ]; then
bash -x /build/packages/build
fi

mv ./programs/clickhouse* /output || mv ./programs/*_fuzzer /output
mv ./programs/clickhouse* /output ||:
mv ./programs/*_fuzzer /output ||:
[ -x ./programs/self-extracting/clickhouse ] && mv ./programs/self-extracting/clickhouse /output
[ -x ./programs/self-extracting/clickhouse-stripped ] && mv ./programs/self-extracting/clickhouse-stripped /output
[ -x ./programs/self-extracting/clickhouse-keeper ] && mv ./programs/self-extracting/clickhouse-keeper /output
@ -1,3 +1,5 @@
# docker build -t clickhouse/cctools .

# This is a hack to significantly reduce the build time of the clickhouse/binary-builder
# It's based on the assumption that we don't care about the cctools version so much
# It even does not depend on the clickhouse/fasttest in the `docker/images.json`
@ -30,5 +32,29 @@ RUN git clone https://github.com/tpoechtrager/cctools-port.git \
&& cd ../.. \
&& rm -rf cctools-port

#
# GDB
#
# ld from binutils is 2.38, which has the following error:
#
# DWARF error: invalid or unhandled FORM value: 0x23
#
ENV LD=ld.lld-${LLVM_VERSION}
ARG GDB_VERSION=15.1
RUN apt-get update \
&& apt-get install --yes \
libgmp-dev \
libmpfr-dev \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
RUN wget https://sourceware.org/pub/gdb/releases/gdb-$GDB_VERSION.tar.gz \
&& tar -xvf gdb-$GDB_VERSION.tar.gz \
&& cd gdb-$GDB_VERSION \
&& ./configure --prefix=/opt/gdb \
&& make -j $(nproc) \
&& make install \
&& rm -fr gdb-$GDB_VERSION gdb-$GDB_VERSION.tar.gz

FROM scratch
COPY --from=builder /cctools /cctools
COPY --from=builder /opt/gdb /opt/gdb
@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="24.7.2.13"
ARG VERSION="24.8.3.59"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
ARG DIRECT_DOWNLOAD_URLS=""

@ -28,7 +28,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list

ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
ARG VERSION="24.7.2.13"
ARG VERSION="24.8.3.59"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

#docker-official-library:off
@ -28,12 +28,14 @@ RUN echo "TSAN_OPTIONS='verbosity=1000 halt_on_error=1 abort_on_error=1 history_
RUN echo "UBSAN_OPTIONS='print_stacktrace=1 max_allocation_size_mb=32768'" >> /etc/environment
RUN echo "MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1 max_allocation_size_mb=32768'" >> /etc/environment
RUN echo "LSAN_OPTIONS='suppressions=/usr/share/clickhouse-test/config/lsan_suppressions.txt max_allocation_size_mb=32768'" >> /etc/environment
RUN echo "ASAN_OPTIONS='halt_on_error=1 abort_on_error=1'" >> /etc/environment
# Sanitizer options for current shell (not current, but the one that will be spawned on "docker run")
# (but w/o verbosity for TSAN, otherwise test.reference will not match)
ENV TSAN_OPTIONS='halt_on_error=1 abort_on_error=1 history_size=7 memory_limit_mb=46080 second_deadlock_stack=1 max_allocation_size_mb=32768'
ENV UBSAN_OPTIONS='print_stacktrace=1 max_allocation_size_mb=32768'
ENV MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1 max_allocation_size_mb=32768'
ENV LSAN_OPTIONS='max_allocation_size_mb=32768'
ENV ASAN_OPTIONS='halt_on_error=1 abort_on_error=1'

# for external_symbolizer_path, and also ensure that llvm-symbolizer really
# exists (since you don't want to fallback to addr2line, it is very slow)
@ -218,6 +218,6 @@ function stop_logs_replication
clickhouse-client --query "select database||'.'||table from system.tables where database = 'system' and (table like '%_sender' or table like '%_watcher')" | {
tee /dev/stderr
} | {
xargs -n1 -r -i clickhouse-client --query "drop table {}"
timeout --preserve-status --signal TERM --kill-after 5m 15m xargs -n1 -r -i clickhouse-client --query "drop table {}"
}
}
@ -83,7 +83,7 @@ RUN arch=${TARGETARCH:-amd64} \

# Give suid to gdb to grant it attach permissions
# chmod 777 to make the container user independent
RUN chmod u+s /usr/bin/gdb \
RUN chmod u+s /opt/gdb/bin/gdb \
&& mkdir -p /var/lib/clickhouse \
&& chmod 777 /var/lib/clickhouse

@ -93,6 +93,3 @@ RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
ENV COMMIT_SHA=''
ENV PULL_REQUEST_NUMBER=''
ENV COPY_CLICKHOUSE_BINARY_TO_OUTPUT=0

COPY run.sh /
CMD ["/bin/bash", "/run.sh"]
@ -193,53 +193,60 @@ function fuzz

kill -0 $server_pid

# Set follow-fork-mode to parent, because we attach to clickhouse-server, not to watchdog
# and clickhouse-server can do fork-exec, for example, to run some bridge.
# Do not set nostop noprint for all signals, because some of them may cause gdb to hang,
# explicitly ignore non-fatal signals that are used by server.
# Number of SIGRTMIN can be determined only at runtime.
RTMIN=$(kill -l SIGRTMIN)
echo "
set follow-fork-mode parent
handle SIGHUP nostop noprint pass
handle SIGINT nostop noprint pass
handle SIGQUIT nostop noprint pass
handle SIGPIPE nostop noprint pass
handle SIGTERM nostop noprint pass
handle SIGUSR1 nostop noprint pass
handle SIGUSR2 nostop noprint pass
handle SIG$RTMIN nostop noprint pass
info signals
continue
backtrace full
thread apply all backtrace full
info registers
disassemble /s
up
disassemble /s
up
disassemble /s
p \"done\"
detach
quit
" > script.gdb
IS_ASAN=$(clickhouse-client --query "SELECT count() FROM system.build_options WHERE name = 'CXX_FLAGS' AND position('sanitize=address' IN value)")
if [[ "$IS_ASAN" = "1" ]];
then
echo "ASAN build detected. Not using gdb since it disables LeakSanitizer detections"
else
# Set follow-fork-mode to parent, because we attach to clickhouse-server, not to watchdog
# and clickhouse-server can do fork-exec, for example, to run some bridge.
# Do not set nostop noprint for all signals, because some of them may cause gdb to hang,
# explicitly ignore non-fatal signals that are used by server.
# Number of SIGRTMIN can be determined only at runtime.
RTMIN=$(kill -l SIGRTMIN)
echo "
set follow-fork-mode parent
handle SIGHUP nostop noprint pass
handle SIGINT nostop noprint pass
handle SIGQUIT nostop noprint pass
handle SIGPIPE nostop noprint pass
handle SIGTERM nostop noprint pass
handle SIGUSR1 nostop noprint pass
handle SIGUSR2 nostop noprint pass
handle SIG$RTMIN nostop noprint pass
info signals
continue
backtrace full
thread apply all backtrace full
info registers
disassemble /s
up
disassemble /s
up
disassemble /s
p \"done\"
detach
quit
" > script.gdb

gdb -batch -command script.gdb -p $server_pid &
sleep 5
# gdb will send SIGSTOP, spend some time loading debug info, and then send SIGCONT, wait for it (up to send_timeout, 300s)
time clickhouse-client --query "SELECT 'Connected to clickhouse-server after attaching gdb'" ||:
gdb -batch -command script.gdb -p $server_pid &
sleep 5
# gdb will send SIGSTOP, spend some time loading debug info, and then send SIGCONT, wait for it (up to send_timeout, 300s)
time clickhouse-client --query "SELECT 'Connected to clickhouse-server after attaching gdb'" ||:

# Check connectivity after we attach gdb, because it might cause the server
# to freeze, and the fuzzer will fail. In debug build, it can take a lot of time.
for _ in {1..180}
do
if clickhouse-client --query "select 1"
then
break
fi
sleep 1
done
kill -0 $server_pid # This checks that it is our server that is started and not some other one
fi

# Check connectivity after we attach gdb, because it might cause the server
# to freeze, and the fuzzer will fail. In debug build, it can take a lot of time.
for _ in {1..180}
do
if clickhouse-client --query "select 1"
then
break
fi
sleep 1
done
kill -0 $server_pid # This checks that it is our server that is started and not some other one
echo 'Server started and responded.'

setup_logs_replication
@ -264,8 +271,13 @@ quit
# The fuzzer_pid belongs to the timeout process.
actual_fuzzer_pid=$(ps -o pid= --ppid "$fuzzer_pid")

echo "Attaching gdb to the fuzzer itself"
gdb -batch -command script.gdb -p $actual_fuzzer_pid &
if [[ "$IS_ASAN" = "1" ]];
then
echo "ASAN build detected. Not using gdb since it disables LeakSanitizer detections"
else
echo "Attaching gdb to the fuzzer itself"
gdb -batch -command script.gdb -p $actual_fuzzer_pid &
fi

# Wait for the fuzzer to complete.
# Note that the 'wait || ...' thing is required so that the script doesn't
@ -11,7 +11,6 @@ RUN apt-get update \
curl \
default-jre \
g++ \
gdb \
iproute2 \
krb5-user \
libicu-dev \
@ -73,3 +72,6 @@ maxClientCnxns=80' > /opt/zookeeper/conf/zoo.cfg && \

ENV TZ=Etc/UTC
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

COPY --from=clickhouse/cctools:0d6b90a7a490 /opt/gdb /opt/gdb
ENV PATH="/opt/gdb/bin:${PATH}"
@ -3,6 +3,8 @@

FROM alpine:3.18
RUN apk add --no-cache -U iproute2 \
&& for bin in iptables iptables-restore iptables-save; \
&& for bin in \
iptables iptables-restore iptables-save \
ip6tables ip6tables-restore ip6tables-save; \
do ln -sf xtables-nft-multi "/sbin/$bin"; \
done
@ -30,7 +30,6 @@ RUN apt-get update \
luajit \
libssl-dev \
libcurl4-openssl-dev \
gdb \
default-jdk \
software-properties-common \
libkrb5-dev \
@ -87,6 +86,8 @@ COPY modprobe.sh /usr/local/bin/modprobe
COPY dockerd-entrypoint.sh /usr/local/bin/
COPY misc/ /misc/

COPY --from=clickhouse/cctools:0d6b90a7a490 /opt/gdb /opt/gdb
ENV PATH="/opt/gdb/bin:${PATH}"

# Same options as in test/base/Dockerfile
# (in case you need to override them in tests)
@ -74,6 +74,7 @@ protobuf==4.25.2
psycopg2-binary==2.9.6
py4j==0.10.9.5
py==1.11.0
pyarrow==17.0.0
pycparser==2.22
pycryptodome==3.20.0
pymongo==3.11.0
@ -111,3 +112,5 @@ wadllib==1.3.6
websocket-client==0.59.0
wheel==0.37.1
zipp==1.0.0
deltalake==0.16.0

@ -9,7 +9,6 @@ RUN apt-get update \
curl \
dmidecode \
g++ \
gdb \
git \
gnuplot \
imagemagick \
@ -42,6 +41,9 @@ RUN pip3 --no-cache-dir install -r requirements.txt

COPY run.sh /

COPY --from=clickhouse/cctools:0d6b90a7a490 /opt/gdb /opt/gdb
ENV PATH="/opt/gdb/bin:${PATH}"

CMD ["bash", "/run.sh"]

# docker run --network=host --volume <workspace>:/workspace --volume=<output>:/output -e PR_TO_TEST=<> -e SHA_TO_TEST=<> clickhouse/performance-comparison
@ -13,7 +13,8 @@ entry="/usr/share/clickhouse-test/performance/scripts/entrypoint.sh"
# https://www.kernel.org/doc/Documentation/filesystems/tmpfs.txt
# Double-escaped backslashes are a tribute to the engineering wonder of docker --
# it gives '/bin/sh: 1: [bash,: not found' otherwise.
numactl --hardware
echo > compare.log
numactl --hardware | tee -a compare.log
node=$(( RANDOM % $(numactl --hardware | sed -n 's/^.*available:\(.*\)nodes.*$/\1/p') ));
echo Will bind to NUMA node $node;
echo Will bind to NUMA node $node | tee -a compare.log
numactl --cpunodebind=$node --membind=$node $entry
@ -35,12 +35,8 @@ RUN mkdir -p /tmp/clickhouse-odbc-tmp \


ENV TZ=Europe/Amsterdam
ENV MAX_RUN_TIME=9000
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

ARG sqllogic_test_repo="https://github.com/gregrahn/sqllogictest.git"

RUN git clone --recursive ${sqllogic_test_repo}

COPY run.sh /
CMD ["/bin/bash", "/run.sh"]
@ -22,7 +22,6 @@ ARG sqltest_repo="https://github.com/elliotchance/sqltest/"
RUN git clone ${sqltest_repo}

ENV TZ=UTC
ENV MAX_RUN_TIME=900
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

COPY run.sh /
@ -10,7 +10,3 @@ RUN apt-get update -y \
npm \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

COPY create.sql /
COPY run.sh /
CMD ["/bin/bash", "/run.sh"]
@ -1 +0,0 @@
../stateless/setup_minio.sh
@ -65,12 +65,11 @@ ENV TZ=Europe/Amsterdam
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

ENV NUM_TRIES=1
ENV MAX_RUN_TIME=0

# Unrelated to vars in setup_minio.sh, but should be the same there
# to have the same binaries for local running scenario
ARG MINIO_SERVER_VERSION=2022-01-03T18-22-58Z
ARG MINIO_CLIENT_VERSION=2022-01-05T23-52-51Z
ARG MINIO_SERVER_VERSION=2024-08-03T04-33-23Z
ARG MINIO_CLIENT_VERSION=2024-07-31T15-58-33Z
ARG TARGETARCH

# Download Minio-related binaries
@ -86,18 +85,6 @@ RUN curl -L --no-verbose -O 'https://archive.apache.org/dist/hadoop/common/hadoo
ENV MINIO_ROOT_USER="clickhouse"
ENV MINIO_ROOT_PASSWORD="clickhouse"
ENV EXPORT_S3_STORAGE_POLICIES=1
ENV CLICKHOUSE_GRPC_CLIENT="/usr/share/clickhouse-utils/grpc-client/clickhouse-grpc-client.py"

RUN npm install -g azurite@3.30.0 \
    && npm install -g tslib && npm install -g node

COPY run.sh /
COPY setup_minio.sh /
COPY setup_hdfs_minicluster.sh /
COPY attach_gdb.lib /
COPY utils.lib /

# We store stress_tests.lib in stateless image to avoid duplication of this file in stress and upgrade tests
COPY stress_tests.lib /

CMD ["/bin/bash", "/run.sh"]
@ -1,51 +0,0 @@
#!/bin/bash

# shellcheck source=./utils.lib
source /utils.lib

function attach_gdb_to_clickhouse()
{
    # Set follow-fork-mode to parent, because we attach to clickhouse-server, not to the watchdog,
    # and clickhouse-server can do fork-exec, for example, to run some bridge.
    # Do not set nostop noprint for all signals, because for some of them it may cause gdb to hang;
    # explicitly ignore non-fatal signals that are used by the server.
    # The number of SIGRTMIN can be determined only at runtime.
    RTMIN=$(kill -l SIGRTMIN)
    # shellcheck disable=SC2016
    echo "
set follow-fork-mode parent
handle SIGHUP nostop noprint pass
handle SIGINT nostop noprint pass
handle SIGQUIT nostop noprint pass
handle SIGPIPE nostop noprint pass
handle SIGTERM nostop noprint pass
handle SIGUSR1 nostop noprint pass
handle SIGUSR2 nostop noprint pass
handle SIG$RTMIN nostop noprint pass
info signals
continue
backtrace full
info registers
p \"top 1 KiB of the stack:\"
p/x *(uint64_t[128]*)"'$sp'"
maintenance info sections
thread apply all backtrace full
disassemble /s
up
disassemble /s
up
disassemble /s
p \"done\"
detach
quit
" > script.gdb

    # FIXME Hung check may work incorrectly because of the attached gdb:
    # we cannot attach another gdb to get stacktraces if some queries hang.
    gdb -batch -command script.gdb -p "$(cat /var/run/clickhouse-server/clickhouse-server.pid)" | ts '%Y-%m-%d %H:%M:%S' >> /test_output/gdb.log &
    sleep 5
    # gdb will send SIGSTOP, spend some time loading debug info, and then send SIGCONT; wait for it (up to send_timeout, 300s)
    run_with_retry 60 clickhouse-client --query "SELECT 'Connected to clickhouse-server after attaching gdb'"
}

# vi: ft=bash
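
The SIGRTMIN indirection in the deleted library exists because real-time signal numbers are fixed only at runtime: glibc reserves the first few for its threading internals, so the symbolic name must be resolved on the target system. A quick way to see this on any Linux shell:

    kill -l SIGRTMIN                 # typically prints 34 with glibc (32/33 are reserved for NPTL)
    RTMIN=$(kill -l SIGRTMIN)
    echo "gdb will be told: handle SIG$RTMIN nostop noprint pass"
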
@ -22,8 +22,5 @@ RUN apt-get update -y \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

COPY run.sh /

ENV EXPORT_S3_STORAGE_POLICIES=1

CMD ["/bin/bash", "/run.sh"]
@ -4,4 +4,5 @@ ARG FROM_TAG=latest
FROM clickhouse/test-base:$FROM_TAG

COPY run.sh /
CMD ["/bin/bash", "/run.sh"]
RUN chmod +x run.sh
ENTRYPOINT ["/run.sh"]
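
Switching from CMD to ENTRYPOINT lets the CI runner pass the test mode as a plain container argument, which the rewritten run.sh below consumes as $1. Illustratively (the image tag here is a guess, not taken from this diff):

    docker run --rm clickhouse/unit-test:latest GDB      # argument lands in run.sh as $1
    docker run --rm clickhouse/unit-test:latest NO_GDB
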
@ -1,5 +1,27 @@
#!/bin/bash

set -x
# Need to keep the error code from the tests after `tee`; otherwise we don't alert on asan errors.
set -o pipefail
set -e

timeout 40m gdb -q -ex 'set print inferior-events off' -ex 'set confirm off' -ex 'set print thread-events off' -ex run -ex bt -ex quit --args ./unit_tests_dbms --gtest_output='json:test_output/test_result.json' | tee test_output/test_result.txt
if [ "$#" -ne 1 ]; then
    echo "Expected exactly one argument"
    exit 1
fi

if [ "$1" = "GDB" ];
then
    timeout 40m \
        gdb -q -ex "set print inferior-events off" -ex "set confirm off" -ex "set print thread-events off" -ex run -ex bt -ex quit --args \
        ./unit_tests_dbms --gtest_output='json:test_output/test_result.json' \
        | tee test_output/test_result.txt
elif [ "$1" = "NO_GDB" ];
then
    timeout 40m \
        ./unit_tests_dbms --gtest_output='json:test_output/test_result.json' \
        | tee test_output/test_result.txt
else
    echo "Unknown argument: $1"
    exit 1
fi
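
Taken together with the ENTRYPOINT change above, the script is driven like this (a sketch; paths assume the container layout used by these images):

    /run.sh GDB      # run unit_tests_dbms under gdb; a backtrace is printed on a crash
    /run.sh NO_GDB   # run the binary directly, without the gdb wrapper
    /run.sh          # exits 1 with "Expected exactly one argument"
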
@ -1,29 +0,0 @@
# rebuild in #33610
# docker build -t clickhouse/upgrade-check .
ARG FROM_TAG=latest
FROM clickhouse/stateful-test:$FROM_TAG

RUN apt-get update -y \
    && env DEBIAN_FRONTEND=noninteractive \
        apt-get install --yes --no-install-recommends \
            bash \
            tzdata \
            parallel \
            expect \
            python3 \
            python3-lxml \
            python3-termcolor \
            python3-requests \
            curl \
            sudo \
            openssl \
            netcat-openbsd \
            brotli \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

COPY run.sh /

ENV EXPORT_S3_STORAGE_POLICIES=1

CMD ["/bin/bash", "/run.sh"]
@ -44,7 +44,6 @@ RUN apt-get update \
    bash \
    bsdmainutils \
    build-essential \
    gdb \
    git \
    gperf \
    moreutils \
@ -57,4 +56,5 @@ RUN apt-get update \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

COPY process_functional_tests_result.py /
COPY --from=clickhouse/cctools:0d6b90a7a490 /opt/gdb /opt/gdb
ENV PATH="/opt/gdb/bin:${PATH}"
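
Several images in this diff drop the distro gdb package in favor of a prebuilt /opt/gdb copied out of the clickhouse/cctools image. The pattern can be smoke-tested like this (the digest comes from the diff; running the build-stage image directly is an assumption):

    docker run --rm --entrypoint /opt/gdb/bin/gdb clickhouse/cctools:0d6b90a7a490 --version
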
Some files were not shown because too many files have changed in this diff.