Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-21 15:12:02 +00:00

Commit 93d1b1d198: Merge branch 'master' into startup-sanity-checks
.clang-tidy

```diff
@@ -33,6 +33,8 @@ Checks: '-*,
     performance-no-automatic-move,
     performance-trivially-destructible,
     performance-unnecessary-copy-initialization,
+    performance-noexcept-move-constructor,
+    performance-move-const-arg,
 
     readability-avoid-const-params-in-decls,
     readability-const-return-type,
@@ -206,3 +208,5 @@ CheckOptions:
     value: CamelCase
   - key: modernize-loop-convert.UseCxx20ReverseRanges
     value: false
+  - key: performance-move-const-arg.CheckTriviallyCopyableMove
+    value: false
```
.gitattributes

```diff
@@ -1,2 +1,3 @@
 contrib/* linguist-vendored
 *.h linguist-language=C++
+tests/queries/0_stateless/data_json/* binary
```
.github/ISSUE_TEMPLATE/10_question.md

```diff
@@ -7,6 +7,6 @@ assignees: ''
 
 ---
 
-> Make sure to check documentation https://clickhouse.yandex/docs/en/ first. If the question is concise and probably has a short answer, asking it in Telegram chat https://telegram.me/clickhouse_en is probably the fastest way to find the answer. For more complicated questions, consider asking them on StackOverflow with "clickhouse" tag https://stackoverflow.com/questions/tagged/clickhouse
+> Make sure to check documentation https://clickhouse.com/docs/en/ first. If the question is concise and probably has a short answer, asking it in Telegram chat https://telegram.me/clickhouse_en is probably the fastest way to find the answer. For more complicated questions, consider asking them on StackOverflow with "clickhouse" tag https://stackoverflow.com/questions/tagged/clickhouse
 
 > If you still prefer GitHub issues, remove all this text and ask your question here.
```
.github/ISSUE_TEMPLATE/50_build-issue.md

```diff
@@ -7,7 +7,7 @@ assignees: ''
 
 ---
 
-> Make sure that `git diff` result is empty and you've just pulled fresh master. Try cleaning up cmake cache. Just in case, official build instructions are published here: https://clickhouse.yandex/docs/en/development/build/
+> Make sure that `git diff` result is empty and you've just pulled fresh master. Try cleaning up cmake cache. Just in case, official build instructions are published here: https://clickhouse.com/docs/en/development/build/
 
 **Operating system**
 
```
.github/codecov.yml (deleted)

```diff
@@ -1,17 +0,0 @@
-codecov:
-  max_report_age: "off"
-  strict_yaml_branch: "master"
-
-ignore:
-  - "contrib"
-  - "docs"
-  - "benchmark"
-  - "tests"
-  - "docker"
-  - "debian"
-  - "cmake"
-
-comment: false
-
-github_checks:
-  annotations: false
```
.github/workflows/anchore-analysis.yml (deleted)

```diff
@@ -1,43 +0,0 @@
-# This workflow checks out code, performs an Anchore container image
-# vulnerability and compliance scan, and integrates the results with
-# GitHub Advanced Security code scanning feature. For more information on
-# the Anchore scan action usage and parameters, see
-# https://github.com/anchore/scan-action. For more information on
-# Anchore container image scanning in general, see
-# https://docs.anchore.com.
-
-name: Docker Container Scan (clickhouse-server)
-
-env:
-  # Force the stdout and stderr streams to be unbuffered
-  PYTHONUNBUFFERED: 1
-
-"on":
-  pull_request:
-    paths:
-      - docker/server/Dockerfile
-      - .github/workflows/anchore-analysis.yml
-  schedule:
-    - cron: '0 21 * * *'
-
-jobs:
-  Anchore-Build-Scan:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout the code
-        uses: actions/checkout@v2
-      - name: Build the Docker image
-        run: |
-          cd docker/server
-          perl -pi -e 's|=\$version||g' Dockerfile
-          docker build . --file Dockerfile --tag localbuild/testimage:latest
-      - name: Run the local Anchore scan action itself with GitHub Advanced Security code scanning integration enabled
-        uses: anchore/scan-action@v2
-        id: scan
-        with:
-          image: "localbuild/testimage:latest"
-          acs-report-enable: true
-      - name: Upload Anchore Scan Report
-        uses: github/codeql-action/upload-sarif@v1
-        with:
-          sarif_file: ${{ steps.scan.outputs.sarif }}
```
.github/workflows/debug.yml (new file)

```diff
@@ -0,0 +1,11 @@
+# The CI for each commit, prints envs and content of GITHUB_EVENT_PATH
+name: Debug
+
+'on':
+  [push, pull_request, release]
+
+jobs:
+  DebugInfo:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: hmarr/debug-action@1201a20fc9d278ddddd5f0f46922d06513892491
```
.github/workflows/master.yml

```diff
@@ -1065,6 +1065,41 @@ jobs:
       docker kill "$(docker ps -q)" ||:
       docker rm -f "$(docker ps -a -q)" ||:
       sudo rm -fr "$TEMP_PATH"
+  FunctionalStatelessTestReleaseS3:
+    needs: [BuilderDebRelease]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/stateless_s3_storage
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Stateless tests (release, s3 storage, actions)
+          REPO_COPY=${{runner.temp}}/stateless_s3_storage/ClickHouse
+          KILL_TIMEOUT=10800
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Clear repository
+        run: |
+          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Functional test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill "$(docker ps -q)" ||:
+          docker rm -f "$(docker ps -a -q)" ||:
+          sudo rm -fr "$TEMP_PATH"
   FunctionalStatelessTestAarch64:
     needs: [BuilderDebAarch64]
     runs-on: [self-hosted, func-tester-aarch64]
@@ -2844,6 +2879,7 @@ jobs:
     - FunctionalStatefulTestDebug
     - FunctionalStatefulTestRelease
     - FunctionalStatefulTestReleaseDatabaseOrdinary
+    - FunctionalStatelessTestReleaseS3
    - FunctionalStatefulTestAarch64
     - FunctionalStatefulTestAsan
     - FunctionalStatefulTestTsan
```
.github/workflows/nightly.yml

```diff
@@ -6,7 +6,8 @@ env:
 
 "on":
   schedule:
-    - cron: '0 0 * * *'
+    - cron: '13 3 * * *'
+  workflow_dispatch:
 
 jobs:
   DockerHubPushAarch64:
```
.github/workflows/pull_request.yml

```diff
@@ -168,35 +168,6 @@ jobs:
       docker kill "$(docker ps -q)" ||:
       docker rm -f "$(docker ps -a -q)" ||:
       sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
-  PVSCheck:
-    needs: [DockerHubPush, FastTest]
-    runs-on: [self-hosted, func-tester]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/pvs_check
-          REPO_COPY=${{runner.temp}}/pvs_check/ClickHouse
-          EOF
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-        with:
-          submodules: 'true'
-      - name: PVS Check
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci" && python3 pvs_check.py
-      - name: Cleanup
-        if: always()
-        run: |
-          docker kill "$(docker ps -q)" ||:
-          docker rm -f "$(docker ps -a -q)" ||:
-          sudo rm -fr "$TEMP_PATH"
   CompatibilityCheck:
     needs: [BuilderDebRelease]
     runs-on: [self-hosted, style-checker]
@@ -1215,6 +1186,41 @@ jobs:
+  (adds the same 35-line FunctionalStatelessTestReleaseS3 job as in master.yml above, verbatim)
@@ -1727,6 +1733,51 @@ jobs:
       docker kill "$(docker ps -q)" ||:
       docker rm -f "$(docker ps -a -q)" ||:
       sudo rm -fr "$TEMP_PATH"
+  TestsBugfixCheck:
+    runs-on: [self-hosted, stress-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/tests_bugfix_check
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Tests bugfix validate check (actions)
+          KILL_TIMEOUT=3600
+          REPO_COPY=${{runner.temp}}/tests_bugfix_check/ClickHouse
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Clear repository
+        run: |
+          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Bugfix test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+
+          TEMP_PATH="${TEMP_PATH}/integration" \
+          REPORTS_PATH="${REPORTS_PATH}/integration" \
+          python3 integration_test_check.py "Integration tests bugfix validate check" \
+            --validate-bugfix --post-commit-status=file || echo 'ignore exit code'
+
+          TEMP_PATH="${TEMP_PATH}/stateless" \
+          REPORTS_PATH="${REPORTS_PATH}/stateless" \
+          python3 functional_test_check.py "Stateless tests bugfix validate check" "$KILL_TIMEOUT" \
+            --validate-bugfix --post-commit-status=file || echo 'ignore exit code'
+
+          python3 bugfix_validate_check.py "${TEMP_PATH}/stateless/post_commit_status.tsv" "${TEMP_PATH}/integration/post_commit_status.tsv"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill "$(docker ps -q)" ||:
+          docker rm -f "$(docker ps -a -q)" ||:
+          sudo rm -fr "$TEMP_PATH"
 ##############################################################################################
 ############################ FUNCTIONAl STATEFUL TESTS #######################################
 ##############################################################################################
@@ -3037,6 +3088,7 @@ jobs:
     - FunctionalStatefulTestTsan
     - FunctionalStatefulTestMsan
     - FunctionalStatefulTestUBsan
+    - FunctionalStatelessTestReleaseS3
     - StressTestDebug
     - StressTestAsan
     - StressTestTsan
@@ -3060,7 +3112,6 @@ jobs:
     - PerformanceComparison1
     - PerformanceComparison2
     - PerformanceComparison3
-    - PVSCheck
     - UnitTestsAsan
     - UnitTestsTsan
     - UnitTestsMsan
```
.github/workflows/release.yml

```diff
@@ -23,8 +23,8 @@ jobs:
         uses: actions/checkout@v2
       - name: Download packages and push to Artifactory
         run: |
-          rm -rf "$TEMP_PATH" && mkdir -p "$REPO_COPY"
-          cp -r "$GITHUB_WORKSPACE" "$REPO_COPY"
+          rm -rf "$TEMP_PATH" && mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
           cd "$REPO_COPY"
           python3 ./tests/ci/push_to_artifactory.py --release "${{ github.ref }}" \
             --commit '${{ github.sha }}' --all
@@ -32,7 +32,7 @@ jobs:
         uses: svenstaro/upload-release-action@v2
         with:
           repo_token: ${{ secrets.GITHUB_TOKEN }}
-          file: ${{runner.temp}}/release_packages/*
+          file: ${{runner.temp}}/push_to_artifactory/*
           overwrite: true
           tag: ${{ github.ref }}
           file_glob: true
```
.github/workflows/tags_stable.yml

```diff
@@ -27,6 +27,8 @@ jobs:
       - name: Create Pull Request
         uses: peter-evans/create-pull-request@v3
         with:
+          author: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
+          committer: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
           commit-message: Update version_date.tsv after ${{ env.GITHUB_TAG }}
           branch: auto/${{ env.GITHUB_TAG }}
           delete-branch: true
```
.gitmodules (118 changed lines: 59 `url` replacements)

All forked submodules were moved from the ClickHouse-Extras GitHub organization to the ClickHouse organization; each affected entry's `url` now reads `https://github.com/ClickHouse/<repo>` instead of `https://github.com/ClickHouse-Extras/<repo>`. The affected submodules are: contrib/poco, contrib/librdkafka, contrib/cctz, contrib/zlib-ng, contrib/llvm, contrib/mariadb-connector-c, contrib/unixodbc, contrib/protobuf, contrib/boost, contrib/base64, contrib/arrow, contrib/libhdfs3, contrib/libgsasl, contrib/libcxx, contrib/libcxxabi, contrib/snappy, contrib/h3, contrib/hyperscan, contrib/libunwind, contrib/rapidjson, contrib/fastops, contrib/orc, contrib/grpc, contrib/aws, contrib/aws-c-event-stream, contrib/aws-c-common, contrib/aws-checksums, contrib/icudata, contrib/flatbuffers, contrib/replxx, contrib/avro, contrib/libcpuid, contrib/openldap, contrib/AMQP-CPP, contrib/cassandra, contrib/libuv, contrib/sentry-native, contrib/krb5, contrib/cyrus-sasl, contrib/rocksdb, contrib/dragonbox, contrib/libpq, contrib/boringssl, contrib/NuRaft, contrib/nanodbc, contrib/datasketches-cpp, contrib/yaml-cpp, contrib/cld2, contrib/libstemmer_c, contrib/wordnet-blast, contrib/lemmagen-c, contrib/libpqxx, contrib/s2geometry, contrib/bzip2, contrib/sysroot, contrib/nlp-data, contrib/hive-metastore, and contrib/azure.

One exception: contrib/jemalloc now points at the upstream repository https://github.com/jemalloc/jemalloc.git rather than a ClickHouse fork. Branch settings (e.g. `branch = clickhouse` for poco, `clickhouse-2.0.x` for zlib-ng, `v3.13.0.1` for protobuf, `blessed/release-6.0.1` for arrow, `v1.33.2` for grpc, `MergeWithUpstream` for boringssl, `cyrus-sasl-2.1` for cyrus-sasl) and the URLs of non-forked submodules are unchanged.
CHANGELOG.md

The `### ClickHouse release v22.1, 2022-01-18` heading at the top of the file is replaced by a table of contents, and the new v22.3 section below is added:

### Table of Contents
**[ClickHouse release v22.3-lts, 2022-03-17](#223)**<br>
**[ClickHouse release v22.2, 2022-02-17](#222)**<br>
**[ClickHouse release v22.1, 2022-01-18](#221)**<br>
**[Changelog for 2021](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/whats-new/changelog/2021.md)**<br>

## <a id="223"></a> ClickHouse release v22.3-lts, 2022-03-17

#### Backward Incompatible Change

* Make the `arrayCompact` function behave like other higher-order functions: perform compaction not on the results of the lambda function but on the original array. If you are using nontrivial lambda functions in `arrayCompact`, you may restore the old behaviour by wrapping the `arrayCompact` arguments into `arrayMap` (both backward-incompatible changes are sketched after this list). Closes [#34010](https://github.com/ClickHouse/ClickHouse/issues/34010) [#18535](https://github.com/ClickHouse/ClickHouse/issues/18535) [#14778](https://github.com/ClickHouse/ClickHouse/issues/14778). [#34795](https://github.com/ClickHouse/ClickHouse/pull/34795) ([Alexandre Snarskii](https://github.com/snar)).
* Change the implementation-specific behavior on overflow of the function `toDateTime`: the result is now saturated to the nearest supported min/max DateTime instant instead of wrapping around. This change is highlighted as "backward incompatible" because someone may unintentionally rely on the old behavior. [#32898](https://github.com/ClickHouse/ClickHouse/pull/32898) ([HaiBo Li](https://github.com/marising)).
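A minimal sketch of both behavior changes (result comments are illustrative; the exact saturation boundary depends on the DateTime range of your build):

```sql
-- arrayCompact now collapses consecutive duplicates of the original values:
SELECT arrayCompact([1, 1, 2, 2, 1]);  -- [1, 2, 1]

-- To emulate the old lambda-based behaviour, apply the lambda via arrayMap first:
SELECT arrayCompact(arrayMap(x -> intDiv(x, 10), [11, 12, 23, 24]));  -- [1, 2]

-- toDateTime now saturates on overflow instead of wrapping around:
SELECT toDateTime(99999999999);  -- clamped to the maximum supported DateTime
```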

#### New Feature

* Support for caching data locally for remote filesystems. It can be enabled for `s3` disks. Closes [#28961](https://github.com/ClickHouse/ClickHouse/issues/28961). [#33717](https://github.com/ClickHouse/ClickHouse/pull/33717) ([Kseniia Sumarokova](https://github.com/kssenii)). In the meantime, we have enabled the test suite on the s3 filesystem and no known issues remain, so it is on its way to being production ready.
* Add new table function `hive`. It can be used as follows: `hive('<hive metastore url>', '<hive database>', '<hive table name>', '<columns definition>', '<partition columns>')`, for example `SELECT * FROM hive('thrift://hivetest:9083', 'test', 'demo', 'id Nullable(String), score Nullable(Int32), day Nullable(String)', 'day')`. [#34946](https://github.com/ClickHouse/ClickHouse/pull/34946) ([lgbo](https://github.com/lgbo-ustc)).
* Support authentication of users connected via SSL by their X.509 certificate. [#31484](https://github.com/ClickHouse/ClickHouse/pull/31484) ([eungenue](https://github.com/eungenue)).
* Support schema inference for inserting into table functions `file`/`hdfs`/`s3`/`url`. [#34732](https://github.com/ClickHouse/ClickHouse/pull/34732) ([Kruglov Pavel](https://github.com/Avogar)).
* Now you can read the `system.zookeeper` table without restrictions on the path, or by using a `LIKE` expression. Such reads can generate quite a heavy load on ZooKeeper, so to enable this ability you have to enable the setting `allow_unrestricted_reads_from_keeper` (example after this list). [#34609](https://github.com/ClickHouse/ClickHouse/pull/34609) ([Sergei Trifonov](https://github.com/serxa)).
* Display CPU and memory metrics in clickhouse-local. Closes [#34545](https://github.com/ClickHouse/ClickHouse/issues/34545). [#34605](https://github.com/ClickHouse/ClickHouse/pull/34605) ([李扬](https://github.com/taiyang-li)).
* Implement the `startsWith` and `endsWith` functions for arrays (example after this list), closes [#33982](https://github.com/ClickHouse/ClickHouse/issues/33982). [#34368](https://github.com/ClickHouse/ClickHouse/pull/34368) ([usurai](https://github.com/usurai)).
* Add three functions for the `Map` data type: 1. `mapReplace(map1, map2)`, which replaces values for keys in map1 with the values of the corresponding keys in map2 and adds keys from map2 that don't exist in map1; 2. `mapFilter`; 3. `mapMap`. `mapFilter` and `mapMap` are higher-order functions accepting two arguments: the first is a lambda function with a (k, v) pair as arguments, the second is a column of type `Map` (example after this list). [#33698](https://github.com/ClickHouse/ClickHouse/pull/33698) ([hexiaoting](https://github.com/hexiaoting)).
* Allow getting the default user and password for clickhouse-client from the `CLICKHOUSE_USER` and `CLICKHOUSE_PASSWORD` environment variables. Closes [#34538](https://github.com/ClickHouse/ClickHouse/issues/34538). [#34947](https://github.com/ClickHouse/ClickHouse/pull/34947) ([DR](https://github.com/freedomDR)).
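Usage sketches for three of the features above; the signatures follow the descriptions in the entries, and the result comments are assumptions:

```sql
-- Unrestricted reads from system.zookeeper (can put a heavy load on (Zoo)Keeper):
SET allow_unrestricted_reads_from_keeper = 1;
SELECT name, path FROM system.zookeeper WHERE path LIKE '/clickhouse/%';

-- startsWith / endsWith on arrays:
SELECT startsWith([1, 2, 3, 4], [1, 2]), endsWith([1, 2, 3, 4], [3, 4]);  -- 1, 1

-- Higher-order Map function: keep entries whose value is greater than 1:
SELECT mapFilter((k, v) -> v > 1, map('a', 1, 'b', 2));  -- {'b': 2}
```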
#### Experimental Feature

* New data type `Object(<schema_format>)`, which supports storing semi-structured data (for now, JSON only). Data is written to such a column as a string; all paths are then extracted according to the format of the semi-structured data and written as separate columns, using the most optimal types that can store all their values. Those columns can be queried by names that match paths in the source data, e.g. `data.key1.key2`, or with the cast operator, `data.key1.key2::Int64` (sketch after this list).
* Add the `database_replicated_allow_only_replicated_engine` setting. When enabled, it is only allowed to create `Replicated` tables or tables with stateless engines in `Replicated` databases. [#35214](https://github.com/ClickHouse/ClickHouse/pull/35214) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). Note that the `Replicated` database is still an experimental feature.
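A minimal sketch of the new `Object` type; the `allow_experimental_object_type` guard setting is an assumption based on later releases:

```sql
SET allow_experimental_object_type = 1;

CREATE TABLE json_demo (data Object('json')) ENGINE = MergeTree ORDER BY tuple();
INSERT INTO json_demo VALUES ('{"key1": {"key2": 42}}');

-- Paths in the source data are queryable as subcolumns, optionally with a cast:
SELECT data.key1.key2 FROM json_demo;
SELECT data.key1.key2::Int64 FROM json_demo;
```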
#### Performance Improvement

* Improve the performance of insertion into `MergeTree` tables by optimizing sorting. Up to 2x improvement is observed on realistic benchmarks. [#34750](https://github.com/ClickHouse/ClickHouse/pull/34750) ([Maksim Kita](https://github.com/kitaisreal)).
* Column pruning when reading Parquet, ORC and Arrow files from URL and S3. Closes [#34163](https://github.com/ClickHouse/ClickHouse/issues/34163). [#34849](https://github.com/ClickHouse/ClickHouse/pull/34849) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Column pruning when reading Parquet, ORC and Arrow files from Hive. [#34954](https://github.com/ClickHouse/ClickHouse/pull/34954) ([lgbo](https://github.com/lgbo-ustc)).
* A bunch of performance optimizations from a performance superhero. Improve the performance of processing queries with a large `IN` section. Improve the performance of the `direct` dictionary if its source is `ClickHouse`. Improve the performance of the `detectCharset` and `detectLanguageUnknown` functions. [#34888](https://github.com/ClickHouse/ClickHouse/pull/34888) ([Maksim Kita](https://github.com/kitaisreal)).
* Improve the performance of the `any` aggregate function by using more batching. [#34760](https://github.com/ClickHouse/ClickHouse/pull/34760) ([Raúl Marín](https://github.com/Algunenano)).
* Multiple performance improvements for `clickhouse-keeper`: less locking [#35010](https://github.com/ClickHouse/ClickHouse/pull/35010) ([zhanglistar](https://github.com/zhanglistar)), lower memory usage by streaming the reading and writing of snapshots instead of making a full copy [#34584](https://github.com/ClickHouse/ClickHouse/pull/34584) ([zhanglistar](https://github.com/zhanglistar)), optimized compaction of the log store in the RAFT implementation [#34534](https://github.com/ClickHouse/ClickHouse/pull/34534) ([zhanglistar](https://github.com/zhanglistar)), and versioning of the internal data structure [#34486](https://github.com/ClickHouse/ClickHouse/pull/34486) ([zhanglistar](https://github.com/zhanglistar)).
#### Improvement

* Allow asynchronous inserts to table functions. Fixes [#34864](https://github.com/ClickHouse/ClickHouse/issues/34864). [#34866](https://github.com/ClickHouse/ClickHouse/pull/34866) ([Anton Popov](https://github.com/CurtizJ)).
* Implicit type casting of the key argument for the functions `dictGetHierarchy`, `dictIsIn`, `dictGetChildren`, `dictGetDescendants`. Closes [#34970](https://github.com/ClickHouse/ClickHouse/issues/34970). [#35027](https://github.com/ClickHouse/ClickHouse/pull/35027) ([Maksim Kita](https://github.com/kitaisreal)).
* The `EXPLAIN AST` query can output the AST in the form of a graph in Graphviz format: `EXPLAIN AST graph = 1 SELECT * FROM system.parts`. [#35173](https://github.com/ClickHouse/ClickHouse/pull/35173) ([李扬](https://github.com/taiyang-li)).
* When large files were written with the `s3` table function or table engine, the content type on the files was mistakenly set to `application/xml` due to a bug in the AWS SDK. This closes [#33964](https://github.com/ClickHouse/ClickHouse/issues/33964). [#34433](https://github.com/ClickHouse/ClickHouse/pull/34433) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Change restrictive row policies a bit to make them an easier alternative to permissive policies in easy cases. If for a particular table only restrictive policies exist (without permissive policies), users will be able to see some rows. Also, `SHOW CREATE ROW POLICY` will always show `AS permissive` or `AS restrictive` in the row policy's definition (example after this list). [#34596](https://github.com/ClickHouse/ClickHouse/pull/34596) ([Vitaly Baranov](https://github.com/vitlibar)).
* Improve schema inference with globs in the File/S3/HDFS/URL engines. Try to use the next path for schema inference in case of an error. [#34465](https://github.com/ClickHouse/ClickHouse/pull/34465) ([Kruglov Pavel](https://github.com/Avogar)).
* Play UI now correctly detects the preferred light/dark theme from the OS. [#35068](https://github.com/ClickHouse/ClickHouse/pull/35068) ([peledni](https://github.com/peledni)).
* Added `date_time_input_format = 'best_effort_us'`. Closes [#34799](https://github.com/ClickHouse/ClickHouse/issues/34799). [#34982](https://github.com/ClickHouse/ClickHouse/pull/34982) ([WenYao](https://github.com/Cai-Yao)).
* New settings `allow_plaintext_password` and `allow_no_password` are added to the server configuration; they turn on/off authentication types that can be potentially insecure in some environments. Both are allowed by default. [#34738](https://github.com/ClickHouse/ClickHouse/pull/34738) ([Heena Bansal](https://github.com/HeenaBansal2009)).
* Support for the `DateTime64` data type in the `Arrow` format, closes [#8280](https://github.com/ClickHouse/ClickHouse/issues/8280) and closes [#28574](https://github.com/ClickHouse/ClickHouse/issues/28574). [#34561](https://github.com/ClickHouse/ClickHouse/pull/34561) ([李扬](https://github.com/taiyang-li)).
* Reload `remote_url_allow_hosts` (filtering of outgoing connections) on config update. [#35294](https://github.com/ClickHouse/ClickHouse/pull/35294) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Support the `--testmode` parameter for `clickhouse-local`. This parameter enables interpretation of the test hints that we use in functional tests. [#35264](https://github.com/ClickHouse/ClickHouse/pull/35264) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add `distributed_depth` to the query log. It is like a more detailed variant of `is_initial_query`. [#35207](https://github.com/ClickHouse/ClickHouse/pull/35207) ([李扬](https://github.com/taiyang-li)).
* Respect `remote_url_allow_hosts` for the `MySQL` and `PostgreSQL` table functions. [#35191](https://github.com/ClickHouse/ClickHouse/pull/35191) ([Heena Bansal](https://github.com/HeenaBansal2009)).
* Added a `disk_name` field to `system.part_log`. [#35178](https://github.com/ClickHouse/ClickHouse/pull/35178) ([Artyom Yurkov](https://github.com/Varinara)).
* Do not retry non-retriable errors when querying remote URLs. Closes [#35161](https://github.com/ClickHouse/ClickHouse/issues/35161). [#35172](https://github.com/ClickHouse/ClickHouse/pull/35172) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Support distributed INSERT SELECT queries (the setting `parallel_distributed_insert_select`) with the table function `view()`. [#35132](https://github.com/ClickHouse/ClickHouse/pull/35132) ([Azat Khuzhin](https://github.com/azat)).
* More precise memory tracking during `INSERT` into `Buffer` with `AggregateFunction`. [#35072](https://github.com/ClickHouse/ClickHouse/pull/35072) ([Azat Khuzhin](https://github.com/azat)).
* Avoid division by zero in the Query Profiler if the Linux kernel has a bug. Closes [#34787](https://github.com/ClickHouse/ClickHouse/issues/34787). [#35032](https://github.com/ClickHouse/ClickHouse/pull/35032) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add more sanity checks for keeper configuration: mixing of localhost and non-local servers is no longer allowed, and checks for identical values of the internal raft port and the keeper client port were added. [#35004](https://github.com/ClickHouse/ClickHouse/pull/35004) ([alesapin](https://github.com/alesapin)).
* Previously, if the user changed the settings of the system tables, there would be tons of logs and ClickHouse would rename the tables every minute. This fixes [#34929](https://github.com/ClickHouse/ClickHouse/issues/34929). [#34949](https://github.com/ClickHouse/ClickHouse/pull/34949) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Use a connection pool for the Hive metastore client. [#34940](https://github.com/ClickHouse/ClickHouse/pull/34940) ([lgbo](https://github.com/lgbo-ustc)).
* Ignore per-column `TTL` in `CREATE TABLE AS` if the new table engine does not support it (i.e. if the engine is not of the `MergeTree` family). [#34938](https://github.com/ClickHouse/ClickHouse/pull/34938) ([Azat Khuzhin](https://github.com/azat)).
* Allow `LowCardinality` strings for `ngrambf_v1`/`tokenbf_v1` indexes (example after this list). Closes [#21865](https://github.com/ClickHouse/ClickHouse/issues/21865). [#34911](https://github.com/ClickHouse/ClickHouse/pull/34911) ([Lars Hiller Eidnes](https://github.com/larspars)).
* Allow opening an empty sqlite db if the file doesn't exist. Closes [#33367](https://github.com/ClickHouse/ClickHouse/issues/33367). [#34907](https://github.com/ClickHouse/ClickHouse/pull/34907) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Implement memory statistics for FreeBSD; this is required for `max_server_memory_usage` to work correctly. [#34902](https://github.com/ClickHouse/ClickHouse/pull/34902) ([Alexandre Snarskii](https://github.com/snar)).
* In previous versions the progress bar in clickhouse-client could jump forward to near 50% for no reason. This closes [#34324](https://github.com/ClickHouse/ClickHouse/issues/34324). [#34801](https://github.com/ClickHouse/ClickHouse/pull/34801) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Now `ALTER TABLE DROP COLUMN columnX` queries for `MergeTree` table engines will work instantly when `columnX` is an `ALIAS` column. Fixes [#34660](https://github.com/ClickHouse/ClickHouse/issues/34660). [#34786](https://github.com/ClickHouse/ClickHouse/pull/34786) ([alesapin](https://github.com/alesapin)).
* Show hints when the user mistypes the name of a data skipping index. Closes [#29698](https://github.com/ClickHouse/ClickHouse/issues/29698). [#34764](https://github.com/ClickHouse/ClickHouse/pull/34764) ([flynn](https://github.com/ucasfl)).
* Support the `remote()`/`cluster()` table functions for `parallel_distributed_insert_select`. [#34728](https://github.com/ClickHouse/ClickHouse/pull/34728) ([Azat Khuzhin](https://github.com/azat)).
* Do not reset logging configured via the `--log-file`/`--errorlog-file` command line options in case of an empty configuration in the config file. [#34718](https://github.com/ClickHouse/ClickHouse/pull/34718) ([Amos Bird](https://github.com/amosbird)).
* Extract the schema only once on table creation, and prevent reading from local files/external sources to extract the schema on each server startup. [#34684](https://github.com/ClickHouse/ClickHouse/pull/34684) ([Kruglov Pavel](https://github.com/Avogar)).
* Allow specifying argument names for executable UDFs. This is necessary for formats where the argument name is part of the serialization, like `Native` and `JSONEachRow`. Closes [#34604](https://github.com/ClickHouse/ClickHouse/issues/34604). [#34653](https://github.com/ClickHouse/ClickHouse/pull/34653) ([Maksim Kita](https://github.com/kitaisreal)).
* `MaterializedMySQL` (experimental feature) now supports `materialized_mysql_tables_list` (a comma-separated list of MySQL database tables which will be replicated by the MaterializedMySQL database engine; default value: empty list, meaning all tables will be replicated), mentioned at [#32977](https://github.com/ClickHouse/ClickHouse/issues/32977). [#34487](https://github.com/ClickHouse/ClickHouse/pull/34487) ([zzsmdfj](https://github.com/zzsmdfj)).
* Improve OpenTelemetry span logs for the INSERT operation on distributed tables. [#34480](https://github.com/ClickHouse/ClickHouse/pull/34480) ([Frank Chen](https://github.com/FrankChen021)).
* Make the znode `ctime` and `mtime` consistent between servers in ClickHouse Keeper. [#33441](https://github.com/ClickHouse/ClickHouse/pull/33441) ([小路](https://github.com/nicelulu)).
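Sketches for two of the items above (the database, table, and policy names are made up):

```sql
-- Restrictive row policies: if only restrictive policies exist for a table,
-- matching rows are now visible, and SHOW CREATE ROW POLICY spells out the kind:
CREATE ROW POLICY pol ON db.events AS restrictive FOR SELECT USING user_id = 42 TO ALL;
SHOW CREATE ROW POLICY pol ON db.events;

-- LowCardinality(String) now works with token/ngram bloom-filter indexes:
CREATE TABLE db.logs
(
    message LowCardinality(String),
    INDEX idx_message message TYPE tokenbf_v1(512, 3, 0) GRANULARITY 4
)
ENGINE = MergeTree ORDER BY tuple();
```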
#### Build/Testing/Packaging Improvement

* The package repository is migrated to JFrog Artifactory (**Mikhail f. Shiryaev**).
* Randomize some settings in functional tests, so more possible combinations of settings will be tested. This is yet another fuzzing method to ensure better test coverage. This closes [#32268](https://github.com/ClickHouse/ClickHouse/issues/32268). [#34092](https://github.com/ClickHouse/ClickHouse/pull/34092) ([Kruglov Pavel](https://github.com/Avogar)).
* Drop PVS-Studio from our CI. [#34680](https://github.com/ClickHouse/ClickHouse/pull/34680) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Add the ability to build stripped binaries with CMake. In previous versions this was performed by dh-tools. [#35196](https://github.com/ClickHouse/ClickHouse/pull/35196) ([alesapin](https://github.com/alesapin)).
* Smaller "fat-free" `clickhouse-keeper` build. [#35031](https://github.com/ClickHouse/ClickHouse/pull/35031) ([alesapin](https://github.com/alesapin)).
* Use @robot-clickhouse as the author and committer for PRs like https://github.com/ClickHouse/ClickHouse/pull/34685. [#34793](https://github.com/ClickHouse/ClickHouse/pull/34793) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Limit the DWARF version for debug info to at most 4, because our internal stack symbolizer cannot parse DWARF version 5. This matters if you compile ClickHouse with clang-15. [#34777](https://github.com/ClickHouse/ClickHouse/pull/34777) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove the `clickhouse-test` debian package as an unneeded complication. CI uses tests from the repository, and standalone testing via the deb package is no longer supported. [#34606](https://github.com/ClickHouse/ClickHouse/pull/34606) ([Ilya Yatsishin](https://github.com/qoega)).
#### Bug Fix (user-visible misbehaviour in official stable or prestable release)
|
||||
|
||||
* A fix for HDFS integration: When the inner buffer size is too small, NEED_MORE_INPUT in `HadoopSnappyDecoder` will run multi times (>=3) for one compressed block. This makes the input data be copied into the wrong place in `HadoopSnappyDecoder::buffer`. [#35116](https://github.com/ClickHouse/ClickHouse/pull/35116) ([lgbo](https://github.com/lgbo-ustc)).
|
||||
* Ignore obsolete grants in ATTACH GRANT statements. This PR fixes [#34815](https://github.com/ClickHouse/ClickHouse/issues/34815). [#34855](https://github.com/ClickHouse/ClickHouse/pull/34855) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
* Fix segfault in Postgres database when getting create table query if database was created using named collections. Closes [#35312](https://github.com/ClickHouse/ClickHouse/issues/35312). [#35313](https://github.com/ClickHouse/ClickHouse/pull/35313) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix partial merge join duplicate rows bug, close [#31009](https://github.com/ClickHouse/ClickHouse/issues/31009). [#35311](https://github.com/ClickHouse/ClickHouse/pull/35311) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Fix possible `Assertion 'position() != working_buffer.end()' failed` while using bzip2 compression with small `max_read_buffer_size` setting value. The bug was found in https://github.com/ClickHouse/ClickHouse/pull/35047. [#35300](https://github.com/ClickHouse/ClickHouse/pull/35300) ([Kruglov Pavel](https://github.com/Avogar)). While using lz4 compression with a small max_read_buffer_size setting value. [#35296](https://github.com/ClickHouse/ClickHouse/pull/35296) ([Kruglov Pavel](https://github.com/Avogar)). While using lzma compression with small `max_read_buffer_size` setting value. [#35295](https://github.com/ClickHouse/ClickHouse/pull/35295) ([Kruglov Pavel](https://github.com/Avogar)). While using `brotli` compression with a small `max_read_buffer_size` setting value. The bug was found in https://github.com/ClickHouse/ClickHouse/pull/35047. [#35281](https://github.com/ClickHouse/ClickHouse/pull/35281) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix possible segfault in `JSONEachRow` schema inference. [#35291](https://github.com/ClickHouse/ClickHouse/pull/35291) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix `CHECK TABLE` query in case when sparse columns are enabled in table. [#35274](https://github.com/ClickHouse/ClickHouse/pull/35274) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Avoid std::terminate in case of exception in reading from remote VFS. [#35257](https://github.com/ClickHouse/ClickHouse/pull/35257) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix reading port from config, close [#34776](https://github.com/ClickHouse/ClickHouse/issues/34776). [#35193](https://github.com/ClickHouse/ClickHouse/pull/35193) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Fix error in query with `WITH TOTALS` in case if `HAVING` returned empty result. This fixes [#33711](https://github.com/ClickHouse/ClickHouse/issues/33711). [#35186](https://github.com/ClickHouse/ClickHouse/pull/35186) ([Amos Bird](https://github.com/amosbird)).
|
||||
* Fix a corner case of `replaceRegexpAll`, close [#35117](https://github.com/ClickHouse/ClickHouse/issues/35117). [#35182](https://github.com/ClickHouse/ClickHouse/pull/35182) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Schema inference didn't work properly on case of `INSERT INTO FUNCTION s3(...) FROM ...`, it tried to read schema from s3 file instead of from select query. [#35176](https://github.com/ClickHouse/ClickHouse/pull/35176) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix MaterializedPostgreSQL (experimental feature) `table overrides` for partition by, etc. Closes [#35048](https://github.com/ClickHouse/ClickHouse/issues/35048). [#35162](https://github.com/ClickHouse/ClickHouse/pull/35162) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix MaterializedPostgreSQL (experimental feature) adding new table to replication (ATTACH TABLE) after manually removing (DETACH TABLE). Closes [#33800](https://github.com/ClickHouse/ClickHouse/issues/33800). Closes [#34922](https://github.com/ClickHouse/ClickHouse/issues/34922). Closes [#34315](https://github.com/ClickHouse/ClickHouse/issues/34315). [#35158](https://github.com/ClickHouse/ClickHouse/pull/35158) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix partition pruning error when non-monotonic function is used with IN operator. This fixes [#35136](https://github.com/ClickHouse/ClickHouse/issues/35136). [#35146](https://github.com/ClickHouse/ClickHouse/pull/35146) ([Amos Bird](https://github.com/amosbird)).
|
||||
* Fixed slightly incorrect translation of YAML configs to XML. [#35135](https://github.com/ClickHouse/ClickHouse/pull/35135) ([Miel Donkers](https://github.com/mdonkers)).
|
||||
* Fix `optimize_skip_unused_shards_rewrite_in` for signed columns and negative values. [#35134](https://github.com/ClickHouse/ClickHouse/pull/35134) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* The `update_lag` external dictionary configuration option was unusable showing the error message ``Unexpected key `update_lag` in dictionary source configuration``. [#35089](https://github.com/ClickHouse/ClickHouse/pull/35089) ([Jason Chu](https://github.com/1lann)).
|
||||
* Avoid possible deadlock on server shutdown. [#35081](https://github.com/ClickHouse/ClickHouse/pull/35081) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix missing alias after function is optimized to a subcolumn when setting `optimize_functions_to_subcolumns` is enabled. Closes [#33798](https://github.com/ClickHouse/ClickHouse/issues/33798). [#35079](https://github.com/ClickHouse/ClickHouse/pull/35079) ([qieqieplus](https://github.com/qieqieplus)).
|
||||
* Fix reading from `system.asynchronous_inserts` table if there exists asynchronous insert into table function. [#35050](https://github.com/ClickHouse/ClickHouse/pull/35050) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Fix possible exception `Reading for MergeTree family tables must be done with last position boundary` (relevant to operation on remote VFS). Closes [#34979](https://github.com/ClickHouse/ClickHouse/issues/34979). [#35001](https://github.com/ClickHouse/ClickHouse/pull/35001) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix unexpected result when using a `-State` type aggregate function in a window frame. [#34999](https://github.com/ClickHouse/ClickHouse/pull/34999) ([metahys](https://github.com/metahys)).
* Fix possible segfault in FileLog (experimental feature). Closes [#30749](https://github.com/ClickHouse/ClickHouse/issues/30749). [#34996](https://github.com/ClickHouse/ClickHouse/pull/34996) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix possible rare error `Cannot push block to port which already has data`. [#34993](https://github.com/ClickHouse/ClickHouse/pull/34993) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix wrong schema inference for unquoted dates in CSV. Closes [#34768](https://github.com/ClickHouse/ClickHouse/issues/34768). [#34961](https://github.com/ClickHouse/ClickHouse/pull/34961) ([Kruglov Pavel](https://github.com/Avogar)).
* Integration with Hive: Fix unexpected result when using `IN` in `WHERE` in a Hive query. [#34945](https://github.com/ClickHouse/ClickHouse/pull/34945) ([lgbo](https://github.com/lgbo-ustc)).
* Avoid busy polling in ClickHouse Keeper while searching for changelog files to delete. [#34931](https://github.com/ClickHouse/ClickHouse/pull/34931) ([Azat Khuzhin](https://github.com/azat)).
* Fix DateTime64 conversion from PostgreSQL. Closes [#33364](https://github.com/ClickHouse/ClickHouse/issues/33364). [#34910](https://github.com/ClickHouse/ClickHouse/pull/34910) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix possible "Part directory doesn't exist" during `INSERT` into a MergeTree table backed by VFS over S3. [#34876](https://github.com/ClickHouse/ClickHouse/pull/34876) ([Azat Khuzhin](https://github.com/azat)).
* Support executing DDLs like `CREATE USER` on a cross-replicated cluster (see the sketch after this list). [#34860](https://github.com/ClickHouse/ClickHouse/pull/34860) ([Jianmei Zhang](https://github.com/zhangjmruc)).
* Fix bugs for multiple-column GROUP BY in `WindowView` (experimental feature). [#34859](https://github.com/ClickHouse/ClickHouse/pull/34859) ([vxider](https://github.com/Vxider)).
* Fix possible failures in S2 functions when queries contain const columns. [#34745](https://github.com/ClickHouse/ClickHouse/pull/34745) ([Bharat Nallan](https://github.com/bharatnc)).
* Fix a bug that caused queries using H3 functions with const columns to fail. [#34743](https://github.com/ClickHouse/ClickHouse/pull/34743) ([Bharat Nallan](https://github.com/bharatnc)).
* Fix `No such file or directory` with enabled `fsync_part_directory` and vertical merge. [#34739](https://github.com/ClickHouse/ClickHouse/pull/34739) ([Azat Khuzhin](https://github.com/azat)).
* Fix serialization/printing for the system queries `RELOAD MODEL`, `RELOAD FUNCTION`, `RESTART DISK` when `ON CLUSTER` is used. Closes [#34514](https://github.com/ClickHouse/ClickHouse/issues/34514). [#34696](https://github.com/ClickHouse/ClickHouse/pull/34696) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix `allow_experimental_projection_optimization` with `enable_global_with_statement` (previously it could lead to a `Stack size too large` error in case of multiple expressions in the `WITH` clause, and it also executed scalar subqueries again and again, so now it is more optimal). [#34650](https://github.com/ClickHouse/ClickHouse/pull/34650) ([Azat Khuzhin](https://github.com/azat)).
* Stop selecting a part for mutation when the other replica has already updated the transaction log for the `ReplicatedMergeTree` engine. [#34633](https://github.com/ClickHouse/ClickHouse/pull/34633) ([Jianmei Zhang](https://github.com/zhangjmruc)).
* Fix incorrect result of trivial count query when the part movement feature is used [#34089](https://github.com/ClickHouse/ClickHouse/issues/34089). [#34385](https://github.com/ClickHouse/ClickHouse/pull/34385) ([nvartolomei](https://github.com/nvartolomei)).
* Fix inconsistency of the `max_query_size` limitation in distributed subqueries. [#34078](https://github.com/ClickHouse/ClickHouse/pull/34078) ([Chao Ma](https://github.com/godliness)).
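
The cross-replicated-cluster DDL entry above is easier to see with a concrete statement. A minimal sketch, assuming a cluster named `my_cluster` is defined in the server's `remote_servers` config (the cluster name and credentials are illustrative):

```sql
-- Access-management DDL propagated to every replica of the cluster.
CREATE USER alice ON CLUSTER my_cluster
    IDENTIFIED WITH sha256_password BY 'secret';
```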

### <a id="222"></a> ClickHouse release v22.2, 2022-02-17

#### Upgrade Notes

* Applying data skipping indexes for queries with FINAL may produce an incorrect result. In this release we disabled data skipping indexes by default for queries with FINAL (a new setting `use_skip_indexes_if_final` is introduced and disabled by default). [#34243](https://github.com/ClickHouse/ClickHouse/pull/34243) ([Azat Khuzhin](https://github.com/azat)).

#### New Feature

* Projections are production ready. Enable `allow_experimental_projection_optimization` by default and deprecate this setting. [#34456](https://github.com/ClickHouse/ClickHouse/pull/34456) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* An option to create new files on insert for the `File`/`S3`/`HDFS` engines. Allow to overwrite a file in `HDFS`. Throw an exception on an attempt to overwrite a file in `S3` by default. Throw an exception on an attempt to append data to a file in formats that have a suffix (and thus don't support appends, like `Parquet`, `ORC`). Closes [#31640](https://github.com/ClickHouse/ClickHouse/issues/31640) Closes [#31622](https://github.com/ClickHouse/ClickHouse/issues/31622) Closes [#23862](https://github.com/ClickHouse/ClickHouse/issues/23862) Closes [#15022](https://github.com/ClickHouse/ClickHouse/issues/15022) Closes [#16674](https://github.com/ClickHouse/ClickHouse/issues/16674). [#33302](https://github.com/ClickHouse/ClickHouse/pull/33302) ([Kruglov Pavel](https://github.com/Avogar)).
* Add a setting that allows a user to provide their own deduplication semantics in `MergeTree`/`ReplicatedMergeTree`. If provided, it's used instead of the data digest to generate the block ID. So, for example, by providing a unique value for the setting in each INSERT statement, the user can avoid the same inserted data being deduplicated. This closes: [#7461](https://github.com/ClickHouse/ClickHouse/issues/7461). [#32304](https://github.com/ClickHouse/ClickHouse/pull/32304) ([Igor Nikonov](https://github.com/devcrafter)).
* Add support for the `DEFAULT` keyword in INSERT statements (see the sketch after this list). Closes [#6331](https://github.com/ClickHouse/ClickHouse/issues/6331). [#33141](https://github.com/ClickHouse/ClickHouse/pull/33141) ([Andrii Buriachevskyi](https://github.com/1over)).
* `EPHEMERAL` column specifier is added to the `CREATE TABLE` query. Closes [#9436](https://github.com/ClickHouse/ClickHouse/issues/9436). [#34424](https://github.com/ClickHouse/ClickHouse/pull/34424) ([yakov-olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Support the `IF EXISTS` clause for the `TTL expr TO [DISK|VOLUME] [IF EXISTS] 'xxx'` feature. Parts will be moved to a disk or volume only if it exists on the replica, so `MOVE TTL` rules will be able to behave differently on replicas according to the existing storage policies. Resolves [#34455](https://github.com/ClickHouse/ClickHouse/issues/34455). [#34504](https://github.com/ClickHouse/ClickHouse/pull/34504) ([Anton Popov](https://github.com/CurtizJ)).
* Allow setting a default table engine and creating tables without specifying `ENGINE`. [#34187](https://github.com/ClickHouse/ClickHouse/pull/34187) ([Ilya Yatsishin](https://github.com/qoega)).
* Add table function `format(format_name, data)`. [#34125](https://github.com/ClickHouse/ClickHouse/pull/34125) ([Kruglov Pavel](https://github.com/Avogar)).
* Detect the format in `clickhouse-local` by file name even when it is passed to stdin. [#33829](https://github.com/ClickHouse/ClickHouse/pull/33829) ([Kruglov Pavel](https://github.com/Avogar)).
* Add schema inference for the `values` table function. Closes [#33811](https://github.com/ClickHouse/ClickHouse/issues/33811). [#34017](https://github.com/ClickHouse/ClickHouse/pull/34017) ([Kruglov Pavel](https://github.com/Avogar)).
* Dynamic reload of server TLS certificates on config reload. Closes [#15764](https://github.com/ClickHouse/ClickHouse/issues/15764). [#15765](https://github.com/ClickHouse/ClickHouse/pull/15765) ([johnskopis](https://github.com/johnskopis)). [#31257](https://github.com/ClickHouse/ClickHouse/pull/31257) ([Filatenkov Artur](https://github.com/FArthur-cmd)).
* Now `ReplicatedMergeTree` can recover data when some of its disks are broken. [#13544](https://github.com/ClickHouse/ClickHouse/pull/13544) ([Amos Bird](https://github.com/amosbird)).
* Fault-tolerant connections in `clickhouse-client`: `clickhouse-client ... --host host1 --host host2 --port port2 --host host3 --port port --host host4`. [#34490](https://github.com/ClickHouse/ClickHouse/pull/34490) ([Kruglov Pavel](https://github.com/Avogar)). [#33824](https://github.com/ClickHouse/ClickHouse/pull/33824) ([Filippov Denis](https://github.com/DF5HSE)).
* Add `DEGREES` and `RADIANS` functions for MySQL compatibility. [#33769](https://github.com/ClickHouse/ClickHouse/pull/33769) ([Bharat Nallan](https://github.com/bharatnc)).
* Add `h3ToCenterChild` function. [#33313](https://github.com/ClickHouse/ClickHouse/pull/33313) ([Bharat Nallan](https://github.com/bharatnc)). Add new H3 miscellaneous functions: `edgeLengthKm`, `exactEdgeLengthKm`, `exactEdgeLengthM`, `exactEdgeLengthRads`, `numHexagons`. [#33621](https://github.com/ClickHouse/ClickHouse/pull/33621) ([Bharat Nallan](https://github.com/bharatnc)).
* Add function `bitSlice` to extract bit subsequences from String/FixedString. [#33360](https://github.com/ClickHouse/ClickHouse/pull/33360) ([RogerYK](https://github.com/RogerYK)).
* Implemented `meanZTest` aggregate function. [#33354](https://github.com/ClickHouse/ClickHouse/pull/33354) ([achimbab](https://github.com/achimbab)).
* Add confidence intervals to T-test aggregate functions. [#33260](https://github.com/ClickHouse/ClickHouse/pull/33260) ([achimbab](https://github.com/achimbab)).
* Add function `addressToLineWithInlines`. Closes [#26211](https://github.com/ClickHouse/ClickHouse/issues/26211). [#33467](https://github.com/ClickHouse/ClickHouse/pull/33467) ([SuperDJY](https://github.com/cmsxbc)).
* Added `#!` and `# ` as a recognised start of a single-line comment. Closes [#34138](https://github.com/ClickHouse/ClickHouse/issues/34138). [#34230](https://github.com/ClickHouse/ClickHouse/pull/34230) ([Aaron Katz](https://github.com/aaronstephenkatz)).
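
A short sketch combining two of the entries above: the `EPHEMERAL` column specifier and the `DEFAULT` keyword in `INSERT`. Table and column names are illustrative, and the exact `EPHEMERAL` semantics should be checked against the documentation:

```sql
-- `raw` is accepted on INSERT and usable in default expressions,
-- but is not stored in the table itself.
CREATE TABLE users
(
    raw String EPHEMERAL,
    name String DEFAULT upper(raw),
    status String DEFAULT 'active'
)
ENGINE = MergeTree ORDER BY name;

-- DEFAULT asks the server to fill the column from its default expression.
INSERT INTO users (raw, status) VALUES ('alice', DEFAULT);
```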

#### Experimental Feature

* Functions for text classification: language and charset detection. See [#23271](https://github.com/ClickHouse/ClickHouse/issues/23271). [#33314](https://github.com/ClickHouse/ClickHouse/pull/33314) ([Nikolay Degterinsky](https://github.com/evillique)).
* Add memory overcommit to `MemoryTracker`. Added `guaranteed` settings for memory limits, which represent soft memory limits. When the hard memory limit is reached, `MemoryTracker` tries to cancel the most overcommitted query. A new setting `memory_usage_overcommit_max_wait_microseconds` specifies how long a query may wait for another query to stop (see the sketch after this list). Closes [#28375](https://github.com/ClickHouse/ClickHouse/issues/28375). [#31182](https://github.com/ClickHouse/ClickHouse/pull/31182) ([Dmitry Novik](https://github.com/novikd)).
* Enable stream-to-table join in `WindowView`. [#33729](https://github.com/ClickHouse/ClickHouse/pull/33729) ([vxider](https://github.com/Vxider)).
* Support `SET`, `YEAR`, `TIME` and `GEOMETRY` data types in `MaterializedMySQL` (experimental feature). Fixes [#18091](https://github.com/ClickHouse/ClickHouse/issues/18091), [#21536](https://github.com/ClickHouse/ClickHouse/issues/21536), [#26361](https://github.com/ClickHouse/ClickHouse/issues/26361). [#33429](https://github.com/ClickHouse/ClickHouse/pull/33429) ([zzsmdfj](https://github.com/zzsmdfj)).
* Fix various issues when projection is enabled by default. Each issue is described in a separate commit. This is for [#33678](https://github.com/ClickHouse/ClickHouse/issues/33678). This fixes [#34273](https://github.com/ClickHouse/ClickHouse/issues/34273). [#34305](https://github.com/ClickHouse/ClickHouse/pull/34305) ([Amos Bird](https://github.com/amosbird)).
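
Of these, memory overcommit is the one most likely to need tuning. A hedged sketch of the per-query wait described above, showing only the setting named in the entry (the value is arbitrary; the `guaranteed` limit settings are not named in the entry and are omitted):

```sql
-- How long a query may wait for another (overcommitted) query to stop
-- before failing with a memory-limit error.
SET memory_usage_overcommit_max_wait_microseconds = 200000;
```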

#### Performance Improvement

* Support `optimize_read_in_order` if a prefix of the sorting key is already sorted. E.g. if a table has sorting key `ORDER BY (a, b)` and the query has `WHERE a = const ORDER BY b` clauses, reading in the order of the sorting key is now applied instead of a full sort (see the sketch after this list). [#32748](https://github.com/ClickHouse/ClickHouse/pull/32748) ([Anton Popov](https://github.com/CurtizJ)).
* Improve performance of partitioned insert into the table functions `URL`, `S3`, `File`, `HDFS`. Closes [#34348](https://github.com/ClickHouse/ClickHouse/issues/34348). [#34510](https://github.com/ClickHouse/ClickHouse/pull/34510) ([Maksim Kita](https://github.com/kitaisreal)).
* Multiple performance improvements of clickhouse-keeper. [#34484](https://github.com/ClickHouse/ClickHouse/pull/34484) [#34587](https://github.com/ClickHouse/ClickHouse/pull/34587) ([zhanglistar](https://github.com/zhanglistar)).
* Improve performance of dictionary data load for `FlatDictionary`. [#33871](https://github.com/ClickHouse/ClickHouse/pull/33871) ([Maksim Kita](https://github.com/kitaisreal)).
* Improve performance of the `mapPopulateSeries` function. Closes [#33944](https://github.com/ClickHouse/ClickHouse/issues/33944). [#34318](https://github.com/ClickHouse/ClickHouse/pull/34318) ([Maksim Kita](https://github.com/kitaisreal)).
* `_file` and `_path` virtual columns (in file-like table engines) are made `LowCardinality`, which makes queries over multiple files faster. Closes [#34300](https://github.com/ClickHouse/ClickHouse/issues/34300). [#34317](https://github.com/ClickHouse/ClickHouse/pull/34317) ([flynn](https://github.com/ucasfl)).
* Speed up loading of data parts. It was not parallelized before: the setting `part_loading_threads` did not have any effect. See [#4699](https://github.com/ClickHouse/ClickHouse/issues/4699). [#34310](https://github.com/ClickHouse/ClickHouse/pull/34310) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Improve performance of the `LineAsString` format. This closes [#34303](https://github.com/ClickHouse/ClickHouse/issues/34303). [#34306](https://github.com/ClickHouse/ClickHouse/pull/34306) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Optimize `quantilesExact{Low,High}` to use `nth_element` instead of `sort`. [#34287](https://github.com/ClickHouse/ClickHouse/pull/34287) ([Danila Kutenin](https://github.com/danlark1)).
* Slightly improve performance of the `Regexp` format. [#34202](https://github.com/ClickHouse/ClickHouse/pull/34202) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Minor improvement for analysis of scalar subqueries. [#34128](https://github.com/ClickHouse/ClickHouse/pull/34128) ([Federico Rodriguez](https://github.com/fedrod)).
* Make `ORDER BY` tuple almost as fast as `ORDER BY` columns. We have special optimizations for multiple-column `ORDER BY`: https://github.com/ClickHouse/ClickHouse/pull/10831. It's beneficial to also apply them to tuple columns. [#34060](https://github.com/ClickHouse/ClickHouse/pull/34060) ([Amos Bird](https://github.com/amosbird)).
* Rework and reintroduce the scalar subqueries cache to Materialized Views execution. [#33958](https://github.com/ClickHouse/ClickHouse/pull/33958) ([Raúl Marín](https://github.com/Algunenano)).
* Slightly improve performance of `ORDER BY` by adding x86-64 AVX-512 support for `memcmpSmall` functions to accelerate memory comparison. It works only if you compile ClickHouse by yourself. [#33706](https://github.com/ClickHouse/ClickHouse/pull/33706) ([hanqf-git](https://github.com/hanqf-git)).
* Improve `range_hashed` dictionary performance when a key has a lot of intervals. Fixes [#23821](https://github.com/ClickHouse/ClickHouse/issues/23821). [#33516](https://github.com/ClickHouse/ClickHouse/pull/33516) ([Maksim Kita](https://github.com/kitaisreal)).
* For inserts and merges into S3, write files in parallel whenever possible (TODO: check if it's merged). [#33291](https://github.com/ClickHouse/ClickHouse/pull/33291) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Improve `clickhouse-keeper` performance and fix several memory leaks in the NuRaft library. [#33329](https://github.com/ClickHouse/ClickHouse/pull/33329) ([alesapin](https://github.com/alesapin)).
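
The sorting-key-prefix optimization at the top of this list is easiest to see on a concrete table (names are illustrative):

```sql
CREATE TABLE events (a UInt32, b UInt32, v String)
ENGINE = MergeTree ORDER BY (a, b);

-- The filter fixes the prefix `a`, so rows can be read
-- in the order of `b` instead of doing a full sort.
SELECT v FROM events WHERE a = 42 ORDER BY b LIMIT 10;
```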

#### Improvement

* Support asynchronous inserts in `clickhouse-client` for queries with inlined data. [#34267](https://github.com/ClickHouse/ClickHouse/pull/34267) ([Anton Popov](https://github.com/CurtizJ)).
* Functions `dictGet`, `dictHas` implicitly cast the key argument to the dictionary key structure if the types differ. [#33672](https://github.com/ClickHouse/ClickHouse/pull/33672) ([Maksim Kita](https://github.com/kitaisreal)).
* Improvements for `range_hashed` dictionaries. Improve performance of load time if there are multiple attributes. Allow to create a dictionary without attributes. Added an option `convert_null_range_bound_to_open` (`true` by default) to specify the strategy when the intervals `start` and `end` have `Nullable` type. Closes [#29791](https://github.com/ClickHouse/ClickHouse/issues/29791). Allow to specify `Float`, `Decimal`, `DateTime64`, `Int128`, `Int256`, `UInt128`, `UInt256` as range types. `RangeHashedDictionary` added support for range values that extend the `Int64` type. Closes [#28322](https://github.com/ClickHouse/ClickHouse/issues/28322). Added option `range_lookup_strategy` to specify the range lookup type, `min` or `max` (`min` by default). Closes [#21647](https://github.com/ClickHouse/ClickHouse/issues/21647). Fixed allocated bytes calculations. Fixed type name in `system.dictionaries` in case of `ComplexKeyHashedDictionary`. [#33927](https://github.com/ClickHouse/ClickHouse/pull/33927) ([Maksim Kita](https://github.com/kitaisreal)).
* `flat`, `hashed`, `hashed_array` dictionaries now support being created with empty attributes, with support for reading the keys and using `dictHas`. Fixes [#33820](https://github.com/ClickHouse/ClickHouse/issues/33820). [#33918](https://github.com/ClickHouse/ClickHouse/pull/33918) ([Maksim Kita](https://github.com/kitaisreal)).
* Added support for the `DateTime64` data type in dictionaries. [#33914](https://github.com/ClickHouse/ClickHouse/pull/33914) ([Maksim Kita](https://github.com/kitaisreal)).
* Allow to write `s3(url, access_key_id, secret_access_key)` (autodetection of data format and table structure, but with explicit credentials; see the sketch after this list). [#34503](https://github.com/ClickHouse/ClickHouse/pull/34503) ([Kruglov Pavel](https://github.com/Avogar)).
* Send the output format back to the client, as is done in the HTTP protocol, as suggested in [#34362](https://github.com/ClickHouse/ClickHouse/issues/34362). Closes [#34362](https://github.com/ClickHouse/ClickHouse/issues/34362). [#34499](https://github.com/ClickHouse/ClickHouse/pull/34499) ([Vitaly Baranov](https://github.com/vitlibar)).
* Send ProfileEvents statistics in case of INSERT SELECT queries (to display query metrics in `clickhouse-client` for this type of query). [#34498](https://github.com/ClickHouse/ClickHouse/pull/34498) ([Dmitry Novik](https://github.com/novikd)).
* Recognize the `.jsonl` extension for the JSONEachRow format. [#34496](https://github.com/ClickHouse/ClickHouse/pull/34496) ([Kruglov Pavel](https://github.com/Avogar)).
* Improve schema inference in clickhouse-local. Allow to write just `clickhouse-local -q "select * from table" < data.format`. [#34495](https://github.com/ClickHouse/ClickHouse/pull/34495) ([Kruglov Pavel](https://github.com/Avogar)).
* Privileges CREATE/ALTER/DROP ROW POLICY can now be granted on a table or on `database.*`, as well as globally `*.*`. [#34489](https://github.com/ClickHouse/ClickHouse/pull/34489) ([Vitaly Baranov](https://github.com/vitlibar)).
* Allow to export arbitrarily large files to `s3`. Add two new settings: `s3_upload_part_size_multiply_factor` and `s3_upload_part_size_multiply_parts_count_threshold`. Now, each time `s3_upload_part_size_multiply_parts_count_threshold` parts have been uploaded to S3 from a single query, `s3_min_upload_part_size` is multiplied by `s3_upload_part_size_multiply_factor`. Fixes [#34244](https://github.com/ClickHouse/ClickHouse/issues/34244). [#34422](https://github.com/ClickHouse/ClickHouse/pull/34422) ([alesapin](https://github.com/alesapin)).
* Allow to skip not-found (404) URLs for globs when using the URL storage / table function. Also closes [#34359](https://github.com/ClickHouse/ClickHouse/issues/34359). [#34392](https://github.com/ClickHouse/ClickHouse/pull/34392) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Default input and output formats for `clickhouse-local` that can be overridden by `--input-format` and `--output-format`. Closes [#30631](https://github.com/ClickHouse/ClickHouse/issues/30631). [#34352](https://github.com/ClickHouse/ClickHouse/pull/34352) ([李扬](https://github.com/taiyang-li)).
* Add `max_query_size` and `max_parser_depth` options to `clickhouse-format`. This closes [#30528](https://github.com/ClickHouse/ClickHouse/issues/30528). [#34349](https://github.com/ClickHouse/ClickHouse/pull/34349) ([李扬](https://github.com/taiyang-li)).
* Better handling of pre-inputs before client start. This is for [#34308](https://github.com/ClickHouse/ClickHouse/issues/34308). [#34336](https://github.com/ClickHouse/ClickHouse/pull/34336) ([Amos Bird](https://github.com/amosbird)).
* `REGEXP_MATCHES` and `REGEXP_REPLACE` function aliases for compatibility with PostgreSQL. Closes [#30885](https://github.com/ClickHouse/ClickHouse/issues/30885). [#34334](https://github.com/ClickHouse/ClickHouse/pull/34334) ([李扬](https://github.com/taiyang-li)).
* Some servers expect a User-Agent header in their HTTP requests. A `User-Agent` header entry has been added to HTTP requests of the form `User-Agent: ClickHouse/VERSION_STRING`. [#34330](https://github.com/ClickHouse/ClickHouse/pull/34330) ([Saad Ur Rahman](https://github.com/surahman)).
* Cancel merges before acquiring the table lock for a `TRUNCATE` query to avoid a `DEADLOCK_AVOIDED` error in some cases. Fixes [#34302](https://github.com/ClickHouse/ClickHouse/issues/34302). [#34304](https://github.com/ClickHouse/ClickHouse/pull/34304) ([tavplubix](https://github.com/tavplubix)).
* Change severity of the "Cancelled merging parts" message in logs, because it's not an error. This closes [#34148](https://github.com/ClickHouse/ClickHouse/issues/34148). [#34232](https://github.com/ClickHouse/ClickHouse/pull/34232) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Add ability to compose the PostgreSQL-style cast operator `::` with expressions using the `[]` and `.` operators (array and tuple indexing). [#34229](https://github.com/ClickHouse/ClickHouse/pull/34229) ([Nikolay Degterinsky](https://github.com/evillique)).
* Recognize the `YYYYMMDD-hhmmss` format in the `parseDateTimeBestEffort` function. This closes [#34206](https://github.com/ClickHouse/ClickHouse/issues/34206). [#34208](https://github.com/ClickHouse/ClickHouse/pull/34208) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Allow a carriage return in the middle of the line while parsing with the `Regexp` format. This closes [#34200](https://github.com/ClickHouse/ClickHouse/issues/34200). [#34205](https://github.com/ClickHouse/ClickHouse/pull/34205) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Allow to parse a dictionary's `PRIMARY KEY` as `PRIMARY KEY (id, value)`; previously only `PRIMARY KEY id, value` was supported. Closes [#34135](https://github.com/ClickHouse/ClickHouse/issues/34135). [#34141](https://github.com/ClickHouse/ClickHouse/pull/34141) ([Maksim Kita](https://github.com/kitaisreal)).
* An optional argument for `splitByChar` to limit the number of resulting elements. Closes [#34081](https://github.com/ClickHouse/ClickHouse/issues/34081). [#34140](https://github.com/ClickHouse/ClickHouse/pull/34140) ([李扬](https://github.com/taiyang-li)).
* Improve the experience of multi-line editing for clickhouse-client. This is a follow-up of [#31123](https://github.com/ClickHouse/ClickHouse/pull/31123). [#34114](https://github.com/ClickHouse/ClickHouse/pull/34114) ([Amos Bird](https://github.com/amosbird)).
* Add `UUID` support in the `MsgPack` input/output format. [#34065](https://github.com/ClickHouse/ClickHouse/pull/34065) ([Kruglov Pavel](https://github.com/Avogar)).
* Tracing context (for OpenTelemetry) is now propagated from GRPC client metadata (this change is relevant for the GRPC client-server protocol). [#34064](https://github.com/ClickHouse/ClickHouse/pull/34064) ([andremarianiello](https://github.com/andremarianiello)).
* Support all types of `SYSTEM` queries with the `ON CLUSTER` clause. [#34005](https://github.com/ClickHouse/ClickHouse/pull/34005) ([小路](https://github.com/nicelulu)).
* Improve memory accounting for queries that use less than `max_untracked_memory`. [#34001](https://github.com/ClickHouse/ClickHouse/pull/34001) ([Azat Khuzhin](https://github.com/azat)).
* Fixed UTF-8 string case-insensitive search when lowercase and uppercase characters are represented by a different number of bytes. An example is `ẞ` and `ß`. This closes [#7334](https://github.com/ClickHouse/ClickHouse/issues/7334). [#33992](https://github.com/ClickHouse/ClickHouse/pull/33992) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Detect format and schema from stdin in `clickhouse-local`. [#33960](https://github.com/ClickHouse/ClickHouse/pull/33960) ([Kruglov Pavel](https://github.com/Avogar)).
* Correctly handle the case of misconfiguration when multiple disks use the same path on the filesystem. [#29072](https://github.com/ClickHouse/ClickHouse/issues/29072). [#33905](https://github.com/ClickHouse/ClickHouse/pull/33905) ([zhongyuankai](https://github.com/zhongyuankai)).
* Try every resolved IP address while getting the S3 proxy. S3 proxies are rarely used, mostly in Yandex Cloud. [#33862](https://github.com/ClickHouse/ClickHouse/pull/33862) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Support the `EXPLAIN AST CREATE FUNCTION` query: `EXPLAIN AST CREATE FUNCTION mycast AS (n) -> cast(n as String)` will return `EXPLAIN AST CREATE FUNCTION mycast AS n -> CAST(n, 'String')`. [#33819](https://github.com/ClickHouse/ClickHouse/pull/33819) ([李扬](https://github.com/taiyang-li)).
* Added support for cast from `Map(Key, Value)` to `Array(Tuple(Key, Value))`. [#33794](https://github.com/ClickHouse/ClickHouse/pull/33794) ([Maksim Kita](https://github.com/kitaisreal)).
* Add some improvements and fixes for the `Bool` data type. Fixes [#33244](https://github.com/ClickHouse/ClickHouse/issues/33244). [#33737](https://github.com/ClickHouse/ClickHouse/pull/33737) ([Kruglov Pavel](https://github.com/Avogar)).
* Parse and store the OpenTelemetry trace-id in big-endian order. [#33723](https://github.com/ClickHouse/ClickHouse/pull/33723) ([Frank Chen](https://github.com/FrankChen021)).
* Improvement for the `fromUnixTimestamp64` family of functions. They now accept any integer value that can be converted to `Int64`. This closes: [#14648](https://github.com/ClickHouse/ClickHouse/issues/14648). [#33505](https://github.com/ClickHouse/ClickHouse/pull/33505) ([Andrey Zvonov](https://github.com/zvonand)).
* Reimplement `_shard_num` from constants (see [#7624](https://github.com/ClickHouse/ClickHouse/issues/7624)) with the `shardNum()` function (see [#27020](https://github.com/ClickHouse/ClickHouse/issues/27020)), to avoid possible issues (like those that had been found in [#16947](https://github.com/ClickHouse/ClickHouse/issues/16947)). [#33392](https://github.com/ClickHouse/ClickHouse/pull/33392) ([Azat Khuzhin](https://github.com/azat)).
* Enable binary arithmetic (plus, minus, multiply, division, least, greatest) between Decimal and Float. [#33355](https://github.com/ClickHouse/ClickHouse/pull/33355) ([flynn](https://github.com/ucasfl)).
* Respect cgroups limits in `max_threads` autodetection. [#33342](https://github.com/ClickHouse/ClickHouse/pull/33342) ([JaySon](https://github.com/JaySon-Huang)).
* Add new clickhouse-keeper setting `min_session_timeout_ms`. Now clickhouse-keeper will determine the client session timeout according to the `min_session_timeout_ms` and `session_timeout_ms` settings. [#33288](https://github.com/ClickHouse/ClickHouse/pull/33288) ([JackyWoo](https://github.com/JackyWoo)).
* Added `UUID` data type support for the functions `hex` and `bin`. [#32170](https://github.com/ClickHouse/ClickHouse/pull/32170) ([Frank Chen](https://github.com/FrankChen021)).
* Fix reading of subcolumns with dots in their names. In particular, fixed reading of `Nested` columns if their element names contain dots (e.g. ```Nested(`keys.name` String, `keys.id` UInt64, values UInt64)```). [#34228](https://github.com/ClickHouse/ClickHouse/pull/34228) ([Anton Popov](https://github.com/CurtizJ)).
* Fixes `parallel_view_processing = 0` not working when inserting into a table using `VALUES`. Fixes `view_duration_ms` in the `query_views_log` not being set correctly for materialized views. [#34067](https://github.com/ClickHouse/ClickHouse/pull/34067) ([Raúl Marín](https://github.com/Algunenano)).
* Fix parsing of table structure from ZooKeeper: now metadata from ZooKeeper is compared with local metadata in canonical form. It helps when canonical function names change between ClickHouse versions. [#33933](https://github.com/ClickHouse/ClickHouse/pull/33933) ([sunny](https://github.com/sunny19930321)).
* Properly escape some characters for interaction with LDAP. [#33401](https://github.com/ClickHouse/ClickHouse/pull/33401) ([IlyaTsoi](https://github.com/IlyaTsoi)).
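
A few of the entries above translate directly into one-liners. A sketch, with the bucket URL and credentials as placeholders:

```sql
-- s3 table function with explicit credentials; format and structure are autodetected.
SELECT count()
FROM s3('https://my-bucket.s3.amazonaws.com/data.csv.gz', 'AKIAEXAMPLE', 'SECRETEXAMPLE');

-- The optional third argument of splitByChar limits the number of resulting elements.
SELECT splitByChar(',', 'a,b,c,d', 2);

-- parseDateTimeBestEffort now recognizes the YYYYMMDD-hhmmss form.
SELECT parseDateTimeBestEffort('20220217-093000');
```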

#### Build/Testing/Packaging Improvement

* Remove unbundled build support. [#33690](https://github.com/ClickHouse/ClickHouse/pull/33690) ([Azat Khuzhin](https://github.com/azat)).
* Ensure that tests don't depend on the result of non-stable sorting of equal elements. Added randomization of ranges of equal items after sorting in debug builds to prevent issues when we rely on the sort order of equal items. [#34393](https://github.com/ClickHouse/ClickHouse/pull/34393) ([Maksim Kita](https://github.com/kitaisreal)).
* Add verbosity to a style check. [#34289](https://github.com/ClickHouse/ClickHouse/pull/34289) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Remove the `clickhouse-test` debian package because it's obsolete. [#33948](https://github.com/ClickHouse/ClickHouse/pull/33948) ([Ilya Yatsishin](https://github.com/qoega)).
* Multiple improvements for the build system to remove the possibility of occasionally using packages from the OS and to enforce hermetic builds. [#33695](https://github.com/ClickHouse/ClickHouse/pull/33695) ([Amos Bird](https://github.com/amosbird)).

#### Bug Fix (user-visible misbehaviour in official stable or prestable release)

* Fixed the assertion in case of using `allow_experimental_parallel_reading_from_replicas` with `max_parallel_replicas` equal to 1. This fixes [#34525](https://github.com/ClickHouse/ClickHouse/issues/34525). [#34613](https://github.com/ClickHouse/ClickHouse/pull/34613) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix a rare bug while reading empty arrays, which could lead to a `Data compressed with different methods` error. It can be reproduced if you have mostly empty arrays, but not always, and reading is performed in the backward direction with `ORDER BY ... DESC`. This error is extremely unlikely to happen. [#34327](https://github.com/ClickHouse/ClickHouse/pull/34327) ([Anton Popov](https://github.com/CurtizJ)).
* Fix wrong result of `round`/`roundBankers` if integer values of small types are rounded. Closes [#33267](https://github.com/ClickHouse/ClickHouse/issues/33267). [#34562](https://github.com/ClickHouse/ClickHouse/pull/34562) ([李扬](https://github.com/taiyang-li)).
* Sometimes query cancellation did not work immediately when we were reading multiple files from s3 or HDFS. Fixes [#34301](https://github.com/ClickHouse/ClickHouse/issues/34301). Relates to [#34397](https://github.com/ClickHouse/ClickHouse/issues/34397). [#34539](https://github.com/ClickHouse/ClickHouse/pull/34539) ([Dmitry Novik](https://github.com/novikd)).
* Fix exception `Chunk should have AggregatedChunkInfo in MergingAggregatedTransform` (in case of `optimize_aggregation_in_order = 1` and `distributed_aggregation_memory_efficient = 0`). Fixes [#34526](https://github.com/ClickHouse/ClickHouse/issues/34526). [#34532](https://github.com/ClickHouse/ClickHouse/pull/34532) ([Anton Popov](https://github.com/CurtizJ)).
* Fix comparison between integers and floats in index analysis. Previously it could lead to skipping some granules for reading by mistake. Fixes [#34493](https://github.com/ClickHouse/ClickHouse/issues/34493). [#34528](https://github.com/ClickHouse/ClickHouse/pull/34528) ([Anton Popov](https://github.com/CurtizJ)).
* Fix compression support in the URL engine. [#34524](https://github.com/ClickHouse/ClickHouse/pull/34524) ([Frank Chen](https://github.com/FrankChen021)).
* Fix possible error `file_size: Operation not supported` in schema autodetection for files. [#34479](https://github.com/ClickHouse/ClickHouse/pull/34479) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix possible race with table deletion. [#34416](https://github.com/ClickHouse/ClickHouse/pull/34416) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix possible error `Cannot convert column Function to mask` in short-circuit function evaluation. Closes [#34171](https://github.com/ClickHouse/ClickHouse/issues/34171). [#34415](https://github.com/ClickHouse/ClickHouse/pull/34415) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix potential crash when doing schema inference from a URL source. Closes [#34147](https://github.com/ClickHouse/ClickHouse/issues/34147). [#34405](https://github.com/ClickHouse/ClickHouse/pull/34405) ([Kruglov Pavel](https://github.com/Avogar)).
* For UDFs, access permissions were checked at the database level instead of the global level as they should be. Closes [#34281](https://github.com/ClickHouse/ClickHouse/issues/34281). [#34404](https://github.com/ClickHouse/ClickHouse/pull/34404) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix wrong engine syntax in the result of a `SHOW CREATE DATABASE` query for databases with engine `Memory`. This closes [#34335](https://github.com/ClickHouse/ClickHouse/issues/34335). [#34345](https://github.com/ClickHouse/ClickHouse/pull/34345) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed a couple of extremely rare race conditions that might lead to a broken state of the replication queue and an "intersecting parts" error. [#34297](https://github.com/ClickHouse/ClickHouse/pull/34297) ([tavplubix](https://github.com/tavplubix)).
* Fix progress bar width. It was incorrectly rounded to an integer number of characters. [#34275](https://github.com/ClickHouse/ClickHouse/pull/34275) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix `current_user`/`current_address` client information fields for inter-server communication (before this patch, `current_user`/`current_address` were preserved from the previous query). [#34263](https://github.com/ClickHouse/ClickHouse/pull/34263) ([Azat Khuzhin](https://github.com/azat)).
* Fix memory leak in case of some exception during query processing with `optimize_aggregation_in_order=1`. [#34234](https://github.com/ClickHouse/ClickHouse/pull/34234) ([Azat Khuzhin](https://github.com/azat)).
* Fix metric `Query`, which shows the number of executing queries. In the last several releases it was always 0. [#34224](https://github.com/ClickHouse/ClickHouse/pull/34224) ([Anton Popov](https://github.com/CurtizJ)).
* Fix schema inference for the table function `s3`. [#34186](https://github.com/ClickHouse/ClickHouse/pull/34186) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix a rare and benign race condition in the `HDFS`, `S3` and `URL` storage engines which can lead to additional connections. [#34172](https://github.com/ClickHouse/ClickHouse/pull/34172) ([alesapin](https://github.com/alesapin)).
* Fix a bug which can rarely lead to the error "Cannot read all data" while reading LowCardinality columns of MergeTree family table engines that store data on a remote file system like S3 (virtual filesystem over s3 is an experimental feature that is not ready for production). [#34139](https://github.com/ClickHouse/ClickHouse/pull/34139) ([alesapin](https://github.com/alesapin)).
* Fix inserts to distributed tables in case of a change of the native protocol. The last change was in version 22.1, so there may be some failures of inserts to distributed tables after an upgrade to that version. [#34132](https://github.com/ClickHouse/ClickHouse/pull/34132) ([Anton Popov](https://github.com/CurtizJ)).
* Fix possible data race in the `File` table engine that was introduced in [#33960](https://github.com/ClickHouse/ClickHouse/pull/33960). Closes [#34111](https://github.com/ClickHouse/ClickHouse/issues/34111). [#34113](https://github.com/ClickHouse/ClickHouse/pull/34113) ([Kruglov Pavel](https://github.com/Avogar)).
* Fixed a minor race condition that might cause an "intersecting parts" error in extremely rare cases after ZooKeeper connection loss. [#34096](https://github.com/ClickHouse/ClickHouse/pull/34096) ([tavplubix](https://github.com/tavplubix)).
* Fix asynchronous inserts with the `Native` format. [#34068](https://github.com/ClickHouse/ClickHouse/pull/34068) ([Anton Popov](https://github.com/CurtizJ)).
* Fix a bug which led to the inability of the server to start when both replicated access storage and keeper (embedded in clickhouse-server) are used. Introduced two settings for the keeper socket timeout instead of settings from the default user: `keeper_server.socket_receive_timeout_sec` and `keeper_server.socket_send_timeout_sec`. Fixes [#33973](https://github.com/ClickHouse/ClickHouse/issues/33973). [#33988](https://github.com/ClickHouse/ClickHouse/pull/33988) ([alesapin](https://github.com/alesapin)).
* Fix segfault while parsing an ORC file with a corrupted footer. Closes [#33797](https://github.com/ClickHouse/ClickHouse/issues/33797). [#33984](https://github.com/ClickHouse/ClickHouse/pull/33984) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix parsing IPv6 from a query parameter (prepared statements) and fix IPv6-to-string conversion. Closes [#33928](https://github.com/ClickHouse/ClickHouse/issues/33928). [#33971](https://github.com/ClickHouse/ClickHouse/pull/33971) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix crash while reading nested tuples. Fixes [#33838](https://github.com/ClickHouse/ClickHouse/issues/33838). [#33956](https://github.com/ClickHouse/ClickHouse/pull/33956) ([Anton Popov](https://github.com/CurtizJ)).
* Fix usage of the functions `array` and `tuple` with literal arguments in distributed queries. Previously it could lead to a `Not found columns` exception. [#33938](https://github.com/ClickHouse/ClickHouse/pull/33938) ([Anton Popov](https://github.com/CurtizJ)).
* Aggregate function combinator `-If` did not correctly process a `Nullable` filter argument. This closes [#27073](https://github.com/ClickHouse/ClickHouse/issues/27073). [#33920](https://github.com/ClickHouse/ClickHouse/pull/33920) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix potential race condition when doing remote disk reads (virtual filesystem over s3 is an experimental feature that is not ready for production). [#33912](https://github.com/ClickHouse/ClickHouse/pull/33912) ([Amos Bird](https://github.com/amosbird)).
* Fix crash if a SQL UDF is created with a lambda with non-identifier arguments. Closes [#33866](https://github.com/ClickHouse/ClickHouse/issues/33866). [#33868](https://github.com/ClickHouse/ClickHouse/pull/33868) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix usage of sparse columns (which can be enabled by the experimental setting `ratio_of_defaults_for_sparse_serialization`). [#33849](https://github.com/ClickHouse/ClickHouse/pull/33849) ([Anton Popov](https://github.com/CurtizJ)).
* Fixed a `replica is not readonly` logical error on a `SYSTEM RESTORE REPLICA` query when the replica is actually readonly. Fixes [#33806](https://github.com/ClickHouse/ClickHouse/issues/33806). [#33847](https://github.com/ClickHouse/ClickHouse/pull/33847) ([tavplubix](https://github.com/tavplubix)).
* Fix memory leak in `clickhouse-keeper` when compression is used (the default). [#33840](https://github.com/ClickHouse/ClickHouse/pull/33840) ([Azat Khuzhin](https://github.com/azat)).
* Fix index analysis with no common types available. [#33833](https://github.com/ClickHouse/ClickHouse/pull/33833) ([Amos Bird](https://github.com/amosbird)).
* Fix schema inference for `JSONEachRow` and `JSONCompactEachRow`. [#33830](https://github.com/ClickHouse/ClickHouse/pull/33830) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix usage of external dictionaries with a `redis` source and a large number of keys. [#33804](https://github.com/ClickHouse/ClickHouse/pull/33804) ([Anton Popov](https://github.com/CurtizJ)).
* Fix a bug in the client that led to 'Connection reset by peer' in the server. Closes [#33309](https://github.com/ClickHouse/ClickHouse/issues/33309). [#33790](https://github.com/ClickHouse/ClickHouse/pull/33790) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix parsing of the query `INSERT INTO ... VALUES SETTINGS ... (...), ...`. [#33776](https://github.com/ClickHouse/ClickHouse/pull/33776) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix a bug in table checking when creating a data part with wide format and projection. [#33774](https://github.com/ClickHouse/ClickHouse/pull/33774) ([李扬](https://github.com/taiyang-li)).
* Fix a tiny race between `count()` and INSERT/merges/... in MergeTree (it was possible to return an incorrect number of rows for SELECT with `optimize_trivial_count_query`). [#33753](https://github.com/ClickHouse/ClickHouse/pull/33753) ([Azat Khuzhin](https://github.com/azat)).
* Throw an exception when a directory listing request has failed in storage HDFS. [#33724](https://github.com/ClickHouse/ClickHouse/pull/33724) ([LiuNeng](https://github.com/liuneng1994)).
* Fix mutation when the table contains projections. This fixes [#33010](https://github.com/ClickHouse/ClickHouse/issues/33010). This fixes [#33275](https://github.com/ClickHouse/ClickHouse/issues/33275). [#33679](https://github.com/ClickHouse/ClickHouse/pull/33679) ([Amos Bird](https://github.com/amosbird)).
* Correctly determine the current database if `CREATE TEMPORARY TABLE AS SELECT` is queried inside a named HTTP session. This is a very rare use case. This closes [#8340](https://github.com/ClickHouse/ClickHouse/issues/8340). [#33676](https://github.com/ClickHouse/ClickHouse/pull/33676) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Allow some queries with sorting, LIMIT BY, ARRAY JOIN and lambda functions. This closes [#7462](https://github.com/ClickHouse/ClickHouse/issues/7462). [#33675](https://github.com/ClickHouse/ClickHouse/pull/33675) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix a bug in "zero-copy replication" (a feature that is under development and should not be used in production) which led to data duplication in case of a TTL move. Fixes [#33643](https://github.com/ClickHouse/ClickHouse/issues/33643). [#33642](https://github.com/ClickHouse/ClickHouse/pull/33642) ([alesapin](https://github.com/alesapin)).
* Fix `Chunk should have AggregatedChunkInfo in GroupingAggregatedTransform` (in case of `optimize_aggregation_in_order = 1`). [#33637](https://github.com/ClickHouse/ClickHouse/pull/33637) ([Azat Khuzhin](https://github.com/azat)).
* Fix error `Bad cast from type ... to DB::DataTypeArray` which may happen when a table has a `Nested` column with dots in its name and a default value is generated for it (e.g. during insert, when the column is not listed). Continuation of [#28762](https://github.com/ClickHouse/ClickHouse/issues/28762). [#33588](https://github.com/ClickHouse/ClickHouse/pull/33588) ([Alexey Pavlenko](https://github.com/alexeypavlenko)).
* Export into `lz4` files has been fixed. Closes [#31421](https://github.com/ClickHouse/ClickHouse/issues/31421). [#31862](https://github.com/ClickHouse/ClickHouse/pull/31862) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix potential crash if `group_by_overflow_mode` was set to `any` (approximate GROUP BY) and aggregation was performed by a single column of type `LowCardinality`. [#34506](https://github.com/ClickHouse/ClickHouse/pull/34506) ([DR](https://github.com/freedomDR)).
* Fix inserting to temporary tables via the gRPC client-server protocol. Fixes [#34347](https://github.com/ClickHouse/ClickHouse/issues/34347), issue `#2`. [#34364](https://github.com/ClickHouse/ClickHouse/pull/34364) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix issue [#19429](https://github.com/ClickHouse/ClickHouse/issues/19429). [#34225](https://github.com/ClickHouse/ClickHouse/pull/34225) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix issue [#18206](https://github.com/ClickHouse/ClickHouse/issues/18206). [#33977](https://github.com/ClickHouse/ClickHouse/pull/33977) ([Vitaly Baranov](https://github.com/vitlibar)).
* Allow using multiple LDAP storages in the same list of user directories. This worked earlier but was broken because LDAP tests are disabled (they are part of the testflows tests). [#33574](https://github.com/ClickHouse/ClickHouse/pull/33574) ([Vitaly Baranov](https://github.com/vitlibar)).

### <a id="221"></a> ClickHouse release v22.1, 2022-01-18

#### Upgrade Notes

@ -62,8 +62,8 @@ set(CMAKE_DEBUG_POSTFIX "d" CACHE STRING "Generate debug library name with a pos
# For more info see https://cmake.org/cmake/help/latest/prop_gbl/USE_FOLDERS.html
set_property(GLOBAL PROPERTY USE_FOLDERS ON)

# Check that submodules are present only if source was downloaded with git
if (EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/.git" AND NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/boost/boost")
# Check that submodules are present
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/sysroot/README.md")
    message (FATAL_ERROR "Submodules are not initialized. Run\n\tgit submodule update --init --recursive")
endif ()

@ -248,7 +248,9 @@ endif()
if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
    set(USE_DEBUG_HELPERS ON)
endif()

option(USE_DEBUG_HELPERS "Enable debug helpers" ${USE_DEBUG_HELPERS})
option(BUILD_STANDALONE_KEEPER "Build keeper as small standalone binary" OFF)

# Create BuildID when using lld. For other linkers it is created by default.
if (LINKER_NAME MATCHES "lld$")
@ -263,6 +265,14 @@ if (OBJCOPY_PATH AND YANDEX_OFFICIAL_BUILD AND (NOT CMAKE_TOOLCHAIN_FILE))
    set (USE_BINARY_HASH 1)
endif ()

# Allows to build stripped binary in a separate directory
if (OBJCOPY_PATH AND READELF_PATH)
    option(INSTALL_STRIPPED_BINARIES "Build stripped binaries with debug info in separate directory" OFF)
    if (INSTALL_STRIPPED_BINARIES)
        set(STRIPPED_BINARIES_OUTPUT "stripped" CACHE STRING "A separate directory for stripped information")
    endif()
endif()

cmake_host_system_information(RESULT AVAILABLE_PHYSICAL_MEMORY QUERY AVAILABLE_PHYSICAL_MEMORY) # Not available under freebsd


@ -285,8 +295,13 @@ include(cmake/cpu_features.cmake)
set (COMPILER_FLAGS "${COMPILER_FLAGS} -fasynchronous-unwind-tables")

# Reproducible builds
set (COMPILER_FLAGS "${COMPILER_FLAGS} -ffile-prefix-map=${CMAKE_SOURCE_DIR}=.")
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -ffile-prefix-map=${CMAKE_SOURCE_DIR}=.")
# If turned `ON`, remap file source paths in debug info, predefined preprocessor macros and __builtin_FILE().
option(ENABLE_BUILD_PATH_MAPPING "Enable remap file source paths in debug info, predefined preprocessor macros and __builtin_FILE(). It's to generate reproducible builds. See https://reproducible-builds.org/docs/build-path" ON)

if (ENABLE_BUILD_PATH_MAPPING)
    set (COMPILER_FLAGS "${COMPILER_FLAGS} -ffile-prefix-map=${CMAKE_SOURCE_DIR}=.")
    set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -ffile-prefix-map=${CMAKE_SOURCE_DIR}=.")
endif()

if (${CMAKE_VERSION} VERSION_LESS "3.12.4")
    # CMake < 3.12 doesn't support setting 20 as a C++ standard version.
@ -340,16 +355,19 @@ if (WITH_COVERAGE AND COMPILER_GCC)
    set(WITHOUT_COVERAGE "-fno-profile-arcs -fno-test-coverage")
endif()

set(COMPILER_FLAGS "${COMPILER_FLAGS}")
set (COMPILER_FLAGS "${COMPILER_FLAGS}")

# Our built-in unwinder only supports DWARF version up to 4.
set (DEBUG_INFO_FLAGS "-g -gdwarf-4")

set (CMAKE_BUILD_COLOR_MAKEFILE ON)
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMPILER_FLAGS} ${PLATFORM_EXTRA_CXX_FLAG} ${COMMON_WARNING_FLAGS} ${CXX_WARNING_FLAGS}")
set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O3 ${CMAKE_CXX_FLAGS_ADD}")
set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 -g3 -ggdb3 -fno-inline ${CMAKE_CXX_FLAGS_ADD}")
set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} -fno-inline ${CMAKE_CXX_FLAGS_ADD}")

set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMPILER_FLAGS} ${COMMON_WARNING_FLAGS} ${CMAKE_C_FLAGS_ADD}")
set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -O3 ${CMAKE_C_FLAGS_ADD}")
set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O0 -g3 -ggdb3 -fno-inline ${CMAKE_C_FLAGS_ADD}")
set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}")
set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} -fno-inline ${CMAKE_C_FLAGS_ADD}")

if (COMPILER_CLANG)
    if (OS_DARWIN)

@ -10,6 +10,6 @@ ClickHouse® is an open-source column-oriented database management system that a
* [YouTube channel](https://www.youtube.com/c/ClickHouseDB) has a lot of content about ClickHouse in video format.
* [Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-rxm3rdrk-lIUmhLC3V8WTaL0TGxsOmg) and [Telegram](https://telegram.me/clickhouse_en) allow chatting with ClickHouse users in real-time.
* [Blog](https://clickhouse.com/blog/en/) contains various ClickHouse-related articles, as well as announcements and reports about events.
* [Code Browser (Woboq)](https://clickhouse.com/codebrowser/html_report/ClickHouse/index.html) with syntax highlight and navigation.
* [Code Browser (Woboq)](https://clickhouse.com/codebrowser/ClickHouse/index.html) with syntax highlight and navigation.
* [Code Browser (github.dev)](https://github.dev/ClickHouse/ClickHouse) with syntax highlight, powered by github.dev.
* [Contacts](https://clickhouse.com/company/#contact) can help to get your questions answered if there are any.

@ -15,7 +15,7 @@ The following versions of ClickHouse server are currently being supported with s
| 20.x | :x: |
| 21.1 | :x: |
| 21.2 | :x: |
| 21.3 | ✅ |
| 21.3 | :x: |
| 21.4 | :x: |
| 21.5 | :x: |
| 21.6 | :x: |
@ -23,9 +23,11 @@ The following versions of ClickHouse server are currently being supported with s
| 21.8 | ✅ |
| 21.9 | :x: |
| 21.10 | :x: |
| 21.11 | ✅ |
| 21.12 | ✅ |
| 21.11 | :x: |
| 21.12 | :x: |
| 22.1 | ✅ |
| 22.2 | ✅ |
| 22.3 | ✅ |

## Reporting a Vulnerability

@ -18,6 +18,7 @@ set (SRCS
    terminalColors.cpp
    errnoToString.cpp
    StringRef.cpp
    safeExit.cpp
)

if (ENABLE_REPLXX)

@ -109,10 +109,10 @@ void LineReader::Suggest::addWords(Words && new_words)
        std::lock_guard lock(mutex);
        addNewWords(words, new_words, std::less<std::string>{});
        addNewWords(words_no_case, new_words_no_case, NoCaseCompare{});
    }

    assert(std::is_sorted(words.begin(), words.end()));
    assert(std::is_sorted(words_no_case.begin(), words_no_case.end(), NoCaseCompare{}));
        assert(std::is_sorted(words.begin(), words.end()));
        assert(std::is_sorted(words_no_case.begin(), words_no_case.end(), NoCaseCompare{}));
    }
}

LineReader::LineReader(const String & history_file_path_, bool multiline_, Patterns extenders_, Patterns delimiters_)

@ -19,6 +19,12 @@
#if defined(__SSE4_2__)
#include <smmintrin.h>
#include <nmmintrin.h>
#define CRC_INT _mm_crc32_u64
#endif

#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
#include <arm_acle.h>
#define CRC_INT __crc32cd
#endif


@ -40,9 +46,9 @@ struct StringRef

    constexpr StringRef(const char * data_, size_t size_) : data(data_), size(size_) {}

    StringRef(const std::string & s) : data(s.data()), size(s.size()) {}
    StringRef(const std::string & s) : data(s.data()), size(s.size()) {} /// NOLINT
    constexpr explicit StringRef(std::string_view s) : data(s.data()), size(s.size()) {}
    constexpr StringRef(const char * data_) : StringRef(std::string_view{data_}) {}
    constexpr StringRef(const char * data_) : StringRef(std::string_view{data_}) {} /// NOLINT
    constexpr StringRef() = default;

    std::string toString() const { return std::string(data, size); }
@ -205,7 +211,7 @@ struct StringRefHash64
    }
};

#if defined(__SSE4_2__)
#if defined(CRC_INT)

/// Parts are taken from CityHash.

@ -281,13 +287,13 @@ struct CRC32Hash
        do
        {
            UInt64 word = unalignedLoad<UInt64>(pos);
            res = _mm_crc32_u64(res, word);
            res = CRC_INT(res, word);

            pos += 8;
        } while (pos + 8 < end);

        UInt64 word = unalignedLoad<UInt64>(end - 8); /// I'm not sure if this is normal.
        res = _mm_crc32_u64(res, word);
        res = CRC_INT(res, word);

        return res;
    }

@ -26,3 +26,27 @@ void insertAtEnd(std::vector<T> & dest, std::vector<T> && src)
    dest.insert(dest.end(), std::make_move_iterator(src.begin()), std::make_move_iterator(src.end()));
    src.clear();
}

template <typename Container>
void insertAtEnd(Container & dest, const Container & src)
{
    if (src.empty())
        return;

    dest.insert(dest.end(), src.begin(), src.end());
}

template <typename Container>
void insertAtEnd(Container & dest, Container && src)
{
    if (src.empty())
        return;
    if (dest.empty())
    {
        dest.swap(src);
        return;
    }

    dest.insert(dest.end(), std::make_move_iterator(src.begin()), std::make_move_iterator(src.end()));
    src.clear();
}
18
base/base/safeExit.cpp
Normal file
@ -0,0 +1,18 @@

#if defined(OS_LINUX)
#    include <sys/syscall.h>
#endif
#include <unistd.h>
#include <base/safeExit.h>
#include <base/defines.h>

[[noreturn]] void safeExit(int code)
{
#if defined(THREAD_SANITIZER) && defined(OS_LINUX)
    /// Thread sanitizer tries to do something on exit that we don't need if we want to exit immediately,
    /// while connection handling threads are still running.
    (void)syscall(SYS_exit_group, code);
    __builtin_unreachable();
#else
    _exit(code);
#endif
}
4
base/base/safeExit.h
Normal file
@ -0,0 +1,4 @@

#pragma once

/// _exit() with a workaround for TSan.
[[noreturn]] void safeExit(int code);
109
base/base/sort.h
@ -2,6 +2,63 @@
|
||||
|
||||
#include <pdqsort.h>
|
||||
|
||||
#ifndef NDEBUG
|
||||
|
||||
#include <pcg_random.hpp>
|
||||
#include <base/getThreadId.h>
|
||||
|
||||
/** Same as libcxx std::__debug_less. Just without dependency on private part of standard library.
|
||||
* Check that Comparator induce strict weak ordering.
|
||||
*/
|
||||
template <typename Comparator>
|
||||
class DebugLessComparator
|
||||
{
|
||||
public:
|
||||
constexpr DebugLessComparator(Comparator & cmp_)
|
||||
: cmp(cmp_)
|
||||
{}
|
||||
|
||||
template <typename LhsType, typename RhsType>
|
||||
constexpr bool operator()(const LhsType & lhs, const RhsType & rhs)
|
||||
{
|
||||
bool lhs_less_than_rhs = cmp(lhs, rhs);
|
||||
if (lhs_less_than_rhs)
|
||||
assert(!cmp(rhs, lhs));
|
||||
|
||||
return lhs_less_than_rhs;
|
||||
}
|
||||
|
||||
template <typename LhsType, typename RhsType>
|
||||
constexpr bool operator()(LhsType & lhs, RhsType & rhs)
|
||||
{
|
||||
bool lhs_less_than_rhs = cmp(lhs, rhs);
|
||||
if (lhs_less_than_rhs)
|
||||
assert(!cmp(rhs, lhs));
|
||||
|
||||
return lhs_less_than_rhs;
|
||||
}
|
||||
|
||||
private:
|
||||
Comparator & cmp;
|
||||
};
|
||||
|
||||
template <typename Comparator>
|
||||
using ComparatorWrapper = DebugLessComparator<Comparator>;
|
||||
|
||||
template <typename RandomIt>
|
||||
void shuffle(RandomIt first, RandomIt last)
|
||||
{
|
||||
static thread_local pcg64 rng(getThreadId());
|
||||
std::shuffle(first, last, rng);
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
template <typename Comparator>
|
||||
using ComparatorWrapper = Comparator;
|
||||
|
||||
#endif
|
||||
|
||||
#pragma GCC diagnostic push
|
||||
#pragma GCC diagnostic ignored "-Wold-style-cast"
|
||||
|
||||
@ -10,19 +67,48 @@
|
||||
template <typename RandomIt>
|
||||
void nth_element(RandomIt first, RandomIt nth, RandomIt last)
|
||||
{
|
||||
::miniselect::floyd_rivest_select(first, nth, last);
|
||||
}
|
||||
using value_type = typename std::iterator_traits<RandomIt>::value_type;
|
||||
using comparator = std::less<value_type>;
|
||||
|
||||
template <typename RandomIt>
|
||||
void partial_sort(RandomIt first, RandomIt middle, RandomIt last)
|
||||
{
|
||||
::miniselect::floyd_rivest_partial_sort(first, middle, last);
|
||||
comparator compare;
|
||||
ComparatorWrapper<comparator> compare_wrapper = compare;
|
||||
|
||||
#ifndef NDEBUG
|
||||
::shuffle(first, last);
|
||||
#endif
|
||||
|
||||
::miniselect::floyd_rivest_select(first, nth, last, compare_wrapper);
|
||||
|
||||
#ifndef NDEBUG
|
||||
::shuffle(first, nth);
|
||||
|
||||
if (nth != last)
|
||||
::shuffle(nth + 1, last);
|
||||
#endif
|
||||
}
|
||||
|
||||
template <typename RandomIt, typename Compare>
|
||||
void partial_sort(RandomIt first, RandomIt middle, RandomIt last, Compare compare)
|
||||
{
|
||||
::miniselect::floyd_rivest_partial_sort(first, middle, last, compare);
|
||||
#ifndef NDEBUG
|
||||
::shuffle(first, last);
|
||||
#endif
|
||||
|
||||
ComparatorWrapper<Compare> compare_wrapper = compare;
|
||||
::miniselect::floyd_rivest_partial_sort(first, middle, last, compare_wrapper);
|
||||
|
||||
#ifndef NDEBUG
|
||||
::shuffle(middle, last);
|
||||
#endif
|
||||
}
|
||||
|
||||
template <typename RandomIt>
|
||||
void partial_sort(RandomIt first, RandomIt middle, RandomIt last)
|
||||
{
|
||||
using value_type = typename std::iterator_traits<RandomIt>::value_type;
|
||||
using comparator = std::less<value_type>;
|
||||
|
||||
::partial_sort(first, middle, last, comparator());
|
||||
}
|
||||
|
||||
#pragma GCC diagnostic pop
|
||||
@ -30,7 +116,12 @@ void partial_sort(RandomIt first, RandomIt middle, RandomIt last, Compare compar
|
||||
template <typename RandomIt, typename Compare>
|
||||
void sort(RandomIt first, RandomIt last, Compare compare)
|
||||
{
|
||||
::pdqsort(first, last, compare);
|
||||
#ifndef NDEBUG
|
||||
::shuffle(first, last);
|
||||
#endif
|
||||
|
||||
ComparatorWrapper<Compare> compare_wrapper = compare;
|
||||
::pdqsort(first, last, compare_wrapper);
|
||||
}
|
||||
|
||||
template <typename RandomIt>
|
||||
@ -38,5 +129,5 @@ void sort(RandomIt first, RandomIt last)
|
||||
{
|
||||
using value_type = typename std::iterator_traits<RandomIt>::value_type;
|
||||
using comparator = std::less<value_type>;
|
||||
::pdqsort(first, last, comparator());
|
||||
::sort(first, last, comparator());
|
||||
}
|
||||
|
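To make the debug machinery concrete, here is a small sketch of our own (not part of the commit) showing how the NDEBUG-only wrapper trips on a comparator that is not a strict weak ordering:

    #include <vector>
    #include <base/sort.h>   /// assumed include path for the header above

    int main()
    {
        std::vector<int> values{3, 1, 2, 2, 5};

        /// Fine: the default std::less induces a strict weak ordering.
        ::sort(values.begin(), values.end());

        /// Broken: '<=' is reflexive, so for the two equal elements cmp(a, b)
        /// and cmp(b, a) are both true and the wrapper's assert(!cmp(rhs, lhs))
        /// fires in debug builds.
        ::sort(values.begin(), values.end(), [](int a, int b) { return a <= b; });
    }

The post-selection ::shuffle of the unsorted ranges in nth_element and partial_sort serves a related purpose: it keeps callers from silently relying on element order that the contract does not guarantee.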
@@ -12,6 +12,18 @@
#include <tuple>
#include <limits>

#include <boost/multiprecision/cpp_bin_float.hpp>
#include <boost/math/special_functions/fpclassify.hpp>

/// Use same extended double for all platforms
#if (LDBL_MANT_DIG == 64)
#define CONSTEXPR_FROM_DOUBLE constexpr
using FromDoubleIntermediateType = long double;
#else
/// `wide_integer_from_builtin` can't be constexpr with non-literal `cpp_bin_float_double_extended`
#define CONSTEXPR_FROM_DOUBLE
using FromDoubleIntermediateType = boost::multiprecision::cpp_bin_float_double_extended;
#endif

namespace wide
{
@@ -265,12 +277,23 @@ struct integer<Bits, Signed>::_impl
    constexpr static void set_multiplier(integer<Bits, Signed> & self, T t) noexcept
    {
        constexpr uint64_t max_int = std::numeric_limits<uint64_t>::max();

        static_assert(std::is_same_v<T, double> || std::is_same_v<T, FromDoubleIntermediateType>);
        /// Implementation-specific behaviour on overflow (if we don't check here, a stack overflow will be triggered in bigint_cast).
        if (!std::isfinite(t))
        if constexpr (std::is_same_v<T, double>)
        {
            self = 0;
            return;
            if (!std::isfinite(t))
            {
                self = 0;
                return;
            }
        }
        else
        {
            if (!boost::math::isfinite(t))
            {
                self = 0;
                return;
            }
        }

        const T alpha = t / static_cast<T>(max_int);
@@ -278,13 +301,13 @@ struct integer<Bits, Signed>::_impl
        if (alpha <= static_cast<T>(max_int))
            self = static_cast<uint64_t>(alpha);
        else // max(double) / 2^64 will surely contain less than 52 precision bits, so speed up computations.
            set_multiplier<double>(self, alpha);
            set_multiplier<double>(self, static_cast<double>(alpha));

        self *= max_int;
        self += static_cast<uint64_t>(t - floor(alpha) * static_cast<T>(max_int)); // += b_i
    }

    constexpr static void wide_integer_from_builtin(integer<Bits, Signed> & self, double rhs) noexcept
    CONSTEXPR_FROM_DOUBLE static void wide_integer_from_builtin(integer<Bits, Signed> & self, double rhs) noexcept
    {
        constexpr int64_t max_int = std::numeric_limits<int64_t>::max();
        constexpr int64_t min_int = std::numeric_limits<int64_t>::lowest();
@@ -294,24 +317,17 @@ struct integer<Bits, Signed>::_impl
        /// the result may not fit in 64 bits.
        /// The example of such a number is 9.22337e+18.
        /// As to_Integral does a static_cast to int64_t, it may result in UB.
        /// The necessary check here is that long double has enough significant (mantissa) bits to store the
        /// The necessary check here is that FromDoubleIntermediateType has enough significant (mantissa) bits to store the
        /// int64_t max value precisely.

        // TODO Be compatible with Apple aarch64
#if not (defined(__APPLE__) && defined(__aarch64__))
        static_assert(LDBL_MANT_DIG >= 64,
                      "On your system long double has less than 64 precision bits, "
                      "which may result in UB when initializing double from int64_t");
#endif

        if (rhs > static_cast<long double>(min_int) && rhs < static_cast<long double>(max_int))
        if (rhs > static_cast<FromDoubleIntermediateType>(min_int) && rhs < static_cast<FromDoubleIntermediateType>(max_int))
        {
            self = static_cast<int64_t>(rhs);
            return;
        }

        const long double rhs_long_double = (static_cast<long double>(rhs) < 0)
            ? -static_cast<long double>(rhs)
        const FromDoubleIntermediateType rhs_long_double = (static_cast<FromDoubleIntermediateType>(rhs) < 0)
            ? -static_cast<FromDoubleIntermediateType>(rhs)
            : rhs;

        set_multiplier(self, rhs_long_double);
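As a worked illustration of the decomposition set_multiplier performs (our gloss, not part of the commit): the value is split as t = alpha * m + b, with m = 2^64 - 1, alpha = t / m and b = t - floor(alpha) * m. For t = 2^100, alpha is roughly 2^36, which fits into a single uint64 limb, so self is set to about 2^36, multiplied back by m, and the remainder b is added in. Only when alpha itself still exceeds 2^64 does the function recurse through set_multiplier<double>, and the quotient at that depth carries fewer than 52 significant bits anyway, which is why the recursion can safely drop to plain double.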
@@ -18,7 +18,7 @@
#include "Common/config_version.h"
#include <Common/config.h>

#if USE_SENTRY
#if USE_SENTRY && !defined(KEEPER_STANDALONE_BUILD)

#    include <sentry.h>
#    include <stdio.h>
@@ -1,5 +1,13 @@
include("${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake")
add_headers_and_sources(loggers .)

# Standard version depends on DBMS and works with text log
add_library(loggers ${loggers_sources} ${loggers_headers})
target_compile_definitions(loggers PUBLIC WITH_TEXT_LOG=1)
target_link_libraries(loggers PRIVATE dbms clickhouse_common_io)
target_include_directories(loggers PUBLIC ..)

# Lightweight version doesn't work with textlog and also doesn't depend on DBMS
add_library(loggers_no_text_log ${loggers_sources} ${loggers_headers})
target_link_libraries(loggers_no_text_log PRIVATE clickhouse_common_io)
target_include_directories(loggers PUBLIC ..)
@@ -9,7 +9,11 @@
#include <Poco/ConsoleChannel.h>
#include <Poco/Logger.h>
#include <Poco/Net/RemoteSyslogChannel.h>
#include <Interpreters/TextLog.h>

#ifdef WITH_TEXT_LOG
#include <Interpreters/TextLog.h>
#endif

#include <filesystem>

namespace fs = std::filesystem;
@@ -30,17 +34,21 @@ static std::string createDirectory(const std::string & file)
    return path;
};

#ifdef WITH_TEXT_LOG
void Loggers::setTextLog(std::shared_ptr<DB::TextLog> log, int max_priority)
{
    text_log = log;
    text_log_max_priority = max_priority;
}
#endif

void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Logger & logger /*_root*/, const std::string & cmd_name)
{
#ifdef WITH_TEXT_LOG
    if (split)
        if (auto log = text_log.lock())
            split->addTextLog(log, text_log_max_priority);
#endif

    auto current_logger = config.getString("logger", "");
    if (config_logger == current_logger) //-V1051
@@ -247,11 +255,8 @@ void Loggers::updateLevels(Poco::Util::AbstractConfiguration & config, Poco::Log
    if (log_level > max_log_level)
        max_log_level = log_level;

    const auto log_path = config.getString("logger.log", "");
    if (!log_path.empty())
    if (log_file)
        split->setLevel("log", log_level);
    else
        split->setLevel("log", 0);

    // Set level to console
    bool is_daemon = config.getBool("application.runAsDaemon", false);
@@ -263,15 +268,13 @@ void Loggers::updateLevels(Poco::Util::AbstractConfiguration & config, Poco::Log
        split->setLevel("console", 0);

    // Set level to errorlog
    int errorlog_level = 0;
    const auto errorlog_path = config.getString("logger.errorlog", "");
    if (!errorlog_path.empty())
    if (error_log_file)
    {
        errorlog_level = Poco::Logger::parseLevel(config.getString("logger.errorlog_level", "notice"));
        int errorlog_level = Poco::Logger::parseLevel(config.getString("logger.errorlog_level", "notice"));
        if (errorlog_level > max_log_level)
            max_log_level = errorlog_level;
        split->setLevel("errorlog", errorlog_level);
    }
    split->setLevel("errorlog", errorlog_level);

    // Set level to syslog
    int syslog_level = 0;
@@ -7,10 +7,12 @@
#include <Poco/Util/Application.h>
#include "OwnSplitChannel.h"

#ifdef WITH_TEXT_LOG
namespace DB
{
    class TextLog;
}
#endif

namespace Poco::Util
{
@@ -27,7 +29,9 @@ public:
    /// Close log files. On next log write files will be reopened.
    void closeLogs(Poco::Logger & logger);

#ifdef WITH_TEXT_LOG
    void setTextLog(std::shared_ptr<DB::TextLog> log, int max_priority);
#endif

private:
    Poco::AutoPtr<Poco::FileChannel> log_file;
@@ -37,8 +41,10 @@ private:
    /// Previous value of logger element in config. It is used to reinitialize loggers whenever the value changed.
    std::string config_logger;

#ifdef WITH_TEXT_LOG
    std::weak_ptr<DB::TextLog> text_log;
    int text_log_max_priority = -1;
#endif

    Poco::AutoPtr<DB::OwnSplitChannel> split;
};
@@ -20,10 +20,13 @@ namespace DB
{
void OwnSplitChannel::log(const Poco::Message & msg)
{

#ifdef WITH_TEXT_LOG
    auto logs_queue = CurrentThread::getInternalTextLogsQueue();

    if (channels.empty() && (logs_queue == nullptr || msg.getPriority() > logs_queue->max_priority))
        return;
#endif

    if (auto * masker = SensitiveDataMasker::getInstance())
    {
@@ -86,6 +89,7 @@ void OwnSplitChannel::logSplit(const Poco::Message & msg)
        channel.first->log(msg); // ordinary child
    }

#ifdef WITH_TEXT_LOG
    auto logs_queue = CurrentThread::getInternalTextLogsQueue();

    /// Log to "TCP queue" if message is not too noisy
@@ -137,6 +141,7 @@ void OwnSplitChannel::logSplit(const Poco::Message & msg)
        if (text_log_locked)
            text_log_locked->add(elem);
    }
#endif
}


@@ -145,12 +150,14 @@ void OwnSplitChannel::addChannel(Poco::AutoPtr<Poco::Channel> channel, const std
    channels.emplace(name, ExtendedChannelPtrPair(std::move(channel), dynamic_cast<ExtendedLogChannel *>(channel.get())));
}

#ifdef WITH_TEXT_LOG
void OwnSplitChannel::addTextLog(std::shared_ptr<DB::TextLog> log, int max_priority)
{
    std::lock_guard<std::mutex> lock(text_log_mutex);
    text_log = log;
    text_log_max_priority.store(max_priority, std::memory_order_relaxed);
}
#endif

void OwnSplitChannel::setLevel(const std::string & name, int level)
{
@@ -7,10 +7,12 @@
#include <Poco/Channel.h>
#include "ExtendedLogChannel.h"

#ifdef WITH_TEXT_LOG
namespace DB
{
    class TextLog;
}
#endif

namespace DB
{
@@ -25,7 +27,9 @@ public:
    /// Adds a child channel
    void addChannel(Poco::AutoPtr<Poco::Channel> channel, const std::string & name);

#ifdef WITH_TEXT_LOG
    void addTextLog(std::shared_ptr<DB::TextLog> log, int max_priority);
#endif

    void setLevel(const std::string & name, int level);

@@ -40,8 +44,10 @@ private:

    std::mutex text_log_mutex;

#ifdef WITH_TEXT_LOG
    std::weak_ptr<DB::TextLog> text_log;
    std::atomic<int> text_log_max_priority = -1;
#endif
};

}
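A minimal sketch of the ownership pattern above (our illustration with stand-in names, not ClickHouse code): the channel keeps only a weak_ptr so the text log can be torn down during shutdown without the logging channel extending its lifetime, and every write upgrades the pointer under the mutex first:

    #include <memory>
    #include <mutex>

    struct TextLogStub { void add(int elem) { (void)elem; } };   /// stand-in for DB::TextLog

    struct SplitChannelSketch
    {
        std::mutex text_log_mutex;
        std::weak_ptr<TextLogStub> text_log;   /// non-owning reference

        void logSplit(int elem)
        {
            std::shared_ptr<TextLogStub> text_log_locked;
            {
                std::lock_guard<std::mutex> lock(text_log_mutex);
                text_log_locked = text_log.lock();   /// null once the log was destroyed
            }
            if (text_log_locked)
                text_log_locked->add(elem);   /// we hold a strong reference while writing
        }
    };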
@@ -4,11 +4,12 @@
import sys
import json


def parse_block(block=[], options=[]):

    #print('block is here', block)
    #show_query = False
    #show_query = options.show_query
    # print('block is here', block)
    # show_query = False
    # show_query = options.show_query
    result = []
    query = block[0].strip()
    if len(block) > 4:
@@ -20,9 +21,9 @@ def parse_block(block=[], options=[]):
        timing2 = block[2].strip().split()[1]
        timing3 = block[3].strip().split()[1]
    if options.show_queries:
        result.append( query )
        result.append(query)
    if not options.show_first_timings:
        result += [ timing1 , timing2, timing3 ]
        result += [timing1, timing2, timing3]
    else:
        result.append(timing1)
    return result
@@ -37,12 +38,12 @@ def read_stats_file(options, fname):

    for line in f.readlines():

        if 'SELECT' in line:
        if "SELECT" in line:
            if len(block) > 1:
                result.append( parse_block(block, options) )
            block = [ line ]
        elif 'Time:' in line:
            block.append( line )
                result.append(parse_block(block, options))
            block = [line]
        elif "Time:" in line:
            block.append(line)

    return result

@@ -50,7 +51,7 @@ def read_stats_file(options, fname):
def compare_stats_files(options, arguments):
    result = []
    file_output = []
    pyplot_colors = ['y', 'b', 'g', 'r']
    pyplot_colors = ["y", "b", "g", "r"]
    for fname in arguments[1:]:
        file_output.append((read_stats_file(options, fname)))
    if len(file_output[0]) > 0:
@@ -58,65 +59,92 @@ def compare_stats_files(options, arguments):
    for idx, data_set in enumerate(file_output):
        int_result = []
        for timing in data_set:
            int_result.append(float(timing[0])) #y values
        result.append([[x for x in range(0, len(int_result)) ], int_result,
            pyplot_colors[idx] + '^' ] )
        # result.append([x for x in range(1, len(int_result)) ]) #x values
        # result.append( pyplot_colors[idx] + '^' )
            int_result.append(float(timing[0]))  # y values
        result.append(
            [
                [x for x in range(0, len(int_result))],
                int_result,
                pyplot_colors[idx] + "^",
            ]
        )
        # result.append([x for x in range(1, len(int_result)) ]) #x values
        # result.append( pyplot_colors[idx] + '^' )

    return result


def parse_args():
    from optparse import OptionParser
    parser = OptionParser(usage='usage: %prog [options] [result_file_path]..')
    parser.add_option("-q", "--show-queries", help="Show statements along with timings", action="store_true", dest="show_queries")
    parser.add_option("-f", "--show-first-timings", help="Show only first tries timings", action="store_true", dest="show_first_timings")
    parser.add_option("-c", "--compare-mode", help="Prepare output for pyplot comparing result files.", action="store", dest="compare_mode")

    parser = OptionParser(usage="usage: %prog [options] [result_file_path]..")
    parser.add_option(
        "-q",
        "--show-queries",
        help="Show statements along with timings",
        action="store_true",
        dest="show_queries",
    )
    parser.add_option(
        "-f",
        "--show-first-timings",
        help="Show only first tries timings",
        action="store_true",
        dest="show_first_timings",
    )
    parser.add_option(
        "-c",
        "--compare-mode",
        help="Prepare output for pyplot comparing result files.",
        action="store",
        dest="compare_mode",
    )
    (options, arguments) = parser.parse_args(sys.argv)
    if len(arguments) < 2:
        parser.print_usage()
        sys.exit(1)
    return ( options, arguments )
    return (options, arguments)


def gen_pyplot_code(options, arguments):
    result = ''
    result = ""
    data_sets = compare_stats_files(options, arguments)
    for idx, data_set in enumerate(data_sets, start=0):
        x_values, y_values, line_style = data_set
        result += '\nplt.plot('
        result += '%s, %s, \'%s\'' % ( x_values, y_values, line_style )
        result += ', label=\'%s try\')' % idx
    print('import matplotlib.pyplot as plt')
        result += "\nplt.plot("
        result += "%s, %s, '%s'" % (x_values, y_values, line_style)
        result += ", label='%s try')" % idx
    print("import matplotlib.pyplot as plt")
    print(result)
    print( 'plt.xlabel(\'Try number\')' )
    print( 'plt.ylabel(\'Timing\')' )
    print( 'plt.title(\'Benchmark query timings\')' )
    print('plt.legend()')
    print('plt.show()')
    print("plt.xlabel('Try number')")
    print("plt.ylabel('Timing')")
    print("plt.title('Benchmark query timings')")
    print("plt.legend()")
    print("plt.show()")


def gen_html_json(options, arguments):
    tuples = read_stats_file(options, arguments[1])
    print('{')
    print("{")
    print('"system: GreenPlum(x2),')
    print(('"version": "%s",' % '4.3.9.1'))
    print(('"version": "%s",' % "4.3.9.1"))
    print('"data_size": 10000000,')
    print('"time": "",')
    print('"comments": "",')
    print('"result":')
    print('[')
    print("[")
    for s in tuples:
        print(s)
    print(']')
    print('}')
    print("]")
    print("}")


def main():
    ( options, arguments ) = parse_args()
    (options, arguments) = parse_args()
    if len(arguments) > 2:
        gen_pyplot_code(options, arguments)
    else:
        gen_html_json(options, arguments)

if __name__ == '__main__':

if __name__ == "__main__":
    main()
@@ -11,10 +11,6 @@ DATASET="${TABLE}_v1.tar.xz"
QUERIES_FILE="queries.sql"
TRIES=3

AMD64_BIN_URL="https://builds.clickhouse.com/master/amd64/clickhouse"
AARCH64_BIN_URL="https://builds.clickhouse.com/master/aarch64/clickhouse"
POWERPC64_BIN_URL="https://builds.clickhouse.com/master/ppc64le/clickhouse"

# Note: on older Ubuntu versions, 'axel' does not support IPv6. If you are using IPv6-only servers on very old Ubuntu, just don't install 'axel'.

FASTER_DOWNLOAD=wget
@@ -33,20 +29,60 @@ fi
mkdir -p clickhouse-benchmark-$SCALE
pushd clickhouse-benchmark-$SCALE

if [[ ! -f clickhouse ]]; then
    CPU=$(uname -m)
    if [[ ($CPU == x86_64) || ($CPU == amd64) ]]; then
        $FASTER_DOWNLOAD "$AMD64_BIN_URL"
    elif [[ $CPU == aarch64 ]]; then
        $FASTER_DOWNLOAD "$AARCH64_BIN_URL"
    elif [[ $CPU == powerpc64le ]]; then
        $FASTER_DOWNLOAD "$POWERPC64_BIN_URL"
    else
        echo "Unsupported CPU type: $CPU"
        exit 1
    OS=$(uname -s)
    ARCH=$(uname -m)

    DIR=

    if [ "${OS}" = "Linux" ]
    then
        if [ "${ARCH}" = "x86_64" ]
        then
            DIR="amd64"
        elif [ "${ARCH}" = "aarch64" ]
        then
            DIR="aarch64"
        elif [ "${ARCH}" = "powerpc64le" ]
        then
            DIR="powerpc64le"
        fi
    elif [ "${OS}" = "FreeBSD" ]
    then
        if [ "${ARCH}" = "x86_64" ]
        then
            DIR="freebsd"
        elif [ "${ARCH}" = "aarch64" ]
        then
            DIR="freebsd-aarch64"
        elif [ "${ARCH}" = "powerpc64le" ]
        then
            DIR="freebsd-powerpc64le"
        fi
    elif [ "${OS}" = "Darwin" ]
    then
        if [ "${ARCH}" = "x86_64" ]
        then
            DIR="macos"
        elif [ "${ARCH}" = "aarch64" -o "${ARCH}" = "arm64" ]
        then
            DIR="macos-aarch64"
        fi
    fi

    if [ -z "${DIR}" ]
    then
        echo "The '${OS}' operating system with the '${ARCH}' architecture is not supported."
        exit 1
    fi

    URL="https://builds.clickhouse.com/master/${DIR}/clickhouse"
    echo
    echo "Will download ${URL}"
    echo
    curl -O "${URL}" && chmod a+x clickhouse || exit 1
    echo
    echo "Successfully downloaded the ClickHouse binary"

    chmod a+x clickhouse

if [[ ! -f $QUERIES_FILE ]]; then
@@ -88,7 +124,12 @@ echo

cat "$QUERIES_FILE" | sed "s/{table}/${TABLE}/g" | while read query; do
    sync
    echo 3 | sudo tee /proc/sys/vm/drop_caches >/dev/null
    if [ "${OS}" = "Darwin" ]
    then
        sudo purge > /dev/null
    else
        echo 3 | sudo tee /proc/sys/vm/drop_caches >/dev/null
    fi

    echo -n "["
    for i in $(seq 1 $TRIES); do
@@ -104,27 +145,45 @@ echo
echo "Benchmark complete. System info:"
echo

echo '----Version, build id-----------'
./clickhouse local --query "SELECT format('Version: {}, build id: {}', version(), buildId())"
./clickhouse local --query "SELECT format('The number of threads is: {}', value) FROM system.settings WHERE name = 'max_threads'" --output-format TSVRaw
./clickhouse local --query "SELECT format('Current time: {}', toString(now(), 'UTC'))"
echo '----CPU-------------------------'
cat /proc/cpuinfo | grep -i -F 'model name' | uniq
lscpu
echo '----Block Devices---------------'
lsblk
echo '----Disk Free and Total--------'
df -h .
echo '----Memory Free and Total-------'
free -h
echo '----Physical Memory Amount------'
cat /proc/meminfo | grep MemTotal
echo '----RAID Info-------------------'
cat /proc/mdstat
#echo '----PCI-------------------------'
#lspci
#echo '----All Hardware Info-----------'
#lshw
echo '--------------------------------'

if [ "${OS}" = "Darwin" ]
then
    echo '----Version, build id-----------'
    ./clickhouse local --query "SELECT format('Version: {}', version())"
    sw_vers | grep BuildVersion
    ./clickhouse local --query "SELECT format('The number of threads is: {}', value) FROM system.settings WHERE name = 'max_threads'" --output-format TSVRaw
    ./clickhouse local --query "SELECT format('Current time: {}', toString(now(), 'UTC'))"
    echo '----CPU-------------------------'
    sysctl hw.model
    sysctl -a | grep -E 'hw.activecpu|hw.memsize|hw.byteorder|cachesize'
    echo '----Disk Free and Total--------'
    df -h .
    echo '----Memory Free and Total-------'
    vm_stat
    echo '----Physical Memory Amount------'
    ls -l /var/vm
    echo '--------------------------------'
else
    echo '----Version, build id-----------'
    ./clickhouse local --query "SELECT format('Version: {}, build id: {}', version(), buildId())"
    ./clickhouse local --query "SELECT format('The number of threads is: {}', value) FROM system.settings WHERE name = 'max_threads'" --output-format TSVRaw
    ./clickhouse local --query "SELECT format('Current time: {}', toString(now(), 'UTC'))"
    echo '----CPU-------------------------'
    cat /proc/cpuinfo | grep -i -F 'model name' | uniq
    lscpu
    echo '----Block Devices---------------'
    lsblk
    echo '----Disk Free and Total--------'
    df -h .
    echo '----Memory Free and Total-------'
    free -h
    echo '----Physical Memory Amount------'
    cat /proc/meminfo | grep MemTotal
    echo '----RAID Info-------------------'
    cat /proc/mdstat
    #echo '----PCI-------------------------'
    #lspci
    #echo '----All Hardware Info-----------'
    #lshw
    echo '--------------------------------'
fi
echo
@@ -2,11 +2,11 @@

# NOTE: has nothing in common with DBMS_TCP_PROTOCOL_VERSION,
# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
SET(VERSION_REVISION 54459)
SET(VERSION_REVISION 54461)
SET(VERSION_MAJOR 22)
SET(VERSION_MINOR 2)
SET(VERSION_MINOR 4)
SET(VERSION_PATCH 1)
SET(VERSION_GITHASH dfe64a2789bbf51046bb6b5476f874f7b59d124c)
SET(VERSION_DESCRIBE v22.2.1.1-prestable)
SET(VERSION_STRING 22.2.1.1)
SET(VERSION_GITHASH 92ab33f560e638d1989c5ca543021ab53d110f5c)
SET(VERSION_DESCRIBE v22.4.1.1-testing)
SET(VERSION_STRING 22.4.1.1)
# end of autochange
@@ -55,5 +55,5 @@ endif ()
if (PARALLEL_COMPILE_JOBS OR PARALLEL_LINK_JOBS)
    message(STATUS
        "${CMAKE_CURRENT_SOURCE_DIR}: Have ${AVAILABLE_PHYSICAL_MEMORY} megabytes of memory.
        Limiting concurrent linkers jobs to ${PARALLEL_LINK_JOBS} and compiler jobs to ${PARALLEL_COMPILE_JOBS}")
        Limiting concurrent linkers jobs to ${PARALLEL_LINK_JOBS} and compiler jobs to ${PARALLEL_COMPILE_JOBS} (system has ${NUMBER_OF_LOGICAL_CORES} logical cores)")
endif ()
cmake/strip.sh (new executable file, +28)
@@ -0,0 +1,28 @@
#!/usr/bin/env bash

BINARY_PATH=$1
BINARY_NAME=$(basename "$BINARY_PATH")
DESTINATION_STRIPPED_DIR=$2
OBJCOPY_PATH=${3:-objcopy}
READELF_PATH=${4:-readelf}

BUILD_ID=$($READELF_PATH -n "$1" | sed -n '/Build ID/ { s/.*: //p; q; }')
BUILD_ID_PREFIX=${BUILD_ID:0:2}
BUILD_ID_SUFFIX=${BUILD_ID:2}

DESTINATION_DEBUG_INFO_DIR="$DESTINATION_STRIPPED_DIR/lib/debug/.build-id"
DESTINATION_STRIP_BINARY_DIR="$DESTINATION_STRIPPED_DIR/bin"

mkdir -p "$DESTINATION_DEBUG_INFO_DIR/$BUILD_ID_PREFIX"
mkdir -p "$DESTINATION_STRIP_BINARY_DIR"


cp "$BINARY_PATH" "$DESTINATION_STRIP_BINARY_DIR/$BINARY_NAME"

$OBJCOPY_PATH --only-keep-debug --compress-debug-sections "$DESTINATION_STRIP_BINARY_DIR/$BINARY_NAME" "$DESTINATION_DEBUG_INFO_DIR/$BUILD_ID_PREFIX/$BUILD_ID_SUFFIX.debug"
chmod 0644 "$DESTINATION_DEBUG_INFO_DIR/$BUILD_ID_PREFIX/$BUILD_ID_SUFFIX.debug"
chown 0:0 "$DESTINATION_DEBUG_INFO_DIR/$BUILD_ID_PREFIX/$BUILD_ID_SUFFIX.debug"

strip --remove-section=.comment --remove-section=.note "$DESTINATION_STRIP_BINARY_DIR/$BINARY_NAME"

$OBJCOPY_PATH --add-gnu-debuglink "$DESTINATION_DEBUG_INFO_DIR/$BUILD_ID_PREFIX/$BUILD_ID_SUFFIX.debug" "$DESTINATION_STRIP_BINARY_DIR/$BINARY_NAME"
cmake/strip_binary.cmake (new file, +26)
@@ -0,0 +1,26 @@
macro(clickhouse_strip_binary)
    set(oneValueArgs TARGET DESTINATION_DIR BINARY_PATH)

    cmake_parse_arguments(STRIP "" "${oneValueArgs}" "" ${ARGN})

    if (NOT DEFINED STRIP_TARGET)
        message(FATAL_ERROR "A target name must be provided for stripping binary")
    endif()

    if (NOT DEFINED STRIP_BINARY_PATH)
        message(FATAL_ERROR "A binary path name must be provided for stripping binary")
    endif()


    if (NOT DEFINED STRIP_DESTINATION_DIR)
        message(FATAL_ERROR "Destination directory for stripped binary must be provided")
    endif()

    add_custom_command(TARGET ${STRIP_TARGET} POST_BUILD
        COMMAND bash ${ClickHouse_SOURCE_DIR}/cmake/strip.sh ${STRIP_BINARY_PATH} ${STRIP_DESTINATION_DIR} ${OBJCOPY_PATH} ${READELF_PATH}
        COMMENT "Stripping clickhouse binary" VERBATIM
    )

    install(PROGRAMS ${STRIP_DESTINATION_DIR}/bin/${STRIP_TARGET} DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
    install(DIRECTORY ${STRIP_DESTINATION_DIR}/lib/debug DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT clickhouse)
endmacro()
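For orientation (our illustration, with a made-up build id ab12cd...ef): after the macro runs, the destination directory follows the conventional .build-id layout that debuggers search automatically, so gdb can match the stripped binary to its detached symbols by build id alone:

    <DESTINATION_DIR>/
        bin/clickhouse                              stripped binary, carrying a .gnu_debuglink section
        lib/debug/.build-id/ab/12cd...ef.debug      detached, compressed debug info named after the build id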
@@ -169,3 +169,33 @@ if (OBJCOPY_PATH)
else ()
    message (FATAL_ERROR "Cannot find objcopy.")
endif ()

# Readelf (FIXME copypaste)

if (COMPILER_GCC)
    find_program (READELF_PATH NAMES "llvm-readelf" "llvm-readelf-13" "llvm-readelf-12" "llvm-readelf-11" "readelf")
else ()
    find_program (READELF_PATH NAMES "llvm-readelf-${COMPILER_VERSION_MAJOR}" "llvm-readelf" "readelf")
endif ()

if (NOT READELF_PATH AND OS_DARWIN)
    find_program (BREW_PATH NAMES "brew")
    if (BREW_PATH)
        execute_process (COMMAND ${BREW_PATH} --prefix llvm ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE LLVM_PREFIX)
        if (LLVM_PREFIX)
            find_program (READELF_PATH NAMES "llvm-readelf" PATHS "${LLVM_PREFIX}/bin" NO_DEFAULT_PATH)
        endif ()
        if (NOT READELF_PATH)
            execute_process (COMMAND ${BREW_PATH} --prefix binutils ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE BINUTILS_PREFIX)
            if (BINUTILS_PREFIX)
                find_program (READELF_PATH NAMES "readelf" PATHS "${BINUTILS_PREFIX}/bin" NO_DEFAULT_PATH)
            endif ()
        endif ()
    endif ()
endif ()

if (READELF_PATH)
    message (STATUS "Using readelf: ${READELF_PATH}")
else ()
    message (FATAL_ERROR "Cannot find readelf.")
endif ()
contrib/icu (vendored)
@@ -1 +1 @@
Subproject commit faa2f9f9e1fe74c5ed00eba371d2830134cdbea1
Subproject commit a56dde820dc35665a66f2e9ee8ba58e75049b668
@@ -212,7 +212,9 @@ set(ICUUC_SOURCES
    "${ICU_SOURCE_DIR}/common/ubiditransform.cpp"
    "${ICU_SOURCE_DIR}/common/pluralmap.cpp"
    "${ICU_SOURCE_DIR}/common/static_unicode_sets.cpp"
    "${ICU_SOURCE_DIR}/common/restrace.cpp")
    "${ICU_SOURCE_DIR}/common/restrace.cpp"
    "${ICU_SOURCE_DIR}/common/emojiprops.cpp"
    "${ICU_SOURCE_DIR}/common/lstmbe.cpp")

set(ICUI18N_SOURCES
    "${ICU_SOURCE_DIR}/i18n/ucln_in.cpp"
@@ -398,7 +400,6 @@ set(ICUI18N_SOURCES
    "${ICU_SOURCE_DIR}/i18n/sharedbreakiterator.cpp"
    "${ICU_SOURCE_DIR}/i18n/scientificnumberformatter.cpp"
    "${ICU_SOURCE_DIR}/i18n/dayperiodrules.cpp"
    "${ICU_SOURCE_DIR}/i18n/nounit.cpp"
    "${ICU_SOURCE_DIR}/i18n/number_affixutils.cpp"
    "${ICU_SOURCE_DIR}/i18n/number_compact.cpp"
    "${ICU_SOURCE_DIR}/i18n/number_decimalquantity.cpp"
@@ -446,12 +447,21 @@ set(ICUI18N_SOURCES
    "${ICU_SOURCE_DIR}/i18n/formattedvalue.cpp"
    "${ICU_SOURCE_DIR}/i18n/formattedval_iterimpl.cpp"
    "${ICU_SOURCE_DIR}/i18n/formattedval_sbimpl.cpp"
    "${ICU_SOURCE_DIR}/i18n/formatted_string_builder.cpp")
    "${ICU_SOURCE_DIR}/i18n/formatted_string_builder.cpp"
    "${ICU_SOURCE_DIR}/i18n/measunit_extra.cpp"
    "${ICU_SOURCE_DIR}/i18n/number_symbolswrapper.cpp"
    "${ICU_SOURCE_DIR}/i18n/number_usageprefs.cpp"
    "${ICU_SOURCE_DIR}/i18n/numrange_capi.cpp"
    "${ICU_SOURCE_DIR}/i18n/pluralranges.cpp"
    "${ICU_SOURCE_DIR}/i18n/units_complexconverter.cpp"
    "${ICU_SOURCE_DIR}/i18n/units_converter.cpp"
    "${ICU_SOURCE_DIR}/i18n/units_data.cpp"
    "${ICU_SOURCE_DIR}/i18n/units_router.cpp")

file(GENERATE OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/empty.cpp" CONTENT " ")
enable_language(ASM)
set(ICUDATA_SOURCES
    "${ICUDATA_SOURCE_DIR}/icudt66l_dat.S"
    "${ICUDATA_SOURCE_DIR}/icudt70l_dat.S"
    "${CMAKE_CURRENT_BINARY_DIR}/empty.cpp" # Without this cmake can incorrectly detect the library type (OBJECT) instead of SHARED/STATIC
)
contrib/icudata (vendored)
@@ -1 +1 @@
Subproject commit f020820388e3faafb44cc643574a2d563dfde572
Subproject commit 72d9a4a7febc904e2b0a534ccb25ae40fac5f1e5

contrib/jemalloc (vendored)
@@ -1 +1 @@
Subproject commit a1404807211b1612539f840b3dcb1bf38d1a269e
Subproject commit 78b58379c854a639df79beb3289351129d863d4b
@@ -53,43 +53,75 @@ set (SRCS
    "${LIBRARY_DIR}/src/background_thread.c"
    "${LIBRARY_DIR}/src/base.c"
    "${LIBRARY_DIR}/src/bin.c"
    "${LIBRARY_DIR}/src/bin_info.c"
    "${LIBRARY_DIR}/src/bitmap.c"
    "${LIBRARY_DIR}/src/buf_writer.c"
    "${LIBRARY_DIR}/src/cache_bin.c"
    "${LIBRARY_DIR}/src/ckh.c"
    "${LIBRARY_DIR}/src/counter.c"
    "${LIBRARY_DIR}/src/ctl.c"
    "${LIBRARY_DIR}/src/decay.c"
    "${LIBRARY_DIR}/src/div.c"
    "${LIBRARY_DIR}/src/ecache.c"
    "${LIBRARY_DIR}/src/edata.c"
    "${LIBRARY_DIR}/src/edata_cache.c"
    "${LIBRARY_DIR}/src/ehooks.c"
    "${LIBRARY_DIR}/src/emap.c"
    "${LIBRARY_DIR}/src/eset.c"
    "${LIBRARY_DIR}/src/exp_grow.c"
    "${LIBRARY_DIR}/src/extent.c"
    "${LIBRARY_DIR}/src/extent_dss.c"
    "${LIBRARY_DIR}/src/extent_mmap.c"
    "${LIBRARY_DIR}/src/hash.c"
    "${LIBRARY_DIR}/src/fxp.c"
    "${LIBRARY_DIR}/src/hook.c"
    "${LIBRARY_DIR}/src/hpa.c"
    "${LIBRARY_DIR}/src/hpa_hooks.c"
    "${LIBRARY_DIR}/src/hpdata.c"
    "${LIBRARY_DIR}/src/inspect.c"
    "${LIBRARY_DIR}/src/jemalloc.c"
    "${LIBRARY_DIR}/src/large.c"
    "${LIBRARY_DIR}/src/log.c"
    "${LIBRARY_DIR}/src/malloc_io.c"
    "${LIBRARY_DIR}/src/mutex.c"
    "${LIBRARY_DIR}/src/mutex_pool.c"
    "${LIBRARY_DIR}/src/nstime.c"
    "${LIBRARY_DIR}/src/pa.c"
    "${LIBRARY_DIR}/src/pac.c"
    "${LIBRARY_DIR}/src/pa_extra.c"
    "${LIBRARY_DIR}/src/pages.c"
    "${LIBRARY_DIR}/src/prng.c"
    "${LIBRARY_DIR}/src/pai.c"
    "${LIBRARY_DIR}/src/peak_event.c"
    "${LIBRARY_DIR}/src/prof.c"
    "${LIBRARY_DIR}/src/prof_data.c"
    "${LIBRARY_DIR}/src/prof_log.c"
    "${LIBRARY_DIR}/src/prof_recent.c"
    "${LIBRARY_DIR}/src/prof_stats.c"
    "${LIBRARY_DIR}/src/prof_sys.c"
    "${LIBRARY_DIR}/src/psset.c"
    "${LIBRARY_DIR}/src/rtree.c"
    "${LIBRARY_DIR}/src/safety_check.c"
    "${LIBRARY_DIR}/src/san_bump.c"
    "${LIBRARY_DIR}/src/san.c"
    "${LIBRARY_DIR}/src/sc.c"
    "${LIBRARY_DIR}/src/sec.c"
    "${LIBRARY_DIR}/src/stats.c"
    "${LIBRARY_DIR}/src/sz.c"
    "${LIBRARY_DIR}/src/tcache.c"
    "${LIBRARY_DIR}/src/test_hooks.c"
    "${LIBRARY_DIR}/src/thread_event.c"
    "${LIBRARY_DIR}/src/ticker.c"
    "${LIBRARY_DIR}/src/tsd.c"
    "${LIBRARY_DIR}/src/witness.c"
    "${LIBRARY_DIR}/src/safety_check.c"
)
if (OS_DARWIN)
    list(APPEND SRCS "${LIBRARY_DIR}/src/zone.c")
endif ()

add_library(_jemalloc ${SRCS})
# First include jemalloc-cmake files, to override anything that jemalloc has.
# (for example if you were trying to build jemalloc inside contrib/jemalloc you
# will have some files that may be out of date)
target_include_directories(_jemalloc PUBLIC include)
target_include_directories(_jemalloc PRIVATE "${LIBRARY_DIR}/include")
target_include_directories(_jemalloc SYSTEM PUBLIC include)

set (JEMALLOC_INCLUDE_PREFIX)
# OS_
@@ -1 +1,6 @@
It allows integrating jemalloc into a CMake project.

- Remove JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF because it's non-standard.
- Added JEMALLOC_CONFIG_MALLOC_CONF substitution.
- Add musl support (USE_MUSL).
- Also note that the darwin build requires JEMALLOC_PREFIX, while the others do not.
@@ -4,8 +4,14 @@
#include "jemalloc_internal_defs.h"
#include "jemalloc/internal/jemalloc_internal_decls.h"

#ifdef JEMALLOC_UTRACE
#if defined(JEMALLOC_UTRACE) || defined(JEMALLOC_UTRACE_LABEL)
#include <sys/ktrace.h>
# if defined(JEMALLOC_UTRACE)
#   define UTRACE_CALL(p, l) utrace(p, l)
# else
#   define UTRACE_CALL(p, l) utrace("jemalloc_process", p, l)
#   define JEMALLOC_UTRACE
# endif
#endif

#define JEMALLOC_NO_DEMANGLE
@@ -180,6 +186,35 @@ static const bool config_opt_safety_checks =
#endif
    ;

/*
 * Extra debugging of sized deallocations too onerous to be included in the
 * general safety checks.
 */
static const bool config_opt_size_checks =
#if defined(JEMALLOC_OPT_SIZE_CHECKS) || defined(JEMALLOC_DEBUG)
    true
#else
    false
#endif
    ;

static const bool config_uaf_detection =
#if defined(JEMALLOC_UAF_DETECTION) || defined(JEMALLOC_DEBUG)
    true
#else
    false
#endif
    ;

/* Whether or not the C++ extensions are enabled. */
static const bool config_enable_cxx =
#ifdef JEMALLOC_ENABLE_CXX
    true
#else
    false
#endif
    ;

#if defined(_WIN32) || defined(JEMALLOC_HAVE_SCHED_GETCPU)
/* Currently percpu_arena depends on sched_getcpu. */
#define JEMALLOC_PERCPU_ARENA
@@ -209,5 +244,20 @@ static const bool have_background_thread =
    false
#endif
    ;
static const bool config_high_res_timer =
#ifdef JEMALLOC_HAVE_CLOCK_REALTIME
    true
#else
    false
#endif
    ;

static const bool have_memcntl =
#ifdef JEMALLOC_HAVE_MEMCNTL
    true
#else
    false
#endif
    ;

#endif /* JEMALLOC_PREAMBLE_H */
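An aside on the idiom used throughout this preamble (our gloss, not part of the commit): surfacing each preprocessor flag as a static const bool lets ordinary if statements replace #ifdef blocks, so both branches are always parsed and type-checked while the optimizer removes the dead one. A minimal sketch with a hypothetical MYLIB_DEBUG flag:

    #include <cstdio>

    static const bool config_debug =
    #ifdef MYLIB_DEBUG   /// hypothetical build flag, only for illustration
        true
    #else
        false
    #endif
        ;

    void on_alloc(void * ptr)
    {
        /// Unlike code hidden behind #ifdef, this branch must always compile;
        /// when config_debug is false the compiler drops it as dead code.
        if (config_debug)
            std::fprintf(stderr, "alloc %p\n", ptr);
    }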
@@ -4,12 +4,21 @@
extern "C" {
#endif

#if !defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wredundant-decls"
#endif

#include <jemalloc/jemalloc_defs.h>
#include <jemalloc/jemalloc_rename.h>
#include <jemalloc/jemalloc_macros.h>
#include <jemalloc/jemalloc_protos.h>
#include <jemalloc/jemalloc_typedefs.h>

#if !defined(__clang__)
#pragma GCC diagnostic pop
#endif

#ifdef __cplusplus
}
#endif
@@ -9,11 +9,17 @@
#define JEMALLOC_HAVE_ATTR_FORMAT_ARG

/* Defined if format(gnu_printf, ...) attribute is supported. */
#define JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
/* #undef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF */

/* Defined if format(printf, ...) attribute is supported. */
#define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF

/* Defined if fallthrough attribute is supported. */
#define JEMALLOC_HAVE_ATTR_FALLTHROUGH

/* Defined if cold attribute is supported. */
#define JEMALLOC_HAVE_ATTR_COLD

/*
 * Define overrides for non-standard allocator-related functions if they are
 * present on the system.
@@ -4,13 +4,13 @@
#include <limits.h>
#include <strings.h>

#define JEMALLOC_VERSION "5.2.1-0-gea6b3e973b477b8061e0076bb257dbd7f3faa756"
#define JEMALLOC_VERSION "5.3-RC"
#define JEMALLOC_VERSION_MAJOR 5
#define JEMALLOC_VERSION_MINOR 2
#define JEMALLOC_VERSION_BUGFIX 1
#define JEMALLOC_VERSION_MINOR 3
#define JEMALLOC_VERSION_BUGFIX 0
#define JEMALLOC_VERSION_NREV 0
#define JEMALLOC_VERSION_GID "ea6b3e973b477b8061e0076bb257dbd7f3faa756"
#define JEMALLOC_VERSION_GID_IDENT ea6b3e973b477b8061e0076bb257dbd7f3faa756
#define JEMALLOC_VERSION_GID "ca709c3139f77f4c00a903cdee46d71e9028f6c6"
#define JEMALLOC_VERSION_GID_IDENT ca709c3139f77f4c00a903cdee46d71e9028f6c6

#define MALLOCX_LG_ALIGN(la) ((int)(la))
#if LG_SIZEOF_PTR == 2
@@ -71,6 +71,7 @@
# endif
# define JEMALLOC_FORMAT_ARG(i)
# define JEMALLOC_FORMAT_PRINTF(s, i)
# define JEMALLOC_FALLTHROUGH
# define JEMALLOC_NOINLINE __declspec(noinline)
# ifdef __cplusplus
# define JEMALLOC_NOTHROW __declspec(nothrow)
@@ -84,6 +85,7 @@
# else
# define JEMALLOC_ALLOCATOR
# endif
# define JEMALLOC_COLD
#elif defined(JEMALLOC_HAVE_ATTR)
# define JEMALLOC_ATTR(s) __attribute__((s))
# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
@@ -109,11 +111,21 @@
# else
# define JEMALLOC_FORMAT_PRINTF(s, i)
# endif
# ifdef JEMALLOC_HAVE_ATTR_FALLTHROUGH
# define JEMALLOC_FALLTHROUGH JEMALLOC_ATTR(fallthrough)
# else
# define JEMALLOC_FALLTHROUGH
# endif
# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow)
# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
# define JEMALLOC_RESTRICT_RETURN
# define JEMALLOC_ALLOCATOR
# ifdef JEMALLOC_HAVE_ATTR_COLD
# define JEMALLOC_COLD JEMALLOC_ATTR(__cold__)
# else
# define JEMALLOC_COLD
# endif
#else
# define JEMALLOC_ATTR(s)
# define JEMALLOC_ALIGNED(s)
@@ -121,9 +133,17 @@
# define JEMALLOC_ALLOC_SIZE2(s1, s2)
# define JEMALLOC_EXPORT
# define JEMALLOC_FORMAT_PRINTF(s, i)
# define JEMALLOC_FALLTHROUGH
# define JEMALLOC_NOINLINE
# define JEMALLOC_NOTHROW
# define JEMALLOC_SECTION(s)
# define JEMALLOC_RESTRICT_RETURN
# define JEMALLOC_ALLOCATOR
# define JEMALLOC_COLD
#endif

#if (defined(__APPLE__) || defined(__FreeBSD__)) && !defined(JEMALLOC_NO_RENAME)
# define JEMALLOC_SYS_NOTHROW
#else
# define JEMALLOC_SYS_NOTHROW JEMALLOC_NOTHROW
#endif
|
@ -4,6 +4,9 @@
|
||||
# undef JEMALLOC_NOTHROW
|
||||
# define JEMALLOC_NOTHROW
|
||||
|
||||
# undef JEMALLOC_SYS_NOTHROW
|
||||
# define JEMALLOC_SYS_NOTHROW
|
||||
|
||||
# undef JEMALLOC_CXX_THROW
|
||||
# define JEMALLOC_CXX_THROW
|
||||
#endif
|
||||
@ -18,21 +21,22 @@ extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque,
|
||||
const char *s);
|
||||
|
||||
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
|
||||
void JEMALLOC_NOTHROW *je_malloc(size_t size)
|
||||
void JEMALLOC_SYS_NOTHROW *je_malloc(size_t size)
|
||||
JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
|
||||
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
|
||||
void JEMALLOC_NOTHROW *je_calloc(size_t num, size_t size)
|
||||
void JEMALLOC_SYS_NOTHROW *je_calloc(size_t num, size_t size)
|
||||
JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2);
|
||||
JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_posix_memalign(void **memptr,
|
||||
size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1));
|
||||
JEMALLOC_EXPORT int JEMALLOC_SYS_NOTHROW je_posix_memalign(
|
||||
void **memptr, size_t alignment, size_t size) JEMALLOC_CXX_THROW
|
||||
JEMALLOC_ATTR(nonnull(1));
|
||||
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
|
||||
void JEMALLOC_NOTHROW *je_aligned_alloc(size_t alignment,
|
||||
void JEMALLOC_SYS_NOTHROW *je_aligned_alloc(size_t alignment,
|
||||
size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc)
|
||||
JEMALLOC_ALLOC_SIZE(2);
|
||||
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
|
||||
void JEMALLOC_NOTHROW *je_realloc(void *ptr, size_t size)
|
||||
void JEMALLOC_SYS_NOTHROW *je_realloc(void *ptr, size_t size)
|
||||
JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2);
|
||||
JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr)
|
||||
JEMALLOC_EXPORT void JEMALLOC_SYS_NOTHROW je_free(void *ptr)
|
||||
JEMALLOC_CXX_THROW;
|
||||
|
||||
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
|
||||
@ -62,15 +66,19 @@ JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print(
|
||||
const char *opts);
|
||||
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size(
|
||||
JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW;
|
||||
#ifdef JEMALLOC_HAVE_MALLOC_SIZE
|
||||
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_size(
|
||||
const void *ptr);
|
||||
#endif
|
||||
|
||||
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
|
||||
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
|
||||
void JEMALLOC_NOTHROW *je_memalign(size_t alignment, size_t size)
|
||||
void JEMALLOC_SYS_NOTHROW *je_memalign(size_t alignment, size_t size)
|
||||
JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
|
||||
#endif
|
||||
|
||||
#ifdef JEMALLOC_OVERRIDE_VALLOC
|
||||
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
|
||||
void JEMALLOC_NOTHROW *je_valloc(size_t size) JEMALLOC_CXX_THROW
|
||||
void JEMALLOC_SYS_NOTHROW *je_valloc(size_t size) JEMALLOC_CXX_THROW
|
||||
JEMALLOC_ATTR(malloc);
|
||||
#endif
|
||||
|
@ -0,0 +1,71 @@
|
||||
/*
|
||||
* The jet_ prefix on the following public symbol declarations is an artifact
|
||||
* of namespace management, and should be omitted in application code unless
|
||||
* JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle@install_suffix@.h).
|
||||
*/
|
||||
extern JEMALLOC_EXPORT const char *jet_malloc_conf;
|
||||
extern JEMALLOC_EXPORT void (*jet_malloc_message)(void *cbopaque,
|
||||
const char *s);
|
||||
|
||||
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
|
||||
void JEMALLOC_SYS_NOTHROW *jet_malloc(size_t size)
|
||||
JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
|
||||
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
|
||||
void JEMALLOC_SYS_NOTHROW *jet_calloc(size_t num, size_t size)
|
||||
JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2);
|
||||
JEMALLOC_EXPORT int JEMALLOC_SYS_NOTHROW jet_posix_memalign(
|
||||
void **memptr, size_t alignment, size_t size) JEMALLOC_CXX_THROW
|
||||
JEMALLOC_ATTR(nonnull(1));
|
||||
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
|
||||
void JEMALLOC_SYS_NOTHROW *jet_aligned_alloc(size_t alignment,
|
||||
size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc)
|
||||
JEMALLOC_ALLOC_SIZE(2);
|
||||
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
|
||||
void JEMALLOC_SYS_NOTHROW *jet_realloc(void *ptr, size_t size)
|
||||
JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2);
|
||||
JEMALLOC_EXPORT void JEMALLOC_SYS_NOTHROW jet_free(void *ptr)
|
||||
JEMALLOC_CXX_THROW;
|
||||
|
||||
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
|
||||
void JEMALLOC_NOTHROW *jet_mallocx(size_t size, int flags)
|
||||
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
|
||||
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
|
||||
void JEMALLOC_NOTHROW *jet_rallocx(void *ptr, size_t size,
|
||||
int flags) JEMALLOC_ALLOC_SIZE(2);
|
||||
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW jet_xallocx(void *ptr, size_t size,
|
||||
size_t extra, int flags);
|
||||
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW jet_sallocx(const void *ptr,
|
||||
int flags) JEMALLOC_ATTR(pure);
|
||||
JEMALLOC_EXPORT void JEMALLOC_NOTHROW jet_dallocx(void *ptr, int flags);
|
||||
JEMALLOC_EXPORT void JEMALLOC_NOTHROW jet_sdallocx(void *ptr, size_t size,
|
||||
int flags);
|
||||
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW jet_nallocx(size_t size, int flags)
|
||||
JEMALLOC_ATTR(pure);
|
||||
|
||||
JEMALLOC_EXPORT int JEMALLOC_NOTHROW jet_mallctl(const char *name,
|
||||
void *oldp, size_t *oldlenp, void *newp, size_t newlen);
|
||||
JEMALLOC_EXPORT int JEMALLOC_NOTHROW jet_mallctlnametomib(const char *name,
|
||||
size_t *mibp, size_t *miblenp);
|
||||
JEMALLOC_EXPORT int JEMALLOC_NOTHROW jet_mallctlbymib(const size_t *mib,
|
||||
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
|
||||
JEMALLOC_EXPORT void JEMALLOC_NOTHROW jet_malloc_stats_print(
|
||||
void (*write_cb)(void *, const char *), void *jet_cbopaque,
|
||||
const char *opts);
|
||||
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW jet_malloc_usable_size(
|
||||
JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW;
|
||||
#ifdef JEMALLOC_HAVE_MALLOC_SIZE
|
||||
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW jet_malloc_size(
|
||||
const void *ptr);
|
||||
#endif
|
||||
|
||||
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
|
||||
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
|
||||
void JEMALLOC_SYS_NOTHROW *jet_memalign(size_t alignment, size_t size)
|
||||
JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
|
||||
#endif
|
||||
|
||||
#ifdef JEMALLOC_OVERRIDE_VALLOC
|
||||
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
|
||||
void JEMALLOC_SYS_NOTHROW *jet_valloc(size_t size) JEMALLOC_CXX_THROW
|
||||
JEMALLOC_ATTR(malloc);
|
||||
#endif
|
@@ -13,11 +13,12 @@
# define je_mallctlnametomib mallctlnametomib
# define je_malloc malloc
# define je_malloc_conf malloc_conf
# define je_malloc_conf_2_conf_harder malloc_conf_2_conf_harder
# define je_malloc_message malloc_message
# define je_malloc_stats_print malloc_stats_print
# define je_malloc_usable_size malloc_usable_size
# define je_mallocx mallocx
# define je_smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756 smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756
# define je_smallocx_ca709c3139f77f4c00a903cdee46d71e9028f6c6 smallocx_ca709c3139f77f4c00a903cdee46d71e9028f6c6
# define je_nallocx nallocx
# define je_posix_memalign posix_memalign
# define je_rallocx rallocx
@@ -65,13 +65,13 @@ typedef bool (extent_merge_t)(extent_hooks_t *, void *, size_t, void *, size_t,
    bool, unsigned);

struct extent_hooks_s {
    extent_alloc_t *alloc;
    extent_dalloc_t *dalloc;
    extent_destroy_t *destroy;
    extent_commit_t *commit;
    extent_decommit_t *decommit;
    extent_purge_t *purge_lazy;
    extent_purge_t *purge_forced;
    extent_split_t *split;
    extent_merge_t *merge;
    extent_alloc_t *alloc;
    extent_dalloc_t *dalloc;
    extent_destroy_t *destroy;
    extent_commit_t *commit;
    extent_decommit_t *decommit;
    extent_purge_t *purge_lazy;
    extent_purge_t *purge_forced;
    extent_split_t *split;
    extent_merge_t *merge;
};
@@ -45,17 +45,17 @@
#define LG_VADDR 64

/* Defined if C11 atomics are available. */
#define JEMALLOC_C11_ATOMICS 1
#define JEMALLOC_C11_ATOMICS

/* Defined if GCC __atomic atomics are available. */
#define JEMALLOC_GCC_ATOMIC_ATOMICS 1
#define JEMALLOC_GCC_ATOMIC_ATOMICS
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1
#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS

/* Defined if GCC __sync atomics are available. */
#define JEMALLOC_GCC_SYNC_ATOMICS 1
#define JEMALLOC_GCC_SYNC_ATOMICS
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_SYNC_ATOMICS 1
#define JEMALLOC_GCC_U8_SYNC_ATOMICS

/*
 * Defined if __builtin_clz() and __builtin_clzl() are available.
@@ -86,6 +86,12 @@
/* Defined if pthread_setname_np(3) is available. */
/* #undef JEMALLOC_HAVE_PTHREAD_SETNAME_NP */

/* Defined if pthread_getname_np(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_GETNAME_NP

/* Defined if pthread_get_name_np(3) is available. */
/* #undef JEMALLOC_HAVE_PTHREAD_GET_NAME_NP */

/*
 * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
 */
@@ -99,7 +105,12 @@
/*
 * Defined if mach_absolute_time() is available.
 */
#define JEMALLOC_HAVE_MACH_ABSOLUTE_TIME 1
#define JEMALLOC_HAVE_MACH_ABSOLUTE_TIME

/*
 * Defined if clock_gettime(CLOCK_REALTIME, ...) is available.
 */
#define JEMALLOC_HAVE_CLOCK_REALTIME

/*
 * Defined if _malloc_thread_cleanup() exists. At least in the case of
@@ -163,6 +174,9 @@
/* Support utrace(2)-based tracing. */
/* #undef JEMALLOC_UTRACE */

/* Support utrace(2)-based tracing (label based signature). */
/* #undef JEMALLOC_UTRACE_LABEL */

/* Support optional abort() on OOM. */
/* #undef JEMALLOC_XMALLOC */

@@ -178,6 +192,9 @@
/* One page is 2^LG_PAGE bytes. */
#define LG_PAGE 14

/* Maximum number of regions in a slab. */
/* #undef CONFIG_LG_SLAB_MAXREGS */

/*
 * One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the
 * system does not explicitly support huge pages; system calls that require
@@ -291,17 +308,46 @@
 */
/* #undef JEMALLOC_MADVISE_DONTDUMP */

/*
 * Defined if MADV_[NO]CORE is supported as an argument to madvise.
 */
/* #undef JEMALLOC_MADVISE_NOCORE */

/* Defined if mprotect(2) is available. */
#define JEMALLOC_HAVE_MPROTECT

/*
 * Defined if transparent huge pages (THPs) are supported via the
 * MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
 */
/* #undef JEMALLOC_THP */

/* Defined if posix_madvise is available. */
/* #undef JEMALLOC_HAVE_POSIX_MADVISE */

/*
 * Method for purging unused pages using posix_madvise.
 *
 * posix_madvise(..., POSIX_MADV_DONTNEED)
 */
/* #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED */
/* #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS */

/*
 * Defined if memcntl page admin call is supported
 */
/* #undef JEMALLOC_HAVE_MEMCNTL */

/*
 * Defined if malloc_size is supported
 */
#define JEMALLOC_HAVE_MALLOC_SIZE

/* Define if operating system has alloca.h header. */
/* #undef JEMALLOC_HAS_ALLOCA_H */

/* C99 restrict keyword supported. */
#define JEMALLOC_HAS_RESTRICT 1
#define JEMALLOC_HAS_RESTRICT

/* For use by hash code. */
/* #undef JEMALLOC_BIG_ENDIAN */
@@ -351,7 +397,7 @@
/* #undef JEMALLOC_EXPORT */

/* config.malloc_conf options string. */
#define JEMALLOC_CONFIG_MALLOC_CONF ""
#define JEMALLOC_CONFIG_MALLOC_CONF "@JEMALLOC_CONFIG_MALLOC_CONF@"

/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
/* #undef JEMALLOC_IS_MALLOC */
@@ -364,4 +410,16 @@
/* Performs additional safety checks when defined. */
/* #undef JEMALLOC_OPT_SAFETY_CHECKS */

/* Is C++ support being built? */
/* #undef JEMALLOC_ENABLE_CXX */

/* Performs additional size checks when defined. */
/* #undef JEMALLOC_OPT_SIZE_CHECKS */

/* Allows sampled junk and stash for checking use-after-free when defined. */
/* #undef JEMALLOC_UAF_DETECTION */

/* Darwin VM_MAKE_TAG support */
#define JEMALLOC_HAVE_VM_MAKE_TAG

#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
@ -45,17 +45,17 @@
#define LG_VADDR 48

/* Defined if C11 atomics are available. */
#define JEMALLOC_C11_ATOMICS 1
#define JEMALLOC_C11_ATOMICS

/* Defined if GCC __atomic atomics are available. */
#define JEMALLOC_GCC_ATOMIC_ATOMICS 1
#define JEMALLOC_GCC_ATOMIC_ATOMICS
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1
#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS

/* Defined if GCC __sync atomics are available. */
#define JEMALLOC_GCC_SYNC_ATOMICS 1
#define JEMALLOC_GCC_SYNC_ATOMICS
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_SYNC_ATOMICS 1
#define JEMALLOC_GCC_U8_SYNC_ATOMICS

/*
* Defined if __builtin_clz() and __builtin_clzl() are available.
@ -86,6 +86,12 @@
/* Defined if pthread_setname_np(3) is available. */
/* #undef JEMALLOC_HAVE_PTHREAD_SETNAME_NP */

/* Defined if pthread_getname_np(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_GETNAME_NP

/* Defined if pthread_get_name_np(3) is available. */
/* #undef JEMALLOC_HAVE_PTHREAD_GET_NAME_NP */

/*
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
*/
@ -99,12 +105,12 @@
/*
* Defined if mach_absolute_time() is available.
*/
#define JEMALLOC_HAVE_MACH_ABSOLUTE_TIME 1
#define JEMALLOC_HAVE_MACH_ABSOLUTE_TIME

/*
* Defined if clock_gettime(CLOCK_REALTIME, ...) is available.
*/
#define JEMALLOC_HAVE_CLOCK_REALTIME 1
#define JEMALLOC_HAVE_CLOCK_REALTIME

/*
* Defined if _malloc_thread_cleanup() exists. At least in the case of
@ -168,6 +174,9 @@
/* Support utrace(2)-based tracing. */
/* #undef JEMALLOC_UTRACE */

/* Support utrace(2)-based tracing (label based signature). */
/* #undef JEMALLOC_UTRACE_LABEL */

/* Support optional abort() on OOM. */
/* #undef JEMALLOC_XMALLOC */

@ -183,6 +192,9 @@
/* One page is 2^LG_PAGE bytes. */
#define LG_PAGE 12

/* Maximum number of regions in a slab. */
/* #undef CONFIG_LG_SLAB_MAXREGS */

/*
* One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the
* system does not explicitly support huge pages; system calls that require
@ -296,17 +308,46 @@
*/
/* #undef JEMALLOC_MADVISE_DONTDUMP */

/*
* Defined if MADV_[NO]CORE is supported as an argument to madvise.
*/
/* #undef JEMALLOC_MADVISE_NOCORE */

/* Defined if mprotect(2) is available. */
#define JEMALLOC_HAVE_MPROTECT

/*
* Defined if transparent huge pages (THPs) are supported via the
* MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
*/
/* #undef JEMALLOC_THP */

/* Defined if posix_madvise is available. */
/* #undef JEMALLOC_HAVE_POSIX_MADVISE */

/*
* Method for purging unused pages using posix_madvise.
*
* posix_madvise(..., POSIX_MADV_DONTNEED)
*/
/* #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED */
/* #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS */

/*
* Defined if memcntl page admin call is supported
*/
/* #undef JEMALLOC_HAVE_MEMCNTL */

/*
* Defined if malloc_size is supported
*/
#define JEMALLOC_HAVE_MALLOC_SIZE

/* Define if operating system has alloca.h header. */
/* #undef JEMALLOC_HAS_ALLOCA_H */

/* C99 restrict keyword supported. */
#define JEMALLOC_HAS_RESTRICT 1
#define JEMALLOC_HAS_RESTRICT

/* For use by hash code. */
/* #undef JEMALLOC_BIG_ENDIAN */
@ -369,4 +410,16 @@
/* Performs additional safety checks when defined. */
/* #undef JEMALLOC_OPT_SAFETY_CHECKS */

/* Is C++ support being built? */
/* #undef JEMALLOC_ENABLE_CXX */

/* Performs additional size checks when defined. */
/* #undef JEMALLOC_OPT_SIZE_CHECKS */

/* Allows sampled junk and stash for checking use-after-free when defined. */
/* #undef JEMALLOC_UAF_DETECTION */

/* Darwin VM_MAKE_TAG support */
#define JEMALLOC_HAVE_VM_MAKE_TAG

#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
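A pattern repeated throughout these regenerated headers is that feature macros drop their explicit `1` value (for example, `#define JEMALLOC_C11_ATOMICS 1` becomes `#define JEMALLOC_C11_ATOMICS`). jemalloc checks these configuration macros with `#ifdef`/`#if defined(...)`, for which a bare define and a valued define behave identically. A quick, illustrative way to verify this with the C preprocessor directly (the macro names here are hypothetical):

```
# Both define styles survive an #ifdef check; each command prints its marker line.
printf '#define BARE\n#ifdef BARE\nbare-visible\n#endif\n' | cpp -P
printf '#define VALUED 1\n#ifdef VALUED\nvalued-visible\n#endif\n' | cpp -P
```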
@ -45,17 +45,17 @@
#define LG_VADDR 48

/* Defined if C11 atomics are available. */
#define JEMALLOC_C11_ATOMICS 1
#define JEMALLOC_C11_ATOMICS

/* Defined if GCC __atomic atomics are available. */
#define JEMALLOC_GCC_ATOMIC_ATOMICS 1
#define JEMALLOC_GCC_ATOMIC_ATOMICS
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1
#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS

/* Defined if GCC __sync atomics are available. */
#define JEMALLOC_GCC_SYNC_ATOMICS 1
#define JEMALLOC_GCC_SYNC_ATOMICS
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_SYNC_ATOMICS 1
#define JEMALLOC_GCC_U8_SYNC_ATOMICS

/*
* Defined if __builtin_clz() and __builtin_clzl() are available.
@ -83,9 +83,16 @@
/* Defined if pthread_atfork(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_ATFORK

/* Only since 12.1-STABLE */
/* Defined if pthread_setname_np(3) is available. */
// Only since 12.1-STABLE
// #define JEMALLOC_HAVE_PTHREAD_SETNAME_NP
/* #undef JEMALLOC_HAVE_PTHREAD_SETNAME_NP */

/* Only since 12.1-STABLE */
/* Defined if pthread_getname_np(3) is available. */
/* #undef JEMALLOC_HAVE_PTHREAD_GETNAME_NP */

/* Defined if pthread_get_name_np(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_GET_NAME_NP

/*
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
@ -95,7 +102,7 @@
/*
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
*/
#define JEMALLOC_HAVE_CLOCK_MONOTONIC 1
#define JEMALLOC_HAVE_CLOCK_MONOTONIC

/*
* Defined if mach_absolute_time() is available.
@ -105,7 +112,7 @@
/*
* Defined if clock_gettime(CLOCK_REALTIME, ...) is available.
*/
#define JEMALLOC_HAVE_CLOCK_REALTIME 1
#define JEMALLOC_HAVE_CLOCK_REALTIME

/*
* Defined if _malloc_thread_cleanup() exists. At least in the case of
@ -128,7 +135,7 @@
* _pthread_mutex_init_calloc_cb(), in which case the function is used in order
* to avoid recursive allocation during mutex initialization.
*/
#define JEMALLOC_MUTEX_INIT_CB 1
#define JEMALLOC_MUTEX_INIT_CB

/* Non-empty if the tls_model attribute is supported. */
#define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))
@ -161,7 +168,7 @@
* JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
* segment (DSS).
*/
/* #undef JEMALLOC_DSS */
#define JEMALLOC_DSS

/* Support memory filling (junk/zero). */
#define JEMALLOC_FILL
@ -169,6 +176,9 @@
/* Support utrace(2)-based tracing. */
/* #undef JEMALLOC_UTRACE */

/* Support utrace(2)-based tracing (label based signature). */
/* #undef JEMALLOC_UTRACE_LABEL */

/* Support optional abort() on OOM. */
/* #undef JEMALLOC_XMALLOC */

@ -184,6 +194,9 @@
/* One page is 2^LG_PAGE bytes. */
#define LG_PAGE 16

/* Maximum number of regions in a slab. */
/* #undef CONFIG_LG_SLAB_MAXREGS */

/*
* One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the
* system does not explicitly support huge pages; system calls that require
@ -297,17 +310,46 @@
*/
/* #undef JEMALLOC_MADVISE_DONTDUMP */

/*
* Defined if MADV_[NO]CORE is supported as an argument to madvise.
*/
#define JEMALLOC_MADVISE_NOCORE

/* Defined if mprotect(2) is available. */
#define JEMALLOC_HAVE_MPROTECT

/*
* Defined if transparent huge pages (THPs) are supported via the
* MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
*/
/* #undef JEMALLOC_THP */

/* Defined if posix_madvise is available. */
/* #undef JEMALLOC_HAVE_POSIX_MADVISE */

/*
* Method for purging unused pages using posix_madvise.
*
* posix_madvise(..., POSIX_MADV_DONTNEED)
*/
/* #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED */
/* #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS */

/*
* Defined if memcntl page admin call is supported
*/
/* #undef JEMALLOC_HAVE_MEMCNTL */

/*
* Defined if malloc_size is supported
*/
/* #undef JEMALLOC_HAVE_MALLOC_SIZE */

/* Define if operating system has alloca.h header. */
/* #undef JEMALLOC_HAS_ALLOCA_H */

/* C99 restrict keyword supported. */
#define JEMALLOC_HAS_RESTRICT 1
#define JEMALLOC_HAS_RESTRICT

/* For use by hash code. */
/* #undef JEMALLOC_BIG_ENDIAN */
@ -348,7 +390,7 @@
/*
* If defined, all the features necessary for background threads are present.
*/
#define JEMALLOC_BACKGROUND_THREAD 1
#define JEMALLOC_BACKGROUND_THREAD

/*
* If defined, jemalloc symbols are not exported (doesn't work when
@ -360,7 +402,7 @@
#define JEMALLOC_CONFIG_MALLOC_CONF "@JEMALLOC_CONFIG_MALLOC_CONF@"

/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
#define JEMALLOC_IS_MALLOC 1
#define JEMALLOC_IS_MALLOC

/*
* Defined if strerror_r returns char * if _GNU_SOURCE is defined.
@ -370,4 +412,16 @@
/* Performs additional safety checks when defined. */
/* #undef JEMALLOC_OPT_SAFETY_CHECKS */

/* Is C++ support being built? */
/* #undef JEMALLOC_ENABLE_CXX */

/* Performs additional size checks when defined. */
/* #undef JEMALLOC_OPT_SIZE_CHECKS */

/* Allows sampled junk and stash for checking use-after-free when defined. */
/* #undef JEMALLOC_UAF_DETECTION */

/* Darwin VM_MAKE_TAG support */
/* #undef JEMALLOC_HAVE_VM_MAKE_TAG */

#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
@ -45,17 +45,17 @@
#define LG_VADDR 48

/* Defined if C11 atomics are available. */
#define JEMALLOC_C11_ATOMICS 1
#define JEMALLOC_C11_ATOMICS

/* Defined if GCC __atomic atomics are available. */
#define JEMALLOC_GCC_ATOMIC_ATOMICS 1
#define JEMALLOC_GCC_ATOMIC_ATOMICS
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1
#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS

/* Defined if GCC __sync atomics are available. */
#define JEMALLOC_GCC_SYNC_ATOMICS 1
#define JEMALLOC_GCC_SYNC_ATOMICS
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_SYNC_ATOMICS 1
#define JEMALLOC_GCC_U8_SYNC_ATOMICS

/*
* Defined if __builtin_clz() and __builtin_clzl() are available.
@ -83,9 +83,16 @@
/* Defined if pthread_atfork(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_ATFORK

/* Only since 12.1-STABLE */
/* Defined if pthread_setname_np(3) is available. */
// Only since 12.1-STABLE
// #define JEMALLOC_HAVE_PTHREAD_SETNAME_NP
/* #undef JEMALLOC_HAVE_PTHREAD_SETNAME_NP */

/* Only since 12.1-STABLE */
/* Defined if pthread_getname_np(3) is available. */
/* #undef JEMALLOC_HAVE_PTHREAD_GETNAME_NP */

/* Defined if pthread_get_name_np(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_GET_NAME_NP

/*
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
@ -95,7 +102,7 @@
/*
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
*/
#define JEMALLOC_HAVE_CLOCK_MONOTONIC 1
#define JEMALLOC_HAVE_CLOCK_MONOTONIC

/*
* Defined if mach_absolute_time() is available.
@ -105,7 +112,7 @@
/*
* Defined if clock_gettime(CLOCK_REALTIME, ...) is available.
*/
#define JEMALLOC_HAVE_CLOCK_REALTIME 1
#define JEMALLOC_HAVE_CLOCK_REALTIME

/*
* Defined if _malloc_thread_cleanup() exists. At least in the case of
@ -128,7 +135,7 @@
* _pthread_mutex_init_calloc_cb(), in which case the function is used in order
* to avoid recursive allocation during mutex initialization.
*/
#define JEMALLOC_MUTEX_INIT_CB 1
#define JEMALLOC_MUTEX_INIT_CB

/* Non-empty if the tls_model attribute is supported. */
#define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))
@ -169,6 +176,9 @@
/* Support utrace(2)-based tracing. */
/* #undef JEMALLOC_UTRACE */

/* Support utrace(2)-based tracing (label based signature). */
/* #undef JEMALLOC_UTRACE_LABEL */

/* Support optional abort() on OOM. */
/* #undef JEMALLOC_XMALLOC */

@ -184,6 +194,9 @@
/* One page is 2^LG_PAGE bytes. */
#define LG_PAGE 12

/* Maximum number of regions in a slab. */
/* #undef CONFIG_LG_SLAB_MAXREGS */

/*
* One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the
* system does not explicitly support huge pages; system calls that require
@ -297,17 +310,46 @@
*/
/* #undef JEMALLOC_MADVISE_DONTDUMP */

/*
* Defined if MADV_[NO]CORE is supported as an argument to madvise.
*/
#define JEMALLOC_MADVISE_NOCORE

/* Defined if mprotect(2) is available. */
#define JEMALLOC_HAVE_MPROTECT

/*
* Defined if transparent huge pages (THPs) are supported via the
* MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
*/
/* #undef JEMALLOC_THP */

/* Defined if posix_madvise is available. */
/* #undef JEMALLOC_HAVE_POSIX_MADVISE */

/*
* Method for purging unused pages using posix_madvise.
*
* posix_madvise(..., POSIX_MADV_DONTNEED)
*/
/* #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED */
/* #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS */

/*
* Defined if memcntl page admin call is supported
*/
/* #undef JEMALLOC_HAVE_MEMCNTL */

/*
* Defined if malloc_size is supported
*/
/* #undef JEMALLOC_HAVE_MALLOC_SIZE */

/* Define if operating system has alloca.h header. */
/* #undef JEMALLOC_HAS_ALLOCA_H */

/* C99 restrict keyword supported. */
#define JEMALLOC_HAS_RESTRICT 1
#define JEMALLOC_HAS_RESTRICT

/* For use by hash code. */
/* #undef JEMALLOC_BIG_ENDIAN */
@ -348,7 +390,7 @@
/*
* If defined, all the features necessary for background threads are present.
*/
#define JEMALLOC_BACKGROUND_THREAD 1
#define JEMALLOC_BACKGROUND_THREAD

/*
* If defined, jemalloc symbols are not exported (doesn't work when
@ -360,7 +402,7 @@
#define JEMALLOC_CONFIG_MALLOC_CONF "@JEMALLOC_CONFIG_MALLOC_CONF@"

/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
#define JEMALLOC_IS_MALLOC 1
#define JEMALLOC_IS_MALLOC

/*
* Defined if strerror_r returns char * if _GNU_SOURCE is defined.
@ -370,4 +412,16 @@
/* Performs additional safety checks when defined. */
/* #undef JEMALLOC_OPT_SAFETY_CHECKS */

/* Is C++ support being built? */
/* #undef JEMALLOC_ENABLE_CXX */

/* Performs additional size checks when defined. */
/* #undef JEMALLOC_OPT_SIZE_CHECKS */

/* Allows sampled junk and stash for checking use-after-free when defined. */
/* #undef JEMALLOC_UAF_DETECTION */

/* Darwin VM_MAKE_TAG support */
/* #undef JEMALLOC_HAVE_VM_MAKE_TAG */

#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
@ -1,8 +0,0 @@
Here are pre-generated files from jemalloc on Linux aarch64.
You can obtain these files by running ./autogen.sh inside jemalloc source directory.

Added #define GNU_SOURCE
Added JEMALLOC_OVERRIDE___POSIX_MEMALIGN because why not.
Removed JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF because it's non standard.
Removed JEMALLOC_PURGE_MADVISE_FREE because it's available only from Linux 4.5.
Added JEMALLOC_CONFIG_MALLOC_CONF substitution
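The note removed above documents how these vendored headers were originally produced. A minimal sketch of that regeneration flow, assuming a jemalloc source checkout (the clone URL and the output path are illustrative, not part of this commit):

```
# Illustrative only: regenerate the configuration header inside a jemalloc checkout.
git clone https://github.com/jemalloc/jemalloc.git
cd jemalloc
./autogen.sh   # runs configure and emits include/jemalloc/internal/jemalloc_internal_defs.h
```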
@ -13,12 +13,14 @@
* Define overrides for non-standard allocator-related functions if they are
* present on the system.
*/
#define JEMALLOC_OVERRIDE___LIBC_CALLOC
#define JEMALLOC_OVERRIDE___LIBC_FREE
#define JEMALLOC_OVERRIDE___LIBC_MALLOC
#define JEMALLOC_OVERRIDE___LIBC_MEMALIGN
#define JEMALLOC_OVERRIDE___LIBC_REALLOC
#define JEMALLOC_OVERRIDE___LIBC_VALLOC
#if !defined(USE_MUSL)
#define JEMALLOC_OVERRIDE___LIBC_CALLOC
#define JEMALLOC_OVERRIDE___LIBC_FREE
#define JEMALLOC_OVERRIDE___LIBC_MALLOC
#define JEMALLOC_OVERRIDE___LIBC_MEMALIGN
#define JEMALLOC_OVERRIDE___LIBC_REALLOC
#define JEMALLOC_OVERRIDE___LIBC_VALLOC
#endif
/* #undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN */

/*
@ -45,17 +47,17 @@
#define LG_VADDR 48

/* Defined if C11 atomics are available. */
#define JEMALLOC_C11_ATOMICS 1
#define JEMALLOC_C11_ATOMICS

/* Defined if GCC __atomic atomics are available. */
#define JEMALLOC_GCC_ATOMIC_ATOMICS 1
#define JEMALLOC_GCC_ATOMIC_ATOMICS
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1
#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS

/* Defined if GCC __sync atomics are available. */
#define JEMALLOC_GCC_SYNC_ATOMICS 1
#define JEMALLOC_GCC_SYNC_ATOMICS
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_SYNC_ATOMICS 1
#define JEMALLOC_GCC_U8_SYNC_ATOMICS

/*
* Defined if __builtin_clz() and __builtin_clzl() are available.
@ -73,7 +75,7 @@
/*
* Defined if secure_getenv(3) is available.
*/
// #define JEMALLOC_HAVE_SECURE_GETENV
/* #undef JEMALLOC_HAVE_SECURE_GETENV */

/*
* Defined if issetugid(2) is available.
@ -86,21 +88,32 @@
/* Defined if pthread_setname_np(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_SETNAME_NP

/* Defined if pthread_getname_np(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_GETNAME_NP

/* Defined if pthread_get_name_np(3) is available. */
/* #undef JEMALLOC_HAVE_PTHREAD_GET_NAME_NP */

/*
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
*/
#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE 1
#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE

/*
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
*/
#define JEMALLOC_HAVE_CLOCK_MONOTONIC 1
#define JEMALLOC_HAVE_CLOCK_MONOTONIC

/*
* Defined if mach_absolute_time() is available.
*/
/* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */

/*
* Defined if clock_gettime(CLOCK_REALTIME, ...) is available.
*/
#define JEMALLOC_HAVE_CLOCK_REALTIME

/*
* Defined if _malloc_thread_cleanup() exists. At least in the case of
* FreeBSD, pthread_key_create() allocates, which if used during malloc
@ -163,6 +176,9 @@
/* Support utrace(2)-based tracing. */
/* #undef JEMALLOC_UTRACE */

/* Support utrace(2)-based tracing (label based signature). */
/* #undef JEMALLOC_UTRACE_LABEL */

/* Support optional abort() on OOM. */
/* #undef JEMALLOC_XMALLOC */

@ -178,6 +194,9 @@
/* One page is 2^LG_PAGE bytes. */
#define LG_PAGE 16

/* Maximum number of regions in a slab. */
/* #undef CONFIG_LG_SLAB_MAXREGS */

/*
* One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the
* system does not explicitly support huge pages; system calls that require
@ -291,17 +310,46 @@
*/
#define JEMALLOC_MADVISE_DONTDUMP

/*
* Defined if MADV_[NO]CORE is supported as an argument to madvise.
*/
/* #undef JEMALLOC_MADVISE_NOCORE */

/* Defined if mprotect(2) is available. */
#define JEMALLOC_HAVE_MPROTECT

/*
* Defined if transparent huge pages (THPs) are supported via the
* MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
*/
/* #undef JEMALLOC_THP */

/* Defined if posix_madvise is available. */
/* #undef JEMALLOC_HAVE_POSIX_MADVISE */

/*
* Method for purging unused pages using posix_madvise.
*
* posix_madvise(..., POSIX_MADV_DONTNEED)
*/
/* #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED */
/* #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS */

/*
* Defined if memcntl page admin call is supported
*/
/* #undef JEMALLOC_HAVE_MEMCNTL */

/*
* Defined if malloc_size is supported
*/
/* #undef JEMALLOC_HAVE_MALLOC_SIZE */

/* Define if operating system has alloca.h header. */
#define JEMALLOC_HAS_ALLOCA_H 1
#define JEMALLOC_HAS_ALLOCA_H

/* C99 restrict keyword supported. */
#define JEMALLOC_HAS_RESTRICT 1
#define JEMALLOC_HAS_RESTRICT

/* For use by hash code. */
/* #undef JEMALLOC_BIG_ENDIAN */
@ -342,7 +390,7 @@
/*
* If defined, all the features necessary for background threads are present.
*/
#define JEMALLOC_BACKGROUND_THREAD 1
#define JEMALLOC_BACKGROUND_THREAD

/*
* If defined, jemalloc symbols are not exported (doesn't work when
@ -354,7 +402,7 @@
#define JEMALLOC_CONFIG_MALLOC_CONF "@JEMALLOC_CONFIG_MALLOC_CONF@"

/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
#define JEMALLOC_IS_MALLOC 1
#define JEMALLOC_IS_MALLOC

/*
* Defined if strerror_r returns char * if _GNU_SOURCE is defined.
@ -364,4 +412,16 @@
/* Performs additional safety checks when defined. */
/* #undef JEMALLOC_OPT_SAFETY_CHECKS */

/* Is C++ support being built? */
/* #undef JEMALLOC_ENABLE_CXX */

/* Performs additional size checks when defined. */
/* #undef JEMALLOC_OPT_SIZE_CHECKS */

/* Allows sampled junk and stash for checking use-after-free when defined. */
/* #undef JEMALLOC_UAF_DETECTION */

/* Darwin VM_MAKE_TAG support */
/* #undef JEMALLOC_HAVE_VM_MAKE_TAG */

#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
@ -13,12 +13,14 @@
* Define overrides for non-standard allocator-related functions if they are
* present on the system.
*/
#define JEMALLOC_OVERRIDE___LIBC_CALLOC
#define JEMALLOC_OVERRIDE___LIBC_FREE
#define JEMALLOC_OVERRIDE___LIBC_MALLOC
#define JEMALLOC_OVERRIDE___LIBC_MEMALIGN
#define JEMALLOC_OVERRIDE___LIBC_REALLOC
#define JEMALLOC_OVERRIDE___LIBC_VALLOC
#if !defined(USE_MUSL)
#define JEMALLOC_OVERRIDE___LIBC_CALLOC
#define JEMALLOC_OVERRIDE___LIBC_FREE
#define JEMALLOC_OVERRIDE___LIBC_MALLOC
#define JEMALLOC_OVERRIDE___LIBC_MEMALIGN
#define JEMALLOC_OVERRIDE___LIBC_REALLOC
#define JEMALLOC_OVERRIDE___LIBC_VALLOC
#endif
/* #undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN */

/*
@ -45,17 +47,17 @@
#define LG_VADDR 64

/* Defined if C11 atomics are available. */
#define JEMALLOC_C11_ATOMICS 1
#define JEMALLOC_C11_ATOMICS

/* Defined if GCC __atomic atomics are available. */
#define JEMALLOC_GCC_ATOMIC_ATOMICS 1
#define JEMALLOC_GCC_ATOMIC_ATOMICS
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1
#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS

/* Defined if GCC __sync atomics are available. */
#define JEMALLOC_GCC_SYNC_ATOMICS 1
#define JEMALLOC_GCC_SYNC_ATOMICS
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_SYNC_ATOMICS 1
#define JEMALLOC_GCC_U8_SYNC_ATOMICS

/*
* Defined if __builtin_clz() and __builtin_clzl() are available.
@ -73,7 +75,7 @@
/*
* Defined if secure_getenv(3) is available.
*/
// #define JEMALLOC_HAVE_SECURE_GETENV
/* #undef JEMALLOC_HAVE_SECURE_GETENV */

/*
* Defined if issetugid(2) is available.
@ -86,21 +88,32 @@
/* Defined if pthread_setname_np(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_SETNAME_NP

/* Defined if pthread_getname_np(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_GETNAME_NP

/* Defined if pthread_get_name_np(3) is available. */
/* #undef JEMALLOC_HAVE_PTHREAD_GET_NAME_NP */

/*
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
*/
#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE 1
#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE

/*
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
*/
#define JEMALLOC_HAVE_CLOCK_MONOTONIC 1
#define JEMALLOC_HAVE_CLOCK_MONOTONIC

/*
* Defined if mach_absolute_time() is available.
*/
/* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */

/*
* Defined if clock_gettime(CLOCK_REALTIME, ...) is available.
*/
#define JEMALLOC_HAVE_CLOCK_REALTIME

/*
* Defined if _malloc_thread_cleanup() exists. At least in the case of
* FreeBSD, pthread_key_create() allocates, which if used during malloc
@ -163,6 +176,9 @@
/* Support utrace(2)-based tracing. */
/* #undef JEMALLOC_UTRACE */

/* Support utrace(2)-based tracing (label based signature). */
/* #undef JEMALLOC_UTRACE_LABEL */

/* Support optional abort() on OOM. */
/* #undef JEMALLOC_XMALLOC */

@ -178,6 +194,9 @@
/* One page is 2^LG_PAGE bytes. */
#define LG_PAGE 16

/* Maximum number of regions in a slab. */
/* #undef CONFIG_LG_SLAB_MAXREGS */

/*
* One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the
* system does not explicitly support huge pages; system calls that require
@ -291,17 +310,46 @@
*/
#define JEMALLOC_MADVISE_DONTDUMP

/*
* Defined if MADV_[NO]CORE is supported as an argument to madvise.
*/
/* #undef JEMALLOC_MADVISE_NOCORE */

/* Defined if mprotect(2) is available. */
#define JEMALLOC_HAVE_MPROTECT

/*
* Defined if transparent huge pages (THPs) are supported via the
* MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
*/
/* #undef JEMALLOC_THP */

/* Defined if posix_madvise is available. */
/* #undef JEMALLOC_HAVE_POSIX_MADVISE */

/*
* Method for purging unused pages using posix_madvise.
*
* posix_madvise(..., POSIX_MADV_DONTNEED)
*/
/* #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED */
/* #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS */

/*
* Defined if memcntl page admin call is supported
*/
/* #undef JEMALLOC_HAVE_MEMCNTL */

/*
* Defined if malloc_size is supported
*/
/* #undef JEMALLOC_HAVE_MALLOC_SIZE */

/* Define if operating system has alloca.h header. */
#define JEMALLOC_HAS_ALLOCA_H 1
#define JEMALLOC_HAS_ALLOCA_H

/* C99 restrict keyword supported. */
#define JEMALLOC_HAS_RESTRICT 1
#define JEMALLOC_HAS_RESTRICT

/* For use by hash code. */
/* #undef JEMALLOC_BIG_ENDIAN */
@ -342,7 +390,7 @@
/*
* If defined, all the features necessary for background threads are present.
*/
#define JEMALLOC_BACKGROUND_THREAD 1
#define JEMALLOC_BACKGROUND_THREAD

/*
* If defined, jemalloc symbols are not exported (doesn't work when
@ -354,7 +402,7 @@
#define JEMALLOC_CONFIG_MALLOC_CONF "@JEMALLOC_CONFIG_MALLOC_CONF@"

/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
#define JEMALLOC_IS_MALLOC 1
#define JEMALLOC_IS_MALLOC

/*
* Defined if strerror_r returns char * if _GNU_SOURCE is defined.
@ -364,4 +412,16 @@
/* Performs additional safety checks when defined. */
/* #undef JEMALLOC_OPT_SAFETY_CHECKS */

/* Is C++ support being built? */
/* #undef JEMALLOC_ENABLE_CXX */

/* Performs additional size checks when defined. */
/* #undef JEMALLOC_OPT_SIZE_CHECKS */

/* Allows sampled junk and stash for checking use-after-free when defined. */
/* #undef JEMALLOC_UAF_DETECTION */

/* Darwin VM_MAKE_TAG support */
/* #undef JEMALLOC_HAVE_VM_MAKE_TAG */

#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
@ -1,8 +0,0 @@
Here are pre-generated files from jemalloc on Linux risc-v.
You can obtain these files by running ./autogen.sh inside jemalloc source directory.

Added #define GNU_SOURCE
Added JEMALLOC_OVERRIDE___POSIX_MEMALIGN because why not.
Removed JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF because it's non standard.
Removed JEMALLOC_PURGE_MADVISE_FREE because it's available only from Linux 4.5.
Added JEMALLOC_CONFIG_MALLOC_CONF substitution
@ -13,12 +13,14 @@
* Define overrides for non-standard allocator-related functions if they are
* present on the system.
*/
#define JEMALLOC_OVERRIDE___LIBC_CALLOC
#define JEMALLOC_OVERRIDE___LIBC_FREE
#define JEMALLOC_OVERRIDE___LIBC_MALLOC
#define JEMALLOC_OVERRIDE___LIBC_MEMALIGN
#define JEMALLOC_OVERRIDE___LIBC_REALLOC
#define JEMALLOC_OVERRIDE___LIBC_VALLOC
#if !defined(USE_MUSL)
#define JEMALLOC_OVERRIDE___LIBC_CALLOC
#define JEMALLOC_OVERRIDE___LIBC_FREE
#define JEMALLOC_OVERRIDE___LIBC_MALLOC
#define JEMALLOC_OVERRIDE___LIBC_MEMALIGN
#define JEMALLOC_OVERRIDE___LIBC_REALLOC
#define JEMALLOC_OVERRIDE___LIBC_VALLOC
#endif
/* #undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN */

/*
@ -45,17 +47,17 @@
#define LG_VADDR 48

/* Defined if C11 atomics are available. */
#define JEMALLOC_C11_ATOMICS 1
#define JEMALLOC_C11_ATOMICS

/* Defined if GCC __atomic atomics are available. */
#define JEMALLOC_GCC_ATOMIC_ATOMICS 1
#define JEMALLOC_GCC_ATOMIC_ATOMICS
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1
#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS

/* Defined if GCC __sync atomics are available. */
#define JEMALLOC_GCC_SYNC_ATOMICS 1
#define JEMALLOC_GCC_SYNC_ATOMICS
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_SYNC_ATOMICS 1
#define JEMALLOC_GCC_U8_SYNC_ATOMICS

/*
* Defined if __builtin_clz() and __builtin_clzl() are available.
@ -73,7 +75,7 @@
/*
* Defined if secure_getenv(3) is available.
*/
// #define JEMALLOC_HAVE_SECURE_GETENV
/* #undef JEMALLOC_HAVE_SECURE_GETENV */

/*
* Defined if issetugid(2) is available.
@ -86,21 +88,32 @@
/* Defined if pthread_setname_np(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_SETNAME_NP

/* Defined if pthread_getname_np(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_GETNAME_NP

/* Defined if pthread_get_name_np(3) is available. */
/* #undef JEMALLOC_HAVE_PTHREAD_GET_NAME_NP */

/*
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
*/
#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE 1
#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE

/*
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
*/
#define JEMALLOC_HAVE_CLOCK_MONOTONIC 1
#define JEMALLOC_HAVE_CLOCK_MONOTONIC

/*
* Defined if mach_absolute_time() is available.
*/
/* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */

/*
* Defined if clock_gettime(CLOCK_REALTIME, ...) is available.
*/
#define JEMALLOC_HAVE_CLOCK_REALTIME

/*
* Defined if _malloc_thread_cleanup() exists. At least in the case of
* FreeBSD, pthread_key_create() allocates, which if used during malloc
@ -163,6 +176,9 @@
/* Support utrace(2)-based tracing. */
/* #undef JEMALLOC_UTRACE */

/* Support utrace(2)-based tracing (label based signature). */
/* #undef JEMALLOC_UTRACE_LABEL */

/* Support optional abort() on OOM. */
/* #undef JEMALLOC_XMALLOC */

@ -178,6 +194,9 @@
/* One page is 2^LG_PAGE bytes. */
#define LG_PAGE 16

/* Maximum number of regions in a slab. */
/* #undef CONFIG_LG_SLAB_MAXREGS */

/*
* One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the
* system does not explicitly support huge pages; system calls that require
@ -291,17 +310,46 @@
*/
#define JEMALLOC_MADVISE_DONTDUMP

/*
* Defined if MADV_[NO]CORE is supported as an argument to madvise.
*/
/* #undef JEMALLOC_MADVISE_NOCORE */

/* Defined if mprotect(2) is available. */
#define JEMALLOC_HAVE_MPROTECT

/*
* Defined if transparent huge pages (THPs) are supported via the
* MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
*/
/* #undef JEMALLOC_THP */

/* Defined if posix_madvise is available. */
/* #undef JEMALLOC_HAVE_POSIX_MADVISE */

/*
* Method for purging unused pages using posix_madvise.
*
* posix_madvise(..., POSIX_MADV_DONTNEED)
*/
/* #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED */
/* #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS */

/*
* Defined if memcntl page admin call is supported
*/
/* #undef JEMALLOC_HAVE_MEMCNTL */

/*
* Defined if malloc_size is supported
*/
/* #undef JEMALLOC_HAVE_MALLOC_SIZE */

/* Define if operating system has alloca.h header. */
#define JEMALLOC_HAS_ALLOCA_H 1
#define JEMALLOC_HAS_ALLOCA_H

/* C99 restrict keyword supported. */
#define JEMALLOC_HAS_RESTRICT 1
#define JEMALLOC_HAS_RESTRICT

/* For use by hash code. */
/* #undef JEMALLOC_BIG_ENDIAN */
@ -342,7 +390,7 @@
/*
* If defined, all the features necessary for background threads are present.
*/
#define JEMALLOC_BACKGROUND_THREAD 1
#define JEMALLOC_BACKGROUND_THREAD

/*
* If defined, jemalloc symbols are not exported (doesn't work when
@ -354,7 +402,7 @@
#define JEMALLOC_CONFIG_MALLOC_CONF "@JEMALLOC_CONFIG_MALLOC_CONF@"

/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
#define JEMALLOC_IS_MALLOC 1
#define JEMALLOC_IS_MALLOC

/*
* Defined if strerror_r returns char * if _GNU_SOURCE is defined.
@ -364,4 +412,16 @@
/* Performs additional safety checks when defined. */
/* #undef JEMALLOC_OPT_SAFETY_CHECKS */

/* Is C++ support being built? */
/* #undef JEMALLOC_ENABLE_CXX */

/* Performs additional size checks when defined. */
/* #undef JEMALLOC_OPT_SIZE_CHECKS */

/* Allows sampled junk and stash for checking use-after-free when defined. */
/* #undef JEMALLOC_UAF_DETECTION */

/* Darwin VM_MAKE_TAG support */
/* #undef JEMALLOC_HAVE_VM_MAKE_TAG */

#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
@ -1,8 +0,0 @@
Here are pre-generated files from jemalloc on Linux x86_64.
You can obtain these files by running ./autogen.sh inside jemalloc source directory.

Added #define GNU_SOURCE
Added JEMALLOC_OVERRIDE___POSIX_MEMALIGN because why not.
Removed JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF because it's non standard.
Removed JEMALLOC_PURGE_MADVISE_FREE because it's available only from Linux 4.5.
Added JEMALLOC_CONFIG_MALLOC_CONF substitution
@ -47,17 +47,17 @@
#define LG_VADDR 48

/* Defined if C11 atomics are available. */
#define JEMALLOC_C11_ATOMICS 1
#define JEMALLOC_C11_ATOMICS

/* Defined if GCC __atomic atomics are available. */
#define JEMALLOC_GCC_ATOMIC_ATOMICS 1
#define JEMALLOC_GCC_ATOMIC_ATOMICS
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1
#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS

/* Defined if GCC __sync atomics are available. */
#define JEMALLOC_GCC_SYNC_ATOMICS 1
#define JEMALLOC_GCC_SYNC_ATOMICS
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_SYNC_ATOMICS 1
#define JEMALLOC_GCC_U8_SYNC_ATOMICS

/*
* Defined if __builtin_clz() and __builtin_clzl() are available.
@ -75,7 +75,7 @@
/*
* Defined if secure_getenv(3) is available.
*/
// #define JEMALLOC_HAVE_SECURE_GETENV
/* #undef JEMALLOC_HAVE_SECURE_GETENV */

/*
* Defined if issetugid(2) is available.
@ -88,21 +88,32 @@
/* Defined if pthread_setname_np(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_SETNAME_NP

/* Defined if pthread_getname_np(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_GETNAME_NP

/* Defined if pthread_get_name_np(3) is available. */
/* #undef JEMALLOC_HAVE_PTHREAD_GET_NAME_NP */

/*
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
*/
#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE 1
#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE

/*
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
*/
#define JEMALLOC_HAVE_CLOCK_MONOTONIC 1
#define JEMALLOC_HAVE_CLOCK_MONOTONIC

/*
* Defined if mach_absolute_time() is available.
*/
/* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */

/*
* Defined if clock_gettime(CLOCK_REALTIME, ...) is available.
*/
#define JEMALLOC_HAVE_CLOCK_REALTIME

/*
* Defined if _malloc_thread_cleanup() exists. At least in the case of
* FreeBSD, pthread_key_create() allocates, which if used during malloc
@ -165,6 +176,9 @@
/* Support utrace(2)-based tracing. */
/* #undef JEMALLOC_UTRACE */

/* Support utrace(2)-based tracing (label based signature). */
/* #undef JEMALLOC_UTRACE_LABEL */

/* Support optional abort() on OOM. */
/* #undef JEMALLOC_XMALLOC */

@ -180,6 +194,9 @@
/* One page is 2^LG_PAGE bytes. */
#define LG_PAGE 12

/* Maximum number of regions in a slab. */
/* #undef CONFIG_LG_SLAB_MAXREGS */

/*
* One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the
* system does not explicitly support huge pages; system calls that require
@ -293,17 +310,46 @@
*/
#define JEMALLOC_MADVISE_DONTDUMP

/*
* Defined if MADV_[NO]CORE is supported as an argument to madvise.
*/
/* #undef JEMALLOC_MADVISE_NOCORE */

/* Defined if mprotect(2) is available. */
#define JEMALLOC_HAVE_MPROTECT

/*
* Defined if transparent huge pages (THPs) are supported via the
* MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
*/
/* #undef JEMALLOC_THP */

/* Defined if posix_madvise is available. */
/* #undef JEMALLOC_HAVE_POSIX_MADVISE */

/*
* Method for purging unused pages using posix_madvise.
*
* posix_madvise(..., POSIX_MADV_DONTNEED)
*/
/* #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED */
/* #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS */

/*
* Defined if memcntl page admin call is supported
*/
/* #undef JEMALLOC_HAVE_MEMCNTL */

/*
* Defined if malloc_size is supported
*/
/* #undef JEMALLOC_HAVE_MALLOC_SIZE */

/* Define if operating system has alloca.h header. */
#define JEMALLOC_HAS_ALLOCA_H 1
#define JEMALLOC_HAS_ALLOCA_H

/* C99 restrict keyword supported. */
#define JEMALLOC_HAS_RESTRICT 1
#define JEMALLOC_HAS_RESTRICT

/* For use by hash code. */
/* #undef JEMALLOC_BIG_ENDIAN */
@ -344,7 +390,7 @@
/*
* If defined, all the features necessary for background threads are present.
*/
#define JEMALLOC_BACKGROUND_THREAD 1
#define JEMALLOC_BACKGROUND_THREAD

/*
* If defined, jemalloc symbols are not exported (doesn't work when
@ -356,7 +402,7 @@
#define JEMALLOC_CONFIG_MALLOC_CONF "@JEMALLOC_CONFIG_MALLOC_CONF@"

/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
#define JEMALLOC_IS_MALLOC 1
#define JEMALLOC_IS_MALLOC

/*
* Defined if strerror_r returns char * if _GNU_SOURCE is defined.
@ -366,4 +412,16 @@
/* Performs additional safety checks when defined. */
/* #undef JEMALLOC_OPT_SAFETY_CHECKS */

/* Is C++ support being built? */
/* #undef JEMALLOC_ENABLE_CXX */

/* Performs additional size checks when defined. */
/* #undef JEMALLOC_OPT_SIZE_CHECKS */

/* Allows sampled junk and stash for checking use-after-free when defined. */
/* #undef JEMALLOC_UAF_DETECTION */

/* Darwin VM_MAKE_TAG support */
/* #undef JEMALLOC_HAVE_VM_MAKE_TAG */

#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
@ -1,4 +1,8 @@
if (APPLE OR NOT ARCH_AMD64 OR SANITIZE STREQUAL "undefined")
# During cross-compilation in our CI we have to use llvm-tblgen and other building tools
# tools to be build for host architecture and everything else for target architecture (e.g. AArch64)
# Possible workaround is to use llvm-tblgen from some package...
# But lets just enable LLVM for native builds
if (CMAKE_CROSSCOMPILING OR SANITIZE STREQUAL "undefined")
set (ENABLE_EMBEDDED_COMPILER_DEFAULT OFF)
else()
set (ENABLE_EMBEDDED_COMPILER_DEFAULT ON)
@ -6,7 +10,7 @@ endif()
option (ENABLE_EMBEDDED_COMPILER "Enable support for 'compile_expressions' option for query execution" ${ENABLE_EMBEDDED_COMPILER_DEFAULT})

if (NOT ENABLE_EMBEDDED_COMPILER)
set (USE_EMBEDDED_COMPILER 0)
message(STATUS "Not using LLVM")
return()
endif()

@ -22,9 +26,6 @@ set (LLVM_LIBRARY_DIRS "${ClickHouse_BINARY_DIR}/contrib/llvm/llvm")
set (REQUIRED_LLVM_LIBRARIES
LLVMExecutionEngine
LLVMRuntimeDyld
LLVMX86CodeGen
LLVMX86Desc
LLVMX86Info
LLVMAsmPrinter
LLVMDebugInfoDWARF
LLVMGlobalISel
@ -56,6 +57,12 @@ set (REQUIRED_LLVM_LIBRARIES
LLVMDemangle
)

if (ARCH_AMD64)
list(APPEND REQUIRED_LLVM_LIBRARIES LLVMX86Info LLVMX86Desc LLVMX86CodeGen)
elseif (ARCH_AARCH64)
list(APPEND REQUIRED_LLVM_LIBRARIES LLVMAArch64Info LLVMAArch64Desc LLVMAArch64CodeGen)
endif ()

#function(llvm_libs_all REQUIRED_LLVM_LIBRARIES)
# llvm_map_components_to_libnames (result all)
# if (USE_STATIC_LIBRARIES OR NOT "LLVM" IN_LIST result)
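With the default now derived from `CMAKE_CROSSCOMPILING`, a native build can still force the embedded compiler on or off explicitly through the `ENABLE_EMBEDDED_COMPILER` option shown above; a hypothetical configure invocation:

```
# Illustrative: explicitly enable the embedded LLVM compiler for a native build.
mkdir -p build && cd build
cmake -DENABLE_EMBEDDED_COMPILER=ON ..
```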
@ -71,7 +71,6 @@ foreach (FILENAME mutex.h)
add_dependencies (re2_st transform_${FILENAME})
endforeach ()

# NOTE: you should not change name of library here, since it is used for PVS
# (see docker/test/pvs/Dockerfile), to generate required header (see above)
# NOTE: you should not change name of library here, since it is used to generate required header (see above)
add_library(ch_contrib::re2 ALIAS re2)
add_library(ch_contrib::re2_st ALIAS re2_st)
debian/.gitignore vendored
@ -10,7 +10,6 @@ clickhouse-common-static/
clickhouse-server-base/
clickhouse-server-common/
clickhouse-server/
clickhouse-test/
debhelper-build-stamp
files
*.debhelper.log
debian/clickhouse-test.install vendored
@ -1,2 +0,0 @@
usr/bin/clickhouse-test
usr/share/clickhouse-test/*
debian/control vendored
@ -56,9 +56,3 @@ Replaces: clickhouse-common-dbg
Conflicts: clickhouse-common-dbg
Description: debugging symbols for clickhouse-common-static
This package contains the debugging symbols for clickhouse-common.

Package: clickhouse-test
Priority: optional
Architecture: all
Depends: ${shlibs:Depends}, ${misc:Depends}, clickhouse-client, bash, expect, python3, python3-lxml, python3-termcolor, python3-requests, curl, perl, sudo, openssl, netcat-openbsd, telnet, brotli, bsdutils
Description: ClickHouse tests
@ -7,7 +7,6 @@
"name": "clickhouse/binary-builder",
"dependent": [
"docker/test/split_build_smoke_test",
"docker/test/pvs",
"docker/test/codebrowser"
]
},
@ -31,11 +30,6 @@
"name": "clickhouse/performance-comparison",
"dependent": []
},
"docker/test/pvs": {
"only_amd64": true,
"name": "clickhouse/pvs-test",
"dependent": []
},
"docker/test/util": {
"name": "clickhouse/test-util",
"dependent": [
@ -3,26 +3,25 @@ compilers and build settings. Correctly configured Docker daemon is single depen

Usage:

Build deb package with `clang-11` in `debug` mode:
Build deb package with `clang-14` in `debug` mode:
```
$ mkdir deb/test_output
$ ./packager --output-dir deb/test_output/ --package-type deb --compiler=clang-11 --build-type=debug
$ ./packager --output-dir deb/test_output/ --package-type deb --compiler=clang-14 --build-type=debug
$ ls -l deb/test_output
-rw-r--r-- 1 root root 3730 clickhouse-client_18.14.2+debug_all.deb
-rw-r--r-- 1 root root 84221888 clickhouse-common-static_18.14.2+debug_amd64.deb
-rw-r--r-- 1 root root 255967314 clickhouse-common-static-dbg_18.14.2+debug_amd64.deb
-rw-r--r-- 1 root root 14940 clickhouse-server_18.14.2+debug_all.deb
-rw-r--r-- 1 root root 340206010 clickhouse-server-base_18.14.2+debug_amd64.deb
-rw-r--r-- 1 root root 7900 clickhouse-server-common_18.14.2+debug_all.deb
-rw-r--r-- 1 root root 2880432 clickhouse-test_18.14.2+debug_all.deb
-rw-r--r-- 1 root root 3730 clickhouse-client_22.2.2+debug_all.deb
-rw-r--r-- 1 root root 84221888 clickhouse-common-static_22.2.2+debug_amd64.deb
-rw-r--r-- 1 root root 255967314 clickhouse-common-static-dbg_22.2.2+debug_amd64.deb
-rw-r--r-- 1 root root 14940 clickhouse-server_22.2.2+debug_all.deb
-rw-r--r-- 1 root root 340206010 clickhouse-server-base_22.2.2+debug_amd64.deb
-rw-r--r-- 1 root root 7900 clickhouse-server-common_22.2.2+debug_all.deb

```

Build ClickHouse binary with `clang-11` and `address` sanitizer in `relwithdebuginfo`
Build ClickHouse binary with `clang-14` and `address` sanitizer in `relwithdebuginfo`
mode:
```
$ mkdir $HOME/some_clickhouse
$ ./packager --output-dir=$HOME/some_clickhouse --package-type binary --compiler=clang-11 --sanitizer=address
$ ./packager --output-dir=$HOME/some_clickhouse --package-type binary --compiler=clang-14 --sanitizer=address
$ ls -l $HOME/some_clickhouse
-rwxr-xr-x 1 root root 787061952 clickhouse
lrwxrwxrwx 1 root root 10 clickhouse-benchmark -> clickhouse
@ -95,6 +95,14 @@ RUN add-apt-repository ppa:ubuntu-toolchain-r/test --yes \
&& apt-get install gcc-11 g++-11 --yes \
&& apt-get clean

# Architecture of the image when BuildKit/buildx is used
ARG TARGETARCH
ARG NFPM_VERSION=2.15.0

RUN arch=${TARGETARCH:-amd64} \
&& curl -Lo /tmp/nfpm.deb "https://github.com/goreleaser/nfpm/releases/download/v${NFPM_VERSION}/nfpm_${arch}.deb" \
&& dpkg -i /tmp/nfpm.deb \
&& rm /tmp/nfpm.deb

COPY build.sh /
CMD ["bash", "-c", "/build.sh 2>&1 | ts"]
CMD ["bash", "-c", "/build.sh 2>&1"]
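For context, the `TARGETARCH` build argument introduced above is populated automatically when the image is built with BuildKit/buildx, which is how the nfpm download resolves to the right `.deb` architecture; a hypothetical multi-arch build invocation (the image tag and context path are assumptions, not taken from this commit):

```
# Illustrative: TARGETARCH becomes "arm64" here; it defaults to "amd64" otherwise.
docker buildx build --platform linux/arm64 -t clickhouse/binary-builder docker/packager/binary
```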
@ -1,7 +1,13 @@
#!/usr/bin/env bash

exec &> >(ts)
set -x -e

cache_status () {
ccache --show-config ||:
ccache --show-stats ||:
}

mkdir -p build/cmake/toolchain/darwin-x86_64
tar xJf MacOSX11.0.sdk.tar.xz -C build/cmake/toolchain/darwin-x86_64 --strip-components=1
ln -sf darwin-x86_64 build/cmake/toolchain/darwin-aarch64
@ -19,15 +25,23 @@ read -ra CMAKE_FLAGS <<< "${CMAKE_FLAGS:-}"
env
cmake --debug-trycompile --verbose=1 -DCMAKE_VERBOSE_MAKEFILE=1 -LA "-DCMAKE_BUILD_TYPE=$BUILD_TYPE" "-DSANITIZE=$SANITIZER" -DENABLE_CHECK_HEAVY_BUILDS=1 "${CMAKE_FLAGS[@]}" ..

ccache --show-config ||:
ccache --show-stats ||:
cache_status
# clear cache stats
ccache --zero-stats ||:

# shellcheck disable=SC2086 # No quotes because I want it to expand to nothing if empty.
# No quotes because I want it to expand to nothing if empty.
# shellcheck disable=SC2086
ninja $NINJA_FLAGS clickhouse-bundle

ccache --show-config ||:
ccache --show-stats ||:
cache_status

if [ -n "$MAKE_DEB" ]; then
rm -rf /build/packages/root
# No quotes because I want it to expand to nothing if empty.
# shellcheck disable=SC2086
DESTDIR=/build/packages/root ninja $NINJA_FLAGS install
bash -x /build/packages/build
fi

mv ./programs/clickhouse* /output
mv ./src/unit_tests_dbms /output ||: # may not exist for some binary builds
@ -84,8 +98,7 @@ fi
# ../docker/packager/other/fuzzer.sh
# fi

ccache --show-config ||:
ccache --show-stats ||:
cache_status

if [ "${CCACHE_DEBUG:-}" == "1" ]
then
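The `exec &> >(ts)` line added near the top of this script redirects all of the script's output through `ts` (from moreutils), so every build log line gets a timestamp prefix. A small, illustrative demonstration of the effect:

```
# Illustrative: 'ts' prefixes each line it reads with a timestamp.
echo "ninja started" | ts
# Example output: Mar 01 12:00:00 ninja started
```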
@ -1,5 +1,5 @@
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import subprocess
import os
import argparse
@ -8,36 +8,39 @@ import sys

SCRIPT_PATH = os.path.realpath(__file__)

IMAGE_MAP = {
"deb": "clickhouse/deb-builder",
"binary": "clickhouse/binary-builder",
}

def check_image_exists_locally(image_name):
try:
output = subprocess.check_output("docker images -q {} 2> /dev/null".format(image_name), shell=True)
output = subprocess.check_output(
f"docker images -q {image_name} 2> /dev/null", shell=True
)
return output != ""
except subprocess.CalledProcessError as ex:
except subprocess.CalledProcessError:
return False


def pull_image(image_name):
try:
subprocess.check_call("docker pull {}".format(image_name), shell=True)
subprocess.check_call(f"docker pull {image_name}", shell=True)
return True
except subprocess.CalledProcessError as ex:
logging.info("Cannot pull image {}".format(image_name))
except subprocess.CalledProcessError:
logging.info(f"Cannot pull image {image_name}")
return False


def build_image(image_name, filepath):
context = os.path.dirname(filepath)
build_cmd = "docker build --network=host -t {} -f {} {}".format(image_name, filepath, context)
logging.info("Will build image with cmd: '{}'".format(build_cmd))
build_cmd = f"docker build --network=host -t {image_name} -f {filepath} {context}"
logging.info("Will build image with cmd: '%s'", build_cmd)
subprocess.check_call(
build_cmd,
shell=True,
)

def run_docker_image_with_env(image_name, output, env_variables, ch_root, ccache_dir, docker_image_version):

def run_docker_image_with_env(
image_name, output, env_variables, ch_root, ccache_dir, docker_image_version
):
env_part = " -e ".join(env_variables)
if env_part:
env_part = " -e " + env_part
@ -47,28 +50,52 @@ def run_docker_image_with_env(image_name, output, env_variables, ch_root, ccache
else:
interactive = ""

cmd = "docker run --network=host --rm --volume={output_path}:/output --volume={ch_root}:/build --volume={ccache_dir}:/ccache {env} {interactive} {img_name}".format(
output_path=output,
ch_root=ch_root,
ccache_dir=ccache_dir,
env=env_part,
img_name=image_name + ":" + docker_image_version,
interactive=interactive
cmd = (
f"docker run --network=host --rm --volume={output}:/output "
f"--volume={ch_root}:/build --volume={ccache_dir}:/ccache {env_part} "
f"{interactive} {image_name}:{docker_image_version}"
)

logging.info("Will build ClickHouse pkg with cmd: '{}'".format(cmd))
logging.info("Will build ClickHouse pkg with cmd: '%s'", cmd)

subprocess.check_call(cmd, shell=True)

def parse_env_variables(build_type, compiler, sanitizer, package_type, image_type, cache, distcc_hosts, split_binary, clang_tidy, version, author, official, alien_pkgs, with_coverage, with_binaries):

def is_release_build(build_type, package_type, sanitizer, split_binary):
return (
build_type == ""
and package_type == "deb"
and sanitizer == ""
and not split_binary
)


def parse_env_variables(
build_type,
compiler,
sanitizer,
package_type,
image_type,
cache,
distcc_hosts,
split_binary,
clang_tidy,
version,
author,
official,
additional_pkgs,
with_coverage,
with_binaries,
):
DARWIN_SUFFIX = "-darwin"
DARWIN_ARM_SUFFIX = "-darwin-aarch64"
ARM_SUFFIX = "-aarch64"
FREEBSD_SUFFIX = "-freebsd"
PPC_SUFFIX = '-ppc64le'
PPC_SUFFIX = "-ppc64le"

result = []
cmake_flags = ['$CMAKE_FLAGS']
result.append("OUTPUT_DIR=/output")
cmake_flags = ["$CMAKE_FLAGS"]

is_cross_darwin = compiler.endswith(DARWIN_SUFFIX)
is_cross_darwin_arm = compiler.endswith(DARWIN_ARM_SUFFIX)
@ -77,46 +104,72 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ
is_cross_freebsd = compiler.endswith(FREEBSD_SUFFIX)

if is_cross_darwin:
cc = compiler[:-len(DARWIN_SUFFIX)]
cc = compiler[: -len(DARWIN_SUFFIX)]
cmake_flags.append("-DCMAKE_AR:FILEPATH=/cctools/bin/x86_64-apple-darwin-ar")
cmake_flags.append("-DCMAKE_INSTALL_NAME_TOOL=/cctools/bin/x86_64-apple-darwin-install_name_tool")
cmake_flags.append("-DCMAKE_RANLIB:FILEPATH=/cctools/bin/x86_64-apple-darwin-ranlib")
cmake_flags.append(
"-DCMAKE_INSTALL_NAME_TOOL=/cctools/bin/"
"x86_64-apple-darwin-install_name_tool"
)
cmake_flags.append(
"-DCMAKE_RANLIB:FILEPATH=/cctools/bin/x86_64-apple-darwin-ranlib"
)
cmake_flags.append("-DLINKER_NAME=/cctools/bin/x86_64-apple-darwin-ld")
cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/darwin/toolchain-x86_64.cmake")
cmake_flags.append(
"-DCMAKE_TOOLCHAIN_FILE=/build/cmake/darwin/toolchain-x86_64.cmake"
)
elif is_cross_darwin_arm:
cc = compiler[:-len(DARWIN_ARM_SUFFIX)]
cc = compiler[: -len(DARWIN_ARM_SUFFIX)]
cmake_flags.append("-DCMAKE_AR:FILEPATH=/cctools/bin/aarch64-apple-darwin-ar")
cmake_flags.append("-DCMAKE_INSTALL_NAME_TOOL=/cctools/bin/aarch64-apple-darwin-install_name_tool")
cmake_flags.append("-DCMAKE_RANLIB:FILEPATH=/cctools/bin/aarch64-apple-darwin-ranlib")
cmake_flags.append(
"-DCMAKE_INSTALL_NAME_TOOL=/cctools/bin/"
"aarch64-apple-darwin-install_name_tool"
)
cmake_flags.append(
"-DCMAKE_RANLIB:FILEPATH=/cctools/bin/aarch64-apple-darwin-ranlib"
)
cmake_flags.append("-DLINKER_NAME=/cctools/bin/aarch64-apple-darwin-ld")
cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/darwin/toolchain-aarch64.cmake")
cmake_flags.append(
"-DCMAKE_TOOLCHAIN_FILE=/build/cmake/darwin/toolchain-aarch64.cmake"
)
elif is_cross_arm:
cc = compiler[:-len(ARM_SUFFIX)]
cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-aarch64.cmake")
result.append("DEB_ARCH_FLAG=-aarm64")
cc = compiler[: -len(ARM_SUFFIX)]
cmake_flags.append(
"-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-aarch64.cmake"
)
result.append("DEB_ARCH=arm64")
elif is_cross_freebsd:
cc = compiler[:-len(FREEBSD_SUFFIX)]
cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/freebsd/toolchain-x86_64.cmake")
cc = compiler[: -len(FREEBSD_SUFFIX)]
cmake_flags.append(
"-DCMAKE_TOOLCHAIN_FILE=/build/cmake/freebsd/toolchain-x86_64.cmake"
)
elif is_cross_ppc:
cc = compiler[:-len(PPC_SUFFIX)]
cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-ppc64le.cmake")
cc = compiler[: -len(PPC_SUFFIX)]
cmake_flags.append(
"-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-ppc64le.cmake"
)
else:
cc = compiler
result.append("DEB_ARCH_FLAG=-aamd64")
result.append("DEB_ARCH=amd64")

cxx = cc.replace('gcc', 'g++').replace('clang', 'clang++')
cxx = cc.replace("gcc", "g++").replace("clang", "clang++")

if image_type == "deb":
result.append("DEB_CC={}".format(cc))
result.append("DEB_CXX={}".format(cxx))
# For building fuzzers
result.append("CC={}".format(cc))
result.append("CXX={}".format(cxx))
elif image_type == "binary":
result.append("CC={}".format(cc))
result.append("CXX={}".format(cxx))
cmake_flags.append('-DCMAKE_C_COMPILER=`which {}`'.format(cc))
cmake_flags.append('-DCMAKE_CXX_COMPILER=`which {}`'.format(cxx))
result.append("MAKE_DEB=true")
cmake_flags.append("-DENABLE_TESTS=0")
cmake_flags.append("-DENABLE_UTILS=0")
cmake_flags.append("-DCMAKE_EXPORT_NO_PACKAGE_REGISTRY=ON")
cmake_flags.append("-DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON")
cmake_flags.append("-DCMAKE_AUTOGEN_VERBOSE=ON")
cmake_flags.append("-DCMAKE_INSTALL_PREFIX=/usr")
cmake_flags.append("-DCMAKE_INSTALL_SYSCONFDIR=/etc")
cmake_flags.append("-DCMAKE_INSTALL_LOCALSTATEDIR=/var")
if is_release_build(build_type, package_type, sanitizer, split_binary):
cmake_flags.append("-DINSTALL_STRIPPED_BINARIES=ON")

result.append(f"CC={cc}")
result.append(f"CXX={cxx}")
cmake_flags.append(f"-DCMAKE_C_COMPILER={cc}")
cmake_flags.append(f"-DCMAKE_CXX_COMPILER={cxx}")

# Create combined output archive for split build and for performance tests.
if package_type == "performance":
@ -126,12 +179,14 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ
result.append("COMBINED_OUTPUT=shared_build")

if sanitizer:
result.append("SANITIZER={}".format(sanitizer))
result.append(f"SANITIZER={sanitizer}")
if build_type:
result.append("BUILD_TYPE={}".format(build_type))
result.append(f"BUILD_TYPE={build_type.capitalize()}")
else:
result.append("BUILD_TYPE=None")

if cache == 'distcc':
result.append("CCACHE_PREFIX={}".format(cache))
if cache == "distcc":
result.append(f"CCACHE_PREFIX={cache}")

if cache:
result.append("CCACHE_DIR=/ccache")
@ -142,109 +197,188 @@
|
||||
# result.append("CCACHE_UMASK=777")
|
||||
|
||||
if distcc_hosts:
|
||||
hosts_with_params = ["{}/24,lzo".format(host) for host in distcc_hosts] + ["localhost/`nproc`"]
|
||||
result.append('DISTCC_HOSTS="{}"'.format(" ".join(hosts_with_params)))
|
||||
hosts_with_params = [f"{host}/24,lzo" for host in distcc_hosts] + [
|
||||
"localhost/`nproc`"
|
||||
]
|
||||
result.append('DISTCC_HOSTS="' + " ".join(hosts_with_params) + '"')
|
||||
elif cache == "distcc":
|
||||
result.append('DISTCC_HOSTS="{}"'.format("localhost/`nproc`"))
|
||||
result.append('DISTCC_HOSTS="localhost/`nproc`"')
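As an illustration (assumed host list), the branch above produces an environment entry like the following:
# Hypothetical: distcc_hosts = ["10.0.0.1", "10.0.0.2"] yields
#   DISTCC_HOSTS="10.0.0.1/24,lzo 10.0.0.2/24,lzo localhost/`nproc`"
# i.e. each remote host gets a /24 netmask and LZO compression, with a local fallback.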
|
||||
|
||||
if alien_pkgs:
|
||||
result.append("ALIEN_PKGS='" + ' '.join(['--' + pkg for pkg in alien_pkgs]) + "'")
|
||||
if additional_pkgs:
|
||||
result.append("MAKE_APK=true")
|
||||
result.append("MAKE_RPM=true")
|
||||
result.append("MAKE_TGZ=true")
|
||||
|
||||
if with_binaries == "programs":
|
||||
result.append('BINARY_OUTPUT=programs')
|
||||
result.append("BINARY_OUTPUT=programs")
|
||||
elif with_binaries == "tests":
|
||||
result.append('ENABLE_TESTS=1')
|
||||
result.append('BINARY_OUTPUT=tests')
|
||||
cmake_flags.append('-DENABLE_TESTS=1')
|
||||
result.append("ENABLE_TESTS=1")
|
||||
result.append("BINARY_OUTPUT=tests")
|
||||
cmake_flags.append("-DENABLE_TESTS=1")
|
||||
|
||||
if split_binary:
|
||||
cmake_flags.append('-DUSE_STATIC_LIBRARIES=0 -DSPLIT_SHARED_LIBRARIES=1 -DCLICKHOUSE_SPLIT_BINARY=1')
|
||||
cmake_flags.append(
|
||||
"-DUSE_STATIC_LIBRARIES=0 -DSPLIT_SHARED_LIBRARIES=1 "
|
||||
"-DCLICKHOUSE_SPLIT_BINARY=1"
|
||||
)
|
||||
# We can't always build utils because it requires too much space, but
|
||||
# we have to build them at least in some way in CI. The split build is
|
||||
# probably the least heavy disk-wise.
|
||||
cmake_flags.append('-DENABLE_UTILS=1')
|
||||
cmake_flags.append("-DENABLE_UTILS=1")
|
||||
|
||||
if clang_tidy:
|
||||
cmake_flags.append('-DENABLE_CLANG_TIDY=1')
|
||||
cmake_flags.append('-DENABLE_UTILS=1')
|
||||
cmake_flags.append('-DENABLE_TESTS=1')
|
||||
cmake_flags.append('-DENABLE_EXAMPLES=1')
|
||||
cmake_flags.append("-DENABLE_CLANG_TIDY=1")
|
||||
cmake_flags.append("-DENABLE_UTILS=1")
|
||||
cmake_flags.append("-DENABLE_TESTS=1")
|
||||
cmake_flags.append("-DENABLE_EXAMPLES=1")
|
||||
# Don't stop on first error to find more clang-tidy errors in one run.
|
||||
result.append('NINJA_FLAGS=-k0')
|
||||
result.append("NINJA_FLAGS=-k0")
|
||||
|
||||
if with_coverage:
|
||||
cmake_flags.append('-DWITH_COVERAGE=1')
|
||||
cmake_flags.append("-DWITH_COVERAGE=1")
|
||||
|
||||
if version:
|
||||
result.append("VERSION_STRING='{}'".format(version))
|
||||
result.append(f"VERSION_STRING='{version}'")
|
||||
|
||||
if author:
|
||||
result.append("AUTHOR='{}'".format(author))
|
||||
result.append(f"AUTHOR='{author}'")
|
||||
|
||||
if official:
|
||||
cmake_flags.append('-DYANDEX_OFFICIAL_BUILD=1')
|
||||
cmake_flags.append("-DYANDEX_OFFICIAL_BUILD=1")
|
||||
|
||||
result.append('CMAKE_FLAGS="' + ' '.join(cmake_flags) + '"')
|
||||
result.append('CMAKE_FLAGS="' + " ".join(cmake_flags) + '"')
|
||||
|
||||
return result
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
|
||||
parser = argparse.ArgumentParser(description="ClickHouse building script using prebuilt Docker image")
|
||||
# 'performance' creates a combined .tgz with server and configs to be used for performance test.
|
||||
parser.add_argument("--package-type", choices=['deb', 'binary', 'performance'], required=True)
|
||||
parser.add_argument("--clickhouse-repo-path", default=os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir, os.pardir))
|
||||
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
|
||||
parser = argparse.ArgumentParser(
|
||||
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
|
||||
description="ClickHouse building script using prebuilt Docker image",
|
||||
)
|
||||
# 'performance' creates a combined .tgz with server
|
||||
# and configs to be used for performance test.
|
||||
parser.add_argument(
|
||||
"--package-type",
|
||||
choices=("deb", "binary", "performance"),
|
||||
required=True,
|
||||
help="a build type",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--clickhouse-repo-path",
|
||||
default=os.path.join(
|
||||
os.path.dirname(os.path.abspath(__file__)), os.pardir, os.pardir
|
||||
),
|
||||
help="ClickHouse git repository",
|
||||
)
|
||||
parser.add_argument("--output-dir", required=True)
|
||||
parser.add_argument("--build-type", choices=("debug", ""), default="")
|
||||
parser.add_argument("--compiler", choices=("clang-11", "clang-11-darwin", "clang-11-darwin-aarch64", "clang-11-aarch64",
|
||||
"clang-12", "clang-12-darwin", "clang-12-darwin-aarch64", "clang-12-aarch64",
|
||||
"clang-13", "clang-13-darwin", "clang-13-darwin-aarch64", "clang-13-aarch64", "clang-13-ppc64le",
|
||||
"clang-11-freebsd", "clang-12-freebsd", "clang-13-freebsd", "gcc-11"), default="clang-13")
|
||||
parser.add_argument("--sanitizer", choices=("address", "thread", "memory", "undefined", ""), default="")
|
||||
parser.add_argument(
|
||||
"--compiler",
|
||||
choices=(
|
||||
"clang-11",
|
||||
"clang-11-darwin",
|
||||
"clang-11-darwin-aarch64",
|
||||
"clang-11-aarch64",
|
||||
"clang-12",
|
||||
"clang-12-darwin",
|
||||
"clang-12-darwin-aarch64",
|
||||
"clang-12-aarch64",
|
||||
"clang-13",
|
||||
"clang-13-darwin",
|
||||
"clang-13-darwin-aarch64",
|
||||
"clang-13-aarch64",
|
||||
"clang-13-ppc64le",
|
||||
"clang-11-freebsd",
|
||||
"clang-12-freebsd",
|
||||
"clang-13-freebsd",
|
||||
"gcc-11",
|
||||
),
|
||||
default="clang-13",
|
||||
help="a compiler to use",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--sanitizer",
|
||||
choices=("address", "thread", "memory", "undefined", ""),
|
||||
default="",
|
||||
)
|
||||
parser.add_argument("--split-binary", action="store_true")
|
||||
parser.add_argument("--clang-tidy", action="store_true")
|
||||
parser.add_argument("--cache", choices=("", "ccache", "distcc"), default="")
|
||||
parser.add_argument("--ccache_dir", default= os.getenv("HOME", "") + '/.ccache')
|
||||
parser.add_argument("--cache", choices=("ccache", "distcc", ""), default="")
|
||||
parser.add_argument(
|
||||
"--ccache_dir",
|
||||
default=os.getenv("HOME", "") + "/.ccache",
|
||||
help="a directory with ccache",
|
||||
)
|
||||
parser.add_argument("--distcc-hosts", nargs="+")
|
||||
parser.add_argument("--force-build-image", action="store_true")
|
||||
parser.add_argument("--version")
|
||||
parser.add_argument("--author", default="clickhouse")
|
||||
parser.add_argument("--author", default="clickhouse", help="a package author")
|
||||
parser.add_argument("--official", action="store_true")
|
||||
parser.add_argument("--alien-pkgs", nargs='+', default=[])
|
||||
parser.add_argument("--additional-pkgs", action="store_true")
|
||||
parser.add_argument("--with-coverage", action="store_true")
|
||||
parser.add_argument("--with-binaries", choices=("programs", "tests", ""), default="")
|
||||
parser.add_argument("--docker-image-version", default="latest")
|
||||
parser.add_argument(
|
||||
"--with-binaries", choices=("programs", "tests", ""), default=""
|
||||
)
|
||||
parser.add_argument(
|
||||
"--docker-image-version", default="latest", help="docker image tag to use"
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
if not os.path.isabs(args.output_dir):
|
||||
args.output_dir = os.path.abspath(os.path.join(os.getcwd(), args.output_dir))
|
||||
|
||||
image_type = 'binary' if args.package_type == 'performance' else args.package_type
|
||||
image_name = IMAGE_MAP[image_type]
|
||||
image_type = "binary" if args.package_type == "performance" else args.package_type
|
||||
image_name = "clickhouse/binary-builder"
|
||||
|
||||
if not os.path.isabs(args.clickhouse_repo_path):
|
||||
ch_root = os.path.abspath(os.path.join(os.getcwd(), args.clickhouse_repo_path))
|
||||
else:
|
||||
ch_root = args.clickhouse_repo_path
|
||||
|
||||
if args.alien_pkgs and not image_type == "deb":
|
||||
raise Exception("Can add alien packages only in deb build")
|
||||
if args.additional_pkgs and image_type != "deb":
|
||||
raise Exception("Can build additional packages only in deb build")
|
||||
|
||||
if args.with_binaries != "" and not image_type == "deb":
|
||||
if args.with_binaries != "" and image_type != "deb":
|
||||
raise Exception("Can add additional binaries only in deb build")
|
||||
|
||||
if args.with_binaries != "" and image_type == "deb":
|
||||
logging.info("Should place {} to output".format(args.with_binaries))
|
||||
logging.info("Should place %s to output", args.with_binaries)
|
||||
|
||||
dockerfile = os.path.join(ch_root, "docker/packager", image_type, "Dockerfile")
|
||||
image_with_version = image_name + ":" + args.docker_image_version
|
||||
if image_type != "freebsd" and not check_image_exists_locally(image_name) or args.force_build_image:
|
||||
if (
|
||||
image_type != "freebsd"
|
||||
and not check_image_exists_locally(image_name)
|
||||
or args.force_build_image
|
||||
):
|
||||
if not pull_image(image_with_version) or args.force_build_image:
|
||||
build_image(image_with_version, dockerfile)
|
||||
env_prepared = parse_env_variables(
|
||||
args.build_type, args.compiler, args.sanitizer, args.package_type, image_type,
|
||||
args.cache, args.distcc_hosts, args.split_binary, args.clang_tidy,
|
||||
args.version, args.author, args.official, args.alien_pkgs, args.with_coverage, args.with_binaries)
|
||||
args.build_type,
|
||||
args.compiler,
|
||||
args.sanitizer,
|
||||
args.package_type,
|
||||
image_type,
|
||||
args.cache,
|
||||
args.distcc_hosts,
|
||||
args.split_binary,
|
||||
args.clang_tidy,
|
||||
args.version,
|
||||
args.author,
|
||||
args.official,
|
||||
args.additional_pkgs,
|
||||
args.with_coverage,
|
||||
args.with_binaries,
|
||||
)
|
||||
|
||||
run_docker_image_with_env(image_name, args.output_dir, env_prepared, ch_root, args.ccache_dir, args.docker_image_version)
|
||||
logging.info("Output placed into {}".format(args.output_dir))
|
||||
run_docker_image_with_env(
|
||||
image_name,
|
||||
args.output_dir,
|
||||
env_prepared,
|
||||
ch_root,
|
||||
args.ccache_dir,
|
||||
args.docker_image_version,
|
||||
)
|
||||
logging.info("Output placed into %s", args.output_dir)
|
||||
|
@ -4,7 +4,7 @@ FROM ubuntu:20.04
|
||||
ARG apt_archive="http://archive.ubuntu.com"
|
||||
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
|
||||
|
||||
ARG repository="deb https://repo.clickhouse.com/deb/stable/ main/"
|
||||
ARG repository="deb https://packages.clickhouse.com/deb stable main"
|
||||
ARG version=22.1.1.*
|
||||
|
||||
# set non-empty deb_location_url url to create a docker image
|
||||
@ -58,7 +58,7 @@ RUN groupadd -r clickhouse --gid=101 \
|
||||
wget \
|
||||
tzdata \
|
||||
&& mkdir -p /etc/apt/sources.list.d \
|
||||
&& apt-key adv --keyserver keyserver.ubuntu.com --recv E0C56BD4 \
|
||||
&& apt-key adv --keyserver keyserver.ubuntu.com --recv 8919F6BD2B48D754 \
|
||||
&& echo $repository > /etc/apt/sources.list.d/clickhouse.list \
|
||||
&& if [ -n "$deb_location_url" ]; then \
|
||||
echo "installing from custom url with deb packages: $deb_location_url" \
|
||||
|
@ -1,16 +0,0 @@
|
||||
FROM ubuntu:18.04
|
||||
|
||||
ARG repository="deb https://repo.clickhouse.com/deb/stable/ main/"
|
||||
ARG version=22.1.1.*
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y apt-transport-https dirmngr && \
|
||||
mkdir -p /etc/apt/sources.list.d && \
|
||||
apt-key adv --keyserver keyserver.ubuntu.com --recv E0C56BD4 && \
|
||||
echo $repository | tee /etc/apt/sources.list.d/clickhouse.list && \
|
||||
apt-get update && \
|
||||
env DEBIAN_FRONTEND=noninteractive apt-get install --allow-unauthenticated -y clickhouse-test && \
|
||||
rm -rf /var/lib/apt/lists/* /var/cache/debconf && \
|
||||
apt-get clean
|
||||
|
||||
ENTRYPOINT ["/usr/bin/clickhouse-test"]
|
@ -263,9 +263,20 @@ function run_tests
|
||||
if [[ $NPROC == 0 ]]; then
|
||||
NPROC=1
|
||||
fi
|
||||
time clickhouse-test --hung-check -j "${NPROC}" --order=random \
|
||||
--fast-tests-only --no-long --testname --shard --zookeeper --check-zookeeper-session \
|
||||
-- "$FASTTEST_FOCUS" 2>&1 \
|
||||
|
||||
local test_opts=(
|
||||
--hung-check
|
||||
--fast-tests-only
|
||||
--no-long
|
||||
--testname
|
||||
--shard
|
||||
--zookeeper
|
||||
--check-zookeeper-session
|
||||
--order random
|
||||
--print-time
|
||||
--jobs "${NPROC}"
|
||||
)
|
||||
time clickhouse-test "${test_opts[@]}" -- "$FASTTEST_FOCUS" 2>&1 \
|
||||
| ts '%Y-%m-%d %H:%M:%S' \
|
||||
| tee "$FASTTEST_OUTPUT/test_result.txt"
|
||||
set -e
|
||||
|
@ -11,7 +11,7 @@ def removesuffix(text, suffix):
|
||||
https://www.python.org/dev/peps/pep-0616/
|
||||
"""
|
||||
if suffix and text.endswith(suffix):
|
||||
return text[:-len(suffix)]
|
||||
return text[: -len(suffix)]
|
||||
else:
|
||||
return text[:]
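A brief sketch of the backport's behavior (hypothetical inputs), matching str.removesuffix() from Python 3.9:
# Hypothetical examples for the PEP 616 backport above.
assert removesuffix("query.sql.j2", ".j2") == "query.sql"
assert removesuffix("query.sql", ".j2") == "query.sql"  # absent suffix: unchanged copy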
|
||||
|
||||
|
@ -60,5 +60,5 @@ clientPort=2181 \n\
|
||||
maxClientCnxns=80' > /opt/zookeeper/conf/zoo.cfg
|
||||
RUN mkdir /zookeeper && chmod -R 777 /zookeeper
|
||||
|
||||
ENV TZ=Europe/Moscow
|
||||
ENV TZ=Etc/UTC
|
||||
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
|
||||
|
@ -42,6 +42,9 @@ COPY prepare_hive_data.sh /
|
||||
COPY demo_data.txt /
|
||||
|
||||
ENV PATH=/apache-hive-2.3.9-bin/bin:/hadoop-3.1.0/bin:/hadoop-3.1.0/sbin:$PATH
|
||||
|
||||
RUN service ssh start && sed s/HOSTNAME/$HOSTNAME/ /hadoop-3.1.0/etc/hadoop/core-site.xml.template > /hadoop-3.1.0/etc/hadoop/core-site.xml && hdfs namenode -format
|
||||
RUN apt install -y python3 python3-pip
|
||||
RUN pip3 install flask requests
|
||||
COPY http_api_server.py /
|
||||
COPY start.sh /
|
||||
|
||||
|
73

docker/test/integration/hive_server/http_api_server.py
Normal file
@ -0,0 +1,73 @@
|
||||
import os
|
||||
import subprocess
|
||||
import datetime
|
||||
from flask import Flask, flash, request, redirect, url_for
|
||||
|
||||
|
||||
def run_command(command, wait=False):
|
||||
print("{} - execute shell command:{}".format(datetime.datetime.now(), command))
|
||||
lines = []
|
||||
p = subprocess.Popen(
|
||||
command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True
|
||||
)
|
||||
if wait:
|
||||
for l in iter(p.stdout.readline, b""):
|
||||
lines.append(l)
|
||||
p.poll()
|
||||
return (lines, p.returncode)
|
||||
else:
|
||||
return (iter(p.stdout.readline, b""), 0)
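A hedged usage sketch for the helper above (hypothetical command): with wait=True it collects the output lines and the return code; otherwise it returns a live line iterator.
# Hypothetical call: run a shell command and inspect its output.
lines, returncode = run_command("echo hello", wait=True)
# lines == [b"hello\n"], returncode == 0 on success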
|
||||
|
||||
|
||||
UPLOAD_FOLDER = "./"
|
||||
ALLOWED_EXTENSIONS = {"txt", "sh"}
|
||||
app = Flask(__name__)
|
||||
app.config["UPLOAD_FOLDER"] = UPLOAD_FOLDER
|
||||
|
||||
|
||||
@app.route("/")
|
||||
def hello_world():
|
||||
return "Hello World"
|
||||
|
||||
|
||||
def allowed_file(filename):
|
||||
return "." in filename and filename.rsplit(".", 1)[1].lower() in ALLOWED_EXTENSIONS
|
||||
|
||||
|
||||
@app.route("/upload", methods=["GET", "POST"])
|
||||
def upload_file():
|
||||
if request.method == "POST":
|
||||
# check if the post request has the file part
|
||||
if "file" not in request.files:
|
||||
flash("No file part")
|
||||
return redirect(request.url)
|
||||
file = request.files["file"]
|
||||
# If the user does not select a file, the browser submits an
|
||||
# empty file without a filename.
|
||||
if file.filename == "":
|
||||
flash("No selected file")
|
||||
return redirect(request.url)
|
||||
if file and allowed_file(file.filename):
|
||||
filename = file.filename
|
||||
file.save(os.path.join(app.config["UPLOAD_FOLDER"], filename))
|
||||
return redirect(url_for("upload_file", name=filename))
|
||||
return """
|
||||
<!doctype html>
|
||||
<title>Upload new File</title>
|
||||
<h1>Upload new File</h1>
|
||||
<form method=post enctype=multipart/form-data>
|
||||
<input type=file name=file>
|
||||
<input type=submit value=Upload>
|
||||
</form>
|
||||
"""
|
||||
|
||||
|
||||
@app.route("/run", methods=["GET", "POST"])
|
||||
def parse_request():
|
||||
data = request.data # data is empty
|
||||
run_command(data, wait=True)
|
||||
return "Ok"
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
app.run(port=5011)
|
@ -2,5 +2,9 @@
|
||||
hive -e "create database test"
|
||||
|
||||
hive -e "create table test.demo(id string, score int) PARTITIONED BY(day string) ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe' STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat'; create table test.demo_orc(id string, score int) PARTITIONED BY(day string) ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.orc.OrcSerde' STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat'; "
|
||||
hive -e "create table test.parquet_demo(id string, score int) PARTITIONED BY(day string, hour string) ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe' STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat'"
|
||||
hive -e "create table test.demo_text(id string, score int, day string)row format delimited fields terminated by ','; load data local inpath '/demo_data.txt' into table test.demo_text "
|
||||
hive -e "set hive.exec.dynamic.partition.mode=nonstrict;insert into test.demo partition(day) select * from test.demo_text; insert into test.demo_orc partition(day) select * from test.demo_text"
|
||||
hive -e "set hive.exec.dynamic.partition.mode=nonstrict;insert into test.demo partition(day) select * from test.demo_text; insert into test.demo_orc partition(day) select * from test.demo_text"
|
||||
|
||||
hive -e "set hive.exec.dynamic.partition.mode=nonstrict;insert into test.parquet_demo partition(day, hour) select id, score, day, '00' as hour from test.demo;"
|
||||
hive -e "set hive.exec.dynamic.partition.mode=nonstrict;insert into test.parquet_demo partition(day, hour) select id, score, day, '01' as hour from test.demo;"
|
||||
|
@ -1,6 +1,5 @@
|
||||
service ssh start
|
||||
sed s/HOSTNAME/$HOSTNAME/ /hadoop-3.1.0/etc/hadoop/core-site.xml.template > /hadoop-3.1.0/etc/hadoop/core-site.xml
|
||||
hadoop namenode -format
|
||||
start-all.sh
|
||||
service mysql start
|
||||
mysql -u root -e "CREATE USER \"test\"@\"localhost\" IDENTIFIED BY \"test\""
|
||||
@ -9,4 +8,4 @@ schematool -initSchema -dbType mysql
|
||||
#nohup hiveserver2 &
|
||||
nohup hive --service metastore &
|
||||
bash /prepare_hive_data.sh
|
||||
while true; do sleep 1000; done
|
||||
python3 http_api_server.py
|
||||
|
@ -15,9 +15,10 @@ RUN curl -o krb5-libs-1.10.3-65.el6.x86_64.rpm ftp://ftp.pbone.net/mirror/vault.
|
||||
rm -fr *.rpm
|
||||
|
||||
RUN cd /tmp && \
|
||||
curl http://archive.apache.org/dist/commons/daemon/source/commons-daemon-1.0.15-src.tar.gz -o commons-daemon-1.0.15-src.tar.gz && \
|
||||
tar xzf commons-daemon-1.0.15-src.tar.gz && \
|
||||
cd commons-daemon-1.0.15-src/src/native/unix && \
|
||||
./configure && \
|
||||
make && \
|
||||
cp ./jsvc /usr/local/hadoop-2.7.0/sbin
|
||||
curl http://archive.apache.org/dist/commons/daemon/source/commons-daemon-1.0.15-src.tar.gz -o commons-daemon-1.0.15-src.tar.gz && \
|
||||
tar xzf commons-daemon-1.0.15-src.tar.gz && \
|
||||
cd commons-daemon-1.0.15-src/src/native/unix && \
|
||||
./configure && \
|
||||
make && \
|
||||
cp ./jsvc /usr/local/hadoop-2.7.0/sbin && \
|
||||
[ -e /usr/local/hadoop ] || ln -s ./hadoop-2.7.0 /usr/local/hadoop
|
||||
|
@ -40,7 +40,7 @@ RUN apt-get update \
|
||||
/tmp/* \
|
||||
&& apt-get clean
|
||||
|
||||
ENV TZ=Europe/Moscow
|
||||
ENV TZ=Etc/UTC
|
||||
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
|
||||
|
||||
ENV DOCKER_CHANNEL stable
|
||||
|
@ -4,7 +4,7 @@ services:
|
||||
kerberizedhdfs1:
|
||||
cap_add:
|
||||
- DAC_READ_SEARCH
|
||||
image: clickhouse/kerberized-hadoop
|
||||
image: clickhouse/kerberized-hadoop:${DOCKER_KERBERIZED_HADOOP_TAG:-latest}
|
||||
hostname: kerberizedhdfs1
|
||||
restart: always
|
||||
volumes:
|
||||
|
@ -45,6 +45,7 @@ export DOCKER_MYSQL_JS_CLIENT_TAG=${DOCKER_MYSQL_JS_CLIENT_TAG:=latest}
|
||||
export DOCKER_MYSQL_PHP_CLIENT_TAG=${DOCKER_MYSQL_PHP_CLIENT_TAG:=latest}
|
||||
export DOCKER_POSTGRESQL_JAVA_CLIENT_TAG=${DOCKER_POSTGRESQL_JAVA_CLIENT_TAG:=latest}
|
||||
export DOCKER_KERBEROS_KDC_TAG=${DOCKER_KERBEROS_KDC_TAG:=latest}
|
||||
export DOCKER_KERBERIZED_HADOOP_TAG=${DOCKER_KERBERIZED_HADOOP_TAG:=latest}
|
||||
|
||||
cd /ClickHouse/tests/integration
|
||||
exec "$@"
|
||||
|
@ -19,58 +19,126 @@ import xml.etree.ElementTree as et
|
||||
from threading import Thread
|
||||
from scipy import stats
|
||||
|
||||
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(module)s: %(message)s', level='WARNING')
|
||||
logging.basicConfig(
|
||||
format="%(asctime)s: %(levelname)s: %(module)s: %(message)s", level="WARNING"
|
||||
)
|
||||
|
||||
total_start_seconds = time.perf_counter()
|
||||
stage_start_seconds = total_start_seconds
|
||||
|
||||
|
||||
def reportStageEnd(stage):
|
||||
global stage_start_seconds, total_start_seconds
|
||||
|
||||
current = time.perf_counter()
|
||||
print(f'stage\t{stage}\t{current - stage_start_seconds:.3f}\t{current - total_start_seconds:.3f}')
|
||||
print(
|
||||
f"stage\t{stage}\t{current - stage_start_seconds:.3f}\t{current - total_start_seconds:.3f}"
|
||||
)
|
||||
stage_start_seconds = current
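In other words, each call prints one tab-separated line with the stage name, the seconds spent since the previous stage, and the seconds since the script started:
# Hypothetical output of reportStageEnd("parse"):
#   stage	parse	0.042	0.137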
|
||||
|
||||
|
||||
def tsv_escape(s):
|
||||
return s.replace('\\', '\\\\').replace('\t', '\\t').replace('\n', '\\n').replace('\r','')
|
||||
return (
|
||||
s.replace("\\", "\\\\")
|
||||
.replace("\t", "\\t")
|
||||
.replace("\n", "\\n")
|
||||
.replace("\r", "")
|
||||
)
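A minimal round-trip sketch (hypothetical input) of the TSV escaping above:
# Tabs and newlines become visible escapes; carriage returns are dropped.
assert tsv_escape("a\tb\nc\\d\r") == "a\\tb\\nc\\\\d"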
|
||||
|
||||
|
||||
parser = argparse.ArgumentParser(description='Run performance test.')
|
||||
parser = argparse.ArgumentParser(description="Run performance test.")
|
||||
# Explicitly decode files as UTF-8 because sometimes we have Russian characters in queries, and LANG=C is set.
|
||||
parser.add_argument('file', metavar='FILE', type=argparse.FileType('r', encoding='utf-8'), nargs=1, help='test description file')
|
||||
parser.add_argument('--host', nargs='*', default=['localhost'], help="Space-separated list of server hostname(s). Corresponds to '--port' options.")
|
||||
parser.add_argument('--port', nargs='*', default=[9000], help="Space-separated list of server port(s). Corresponds to '--host' options.")
|
||||
parser.add_argument('--runs', type=int, default=1, help='Number of query runs per server.')
|
||||
parser.add_argument('--max-queries', type=int, default=None, help='Test no more than this number of queries, chosen at random.')
|
||||
parser.add_argument('--queries-to-run', nargs='*', type=int, default=None, help='Space-separated list of indexes of queries to test.')
|
||||
parser.add_argument('--max-query-seconds', type=int, default=15, help='For how many seconds at most a query is allowed to run. The script finishes with error if this time is exceeded.')
|
||||
parser.add_argument('--prewarm-max-query-seconds', type=int, default=180, help='For how many seconds at most a prewarm (cold storage) query is allowed to run. The script finishes with error if this time is exceeded.')
|
||||
parser.add_argument('--profile-seconds', type=int, default=0, help='For how many seconds to profile a query for which the performance has changed.')
|
||||
parser.add_argument('--long', action='store_true', help='Do not skip the tests tagged as long.')
|
||||
parser.add_argument('--print-queries', action='store_true', help='Print test queries and exit.')
|
||||
parser.add_argument('--print-settings', action='store_true', help='Print test settings and exit.')
|
||||
parser.add_argument('--keep-created-tables', action='store_true', help="Don't drop the created tables after the test.")
|
||||
parser.add_argument('--use-existing-tables', action='store_true', help="Don't create or drop the tables, use the existing ones instead.")
|
||||
parser.add_argument(
|
||||
"file",
|
||||
metavar="FILE",
|
||||
type=argparse.FileType("r", encoding="utf-8"),
|
||||
nargs=1,
|
||||
help="test description file",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--host",
|
||||
nargs="*",
|
||||
default=["localhost"],
|
||||
help="Space-separated list of server hostname(s). Corresponds to '--port' options.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--port",
|
||||
nargs="*",
|
||||
default=[9000],
|
||||
help="Space-separated list of server port(s). Corresponds to '--host' options.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--runs", type=int, default=1, help="Number of query runs per server."
|
||||
)
|
||||
parser.add_argument(
|
||||
"--max-queries",
|
||||
type=int,
|
||||
default=None,
|
||||
help="Test no more than this number of queries, chosen at random.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--queries-to-run",
|
||||
nargs="*",
|
||||
type=int,
|
||||
default=None,
|
||||
help="Space-separated list of indexes of queries to test.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--max-query-seconds",
|
||||
type=int,
|
||||
default=15,
|
||||
help="For how many seconds at most a query is allowed to run. The script finishes with error if this time is exceeded.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--prewarm-max-query-seconds",
|
||||
type=int,
|
||||
default=180,
|
||||
help="For how many seconds at most a prewarm (cold storage) query is allowed to run. The script finishes with error if this time is exceeded.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--profile-seconds",
|
||||
type=int,
|
||||
default=0,
|
||||
help="For how many seconds to profile a query for which the performance has changed.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--long", action="store_true", help="Do not skip the tests tagged as long."
|
||||
)
|
||||
parser.add_argument(
|
||||
"--print-queries", action="store_true", help="Print test queries and exit."
|
||||
)
|
||||
parser.add_argument(
|
||||
"--print-settings", action="store_true", help="Print test settings and exit."
|
||||
)
|
||||
parser.add_argument(
|
||||
"--keep-created-tables",
|
||||
action="store_true",
|
||||
help="Don't drop the created tables after the test.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--use-existing-tables",
|
||||
action="store_true",
|
||||
help="Don't create or drop the tables, use the existing ones instead.",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
reportStageEnd('start')
|
||||
reportStageEnd("start")
|
||||
|
||||
test_name = os.path.splitext(os.path.basename(args.file[0].name))[0]
|
||||
|
||||
tree = et.parse(args.file[0])
|
||||
root = tree.getroot()
|
||||
|
||||
reportStageEnd('parse')
|
||||
reportStageEnd("parse")
|
||||
|
||||
# Process query parameters
|
||||
subst_elems = root.findall('substitutions/substitution')
|
||||
available_parameters = {} # { 'table': ['hits_10m', 'hits_100m'], ... }
|
||||
subst_elems = root.findall("substitutions/substitution")
|
||||
available_parameters = {} # { 'table': ['hits_10m', 'hits_100m'], ... }
|
||||
for e in subst_elems:
|
||||
name = e.find('name').text
|
||||
values = [v.text for v in e.findall('values/value')]
|
||||
name = e.find("name").text
|
||||
values = [v.text for v in e.findall("values/value")]
|
||||
if not values:
|
||||
raise Exception(f'No values given for substitution {{{name}}}')
|
||||
raise Exception(f"No values given for substitution {{{name}}}")
|
||||
|
||||
available_parameters[name] = values
|
||||
|
||||
@ -78,7 +146,7 @@ for e in subst_elems:
|
||||
# parameters. The set of parameters is determined based on the first list.
|
||||
# Note: keep the order of queries -- sometimes we have DROP IF EXISTS
|
||||
# followed by CREATE in create queries section, so the order matters.
|
||||
def substitute_parameters(query_templates, other_templates = []):
|
||||
def substitute_parameters(query_templates, other_templates=[]):
|
||||
query_results = []
|
||||
other_results = [[]] * (len(other_templates))
|
||||
for i, q in enumerate(query_templates):
|
||||
@ -103,17 +171,21 @@ def substitute_parameters(query_templates, other_templates = []):
|
||||
# and reporting the queries marked as short.
|
||||
test_queries = []
|
||||
is_short = []
|
||||
for e in root.findall('query'):
|
||||
new_queries, [new_is_short] = substitute_parameters([e.text], [[e.attrib.get('short', '0')]])
|
||||
for e in root.findall("query"):
|
||||
new_queries, [new_is_short] = substitute_parameters(
|
||||
[e.text], [[e.attrib.get("short", "0")]]
|
||||
)
|
||||
test_queries += new_queries
|
||||
is_short += [eval(s) for s in new_is_short]
|
||||
|
||||
assert(len(test_queries) == len(is_short))
|
||||
assert len(test_queries) == len(is_short)
|
||||
|
||||
# If we're given a list of queries to run, check that it makes sense.
|
||||
for i in args.queries_to_run or []:
|
||||
if i < 0 or i >= len(test_queries):
|
||||
print(f'There is no query no. {i} in this test, only [{0}-{len(test_queries) - 1}] are present')
|
||||
print(
|
||||
f"There is no query no. {i} in this test, only [{0}-{len(test_queries) - 1}] are present"
|
||||
)
|
||||
exit(1)
|
||||
|
||||
# If we're only asked to print the queries, do that and exit.
|
||||
@ -125,60 +197,65 @@ if args.print_queries:
|
||||
# Print short queries
|
||||
for i, s in enumerate(is_short):
|
||||
if s:
|
||||
print(f'short\t{i}')
|
||||
print(f"short\t{i}")
|
||||
|
||||
# If we're only asked to print the settings, do that and exit. These are settings
|
||||
# for clickhouse-benchmark, so we print them as command line arguments, e.g.
|
||||
# '--max_memory_usage=10000000'.
|
||||
if args.print_settings:
|
||||
for s in root.findall('settings/*'):
|
||||
print(f'--{s.tag}={s.text}')
|
||||
for s in root.findall("settings/*"):
|
||||
print(f"--{s.tag}={s.text}")
|
||||
|
||||
exit(0)
|
||||
|
||||
# Skip long tests
|
||||
if not args.long:
|
||||
for tag in root.findall('.//tag'):
|
||||
if tag.text == 'long':
|
||||
print('skipped\tTest is tagged as long.')
|
||||
for tag in root.findall(".//tag"):
|
||||
if tag.text == "long":
|
||||
print("skipped\tTest is tagged as long.")
|
||||
sys.exit(0)
|
||||
|
||||
# Print report threshold for the test if it is set.
|
||||
ignored_relative_change = 0.05
|
||||
if 'max_ignored_relative_change' in root.attrib:
|
||||
if "max_ignored_relative_change" in root.attrib:
|
||||
ignored_relative_change = float(root.attrib["max_ignored_relative_change"])
|
||||
print(f'report-threshold\t{ignored_relative_change}')
|
||||
print(f"report-threshold\t{ignored_relative_change}")
|
||||
|
||||
reportStageEnd('before-connect')
|
||||
reportStageEnd("before-connect")
|
||||
|
||||
# Open connections
|
||||
servers = [{'host': host or args.host[0], 'port': port or args.port[0]} for (host, port) in itertools.zip_longest(args.host, args.port)]
|
||||
servers = [
|
||||
{"host": host or args.host[0], "port": port or args.port[0]}
|
||||
for (host, port) in itertools.zip_longest(args.host, args.port)
|
||||
]
|
||||
# Force settings_is_important to fail queries on unknown settings.
|
||||
all_connections = [clickhouse_driver.Client(**server, settings_is_important=True) for server in servers]
|
||||
all_connections = [
|
||||
clickhouse_driver.Client(**server, settings_is_important=True) for server in servers
|
||||
]
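A hedged example of the host/port pairing above (assumed CLI values): zip_longest pads the shorter list with None, and the `or` fallback reuses the first host or port.
import itertools
hosts, ports = ["left-server", "right-server"], [9000]   # hypothetical --host/--port values
servers = [
    {"host": host or hosts[0], "port": port or ports[0]}
    for (host, port) in itertools.zip_longest(hosts, ports)
]
# -> [{'host': 'left-server', 'port': 9000}, {'host': 'right-server', 'port': 9000}]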
|
||||
|
||||
for i, s in enumerate(servers):
|
||||
print(f'server\t{i}\t{s["host"]}\t{s["port"]}')
|
||||
|
||||
reportStageEnd('connect')
|
||||
reportStageEnd("connect")
|
||||
|
||||
if not args.use_existing_tables:
|
||||
# Run drop queries, ignoring errors. Do this before all other activity,
|
||||
# because clickhouse_driver disconnects on error (this is not configurable),
|
||||
# and the new connection loses the changes in settings.
|
||||
drop_query_templates = [q.text for q in root.findall('drop_query')]
|
||||
drop_query_templates = [q.text for q in root.findall("drop_query")]
|
||||
drop_queries = substitute_parameters(drop_query_templates)
|
||||
for conn_index, c in enumerate(all_connections):
|
||||
for q in drop_queries:
|
||||
try:
|
||||
c.execute(q)
|
||||
print(f'drop\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}')
|
||||
print(f"drop\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}")
|
||||
except:
|
||||
pass
|
||||
|
||||
reportStageEnd('drop-1')
|
||||
reportStageEnd("drop-1")
|
||||
|
||||
# Apply settings.
|
||||
settings = root.findall('settings/*')
|
||||
settings = root.findall("settings/*")
|
||||
for conn_index, c in enumerate(all_connections):
|
||||
for s in settings:
|
||||
# requires clickhouse-driver >= 1.1.5 to accept arbitrary new settings
|
||||
@ -189,48 +266,52 @@ for conn_index, c in enumerate(all_connections):
|
||||
# the test, which is wrong.
|
||||
c.execute("select 1")
|
||||
|
||||
reportStageEnd('settings')
|
||||
reportStageEnd("settings")
|
||||
|
||||
# Check tables that should exist. If they don't exist, just skip this test.
|
||||
tables = [e.text for e in root.findall('preconditions/table_exists')]
|
||||
tables = [e.text for e in root.findall("preconditions/table_exists")]
|
||||
for t in tables:
|
||||
for c in all_connections:
|
||||
try:
|
||||
res = c.execute("select 1 from {} limit 1".format(t))
|
||||
except:
|
||||
exception_message = traceback.format_exception_only(*sys.exc_info()[:2])[-1]
|
||||
skipped_message = ' '.join(exception_message.split('\n')[:2])
|
||||
print(f'skipped\t{tsv_escape(skipped_message)}')
|
||||
skipped_message = " ".join(exception_message.split("\n")[:2])
|
||||
print(f"skipped\t{tsv_escape(skipped_message)}")
|
||||
sys.exit(0)
|
||||
|
||||
reportStageEnd('preconditions')
|
||||
reportStageEnd("preconditions")
|
||||
|
||||
if not args.use_existing_tables:
|
||||
# Run create and fill queries. We will run them simultaneously for both
|
||||
# servers, to save time. The weird XML search + filter is because we want to
|
||||
# keep the relative order of elements, and etree doesn't support the
|
||||
# appropriate xpath query.
|
||||
create_query_templates = [q.text for q in root.findall('./*')
|
||||
if q.tag in ('create_query', 'fill_query')]
|
||||
create_query_templates = [
|
||||
q.text for q in root.findall("./*") if q.tag in ("create_query", "fill_query")
|
||||
]
|
||||
create_queries = substitute_parameters(create_query_templates)
|
||||
|
||||
# Disallow temporary tables, because the clickhouse_driver reconnects on
|
||||
# errors, and temporary tables are destroyed. We want to be able to continue
|
||||
# after some errors.
|
||||
for q in create_queries:
|
||||
if re.search('create temporary table', q, flags=re.IGNORECASE):
|
||||
print(f"Temporary tables are not allowed in performance tests: '{q}'",
|
||||
file = sys.stderr)
|
||||
if re.search("create temporary table", q, flags=re.IGNORECASE):
|
||||
print(
|
||||
f"Temporary tables are not allowed in performance tests: '{q}'",
|
||||
file=sys.stderr,
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
def do_create(connection, index, queries):
|
||||
for q in queries:
|
||||
connection.execute(q)
|
||||
print(f'create\t{index}\t{connection.last_query.elapsed}\t{tsv_escape(q)}')
|
||||
print(f"create\t{index}\t{connection.last_query.elapsed}\t{tsv_escape(q)}")
|
||||
|
||||
threads = [
|
||||
Thread(target = do_create, args = (connection, index, create_queries))
|
||||
for index, connection in enumerate(all_connections)]
|
||||
Thread(target=do_create, args=(connection, index, create_queries))
|
||||
for index, connection in enumerate(all_connections)
|
||||
]
|
||||
|
||||
for t in threads:
|
||||
t.start()
|
||||
@ -238,14 +319,16 @@ if not args.use_existing_tables:
|
||||
for t in threads:
|
||||
t.join()
|
||||
|
||||
reportStageEnd('create')
|
||||
reportStageEnd("create")
|
||||
|
||||
# By default, test all queries.
|
||||
queries_to_run = range(0, len(test_queries))
|
||||
|
||||
if args.max_queries:
|
||||
# If specified, test a limited number of queries chosen at random.
|
||||
queries_to_run = random.sample(range(0, len(test_queries)), min(len(test_queries), args.max_queries))
|
||||
queries_to_run = random.sample(
|
||||
range(0, len(test_queries)), min(len(test_queries), args.max_queries)
|
||||
)
|
||||
|
||||
if args.queries_to_run:
|
||||
# Run the specified queries.
|
||||
@ -255,16 +338,16 @@ if args.queries_to_run:
|
||||
profile_total_seconds = 0
|
||||
for query_index in queries_to_run:
|
||||
q = test_queries[query_index]
|
||||
query_prefix = f'{test_name}.query{query_index}'
|
||||
query_prefix = f"{test_name}.query{query_index}"
|
||||
|
||||
# We have some crazy long queries (about 100kB), so trim them to a sane
|
||||
# length. This means we can't use query text as an identifier and have to
|
||||
# use the test name + the test-wide query index.
|
||||
query_display_name = q
|
||||
if len(query_display_name) > 1000:
|
||||
query_display_name = f'{query_display_name[:1000]}...({query_index})'
|
||||
query_display_name = f"{query_display_name[:1000]}...({query_index})"
|
||||
|
||||
print(f'display-name\t{query_index}\t{tsv_escape(query_display_name)}')
|
||||
print(f"display-name\t{query_index}\t{tsv_escape(query_display_name)}")
|
||||
|
||||
# Prewarm: run once on both servers. Helps to bring the data into memory,
|
||||
# precompile the queries, etc.
|
||||
@ -272,10 +355,10 @@ for query_index in queries_to_run:
|
||||
# new one. We want to run them on the new server only, so that the PR author
|
||||
# can ensure that the test works properly. Remember the errors we had on
|
||||
# each server.
|
||||
query_error_on_connection = [None] * len(all_connections);
|
||||
query_error_on_connection = [None] * len(all_connections)
|
||||
for conn_index, c in enumerate(all_connections):
|
||||
try:
|
||||
prewarm_id = f'{query_prefix}.prewarm0'
|
||||
prewarm_id = f"{query_prefix}.prewarm0"
|
||||
|
||||
try:
|
||||
# During the warmup runs, we will also:
|
||||
@ -283,25 +366,30 @@ for query_index in queries_to_run:
|
||||
# * collect profiler traces, which might be helpful for analyzing
|
||||
# test coverage. We disable profiler for normal runs because
|
||||
# it makes the results unstable.
|
||||
res = c.execute(q, query_id = prewarm_id,
|
||||
settings = {
|
||||
'max_execution_time': args.prewarm_max_query_seconds,
|
||||
'query_profiler_real_time_period_ns': 10000000,
|
||||
'memory_profiler_step': '4Mi',
|
||||
})
|
||||
res = c.execute(
|
||||
q,
|
||||
query_id=prewarm_id,
|
||||
settings={
|
||||
"max_execution_time": args.prewarm_max_query_seconds,
|
||||
"query_profiler_real_time_period_ns": 10000000,
|
||||
"memory_profiler_step": "4Mi",
|
||||
},
|
||||
)
|
||||
except clickhouse_driver.errors.Error as e:
|
||||
# Add query id to the exception to make debugging easier.
|
||||
e.args = (prewarm_id, *e.args)
|
||||
e.message = prewarm_id + ': ' + e.message
|
||||
e.message = prewarm_id + ": " + e.message
|
||||
raise
|
||||
|
||||
print(f'prewarm\t{query_index}\t{prewarm_id}\t{conn_index}\t{c.last_query.elapsed}')
|
||||
print(
|
||||
f"prewarm\t{query_index}\t{prewarm_id}\t{conn_index}\t{c.last_query.elapsed}"
|
||||
)
|
||||
except KeyboardInterrupt:
|
||||
raise
|
||||
except:
|
||||
# FIXME the driver reconnects on error and we lose settings, so this
|
||||
# might lead to further errors or unexpected behavior.
|
||||
query_error_on_connection[conn_index] = traceback.format_exc();
|
||||
query_error_on_connection[conn_index] = traceback.format_exc()
|
||||
continue
|
||||
|
||||
# Report all errors that occurred during prewarm and decide what to do next.
|
||||
@ -311,14 +399,14 @@ for query_index in queries_to_run:
|
||||
no_errors = []
|
||||
for i, e in enumerate(query_error_on_connection):
|
||||
if e:
|
||||
print(e, file = sys.stderr)
|
||||
print(e, file=sys.stderr)
|
||||
else:
|
||||
no_errors.append(i)
|
||||
|
||||
if len(no_errors) == 0:
|
||||
continue
|
||||
elif len(no_errors) < len(all_connections):
|
||||
print(f'partial\t{query_index}\t{no_errors}')
|
||||
print(f"partial\t{query_index}\t{no_errors}")
|
||||
|
||||
this_query_connections = [all_connections[index] for index in no_errors]
|
||||
|
||||
@ -337,27 +425,34 @@ for query_index in queries_to_run:
|
||||
all_server_times.append([])
|
||||
|
||||
while True:
|
||||
run_id = f'{query_prefix}.run{run}'
|
||||
run_id = f"{query_prefix}.run{run}"
|
||||
|
||||
for conn_index, c in enumerate(this_query_connections):
|
||||
try:
|
||||
res = c.execute(q, query_id = run_id, settings = {'max_execution_time': args.max_query_seconds})
|
||||
res = c.execute(
|
||||
q,
|
||||
query_id=run_id,
|
||||
settings={"max_execution_time": args.max_query_seconds},
|
||||
)
|
||||
except clickhouse_driver.errors.Error as e:
|
||||
# Add query id to the exception to make debugging easier.
|
||||
e.args = (run_id, *e.args)
|
||||
e.message = run_id + ': ' + e.message
|
||||
e.message = run_id + ": " + e.message
|
||||
raise
|
||||
|
||||
elapsed = c.last_query.elapsed
|
||||
all_server_times[conn_index].append(elapsed)
|
||||
|
||||
server_seconds += elapsed
|
||||
print(f'query\t{query_index}\t{run_id}\t{conn_index}\t{elapsed}')
|
||||
print(f"query\t{query_index}\t{run_id}\t{conn_index}\t{elapsed}")
|
||||
|
||||
if elapsed > args.max_query_seconds:
|
||||
# Do not stop processing pathologically slow queries,
|
||||
# since this may hide errors in other queries.
|
||||
print(f'The query no. {query_index} is taking too long to run ({elapsed} s)', file=sys.stderr)
|
||||
print(
|
||||
f"The query no. {query_index} is taking too long to run ({elapsed} s)",
|
||||
file=sys.stderr,
|
||||
)
|
||||
|
||||
# Be careful with the counter, after this line it's the next iteration
|
||||
# already.
|
||||
@ -386,7 +481,7 @@ for query_index in queries_to_run:
|
||||
break
|
||||
|
||||
client_seconds = time.perf_counter() - start_seconds
|
||||
print(f'client-time\t{query_index}\t{client_seconds}\t{server_seconds}')
|
||||
print(f"client-time\t{query_index}\t{client_seconds}\t{server_seconds}")
|
||||
|
||||
# Run additional profiling queries to collect profile data, but only if test times appeared to be different.
|
||||
# We have to do it after normal runs because otherwise it will affect test statistics too much
|
||||
@ -397,13 +492,15 @@ for query_index in queries_to_run:
|
||||
# Don't fail if for some reason there are not enough measurements.
|
||||
continue
|
||||
|
||||
pvalue = stats.ttest_ind(all_server_times[0], all_server_times[1], equal_var = False).pvalue
|
||||
pvalue = stats.ttest_ind(
|
||||
all_server_times[0], all_server_times[1], equal_var=False
|
||||
).pvalue
|
||||
median = [statistics.median(t) for t in all_server_times]
|
||||
# Keep this consistent with the value used in report. Should eventually move
|
||||
# to (median[1] - median[0]) / min(median), which is compatible with "times"
|
||||
# difference we use in report (max(median) / min(median)).
|
||||
relative_diff = (median[1] - median[0]) / median[0]
|
||||
print(f'diff\t{query_index}\t{median[0]}\t{median[1]}\t{relative_diff}\t{pvalue}')
|
||||
print(f"diff\t{query_index}\t{median[0]}\t{median[1]}\t{relative_diff}\t{pvalue}")
|
||||
if abs(relative_diff) < ignored_relative_change or pvalue > 0.05:
|
||||
continue
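A standalone sketch of the significance check above (assumed timing samples): a query is only flagged when the medians differ by more than the threshold and Welch's t-test is significant.
import statistics
from scipy import stats

old_times = [0.101, 0.103, 0.099, 0.102]   # hypothetical timings, server 0
new_times = [0.121, 0.119, 0.123, 0.120]   # hypothetical timings, server 1
pvalue = stats.ttest_ind(old_times, new_times, equal_var=False).pvalue
medians = [statistics.median(old_times), statistics.median(new_times)]
relative_diff = (medians[1] - medians[0]) / medians[0]
flagged = abs(relative_diff) >= 0.05 and pvalue <= 0.05   # default threshold is 0.05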
|
||||
|
||||
@ -412,25 +509,31 @@ for query_index in queries_to_run:
|
||||
profile_start_seconds = time.perf_counter()
|
||||
run = 0
|
||||
while time.perf_counter() - profile_start_seconds < args.profile_seconds:
|
||||
run_id = f'{query_prefix}.profile{run}'
|
||||
run_id = f"{query_prefix}.profile{run}"
|
||||
|
||||
for conn_index, c in enumerate(this_query_connections):
|
||||
try:
|
||||
res = c.execute(q, query_id = run_id, settings = {'query_profiler_real_time_period_ns': 10000000})
|
||||
print(f'profile\t{query_index}\t{run_id}\t{conn_index}\t{c.last_query.elapsed}')
|
||||
res = c.execute(
|
||||
q,
|
||||
query_id=run_id,
|
||||
settings={"query_profiler_real_time_period_ns": 10000000},
|
||||
)
|
||||
print(
|
||||
f"profile\t{query_index}\t{run_id}\t{conn_index}\t{c.last_query.elapsed}"
|
||||
)
|
||||
except clickhouse_driver.errors.Error as e:
|
||||
# Add query id to the exception to make debugging easier.
|
||||
e.args = (run_id, *e.args)
|
||||
e.message = run_id + ': ' + e.message
|
||||
e.message = run_id + ": " + e.message
|
||||
raise
|
||||
|
||||
run += 1
|
||||
|
||||
profile_total_seconds += time.perf_counter() - profile_start_seconds
|
||||
|
||||
print(f'profile-total\t{profile_total_seconds}')
|
||||
print(f"profile-total\t{profile_total_seconds}")
|
||||
|
||||
reportStageEnd('run')
|
||||
reportStageEnd("run")
|
||||
|
||||
# Run drop queries
|
||||
if not args.keep_created_tables and not args.use_existing_tables:
|
||||
@ -438,6 +541,6 @@ if not args.keep_created_tables and not args.use_existing_tables:
|
||||
for conn_index, c in enumerate(all_connections):
|
||||
for q in drop_queries:
|
||||
c.execute(q)
|
||||
print(f'drop\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}')
|
||||
print(f"drop\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}")
|
||||
|
||||
reportStageEnd('drop-2')
|
||||
reportStageEnd("drop-2")
|
||||
|
@ -12,9 +12,13 @@ import pprint
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
parser = argparse.ArgumentParser(description='Create performance test report')
|
||||
parser.add_argument('--report', default='main', choices=['main', 'all-queries'],
|
||||
help='Which report to build')
|
||||
parser = argparse.ArgumentParser(description="Create performance test report")
|
||||
parser.add_argument(
|
||||
"--report",
|
||||
default="main",
|
||||
choices=["main", "all-queries"],
|
||||
help="Which report to build",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
tables = []
|
||||
@ -31,8 +35,8 @@ unstable_partial_queries = 0
|
||||
# max seconds to run one query by itself, not counting preparation
|
||||
allowed_single_run_time = 2
|
||||
|
||||
color_bad='#ffb0c0'
|
||||
color_good='#b0d050'
|
||||
color_bad = "#ffb0c0"
|
||||
color_good = "#b0d050"
|
||||
|
||||
header_template = """
|
||||
<!DOCTYPE html>
|
||||
@ -151,24 +155,29 @@ tr:nth-child(odd) td {{filter: brightness(90%);}}
|
||||
table_anchor = 0
|
||||
row_anchor = 0
|
||||
|
||||
|
||||
def currentTableAnchor():
|
||||
global table_anchor
|
||||
return f'{table_anchor}'
|
||||
return f"{table_anchor}"
|
||||
|
||||
|
||||
def newTableAnchor():
|
||||
global table_anchor
|
||||
table_anchor += 1
|
||||
return currentTableAnchor()
|
||||
|
||||
|
||||
def currentRowAnchor():
|
||||
global row_anchor
|
||||
global table_anchor
|
||||
return f'{table_anchor}.{row_anchor}'
|
||||
return f"{table_anchor}.{row_anchor}"
|
||||
|
||||
|
||||
def nextRowAnchor():
|
||||
global row_anchor
|
||||
global table_anchor
|
||||
return f'{table_anchor}.{row_anchor + 1}'
|
||||
return f"{table_anchor}.{row_anchor + 1}"
|
||||
|
||||
|
||||
def advanceRowAnchor():
|
||||
global row_anchor
|
||||
@ -178,43 +187,58 @@ def advanceRowAnchor():
|
||||
|
||||
|
||||
def tr(x, anchor=None):
|
||||
#return '<tr onclick="location.href=\'#{a}\'" id={a}>{x}</tr>'.format(a=a, x=str(x))
|
||||
# return '<tr onclick="location.href=\'#{a}\'" id={a}>{x}</tr>'.format(a=a, x=str(x))
|
||||
anchor = anchor if anchor else advanceRowAnchor()
|
||||
return f'<tr id={anchor}>{x}</tr>'
|
||||
return f"<tr id={anchor}>{x}</tr>"
|
||||
|
||||
def td(value, cell_attributes = ''):
|
||||
return '<td {cell_attributes}>{value}</td>'.format(
|
||||
cell_attributes = cell_attributes,
|
||||
value = value)
|
||||
|
||||
def th(value, cell_attributes = ''):
|
||||
return '<th {cell_attributes}>{value}</th>'.format(
|
||||
cell_attributes = cell_attributes,
|
||||
value = value)
|
||||
def td(value, cell_attributes=""):
|
||||
return "<td {cell_attributes}>{value}</td>".format(
|
||||
cell_attributes=cell_attributes, value=value
|
||||
)
|
||||
|
||||
def tableRow(cell_values, cell_attributes = [], anchor=None):
|
||||
|
||||
def th(value, cell_attributes=""):
|
||||
return "<th {cell_attributes}>{value}</th>".format(
|
||||
cell_attributes=cell_attributes, value=value
|
||||
)
|
||||
|
||||
|
||||
def tableRow(cell_values, cell_attributes=[], anchor=None):
|
||||
return tr(
|
||||
''.join([td(v, a)
|
||||
for v, a in itertools.zip_longest(
|
||||
cell_values, cell_attributes,
|
||||
fillvalue = '')
|
||||
if a is not None and v is not None]),
|
||||
anchor)
|
||||
"".join(
|
||||
[
|
||||
td(v, a)
|
||||
for v, a in itertools.zip_longest(
|
||||
cell_values, cell_attributes, fillvalue=""
|
||||
)
|
||||
if a is not None and v is not None
|
||||
]
|
||||
),
|
||||
anchor,
|
||||
)
|
||||
|
||||
def tableHeader(cell_values, cell_attributes = []):
|
||||
|
||||
def tableHeader(cell_values, cell_attributes=[]):
|
||||
return tr(
|
||||
''.join([th(v, a)
|
||||
for v, a in itertools.zip_longest(
|
||||
cell_values, cell_attributes,
|
||||
fillvalue = '')
|
||||
if a is not None and v is not None]))
|
||||
"".join(
|
||||
[
|
||||
th(v, a)
|
||||
for v, a in itertools.zip_longest(
|
||||
cell_values, cell_attributes, fillvalue=""
|
||||
)
|
||||
if a is not None and v is not None
|
||||
]
|
||||
)
|
||||
)
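A hedged rendering example for the row helpers above (hypothetical cell values; the row anchor comes from the global counters):
# tableRow(["0.12", "q1"]) -> e.g. '<tr id=1.1><td >0.12</td><td >q1</td></tr>'
# tableHeader(["Time, s", "Query"]) -> '<tr id=...><th >Time, s</th><th >Query</th></tr>'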
|
||||
|
||||
|
||||
def tableStart(title):
|
||||
cls = '-'.join(title.lower().split(' ')[:3]);
|
||||
cls = "-".join(title.lower().split(" ")[:3])
|
||||
global table_anchor
|
||||
table_anchor = cls
|
||||
anchor = currentTableAnchor()
|
||||
help_anchor = '-'.join(title.lower().split(' '));
|
||||
help_anchor = "-".join(title.lower().split(" "))
|
||||
return f"""
|
||||
<h2 id="{anchor}">
|
||||
<a class="cancela" href="#{anchor}">{title}</a>
|
||||
@ -223,12 +247,14 @@ def tableStart(title):
|
||||
<table class="{cls}">
|
||||
"""
|
||||
|
||||
|
||||
def tableEnd():
|
||||
return '</table>'
|
||||
return "</table>"
|
||||
|
||||
|
||||
def tsvRows(n):
|
||||
try:
|
||||
with open(n, encoding='utf-8') as fd:
|
||||
with open(n, encoding="utf-8") as fd:
|
||||
result = []
|
||||
for row in csv.reader(fd, delimiter="\t", quoting=csv.QUOTE_NONE):
|
||||
new_row = []
|
||||
@ -237,27 +263,32 @@ def tsvRows(n):
|
||||
# The second one (encode('latin1').decode('utf-8')) fixes the changes with unicode vs utf-8 chars, so
|
||||
# a mojibake string like 'Чем зÐнимаеÑЬÑÑ' is transformed back into 'Чем зАнимаешЬся'.
|
||||
|
||||
new_row.append(e.encode('utf-8').decode('unicode-escape').encode('latin1').decode('utf-8'))
|
||||
new_row.append(
|
||||
e.encode("utf-8")
|
||||
.decode("unicode-escape")
|
||||
.encode("latin1")
|
||||
.decode("utf-8")
|
||||
)
|
||||
result.append(new_row)
|
||||
return result
|
||||
|
||||
except:
|
||||
report_errors.append(
|
||||
traceback.format_exception_only(
|
||||
*sys.exc_info()[:2])[-1])
|
||||
report_errors.append(traceback.format_exception_only(*sys.exc_info()[:2])[-1])
|
||||
pass
|
||||
return []
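A brief sketch of the double-decode trick above (hypothetical cell): unicode-escape expands literal backslash escapes stored in the TSV, and the latin1/utf-8 round trip restores any non-ASCII characters that step mangled.
cell = "line1\\nline2"   # raw TSV cell containing a literal backslash-n
fixed = cell.encode("utf-8").decode("unicode-escape").encode("latin1").decode("utf-8")
assert fixed == "line1\nline2"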
|
||||
|
||||
|
||||
def htmlRows(n):
|
||||
rawRows = tsvRows(n)
|
||||
result = ''
|
||||
result = ""
|
||||
for row in rawRows:
|
||||
result += tableRow(row)
|
||||
return result
|
||||
|
||||
|
||||
def addSimpleTable(caption, columns, rows, pos=None):
|
||||
global tables
|
||||
text = ''
|
||||
text = ""
|
||||
if not rows:
|
||||
return
|
||||
|
||||
@ -268,51 +299,63 @@ def addSimpleTable(caption, columns, rows, pos=None):
|
||||
text += tableEnd()
|
||||
tables.insert(pos if pos else len(tables), text)
|
||||
|
||||
|
||||
def add_tested_commits():
    global report_errors
    try:
        addSimpleTable('Tested Commits', ['Old', 'New'],
            [['<pre>{}</pre>'.format(x) for x in
                [open('left-commit.txt').read(),
                 open('right-commit.txt').read()]]])
        addSimpleTable(
            "Tested Commits",
            ["Old", "New"],
            [
                [
                    "<pre>{}</pre>".format(x)
                    for x in [
                        open("left-commit.txt").read(),
                        open("right-commit.txt").read(),
                    ]
                ]
            ],
        )
    except:
        # Don't fail if no commit info -- maybe it's a manual run.
        report_errors.append(
            traceback.format_exception_only(
                *sys.exc_info()[:2])[-1])
        report_errors.append(traceback.format_exception_only(*sys.exc_info()[:2])[-1])
        pass


def add_report_errors():
    global tables
    global report_errors
    # Add the errors reported by various steps of comparison script
    try:
        report_errors += [l.strip() for l in open('report/errors.log')]
        report_errors += [l.strip() for l in open("report/errors.log")]
    except:
        report_errors.append(
            traceback.format_exception_only(
                *sys.exc_info()[:2])[-1])
        report_errors.append(traceback.format_exception_only(*sys.exc_info()[:2])[-1])
        pass

    if not report_errors:
        return

    text = tableStart('Errors while Building the Report')
    text += tableHeader(['Error'])
    text = tableStart("Errors while Building the Report")
    text += tableHeader(["Error"])
    for x in report_errors:
        text += tableRow([x])
    text += tableEnd()
    # Insert after Tested Commits
    tables.insert(1, text)
    errors_explained.append([f'<a href="#{currentTableAnchor()}">There were some errors while building the report</a>']);
    errors_explained.append(
        [
            f'<a href="#{currentTableAnchor()}">There were some errors while building the report</a>'
        ]
    )

def add_errors_explained():
    if not errors_explained:
        return

    text = '<a name="fail1"/>'
    text += tableStart('Error Summary')
    text += tableHeader(['Description'])
    text += tableStart("Error Summary")
    text += tableHeader(["Description"])
    for row in errors_explained:
        text += tableRow(row)
    text += tableEnd()

@ -321,59 +364,81 @@ def add_errors_explained():
    tables.insert(1, text)

if args.report == 'main':
if args.report == "main":
    print((header_template.format()))

    add_tested_commits()

    run_error_rows = tsvRows('run-errors.tsv')
    run_error_rows = tsvRows("run-errors.tsv")
    error_tests += len(run_error_rows)
    addSimpleTable('Run Errors', ['Test', 'Error'], run_error_rows)
    addSimpleTable("Run Errors", ["Test", "Error"], run_error_rows)
    if run_error_rows:
        errors_explained.append([f'<a href="#{currentTableAnchor()}">There were some errors while running the tests</a>']);
        errors_explained.append(
            [
                f'<a href="#{currentTableAnchor()}">There were some errors while running the tests</a>'
            ]
        )

    slow_on_client_rows = tsvRows('report/slow-on-client.tsv')
    slow_on_client_rows = tsvRows("report/slow-on-client.tsv")
    error_tests += len(slow_on_client_rows)
    addSimpleTable('Slow on Client',
        ['Client time, s', 'Server time, s', 'Ratio', 'Test', 'Query'],
        slow_on_client_rows)
    addSimpleTable(
        "Slow on Client",
        ["Client time, s", "Server time, s", "Ratio", "Test", "Query"],
        slow_on_client_rows,
    )
    if slow_on_client_rows:
        errors_explained.append([f'<a href="#{currentTableAnchor()}">Some queries are taking noticeable time client-side (missing `FORMAT Null`?)</a>']);
        errors_explained.append(
            [
                f'<a href="#{currentTableAnchor()}">Some queries are taking noticeable time client-side (missing `FORMAT Null`?)</a>'
            ]
        )

    unmarked_short_rows = tsvRows('report/unexpected-query-duration.tsv')
    unmarked_short_rows = tsvRows("report/unexpected-query-duration.tsv")
    error_tests += len(unmarked_short_rows)
    addSimpleTable('Unexpected Query Duration',
        ['Problem', 'Marked as "short"?', 'Run time, s', 'Test', '#', 'Query'],
        unmarked_short_rows)
    addSimpleTable(
        "Unexpected Query Duration",
        ["Problem", 'Marked as "short"?', "Run time, s", "Test", "#", "Query"],
        unmarked_short_rows,
    )
    if unmarked_short_rows:
        errors_explained.append([f'<a href="#{currentTableAnchor()}">Some queries have unexpected duration</a>']);
        errors_explained.append(
            [
                f'<a href="#{currentTableAnchor()}">Some queries have unexpected duration</a>'
            ]
        )

    def add_partial():
        rows = tsvRows('report/partial-queries-report.tsv')
        rows = tsvRows("report/partial-queries-report.tsv")
        if not rows:
            return

        global unstable_partial_queries, slow_average_tests, tables
        text = tableStart('Partial Queries')
        columns = ['Median time, s', 'Relative time variance', 'Test', '#', 'Query']
        text = tableStart("Partial Queries")
        columns = ["Median time, s", "Relative time variance", "Test", "#", "Query"]
        text += tableHeader(columns)
        attrs = ['' for c in columns]
        attrs = ["" for c in columns]
        for row in rows:
            anchor = f'{currentTableAnchor()}.{row[2]}.{row[3]}'
            anchor = f"{currentTableAnchor()}.{row[2]}.{row[3]}"
            if float(row[1]) > 0.10:
                attrs[1] = f'style="background: {color_bad}"'
                unstable_partial_queries += 1
                errors_explained.append([f'<a href="#{anchor}">The query no. {row[3]} of test \'{row[2]}\' has excessive variance of run time. Keep it below 10%</a>'])
                errors_explained.append(
                    [
                        f"<a href=\"#{anchor}\">The query no. {row[3]} of test '{row[2]}' has excessive variance of run time. Keep it below 10%</a>"
                    ]
                )
            else:
                attrs[1] = ''
                attrs[1] = ""
            if float(row[0]) > allowed_single_run_time:
                attrs[0] = f'style="background: {color_bad}"'
                errors_explained.append([f'<a href="#{anchor}">The query no. {row[3]} of test \'{row[2]}\' is taking too long to run. Keep the run time below {allowed_single_run_time} seconds"</a>'])
                errors_explained.append(
                    [
                        f'<a href="#{anchor}">The query no. {row[3]} of test \'{row[2]}\' is taking too long to run. Keep the run time below {allowed_single_run_time} seconds"</a>'
                    ]
                )
                slow_average_tests += 1
            else:
                attrs[0] = ''
                attrs[0] = ""
            text += tableRow(row, attrs, anchor)
        text += tableEnd()
        tables.append(text)

@ -381,41 +446,45 @@ if args.report == 'main':
    add_partial()

    def add_changes():
        rows = tsvRows('report/changed-perf.tsv')
        rows = tsvRows("report/changed-perf.tsv")
        if not rows:
            return

        global faster_queries, slower_queries, tables

        text = tableStart('Changes in Performance')
        text = tableStart("Changes in Performance")
        columns = [
            'Old, s', # 0
            'New, s', # 1
            'Ratio of speedup (-) or slowdown (+)', # 2
            'Relative difference (new − old) / old', # 3
            'p < 0.01 threshold', # 4
            '', # Failed # 5
            'Test', # 6
            '#', # 7
            'Query', # 8
        ]
        attrs = ['' for c in columns]
            "Old, s",  # 0
            "New, s",  # 1
            "Ratio of speedup (-) or slowdown (+)",  # 2
            "Relative difference (new − old) / old",  # 3
            "p < 0.01 threshold",  # 4
            "",  # Failed # 5
            "Test",  # 6
            "#",  # 7
            "Query",  # 8
        ]
        attrs = ["" for c in columns]
        attrs[5] = None

        text += tableHeader(columns, attrs)

        for row in rows:
            anchor = f'{currentTableAnchor()}.{row[6]}.{row[7]}'
            anchor = f"{currentTableAnchor()}.{row[6]}.{row[7]}"
            if int(row[5]):
                if float(row[3]) < 0.:
                if float(row[3]) < 0.0:
                    faster_queries += 1
                    attrs[2] = attrs[3] = f'style="background: {color_good}"'
                else:
                    slower_queries += 1
                    attrs[2] = attrs[3] = f'style="background: {color_bad}"'
                    errors_explained.append([f'<a href="#{anchor}">The query no. {row[7]} of test \'{row[6]}\' has slowed down</a>'])
                    errors_explained.append(
                        [
                            f"<a href=\"#{anchor}\">The query no. {row[7]} of test '{row[6]}' has slowed down</a>"
                        ]
                    )
            else:
                attrs[2] = attrs[3] = ''
                attrs[2] = attrs[3] = ""

            text += tableRow(row, attrs, anchor)

@ -427,35 +496,35 @@ if args.report == 'main':
    def add_unstable_queries():
        global unstable_queries, very_unstable_queries, tables

        unstable_rows = tsvRows('report/unstable-queries.tsv')
        unstable_rows = tsvRows("report/unstable-queries.tsv")
        if not unstable_rows:
            return

        unstable_queries += len(unstable_rows)

        columns = [
            'Old, s', #0
            'New, s', #1
            'Relative difference (new - old)/old', #2
            'p < 0.01 threshold', #3
            '', # Failed #4
            'Test', #5
            '#', #6
            'Query' #7
            "Old, s",  # 0
            "New, s",  # 1
            "Relative difference (new - old)/old",  # 2
            "p < 0.01 threshold",  # 3
            "",  # Failed #4
            "Test",  # 5
            "#",  # 6
            "Query",  # 7
        ]
        attrs = ['' for c in columns]
        attrs = ["" for c in columns]
        attrs[4] = None

        text = tableStart('Unstable Queries')
        text = tableStart("Unstable Queries")
        text += tableHeader(columns, attrs)

        for r in unstable_rows:
            anchor = f'{currentTableAnchor()}.{r[5]}.{r[6]}'
            anchor = f"{currentTableAnchor()}.{r[5]}.{r[6]}"
            if int(r[4]):
                very_unstable_queries += 1
                attrs[3] = f'style="background: {color_bad}"'
            else:
                attrs[3] = ''
                attrs[3] = ""
                # Just don't add the slightly unstable queries we don't consider
                # errors. It's not clear what the user should do with them.
                continue

@ -470,53 +539,70 @@ if args.report == 'main':
    add_unstable_queries()

    skipped_tests_rows = tsvRows('analyze/skipped-tests.tsv')
    addSimpleTable('Skipped Tests', ['Test', 'Reason'], skipped_tests_rows)
    skipped_tests_rows = tsvRows("analyze/skipped-tests.tsv")
    addSimpleTable("Skipped Tests", ["Test", "Reason"], skipped_tests_rows)

    addSimpleTable('Test Performance Changes',
        ['Test', 'Ratio of speedup (-) or slowdown (+)', 'Queries', 'Total not OK', 'Changed perf', 'Unstable'],
        tsvRows('report/test-perf-changes.tsv'))
    addSimpleTable(
        "Test Performance Changes",
        [
            "Test",
            "Ratio of speedup (-) or slowdown (+)",
            "Queries",
            "Total not OK",
            "Changed perf",
            "Unstable",
        ],
        tsvRows("report/test-perf-changes.tsv"),
    )

    def add_test_times():
        global slow_average_tests, tables
        rows = tsvRows('report/test-times.tsv')
        rows = tsvRows("report/test-times.tsv")
        if not rows:
            return

        columns = [
            'Test', #0
            'Wall clock time, entire test, s', #1
            'Total client time for measured query runs, s', #2
            'Queries', #3
            'Longest query, total for measured runs, s', #4
            'Wall clock time per query, s', #5
            'Shortest query, total for measured runs, s', #6
            '', # Runs #7
        ]
        attrs = ['' for c in columns]
            "Test",  # 0
            "Wall clock time, entire test, s",  # 1
            "Total client time for measured query runs, s",  # 2
            "Queries",  # 3
            "Longest query, total for measured runs, s",  # 4
            "Wall clock time per query, s",  # 5
            "Shortest query, total for measured runs, s",  # 6
            "",  # Runs #7
        ]
        attrs = ["" for c in columns]
        attrs[7] = None

        text = tableStart('Test Times')
        text = tableStart("Test Times")
        text += tableHeader(columns, attrs)

        allowed_average_run_time = 3.75 # 60 seconds per test at (7 + 1) * 2 runs
        allowed_average_run_time = 3.75  # 60 seconds per test at (7 + 1) * 2 runs
        for r in rows:
            anchor = f'{currentTableAnchor()}.{r[0]}'
            anchor = f"{currentTableAnchor()}.{r[0]}"
            total_runs = (int(r[7]) + 1) * 2  # one prewarm run, two servers
            if r[0] != 'Total' and float(r[5]) > allowed_average_run_time * total_runs:
            if r[0] != "Total" and float(r[5]) > allowed_average_run_time * total_runs:
                # FIXME should be 15s max -- investigate parallel_insert
                slow_average_tests += 1
                attrs[5] = f'style="background: {color_bad}"'
                errors_explained.append([f'<a href="#{anchor}">The test \'{r[0]}\' is too slow to run as a whole. Investigate whether the create and fill queries can be sped up'])
                errors_explained.append(
                    [
                        f"<a href=\"#{anchor}\">The test '{r[0]}' is too slow to run as a whole. Investigate whether the create and fill queries can be sped up"
                    ]
                )
            else:
                attrs[5] = ''
                attrs[5] = ""

            if r[0] != 'Total' and float(r[4]) > allowed_single_run_time * total_runs:
            if r[0] != "Total" and float(r[4]) > allowed_single_run_time * total_runs:
                slow_average_tests += 1
                attrs[4] = f'style="background: {color_bad}"'
                errors_explained.append([f'<a href="./all-queries.html#all-query-times.{r[0]}.0">Some query of the test \'{r[0]}\' is too slow to run. See the all queries report'])
                errors_explained.append(
                    [
                        f"<a href=\"./all-queries.html#all-query-times.{r[0]}.0\">Some query of the test '{r[0]}' is too slow to run. See the all queries report"
                    ]
                )
            else:
                attrs[4] = ''
                attrs[4] = ""

            text += tableRow(r, attrs, anchor)

@ -525,10 +611,17 @@ if args.report == 'main':
add_test_times()
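The 3.75-second budget in add_test_times() is derived arithmetic worth making explicit: with 7 measured runs plus 1 prewarm run on each of two servers, a query executes 16 times, so staying under roughly 60 seconds per test allows about 3.75 seconds per run. A quick check:

```python
# Sanity check of the per-query budget used in add_test_times():
# (7 measured + 1 prewarm) runs on two servers must fit in ~60 s.
runs_per_query = (7 + 1) * 2          # matches total_runs = (int(r[7]) + 1) * 2
allowed_average_run_time = 60 / runs_per_query
assert allowed_average_run_time == 3.75
```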

    addSimpleTable('Metric Changes',
        ['Metric', 'Old median value', 'New median value',
        'Relative difference', 'Times difference'],
        tsvRows('metrics/changes.tsv'))
    addSimpleTable(
        "Metric Changes",
        [
            "Metric",
            "Old median value",
            "New median value",
            "Relative difference",
            "Times difference",
        ],
        tsvRows("metrics/changes.tsv"),
    )

    add_report_errors()
    add_errors_explained()

@ -536,7 +629,8 @@ if args.report == 'main':
    for t in tables:
        print(t)

    print(f"""
    print(
        f"""
</div>
<p class="links">
<a href="all-queries.html">All queries</a>

@ -546,104 +640,111 @@ if args.report == 'main':
</p>
</body>
</html>
""")
"""
    )

    status = 'success'
    message = 'See the report'
    status = "success"
    message = "See the report"
    message_array = []

    if slow_average_tests:
        status = 'failure'
        message_array.append(str(slow_average_tests) + ' too long')
        status = "failure"
        message_array.append(str(slow_average_tests) + " too long")

    if faster_queries:
        message_array.append(str(faster_queries) + ' faster')
        message_array.append(str(faster_queries) + " faster")

    if slower_queries:
        if slower_queries > 3:
            status = 'failure'
        message_array.append(str(slower_queries) + ' slower')
            status = "failure"
        message_array.append(str(slower_queries) + " slower")

    if unstable_partial_queries:
        very_unstable_queries += unstable_partial_queries
        status = 'failure'
        status = "failure"

    # Don't show mildly unstable queries, only the very unstable ones we
    # treat as errors.
    if very_unstable_queries:
        if very_unstable_queries > 5:
            error_tests += very_unstable_queries
            status = 'failure'
        message_array.append(str(very_unstable_queries) + ' unstable')
            status = "failure"
        message_array.append(str(very_unstable_queries) + " unstable")

    error_tests += slow_average_tests
    if error_tests:
        status = 'failure'
        message_array.insert(0, str(error_tests) + ' errors')
        status = "failure"
        message_array.insert(0, str(error_tests) + " errors")

    if message_array:
        message = ', '.join(message_array)
        message = ", ".join(message_array)

    if report_errors:
        status = 'failure'
        message = 'Errors while building the report.'
        status = "failure"
        message = "Errors while building the report."

    print(("""
    print(
        (
            """
<!--status: {status}-->
<!--message: {message}-->
""".format(status=status, message=message)))
""".format(
                status=status, message=message
            )
        )
    )

elif args.report == 'all-queries':
elif args.report == "all-queries":

    print((header_template.format()))

    add_tested_commits()

    def add_all_queries():
        rows = tsvRows('report/all-queries.tsv')
        rows = tsvRows("report/all-queries.tsv")
        if not rows:
            return

        columns = [
            '', # Changed #0
            '', # Unstable #1
            'Old, s', #2
            'New, s', #3
            'Ratio of speedup (-) or slowdown (+)', #4
            'Relative difference (new − old) / old', #5
            'p < 0.01 threshold', #6
            'Test', #7
            '#', #8
            'Query', #9
        ]
        attrs = ['' for c in columns]
            "", # Changed #0
            "", # Unstable #1
            "Old, s",  # 2
            "New, s",  # 3
            "Ratio of speedup (-) or slowdown (+)",  # 4
            "Relative difference (new − old) / old",  # 5
            "p < 0.01 threshold",  # 6
            "Test",  # 7
            "#",  # 8
            "Query",  # 9
        ]
        attrs = ["" for c in columns]
        attrs[0] = None
        attrs[1] = None

        text = tableStart('All Query Times')
        text = tableStart("All Query Times")
        text += tableHeader(columns, attrs)

        for r in rows:
            anchor = f'{currentTableAnchor()}.{r[7]}.{r[8]}'
            anchor = f"{currentTableAnchor()}.{r[7]}.{r[8]}"
            if int(r[1]):
                attrs[6] = f'style="background: {color_bad}"'
            else:
                attrs[6] = ''
                attrs[6] = ""

            if int(r[0]):
                if float(r[5]) > 0.:
                if float(r[5]) > 0.0:
                    attrs[4] = attrs[5] = f'style="background: {color_bad}"'
                else:
                    attrs[4] = attrs[5] = f'style="background: {color_good}"'
            else:
                attrs[4] = attrs[5] = ''
                attrs[4] = attrs[5] = ""

            if (float(r[2]) + float(r[3])) / 2 > allowed_single_run_time:
                attrs[2] = f'style="background: {color_bad}"'
                attrs[3] = f'style="background: {color_bad}"'
            else:
                attrs[2] = ''
                attrs[3] = ''
                attrs[2] = ""
                attrs[3] = ""

            text += tableRow(r, attrs, anchor)

@ -655,7 +756,8 @@ elif args.report == 'all-queries':
    for t in tables:
        print(t)

    print(f"""
    print(
        f"""
</div>
<p class="links">
<a href="report.html">Main report</a>

@ -665,4 +767,5 @@ elif args.report == 'all-queries':
</p>
</body>
</html>
""")
"""
    )
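Both report modes end by emitting machine-readable status markers in HTML comments. A small sketch (the counts are invented) of how the pieces assemble, assuming some downstream consumer parses these comments:

```python
# Each failing category appends "<n> <label>"; error counts are
# inserted first, and the joined string becomes the status message.
message_array = ["2 errors", "1 too long", "3 faster", "5 slower"]
status = "failure"
message = ", ".join(message_array)
print(f"<!--status: {status}-->")
print(f"<!--message: {message}-->")  # -> 2 errors, 1 too long, 3 faster, 5 slower
```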
@ -1,50 +0,0 @@
# rebuild in #33610
# docker build -t clickhouse/pvs-test .

ARG FROM_TAG=latest
FROM clickhouse/binary-builder:$FROM_TAG

RUN apt-get update --yes \
    && apt-get install \
        bash \
        wget \
        software-properties-common \
        gpg-agent \
        debsig-verify \
        strace \
        protobuf-compiler \
        protobuf-compiler-grpc \
        libprotoc-dev \
        libgrpc++-dev \
        libc-ares-dev \
        --yes --no-install-recommends

#RUN wget -nv -O - http://files.viva64.com/etc/pubkey.txt | sudo apt-key add -
#RUN sudo wget -nv -O /etc/apt/sources.list.d/viva64.list http://files.viva64.com/etc/viva64.list
#
#RUN apt-get --allow-unauthenticated update -y \
#    && env DEBIAN_FRONTEND=noninteractive \
#        apt-get --allow-unauthenticated install --yes --no-install-recommends \
#            pvs-studio

ENV PKG_VERSION="pvs-studio-latest"

RUN set -x \
    && export PUBKEY_HASHSUM="ad369a2e9d8b8c30f5a9f2eb131121739b79c78e03fef0f016ea51871a5f78cd4e6257b270dca0ac3be3d1f19d885516" \
    && wget -nv https://files.viva64.com/etc/pubkey.txt -O /tmp/pubkey.txt \
    && echo "${PUBKEY_HASHSUM} /tmp/pubkey.txt" | sha384sum -c \
    && apt-key add /tmp/pubkey.txt \
    && wget -nv "https://files.viva64.com/${PKG_VERSION}.deb" \
    && { debsig-verify ${PKG_VERSION}.deb \
        || echo "WARNING: Some file was just downloaded from the internet without any validation and we are installing it into the system"; } \
    && dpkg -i "${PKG_VERSION}.deb"

ENV CCACHE_DIR=/test_output/ccache

CMD echo "Running PVS version $PKG_VERSION" && mkdir -p $CCACHE_DIR && cd /repo_folder && pvs-studio-analyzer credentials $LICENCE_NAME $LICENCE_KEY -o ./licence.lic \
    && cmake . -D"ENABLE_EMBEDDED_COMPILER"=OFF -D"DISABLE_HERMETIC_BUILD"=ON -DCMAKE_C_COMPILER=clang-13 -DCMAKE_CXX_COMPILER=clang\+\+-13 \
    && ninja re2_st clickhouse_grpc_protos \
    && pvs-studio-analyzer analyze -o pvs-studio.log -e contrib -j "$(nproc)" -l ./licence.lic; \
    cp /repo_folder/pvs-studio.log /test_output; \
    plog-converter -a GA:1,2 -t fullhtml -o /test_output/pvs-studio-html-report pvs-studio.log; \
    plog-converter -a GA:1,2 -t tasklist -o /test_output/pvs-studio-task-report.txt pvs-studio.log
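Before its removal, this Dockerfile pinned the PVS-Studio signing key by its SHA-384 digest before trusting it. A sketch of the same integrity check in Python; the path and digest are taken from the RUN step above:

```python
import hashlib

# Mirror of the `sha384sum -c` pin above: recompute the digest of the
# downloaded key and compare it to the pinned value before trusting it.
PUBKEY_HASHSUM = "ad369a2e9d8b8c30f5a9f2eb131121739b79c78e03fef0f016ea51871a5f78cd4e6257b270dca0ac3be3d1f19d885516"

with open("/tmp/pubkey.txt", "rb") as f:
    digest = hashlib.sha384(f.read()).hexdigest()

if digest != PUBKEY_HASHSUM:
    raise SystemExit("pubkey.txt does not match the pinned SHA-384 digest")
```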
@ -7,18 +7,19 @@ import csv

RESULT_LOG_NAME = "run.log"


def process_result(result_folder):

    status = "success"
    description = 'Server started and responded'
    description = "Server started and responded"
    summary = [("Smoke test", "OK")]
    with open(os.path.join(result_folder, RESULT_LOG_NAME), 'r') as run_log:
        lines = run_log.read().split('\n')
        if not lines or lines[0].strip() != 'OK':
    with open(os.path.join(result_folder, RESULT_LOG_NAME), "r") as run_log:
        lines = run_log.read().split("\n")
        if not lines or lines[0].strip() != "OK":
            status = "failure"
            logging.info("Lines is not ok: %s", str('\n'.join(lines)))
            logging.info("Lines is not ok: %s", str("\n".join(lines)))
            summary = [("Smoke test", "FAIL")]
            description = 'Server failed to respond, see result in logs'
            description = "Server failed to respond, see result in logs"

    result_logs = []
    server_log_path = os.path.join(result_folder, "clickhouse-server.log")

@ -38,20 +39,22 @@ def process_result(result_folder):


def write_results(results_file, status_file, results, status):
    with open(results_file, 'w') as f:
        out = csv.writer(f, delimiter='\t')
    with open(results_file, "w") as f:
        out = csv.writer(f, delimiter="\t")
        out.writerows(results)
    with open(status_file, 'w') as f:
        out = csv.writer(f, delimiter='\t')
    with open(status_file, "w") as f:
        out = csv.writer(f, delimiter="\t")
        out.writerow(status)


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
    parser = argparse.ArgumentParser(description="ClickHouse script for parsing results of split build smoke test")
    parser.add_argument("--in-results-dir", default='/test_output/')
    parser.add_argument("--out-results-file", default='/test_output/test_results.tsv')
    parser.add_argument("--out-status-file", default='/test_output/check_status.tsv')
    logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
    parser = argparse.ArgumentParser(
        description="ClickHouse script for parsing results of split build smoke test"
    )
    parser.add_argument("--in-results-dir", default="/test_output/")
    parser.add_argument("--out-results-file", default="/test_output/test_results.tsv")
    parser.add_argument("--out-status-file", default="/test_output/check_status.tsv")
    args = parser.parse_args()

    state, description, test_results, logs = process_result(args.in_results_dir)
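The parser ends by writing two tab-separated artifacts. A minimal, self-contained sketch of the layout write_results() produces, using the values seen in process_result() above:

```python
import csv

# One (test, status) pair per row in the results file, and a single
# (status, description) row in the status file.
with open("test_results.tsv", "w") as f:
    csv.writer(f, delimiter="\t").writerows([("Smoke test", "OK")])
with open("check_status.tsv", "w") as f:
    csv.writer(f, delimiter="\t").writerow(
        ("success", "Server started and responded")
    )
```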
@ -10,11 +10,18 @@ def process_result(result_folder):
    status = "success"
    summary = []
    paths = []
    tests = ["TLPWhere", "TLPGroupBy", "TLPHaving", "TLPWhereGroupBy", "TLPDistinct", "TLPAggregate"]
    tests = [
        "TLPWhere",
        "TLPGroupBy",
        "TLPHaving",
        "TLPWhereGroupBy",
        "TLPDistinct",
        "TLPAggregate",
    ]

    for test in tests:
        err_path = '{}/{}.err'.format(result_folder, test)
        out_path = '{}/{}.out'.format(result_folder, test)
        err_path = "{}/{}.err".format(result_folder, test)
        out_path = "{}/{}.out".format(result_folder, test)
        if not os.path.exists(err_path):
            logging.info("No output err on path %s", err_path)
            summary.append((test, "SKIPPED"))

@ -23,24 +30,24 @@ def process_result(result_folder):
        else:
            paths.append(err_path)
            paths.append(out_path)
            with open(err_path, 'r') as f:
                if 'AssertionError' in f.read():
            with open(err_path, "r") as f:
                if "AssertionError" in f.read():
                    summary.append((test, "FAIL"))
                    status = 'failure'
                    status = "failure"
                else:
                    summary.append((test, "OK"))

    logs_path = '{}/logs.tar.gz'.format(result_folder)
    logs_path = "{}/logs.tar.gz".format(result_folder)
    if not os.path.exists(logs_path):
        logging.info("No logs tar on path %s", logs_path)
    else:
        paths.append(logs_path)
    stdout_path = '{}/stdout.log'.format(result_folder)
    stdout_path = "{}/stdout.log".format(result_folder)
    if not os.path.exists(stdout_path):
        logging.info("No stdout log on path %s", stdout_path)
    else:
        paths.append(stdout_path)
    stderr_path = '{}/stderr.log'.format(result_folder)
    stderr_path = "{}/stderr.log".format(result_folder)
    if not os.path.exists(stderr_path):
        logging.info("No stderr log on path %s", stderr_path)
    else:

@ -52,20 +59,22 @@ def process_result(result_folder):


def write_results(results_file, status_file, results, status):
    with open(results_file, 'w') as f:
        out = csv.writer(f, delimiter='\t')
    with open(results_file, "w") as f:
        out = csv.writer(f, delimiter="\t")
        out.writerows(results)
    with open(status_file, 'w') as f:
        out = csv.writer(f, delimiter='\t')
    with open(status_file, "w") as f:
        out = csv.writer(f, delimiter="\t")
        out.writerow(status)


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
    parser = argparse.ArgumentParser(description="ClickHouse script for parsing results of sqlancer test")
    parser.add_argument("--in-results-dir", default='/test_output/')
    parser.add_argument("--out-results-file", default='/test_output/test_results.tsv')
    parser.add_argument("--out-status-file", default='/test_output/check_status.tsv')
    logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
    parser = argparse.ArgumentParser(
        description="ClickHouse script for parsing results of sqlancer test"
    )
    parser.add_argument("--in-results-dir", default="/test_output/")
    parser.add_argument("--out-results-file", default="/test_output/test_results.tsv")
    parser.add_argument("--out-status-file", default="/test_output/check_status.tsv")
    args = parser.parse_args()

    state, description, test_results, logs = process_result(args.in_results_dir)
@ -13,6 +13,17 @@ COPY s3downloader /s3downloader

ENV S3_URL="https://clickhouse-datasets.s3.yandex.net"
ENV DATASETS="hits visits"
ENV EXPORT_S3_STORAGE_POLICIES=1

# Download Minio-related binaries
RUN arch=${TARGETARCH:-amd64} \
    && wget "https://dl.min.io/server/minio/release/linux-${arch}/minio" \
    && chmod +x ./minio \
    && wget "https://dl.min.io/client/mc/release/linux-${arch}/mc" \
    && chmod +x ./mc
ENV MINIO_ROOT_USER="clickhouse"
ENV MINIO_ROOT_PASSWORD="clickhouse"
COPY setup_minio.sh /

COPY run.sh /
CMD ["/bin/bash", "/run.sh"]
@ -11,11 +11,14 @@ dpkg -i package_folder/clickhouse-common-static_*.deb;
dpkg -i package_folder/clickhouse-common-static-dbg_*.deb
dpkg -i package_folder/clickhouse-server_*.deb
dpkg -i package_folder/clickhouse-client_*.deb
dpkg -i package_folder/clickhouse-test_*.deb

ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test

# install test configs
/usr/share/clickhouse-test/config/install.sh

./setup_minio.sh

function start()
{
    if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then

@ -92,6 +95,8 @@ else
    clickhouse-client --query "SHOW TABLES FROM test"
    clickhouse-client --query "RENAME TABLE datasets.hits_v1 TO test.hits"
    clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits"
    clickhouse-client --query "CREATE TABLE test.hits_s3 (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'"
    clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits"
fi

clickhouse-client --query "SHOW TABLES FROM test"

docker/test/stateful/setup_minio.sh (new executable file, 66 lines)
@ -0,0 +1,66 @@
#!/bin/bash

# TODO: Make this file shared with stateless tests
#
# Usage for local run:
#
# ./docker/test/stateful/setup_minio.sh ./tests/
#

set -e -x -a -u

ls -lha

mkdir -p ./minio_data

if [ ! -f ./minio ]; then
    echo 'MinIO binary not found, downloading...'

    BINARY_TYPE=$(uname -s | tr '[:upper:]' '[:lower:]')

    wget "https://dl.min.io/server/minio/release/${BINARY_TYPE}-amd64/minio" \
        && chmod +x ./minio \
        && wget "https://dl.min.io/client/mc/release/${BINARY_TYPE}-amd64/mc" \
        && chmod +x ./mc
fi

MINIO_ROOT_USER=${MINIO_ROOT_USER:-clickhouse}
MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD:-clickhouse}

./minio server --address ":11111" ./minio_data &

while ! curl -v --silent http://localhost:11111 2>&1 | grep AccessDenied
do
    echo "Trying to connect to minio"
    sleep 1
done

lsof -i :11111

sleep 5

./mc alias set clickminio http://localhost:11111 clickhouse clickhouse
./mc admin user add clickminio test testtest
./mc admin policy set clickminio readwrite user=test
./mc mb clickminio/test


# Upload data to Minio. By default after unpacking all tests will be in
# /usr/share/clickhouse-test/queries

TEST_PATH=${1:-/usr/share/clickhouse-test}
MINIO_DATA_PATH=${TEST_PATH}/queries/1_stateful/data_minio

# Iterating over globs will cause the redundant FILE variable to be a path to a file, not a filename
# shellcheck disable=SC2045
for FILE in $(ls "${MINIO_DATA_PATH}"); do
    echo "$FILE";
    ./mc cp "${MINIO_DATA_PATH}"/"$FILE" clickminio/test/"$FILE";
done

mkdir -p ~/.aws
cat <<EOT >> ~/.aws/credentials
[default]
aws_access_key_id=clickhouse
aws_secret_access_key=clickhouse
EOT
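Once the script finishes, the bucket can be inspected from Python as well. A sketch assuming the boto3 package is available; the endpoint and credentials below are the ones configured above:

```python
import boto3

# List what setup_minio.sh uploaded into the local MinIO "test" bucket.
s3 = boto3.client(
    "s3",
    endpoint_url="http://localhost:11111",
    aws_access_key_id="clickhouse",
    aws_secret_access_key="clickhouse",
)
for obj in s3.list_objects_v2(Bucket="test").get("Contents", []):
    print(obj["Key"], obj["Size"])
```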
@ -31,7 +31,8 @@ RUN apt-get update -y \
            wget \
            mysql-client=8.0* \
            postgresql-client \
            sqlite3
            sqlite3 \
            awscli

RUN pip3 install numpy scipy pandas Jinja2

@ -59,6 +60,7 @@ RUN arch=${TARGETARCH:-amd64} \

ENV MINIO_ROOT_USER="clickhouse"
ENV MINIO_ROOT_PASSWORD="clickhouse"
ENV EXPORT_S3_STORAGE_POLICIES=1

COPY run.sh /
COPY setup_minio.sh /
@ -2,7 +2,7 @@
# 1. build base container
# 2. run base container with mounted volumes
# 3. commit container as image
FROM ubuntu:18.10 as clickhouse-test-runner-base
FROM ubuntu:20.04 as clickhouse-test-runner-base

# A volume where directory with clickhouse packages to be mounted,
# for later installing.

@ -11,5 +11,4 @@ VOLUME /packages
CMD apt-get update ;\
    DEBIAN_FRONTEND=noninteractive \
    apt install -y /packages/clickhouse-common-static_*.deb \
        /packages/clickhouse-client_*.deb \
        /packages/clickhouse-test_*.deb
        /packages/clickhouse-client_*.deb
@ -12,11 +12,8 @@ dpkg -i package_folder/clickhouse-common-static_*.deb
dpkg -i package_folder/clickhouse-common-static-dbg_*.deb
dpkg -i package_folder/clickhouse-server_*.deb
dpkg -i package_folder/clickhouse-client_*.deb
if [[ -n "$TEST_CASES_FROM_DEB" ]] && [[ "$TEST_CASES_FROM_DEB" -eq 1 ]]; then
    dpkg -i package_folder/clickhouse-test_*.deb
else
    ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test
fi

ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test

# install test configs
/usr/share/clickhouse-test/config/install.sh

@ -89,6 +86,10 @@ function run_tests()
        # everything in parallel except DatabaseReplicated. See below.
    fi

    if [[ -n "$USE_S3_STORAGE_FOR_MERGE_TREE" ]] && [[ "$USE_S3_STORAGE_FOR_MERGE_TREE" -eq 1 ]]; then
        ADDITIONAL_OPTIONS+=('--s3-storage')
    fi

    if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
        ADDITIONAL_OPTIONS+=('--replicated-database')
        ADDITIONAL_OPTIONS+=('--jobs')
@ -55,3 +55,10 @@ for FILE in $(ls "${MINIO_DATA_PATH}"); do
|
||||
echo "$FILE";
|
||||
./mc cp "${MINIO_DATA_PATH}"/"$FILE" clickminio/test/"$FILE";
|
||||
done
|
||||
|
||||
mkdir -p ~/.aws
|
||||
cat <<EOT >> ~/.aws/credentials
|
||||
[default]
|
||||
aws_access_key_id=clickhouse
|
||||
aws_secret_access_key=clickhouse
|
||||
EOT
|
||||
|
@ -30,5 +30,4 @@ CMD dpkg -i package_folder/clickhouse-common-static_*.deb; \
    dpkg -i package_folder/clickhouse-common-static-dbg_*.deb; \
    dpkg -i package_folder/clickhouse-server_*.deb; \
    dpkg -i package_folder/clickhouse-client_*.deb; \
    dpkg -i package_folder/clickhouse-test_*.deb; \
    python3 -m pytest /usr/share/clickhouse-test/queries -n $(nproc) --reruns=1 --timeout=600 --json=test_output/report.json --html=test_output/report.html --self-contained-html
@ -25,9 +25,11 @@ RUN apt-get update -y \
            brotli

COPY ./stress /stress
COPY ./download_previous_release /download_previous_release
COPY run.sh /

ENV DATASETS="hits visits"
ENV S3_URL="https://clickhouse-datasets.s3.yandex.net"
ENV EXPORT_S3_STORAGE_POLICIES=1

CMD ["/bin/bash", "/run.sh"]