diff --git a/.clang-format b/.clang-format
index d8f273702c8..2da3911dced 100644
--- a/.clang-format
+++ b/.clang-format
@@ -21,7 +21,6 @@ ConstructorInitializerAllOnOneLineOrOnePerLine: true
ExperimentalAutoDetectBinPacking: true
UseTab: Never
TabWidth: 4
-IndentWidth: 4
Standard: Cpp11
PointerAlignment: Middle
MaxEmptyLinesToKeep: 2
diff --git a/.clang-tidy b/.clang-tidy
index 4dd8b9859c9..85989d311a2 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -23,9 +23,12 @@ Checks: '*,
-bugprone-implicit-widening-of-multiplication-result,
-bugprone-narrowing-conversions,
-bugprone-not-null-terminated-result,
+ -bugprone-reserved-identifier, # useful but too slow, TODO retry when https://reviews.llvm.org/rG1c282052624f9d0bd273bde0b47b30c96699c6c7 is merged
-bugprone-unchecked-optional-access,
-cert-dcl16-c,
+ -cert-dcl37-c,
+ -cert-dcl51-cpp,
-cert-err58-cpp,
-cert-msc32-c,
-cert-msc51-cpp,
@@ -38,6 +41,8 @@ Checks: '*,
-clang-analyzer-security.insecureAPI.strcpy,
-cppcoreguidelines-avoid-c-arrays,
+ -cppcoreguidelines-avoid-const-or-ref-data-members,
+ -cppcoreguidelines-avoid-do-while,
-cppcoreguidelines-avoid-goto,
-cppcoreguidelines-avoid-magic-numbers,
-cppcoreguidelines-avoid-non-const-global-variables,
@@ -105,6 +110,8 @@ Checks: '*,
-misc-const-correctness,
-misc-no-recursion,
-misc-non-private-member-variables-in-classes,
+ -misc-confusable-identifiers, # useful but slooow
+ -misc-use-anonymous-namespace,
-modernize-avoid-c-arrays,
-modernize-concat-nested-namespaces,
@@ -125,10 +132,12 @@ Checks: '*,
-portability-simd-intrinsics,
-readability-braces-around-statements,
+ -readability-convert-member-functions-to-static,
-readability-else-after-return,
-readability-function-cognitive-complexity,
-readability-function-size,
-readability-identifier-length,
+ -readability-identifier-naming, # useful but too slow
-readability-implicit-bool-conversion,
-readability-isolate-declaration,
-readability-magic-numbers,
@@ -140,74 +149,32 @@ Checks: '*,
-readability-uppercase-literal-suffix,
-readability-use-anyofallof,
- -zirkon-*,
-
- -misc-*, # temporarily disabled due to being too slow
- # also disable checks in other categories which are aliases of checks in misc-*:
- # https://releases.llvm.org/15.0.0/tools/clang/tools/extra/docs/clang-tidy/checks/list.html
- -cert-dcl54-cpp, # alias of misc-new-delete-overloads
- -hicpp-new-delete-operators, # alias of misc-new-delete-overloads
- -cert-fio38-c, # alias of misc-non-copyable-objects
- -cert-dcl03-c, # alias of misc-static-assert
- -hicpp-static-assert, # alias of misc-static-assert
- -cert-err09-cpp, # alias of misc-throw-by-value-catch-by-reference
- -cert-err61-cpp, # alias of misc-throw-by-value-catch-by-reference
- -cppcoreguidelines-c-copy-assignment-signature, # alias of misc-unconventional-assign-operator
- -cppcoreguidelines-non-private-member-variables-in-classes, # alias of misc-non-private-member-variables-in-classes
+ -zircon-*,
'
WarningsAsErrors: '*'
-# TODO: use dictionary syntax for CheckOptions when minimum clang-tidy level rose to 15
-# some-check.SomeOption: 'some value'
-# instead of
-# - key: some-check.SomeOption
-# value: 'some value'
CheckOptions:
- - key: readability-identifier-naming.ClassCase
- value: CamelCase
- - key: readability-identifier-naming.EnumCase
- value: CamelCase
- - key: readability-identifier-naming.LocalVariableCase
- value: lower_case
- - key: readability-identifier-naming.StaticConstantCase
- value: aNy_CasE
- - key: readability-identifier-naming.MemberCase
- value: lower_case
- - key: readability-identifier-naming.PrivateMemberPrefix
- value: ''
- - key: readability-identifier-naming.ProtectedMemberPrefix
- value: ''
- - key: readability-identifier-naming.PublicMemberCase
- value: lower_case
- - key: readability-identifier-naming.MethodCase
- value: camelBack
- - key: readability-identifier-naming.PrivateMethodPrefix
- value: ''
- - key: readability-identifier-naming.ProtectedMethodPrefix
- value: ''
- - key: readability-identifier-naming.ParameterPackCase
- value: lower_case
- - key: readability-identifier-naming.StructCase
- value: CamelCase
- - key: readability-identifier-naming.TemplateTemplateParameterCase
- value: CamelCase
- - key: readability-identifier-naming.TemplateUsingCase
- value: lower_case
- - key: readability-identifier-naming.TypeTemplateParameterCase
- value: CamelCase
- - key: readability-identifier-naming.TypedefCase
- value: CamelCase
- - key: readability-identifier-naming.UnionCase
- value: CamelCase
- - key: readability-identifier-naming.UsingCase
- value: CamelCase
- - key: modernize-loop-convert.UseCxx20ReverseRanges
- value: false
- - key: performance-move-const-arg.CheckTriviallyCopyableMove
- value: false
- # Workaround clang-tidy bug: https://github.com/llvm/llvm-project/issues/46097
- - key: readability-identifier-naming.TypeTemplateParameterIgnoredRegexp
- value: expr-type
- - key: cppcoreguidelines-avoid-do-while.IgnoreMacros
- value: true
+ readability-identifier-naming.ClassCase: CamelCase
+ readability-identifier-naming.EnumCase: CamelCase
+ readability-identifier-naming.LocalVariableCase: lower_case
+ readability-identifier-naming.StaticConstantCase: aNy_CasE
+ readability-identifier-naming.MemberCase: lower_case
+ readability-identifier-naming.PrivateMemberPrefix: ''
+ readability-identifier-naming.ProtectedMemberPrefix: ''
+ readability-identifier-naming.PublicMemberCase: lower_case
+ readability-identifier-naming.MethodCase: camelBack
+ readability-identifier-naming.PrivateMethodPrefix: ''
+ readability-identifier-naming.ProtectedMethodPrefix: ''
+ readability-identifier-naming.ParameterPackCase: lower_case
+ readability-identifier-naming.StructCase: CamelCase
+ readability-identifier-naming.TemplateTemplateParameterCase: CamelCase
+ readability-identifier-naming.TemplateParameterCase: lower_case
+ readability-identifier-naming.TypeTemplateParameterCase: CamelCase
+ readability-identifier-naming.TypedefCase: CamelCase
+ readability-identifier-naming.UnionCase: CamelCase
+ modernize-loop-convert.UseCxx20ReverseRanges: false
+ performance-move-const-arg.CheckTriviallyCopyableMove: false
+ # Workaround clang-tidy bug: https://github.com/llvm/llvm-project/issues/46097
+ readability-identifier-naming.TypeTemplateParameterIgnoredRegexp: expr-type
+ cppcoreguidelines-avoid-do-while.IgnoreMacros: true
diff --git a/.clangd b/.clangd
new file mode 100644
index 00000000000..ad471db8d8b
--- /dev/null
+++ b/.clangd
@@ -0,0 +1,16 @@
+Diagnostics:
+ # clangd does parse .clang-tidy, but some checks are too slow to run in
+ # clang-tidy build, so let's enable them explicitly for clangd at least.
+ ClangTidy:
+    # The following checks had been disabled due to slowness with C++23,
+ # for more details see [1].
+ #
+ # [1]: https://github.com/llvm/llvm-project/issues/61418
+ #
+ # But the code base had been written in a style that had been checked
+    # by these checks, so let's at least enable them for clangd.
+ Add: [
+ # configured in .clang-tidy
+ readability-identifier-naming,
+ bugprone-reserved-identifier,
+ ]
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 5d09d3a9ef3..db170c3e28f 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -2,16 +2,16 @@
A technical comment, you are free to remove or leave it as it is when PR is created
The following categories are used in the next scripts, update them accordingly
utils/changelog/changelog.py
-tests/ci/run_check.py
+tests/ci/cancel_and_rerun_workflow_lambda/app.py
-->
### Changelog category (leave one):
- New Feature
- Improvement
-- Bug Fix (user-visible misbehavior in official stable or prestable release)
- Performance Improvement
- Backward Incompatible Change
- Build/Testing/Packaging Improvement
- Documentation (changelog entry is not required)
+- Bug Fix (user-visible misbehavior in an official stable release)
- Not for changelog (changelog entry is not required)
diff --git a/.github/workflows/backport_branches.yml b/.github/workflows/backport_branches.yml
index 110c06631c7..d69168b01ee 100644
--- a/.github/workflows/backport_branches.yml
+++ b/.github/workflows/backport_branches.yml
@@ -9,8 +9,22 @@ on: # yamllint disable-line rule:truthy
branches:
- 'backport/**'
jobs:
+ CheckLabels:
+ runs-on: [self-hosted, style-checker]
+ # Run the first check always, even if the CI is cancelled
+ if: ${{ always() }}
+ steps:
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ with:
+ clear-repository: true
+ - name: Labels check
+ run: |
+ cd "$GITHUB_WORKSPACE/tests/ci"
+ python3 run_check.py
PythonUnitTests:
runs-on: [self-hosted, style-checker]
+ needs: CheckLabels
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
@@ -22,6 +36,7 @@ jobs:
python3 -m unittest discover -s . -p '*_test.py'
DockerHubPushAarch64:
runs-on: [self-hosted, style-checker-aarch64]
+ needs: CheckLabels
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
@@ -38,6 +53,7 @@ jobs:
path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
DockerHubPushAmd64:
runs-on: [self-hosted, style-checker]
+ needs: CheckLabels
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
@@ -79,7 +95,7 @@ jobs:
with:
name: changed_images
path: ${{ runner.temp }}/changed_images.json
- CompatibilityCheck:
+ CompatibilityCheckX86:
needs: [BuilderDebRelease]
runs-on: [self-hosted, style-checker]
steps:
@@ -98,12 +114,43 @@ jobs:
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- - name: CompatibilityCheck
+ - name: CompatibilityCheckX86
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci" && python3 compatibility_check.py
+ cd "$REPO_COPY/tests/ci" && python3 compatibility_check.py --check-name "Compatibility check (amd64)" --check-glibc --check-distributions
+ - name: Cleanup
+ if: always()
+ run: |
+ docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+ docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+ sudo rm -fr "$TEMP_PATH"
+ CompatibilityCheckAarch64:
+ needs: [BuilderDebAarch64]
+ runs-on: [self-hosted, style-checker]
+ steps:
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ TEMP_PATH=${{runner.temp}}/compatibility_check
+ REPO_COPY=${{runner.temp}}/compatibility_check/ClickHouse
+ REPORTS_PATH=${{runner.temp}}/reports_dir
+ EOF
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ with:
+ clear-repository: true
+ - name: Download json reports
+ uses: actions/download-artifact@v3
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ - name: CompatibilityCheckAarch64
+ run: |
+ sudo rm -fr "$TEMP_PATH"
+ mkdir -p "$TEMP_PATH"
+ cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+ cd "$REPO_COPY/tests/ci" && python3 compatibility_check.py --check-name "Compatibility check (aarch64)" --check-glibc
- name: Cleanup
if: always()
run: |
@@ -302,6 +349,13 @@ jobs:
with:
clear-repository: true
submodules: true
+ - name: Apply sparse checkout for contrib # in order to check that it doesn't break build
+ run: |
+ rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
+ git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
+ "$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
+ du -hs "$GITHUB_WORKSPACE/contrib" ||:
+ find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
@@ -421,8 +475,9 @@ jobs:
- name: Check docker clickhouse/clickhouse-server building
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
- python3 docker_server.py --release-type head --no-push
- python3 docker_server.py --release-type head --no-push --no-ubuntu \
+ python3 docker_server.py --release-type head --no-push \
+ --image-repo clickhouse/clickhouse-server --image-path docker/server
+ python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
- name: Cleanup
if: always()
@@ -741,7 +796,8 @@ jobs:
- FunctionalStatefulTestDebug
- StressTestTsan
- IntegrationTestsRelease
- - CompatibilityCheck
+ - CompatibilityCheckX86
+ - CompatibilityCheckAarch64
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
diff --git a/.github/workflows/cherry_pick.yml b/.github/workflows/cherry_pick.yml
index 065e584182b..8d1e2055978 100644
--- a/.github/workflows/cherry_pick.yml
+++ b/.github/workflows/cherry_pick.yml
@@ -35,7 +35,6 @@ jobs:
fetch-depth: 0
- name: Cherry pick
run: |
- sudo pip install GitPython
cd "$GITHUB_WORKSPACE/tests/ci"
python3 cherry_pick.py
- name: Cleanup
diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml
index 7e045992dee..f0741b5465f 100644
--- a/.github/workflows/master.yml
+++ b/.github/workflows/master.yml
@@ -110,7 +110,7 @@ jobs:
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
- CompatibilityCheck:
+ CompatibilityCheckX86:
needs: [BuilderDebRelease]
runs-on: [self-hosted, style-checker]
steps:
@@ -129,12 +129,43 @@ jobs:
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- - name: CompatibilityCheck
+ - name: CompatibilityCheckX86
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci" && python3 compatibility_check.py
+ cd "$REPO_COPY/tests/ci" && python3 compatibility_check.py --check-name "Compatibility check (amd64)" --check-glibc --check-distributions
+ - name: Cleanup
+ if: always()
+ run: |
+ docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+ docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+ sudo rm -fr "$TEMP_PATH"
+ CompatibilityCheckAarch64:
+ needs: [BuilderDebAarch64]
+ runs-on: [self-hosted, style-checker]
+ steps:
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ TEMP_PATH=${{runner.temp}}/compatibility_check
+ REPO_COPY=${{runner.temp}}/compatibility_check/ClickHouse
+ REPORTS_PATH=${{runner.temp}}/reports_dir
+ EOF
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ with:
+ clear-repository: true
+ - name: Download json reports
+ uses: actions/download-artifact@v3
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ - name: CompatibilityCheckAarch64
+ run: |
+ sudo rm -fr "$TEMP_PATH"
+ mkdir -p "$TEMP_PATH"
+ cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+ cd "$REPO_COPY/tests/ci" && python3 compatibility_check.py --check-name "Compatibility check (aarch64)" --check-glibc
- name: Cleanup
if: always()
run: |
@@ -456,6 +487,13 @@ jobs:
with:
clear-repository: true
submodules: true
+ - name: Apply sparse checkout for contrib # in order to check that it doesn't break build
+ run: |
+ rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
+ git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
+ "$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
+ du -hs "$GITHUB_WORKSPACE/contrib" ||:
+ find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
@@ -829,8 +867,9 @@ jobs:
- name: Check docker clickhouse/clickhouse-server building
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
- python3 docker_server.py --release-type head
- python3 docker_server.py --release-type head --no-ubuntu \
+ python3 docker_server.py --release-type head \
+ --image-repo clickhouse/clickhouse-server --image-path docker/server
+ python3 docker_server.py --release-type head \
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
- name: Cleanup
if: always()
@@ -1099,7 +1138,7 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=0
- RUN_BY_HASH_TOTAL=2
+ RUN_BY_HASH_TOTAL=4
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@@ -1135,6 +1174,114 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=1
+ RUN_BY_HASH_TOTAL=4
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v3
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ with:
+ clear-repository: true
+ - name: Functional test
+ run: |
+ sudo rm -fr "$TEMP_PATH"
+ mkdir -p "$TEMP_PATH"
+ cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+ cd "$REPO_COPY/tests/ci"
+ python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+ - name: Cleanup
+ if: always()
+ run: |
+ docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+ docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+ sudo rm -fr "$TEMP_PATH"
+ FunctionalStatelessTestReleaseDatabaseReplicated2:
+ needs: [BuilderDebRelease]
+ runs-on: [self-hosted, func-tester]
+ steps:
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ TEMP_PATH=${{runner.temp}}/stateless_database_replicated
+ REPORTS_PATH=${{runner.temp}}/reports_dir
+ CHECK_NAME=Stateless tests (release, DatabaseReplicated)
+ REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse
+ KILL_TIMEOUT=10800
+ RUN_BY_HASH_NUM=2
+ RUN_BY_HASH_TOTAL=4
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v3
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ with:
+ clear-repository: true
+ - name: Functional test
+ run: |
+ sudo rm -fr "$TEMP_PATH"
+ mkdir -p "$TEMP_PATH"
+ cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+ cd "$REPO_COPY/tests/ci"
+ python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+ - name: Cleanup
+ if: always()
+ run: |
+ docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+ docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+ sudo rm -fr "$TEMP_PATH"
+ FunctionalStatelessTestReleaseDatabaseReplicated3:
+ needs: [BuilderDebRelease]
+ runs-on: [self-hosted, func-tester]
+ steps:
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ TEMP_PATH=${{runner.temp}}/stateless_database_replicated
+ REPORTS_PATH=${{runner.temp}}/reports_dir
+ CHECK_NAME=Stateless tests (release, DatabaseReplicated)
+ REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse
+ KILL_TIMEOUT=10800
+ RUN_BY_HASH_NUM=3
+ RUN_BY_HASH_TOTAL=4
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v3
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ with:
+ clear-repository: true
+ - name: Functional test
+ run: |
+ sudo rm -fr "$TEMP_PATH"
+ mkdir -p "$TEMP_PATH"
+ cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+ cd "$REPO_COPY/tests/ci"
+ python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+ - name: Cleanup
+ if: always()
+ run: |
+ docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+ docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+ sudo rm -fr "$TEMP_PATH"
+ FunctionalStatelessTestReleaseS3_0:
+ needs: [BuilderDebRelease]
+ runs-on: [self-hosted, func-tester]
+ steps:
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ TEMP_PATH=${{runner.temp}}/stateless_s3_storage
+ REPORTS_PATH=${{runner.temp}}/reports_dir
+ CHECK_NAME=Stateless tests (release, s3 storage)
+ REPO_COPY=${{runner.temp}}/stateless_s3_storage/ClickHouse
+ KILL_TIMEOUT=10800
+ RUN_BY_HASH_NUM=0
RUN_BY_HASH_TOTAL=2
EOF
- name: Download json reports
@@ -1158,7 +1305,7 @@ jobs:
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
- FunctionalStatelessTestReleaseS3:
+ FunctionalStatelessTestReleaseS3_1:
needs: [BuilderDebRelease]
runs-on: [self-hosted, func-tester]
steps:
@@ -1170,6 +1317,42 @@ jobs:
CHECK_NAME=Stateless tests (release, s3 storage)
REPO_COPY=${{runner.temp}}/stateless_s3_storage/ClickHouse
KILL_TIMEOUT=10800
+ RUN_BY_HASH_NUM=1
+ RUN_BY_HASH_TOTAL=2
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v3
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ with:
+ clear-repository: true
+ - name: Functional test
+ run: |
+ sudo rm -fr "$TEMP_PATH"
+ mkdir -p "$TEMP_PATH"
+ cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+ cd "$REPO_COPY/tests/ci"
+ python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+ - name: Cleanup
+ if: always()
+ run: |
+ docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+ docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+ sudo rm -fr "$TEMP_PATH"
+ FunctionalStatelessTestReleaseAnalyzer:
+ needs: [BuilderDebRelease]
+ runs-on: [self-hosted, func-tester]
+ steps:
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ TEMP_PATH=${{runner.temp}}/stateless_analyzer
+ REPORTS_PATH=${{runner.temp}}/reports_dir
+ CHECK_NAME=Stateless tests (release, analyzer)
+ REPO_COPY=${{runner.temp}}/stateless_analyzer/ClickHouse
+ KILL_TIMEOUT=10800
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@@ -1239,7 +1422,7 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=0
- RUN_BY_HASH_TOTAL=2
+ RUN_BY_HASH_TOTAL=4
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@@ -1275,7 +1458,79 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=1
- RUN_BY_HASH_TOTAL=2
+ RUN_BY_HASH_TOTAL=4
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v3
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ with:
+ clear-repository: true
+ - name: Functional test
+ run: |
+ sudo rm -fr "$TEMP_PATH"
+ mkdir -p "$TEMP_PATH"
+ cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+ cd "$REPO_COPY/tests/ci"
+ python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+ - name: Cleanup
+ if: always()
+ run: |
+ docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+ docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+ sudo rm -fr "$TEMP_PATH"
+ FunctionalStatelessTestAsan2:
+ needs: [BuilderDebAsan]
+ runs-on: [self-hosted, func-tester]
+ steps:
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ TEMP_PATH=${{runner.temp}}/stateless_debug
+ REPORTS_PATH=${{runner.temp}}/reports_dir
+ CHECK_NAME=Stateless tests (asan)
+ REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
+ KILL_TIMEOUT=10800
+ RUN_BY_HASH_NUM=2
+ RUN_BY_HASH_TOTAL=4
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v3
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ with:
+ clear-repository: true
+ - name: Functional test
+ run: |
+ sudo rm -fr "$TEMP_PATH"
+ mkdir -p "$TEMP_PATH"
+ cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+ cd "$REPO_COPY/tests/ci"
+ python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+ - name: Cleanup
+ if: always()
+ run: |
+ docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+ docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+ sudo rm -fr "$TEMP_PATH"
+ FunctionalStatelessTestAsan3:
+ needs: [BuilderDebAsan]
+ runs-on: [self-hosted, func-tester]
+ steps:
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ TEMP_PATH=${{runner.temp}}/stateless_debug
+ REPORTS_PATH=${{runner.temp}}/reports_dir
+ CHECK_NAME=Stateless tests (asan)
+ REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
+ KILL_TIMEOUT=10800
+ RUN_BY_HASH_NUM=3
+ RUN_BY_HASH_TOTAL=4
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@@ -1311,7 +1566,7 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=0
- RUN_BY_HASH_TOTAL=3
+ RUN_BY_HASH_TOTAL=5
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@@ -1347,7 +1602,7 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=1
- RUN_BY_HASH_TOTAL=3
+ RUN_BY_HASH_TOTAL=5
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@@ -1383,7 +1638,7 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=2
- RUN_BY_HASH_TOTAL=3
+ RUN_BY_HASH_TOTAL=5
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@@ -1406,7 +1661,79 @@ jobs:
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
- FunctionalStatelessTestUBsan:
+ FunctionalStatelessTestTsan3:
+ needs: [BuilderDebTsan]
+ runs-on: [self-hosted, func-tester]
+ steps:
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ TEMP_PATH=${{runner.temp}}/stateless_tsan
+ REPORTS_PATH=${{runner.temp}}/reports_dir
+ CHECK_NAME=Stateless tests (tsan)
+ REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
+ KILL_TIMEOUT=10800
+ RUN_BY_HASH_NUM=3
+ RUN_BY_HASH_TOTAL=5
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v3
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ with:
+ clear-repository: true
+ - name: Functional test
+ run: |
+ sudo rm -fr "$TEMP_PATH"
+ mkdir -p "$TEMP_PATH"
+ cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+ cd "$REPO_COPY/tests/ci"
+ python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+ - name: Cleanup
+ if: always()
+ run: |
+ docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+ docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+ sudo rm -fr "$TEMP_PATH"
+ FunctionalStatelessTestTsan4:
+ needs: [BuilderDebTsan]
+ runs-on: [self-hosted, func-tester]
+ steps:
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ TEMP_PATH=${{runner.temp}}/stateless_tsan
+ REPORTS_PATH=${{runner.temp}}/reports_dir
+ CHECK_NAME=Stateless tests (tsan)
+ REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
+ KILL_TIMEOUT=10800
+ RUN_BY_HASH_NUM=4
+ RUN_BY_HASH_TOTAL=5
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v3
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ with:
+ clear-repository: true
+ - name: Functional test
+ run: |
+ sudo rm -fr "$TEMP_PATH"
+ mkdir -p "$TEMP_PATH"
+ cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+ cd "$REPO_COPY/tests/ci"
+ python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+ - name: Cleanup
+ if: always()
+ run: |
+ docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+ docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+ sudo rm -fr "$TEMP_PATH"
+ FunctionalStatelessTestUBsan0:
needs: [BuilderDebUBsan]
runs-on: [self-hosted, func-tester]
steps:
@@ -1418,6 +1745,44 @@ jobs:
CHECK_NAME=Stateless tests (ubsan)
REPO_COPY=${{runner.temp}}/stateless_ubsan/ClickHouse
KILL_TIMEOUT=10800
+ RUN_BY_HASH_NUM=0
+ RUN_BY_HASH_TOTAL=2
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v3
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ with:
+ clear-repository: true
+ - name: Functional test
+ run: |
+ sudo rm -fr "$TEMP_PATH"
+ mkdir -p "$TEMP_PATH"
+ cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+ cd "$REPO_COPY/tests/ci"
+ python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+ - name: Cleanup
+ if: always()
+ run: |
+ docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+ docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+ sudo rm -fr "$TEMP_PATH"
+ FunctionalStatelessTestUBsan1:
+ needs: [BuilderDebUBsan]
+ runs-on: [self-hosted, func-tester]
+ steps:
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ TEMP_PATH=${{runner.temp}}/stateless_ubsan
+ REPORTS_PATH=${{runner.temp}}/reports_dir
+ CHECK_NAME=Stateless tests (ubsan)
+ REPO_COPY=${{runner.temp}}/stateless_ubsan/ClickHouse
+ KILL_TIMEOUT=10800
+ RUN_BY_HASH_NUM=1
+ RUN_BY_HASH_TOTAL=2
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@@ -1453,7 +1818,7 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=0
- RUN_BY_HASH_TOTAL=3
+ RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@@ -1489,7 +1854,7 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=1
- RUN_BY_HASH_TOTAL=3
+ RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@@ -1525,7 +1890,115 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=2
- RUN_BY_HASH_TOTAL=3
+ RUN_BY_HASH_TOTAL=6
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v3
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ with:
+ clear-repository: true
+ - name: Functional test
+ run: |
+ sudo rm -fr "$TEMP_PATH"
+ mkdir -p "$TEMP_PATH"
+ cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+ cd "$REPO_COPY/tests/ci"
+ python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+ - name: Cleanup
+ if: always()
+ run: |
+ docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+ docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+ sudo rm -fr "$TEMP_PATH"
+ FunctionalStatelessTestMsan3:
+ needs: [BuilderDebMsan]
+ runs-on: [self-hosted, func-tester]
+ steps:
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ TEMP_PATH=${{runner.temp}}/stateless_memory
+ REPORTS_PATH=${{runner.temp}}/reports_dir
+ CHECK_NAME=Stateless tests (msan)
+ REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
+ KILL_TIMEOUT=10800
+ RUN_BY_HASH_NUM=3
+ RUN_BY_HASH_TOTAL=6
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v3
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ with:
+ clear-repository: true
+ - name: Functional test
+ run: |
+ sudo rm -fr "$TEMP_PATH"
+ mkdir -p "$TEMP_PATH"
+ cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+ cd "$REPO_COPY/tests/ci"
+ python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+ - name: Cleanup
+ if: always()
+ run: |
+ docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+ docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+ sudo rm -fr "$TEMP_PATH"
+ FunctionalStatelessTestMsan4:
+ needs: [BuilderDebMsan]
+ runs-on: [self-hosted, func-tester]
+ steps:
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ TEMP_PATH=${{runner.temp}}/stateless_memory
+ REPORTS_PATH=${{runner.temp}}/reports_dir
+ CHECK_NAME=Stateless tests (msan)
+ REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
+ KILL_TIMEOUT=10800
+ RUN_BY_HASH_NUM=4
+ RUN_BY_HASH_TOTAL=6
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v3
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ with:
+ clear-repository: true
+ - name: Functional test
+ run: |
+ sudo rm -fr "$TEMP_PATH"
+ mkdir -p "$TEMP_PATH"
+ cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+ cd "$REPO_COPY/tests/ci"
+ python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+ - name: Cleanup
+ if: always()
+ run: |
+ docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+ docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+ sudo rm -fr "$TEMP_PATH"
+ FunctionalStatelessTestMsan5:
+ needs: [BuilderDebMsan]
+ runs-on: [self-hosted, func-tester]
+ steps:
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ TEMP_PATH=${{runner.temp}}/stateless_memory
+ REPORTS_PATH=${{runner.temp}}/reports_dir
+ CHECK_NAME=Stateless tests (msan)
+ REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
+ KILL_TIMEOUT=10800
+ RUN_BY_HASH_NUM=5
+ RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@@ -1561,7 +2034,7 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=0
- RUN_BY_HASH_TOTAL=3
+ RUN_BY_HASH_TOTAL=5
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@@ -1597,7 +2070,7 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=1
- RUN_BY_HASH_TOTAL=3
+ RUN_BY_HASH_TOTAL=5
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@@ -1633,7 +2106,79 @@ jobs:
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
KILL_TIMEOUT=10800
RUN_BY_HASH_NUM=2
- RUN_BY_HASH_TOTAL=3
+ RUN_BY_HASH_TOTAL=5
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v3
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ with:
+ clear-repository: true
+ - name: Functional test
+ run: |
+ sudo rm -fr "$TEMP_PATH"
+ mkdir -p "$TEMP_PATH"
+ cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+ cd "$REPO_COPY/tests/ci"
+ python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+ - name: Cleanup
+ if: always()
+ run: |
+ docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+ docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+ sudo rm -fr "$TEMP_PATH"
+ FunctionalStatelessTestDebug3:
+ needs: [BuilderDebDebug]
+ runs-on: [self-hosted, func-tester]
+ steps:
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ TEMP_PATH=${{runner.temp}}/stateless_debug
+ REPORTS_PATH=${{runner.temp}}/reports_dir
+ CHECK_NAME=Stateless tests (debug)
+ REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
+ KILL_TIMEOUT=10800
+ RUN_BY_HASH_NUM=3
+ RUN_BY_HASH_TOTAL=5
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v3
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ with:
+ clear-repository: true
+ - name: Functional test
+ run: |
+ sudo rm -fr "$TEMP_PATH"
+ mkdir -p "$TEMP_PATH"
+ cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+ cd "$REPO_COPY/tests/ci"
+ python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+ - name: Cleanup
+ if: always()
+ run: |
+ docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+ docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+ sudo rm -fr "$TEMP_PATH"
+ FunctionalStatelessTestDebug4:
+ needs: [BuilderDebDebug]
+ runs-on: [self-hosted, func-tester]
+ steps:
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ TEMP_PATH=${{runner.temp}}/stateless_debug
+ REPORTS_PATH=${{runner.temp}}/reports_dir
+ CHECK_NAME=Stateless tests (debug)
+ REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
+ KILL_TIMEOUT=10800
+ RUN_BY_HASH_NUM=4
+ RUN_BY_HASH_TOTAL=5
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@@ -2084,7 +2629,7 @@ jobs:
CHECK_NAME=Integration tests (asan)
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
RUN_BY_HASH_NUM=0
- RUN_BY_HASH_TOTAL=3
+ RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@@ -2119,7 +2664,7 @@ jobs:
CHECK_NAME=Integration tests (asan)
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
RUN_BY_HASH_NUM=1
- RUN_BY_HASH_TOTAL=3
+ RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@@ -2154,7 +2699,112 @@ jobs:
CHECK_NAME=Integration tests (asan)
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
RUN_BY_HASH_NUM=2
- RUN_BY_HASH_TOTAL=3
+ RUN_BY_HASH_TOTAL=6
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v3
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ with:
+ clear-repository: true
+ - name: Integration test
+ run: |
+ sudo rm -fr "$TEMP_PATH"
+ mkdir -p "$TEMP_PATH"
+ cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+ cd "$REPO_COPY/tests/ci"
+ python3 integration_test_check.py "$CHECK_NAME"
+ - name: Cleanup
+ if: always()
+ run: |
+ docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+ docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+ sudo rm -fr "$TEMP_PATH"
+ IntegrationTestsAsan3:
+ needs: [BuilderDebAsan]
+ runs-on: [self-hosted, stress-tester]
+ steps:
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ TEMP_PATH=${{runner.temp}}/integration_tests_asan
+ REPORTS_PATH=${{runner.temp}}/reports_dir
+ CHECK_NAME=Integration tests (asan)
+ REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
+ RUN_BY_HASH_NUM=3
+ RUN_BY_HASH_TOTAL=6
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v3
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ with:
+ clear-repository: true
+ - name: Integration test
+ run: |
+ sudo rm -fr "$TEMP_PATH"
+ mkdir -p "$TEMP_PATH"
+ cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+ cd "$REPO_COPY/tests/ci"
+ python3 integration_test_check.py "$CHECK_NAME"
+ - name: Cleanup
+ if: always()
+ run: |
+ docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+ docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+ sudo rm -fr "$TEMP_PATH"
+ IntegrationTestsAsan4:
+ needs: [BuilderDebAsan]
+ runs-on: [self-hosted, stress-tester]
+ steps:
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ TEMP_PATH=${{runner.temp}}/integration_tests_asan
+ REPORTS_PATH=${{runner.temp}}/reports_dir
+ CHECK_NAME=Integration tests (asan)
+ REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
+ RUN_BY_HASH_NUM=4
+ RUN_BY_HASH_TOTAL=6
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v3
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ with:
+ clear-repository: true
+ - name: Integration test
+ run: |
+ sudo rm -fr "$TEMP_PATH"
+ mkdir -p "$TEMP_PATH"
+ cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+ cd "$REPO_COPY/tests/ci"
+ python3 integration_test_check.py "$CHECK_NAME"
+ - name: Cleanup
+ if: always()
+ run: |
+ docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+ docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+ sudo rm -fr "$TEMP_PATH"
+ IntegrationTestsAsan5:
+ needs: [BuilderDebAsan]
+ runs-on: [self-hosted, stress-tester]
+ steps:
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ TEMP_PATH=${{runner.temp}}/integration_tests_asan
+ REPORTS_PATH=${{runner.temp}}/reports_dir
+ CHECK_NAME=Integration tests (asan)
+ REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
+ RUN_BY_HASH_NUM=5
+ RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@@ -2189,7 +2839,7 @@ jobs:
CHECK_NAME=Integration tests (tsan)
REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
RUN_BY_HASH_NUM=0
- RUN_BY_HASH_TOTAL=4
+ RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@@ -2224,7 +2874,7 @@ jobs:
CHECK_NAME=Integration tests (tsan)
REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
RUN_BY_HASH_NUM=1
- RUN_BY_HASH_TOTAL=4
+ RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@@ -2259,7 +2909,7 @@ jobs:
CHECK_NAME=Integration tests (tsan)
REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
RUN_BY_HASH_NUM=2
- RUN_BY_HASH_TOTAL=4
+ RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@@ -2294,7 +2944,77 @@ jobs:
CHECK_NAME=Integration tests (tsan)
REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
RUN_BY_HASH_NUM=3
- RUN_BY_HASH_TOTAL=4
+ RUN_BY_HASH_TOTAL=6
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v3
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ with:
+ clear-repository: true
+ - name: Integration test
+ run: |
+ sudo rm -fr "$TEMP_PATH"
+ mkdir -p "$TEMP_PATH"
+ cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+ cd "$REPO_COPY/tests/ci"
+ python3 integration_test_check.py "$CHECK_NAME"
+ - name: Cleanup
+ if: always()
+ run: |
+ docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+ docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+ sudo rm -fr "$TEMP_PATH"
+ IntegrationTestsTsan4:
+ needs: [BuilderDebTsan]
+ runs-on: [self-hosted, stress-tester]
+ steps:
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ TEMP_PATH=${{runner.temp}}/integration_tests_tsan
+ REPORTS_PATH=${{runner.temp}}/reports_dir
+ CHECK_NAME=Integration tests (tsan)
+ REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
+ RUN_BY_HASH_NUM=4
+ RUN_BY_HASH_TOTAL=6
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v3
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ with:
+ clear-repository: true
+ - name: Integration test
+ run: |
+ sudo rm -fr "$TEMP_PATH"
+ mkdir -p "$TEMP_PATH"
+ cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+ cd "$REPO_COPY/tests/ci"
+ python3 integration_test_check.py "$CHECK_NAME"
+ - name: Cleanup
+ if: always()
+ run: |
+ docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+ docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+ sudo rm -fr "$TEMP_PATH"
+ IntegrationTestsTsan5:
+ needs: [BuilderDebTsan]
+ runs-on: [self-hosted, stress-tester]
+ steps:
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ TEMP_PATH=${{runner.temp}}/integration_tests_tsan
+ REPORTS_PATH=${{runner.temp}}/reports_dir
+ CHECK_NAME=Integration tests (tsan)
+ REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
+ RUN_BY_HASH_NUM=5
+ RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@@ -2329,7 +3049,7 @@ jobs:
CHECK_NAME=Integration tests (release)
REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse
RUN_BY_HASH_NUM=0
- RUN_BY_HASH_TOTAL=2
+ RUN_BY_HASH_TOTAL=4
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@@ -2364,7 +3084,77 @@ jobs:
CHECK_NAME=Integration tests (release)
REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse
RUN_BY_HASH_NUM=1
- RUN_BY_HASH_TOTAL=2
+ RUN_BY_HASH_TOTAL=4
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v3
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ with:
+ clear-repository: true
+ - name: Integration test
+ run: |
+ sudo rm -fr "$TEMP_PATH"
+ mkdir -p "$TEMP_PATH"
+ cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+ cd "$REPO_COPY/tests/ci"
+ python3 integration_test_check.py "$CHECK_NAME"
+ - name: Cleanup
+ if: always()
+ run: |
+ docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+ docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+ sudo rm -fr "$TEMP_PATH"
+ IntegrationTestsRelease2:
+ needs: [BuilderDebRelease]
+ runs-on: [self-hosted, stress-tester]
+ steps:
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ TEMP_PATH=${{runner.temp}}/integration_tests_release
+ REPORTS_PATH=${{runner.temp}}/reports_dir
+ CHECK_NAME=Integration tests (release)
+ REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse
+ RUN_BY_HASH_NUM=2
+ RUN_BY_HASH_TOTAL=4
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v3
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ with:
+ clear-repository: true
+ - name: Integration test
+ run: |
+ sudo rm -fr "$TEMP_PATH"
+ mkdir -p "$TEMP_PATH"
+ cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+ cd "$REPO_COPY/tests/ci"
+ python3 integration_test_check.py "$CHECK_NAME"
+ - name: Cleanup
+ if: always()
+ run: |
+ docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+ docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+ sudo rm -fr "$TEMP_PATH"
+ IntegrationTestsRelease3:
+ needs: [BuilderDebRelease]
+ runs-on: [self-hosted, stress-tester]
+ steps:
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ TEMP_PATH=${{runner.temp}}/integration_tests_release
+ REPORTS_PATH=${{runner.temp}}/reports_dir
+ CHECK_NAME=Integration tests (release)
+ REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse
+ RUN_BY_HASH_NUM=3
+ RUN_BY_HASH_TOTAL=4
EOF
- name: Download json reports
uses: actions/download-artifact@v3
@@ -3084,23 +3874,36 @@ jobs:
- FunctionalStatelessTestDebug0
- FunctionalStatelessTestDebug1
- FunctionalStatelessTestDebug2
+ - FunctionalStatelessTestDebug3
+ - FunctionalStatelessTestDebug4
- FunctionalStatelessTestRelease
- FunctionalStatelessTestReleaseDatabaseOrdinary
- FunctionalStatelessTestReleaseDatabaseReplicated0
- FunctionalStatelessTestReleaseDatabaseReplicated1
+ - FunctionalStatelessTestReleaseDatabaseReplicated2
+ - FunctionalStatelessTestReleaseDatabaseReplicated3
- FunctionalStatelessTestAarch64
- FunctionalStatelessTestAsan0
- FunctionalStatelessTestAsan1
+ - FunctionalStatelessTestAsan2
+ - FunctionalStatelessTestAsan3
- FunctionalStatelessTestTsan0
- FunctionalStatelessTestTsan1
- FunctionalStatelessTestTsan2
+ - FunctionalStatelessTestTsan3
+ - FunctionalStatelessTestTsan4
- FunctionalStatelessTestMsan0
- FunctionalStatelessTestMsan1
- FunctionalStatelessTestMsan2
- - FunctionalStatelessTestUBsan
+ - FunctionalStatelessTestMsan3
+ - FunctionalStatelessTestMsan4
+ - FunctionalStatelessTestMsan5
+ - FunctionalStatelessTestUBsan0
+ - FunctionalStatelessTestUBsan1
- FunctionalStatefulTestDebug
- FunctionalStatefulTestRelease
- - FunctionalStatelessTestReleaseS3
+ - FunctionalStatelessTestReleaseS3_0
+ - FunctionalStatelessTestReleaseS3_1
- FunctionalStatefulTestAarch64
- FunctionalStatefulTestAsan
- FunctionalStatefulTestTsan
@@ -3114,17 +3917,25 @@ jobs:
- IntegrationTestsAsan0
- IntegrationTestsAsan1
- IntegrationTestsAsan2
+ - IntegrationTestsAsan3
+ - IntegrationTestsAsan4
+ - IntegrationTestsAsan5
- IntegrationTestsRelease0
- IntegrationTestsRelease1
+ - IntegrationTestsRelease2
+ - IntegrationTestsRelease3
- IntegrationTestsTsan0
- IntegrationTestsTsan1
- IntegrationTestsTsan2
- IntegrationTestsTsan3
+ - IntegrationTestsTsan4
+ - IntegrationTestsTsan5
- PerformanceComparisonX86-0
- PerformanceComparisonX86-1
- PerformanceComparisonX86-2
- PerformanceComparisonX86-3
- - CompatibilityCheck
+ - CompatibilityCheckX86
+ - CompatibilityCheckAarch64
- ASTFuzzerTestDebug
- ASTFuzzerTestAsan
- ASTFuzzerTestTsan
diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml
index f6d6d192f48..acf6bbe8f6a 100644
--- a/.github/workflows/nightly.yml
+++ b/.github/workflows/nightly.yml
@@ -72,6 +72,9 @@ jobs:
with:
name: changed_images
path: ${{ runner.temp }}/changed_images.json
+ Codebrowser:
+ needs: [DockerHubPush]
+ uses: ./.github/workflows/woboq.yml
BuilderCoverity:
needs: DockerHubPush
runs-on: [self-hosted, builder]
@@ -118,13 +121,15 @@ jobs:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
SonarCloud:
+ # TODO: Remove if: whenever SonarCloud supports c++23
+ if: ${{ false }}
runs-on: [self-hosted, builder]
env:
- SONAR_SCANNER_VERSION: 4.7.0.2747
+ SONAR_SCANNER_VERSION: 4.8.0.2856
SONAR_SERVER_URL: "https://sonarcloud.io"
BUILD_WRAPPER_OUT_DIR: build_wrapper_output_directory # Directory where build-wrapper output will be placed
- CC: clang-15
- CXX: clang++-15
+ CC: clang-16
+ CXX: clang++-16
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
@@ -173,4 +178,4 @@ jobs:
--define sonar.cfamily.build-wrapper-output="${{ env.BUILD_WRAPPER_OUT_DIR }}" \
--define sonar.projectKey="ClickHouse_ClickHouse" \
--define sonar.organization="clickhouse-java" \
- --define sonar.exclusions="**/*.java,**/*.ts,**/*.js,**/*.css,**/*.sql"
+              --define sonar.exclusions="**/*.java,**/*.ts,**/*.js,**/*.css,**/*.sql"
diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml
index 7d410f833c5..506ed451b6d 100644
--- a/.github/workflows/pull_request.yml
+++ b/.github/workflows/pull_request.yml
@@ -37,7 +37,6 @@ jobs:
cd "$GITHUB_WORKSPACE/tests/ci"
python3 run_check.py
PythonUnitTests:
- needs: CheckLabels
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
@@ -174,7 +173,7 @@ jobs:
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
- CompatibilityCheck:
+ CompatibilityCheckX86:
needs: [BuilderDebRelease]
runs-on: [self-hosted, style-checker]
steps:
@@ -193,12 +192,43 @@ jobs:
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- - name: CompatibilityCheck
+ - name: CompatibilityCheckX86
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci" && python3 compatibility_check.py
+ cd "$REPO_COPY/tests/ci" && python3 compatibility_check.py --check-name "Compatibility check (amd64)" --check-glibc --check-distributions
+ - name: Cleanup
+ if: always()
+ run: |
+ docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+ docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+ sudo rm -fr "$TEMP_PATH"
+ CompatibilityCheckAarch64:
+ needs: [BuilderDebAarch64]
+ runs-on: [self-hosted, style-checker]
+ steps:
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ TEMP_PATH=${{runner.temp}}/compatibility_check
+ REPO_COPY=${{runner.temp}}/compatibility_check/ClickHouse
+ REPORTS_PATH=${{runner.temp}}/reports_dir
+ EOF
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ with:
+ clear-repository: true
+ - name: Download json reports
+ uses: actions/download-artifact@v3
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ - name: CompatibilityCheckAarch64
+ run: |
+ sudo rm -fr "$TEMP_PATH"
+ mkdir -p "$TEMP_PATH"
+ cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+ cd "$REPO_COPY/tests/ci" && python3 compatibility_check.py --check-name "Compatibility check (aarch64)" --check-glibc
- name: Cleanup
if: always()
run: |
@@ -520,6 +550,13 @@ jobs:
with:
clear-repository: true
submodules: true
+ - name: Apply sparse checkout for contrib # in order to check that it doesn't break build
+ run: |
+ rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
+ git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
+ "$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
+ du -hs "$GITHUB_WORKSPACE/contrib" ||:
+ find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
@@ -886,8 +923,9 @@ jobs:
- name: Check docker clickhouse/clickhouse-server building
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
- python3 docker_server.py --release-type head --no-push
- python3 docker_server.py --release-type head --no-push --no-ubuntu \
+ python3 docker_server.py --release-type head --no-push \
+ --image-repo clickhouse/clickhouse-server --image-path docker/server
+ python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
- name: Cleanup
if: always()
@@ -1270,6 +1308,40 @@ jobs:
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
+ FunctionalStatelessTestReleaseAnalyzer:
+ needs: [BuilderDebRelease]
+ runs-on: [self-hosted, func-tester]
+ steps:
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ TEMP_PATH=${{runner.temp}}/stateless_analyzer
+ REPORTS_PATH=${{runner.temp}}/reports_dir
+ CHECK_NAME=Stateless tests (release, analyzer)
+ REPO_COPY=${{runner.temp}}/stateless_analyzer/ClickHouse
+ KILL_TIMEOUT=10800
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v3
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ with:
+ clear-repository: true
+ - name: Functional test
+ run: |
+ sudo rm -fr "$TEMP_PATH"
+ mkdir -p "$TEMP_PATH"
+ cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+ cd "$REPO_COPY/tests/ci"
+ python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+ - name: Cleanup
+ if: always()
+ run: |
+ docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+ docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+ sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestReleaseS3_0:
needs: [BuilderDebRelease]
runs-on: [self-hosted, func-tester]
@@ -4717,6 +4789,7 @@ jobs:
- FunctionalStatelessTestReleaseDatabaseReplicated2
- FunctionalStatelessTestReleaseDatabaseReplicated3
- FunctionalStatelessTestReleaseWideParts
+ - FunctionalStatelessTestReleaseAnalyzer
- FunctionalStatelessTestAarch64
- FunctionalStatelessTestAsan0
- FunctionalStatelessTestAsan1
@@ -4792,7 +4865,8 @@ jobs:
- UnitTestsMsan
- UnitTestsUBsan
- UnitTestsReleaseClang
- - CompatibilityCheck
+ - CompatibilityCheckX86
+ - CompatibilityCheckAarch64
- IntegrationTestsFlakyCheck
- SQLancerTestRelease
- SQLancerTestDebug
@@ -4807,3 +4881,41 @@ jobs:
cd "$GITHUB_WORKSPACE/tests/ci"
python3 finish_check.py
python3 merge_pr.py --check-approved
+##############################################################################################
+########################### SQLLOGIC TEST ###################################################
+##############################################################################################
+ SQLLogicTestRelease:
+ needs: [BuilderDebRelease]
+ runs-on: [self-hosted, func-tester]
+ steps:
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ TEMP_PATH=${{runner.temp}}/sqllogic_debug
+ REPORTS_PATH=${{runner.temp}}/reports_dir
+ CHECK_NAME=Sqllogic test (release)
+ REPO_COPY=${{runner.temp}}/sqllogic_debug/ClickHouse
+ KILL_TIMEOUT=10800
+ EOF
+ - name: Download json reports
+        uses: actions/download-artifact@v3
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ - name: Clear repository
+ run: |
+ sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
+ - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+ - name: Sqllogic test
+ run: |
+ sudo rm -fr "$TEMP_PATH"
+ mkdir -p "$TEMP_PATH"
+ cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+ cd "$REPO_COPY/tests/ci"
+ python3 sqllogic_test.py "$CHECK_NAME" "$KILL_TIMEOUT"
+ - name: Cleanup
+ if: always()
+ run: |
+ docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+ docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+ sudo rm -fr "$TEMP_PATH"
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 2ef05fe989b..0742ebfd449 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -7,15 +7,28 @@ on: # yamllint disable-line rule:truthy
release:
types:
- published
+ workflow_dispatch:
+ inputs:
+ tag:
+ description: 'Release tag'
+ required: true
+ type: string
jobs:
ReleasePublish:
runs-on: [self-hosted, style-checker]
steps:
+ - name: Set tag from input
+ if: github.event_name == 'workflow_dispatch'
+ run: |
+ echo "GITHUB_TAG=${{ github.event.inputs.tag }}" >> "$GITHUB_ENV"
+ - name: Set tag from REF
+ if: github.event_name == 'release'
+ run: |
+ echo "GITHUB_TAG=${GITHUB_REF#refs/tags/}" >> "$GITHUB_ENV"
- name: Deploy packages and assets
run: |
- GITHUB_TAG="${GITHUB_REF#refs/tags/}"
- curl --silent --data '' \
+ curl --silent --data '' --no-buffer \
'${{ secrets.PACKAGES_RELEASE_URL }}/release/'"${GITHUB_TAG}"'?binary=binary_darwin&binary=binary_darwin_aarch64&sync=true'
############################################################################################
##################################### Docker images #######################################
@@ -23,16 +36,26 @@ jobs:
DockerServerImages:
runs-on: [self-hosted, style-checker]
steps:
+ - name: Set tag from input
+ if: github.event_name == 'workflow_dispatch'
+ run: |
+ echo "GITHUB_TAG=${{ github.event.inputs.tag }}" >> "$GITHUB_ENV"
+ - name: Set tag from REF
+ if: github.event_name == 'release'
+ run: |
+ echo "GITHUB_TAG=${GITHUB_REF#refs/tags/}" >> "$GITHUB_ENV"
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0 # otherwise we will have no version info
+ ref: ${{ env.GITHUB_TAG }}
- name: Check docker clickhouse/clickhouse-server building
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
- python3 docker_server.py --release-type auto --version "${{ github.ref }}"
- python3 docker_server.py --release-type auto --version "${{ github.ref }}" --no-ubuntu \
+ python3 docker_server.py --release-type auto --version "$GITHUB_TAG" \
+ --image-repo clickhouse/clickhouse-server --image-path docker/server
+ python3 docker_server.py --release-type auto --version "$GITHUB_TAG" \
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
- name: Cleanup
if: always()
diff --git a/.github/workflows/release_branches.yml b/.github/workflows/release_branches.yml
index 4d2a99c2106..21284815583 100644
--- a/.github/workflows/release_branches.yml
+++ b/.github/workflows/release_branches.yml
@@ -71,7 +71,7 @@ jobs:
with:
name: changed_images
path: ${{ runner.temp }}/changed_images.json
- CompatibilityCheck:
+ CompatibilityCheckX86:
needs: [BuilderDebRelease]
runs-on: [self-hosted, style-checker]
steps:
@@ -90,12 +90,43 @@ jobs:
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- - name: CompatibilityCheck
+ - name: CompatibilityCheckX86
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
- cd "$REPO_COPY/tests/ci" && python3 compatibility_check.py
+ cd "$REPO_COPY/tests/ci" && python3 compatibility_check.py --check-name "Compatibility check (amd64)" --check-glibc --check-distributions
+ - name: Cleanup
+ if: always()
+ run: |
+ docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+ docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+ sudo rm -fr "$TEMP_PATH"
+ CompatibilityCheckAarch64:
+ needs: [BuilderDebAarch64]
+ runs-on: [self-hosted, style-checker]
+ steps:
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ TEMP_PATH=${{runner.temp}}/compatibility_check
+ REPO_COPY=${{runner.temp}}/compatibility_check/ClickHouse
+ REPORTS_PATH=${{runner.temp}}/reports_dir
+ EOF
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ with:
+ clear-repository: true
+ - name: Download json reports
+ uses: actions/download-artifact@v3
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ - name: CompatibilityCheckAarch64
+ run: |
+ sudo rm -fr "$TEMP_PATH"
+ mkdir -p "$TEMP_PATH"
+ cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+ cd "$REPO_COPY/tests/ci" && python3 compatibility_check.py --check-name "Compatibility check (aarch64)" --check-glibc
- name: Cleanup
if: always()
run: |
@@ -375,6 +406,13 @@ jobs:
with:
clear-repository: true
submodules: true
+ - name: Apply sparse checkout for contrib # in order to check that it doesn't break build
+ run: |
+ rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
+ git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
+ "$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
+ du -hs "$GITHUB_WORKSPACE/contrib" ||:
+ find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
@@ -494,8 +532,9 @@ jobs:
- name: Check docker clickhouse/clickhouse-server building
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
- python3 docker_server.py --release-type head --no-push
- python3 docker_server.py --release-type head --no-push --no-ubuntu \
+ python3 docker_server.py --release-type head --no-push \
+ --image-repo clickhouse/clickhouse-server --image-path docker/server
+ python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
- name: Cleanup
if: always()
@@ -1947,7 +1986,8 @@ jobs:
- IntegrationTestsTsan1
- IntegrationTestsTsan2
- IntegrationTestsTsan3
- - CompatibilityCheck
+ - CompatibilityCheckX86
+ - CompatibilityCheckAarch64
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
diff --git a/.github/workflows/woboq.yml b/.github/workflows/woboq.yml
index 363652c9f33..bdfbc8fef9c 100644
--- a/.github/workflows/woboq.yml
+++ b/.github/workflows/woboq.yml
@@ -6,9 +6,8 @@ env:
concurrency:
group: woboq
on: # yamllint disable-line rule:truthy
- schedule:
- - cron: '0 */18 * * *'
workflow_dispatch:
+ workflow_call:
jobs:
# don't use dockerhub push because this image updates so rarely
WoboqCodebrowser:
@@ -26,6 +25,10 @@ jobs:
with:
clear-repository: true
submodules: 'true'
+ - name: Download json reports
+ uses: actions/download-artifact@v3
+ with:
+ path: ${{ env.IMAGES_PATH }}
- name: Codebrowser
run: |
sudo rm -fr "$TEMP_PATH"
diff --git a/.gitignore b/.gitignore
index 14b860244c2..a04c60d5ca3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -129,7 +129,6 @@ website/package-lock.json
/.ccls-cache
# clangd cache
-/.clangd
/.cache
/compile_commands.json
diff --git a/.gitmodules b/.gitmodules
index ca55281e643..f0984fec4db 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -267,7 +267,7 @@
url = https://github.com/ClickHouse/nats.c
[submodule "contrib/vectorscan"]
path = contrib/vectorscan
- url = https://github.com/VectorCamp/vectorscan
+ url = https://github.com/VectorCamp/vectorscan.git
[submodule "contrib/c-ares"]
path = contrib/c-ares
url = https://github.com/ClickHouse/c-ares
@@ -296,6 +296,9 @@
[submodule "contrib/libdivide"]
path = contrib/libdivide
url = https://github.com/ridiculousfish/libdivide
+[submodule "contrib/libbcrypt"]
+ path = contrib/libbcrypt
+ url = https://github.com/rg3/libbcrypt.git
[submodule "contrib/ulid-c"]
path = contrib/ulid-c
url = https://github.com/ClickHouse/ulid-c.git
@@ -335,3 +338,9 @@
[submodule "contrib/liburing"]
path = contrib/liburing
url = https://github.com/axboe/liburing
+[submodule "contrib/libfiu"]
+ path = contrib/libfiu
+ url = https://github.com/ClickHouse/libfiu.git
+[submodule "contrib/isa-l"]
+ path = contrib/isa-l
+ url = https://github.com/ClickHouse/isa-l.git
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e22377e2332..1ccd4f9846d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,10 +1,343 @@
### Table of Contents
+**[ClickHouse release v23.4, 2023-04-26](#234)**
+**[ClickHouse release v23.3 LTS, 2023-03-30](#233)**
**[ClickHouse release v23.2, 2023-02-23](#232)**
**[ClickHouse release v23.1, 2023-01-25](#231)**
**[Changelog for 2022](https://clickhouse.com/docs/en/whats-new/changelog/2022/)**
# 2023 Changelog
+### ClickHouse release 23.4, 2023-04-26
+
+#### Backward Incompatible Change
+* Formatter '%M' in function formatDateTime() now prints the month name instead of the minutes. This makes the behavior consistent with MySQL. The previous behavior can be restored using setting "formatdatetime_parsedatetime_m_is_month_name = 0". [#47246](https://github.com/ClickHouse/ClickHouse/pull/47246) ([Robert Schulze](https://github.com/rschu1ze)).
+* This change makes sense only if you are using the virtual filesystem cache. If `path` in the virtual filesystem cache configuration is not empty and is not an absolute path, then it will be put in `/caches/`. [#48784](https://github.com/ClickHouse/ClickHouse/pull/48784) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Primary/secondary indices and sorting keys with identical expressions are now rejected. This behavior can be disabled using setting `allow_suspicious_indices`. [#48536](https://github.com/ClickHouse/ClickHouse/pull/48536) ([凌涛](https://github.com/lingtaolf)).
+
+#### New Feature
+* Support new aggregate function `quantileGK`/`quantilesGK`, like [approx_percentile](https://spark.apache.org/docs/latest/api/sql/index.html#approx_percentile) in Spark. The Greenwald-Khanna algorithm is described in http://infolab.stanford.edu/~datar/courses/cs361a/papers/quantiles.pdf. [#46428](https://github.com/ClickHouse/ClickHouse/pull/46428) ([李扬](https://github.com/taiyang-li)).
+* Add a statement `SHOW COLUMNS` which shows distilled information from system.columns. [#48017](https://github.com/ClickHouse/ClickHouse/pull/48017) ([Robert Schulze](https://github.com/rschu1ze)).
+* Added `LIGHTWEIGHT` and `PULL` modifiers for `SYSTEM SYNC REPLICA` query. `LIGHTWEIGHT` version waits for fetches and drop-ranges only (merges and mutations are ignored). `PULL` version pulls new entries from ZooKeeper and does not wait for them. Fixes [#47794](https://github.com/ClickHouse/ClickHouse/issues/47794). [#48085](https://github.com/ClickHouse/ClickHouse/pull/48085) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* Add `kafkaMurmurHash` function for compatibility with Kafka DefaultPartitioner. Closes [#47834](https://github.com/ClickHouse/ClickHouse/issues/47834). [#48185](https://github.com/ClickHouse/ClickHouse/pull/48185) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Allow to easily create a user with the same grants as the current user by using `GRANT CURRENT GRANTS`. [#48262](https://github.com/ClickHouse/ClickHouse/pull/48262) ([pufit](https://github.com/pufit)).
+* Add statistical aggregate function `kolmogorovSmirnovTest`. Close [#48228](https://github.com/ClickHouse/ClickHouse/issues/48228). [#48325](https://github.com/ClickHouse/ClickHouse/pull/48325) ([FFFFFFFHHHHHHH](https://github.com/FFFFFFFHHHHHHH)).
+* Added a `lost_part_count` column to the `system.replicas` table. The column value shows the total number of lost parts in the corresponding table. Value is stored in zookeeper and can be used instead of not persistent `ReplicatedDataLoss` profile event for monitoring. [#48526](https://github.com/ClickHouse/ClickHouse/pull/48526) ([Sergei Trifonov](https://github.com/serxa)).
+* Add `soundex` function for compatibility. Closes [#39880](https://github.com/ClickHouse/ClickHouse/issues/39880). [#48567](https://github.com/ClickHouse/ClickHouse/pull/48567) ([FriendLey](https://github.com/FriendLey)).
+* Support `Map` type for JSONExtract. [#48629](https://github.com/ClickHouse/ClickHouse/pull/48629) ([李扬](https://github.com/taiyang-li)).
+* Add `PrettyJSONEachRow` format to output pretty JSON with new line delimiters and 4 space indents. [#48898](https://github.com/ClickHouse/ClickHouse/pull/48898) ([Kruglov Pavel](https://github.com/Avogar)).
+* Add `ParquetMetadata` input format to read Parquet file metadata. [#48911](https://github.com/ClickHouse/ClickHouse/pull/48911) ([Kruglov Pavel](https://github.com/Avogar)).
+* Add `extractKeyValuePairs` function to extract key value pairs from strings. Input strings might contain noise (i.e. log files / do not need to be 100% formatted in key-value-pair format), the algorithm will look for key value pairs matching the arguments passed to the function. As of now, function accepts the following arguments: `data_column` (mandatory), `key_value_pair_delimiter` (defaults to `:`), `pair_delimiters` (defaults to `\space \, \;`) and `quoting_character` (defaults to double quotes). [#43606](https://github.com/ClickHouse/ClickHouse/pull/43606) ([Arthur Passos](https://github.com/arthurpassos)).
+* Functions replaceOne(), replaceAll(), replaceRegexpOne() and replaceRegexpAll() can now be called with non-const pattern and replacement arguments. [#46589](https://github.com/ClickHouse/ClickHouse/pull/46589) ([Robert Schulze](https://github.com/rschu1ze)).
+* Added functions to work with columns of type `Map`: `mapConcat`, `mapSort`, `mapExists`. [#48071](https://github.com/ClickHouse/ClickHouse/pull/48071) ([Anton Popov](https://github.com/CurtizJ)).
+
+#### Performance Improvement
+* Reading files in `Parquet` format is now much faster. IO and decoding are parallelized (controlled by `max_threads` setting), and only required data ranges are read. [#47964](https://github.com/ClickHouse/ClickHouse/pull/47964) ([Michael Kolupaev](https://github.com/al13n321)).
+* If we run a mutation with IN (subquery) like this: `ALTER TABLE t UPDATE col='new value' WHERE id IN (SELECT id FROM huge_table)` and the table `t` has multiple parts than for each part a set for subquery `SELECT id FROM huge_table` is built in memory. And if there are many parts then this might consume a lot of memory (and lead to an OOM) and CPU. The solution is to introduce a short-lived cache of sets that are currently being built by mutation tasks. If another task of the same mutation is executed concurrently it can look up the set in the cache, wait for it to be built and reuse it. [#46835](https://github.com/ClickHouse/ClickHouse/pull/46835) ([Alexander Gololobov](https://github.com/davenger)).
+* Only check dependencies if necessary when applying `ALTER TABLE` queries. [#48062](https://github.com/ClickHouse/ClickHouse/pull/48062) ([Raúl Marín](https://github.com/Algunenano)).
+* Optimize function `mapUpdate`. [#48118](https://github.com/ClickHouse/ClickHouse/pull/48118) ([Anton Popov](https://github.com/CurtizJ)).
+* Now an internal query to local replica is sent explicitly and data from it received through loopback interface. Setting `prefer_localhost_replica` is not respected for parallel replicas. This is needed for better scheduling and makes the code cleaner: the initiator is only responsible for coordinating of the reading process and merging results, continuously answering for requests while all the secondary queries read the data. Note: Using loopback interface is not so performant, otherwise some replicas could starve for tasks which could lead to even slower query execution and not utilizing all possible resources. The initialization of the coordinator is now even more lazy. All incoming requests contain the information about the reading algorithm we initialize the coordinator with it when first request comes. If any replica decides to read with a different algorithm–an exception will be thrown and a query will be aborted. [#48246](https://github.com/ClickHouse/ClickHouse/pull/48246) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Do not build set for the right side of `IN` clause with subquery when it is used only for analysis of skip indexes, and they are disabled by setting (`use_skip_indexes=0`). Previously it might affect the performance of queries. [#48299](https://github.com/ClickHouse/ClickHouse/pull/48299) ([Anton Popov](https://github.com/CurtizJ)).
+* Query processing is parallelized right after reading `FROM file(...)`. Related to [#38755](https://github.com/ClickHouse/ClickHouse/issues/38755). [#48525](https://github.com/ClickHouse/ClickHouse/pull/48525) ([Igor Nikonov](https://github.com/devcrafter)). Query processing is parallelized right after reading from any data source. Affected data sources are mostly simple or external storages like table functions `url`, `file`. [#48727](https://github.com/ClickHouse/ClickHouse/pull/48727) ([Igor Nikonov](https://github.com/devcrafter)). This is controlled by the setting `parallelize_output_from_storages` which is not enabled by default.
+* Lowered contention of ThreadPool mutex (may increase performance for a huge amount of small jobs). [#48750](https://github.com/ClickHouse/ClickHouse/pull/48750) ([Sergei Trifonov](https://github.com/serxa)).
+* Reduce memory usage for multiple `ALTER DELETE` mutations. [#48522](https://github.com/ClickHouse/ClickHouse/pull/48522) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Remove the excessive connection attempts if the `skip_unavailable_shards` setting is enabled. [#48771](https://github.com/ClickHouse/ClickHouse/pull/48771) ([Azat Khuzhin](https://github.com/azat)).
+
+#### Experimental Feature
+* Entries in the query cache are now squashed to max_block_size and compressed. [#45912](https://github.com/ClickHouse/ClickHouse/pull/45912) ([Robert Schulze](https://github.com/rschu1ze)).
+* It is now possible to define per-user quotas in the query cache. [#48284](https://github.com/ClickHouse/ClickHouse/pull/48284) ([Robert Schulze](https://github.com/rschu1ze)).
+* Some fixes for parallel replicas [#48433](https://github.com/ClickHouse/ClickHouse/pull/48433) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Implement zero-copy-replication (an experimental feature) on encrypted disks. [#48741](https://github.com/ClickHouse/ClickHouse/pull/48741) ([Vitaly Baranov](https://github.com/vitlibar)).
+
+#### Improvement
+* Increase default value for `connect_timeout_with_failover_ms` to 1000 ms (because of adding async connections in https://github.com/ClickHouse/ClickHouse/pull/47229) . Closes [#5188](https://github.com/ClickHouse/ClickHouse/issues/5188). [#49009](https://github.com/ClickHouse/ClickHouse/pull/49009) ([Kruglov Pavel](https://github.com/Avogar)).
+* Several improvements around data lakes: - Make `Iceberg` work with non-partitioned data. - Support `Iceberg` format version v2 (previously only v1 was supported) - Support reading partitioned data for `DeltaLake`/`Hudi` - Faster reading of `DeltaLake` metadata by using Delta's checkpoint files - Fixed incorrect `Hudi` reads: previously it incorrectly chose which data to read and therefore was able to read correctly only small size tables - Made these engines to pickup updates of changed data (previously the state was set on table creation) - Make proper testing for `Iceberg`/`DeltaLake`/`Hudi` using spark. [#47307](https://github.com/ClickHouse/ClickHouse/pull/47307) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Add async connection to socket and async writing to socket. Make creating connections and sending query/external tables async across shards. Refactor code with fibers. Closes [#46931](https://github.com/ClickHouse/ClickHouse/issues/46931). We will be able to increase `connect_timeout_with_failover_ms` by default after this PR (https://github.com/ClickHouse/ClickHouse/issues/5188). [#47229](https://github.com/ClickHouse/ClickHouse/pull/47229) ([Kruglov Pavel](https://github.com/Avogar)).
+* Support config sections `keeper`/`keeper_server` as an alternative to `zookeeper`. Close [#34766](https://github.com/ClickHouse/ClickHouse/issues/34766) , [#34767](https://github.com/ClickHouse/ClickHouse/issues/34767). [#35113](https://github.com/ClickHouse/ClickHouse/pull/35113) ([李扬](https://github.com/taiyang-li)).
+* It is possible to set _secure_ flag in named_collections for a dictionary with a ClickHouse table source. Addresses [#38450](https://github.com/ClickHouse/ClickHouse/issues/38450) . [#46323](https://github.com/ClickHouse/ClickHouse/pull/46323) ([Ilya Golshtein](https://github.com/ilejn)).
+* `bitCount` function support `FixedString` and `String` data type. [#49044](https://github.com/ClickHouse/ClickHouse/pull/49044) ([flynn](https://github.com/ucasfl)).
+* Added configurable retries for all operations with [Zoo]Keeper for Backup queries. [#47224](https://github.com/ClickHouse/ClickHouse/pull/47224) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Enable `use_environment_credentials` for S3 by default, so the entire provider chain is constructed by default. [#47397](https://github.com/ClickHouse/ClickHouse/pull/47397) ([Antonio Andelic](https://github.com/antonio2368)).
+* Currently, the JSON_VALUE function is similar to Spark's get_json_object function, which supports getting a value from a JSON string by a path like '$.key'. But there are still some differences: 1. Spark's get_json_object returns null when the path does not exist, while JSON_VALUE returns an empty string; 2. Spark's get_json_object can return a complex value, such as a JSON object/array value, while JSON_VALUE returns an empty string. [#47494](https://github.com/ClickHouse/ClickHouse/pull/47494) ([KevinyhZou](https://github.com/KevinyhZou)).
+* For `use_structure_from_insertion_table_in_table_functions` more flexible insert table structure propagation to table function. Fixed an issue with name mapping and using virtual columns. No more need for 'auto' setting. [#47962](https://github.com/ClickHouse/ClickHouse/pull/47962) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
+* Do not continue retrying to connect to Keeper if the query is killed or over limits. [#47985](https://github.com/ClickHouse/ClickHouse/pull/47985) ([Raúl Marín](https://github.com/Algunenano)).
+* Support Enum output/input in `BSONEachRow`, allow all map key types and avoid extra calculations on output. [#48122](https://github.com/ClickHouse/ClickHouse/pull/48122) ([Kruglov Pavel](https://github.com/Avogar)).
+* Support more ClickHouse types in `ORC`/`Arrow`/`Parquet` formats: Enum(8|16), (U)Int(128|256), Decimal256 (for ORC), allow reading IPv4 from Int32 values (ORC outputs IPv4 as Int32, and we couldn't read it back), fix reading Nullable(IPv6) from binary data for `ORC`. [#48126](https://github.com/ClickHouse/ClickHouse/pull/48126) ([Kruglov Pavel](https://github.com/Avogar)).
+* Add columns `perform_ttl_move_on_insert`, `load_balancing` for table `system.storage_policies`, modify column `volume_type` type to `Enum8`. [#48167](https://github.com/ClickHouse/ClickHouse/pull/48167) ([lizhuoyu5](https://github.com/lzydmxy)).
+* Added support for `BACKUP ALL` command which backups all tables and databases, including temporary and system ones. [#48189](https://github.com/ClickHouse/ClickHouse/pull/48189) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Function mapFromArrays supports `Map` type as an input. [#48207](https://github.com/ClickHouse/ClickHouse/pull/48207) ([李扬](https://github.com/taiyang-li)).
+* The output of some SHOW PROCESSLIST is now sorted. [#48241](https://github.com/ClickHouse/ClickHouse/pull/48241) ([Robert Schulze](https://github.com/rschu1ze)).
+* Per-query/per-server throttling for remote IO/local IO/BACKUPs (server settings: `max_remote_read_network_bandwidth_for_server`, `max_remote_write_network_bandwidth_for_server`, `max_local_read_bandwidth_for_server`, `max_local_write_bandwidth_for_server`, `max_backup_bandwidth_for_server`, settings: `max_remote_read_network_bandwidth`, `max_remote_write_network_bandwidth`, `max_local_read_bandwidth`, `max_local_write_bandwidth`, `max_backup_bandwidth`). [#48242](https://github.com/ClickHouse/ClickHouse/pull/48242) ([Azat Khuzhin](https://github.com/azat)).
+* Support more types in `CapnProto` format: Map, (U)Int(128|256), Decimal(128|256). Allow integer conversions during input/output. [#48257](https://github.com/ClickHouse/ClickHouse/pull/48257) ([Kruglov Pavel](https://github.com/Avogar)).
+* Don't throw CURRENT_WRITE_BUFFER_IS_EXHAUSTED for normal behaviour. [#48288](https://github.com/ClickHouse/ClickHouse/pull/48288) ([Raúl Marín](https://github.com/Algunenano)).
+* Add new setting `keeper_map_strict_mode` which enforces extra guarantees on operations made on top of `KeeperMap` tables. [#48293](https://github.com/ClickHouse/ClickHouse/pull/48293) ([Antonio Andelic](https://github.com/antonio2368)).
+* Check that the primary key type for a simple dictionary is a native unsigned integer type. Added setting `check_dictionary_primary_key` for compatibility (set `check_dictionary_primary_key = false` to disable checking). [#48335](https://github.com/ClickHouse/ClickHouse/pull/48335) ([lizhuoyu5](https://github.com/lzydmxy)).
+* Don't replicate mutations for `KeeperMap` because it's unnecessary. [#48354](https://github.com/ClickHouse/ClickHouse/pull/48354) ([Antonio Andelic](https://github.com/antonio2368)).
+* Allow to write/read unnamed tuple as nested Message in Protobuf format. Tuple elements and Message fields are matched by position. [#48390](https://github.com/ClickHouse/ClickHouse/pull/48390) ([Kruglov Pavel](https://github.com/Avogar)).
+* Support `additional_table_filters` and `additional_result_filter` settings in the new planner. Also, add a documentation entry for `additional_result_filter`. [#48405](https://github.com/ClickHouse/ClickHouse/pull/48405) ([Dmitry Novik](https://github.com/novikd)).
+* `parseDateTime` now understands format string '%f' (fractional seconds). [#48420](https://github.com/ClickHouse/ClickHouse/pull/48420) ([Robert Schulze](https://github.com/rschu1ze)).
+* Format string "%f" in formatDateTime() now prints "000000" if the formatted value has no fractional seconds, the previous behavior (single zero) can be restored using setting "formatdatetime_f_prints_single_zero = 1". [#48422](https://github.com/ClickHouse/ClickHouse/pull/48422) ([Robert Schulze](https://github.com/rschu1ze)).
+* Don't replicate DELETE and TRUNCATE for KeeperMap. [#48434](https://github.com/ClickHouse/ClickHouse/pull/48434) ([Antonio Andelic](https://github.com/antonio2368)).
+* Generate valid Decimals and Bools in generateRandom function. [#48436](https://github.com/ClickHouse/ClickHouse/pull/48436) ([Kruglov Pavel](https://github.com/Avogar)).
+* Allow trailing commas in expression list of SELECT query, for example `SELECT a, b, c, FROM table`. Closes [#37802](https://github.com/ClickHouse/ClickHouse/issues/37802). [#48438](https://github.com/ClickHouse/ClickHouse/pull/48438) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Override `CLICKHOUSE_USER` and `CLICKHOUSE_PASSWORD` environment variables with `--user` and `--password` client parameters. Closes [#38909](https://github.com/ClickHouse/ClickHouse/issues/38909). [#48440](https://github.com/ClickHouse/ClickHouse/pull/48440) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Added retries to loading of data parts in `MergeTree` tables in case of retryable errors. [#48442](https://github.com/ClickHouse/ClickHouse/pull/48442) ([Anton Popov](https://github.com/CurtizJ)).
+* Add support for `Date`, `Date32`, `DateTime`, `DateTime64` data types to `arrayMin`, `arrayMax`, `arrayDifference` functions. Closes [#21645](https://github.com/ClickHouse/ClickHouse/issues/21645). [#48445](https://github.com/ClickHouse/ClickHouse/pull/48445) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Add support for `{server_uuid}` macro. It is useful for identifying replicas in autoscaled clusters when new replicas are constantly added and removed in runtime. This closes [#48554](https://github.com/ClickHouse/ClickHouse/issues/48554). [#48563](https://github.com/ClickHouse/ClickHouse/pull/48563) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* The installation script will create a hard link instead of copying if it is possible. [#48578](https://github.com/ClickHouse/ClickHouse/pull/48578) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Support `SHOW TABLE` syntax meaning the same as `SHOW CREATE TABLE`. Closes [#48580](https://github.com/ClickHouse/ClickHouse/issues/48580). [#48591](https://github.com/ClickHouse/ClickHouse/pull/48591) ([flynn](https://github.com/ucasfl)).
+* HTTP temporary buffers now support working by evicting data from the virtual filesystem cache. [#48664](https://github.com/ClickHouse/ClickHouse/pull/48664) ([Vladimir C](https://github.com/vdimir)).
+* Make schema inference work for `CREATE AS SELECT`. Closes [#47599](https://github.com/ClickHouse/ClickHouse/issues/47599). [#48679](https://github.com/ClickHouse/ClickHouse/pull/48679) ([flynn](https://github.com/ucasfl)).
+* Added a `replicated_max_mutations_in_one_entry` setting for `ReplicatedMergeTree` that allows limiting the number of mutation commands per one `MUTATE_PART` entry (default is 10000). [#48731](https://github.com/ClickHouse/ClickHouse/pull/48731) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* In AggregateFunction types, don't count unused arena bytes as `read_bytes`. [#48745](https://github.com/ClickHouse/ClickHouse/pull/48745) ([Raúl Marín](https://github.com/Algunenano)).
+* Fix some MySQL-related settings not being handled with the MySQL dictionary source + named collection. Closes [#48402](https://github.com/ClickHouse/ClickHouse/issues/48402). [#48759](https://github.com/ClickHouse/ClickHouse/pull/48759) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* If a user set `max_single_part_upload_size` to a very large value, it can lead to a crash due to a bug in the AWS S3 SDK. This fixes [#47679](https://github.com/ClickHouse/ClickHouse/issues/47679). [#48816](https://github.com/ClickHouse/ClickHouse/pull/48816) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Fix data race in `RabbitMQ` ([report](https://pastila.nl/?004f7100/de1505289ab5bb355e67ebe6c7cc8707)), refactor the code. [#48845](https://github.com/ClickHouse/ClickHouse/pull/48845) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Add aliases `name` and `part_name` for `system.parts` and `system.part_log`. Closes [#48718](https://github.com/ClickHouse/ClickHouse/issues/48718). [#48850](https://github.com/ClickHouse/ClickHouse/pull/48850) ([sichenzhao](https://github.com/sichenzhao)).
+* Functions "arrayDifference()", "arrayCumSum()" and "arrayCumSumNonNegative()" now support input arrays of wide integer types (U)Int128/256. [#48866](https://github.com/ClickHouse/ClickHouse/pull/48866) ([cluster](https://github.com/infdahai)).
+* Multi-line history in clickhouse-client is now no longer padded. This makes pasting more natural. [#48870](https://github.com/ClickHouse/ClickHouse/pull/48870) ([Joanna Hulboj](https://github.com/jh0x)).
+* Implement a slight improvement for the rare case when ClickHouse is run inside LXC and LXCFS is used. The LXCFS has an issue: sometimes it returns an error "Transport endpoint is not connected" on reading from the file inside `/proc`. This error was correctly logged into ClickHouse's server log. We have additionally worked around this issue by reopening the file. This is a minuscule change. [#48922](https://github.com/ClickHouse/ClickHouse/pull/48922) ([Real](https://github.com/RunningXie)).
+* Improve memory accounting for prefetches. Randomize prefetch settings in CI. [#48973](https://github.com/ClickHouse/ClickHouse/pull/48973) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Correctly set headers for native copy operations on GCS. [#48981](https://github.com/ClickHouse/ClickHouse/pull/48981) ([Antonio Andelic](https://github.com/antonio2368)).
+* Add support for specifying setting names in the command line with dashes instead of underscores, for example, `--max-threads` instead of `--max_threads`. Additionally, support Unicode dash characters like `—` instead of `--` - this is useful when you communicate with a team in another company, and a manager from that team copy-pasted code from MS Word. [#48985](https://github.com/ClickHouse/ClickHouse/pull/48985) ([alekseygolub](https://github.com/alekseygolub)).
+* Add fallback to password authentication when authentication with SSL user certificate has failed. Closes [#48974](https://github.com/ClickHouse/ClickHouse/issues/48974). [#48989](https://github.com/ClickHouse/ClickHouse/pull/48989) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Improve the embedded dashboard. Close [#46671](https://github.com/ClickHouse/ClickHouse/issues/46671). [#49036](https://github.com/ClickHouse/ClickHouse/pull/49036) ([Kevin Zhang](https://github.com/Kinzeng)).
+* Add profile events for log messages, so you can easily see the count of log messages by severity. [#49042](https://github.com/ClickHouse/ClickHouse/pull/49042) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* In previous versions, the `LineAsString` format worked inconsistently when the parallel parsing was enabled or not, in presence of DOS or macOS Classic line breaks. This closes [#49039](https://github.com/ClickHouse/ClickHouse/issues/49039). [#49052](https://github.com/ClickHouse/ClickHouse/pull/49052) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* The exception message about the unparsed query parameter will also tell about the name of the parameter. Reimplement [#48878](https://github.com/ClickHouse/ClickHouse/issues/48878). Close [#48772](https://github.com/ClickHouse/ClickHouse/issues/48772). [#49061](https://github.com/ClickHouse/ClickHouse/pull/49061) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+
+#### Build/Testing/Packaging Improvement
+* Update time zones. The following were updated: Africa/Cairo, Africa/Casablanca, Africa/El_Aaiun, America/Bogota, America/Cambridge_Bay, America/Ciudad_Juarez, America/Godthab, America/Inuvik, America/Iqaluit, America/Nuuk, America/Ojinaga, America/Pangnirtung, America/Rankin_Inlet, America/Resolute, America/Whitehorse, America/Yellowknife, Asia/Gaza, Asia/Hebron, Asia/Kuala_Lumpur, Asia/Singapore, Canada/Yukon, Egypt, Europe/Kirov, Europe/Volgograd, Singapore. [#48572](https://github.com/ClickHouse/ClickHouse/pull/48572) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Reduce the number of dependencies in the header files to speed up the build. [#47984](https://github.com/ClickHouse/ClickHouse/pull/47984) ([Dmitry Novik](https://github.com/novikd)).
+* Randomize compression of marks and indices in tests. [#48286](https://github.com/ClickHouse/ClickHouse/pull/48286) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Bump internal ZSTD from 1.5.4 to 1.5.5. [#46797](https://github.com/ClickHouse/ClickHouse/pull/46797) ([Robert Schulze](https://github.com/rschu1ze)).
+* Randomize vertical merges from compact to wide parts in tests. [#48287](https://github.com/ClickHouse/ClickHouse/pull/48287) ([Raúl Marín](https://github.com/Algunenano)).
+* Support for CRC32 checksum in HDFS. Fix performance issues. [#48614](https://github.com/ClickHouse/ClickHouse/pull/48614) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Remove remainders of GCC support. [#48671](https://github.com/ClickHouse/ClickHouse/pull/48671) ([Robert Schulze](https://github.com/rschu1ze)).
+* Add CI run with new analyzer infrastructure enabled. [#48719](https://github.com/ClickHouse/ClickHouse/pull/48719) ([Dmitry Novik](https://github.com/novikd)).
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+
+* Fix system.query_views_log for MVs that are pushed from background threads [#46668](https://github.com/ClickHouse/ClickHouse/pull/46668) ([Azat Khuzhin](https://github.com/azat)).
+* Fix several `RENAME COLUMN` bugs [#46946](https://github.com/ClickHouse/ClickHouse/pull/46946) ([alesapin](https://github.com/alesapin)).
+* Fix minor highlighting issues in clickhouse-format [#47610](https://github.com/ClickHouse/ClickHouse/pull/47610) ([Natasha Murashkina](https://github.com/murfel)).
+* Fix a bug in LLVM's libc++ leading to a crash for uploading parts to S3 which size is greater than INT_MAX [#47693](https://github.com/ClickHouse/ClickHouse/pull/47693) ([Azat Khuzhin](https://github.com/azat)).
+* Fix overflow in the `sparkbar` function [#48121](https://github.com/ClickHouse/ClickHouse/pull/48121) ([Vladimir C](https://github.com/vdimir)).
+* Fix race in S3 [#48190](https://github.com/ClickHouse/ClickHouse/pull/48190) ([Anton Popov](https://github.com/CurtizJ)).
+* Disable JIT for aggregate functions due to inconsistent behavior [#48195](https://github.com/ClickHouse/ClickHouse/pull/48195) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Fix alter formatting (minor) [#48289](https://github.com/ClickHouse/ClickHouse/pull/48289) ([Natasha Murashkina](https://github.com/murfel)).
+* Fix CPU usage in RabbitMQ (was worsened in 23.2 after [#44404](https://github.com/ClickHouse/ClickHouse/issues/44404)) [#48311](https://github.com/ClickHouse/ClickHouse/pull/48311) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Fix crash in EXPLAIN PIPELINE for Merge over Distributed [#48320](https://github.com/ClickHouse/ClickHouse/pull/48320) ([Azat Khuzhin](https://github.com/azat)).
+* Fix serializing LowCardinality as Arrow dictionary [#48361](https://github.com/ClickHouse/ClickHouse/pull/48361) ([Kruglov Pavel](https://github.com/Avogar)).
+* Reset downloader for cache file segment in TemporaryFileStream [#48386](https://github.com/ClickHouse/ClickHouse/pull/48386) ([Vladimir C](https://github.com/vdimir)).
+* Fix possible SYSTEM SYNC REPLICA stuck in case of DROP/REPLACE PARTITION [#48391](https://github.com/ClickHouse/ClickHouse/pull/48391) ([Azat Khuzhin](https://github.com/azat)).
+* Fix a startup error when loading a distributed table that depends on a dictionary [#48419](https://github.com/ClickHouse/ClickHouse/pull/48419) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
+* Don't check dependencies when renaming system tables automatically [#48431](https://github.com/ClickHouse/ClickHouse/pull/48431) ([Raúl Marín](https://github.com/Algunenano)).
+* Update only affected rows in KeeperMap storage [#48435](https://github.com/ClickHouse/ClickHouse/pull/48435) ([Antonio Andelic](https://github.com/antonio2368)).
+* Fix possible segfault in the VFS cache [#48469](https://github.com/ClickHouse/ClickHouse/pull/48469) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* `toTimeZone` function throws an error when no constant string is provided [#48471](https://github.com/ClickHouse/ClickHouse/pull/48471) ([Jordi Villar](https://github.com/jrdi)).
+* Fix logical error with IPv4 in Protobuf, add support for Date32 [#48486](https://github.com/ClickHouse/ClickHouse/pull/48486) ([Kruglov Pavel](https://github.com/Avogar)).
+* "changed" flag in system.settings was calculated incorrectly for settings with multiple values [#48516](https://github.com/ClickHouse/ClickHouse/pull/48516) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
+* Fix storage `Memory` with enabled compression [#48517](https://github.com/ClickHouse/ClickHouse/pull/48517) ([Anton Popov](https://github.com/CurtizJ)).
+* Fix bracketed-paste mode messing up password input in the event of client reconnection [#48528](https://github.com/ClickHouse/ClickHouse/pull/48528) ([Michael Kolupaev](https://github.com/al13n321)).
+* Fix nested map for keys of IP and UUID types [#48556](https://github.com/ClickHouse/ClickHouse/pull/48556) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
+* Fix an uncaught exception in case of parallel loader for hashed dictionaries [#48571](https://github.com/ClickHouse/ClickHouse/pull/48571) ([Azat Khuzhin](https://github.com/azat)).
+* The `groupArray` aggregate function correctly works for empty result over nullable types [#48593](https://github.com/ClickHouse/ClickHouse/pull/48593) ([lgbo](https://github.com/lgbo-ustc)).
+* Fix bug in Keeper when a node is not created with scheme `auth` in ACL sometimes. [#48595](https://github.com/ClickHouse/ClickHouse/pull/48595) ([Aleksei Filatov](https://github.com/aalexfvk)).
+* Allow IPv4 comparison operators with UInt [#48611](https://github.com/ClickHouse/ClickHouse/pull/48611) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
+* Fix possible error from cache [#48636](https://github.com/ClickHouse/ClickHouse/pull/48636) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Async inserts with empty data will no longer throw exception. [#48663](https://github.com/ClickHouse/ClickHouse/pull/48663) ([Anton Popov](https://github.com/CurtizJ)).
+* Fix table dependencies in case of failed RENAME TABLE [#48683](https://github.com/ClickHouse/ClickHouse/pull/48683) ([Azat Khuzhin](https://github.com/azat)).
+* If the primary key has duplicate columns (which is only possible for projections), in previous versions it might lead to a bug [#48838](https://github.com/ClickHouse/ClickHouse/pull/48838) ([Amos Bird](https://github.com/amosbird)).
+* Fix for a race condition in ZooKeeper when joining send_thread/receive_thread [#48849](https://github.com/ClickHouse/ClickHouse/pull/48849) ([Alexander Gololobov](https://github.com/davenger)).
+* Fix unexpected part name error when trying to drop an ignored detached part with zero copy replication [#48862](https://github.com/ClickHouse/ClickHouse/pull/48862) ([Michael Lex](https://github.com/mlex)).
+* Fix reading `Date32` Parquet/Arrow column into not a `Date32` column [#48864](https://github.com/ClickHouse/ClickHouse/pull/48864) ([Kruglov Pavel](https://github.com/Avogar)).
+* Fix `UNKNOWN_IDENTIFIER` error while selecting from table with row policy and column with dots [#48976](https://github.com/ClickHouse/ClickHouse/pull/48976) ([Kruglov Pavel](https://github.com/Avogar)).
+* Fix aggregation by empty nullable strings [#48999](https://github.com/ClickHouse/ClickHouse/pull/48999) ([LiuNeng](https://github.com/liuneng1994)).
+
+### ClickHouse release 23.3 LTS, 2023-03-30
+
+#### Upgrade Notes
+* Lightweight DELETEs are production ready and enabled by default. The `DELETE` query for MergeTree tables is now available by default.
+* The behavior of `*domain*RFC` and `netloc` functions is slightly changed: relaxed the set of symbols that are allowed in the URL authority for better conformance. [#46841](https://github.com/ClickHouse/ClickHouse/pull/46841) ([Azat Khuzhin](https://github.com/azat)).
+* Prohibited creating tables based on KafkaEngine with DEFAULT/EPHEMERAL/ALIAS/MATERIALIZED statements for columns. [#47138](https://github.com/ClickHouse/ClickHouse/pull/47138) ([Aleksandr Musorin](https://github.com/AVMusorin)).
+* An "asynchronous connection drain" feature is removed. Related settings and metrics are removed as well. It was an internal feature, so the removal should not affect users who had never heard about that feature. [#47486](https://github.com/ClickHouse/ClickHouse/pull/47486) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* Support 256-bit Decimal data type (more than 38 digits) in `arraySum`/`Min`/`Max`/`Avg`/`Product`, `arrayCumSum`/`CumSumNonNegative`, `arrayDifference`, array construction, IN operator, query parameters, `groupArrayMovingSum`, statistical functions, `min`/`max`/`any`/`argMin`/`argMax`, PostgreSQL wire protocol, MySQL table engine and function, `sumMap`, `mapAdd`, `mapSubtract`, `arrayIntersect`. Add support for big integers in `arrayIntersect`. Statistical aggregate functions involving moments (such as `corr` or various `TTest`s) will use `Float64` as their internal representation (they were using `Decimal128` before this change, but it was pointless), and these functions can return `nan` instead of `inf` in case of infinite variance. Some functions were allowed on `Decimal256` data types but returned `Decimal128` in previous versions - now it is fixed. This closes [#47569](https://github.com/ClickHouse/ClickHouse/issues/47569). This closes [#44864](https://github.com/ClickHouse/ClickHouse/issues/44864). This closes [#28335](https://github.com/ClickHouse/ClickHouse/issues/28335). [#47594](https://github.com/ClickHouse/ClickHouse/pull/47594) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Make backup_threads/restore_threads server settings (instead of user settings). [#47881](https://github.com/ClickHouse/ClickHouse/pull/47881) ([Azat Khuzhin](https://github.com/azat)).
+* Do not allow const and non-deterministic secondary indices [#46839](https://github.com/ClickHouse/ClickHouse/pull/46839) ([Anton Popov](https://github.com/CurtizJ)).
+
+#### New Feature
+* Add a new mode for splitting the work on replicas using settings `parallel_replicas_custom_key` and `parallel_replicas_custom_key_filter_type`. If the cluster consists of a single shard with multiple replicas, up to `max_parallel_replicas` will be randomly picked and turned into shards. For each shard, a corresponding filter is added to the query on the initiator before being sent to the shard. If the cluster consists of multiple shards, it will behave the same as `sample_key` but with the possibility to define an arbitrary key. [#45108](https://github.com/ClickHouse/ClickHouse/pull/45108) ([Antonio Andelic](https://github.com/antonio2368)).
+* An option to display partial result on cancel: Added query setting `partial_result_on_first_cancel` allowing the canceled query (e.g. due to Ctrl-C) to return a partial result. [#45689](https://github.com/ClickHouse/ClickHouse/pull/45689) ([Alexey Perevyshin](https://github.com/alexX512)).
+* Added support of arbitrary table engines for temporary tables (except for Replicated and KeeperMap engines). Close [#31497](https://github.com/ClickHouse/ClickHouse/issues/31497). [#46071](https://github.com/ClickHouse/ClickHouse/pull/46071) ([Roman Vasin](https://github.com/rvasin)).
+* Add support for replication of user-defined SQL functions using centralized storage in Keeper. [#46085](https://github.com/ClickHouse/ClickHouse/pull/46085) ([Aleksei Filatov](https://github.com/aalexfvk)).
+* Implement `system.server_settings` (similar to `system.settings`), which will contain server configurations. [#46550](https://github.com/ClickHouse/ClickHouse/pull/46550) ([pufit](https://github.com/pufit)).
+* Support for `UNDROP TABLE` query. Closes [#46811](https://github.com/ClickHouse/ClickHouse/issues/46811). [#47241](https://github.com/ClickHouse/ClickHouse/pull/47241) ([chen](https://github.com/xiedeyantu)).
+* Allow separate grants for named collections (e.g. to be able to give `SHOW/CREATE/ALTER/DROP named collection` access only to certain collections, instead of all at once). Closes [#40894](https://github.com/ClickHouse/ClickHouse/issues/40894). Add new access type `NAMED_COLLECTION_CONTROL` which is not given to user default unless explicitly added to the user config (is required to be able to do `GRANT ALL`), also `show_named_collections` is no longer obligatory to be manually specified for user default to be able to have full access rights as was in 23.2. [#46241](https://github.com/ClickHouse/ClickHouse/pull/46241) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Allow nested custom disks. Previously custom disks supported only flat disk structure. [#47106](https://github.com/ClickHouse/ClickHouse/pull/47106) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Introduce a function `widthBucket` (with a `WIDTH_BUCKET` alias for compatibility). [#42974](https://github.com/ClickHouse/ClickHouse/issues/42974). [#46790](https://github.com/ClickHouse/ClickHouse/pull/46790) ([avoiderboi](https://github.com/avoiderboi)).
+* Add new function `parseDateTime`/`parseDateTimeInJodaSyntax` according to the specified format string. parseDateTime parses String to DateTime in MySQL syntax, parseDateTimeInJodaSyntax parses in Joda syntax. [#46815](https://github.com/ClickHouse/ClickHouse/pull/46815) ([李扬](https://github.com/taiyang-li)).
+* Use `dummy UInt8` for the default structure of table function `null`. Closes [#46930](https://github.com/ClickHouse/ClickHouse/issues/46930). [#47006](https://github.com/ClickHouse/ClickHouse/pull/47006) ([flynn](https://github.com/ucasfl)).
+* Support for date format with a comma, like `Dec 15, 2021` in the `parseDateTimeBestEffort` function. Closes [#46816](https://github.com/ClickHouse/ClickHouse/issues/46816). [#47071](https://github.com/ClickHouse/ClickHouse/pull/47071) ([chen](https://github.com/xiedeyantu)).
+* Add settings `http_wait_end_of_query` and `http_response_buffer_size` that corresponds to URL params `wait_end_of_query` and `buffer_size` for the HTTP interface. This allows changing these settings in the profiles. [#47108](https://github.com/ClickHouse/ClickHouse/pull/47108) ([Vladimir C](https://github.com/vdimir)).
+* Add `system.dropped_tables` table that shows tables that were dropped from `Atomic` databases but were not completely removed yet. [#47364](https://github.com/ClickHouse/ClickHouse/pull/47364) ([chen](https://github.com/xiedeyantu)).
+* Add `INSTR` as alias of `positionCaseInsensitive` for MySQL compatibility. Closes [#47529](https://github.com/ClickHouse/ClickHouse/issues/47529). [#47535](https://github.com/ClickHouse/ClickHouse/pull/47535) ([flynn](https://github.com/ucasfl)).
+* Added `toDecimalString` function allowing to convert numbers to string with fixed precision. [#47838](https://github.com/ClickHouse/ClickHouse/pull/47838) ([Andrey Zvonov](https://github.com/zvonand)).
+* Add a merge tree setting `max_number_of_mutations_for_replica`. It limits the number of part mutations per replica to the specified amount. Zero means no limit on the number of mutations per replica (the execution can still be constrained by other settings). [#48047](https://github.com/ClickHouse/ClickHouse/pull/48047) ([Vladimir C](https://github.com/vdimir)).
+* Add the Map-related function `mapFromArrays`, which allows the creation of a map from a pair of arrays. [#31125](https://github.com/ClickHouse/ClickHouse/pull/31125) ([李扬](https://github.com/taiyang-li)).
+* Allow control of compression in Parquet/ORC/Arrow output formats, adds support for more compression input formats. This closes [#13541](https://github.com/ClickHouse/ClickHouse/issues/13541). [#47114](https://github.com/ClickHouse/ClickHouse/pull/47114) ([Kruglov Pavel](https://github.com/Avogar)).
+* Add SSL User Certificate authentication to the native protocol. Closes [#47077](https://github.com/ClickHouse/ClickHouse/issues/47077). [#47596](https://github.com/ClickHouse/ClickHouse/pull/47596) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Add *OrNull() and *OrZero() variants for `parseDateTime`, add alias `str_to_date` for MySQL parity. [#48000](https://github.com/ClickHouse/ClickHouse/pull/48000) ([Robert Schulze](https://github.com/rschu1ze)).
+* Added operator `REGEXP` (similar to operators "LIKE", "IN", "MOD" etc.) for better compatibility with MySQL [#47869](https://github.com/ClickHouse/ClickHouse/pull/47869) ([Robert Schulze](https://github.com/rschu1ze)).
+
+#### Performance Improvement
+* Marks in memory are now compressed, using 3-6x less memory. [#47290](https://github.com/ClickHouse/ClickHouse/pull/47290) ([Michael Kolupaev](https://github.com/al13n321)).
+* Backups for large numbers of files were unbelievably slow in previous versions. Not anymore. Now they are unbelievably fast. [#47251](https://github.com/ClickHouse/ClickHouse/pull/47251) ([Alexey Milovidov](https://github.com/alexey-milovidov)). Introduced a separate thread pool for backup's IO operations. This will allow scaling it independently of other pools and increase performance. [#47174](https://github.com/ClickHouse/ClickHouse/pull/47174) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). Use MultiRead request and retries for collecting metadata at the final stage of backup processing. [#47243](https://github.com/ClickHouse/ClickHouse/pull/47243) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). If a backup and restoring data are both in S3 then server-side copy should be used from now on. [#47546](https://github.com/ClickHouse/ClickHouse/pull/47546) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Fixed excessive reading in queries with `FINAL`. [#47801](https://github.com/ClickHouse/ClickHouse/pull/47801) ([Nikita Taranov](https://github.com/nickitat)).
+* Setting `max_final_threads` would be set to the number of cores at server startup (by the same algorithm as used for `max_threads`). This improves the concurrency of `final` execution on servers with a high number of CPUs. [#47915](https://github.com/ClickHouse/ClickHouse/pull/47915) ([Nikita Taranov](https://github.com/nickitat)).
+* Allow executing reading pipeline for DIRECT dictionary with CLICKHOUSE source in multiple threads. To enable set `dictionary_use_async_executor=1` in `SETTINGS` section for source in `CREATE DICTIONARY` statement. [#47986](https://github.com/ClickHouse/ClickHouse/pull/47986) ([Vladimir C](https://github.com/vdimir)).
+* Optimize one nullable key aggregate performance. [#45772](https://github.com/ClickHouse/ClickHouse/pull/45772) ([LiuNeng](https://github.com/liuneng1994)).
+* Implemented lowercase `tokenbf_v1` index utilization for `hasTokenOrNull`, `hasTokenCaseInsensitive` and `hasTokenCaseInsensitiveOrNull`. [#46252](https://github.com/ClickHouse/ClickHouse/pull/46252) ([ltrk2](https://github.com/ltrk2)).
+* Optimize functions `position` and `LIKE` by searching the first two chars using SIMD. [#46289](https://github.com/ClickHouse/ClickHouse/pull/46289) ([Jiebin Sun](https://github.com/jiebinn)).
+* Optimize queries from the `system.detached_parts`, which could be significantly large. Added several sources with respect to the block size limitation; in each block, an IO thread pool is used to calculate the part size, i.e. to make syscalls in parallel. [#46624](https://github.com/ClickHouse/ClickHouse/pull/46624) ([Sema Checherinda](https://github.com/CheSema)).
+* Increase the default value of `max_replicated_merges_in_queue` for ReplicatedMergeTree tables from 16 to 1000. It allows faster background merge operation on clusters with a very large number of replicas, such as clusters with shared storage in ClickHouse Cloud. [#47050](https://github.com/ClickHouse/ClickHouse/pull/47050) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Updated `clickhouse-copier` to use `GROUP BY` instead of `DISTINCT` to get the list of partitions. For large tables, this reduced the select time from over 500s to under 1s. [#47386](https://github.com/ClickHouse/ClickHouse/pull/47386) ([Clayton McClure](https://github.com/cmcclure-twilio)).
+* Fix performance degradation in `ASOF JOIN`. [#47544](https://github.com/ClickHouse/ClickHouse/pull/47544) ([Ongkong](https://github.com/ongkong)).
+* Even more batching in Keeper. Improve performance by avoiding breaking batches on read requests. [#47978](https://github.com/ClickHouse/ClickHouse/pull/47978) ([Antonio Andelic](https://github.com/antonio2368)).
+* Allow PREWHERE for Merge with different DEFAULT expressions for columns. [#46831](https://github.com/ClickHouse/ClickHouse/pull/46831) ([Azat Khuzhin](https://github.com/azat)).
+
+#### Experimental Feature
+* Parallel replicas: Improved the overall performance by better utilizing the local replica, and forbid the reading with parallel replicas from non-replicated MergeTree by default. [#47858](https://github.com/ClickHouse/ClickHouse/pull/47858) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Support filter push down to left table for JOIN with `Join`, `Dictionary` and `EmbeddedRocksDB` tables if the experimental Analyzer is enabled. [#47280](https://github.com/ClickHouse/ClickHouse/pull/47280) ([Maksim Kita](https://github.com/kitaisreal)).
+* Now ReplicatedMergeTree with zero copy replication has less load to Keeper. [#47676](https://github.com/ClickHouse/ClickHouse/pull/47676) ([alesapin](https://github.com/alesapin)).
+* Fix create materialized view with MaterializedPostgreSQL [#40807](https://github.com/ClickHouse/ClickHouse/pull/40807) ([Maksim Buren](https://github.com/maks-buren630501)).
+
+#### Improvement
+* Enable `input_format_json_ignore_unknown_keys_in_named_tuple` by default. [#46742](https://github.com/ClickHouse/ClickHouse/pull/46742) ([Kruglov Pavel](https://github.com/Avogar)).
+* Allow errors to be ignored while pushing to MATERIALIZED VIEW (add new setting `materialized_views_ignore_errors`, by default to `false`, but it is set to `true` for flushing logs to `system.*_log` tables unconditionally). [#46658](https://github.com/ClickHouse/ClickHouse/pull/46658) ([Azat Khuzhin](https://github.com/azat)).
+* Track the file queue of distributed sends in memory. [#45491](https://github.com/ClickHouse/ClickHouse/pull/45491) ([Azat Khuzhin](https://github.com/azat)).
+* Now `X-ClickHouse-Query-Id` and `X-ClickHouse-Timezone` headers are added to responses in all queries via HTTP protocol. Previously it was done only for `SELECT` queries. [#46364](https://github.com/ClickHouse/ClickHouse/pull/46364) ([Anton Popov](https://github.com/CurtizJ)).
+* External tables from `MongoDB`: support for connection to a replica set via a URI with a host:port enum and support for the readPreference option in MongoDB dictionaries. Example URI: mongodb://db0.example.com:27017,db1.example.com:27017,db2.example.com:27017/?replicaSet=myRepl&readPreference=primary. [#46524](https://github.com/ClickHouse/ClickHouse/pull/46524) ([artem-yadr](https://github.com/artem-yadr)).
+* This improvement should be invisible for users. Re-implement projection analysis on top of query plan. Added setting `query_plan_optimize_projection=1` to switch between old and new version. Fixes [#44963](https://github.com/ClickHouse/ClickHouse/issues/44963). [#46537](https://github.com/ClickHouse/ClickHouse/pull/46537) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Use Parquet format v2 instead of v1 in output format by default. Add setting `output_format_parquet_version` to control parquet version, possible values `1.0`, `2.4`, `2.6`, `2.latest` (default). [#46617](https://github.com/ClickHouse/ClickHouse/pull/46617) ([Kruglov Pavel](https://github.com/Avogar)).
+* It is now possible to use the new configuration syntax to configure Kafka topics with periods (`.`) in their name. [#46752](https://github.com/ClickHouse/ClickHouse/pull/46752) ([Robert Schulze](https://github.com/rschu1ze)).
+* Fix heuristics that check hyperscan patterns for problematic repeats. [#46819](https://github.com/ClickHouse/ClickHouse/pull/46819) ([Robert Schulze](https://github.com/rschu1ze)).
+* Don't report ZK node exists to system.errors when a block was created concurrently by a different replica. [#46820](https://github.com/ClickHouse/ClickHouse/pull/46820) ([Raúl Marín](https://github.com/Algunenano)).
+* Increase the limit for opened files in `clickhouse-local`. It will be able to read from `web` tables on servers with a huge number of CPU cores. Do not back off reading from the URL table engine in case of too many opened files. This closes [#46852](https://github.com/ClickHouse/ClickHouse/issues/46852). [#46853](https://github.com/ClickHouse/ClickHouse/pull/46853) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Exceptions thrown when numbers cannot be parsed now have an easier-to-read exception message. [#46917](https://github.com/ClickHouse/ClickHouse/pull/46917) ([Robert Schulze](https://github.com/rschu1ze)).
+* Added update `system.backups` after every processed task to track the progress of backups. [#46989](https://github.com/ClickHouse/ClickHouse/pull/46989) ([Aleksandr Musorin](https://github.com/AVMusorin)).
+* Allow types conversion in Native input format. Add settings `input_format_native_allow_types_conversion` that controls it (enabled by default). [#46990](https://github.com/ClickHouse/ClickHouse/pull/46990) ([Kruglov Pavel](https://github.com/Avogar)).
+* Allow IPv4 in the `range` function to generate IP ranges. [#46995](https://github.com/ClickHouse/ClickHouse/pull/46995) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
+* Improve exception message when it's impossible to move a part from one volume/disk to another. [#47032](https://github.com/ClickHouse/ClickHouse/pull/47032) ([alesapin](https://github.com/alesapin)).
+* Support `Bool` type in `JSONType` function. Previously `Null` type was mistakenly returned for bool values. [#47046](https://github.com/ClickHouse/ClickHouse/pull/47046) ([Anton Popov](https://github.com/CurtizJ)).
+* Use `_request_body` parameter to configure predefined HTTP queries. [#47086](https://github.com/ClickHouse/ClickHouse/pull/47086) ([Constantine Peresypkin](https://github.com/pkit)).
+* Automatic indentation in the built-in UI SQL editor when Enter is pressed. [#47113](https://github.com/ClickHouse/ClickHouse/pull/47113) ([Alexey Korepanov](https://github.com/alexkorep)).
+* Self-extraction with 'sudo' will attempt to set uid and gid of extracted files to running user. [#47116](https://github.com/ClickHouse/ClickHouse/pull/47116) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
+* Previously, the `repeat` function's second argument only accepted an unsigned integer type, which meant it could not accept values such as -1. This behavior differed from that of the Spark function. In this update, the repeat function has been modified to match the behavior of the Spark function. It now accepts the same types of inputs, including negative integers. Extensive testing has been performed to verify the correctness of the updated implementation. [#47134](https://github.com/ClickHouse/ClickHouse/pull/47134) ([KevinyhZou](https://github.com/KevinyhZou)). Note: the changelog entry was rewritten by ChatGPT.
+* Remove `::__1` part from stacktraces. Display `std::basic_string ClickHouse release 23.2, 2023-02-23
#### Backward Incompatible Change
@@ -140,7 +473,7 @@
* Upgrade Intel QPL from v0.3.0 to v1.0.0 2. Build libaccel-config and link it statically to QPL library instead of dynamically. [#45809](https://github.com/ClickHouse/ClickHouse/pull/45809) ([jasperzhu](https://github.com/jinjunzh)).
-#### Bug Fix (user-visible misbehavior in official stable or prestable release)
+#### Bug Fix (user-visible misbehavior in official stable release)
* Flush data exactly by `rabbitmq_flush_interval_ms` or by `rabbitmq_max_block_size` in `StorageRabbitMQ`. Closes [#42389](https://github.com/ClickHouse/ClickHouse/issues/42389). Closes [#45160](https://github.com/ClickHouse/ClickHouse/issues/45160). [#44404](https://github.com/ClickHouse/ClickHouse/pull/44404) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Use PODArray to render in sparkBar function, so we can control the memory usage. Close [#44467](https://github.com/ClickHouse/ClickHouse/issues/44467). [#44489](https://github.com/ClickHouse/ClickHouse/pull/44489) ([Duc Canh Le](https://github.com/canhld94)).
diff --git a/CMakeLists.txt b/CMakeLists.txt
index cbb666b81c3..5d6ed75bb29 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -57,8 +57,8 @@ if (ENABLE_CHECK_HEAVY_BUILDS)
# set CPU time limit to 1000 seconds
set (RLIMIT_CPU 1000)
- # gcc10/gcc10/clang -fsanitize=memory is too heavy
- if (SANITIZE STREQUAL "memory" OR COMPILER_GCC)
+ # -fsanitize=memory is too heavy
+ if (SANITIZE STREQUAL "memory")
set (RLIMIT_DATA 10000000000) # 10G
endif()
@@ -102,6 +102,17 @@ if (ENABLE_FUZZING)
set (ENABLE_PROTOBUF 1)
endif()
+option (ENABLE_WOBOQ_CODEBROWSER "Build for woboq codebrowser" OFF)
+
+if (ENABLE_WOBOQ_CODEBROWSER)
+ set (ENABLE_EMBEDDED_COMPILER 0)
+ set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-poison-system-directories")
+ # woboq codebrowser uses clang tooling, and they could add default system
+ # clang includes, and later clang will warn for those added by itself
+ # includes.
+ set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-poison-system-directories")
+endif()
+
# Global libraries
# See:
# - default_libs.cmake
@@ -121,6 +132,7 @@ if (ENABLE_COLORED_BUILD AND CMAKE_GENERATOR STREQUAL "Ninja")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fdiagnostics-color=always")
# ... such manually setting of flags can be removed once CMake supports a variable to
# activate colors in *all* build systems: https://gitlab.kitware.com/cmake/cmake/-/issues/15502
+ # --> available since CMake 3.24: https://stackoverflow.com/a/73349744
endif ()
include (cmake/check_flags.cmake)
@@ -134,24 +146,15 @@ if (COMPILER_CLANG)
set(COMPILER_FLAGS "${COMPILER_FLAGS} -gdwarf-aranges")
endif ()
- if (HAS_USE_CTOR_HOMING)
- # For more info see https://blog.llvm.org/posts/2021-04-05-constructor-homing-for-debug-info/
- if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG" OR CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO")
- set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Xclang -fuse-ctor-homing")
- set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Xclang -fuse-ctor-homing")
- endif()
+ # See https://blog.llvm.org/posts/2021-04-05-constructor-homing-for-debug-info/
+ if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG" OR CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO")
+ set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Xclang -fuse-ctor-homing")
+ set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Xclang -fuse-ctor-homing")
endif()
no_warning(enum-constexpr-conversion) # breaks Protobuf in clang-16
endif ()
-# If compiler has support for -Wreserved-identifier. It is difficult to detect by clang version,
-# because there are two different branches of clang: clang and AppleClang.
-# (AppleClang is not supported by ClickHouse, but some developers have misfortune to use it).
-if (HAS_RESERVED_IDENTIFIER)
- add_compile_definitions (HAS_RESERVED_IDENTIFIER)
-endif ()
-
option(ENABLE_TESTS "Provide unit_test_dbms target with Google.Test unit tests" ON)
option(ENABLE_EXAMPLES "Build all example programs in 'examples' subdirectories" OFF)
option(ENABLE_BENCHMARKS "Build all benchmark programs in 'benchmarks' subdirectories" OFF)
@@ -178,32 +181,11 @@ else ()
set(NO_WHOLE_ARCHIVE --no-whole-archive)
endif ()
-option(ENABLE_CURL_BUILD "Enable curl, azure, sentry build on by default except MacOS." ON)
-if (OS_DARWIN)
- # Disable the curl, azure, senry build on MacOS
- set (ENABLE_CURL_BUILD OFF)
-endif ()
-
-# Ignored if `lld` is used
-option(ADD_GDB_INDEX_FOR_GOLD "Add .gdb-index to resulting binaries for gold linker.")
-
if (NOT CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE")
# Can be lld or ld-lld or lld-13 or /path/to/lld.
- if (LINKER_NAME MATCHES "lld" AND OS_LINUX)
+ if (LINKER_NAME MATCHES "lld")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--gdb-index")
- set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--gdb-index")
message (STATUS "Adding .gdb-index via --gdb-index linker option.")
- # we use another tool for gdb-index, because gold linker removes section .debug_aranges, which used inside clickhouse stacktraces
- # http://sourceware-org.1504.n7.nabble.com/gold-No-debug-aranges-section-when-linking-with-gdb-index-td540965.html#a556932
- elseif (LINKER_NAME MATCHES "gold$" AND ADD_GDB_INDEX_FOR_GOLD)
- find_program (GDB_ADD_INDEX_EXE NAMES "gdb-add-index" DOC "Path to gdb-add-index executable")
- if (NOT GDB_ADD_INDEX_EXE)
- set (USE_GDB_ADD_INDEX 0)
- message (WARNING "Cannot add gdb index to binaries, because gold linker is used, but gdb-add-index executable not found.")
- else()
- set (USE_GDB_ADD_INDEX 1)
- message (STATUS "gdb-add-index found: ${GDB_ADD_INDEX_EXE}")
- endif()
endif ()
endif()
@@ -235,7 +217,7 @@ endif ()
# Create BuildID when using lld. For other linkers it is created by default.
# (NOTE: LINKER_NAME can be either path or name, and in different variants)
-if (LINKER_NAME MATCHES "lld" AND OS_LINUX)
+if (LINKER_NAME MATCHES "lld")
# SHA1 is not cryptographically secure but it is the best what lld is offering.
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--build-id=sha1")
endif ()
@@ -288,8 +270,8 @@ endif ()
option (ENABLE_BUILD_PATH_MAPPING "Enable remapping of file source paths in debug info, predefined preprocessor macros, and __builtin_FILE(). It's used to generate reproducible builds. See https://reproducible-builds.org/docs/build-path" ${ENABLE_BUILD_PATH_MAPPING_DEFAULT})
if (ENABLE_BUILD_PATH_MAPPING)
- set (COMPILER_FLAGS "${COMPILER_FLAGS} -ffile-prefix-map=${CMAKE_SOURCE_DIR}=.")
- set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -ffile-prefix-map=${CMAKE_SOURCE_DIR}=.")
+ set (COMPILER_FLAGS "${COMPILER_FLAGS} -ffile-prefix-map=${PROJECT_SOURCE_DIR}=.")
+ set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -ffile-prefix-map=${PROJECT_SOURCE_DIR}=.")
endif ()
option (ENABLE_BUILD_PROFILING "Enable profiling of build time" OFF)
@@ -301,59 +283,39 @@ if (ENABLE_BUILD_PROFILING)
endif ()
endif ()
-set (CMAKE_CXX_STANDARD 20)
-set (CMAKE_CXX_EXTENSIONS ON) # Same as gnu++2a (ON) vs c++2a (OFF): https://cmake.org/cmake/help/latest/prop_tgt/CXX_EXTENSIONS.html
+set (CMAKE_CXX_STANDARD 23)
+set (CMAKE_CXX_EXTENSIONS OFF)
set (CMAKE_CXX_STANDARD_REQUIRED ON)
set (CMAKE_C_STANDARD 11)
-set (CMAKE_C_EXTENSIONS ON)
+set (CMAKE_C_EXTENSIONS ON) # required by most contribs written in C
set (CMAKE_C_STANDARD_REQUIRED ON)
-if (COMPILER_GCC OR COMPILER_CLANG)
- # Enable C++14 sized global deallocation functions. It should be enabled by setting -std=c++14 but I'm not sure.
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsized-deallocation")
-endif ()
-
-# falign-functions=32 prevents from random performance regressions with the code change. Thus, providing more stable
-# benchmarks.
-if (COMPILER_GCC OR COMPILER_CLANG)
- set(COMPILER_FLAGS "${COMPILER_FLAGS} -falign-functions=32")
-endif ()
-
-if (ARCH_AMD64)
- # align branches within a 32-Byte boundary to avoid the potential performance loss when code layout change,
- # which makes benchmark results more stable.
- set(BRANCHES_WITHIN_32B_BOUNDARIES "-mbranches-within-32B-boundaries")
- if (COMPILER_GCC)
- # gcc is in assembler, need to add "-Wa," prefix
- set(BRANCHES_WITHIN_32B_BOUNDARIES "-Wa,${BRANCHES_WITHIN_32B_BOUNDARIES}")
- endif()
-
- include(CheckCXXCompilerFlag)
- check_cxx_compiler_flag("${BRANCHES_WITHIN_32B_BOUNDARIES}" HAS_BRANCHES_WITHIN_32B_BOUNDARIES)
- if (HAS_BRANCHES_WITHIN_32B_BOUNDARIES)
- set(COMPILER_FLAGS "${COMPILER_FLAGS} ${BRANCHES_WITHIN_32B_BOUNDARIES}")
- endif()
-endif()
-
-if (COMPILER_GCC)
- set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fcoroutines")
-endif ()
-
-# Compiler-specific coverage flags e.g. -fcoverage-mapping for gcc
+# Compiler-specific coverage flags e.g. -fcoverage-mapping
option(WITH_COVERAGE "Profile the resulting binary/binaries" OFF)
-if (WITH_COVERAGE AND COMPILER_CLANG)
- set(COMPILER_FLAGS "${COMPILER_FLAGS} -fprofile-instr-generate -fcoverage-mapping")
- # If we want to disable coverage for specific translation units
- set(WITHOUT_COVERAGE "-fno-profile-instr-generate -fno-coverage-mapping")
-endif()
+if (COMPILER_CLANG)
+ # Enable C++14 sized global deallocation functions. It should be enabled by setting -std=c++14 but I'm not sure.
+ # See https://reviews.llvm.org/D112921
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsized-deallocation")
-if (WITH_COVERAGE AND COMPILER_GCC)
- set(COMPILER_FLAGS "${COMPILER_FLAGS} -fprofile-arcs -ftest-coverage")
- set(COVERAGE_OPTION "-lgcov")
- set(WITHOUT_COVERAGE "-fno-profile-arcs -fno-test-coverage")
-endif()
+ # falign-functions=32 prevents from random performance regressions with the code change. Thus, providing more stable
+ # benchmarks.
+ set(COMPILER_FLAGS "${COMPILER_FLAGS} -falign-functions=32")
+
+ if (ARCH_AMD64)
+ # align branches within a 32-Byte boundary to avoid the potential performance loss when code layout change,
+ # which makes benchmark results more stable.
+ set(BRANCHES_WITHIN_32B_BOUNDARIES "-mbranches-within-32B-boundaries")
+ set(COMPILER_FLAGS "${COMPILER_FLAGS} ${BRANCHES_WITHIN_32B_BOUNDARIES}")
+ endif()
+
+ if (WITH_COVERAGE)
+ set(COMPILER_FLAGS "${COMPILER_FLAGS} -fprofile-instr-generate -fcoverage-mapping")
+ # If we want to disable coverage for specific translation units
+ set(WITHOUT_COVERAGE "-fno-profile-instr-generate -fno-coverage-mapping")
+ endif()
+endif ()
set (COMPILER_FLAGS "${COMPILER_FLAGS}")
@@ -391,13 +353,6 @@ if (COMPILER_CLANG)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fstrict-vtable-pointers")
- if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 16)
- # Set new experimental pass manager, it's a performance, build time and binary size win.
- # Can be removed after https://reviews.llvm.org/D66490 merged and released to at least two versions of clang.
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fexperimental-new-pass-manager")
- set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fexperimental-new-pass-manager")
- endif ()
-
# We cannot afford to use LTO when compiling unit tests, and it's not enough
# to only supply -fno-lto at the final linking stage. So we disable it
# completely.
@@ -436,15 +391,22 @@ else()
endif ()
option (ENABLE_GWP_ASAN "Enable Gwp-Asan" ON)
-if (NOT OS_LINUX AND NOT OS_ANDROID)
+# We use mmap for allocations more heavily in debug builds,
+# but GWP-ASan also wants to use mmap frequently,
+# and due to a large number of memory mappings,
+# it does not work together well.
+if ((NOT OS_LINUX AND NOT OS_ANDROID) OR (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG"))
set(ENABLE_GWP_ASAN OFF)
endif ()
+option (ENABLE_FIU "Enable Fiu" ON)
+
option(WERROR "Enable -Werror compiler option" ON)
if (WERROR)
# Don't pollute CMAKE_CXX_FLAGS with -Werror as it will break some CMake checks.
# Instead, adopt modern cmake usage requirement.
+ # TODO: Set CMAKE_COMPILE_WARNING_AS_ERROR (cmake 3.24)
target_compile_options(global-group INTERFACE "-Werror")
endif ()
@@ -459,8 +421,11 @@ endif ()
set (CMAKE_POSTFIX_VARIABLE "CMAKE_${CMAKE_BUILD_TYPE_UC}_POSTFIX")
-set (CMAKE_POSITION_INDEPENDENT_CODE OFF)
-if (OS_LINUX AND NOT (ARCH_AARCH64 OR ARCH_S390X))
+if (NOT SANITIZE)
+ set (CMAKE_POSITION_INDEPENDENT_CODE OFF)
+endif()
+
+if (OS_LINUX AND NOT (ARCH_AARCH64 OR ARCH_S390X) AND NOT SANITIZE)
# Slightly more efficient code can be generated
# It's disabled for ARM because otherwise ClickHouse cannot run on Android.
set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -fno-pie")
@@ -563,6 +528,26 @@ include (cmake/print_flags.cmake)
if (ENABLE_RUST)
add_subdirectory (rust)
+
+ # With LTO, Rust adds a few symbols with global visibility, the most common being
+ # rust_eh_personality. This leads to linking errors because multiple
+ # Rust libraries contain the same symbol.
+ #
+ # If it was shared library, that we could use version script for linker to
+ # hide this symbols, but libraries are static.
+ #
+ # We could in theory compile everything into one library, but this would be
+ # a mess.
+ #
+ # But this should be OK since CI has lots of other builds that are done
+ # without LTO and it will find multiple definitions if there will be any.
+ #
+ # More information about this behaviour in Rust can be found here
+ # - https://github.com/rust-lang/rust/issues/44322
+ # - https://alanwu.space/post/symbol-hygiene/
+ if (ENABLE_THINLTO)
+ set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--allow-multiple-definition")
+ endif()
endif()
add_subdirectory (base)
@@ -583,7 +568,7 @@ if (NATIVE_BUILD_TARGETS
)
message (STATUS "Building native targets...")
- set (NATIVE_BUILD_DIR "${CMAKE_BINARY_DIR}/native")
+ set (NATIVE_BUILD_DIR "${PROJECT_BINARY_DIR}/native")
execute_process(
COMMAND ${CMAKE_COMMAND} -E make_directory "${NATIVE_BUILD_DIR}"
@@ -593,11 +578,11 @@ if (NATIVE_BUILD_TARGETS
COMMAND ${CMAKE_COMMAND}
"-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}"
"-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}"
- "-DENABLE_CCACHE=${ENABLE_CCACHE}"
+ "-DCOMPILER_CACHE=${COMPILER_CACHE}"
# Avoid overriding .cargo/config.toml with native toolchain.
"-DENABLE_RUST=OFF"
"-DENABLE_CLICKHOUSE_SELF_EXTRACTING=${ENABLE_CLICKHOUSE_SELF_EXTRACTING}"
- ${CMAKE_SOURCE_DIR}
+ ${PROJECT_SOURCE_DIR}
WORKING_DIRECTORY "${NATIVE_BUILD_DIR}"
COMMAND_ECHO STDOUT)
diff --git a/PreLoad.cmake b/PreLoad.cmake
index 0e1ee70fc8f..b456c724cc6 100644
--- a/PreLoad.cmake
+++ b/PreLoad.cmake
@@ -19,8 +19,8 @@ endif()
if (NOT "$ENV{CFLAGS}" STREQUAL ""
OR NOT "$ENV{CXXFLAGS}" STREQUAL ""
OR NOT "$ENV{LDFLAGS}" STREQUAL ""
- OR CMAKE_C_FLAGS OR CMAKE_CXX_FLAGS OR CMAKE_EXE_LINKER_FLAGS OR CMAKE_SHARED_LINKER_FLAGS OR CMAKE_MODULE_LINKER_FLAGS
- OR CMAKE_C_FLAGS_INIT OR CMAKE_CXX_FLAGS_INIT OR CMAKE_EXE_LINKER_FLAGS_INIT OR CMAKE_SHARED_LINKER_FLAGS_INIT OR CMAKE_MODULE_LINKER_FLAGS_INIT)
+ OR CMAKE_C_FLAGS OR CMAKE_CXX_FLAGS OR CMAKE_EXE_LINKER_FLAGS OR CMAKE_MODULE_LINKER_FLAGS
+ OR CMAKE_C_FLAGS_INIT OR CMAKE_CXX_FLAGS_INIT OR CMAKE_EXE_LINKER_FLAGS_INIT OR CMAKE_MODULE_LINKER_FLAGS_INIT)
# if $ENV
message("CFLAGS: $ENV{CFLAGS}")
@@ -36,7 +36,6 @@ if (NOT "$ENV{CFLAGS}" STREQUAL ""
message("CMAKE_C_FLAGS_INIT: ${CMAKE_C_FLAGS_INIT}")
message("CMAKE_CXX_FLAGS_INIT: ${CMAKE_CXX_FLAGS_INIT}")
message("CMAKE_EXE_LINKER_FLAGS_INIT: ${CMAKE_EXE_LINKER_FLAGS_INIT}")
- message("CMAKE_SHARED_LINKER_FLAGS_INIT: ${CMAKE_SHARED_LINKER_FLAGS_INIT}")
message("CMAKE_MODULE_LINKER_FLAGS_INIT: ${CMAKE_MODULE_LINKER_FLAGS_INIT}")
message(FATAL_ERROR "
diff --git a/README.md b/README.md
index fcbe65e8223..bbedea364fc 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-[![ClickHouse — open source distributed column-oriented DBMS](https://github.com/ClickHouse/clickhouse-presentations/raw/master/images/logo-400x240.png)](https://clickhouse.com)
+[](https://clickhouse.com?utm_source=github)
ClickHouse® is an open-source column-oriented database management system that allows generating analytical data reports in real-time.
@@ -14,18 +14,32 @@ curl https://clickhouse.com/ | sh
* [Tutorial](https://clickhouse.com/docs/en/getting_started/tutorial/) shows how to set up and query a small ClickHouse cluster.
* [Documentation](https://clickhouse.com/docs/en/) provides more in-depth information.
* [YouTube channel](https://www.youtube.com/c/ClickHouseDB) has a lot of content about ClickHouse in video format.
-* [Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-1gh9ds7f4-PgDhJAaF8ad5RbWBAAjzFg) and [Telegram](https://telegram.me/clickhouse_en) allow chatting with ClickHouse users in real-time.
+* [Slack](https://clickhouse.com/slack) and [Telegram](https://telegram.me/clickhouse_en) allow chatting with ClickHouse users in real-time.
* [Blog](https://clickhouse.com/blog/) contains various ClickHouse-related articles, as well as announcements and reports about events.
* [Code Browser (Woboq)](https://clickhouse.com/codebrowser/ClickHouse/index.html) with syntax highlight and navigation.
* [Code Browser (github.dev)](https://github.dev/ClickHouse/ClickHouse) with syntax highlight, powered by github.dev.
* [Contacts](https://clickhouse.com/company/contact) can help to get your questions answered if there are any.
## Upcoming Events
-* [**v23.2 Release Webinar**](https://clickhouse.com/company/events/v23-2-release-webinar?utm_source=github&utm_medium=social&utm_campaign=release-webinar-2023-02) - Feb 23 - 23.2 is rapidly approaching. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release.
-* [**ClickHouse Meetup in Amsterdam**](https://www.meetup.com/clickhouse-netherlands-user-group/events/291485868/) - Mar 9 - The first ClickHouse Amsterdam Meetup of 2023 is here! 🎉 Join us for short lightning talks and long discussions. Food, drinks & good times on us.
-* [**ClickHouse Meetup in SF Bay Area**](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/291490121/) - Mar 14 - A night to meet with ClickHouse team in the San Francisco area! Food and drink are a given...but networking is the primary focus.
-* [**ClickHouse Meetup in Austin**](https://www.meetup.com/clickhouse-austin-user-group/events/291486654/) - Mar 16 - The first ClickHouse Meetup in Austin is happening soon! Interested in speaking, let us know!
+
+* [**v23.5 Release Webinar**](https://clickhouse.com/company/events/v23-5-release-webinar?utm_source=github&utm_medium=social&utm_campaign=release-webinar-2023-05) - May 31 - 23.5 is rapidly approaching. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release.
+* [**ClickHouse Meetup in Barcelona**](https://www.meetup.com/clickhouse-barcelona-user-group/events/292892669) - May 25
+* [**ClickHouse Meetup in London**](https://www.meetup.com/clickhouse-london-user-group/events/292892824) - May 25
+* [**ClickHouse Meetup in San Francisco**](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/293426725/) - Jun 7
+* [**ClickHouse Meetup in Stockholm**](https://www.meetup.com/clickhouse-berlin-user-group/events/292892466) - Jun 13
+
+Also, keep an eye out for upcoming meetups in Amsterdam, Boston, NYC, Beijing, and Toronto. Somewhere else you want us to be? Please feel free to reach out to tyler clickhouse com.
## Recent Recordings
-* **FOSDEM 2023**: In the "Fast and Streaming Data" room Alexey gave a talk entitled "Building Analytical Apps With ClickHouse" that looks at the landscape of data tools, an interesting data set, and how you can interact with data quickly. Check out the recording on **[YouTube](https://www.youtube.com/watch?v=JlcI2Vfz_uk)**.
-* **Recording available**: [**v23.1 Release Webinar**](https://www.youtube.com/watch?v=zYSZXBnTMSE) 23.1 is the ClickHouse New Year release. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release. Inverted indices, query cache, and so -- very -- much more.
+* **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible, recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Currently featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments".
+* **Recording available**: [**v23.4 Release Webinar**](https://www.youtube.com/watch?v=4rrf6bk_mOg) Faster Parquet Reading, Asynchronous Connections to Replicas, Trailing Comma before FROM, extractKeyValuePairs, integrations updates, and so much more! Watch it now!
+* **All release webinar recordings**: [YouTube playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3jAlSy1JxyP8zluvXaN3nxU)
+
+
+## Interested in joining ClickHouse and making it your full-time job?
+
+We are a globally diverse and distributed team, united behind a common goal of creating industry-leading, real-time analytics. Here, you will have an opportunity to solve some of the most cutting edge technical challenges and have direct ownership of your work and vision. If you are a contributor by nature, a thinker as well as a doer - we’ll definitely click!
+
+Check out our **current openings** here: https://clickhouse.com/company/careers
+
+Can't find what you are looking for, but want to let us know you are interested in joining ClickHouse? Email careers@clickhouse.com!
diff --git a/SECURITY.md b/SECURITY.md
index 7c6648c70eb..75c1a9d7d6a 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -13,20 +13,16 @@ The following versions of ClickHouse server are currently being supported with s
| Version | Supported |
|:-|:-|
+| 23.4 | ✔️ |
+| 23.3 | ✔️ |
| 23.2 | ✔️ |
-| 23.1 | ✔️ |
-| 22.12 | ✔️ |
+| 23.1 | ❌ |
+| 22.12 | ❌ |
| 22.11 | ❌ |
| 22.10 | ❌ |
| 22.9 | ❌ |
| 22.8 | ✔️ |
-| 22.7 | ❌ |
-| 22.6 | ❌ |
-| 22.5 | ❌ |
-| 22.4 | ❌ |
-| 22.3 | ✔️ |
-| 22.2 | ❌ |
-| 22.1 | ❌ |
+| 22.* | ❌ |
| 21.* | ❌ |
| 20.* | ❌ |
| 19.* | ❌ |
diff --git a/base/base/CMakeLists.txt b/base/base/CMakeLists.txt
index 64785d575c5..8ab3c8a0711 100644
--- a/base/base/CMakeLists.txt
+++ b/base/base/CMakeLists.txt
@@ -2,6 +2,10 @@ if (USE_CLANG_TIDY)
set (CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_PATH}")
endif ()
+# TODO: Remove this. We like to compile with C++23 (set by top-level CMakeLists) but Clang crashes with our libcxx
+# when instantiated from JSON.cpp. Try again when libcxx(abi) and Clang are upgraded to 16.
+set (CMAKE_CXX_STANDARD 20)
+
set (SRCS
argsToConfig.cpp
coverage.cpp
diff --git a/base/base/Decimal.h b/base/base/Decimal.h
index 22cb577b1b2..2405ba9ca0d 100644
--- a/base/base/Decimal.h
+++ b/base/base/Decimal.h
@@ -1,5 +1,6 @@
#pragma once
#include
+#include
#if !defined(NO_SANITIZE_UNDEFINED)
#if defined(__clang__)
@@ -19,23 +20,6 @@ using Decimal64 = Decimal;
using Decimal128 = Decimal;
using Decimal256 = Decimal;
-template
-concept is_decimal =
- std::is_same_v
- || std::is_same_v
- || std::is_same_v
- || std::is_same_v
- || std::is_same_v;
-
-template
-concept is_over_big_int =
- std::is_same_v
- || std::is_same_v
- || std::is_same_v
- || std::is_same_v
- || std::is_same_v
- || std::is_same_v;
-
template struct NativeTypeT { using Type = T; };
template struct NativeTypeT { using Type = typename T::NativeType; };
template using NativeType = typename NativeTypeT::Type;
diff --git a/base/base/Decimal_fwd.h b/base/base/Decimal_fwd.h
new file mode 100644
index 00000000000..589d6224917
--- /dev/null
+++ b/base/base/Decimal_fwd.h
@@ -0,0 +1,46 @@
+#pragma once
+
+#include
+
+namespace wide
+{
+
+template
+class integer;
+
+}
+
+using Int128 = wide::integer<128, signed>;
+using UInt128 = wide::integer<128, unsigned>;
+using Int256 = wide::integer<256, signed>;
+using UInt256 = wide::integer<256, unsigned>;
+
+namespace DB
+{
+
+template struct Decimal;
+
+using Decimal32 = Decimal;
+using Decimal64 = Decimal;
+using Decimal128 = Decimal;
+using Decimal256 = Decimal;
+
+class DateTime64;
+
+template
+concept is_decimal =
+ std::is_same_v
+ || std::is_same_v
+ || std::is_same_v
+ || std::is_same_v
+ || std::is_same_v;
+
+template
+concept is_over_big_int =
+ std::is_same_v
+ || std::is_same_v
+ || std::is_same_v
+ || std::is_same_v
+ || std::is_same_v
+ || std::is_same_v;
+}
diff --git a/base/base/IPv4andIPv6.h b/base/base/IPv4andIPv6.h
index 0e97d83b07e..7b745ec7b84 100644
--- a/base/base/IPv4andIPv6.h
+++ b/base/base/IPv4andIPv6.h
@@ -51,3 +51,15 @@ namespace DB
};
}
+
+namespace std
+{
+ template <>
+ struct hash
+ {
+ size_t operator()(const DB::IPv6 & x) const
+ {
+ return std::hash()(x.toUnderType());
+ }
+ };
+}
diff --git a/base/base/JSON.cpp b/base/base/JSON.cpp
index 315bcce38da..4c6d97b4444 100644
--- a/base/base/JSON.cpp
+++ b/base/base/JSON.cpp
@@ -466,9 +466,8 @@ JSON::Pos JSON::searchField(const char * data, size_t size) const
{
if (!it->hasEscapes())
{
- if (static_cast(size) + 2 > it->dataEnd() - it->data())
- continue;
- if (!strncmp(data, it->data() + 1, size))
+ const auto current_name = it->getRawName();
+ if (current_name.size() == size && 0 == memcmp(current_name.data(), data, size))
break;
}
else
diff --git a/base/base/StringRef.h b/base/base/StringRef.h
index a3e32ff5058..f428b7c747f 100644
--- a/base/base/StringRef.h
+++ b/base/base/StringRef.h
@@ -3,6 +3,7 @@
#include
#include // for std::logic_error
#include
+#include
#include
#include
#include
@@ -326,5 +327,16 @@ namespace ZeroTraits
inline void set(StringRef & x) { x.size = 0; }
}
+namespace PackedZeroTraits
+{
+ template class PackedPairNoInit>
+ inline bool check(const PackedPairNoInit p)
+ { return 0 == p.key.size; }
+
+ template class PackedPairNoInit>
+ inline void set(PackedPairNoInit & p)
+ { p.key.size = 0; }
+}
+
std::ostream & operator<<(std::ostream & os, const StringRef & str);
diff --git a/base/base/TypeList.h b/base/base/TypeList.h
index 244403b1c6b..310f0c0c586 100644
--- a/base/base/TypeList.h
+++ b/base/base/TypeList.h
@@ -4,7 +4,6 @@
#include
#include
#include "defines.h"
-#include "TypePair.h"
/// General-purpose typelist. Easy on compilation times as it does not use recursion.
template
@@ -28,7 +27,7 @@ namespace TypeListUtils /// In some contexts it's more handy to use functions in
constexpr Root changeRoot(TypeList) { return {}; }
template
- constexpr void forEach(TypeList, F && f) { (std::forward(f)(Id{}), ...); }
+ constexpr void forEach(TypeList, F && f) { (std::forward(f)(TypeList{}), ...); }
}
template
diff --git a/base/base/TypePair.h b/base/base/TypePair.h
deleted file mode 100644
index 8c2f380618c..00000000000
--- a/base/base/TypePair.h
+++ /dev/null
@@ -1,4 +0,0 @@
-#pragma once
-
-template struct TypePair {};
-template struct Id {};
diff --git a/base/base/argsToConfig.cpp b/base/base/argsToConfig.cpp
index d7983779d2d..faa1462218d 100644
--- a/base/base/argsToConfig.cpp
+++ b/base/base/argsToConfig.cpp
@@ -3,13 +3,29 @@
#include
#include
-
-void argsToConfig(const Poco::Util::Application::ArgVec & argv, Poco::Util::LayeredConfiguration & config, int priority)
+void argsToConfig(const Poco::Util::Application::ArgVec & argv,
+ Poco::Util::LayeredConfiguration & config,
+ int priority,
+ const std::unordered_set* alias_names)
{
/// Parsing all args and converting to config layer
/// Test: -- --1=1 --1=2 --3 5 7 8 -9 10 -11=12 14= 15== --16==17 --=18 --19= --20 21 22 --23 --24 25 --26 -27 28 ---29=30 -- ----31 32 --33 3-4
Poco::AutoPtr map_config = new Poco::Util::MapConfiguration;
std::string key;
+
+ auto add_arg = [&map_config, &alias_names](const std::string & k, const std::string & v)
+ {
+ map_config->setString(k, v);
+
+ if (alias_names && !alias_names->contains(k))
+ {
+ std::string alias_key = k;
+ std::replace(alias_key.begin(), alias_key.end(), '-', '_');
+ if (alias_names->contains(alias_key))
+ map_config->setString(alias_key, v);
+ }
+ };
+
for (const auto & arg : argv)
{
auto key_start = arg.find_first_not_of('-');
@@ -19,7 +35,7 @@ void argsToConfig(const Poco::Util::Application::ArgVec & argv, Poco::Util::Laye
// old saved '--key', will set to some true value "1"
if (!key.empty() && pos_minus != std::string::npos && pos_minus < key_start)
{
- map_config->setString(key, "1");
+ add_arg(key, "1");
key = "";
}
@@ -29,7 +45,7 @@ void argsToConfig(const Poco::Util::Application::ArgVec & argv, Poco::Util::Laye
{
if (pos_minus == std::string::npos || pos_minus > key_start)
{
- map_config->setString(key, arg);
+ add_arg(key, arg);
}
key = "";
}
@@ -55,7 +71,7 @@ void argsToConfig(const Poco::Util::Application::ArgVec & argv, Poco::Util::Laye
if (arg.size() > pos_eq)
value = arg.substr(pos_eq + 1);
- map_config->setString(key, value);
+ add_arg(key, value);
key = "";
}
diff --git a/base/base/argsToConfig.h b/base/base/argsToConfig.h
index 9b7b44b7b7f..ef34a8a2145 100644
--- a/base/base/argsToConfig.h
+++ b/base/base/argsToConfig.h
@@ -1,6 +1,8 @@
#pragma once
#include
+#include
+#include
namespace Poco::Util
{
@@ -8,4 +10,7 @@ class LayeredConfiguration; // NOLINT(cppcoreguidelines-virtual-class-destructor
}
/// Import extra command line arguments to configuration. These are command line arguments after --.
-void argsToConfig(const Poco::Util::Application::ArgVec & argv, Poco::Util::LayeredConfiguration & config, int priority);
+void argsToConfig(const Poco::Util::Application::ArgVec & argv,
+ Poco::Util::LayeredConfiguration & config,
+ int priority,
+ const std::unordered_set* registered_alias_names = nullptr);
diff --git a/base/base/coverage.cpp b/base/base/coverage.cpp
index 043f97f9593..1027638be3d 100644
--- a/base/base/coverage.cpp
+++ b/base/base/coverage.cpp
@@ -2,6 +2,8 @@
#if WITH_COVERAGE
+#pragma GCC diagnostic ignored "-Wreserved-identifier"
+
# include
# include
diff --git a/base/base/defines.h b/base/base/defines.h
index 91c35dc28b6..6abf8155b95 100644
--- a/base/base/defines.h
+++ b/base/base/defines.h
@@ -73,18 +73,6 @@
# endif
#endif
-#if defined(ADDRESS_SANITIZER)
-# define BOOST_USE_ASAN 1
-# define BOOST_USE_UCONTEXT 1
-#endif
-
-#if defined(THREAD_SANITIZER)
-# define BOOST_USE_TSAN 1
-# define BOOST_USE_UCONTEXT 1
-#endif
-
-/// TODO: Strange enough, there is no way to detect UB sanitizer.
-
/// Explicitly allow undefined behaviour for certain functions. Use it as a function attribute.
/// It is useful in case when compiler cannot see (and exploit) it, but UBSan can.
/// Example: multiplication of signed integers with possibility of overflow when both sides are from user input.
diff --git a/base/base/find_symbols.h b/base/base/find_symbols.h
index 83f53773ae7..a8747ecc9b7 100644
--- a/base/base/find_symbols.h
+++ b/base/base/find_symbols.h
@@ -34,9 +34,51 @@
* If no such characters, returns nullptr.
*/
+struct SearchSymbols
+{
+ static constexpr auto BUFFER_SIZE = 16;
+
+ SearchSymbols() = default;
+
+ explicit SearchSymbols(std::string in)
+ : str(std::move(in))
+ {
+#if defined(__SSE4_2__)
+ if (str.size() > BUFFER_SIZE)
+ {
+ throw std::runtime_error("SearchSymbols can contain at most " + std::to_string(BUFFER_SIZE) + " symbols and " + std::to_string(str.size()) + " was provided\n");
+ }
+
+ char tmp_safety_buffer[BUFFER_SIZE] = {0};
+
+ memcpy(tmp_safety_buffer, str.data(), str.size());
+
+ simd_vector = _mm_loadu_si128(reinterpret_cast(tmp_safety_buffer));
+#endif
+ }
+
+#if defined(__SSE4_2__)
+ __m128i simd_vector;
+#endif
+ std::string str;
+};
+
namespace detail
{
-template constexpr bool is_in(char x) { return ((x == chars) || ...); }
+template constexpr bool is_in(char x) { return ((x == chars) || ...); } // NOLINT(misc-redundant-expression)
+
+static bool is_in(char c, const char * symbols, size_t num_chars)
+{
+ for (size_t i = 0u; i < num_chars; ++i)
+ {
+ if (c == symbols[i])
+ {
+ return true;
+ }
+ }
+
+ return false;
+}
#if defined(__SSE2__)
template
@@ -53,6 +95,43 @@ inline __m128i mm_is_in(__m128i bytes)
__m128i eq = mm_is_in(bytes);
return _mm_or_si128(eq0, eq);
}
+
+inline __m128i mm_is_in(__m128i bytes, const char * symbols, size_t num_chars)
+{
+ __m128i accumulator = _mm_setzero_si128();
+ for (size_t i = 0; i < num_chars; ++i)
+ {
+ __m128i eq = _mm_cmpeq_epi8(bytes, _mm_set1_epi8(symbols[i]));
+ accumulator = _mm_or_si128(accumulator, eq);
+ }
+
+ return accumulator;
+}
+
+inline std::array<__m128i, 16u> mm_is_in_prepare(const char * symbols, size_t num_chars)
+{
+ std::array<__m128i, 16u> result {};
+
+ for (size_t i = 0; i < num_chars; ++i)
+ {
+ result[i] = _mm_set1_epi8(symbols[i]);
+ }
+
+ return result;
+}
+
+inline __m128i mm_is_in_execute(__m128i bytes, const std::array<__m128i, 16u> & needles)
+{
+ __m128i accumulator = _mm_setzero_si128();
+
+ for (const auto & needle : needles)
+ {
+ __m128i eq = _mm_cmpeq_epi8(bytes, needle);
+ accumulator = _mm_or_si128(accumulator, eq);
+ }
+
+ return accumulator;
+}
#endif
template
@@ -99,6 +178,32 @@ inline const char * find_first_symbols_sse2(const char * const begin, const char
return return_mode == ReturnMode::End ? end : nullptr;
}
+template
+inline const char * find_first_symbols_sse2(const char * const begin, const char * const end, const char * symbols, size_t num_chars)
+{
+ const char * pos = begin;
+
+#if defined(__SSE2__)
+ const auto needles = mm_is_in_prepare(symbols, num_chars);
+ for (; pos + 15 < end; pos += 16)
+ {
+ __m128i bytes = _mm_loadu_si128(reinterpret_cast(pos));
+
+ __m128i eq = mm_is_in_execute(bytes, needles);
+
+ uint16_t bit_mask = maybe_negate(uint16_t(_mm_movemask_epi8(eq)));
+ if (bit_mask)
+ return pos + __builtin_ctz(bit_mask);
+ }
+#endif
+
+ for (; pos < end; ++pos)
+ if (maybe_negate(is_in(*pos, symbols, num_chars)))
+ return pos;
+
+ return return_mode == ReturnMode::End ? end : nullptr;
+}
+
template
inline const char * find_last_symbols_sse2(const char * const begin, const char * const end)
@@ -159,26 +264,61 @@ inline const char * find_first_symbols_sse42(const char * const begin, const cha
#endif
for (; pos < end; ++pos)
- if ( (num_chars >= 1 && maybe_negate(*pos == c01))
- || (num_chars >= 2 && maybe_negate(*pos == c02))
- || (num_chars >= 3 && maybe_negate(*pos == c03))
- || (num_chars >= 4 && maybe_negate(*pos == c04))
- || (num_chars >= 5 && maybe_negate(*pos == c05))
- || (num_chars >= 6 && maybe_negate(*pos == c06))
- || (num_chars >= 7 && maybe_negate(*pos == c07))
- || (num_chars >= 8 && maybe_negate(*pos == c08))
- || (num_chars >= 9 && maybe_negate(*pos == c09))
- || (num_chars >= 10 && maybe_negate(*pos == c10))
- || (num_chars >= 11 && maybe_negate(*pos == c11))
- || (num_chars >= 12 && maybe_negate(*pos == c12))
- || (num_chars >= 13 && maybe_negate(*pos == c13))
- || (num_chars >= 14 && maybe_negate(*pos == c14))
- || (num_chars >= 15 && maybe_negate(*pos == c15))
- || (num_chars >= 16 && maybe_negate(*pos == c16)))
+ if ( (num_chars == 1 && maybe_negate(is_in(*pos)))
+ || (num_chars == 2 && maybe_negate(is_in(*pos)))
+ || (num_chars == 3 && maybe_negate(is_in(*pos)))
+ || (num_chars == 4 && maybe_negate(is_in(*pos)))
+ || (num_chars == 5 && maybe_negate(is_in(*pos)))
+ || (num_chars == 6 && maybe_negate(is_in(*pos)))
+ || (num_chars == 7 && maybe_negate(is_in(*pos)))
+ || (num_chars == 8 && maybe_negate(is_in(*pos)))
+ || (num_chars == 9 && maybe_negate(is_in(*pos)))
+ || (num_chars == 10 && maybe_negate(is_in(*pos)))
+ || (num_chars == 11 && maybe_negate(is_in(*pos)))
+ || (num_chars == 12 && maybe_negate(is_in(*pos)))
+ || (num_chars == 13 && maybe_negate(is_in(*pos)))
+ || (num_chars == 14 && maybe_negate(is_in(*pos)))
+ || (num_chars == 15 && maybe_negate(is_in(*pos)))
+ || (num_chars == 16 && maybe_negate(is_in(*pos))))
return pos;
return return_mode == ReturnMode::End ? end : nullptr;
}
+template
+inline const char * find_first_symbols_sse42(const char * const begin, const char * const end, const SearchSymbols & symbols)
+{
+ const char * pos = begin;
+
+ const auto num_chars = symbols.str.size();
+
+#if defined(__SSE4_2__)
+ constexpr int mode = _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT;
+
+ const __m128i set = symbols.simd_vector;
+
+ for (; pos + 15 < end; pos += 16)
+ {
+ __m128i bytes = _mm_loadu_si128(reinterpret_cast(pos));
+
+ if constexpr (positive)
+ {
+ if (_mm_cmpestrc(set, num_chars, bytes, 16, mode))
+ return pos + _mm_cmpestri(set, num_chars, bytes, 16, mode);
+ }
+ else
+ {
+ if (_mm_cmpestrc(set, num_chars, bytes, 16, mode | _SIDD_NEGATIVE_POLARITY))
+ return pos + _mm_cmpestri(set, num_chars, bytes, 16, mode | _SIDD_NEGATIVE_POLARITY);
+ }
+ }
+#endif
+
+ for (; pos < end; ++pos)
+ if (maybe_negate(is_in(*pos, symbols.str.data(), num_chars)))
+ return pos;
+
+ return return_mode == ReturnMode::End ? end : nullptr;
+}
/// NOTE No SSE 4.2 implementation for find_last_symbols_or_null. Not worth to do.
@@ -194,6 +334,17 @@ inline const char * find_first_symbols_dispatch(const char * begin, const char *
return find_first_symbols_sse2(begin, end);
}
+template
+inline const char * find_first_symbols_dispatch(const std::string_view haystack, const SearchSymbols & symbols)
+{
+#if defined(__SSE4_2__)
+ if (symbols.str.size() >= 5)
+ return find_first_symbols_sse42(haystack.begin(), haystack.end(), symbols);
+ else
+#endif
+ return find_first_symbols_sse2(haystack.begin(), haystack.end(), symbols.str.data(), symbols.str.size());
+}
+
}
@@ -211,6 +362,11 @@ inline char * find_first_symbols(char * begin, char * end)
return const_cast(detail::find_first_symbols_dispatch(begin, end));
}
+inline const char * find_first_symbols(std::string_view haystack, const SearchSymbols & symbols)
+{
+ return detail::find_first_symbols_dispatch(haystack, symbols);
+}
+
template
inline const char * find_first_not_symbols(const char * begin, const char * end)
{
@@ -223,6 +379,11 @@ inline char * find_first_not_symbols(char * begin, char * end)
return const_cast(detail::find_first_symbols_dispatch(begin, end));
}
+inline const char * find_first_not_symbols(std::string_view haystack, const SearchSymbols & symbols)
+{
+ return detail::find_first_symbols_dispatch(haystack, symbols);
+}
+
template
inline const char * find_first_symbols_or_null(const char * begin, const char * end)
{
@@ -235,6 +396,11 @@ inline char * find_first_symbols_or_null(char * begin, char * end)
return const_cast(detail::find_first_symbols_dispatch(begin, end));
}
+inline const char * find_first_symbols_or_null(std::string_view haystack, const SearchSymbols & symbols)
+{
+ return detail::find_first_symbols_dispatch(haystack, symbols);
+}
+
template
inline const char * find_first_not_symbols_or_null(const char * begin, const char * end)
{
@@ -247,6 +413,10 @@ inline char * find_first_not_symbols_or_null(char * begin, char * end)
return const_cast(detail::find_first_symbols_dispatch(begin, end));
}
+inline const char * find_first_not_symbols_or_null(std::string_view haystack, const SearchSymbols & symbols)
+{
+ return detail::find_first_symbols_dispatch(haystack, symbols);
+}
template
inline const char * find_last_symbols_or_null(const char * begin, const char * end)
diff --git a/base/base/hex.h b/base/base/hex.h
new file mode 100644
index 00000000000..b8cf95db893
--- /dev/null
+++ b/base/base/hex.h
@@ -0,0 +1,215 @@
+#pragma once
+
+#include
+#include
+#include "types.h"
+
+/// Maps 0..15 to the characters 0..9, A..F or 0..9, a..f respectively.
+
+constexpr inline std::string_view hex_digit_to_char_uppercase_table = "0123456789ABCDEF";
+constexpr inline std::string_view hex_digit_to_char_lowercase_table = "0123456789abcdef";
+
+constexpr char hexDigitUppercase(unsigned char c)
+{
+ return hex_digit_to_char_uppercase_table[c];
+}
+constexpr char hexDigitLowercase(unsigned char c)
+{
+ return hex_digit_to_char_lowercase_table[c];
+}
+
+/// Maps 0..255 to the two-character hex strings 00..FF or 00..ff respectively
+
+constexpr inline std::string_view hex_byte_to_char_uppercase_table = //
+ "000102030405060708090A0B0C0D0E0F"
+ "101112131415161718191A1B1C1D1E1F"
+ "202122232425262728292A2B2C2D2E2F"
+ "303132333435363738393A3B3C3D3E3F"
+ "404142434445464748494A4B4C4D4E4F"
+ "505152535455565758595A5B5C5D5E5F"
+ "606162636465666768696A6B6C6D6E6F"
+ "707172737475767778797A7B7C7D7E7F"
+ "808182838485868788898A8B8C8D8E8F"
+ "909192939495969798999A9B9C9D9E9F"
+ "A0A1A2A3A4A5A6A7A8A9AAABACADAEAF"
+ "B0B1B2B3B4B5B6B7B8B9BABBBCBDBEBF"
+ "C0C1C2C3C4C5C6C7C8C9CACBCCCDCECF"
+ "D0D1D2D3D4D5D6D7D8D9DADBDCDDDEDF"
+ "E0E1E2E3E4E5E6E7E8E9EAEBECEDEEEF"
+ "F0F1F2F3F4F5F6F7F8F9FAFBFCFDFEFF";
+
+constexpr inline std::string_view hex_byte_to_char_lowercase_table = //
+ "000102030405060708090a0b0c0d0e0f"
+ "101112131415161718191a1b1c1d1e1f"
+ "202122232425262728292a2b2c2d2e2f"
+ "303132333435363738393a3b3c3d3e3f"
+ "404142434445464748494a4b4c4d4e4f"
+ "505152535455565758595a5b5c5d5e5f"
+ "606162636465666768696a6b6c6d6e6f"
+ "707172737475767778797a7b7c7d7e7f"
+ "808182838485868788898a8b8c8d8e8f"
+ "909192939495969798999a9b9c9d9e9f"
+ "a0a1a2a3a4a5a6a7a8a9aaabacadaeaf"
+ "b0b1b2b3b4b5b6b7b8b9babbbcbdbebf"
+ "c0c1c2c3c4c5c6c7c8c9cacbcccdcecf"
+ "d0d1d2d3d4d5d6d7d8d9dadbdcdddedf"
+ "e0e1e2e3e4e5e6e7e8e9eaebecedeeef"
+ "f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff";
+
+inline void writeHexByteUppercase(UInt8 byte, void * out)
+{
+ memcpy(out, &hex_byte_to_char_uppercase_table[static_cast(byte) * 2], 2);
+}
+
+inline void writeHexByteLowercase(UInt8 byte, void * out)
+{
+ memcpy(out, &hex_byte_to_char_lowercase_table[static_cast(byte) * 2], 2);
+}
+
+constexpr inline std::string_view bin_byte_to_char_table = //
+ "0000000000000001000000100000001100000100000001010000011000000111"
+ "0000100000001001000010100000101100001100000011010000111000001111"
+ "0001000000010001000100100001001100010100000101010001011000010111"
+ "0001100000011001000110100001101100011100000111010001111000011111"
+ "0010000000100001001000100010001100100100001001010010011000100111"
+ "0010100000101001001010100010101100101100001011010010111000101111"
+ "0011000000110001001100100011001100110100001101010011011000110111"
+ "0011100000111001001110100011101100111100001111010011111000111111"
+ "0100000001000001010000100100001101000100010001010100011001000111"
+ "0100100001001001010010100100101101001100010011010100111001001111"
+ "0101000001010001010100100101001101010100010101010101011001010111"
+ "0101100001011001010110100101101101011100010111010101111001011111"
+ "0110000001100001011000100110001101100100011001010110011001100111"
+ "0110100001101001011010100110101101101100011011010110111001101111"
+ "0111000001110001011100100111001101110100011101010111011001110111"
+ "0111100001111001011110100111101101111100011111010111111001111111"
+ "1000000010000001100000101000001110000100100001011000011010000111"
+ "1000100010001001100010101000101110001100100011011000111010001111"
+ "1001000010010001100100101001001110010100100101011001011010010111"
+ "1001100010011001100110101001101110011100100111011001111010011111"
+ "1010000010100001101000101010001110100100101001011010011010100111"
+ "1010100010101001101010101010101110101100101011011010111010101111"
+ "1011000010110001101100101011001110110100101101011011011010110111"
+ "1011100010111001101110101011101110111100101111011011111010111111"
+ "1100000011000001110000101100001111000100110001011100011011000111"
+ "1100100011001001110010101100101111001100110011011100111011001111"
+ "1101000011010001110100101101001111010100110101011101011011010111"
+ "1101100011011001110110101101101111011100110111011101111011011111"
+ "1110000011100001111000101110001111100100111001011110011011100111"
+ "1110100011101001111010101110101111101100111011011110111011101111"
+ "1111000011110001111100101111001111110100111101011111011011110111"
+ "1111100011111001111110101111101111111100111111011111111011111111";
+
+inline void writeBinByte(UInt8 byte, void * out)
+{
+ memcpy(out, &bin_byte_to_char_table[static_cast(byte) * 8], 8);
+}
+
+/// Produces hex representation of an unsigned int with leading zeros (for checksums)
+template
+inline void writeHexUIntImpl(TUInt uint_, char * out, std::string_view table)
+{
+ union
+ {
+ TUInt value;
+ UInt8 uint8[sizeof(TUInt)];
+ };
+
+ value = uint_;
+
+ for (size_t i = 0; i < sizeof(TUInt); ++i)
+ {
+ if constexpr (std::endian::native == std::endian::little)
+ memcpy(out + i * 2, &table[static_cast(uint8[sizeof(TUInt) - 1 - i]) * 2], 2);
+ else
+ memcpy(out + i * 2, &table[static_cast(uint8[i]) * 2], 2);
+ }
+}
+
+template
+inline void writeHexUIntUppercase(TUInt uint_, char * out)
+{
+ writeHexUIntImpl(uint_, out, hex_byte_to_char_uppercase_table);
+}
+
+template
+inline void writeHexUIntLowercase(TUInt uint_, char * out)
+{
+ writeHexUIntImpl(uint_, out, hex_byte_to_char_lowercase_table);
+}
+
+template
+std::string getHexUIntUppercase(TUInt uint_)
+{
+ std::string res(sizeof(TUInt) * 2, '\0');
+ writeHexUIntUppercase(uint_, res.data());
+ return res;
+}
+
+template
+std::string getHexUIntLowercase(TUInt uint_)
+{
+ std::string res(sizeof(TUInt) * 2, '\0');
+ writeHexUIntLowercase(uint_, res.data());
+ return res;
+}
+
+/// Maps 0..9, A..F, a..f to 0..15. Other chars are mapped to an implementation-specific value.
+
+constexpr inline std::string_view hex_char_to_digit_table
+ = {"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\xff\xff\xff\xff\xff\xff" //0-9
+ "\xff\x0a\x0b\x0c\x0d\x0e\x0f\xff\xff\xff\xff\xff\xff\xff\xff\xff" //A-F
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\x0a\x0b\x0c\x0d\x0e\x0f\xff\xff\xff\xff\xff\xff\xff\xff\xff" //a-f
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff",
+ 256};
+
+constexpr UInt8 unhex(char c)
+{
+ return hex_char_to_digit_table[static_cast(c)];
+}
+
+constexpr UInt8 unhex2(const char * data)
+{
+ return static_cast(unhex(data[0])) * 0x10 + static_cast(unhex(data[1]));
+}
+
+constexpr UInt16 unhex4(const char * data)
+{
+ return static_cast(unhex(data[0])) * 0x1000 + static_cast(unhex(data[1])) * 0x100
+ + static_cast(unhex(data[2])) * 0x10 + static_cast(unhex(data[3]));
+}
+
+template
+constexpr TUInt unhexUInt(const char * data)
+{
+ TUInt res = 0;
+ if constexpr ((sizeof(TUInt) <= 8) || ((sizeof(TUInt) % 8) != 0))
+ {
+ for (size_t i = 0; i < sizeof(TUInt) * 2; ++i, ++data)
+ {
+ res <<= 4;
+ res += unhex(*data);
+ }
+ }
+ else
+ {
+ for (size_t i = 0; i < sizeof(TUInt) / 8; ++i, data += 16)
+ {
+ res <<= 64;
+ res += unhexUInt(data);
+ }
+ }
+ return res;
+}
diff --git a/base/base/interpolate.h b/base/base/interpolate.h
new file mode 100644
index 00000000000..1d4fc0b6257
--- /dev/null
+++ b/base/base/interpolate.h
@@ -0,0 +1,13 @@
+#pragma once
+#include
+#include
+
+/** Linear interpolation in logarithmic coordinates.
+ * Exponential interpolation is related to linear interpolation
+ * in exactly the same way as the geometric mean is related to the arithmetic mean.
+ */
+constexpr double interpolateExponential(double min, double max, double ratio)
+{
+ assert(min > 0 && ratio >= 0 && ratio <= 1);
+ return min * std::pow(max / min, ratio);
+}
diff --git a/base/base/phdr_cache.cpp b/base/base/phdr_cache.cpp
index c3d7fed2d3f..7d37f01b560 100644
--- a/base/base/phdr_cache.cpp
+++ b/base/base/phdr_cache.cpp
@@ -1,6 +1,4 @@
-#ifdef HAS_RESERVED_IDENTIFIER
#pragma clang diagnostic ignored "-Wreserved-identifier"
-#endif
/// This code was based on the code by Fedor Korotkiy https://www.linkedin.com/in/fedor-korotkiy-659a1838/
diff --git a/base/base/strong_typedef.h b/base/base/strong_typedef.h
index 2ddea6412f5..b3b8bced688 100644
--- a/base/base/strong_typedef.h
+++ b/base/base/strong_typedef.h
@@ -35,7 +35,7 @@ public:
Self & operator=(T && rhs) { t = std::move(rhs); return *this;}
// NOLINTBEGIN(google-explicit-constructor)
- operator const T & () const { return t; }
+ constexpr operator const T & () const { return t; }
operator T & () { return t; }
// NOLINTEND(google-explicit-constructor)
diff --git a/base/base/unaligned.h b/base/base/unaligned.h
index fcaaa38f2fe..3ab25c803bb 100644
--- a/base/base/unaligned.h
+++ b/base/base/unaligned.h
@@ -5,44 +5,6 @@
#include
-inline void reverseMemcpy(void * dst, const void * src, size_t size)
-{
- uint8_t * uint_dst = reinterpret_cast(dst);
- const uint8_t * uint_src = reinterpret_cast(src);
-
- uint_dst += size;
- while (size)
- {
- --uint_dst;
- *uint_dst = *uint_src;
- ++uint_src;
- --size;
- }
-}
-
-template
-inline T unalignedLoadLE(const void * address)
-{
- T res {};
- if constexpr (std::endian::native == std::endian::little)
- memcpy(&res, address, sizeof(res));
- else
- reverseMemcpy(&res, address, sizeof(res));
- return res;
-}
-
-
-template
-inline void unalignedStoreLE(void * address,
- const typename std::enable_if::type & src)
-{
- static_assert(std::is_trivially_copyable_v);
- if constexpr (std::endian::native == std::endian::little)
- memcpy(address, &src, sizeof(src));
- else
- reverseMemcpy(address, &src, sizeof(src));
-}
-
template
inline T unalignedLoad(const void * address)
{
@@ -62,3 +24,70 @@ inline void unalignedStore(void * address,
static_assert(std::is_trivially_copyable_v);
memcpy(address, &src, sizeof(src));
}
+
+
+inline void reverseMemcpy(void * dst, const void * src, size_t size)
+{
+ uint8_t * uint_dst = reinterpret_cast(dst);
+ const uint8_t * uint_src = reinterpret_cast(src);
+
+ uint_dst += size;
+ while (size)
+ {
+ --uint_dst;
+ *uint_dst = *uint_src;
+ ++uint_src;
+ --size;
+ }
+}
+
+template
+inline T unalignedLoadEndian(const void * address)
+{
+ T res {};
+ if constexpr (std::endian::native == endian)
+ memcpy(&res, address, sizeof(res));
+ else
+ reverseMemcpy(&res, address, sizeof(res));
+ return res;
+}
+
+
+template
+inline void unalignedStoreEndian(void * address, T & src)
+{
+ static_assert(std::is_trivially_copyable_v);
+ if constexpr (std::endian::native == endian)
+ memcpy(address, &src, sizeof(src));
+ else
+ reverseMemcpy(address, &src, sizeof(src));
+}
+
+
+template
+inline T unalignedLoadLittleEndian(const void * address)
+{
+ return unalignedLoadEndian(address);
+}
+
+
+template
+inline void unalignedStoreLittleEndian(void * address,
+ const typename std::enable_if::type & src)
+{
+ unalignedStoreEndian(address, src);
+}
+
+template
+inline T unalignedLoadBigEndian(const void * address)
+{
+ return unalignedLoadEndian(address);
+}
+
+
+template
+inline void unalignedStoreBigEndian(void * address,
+ const typename std::enable_if::type & src)
+{
+ unalignedStoreEndian(address, src);
+}
diff --git a/base/base/unit.h b/base/base/unit.h
index 1fb530be1f0..0fc314af479 100644
--- a/base/base/unit.h
+++ b/base/base/unit.h
@@ -5,10 +5,8 @@ constexpr size_t KiB = 1024;
constexpr size_t MiB = 1024 * KiB;
constexpr size_t GiB = 1024 * MiB;
-#ifdef HAS_RESERVED_IDENTIFIER
-# pragma clang diagnostic push
-# pragma clang diagnostic ignored "-Wreserved-identifier"
-#endif
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wreserved-identifier"
// NOLINTBEGIN(google-runtime-int)
constexpr size_t operator"" _KiB(unsigned long long val) { return val * KiB; }
@@ -16,6 +14,4 @@ constexpr size_t operator"" _MiB(unsigned long long val) { return val * MiB; }
constexpr size_t operator"" _GiB(unsigned long long val) { return val * GiB; }
// NOLINTEND(google-runtime-int)
-#ifdef HAS_RESERVED_IDENTIFIER
-# pragma clang diagnostic pop
-#endif
+#pragma clang diagnostic pop
diff --git a/base/base/wide_integer_impl.h b/base/base/wide_integer_impl.h
index 4a54c0fb2a4..4a80c176829 100644
--- a/base/base/wide_integer_impl.h
+++ b/base/base/wide_integer_impl.h
@@ -155,13 +155,13 @@ struct common_type, Arithmetic>
std::is_floating_point_v,
Arithmetic,
std::conditional_t<
- sizeof(Arithmetic) < Bits * sizeof(long),
+ sizeof(Arithmetic) * 8 < Bits,
wide::integer,
std::conditional_t<
- Bits * sizeof(long) < sizeof(Arithmetic),
+ Bits < sizeof(Arithmetic) * 8,
Arithmetic,
std::conditional_t<
- Bits * sizeof(long) == sizeof(Arithmetic) && (std::is_same_v || std::is_signed_v),
+ Bits == sizeof(Arithmetic) * 8 && (std::is_same_v || std::is_signed_v),
Arithmetic,
wide::integer>>>>;
};
@@ -314,7 +314,14 @@ struct integer::_impl
const T alpha = t / static_cast(max_int);
- if (alpha <= static_cast(max_int))
+ /** Here we have to use strict comparison.
+ * The max_int is 2^64 - 1.
+ * When casted to floating point type, it will be rounded to the closest representable number,
+ * which is 2^64.
+ * But 2^64 is not representable in uint64_t,
+ * so the maximum representable number will be strictly less.
+ */
+ if (alpha < static_cast(max_int))
self = static_cast(alpha);
else // max(double) / 2^64 will surely contain less than 52 precision bits, so speed up computations.
set_multiplier(self, static_cast(alpha));
@@ -732,9 +739,10 @@ public:
if (std::numeric_limits::is_signed && (is_negative(lhs) != is_negative(rhs)))
return is_negative(rhs);
+ integer t = rhs;
for (unsigned i = 0; i < item_count; ++i)
{
- base_type rhs_item = get_item(rhs, big(i));
+ base_type rhs_item = get_item(t, big(i));
if (lhs.items[big(i)] != rhs_item)
return lhs.items[big(i)] > rhs_item;
@@ -757,9 +765,10 @@ public:
if (std::numeric_limits::is_signed && (is_negative(lhs) != is_negative(rhs)))
return is_negative(lhs);
+ integer t = rhs;
for (unsigned i = 0; i < item_count; ++i)
{
- base_type rhs_item = get_item(rhs, big(i));
+ base_type rhs_item = get_item(t, big(i));
if (lhs.items[big(i)] != rhs_item)
return lhs.items[big(i)] < rhs_item;
@@ -779,9 +788,10 @@ public:
{
if constexpr (should_keep_size())
{
+ integer t = rhs;
for (unsigned i = 0; i < item_count; ++i)
{
- base_type rhs_item = get_item(rhs, any(i));
+ base_type rhs_item = get_item(t, any(i));
if (lhs.items[any(i)] != rhs_item)
return false;
@@ -1239,7 +1249,7 @@ constexpr integer::operator long double() const noexcept
for (unsigned i = 0; i < _impl::item_count; ++i)
{
long double t = res;
- res *= std::numeric_limits::max();
+ res *= static_cast(std::numeric_limits::max());
res += t;
res += tmp.items[_impl::big(i)];
}
diff --git a/base/base/wide_integer_to_string.h b/base/base/wide_integer_to_string.h
index 160bf599516..c2cbe8d82e3 100644
--- a/base/base/wide_integer_to_string.h
+++ b/base/base/wide_integer_to_string.h
@@ -64,6 +64,6 @@ struct fmt::formatter>
template
auto format(const wide::integer & value, FormatContext & ctx)
{
- return format_to(ctx.out(), "{}", to_string(value));
+ return fmt::format_to(ctx.out(), "{}", to_string(value));
}
};
diff --git a/base/glibc-compatibility/glibc-compatibility.c b/base/glibc-compatibility/glibc-compatibility.c
index 7e8ea5051d7..49bb81a58be 100644
--- a/base/glibc-compatibility/glibc-compatibility.c
+++ b/base/glibc-compatibility/glibc-compatibility.c
@@ -235,6 +235,17 @@ ssize_t getrandom(void *buf, size_t buflen, unsigned flags)
return syscall(SYS_getrandom, buf, buflen, flags);
}
+/* Structure for scatter/gather I/O. */
+struct iovec
+{
+ void *iov_base; /* Pointer to data. */
+ size_t iov_len; /* Length of data. */
+};
+
+ssize_t preadv(int __fd, const struct iovec *__iovec, int __count, __off_t __offset)
+{
+ return syscall(SYS_preadv, __fd, __iovec, __count, (long)(__offset), (long)(__offset>>32));
+}
#include
#include
diff --git a/base/glibc-compatibility/musl/expf.c b/base/glibc-compatibility/musl/expf.c
new file mode 100644
index 00000000000..0a59236d1c0
--- /dev/null
+++ b/base/glibc-compatibility/musl/expf.c
@@ -0,0 +1,81 @@
+/* origin: FreeBSD /usr/src/lib/msun/src/e_expf.c */
+/*
+ * Conversion to float by Ian Lance Taylor, Cygnus Support, ian@cygnus.com.
+ */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+#include "libm.h"
+
+static const float
+ half[2] = {0.5,-0.5},
+ ln2hi = 6.9314575195e-1f, /* 0x3f317200 */
+ ln2lo = 1.4286067653e-6f, /* 0x35bfbe8e */
+ invln2 = 1.4426950216e+0f, /* 0x3fb8aa3b */
+ /*
+ * Domain [-0.34568, 0.34568], range ~[-4.278e-9, 4.447e-9]:
+ * |x*(exp(x)+1)/(exp(x)-1) - p(x)| < 2**-27.74
+ */
+ P1 = 1.6666625440e-1f, /* 0xaaaa8f.0p-26 */
+ P2 = -2.7667332906e-3f; /* -0xb55215.0p-32 */
+
+float expf(float x)
+{
+ float_t hi, lo, c, xx, y;
+ int k, sign;
+ uint32_t hx;
+
+ GET_FLOAT_WORD(hx, x);
+ sign = hx >> 31; /* sign bit of x */
+ hx &= 0x7fffffff; /* high word of |x| */
+
+ /* special cases */
+ if (hx >= 0x42aeac50) { /* if |x| >= -87.33655f or NaN */
+ if (hx >= 0x42b17218 && !sign) { /* x >= 88.722839f */
+ /* overflow */
+ x *= 0x1p127f;
+ return x;
+ }
+ if (sign) {
+ /* underflow */
+ FORCE_EVAL(-0x1p-149f/x);
+ if (hx >= 0x42cff1b5) /* x <= -103.972084f */
+ return 0;
+ }
+ }
+
+ /* argument reduction */
+ if (hx > 0x3eb17218) { /* if |x| > 0.5 ln2 */
+ if (hx > 0x3f851592) /* if |x| > 1.5 ln2 */
+ k = invln2*x + half[sign];
+ else
+ k = 1 - sign - sign;
+ hi = x - k*ln2hi; /* k*ln2hi is exact here */
+ lo = k*ln2lo;
+ x = hi - lo;
+ } else if (hx > 0x39000000) { /* |x| > 2**-14 */
+ k = 0;
+ hi = x;
+ lo = 0;
+ } else {
+ /* raise inexact */
+ FORCE_EVAL(0x1p127f + x);
+ return 1 + x;
+ }
+
+ /* x is now in primary range */
+ xx = x*x;
+ c = x - xx*(P1+xx*P2);
+ y = 1 + (x*c/(2-c) - lo + hi);
+ if (k == 0)
+ return y;
+ return scalbnf(y, k);
+}
\ No newline at end of file
diff --git a/base/glibc-compatibility/musl/logf.c b/base/glibc-compatibility/musl/logf.c
index 7ee5d7fe623..e4c2237caa2 100644
--- a/base/glibc-compatibility/musl/logf.c
+++ b/base/glibc-compatibility/musl/logf.c
@@ -53,7 +53,7 @@ float logf(float x)
tmp = ix - OFF;
i = (tmp >> (23 - LOGF_TABLE_BITS)) % N;
k = (int32_t)tmp >> 23; /* arithmetic shift */
- iz = ix - (tmp & 0x1ff << 23);
+ iz = ix - (tmp & 0xff800000);
invc = T[i].invc;
logc = T[i].logc;
z = (double_t)asfloat(iz);
diff --git a/base/glibc-compatibility/musl/scalbnf.c b/base/glibc-compatibility/musl/scalbnf.c
new file mode 100644
index 00000000000..cf56cacfb5f
--- /dev/null
+++ b/base/glibc-compatibility/musl/scalbnf.c
@@ -0,0 +1,31 @@
+#include
+#include
+
+float scalbnf(float x, int n)
+{
+ union {float f; uint32_t i;} u;
+ float_t y = x;
+
+ if (n > 127) {
+ y *= 0x1p127f;
+ n -= 127;
+ if (n > 127) {
+ y *= 0x1p127f;
+ n -= 127;
+ if (n > 127)
+ n = 127;
+ }
+ } else if (n < -126) {
+ y *= 0x1p-126f;
+ n += 126;
+ if (n < -126) {
+ y *= 0x1p-126f;
+ n += 126;
+ if (n < -126)
+ n = -126;
+ }
+ }
+ u.i = (uint32_t)(0x7f+n)<<23;
+ x = y * u.f;
+ return x;
+}
diff --git a/base/harmful/harmful.c b/base/harmful/harmful.c
index 6112f9a339c..78796ca0c05 100644
--- a/base/harmful/harmful.c
+++ b/base/harmful/harmful.c
@@ -31,7 +31,8 @@ TRAP(argp_state_help)
TRAP(argp_usage)
TRAP(asctime)
TRAP(clearenv)
-TRAP(crypt)
+// Redefined at contrib/libbcrypt/crypt_blowfish/wrapper.c:186
+// TRAP(crypt)
TRAP(ctime)
TRAP(cuserid)
TRAP(drand48)
diff --git a/base/poco/Crypto/include/Poco/Crypto/ECKeyImpl.h b/base/poco/Crypto/include/Poco/Crypto/ECKeyImpl.h
index 2a72861a84e..d051ef1b768 100644
--- a/base/poco/Crypto/include/Poco/Crypto/ECKeyImpl.h
+++ b/base/poco/Crypto/include/Poco/Crypto/ECKeyImpl.h
@@ -90,20 +90,6 @@ namespace Crypto
std::string groupName() const;
/// Returns the EC key group name.
- void save(const std::string & publicKeyFile, const std::string & privateKeyFile = "", const std::string & privateKeyPassphrase = "")
- const;
- /// Exports the public and private keys to the given files.
- ///
- /// If an empty filename is specified, the corresponding key
- /// is not exported.
-
- void
- save(std::ostream * pPublicKeyStream, std::ostream * pPrivateKeyStream = 0, const std::string & privateKeyPassphrase = "") const;
- /// Exports the public and private key to the given streams.
- ///
- /// If a null pointer is passed for a stream, the corresponding
- /// key is not exported.
-
static std::string getCurveName(int nid = -1);
/// Returns elliptical curve name corresponding to
/// the given nid; if nid is not found, returns
@@ -150,22 +136,6 @@ namespace Crypto
{
return OBJ_nid2sn(groupId());
}
-
-
- inline void
- ECKeyImpl::save(const std::string & publicKeyFile, const std::string & privateKeyFile, const std::string & privateKeyPassphrase) const
- {
- EVPPKey(_pEC).save(publicKeyFile, privateKeyFile, privateKeyPassphrase);
- }
-
-
- inline void
- ECKeyImpl::save(std::ostream * pPublicKeyStream, std::ostream * pPrivateKeyStream, const std::string & privateKeyPassphrase) const
- {
- EVPPKey(_pEC).save(pPublicKeyStream, pPrivateKeyStream, privateKeyPassphrase);
- }
-
-
}
} // namespace Poco::Crypto
diff --git a/base/poco/Crypto/include/Poco/Crypto/KeyPair.h b/base/poco/Crypto/include/Poco/Crypto/KeyPair.h
index 36adbec6a4d..291a0f8b749 100644
--- a/base/poco/Crypto/include/Poco/Crypto/KeyPair.h
+++ b/base/poco/Crypto/include/Poco/Crypto/KeyPair.h
@@ -56,24 +56,6 @@ namespace Crypto
virtual int size() const;
/// Returns the RSA modulus size.
- virtual void save(
- const std::string & publicKeyPairFile,
- const std::string & privateKeyPairFile = "",
- const std::string & privateKeyPairPassphrase = "") const;
- /// Exports the public and private keys to the given files.
- ///
- /// If an empty filename is specified, the corresponding key
- /// is not exported.
-
- virtual void save(
- std::ostream * pPublicKeyPairStream,
- std::ostream * pPrivateKeyPairStream = 0,
- const std::string & privateKeyPairPassphrase = "") const;
- /// Exports the public and private key to the given streams.
- ///
- /// If a null pointer is passed for a stream, the corresponding
- /// key is not exported.
-
KeyPairImpl::Ptr impl() const;
/// Returns the impl object.
@@ -97,21 +79,6 @@ namespace Crypto
return _pImpl->size();
}
-
- inline void
- KeyPair::save(const std::string & publicKeyFile, const std::string & privateKeyFile, const std::string & privateKeyPassphrase) const
- {
- _pImpl->save(publicKeyFile, privateKeyFile, privateKeyPassphrase);
- }
-
-
- inline void
- KeyPair::save(std::ostream * pPublicKeyStream, std::ostream * pPrivateKeyStream, const std::string & privateKeyPassphrase) const
- {
- _pImpl->save(pPublicKeyStream, pPrivateKeyStream, privateKeyPassphrase);
- }
-
-
inline const std::string & KeyPair::name() const
{
return _pImpl->name();
diff --git a/base/poco/Crypto/include/Poco/Crypto/KeyPairImpl.h b/base/poco/Crypto/include/Poco/Crypto/KeyPairImpl.h
index 155efd20b9c..ecafbef0241 100644
--- a/base/poco/Crypto/include/Poco/Crypto/KeyPairImpl.h
+++ b/base/poco/Crypto/include/Poco/Crypto/KeyPairImpl.h
@@ -55,22 +55,6 @@ namespace Crypto
virtual int size() const = 0;
/// Returns the key size.
- virtual void save(
- const std::string & publicKeyFile,
- const std::string & privateKeyFile = "",
- const std::string & privateKeyPassphrase = "") const = 0;
- /// Exports the public and private keys to the given files.
- ///
- /// If an empty filename is specified, the corresponding key
- /// is not exported.
-
- virtual void save(
- std::ostream * pPublicKeyStream, std::ostream * pPrivateKeyStream = 0, const std::string & privateKeyPassphrase = "") const = 0;
- /// Exports the public and private key to the given streams.
- ///
- /// If a null pointer is passed for a stream, the corresponding
- /// key is not exported.
-
const std::string & name() const;
/// Returns key pair name
diff --git a/base/poco/Crypto/include/Poco/Crypto/RSAKeyImpl.h b/base/poco/Crypto/include/Poco/Crypto/RSAKeyImpl.h
index 4ccbb324c06..010c68bacd7 100644
--- a/base/poco/Crypto/include/Poco/Crypto/RSAKeyImpl.h
+++ b/base/poco/Crypto/include/Poco/Crypto/RSAKeyImpl.h
@@ -96,20 +96,6 @@ namespace Crypto
ByteVec decryptionExponent() const;
/// Returns the RSA decryption exponent.
- void save(const std::string & publicKeyFile, const std::string & privateKeyFile = "", const std::string & privateKeyPassphrase = "")
- const;
- /// Exports the public and private keys to the given files.
- ///
- /// If an empty filename is specified, the corresponding key
- /// is not exported.
-
- void
- save(std::ostream * pPublicKeyStream, std::ostream * pPrivateKeyStream = 0, const std::string & privateKeyPassphrase = "") const;
- /// Exports the public and private key to the given streams.
- ///
- /// If a null pointer is passed for a stream, the corresponding
- /// key is not exported.
-
private:
RSAKeyImpl();
@@ -139,4 +125,4 @@ namespace Crypto
} // namespace Poco::Crypto
-#endif // Crypto_RSAKeyImplImpl_INCLUDED
\ No newline at end of file
+#endif // Crypto_RSAKeyImplImpl_INCLUDED
diff --git a/base/poco/Crypto/src/RSAKeyImpl.cpp b/base/poco/Crypto/src/RSAKeyImpl.cpp
index eb6e758343a..229a3bce828 100644
--- a/base/poco/Crypto/src/RSAKeyImpl.cpp
+++ b/base/poco/Crypto/src/RSAKeyImpl.cpp
@@ -269,103 +269,6 @@ RSAKeyImpl::ByteVec RSAKeyImpl::decryptionExponent() const
}
-void RSAKeyImpl::save(const std::string& publicKeyFile,
- const std::string& privateKeyFile,
- const std::string& privateKeyPassphrase) const
-{
- if (!publicKeyFile.empty())
- {
- BIO* bio = BIO_new(BIO_s_file());
- if (!bio) throw Poco::IOException("Cannot create BIO for writing public key file", publicKeyFile);
- try
- {
- if (BIO_write_filename(bio, const_cast(publicKeyFile.c_str())))
- {
- if (!PEM_write_bio_RSAPublicKey(bio, _pRSA))
- throw Poco::WriteFileException("Failed to write public key to file", publicKeyFile);
- }
- else throw Poco::CreateFileException("Cannot create public key file");
- }
- catch (...)
- {
- BIO_free(bio);
- throw;
- }
- BIO_free(bio);
- }
-
- if (!privateKeyFile.empty())
- {
- BIO* bio = BIO_new(BIO_s_file());
- if (!bio) throw Poco::IOException("Cannot create BIO for writing private key file", privateKeyFile);
- try
- {
- if (BIO_write_filename(bio, const_cast(privateKeyFile.c_str())))
- {
- int rc = 0;
- if (privateKeyPassphrase.empty())
- rc = PEM_write_bio_RSAPrivateKey(bio, _pRSA, 0, 0, 0, 0, 0);
- else
- rc = PEM_write_bio_RSAPrivateKey(bio, _pRSA, EVP_des_ede3_cbc(),
- reinterpret_cast(const_cast(privateKeyPassphrase.c_str())),
- static_cast(privateKeyPassphrase.length()), 0, 0);
- if (!rc) throw Poco::FileException("Failed to write private key to file", privateKeyFile);
- }
- else throw Poco::CreateFileException("Cannot create private key file", privateKeyFile);
- }
- catch (...)
- {
- BIO_free(bio);
- throw;
- }
- BIO_free(bio);
- }
-}
-
-
-void RSAKeyImpl::save(std::ostream* pPublicKeyStream,
- std::ostream* pPrivateKeyStream,
- const std::string& privateKeyPassphrase) const
-{
- if (pPublicKeyStream)
- {
- BIO* bio = BIO_new(BIO_s_mem());
- if (!bio) throw Poco::IOException("Cannot create BIO for writing public key");
- if (!PEM_write_bio_RSAPublicKey(bio, _pRSA))
- {
- BIO_free(bio);
- throw Poco::WriteFileException("Failed to write public key to stream");
- }
- char* pData;
- long size = BIO_get_mem_data(bio, &pData);
- pPublicKeyStream->write(pData, static_cast(size));
- BIO_free(bio);
- }
-
- if (pPrivateKeyStream)
- {
- BIO* bio = BIO_new(BIO_s_mem());
- if (!bio) throw Poco::IOException("Cannot create BIO for writing public key");
- int rc = 0;
- if (privateKeyPassphrase.empty())
- rc = PEM_write_bio_RSAPrivateKey(bio, _pRSA, 0, 0, 0, 0, 0);
- else
- rc = PEM_write_bio_RSAPrivateKey(bio, _pRSA, EVP_des_ede3_cbc(),
- reinterpret_cast(const_cast(privateKeyPassphrase.c_str())),
- static_cast(privateKeyPassphrase.length()), 0, 0);
- if (!rc)
- {
- BIO_free(bio);
- throw Poco::FileException("Failed to write private key to stream");
- }
- char* pData;
- long size = BIO_get_mem_data(bio, &pData);
- pPrivateKeyStream->write(pData, static_cast