Merge remote-tracking branch 'upstream/master' into make_system_replicas_parallel

Nikolay Degterinsky 2023-01-05 02:36:07 +00:00
commit 673ba769e5
1932 changed files with 119346 additions and 21450 deletions


@@ -16,6 +16,7 @@ Checks: '*,
-android-*,
-bugprone-assignment-in-if-condition,
-bugprone-branch-clone,
-bugprone-easily-swappable-parameters,
-bugprone-exception-escape,
@@ -23,7 +24,6 @@ Checks: '*,
-bugprone-narrowing-conversions,
-bugprone-not-null-terminated-result,
-bugprone-unchecked-optional-access,
-bugprone-assignment-in-if-condition,
-cert-dcl16-c,
-cert-err58-cpp,
@@ -34,7 +34,6 @@ Checks: '*,
-clang-analyzer-optin.performance.Padding,
-clang-analyzer-optin.portability.UnixAPI,
-clang-analyzer-security.insecureAPI.bzero,
-clang-analyzer-security.insecureAPI.strcpy,
@@ -103,12 +102,13 @@ Checks: '*,
-openmp-*,
-misc-const-correctness,
-misc-no-recursion,
-misc-non-private-member-variables-in-classes,
-misc-const-correctness,
-modernize-avoid-c-arrays,
-modernize-concat-nested-namespaces,
-modernize-macro-to-enum,
-modernize-pass-by-value,
-modernize-return-braced-init-list,
-modernize-use-auto,
@@ -117,7 +117,6 @@ Checks: '*,
-modernize-use-nodiscard,
-modernize-use-override,
-modernize-use-trailing-return-type,
-modernize-macro-to-enum,
-performance-inefficient-string-concatenation,
-performance-no-int-to-ptr,
@@ -135,17 +134,35 @@ Checks: '*,
-readability-magic-numbers,
-readability-named-parameter,
-readability-redundant-declaration,
-readability-simplify-boolean-expr,
-readability-static-accessed-through-instance,
-readability-suspicious-call-argument,
-readability-uppercase-literal-suffix,
-readability-use-anyofallof,
-readability-simplify-boolean-expr,
-zirkon-*,
-misc-*, # temporarily disabled due to being too slow
# also disable checks in other categories which are aliases of checks in misc-*:
# https://releases.llvm.org/15.0.0/tools/clang/tools/extra/docs/clang-tidy/checks/list.html
-cert-dcl54-cpp, # alias of misc-new-delete-overloads
-hicpp-new-delete-operators, # alias of misc-new-delete-overloads
-cert-fio38-c, # alias of misc-non-copyable-objects
-cert-dcl03-c, # alias of misc-static-assert
-hicpp-static-assert, # alias of misc-static-assert
-cert-err09-cpp, # alias of misc-throw-by-value-catch-by-reference
-cert-err61-cpp, # alias of misc-throw-by-value-catch-by-reference
-cppcoreguidelines-c-copy-assignment-signature, # alias of misc-unconventional-assign-operator
-cppcoreguidelines-non-private-member-variables-in-classes, # alias of misc-non-private-member-variables-in-classes
'
WarningsAsErrors: '*'
# TODO: use dictionary syntax for CheckOptions when the minimum clang-tidy level rises to 15
# some-check.SomeOption: 'some value'
# instead of
# - key: some-check.SomeOption
# value: 'some value'
CheckOptions:
- key: readability-identifier-naming.ClassCase
value: CamelCase

.exrc (new file)

@@ -0,0 +1 @@
au BufRead,BufNewFile * set tabstop=4 softtabstop=0 expandtab shiftwidth=4 smarttab tags=tags,../tags


@@ -7,6 +7,6 @@ assignees: ''
---
> Make sure to check documentation https://clickhouse.com/docs/en/ first. If the question is concise and probably has a short answer, asking it in Telegram chat https://telegram.me/clickhouse_en is probably the fastest way to find the answer. For more complicated questions, consider asking them on StackOverflow with "clickhouse" tag https://stackoverflow.com/questions/tagged/clickhouse
> Make sure to check documentation https://clickhouse.com/docs/en/ first. If the question is concise and probably has a short answer, asking it in [community Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-1gh9ds7f4-PgDhJAaF8ad5RbWBAAjzFg) is probably the fastest way to find the answer. For more complicated questions, consider asking them on StackOverflow with "clickhouse" tag https://stackoverflow.com/questions/tagged/clickhouse
> If you still prefer GitHub issues, remove all this text and ask your question here.


@@ -18,5 +18,21 @@ tests/ci/run_check.py
### Changelog entry (a user-readable short description of the changes that goes to CHANGELOG.md):
...
### Documentation entry for user-facing changes
<!---
Directly edit documentation source files in the "docs" folder with the same pull-request as code changes
or
Add a user-readable short description of the changes that should be added to docs.clickhouse.com below.
At a minimum, the following information should be added (but add more as needed).
- Motivation: Why is this function, table engine, etc. useful to ClickHouse users?
- Parameters: If the feature being added takes arguments, options or is influenced by settings, please list them below with a brief explanation.
- Example use: A query or command.
-->
> Information about CI checks: https://clickhouse.com/docs/en/development/continuous-integration/


@@ -12,11 +12,10 @@ jobs:
PythonUnitTests:
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Python unit tests
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
@@ -24,34 +23,32 @@ jobs:
DockerHubPushAarch64:
runs-on: [self-hosted, style-checker-aarch64]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
DockerHubPushAmd64:
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix amd64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
@@ -59,18 +56,17 @@ jobs:
needs: [DockerHubPushAmd64, DockerHubPushAarch64]
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Download changed aarch64 images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}
- name: Download changed amd64 images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}
@@ -79,7 +75,7 @@ jobs:
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: changed_images
path: ${{ runner.temp }}/changed_images.json
@@ -94,13 +90,12 @@ jobs:
REPO_COPY=${{runner.temp}}/compatibility_check/ClickHouse
REPORTS_PATH=${{runner.temp}}/reports_dir
EOF
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Download json reports
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: CompatibilityCheck
@@ -132,28 +127,25 @@ jobs:
BUILD_NAME=package_release
EOF
- name: Download changed images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
fetch-depth: 0 # For a proper version and performance artifacts
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
@@ -177,28 +169,25 @@ jobs:
BUILD_NAME=package_aarch64
EOF
- name: Download changed images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
fetch-depth: 0 # For a proper version and performance artifacts
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
@@ -222,26 +211,24 @@ jobs:
BUILD_NAME=package_asan
EOF
- name: Download changed images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
@@ -265,26 +252,24 @@ jobs:
BUILD_NAME=package_tsan
EOF
- name: Download changed images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
@@ -308,26 +293,24 @@ jobs:
BUILD_NAME=package_debug
EOF
- name: Download changed images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
@@ -351,28 +334,25 @@ jobs:
BUILD_NAME=binary_darwin
EOF
- name: Download changed images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
fetch-depth: 0 # otherwise we will have no info about contributors
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
@@ -396,28 +376,25 @@ jobs:
BUILD_NAME=binary_darwin_aarch64
EOF
- name: Download changed images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
fetch-depth: 0 # otherwise we will have no info about contributors
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
@@ -436,12 +413,10 @@ jobs:
- BuilderDebAarch64
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0 # It MUST BE THE SAME for all dependencies and the job itself
- name: Check docker clickhouse/clickhouse-server building
run: |
@@ -477,14 +452,13 @@ jobs:
NEEDS_DATA_PATH=${{runner.temp}}/needs.json
EOF
- name: Download json reports
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Report Builder
run: |
sudo rm -fr "$TEMP_PATH"
@@ -516,14 +490,13 @@ jobs:
NEEDS_DATA_PATH=${{runner.temp}}/needs.json
EOF
- name: Download json reports
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Report Builder
run: |
sudo rm -fr "$TEMP_PATH"
@@ -556,14 +529,13 @@ jobs:
KILL_TIMEOUT=10800
EOF
- name: Download json reports
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
@@ -594,14 +566,13 @@ jobs:
KILL_TIMEOUT=3600
EOF
- name: Download json reports
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
@@ -635,14 +606,13 @@ jobs:
REPO_COPY=${{runner.temp}}/stress_thread/ClickHouse
EOF
- name: Download json reports
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Stress test
run: |
sudo rm -fr "$TEMP_PATH"
@@ -672,14 +642,13 @@ jobs:
REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse
EOF
- name: Download json reports
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Integration test
run: |
sudo rm -fr "$TEMP_PATH"
@@ -706,11 +675,10 @@ jobs:
- CompatibilityCheck
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Finish label
run: |
cd "$GITHUB_WORKSPACE/tests/ci"


@@ -28,8 +28,9 @@ jobs:
REPO_TEAM=core
EOF
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
fetch-depth: 0
- name: Cherry pick


@@ -16,15 +16,15 @@ on: # yamllint disable-line rule:truthy
- 'docker/docs/**'
- 'docs/**'
- 'website/**'
- 'utils/check-style/aspell-ignore/**'
jobs:
CheckLabels:
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -rf "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Labels check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
@@ -33,17 +33,16 @@ jobs:
needs: CheckLabels
runs-on: [self-hosted, style-checker-aarch64]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
@@ -51,17 +50,16 @@ jobs:
needs: CheckLabels
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix amd64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
@@ -69,18 +67,17 @@ jobs:
needs: [DockerHubPushAmd64, DockerHubPushAarch64]
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Download changed aarch64 images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}
- name: Download changed amd64 images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}
@@ -89,7 +86,7 @@ jobs:
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: changed_images
path: ${{ runner.temp }}/changed_images.json
@@ -109,15 +106,14 @@ jobs:
- name: Download changed images
# even if artifact does not exist, e.g. on `do not test` label or failed Docker job
continue-on-error: true
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.TEMP_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Style Check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
@@ -139,15 +135,14 @@ jobs:
REPO_COPY=${{runner.temp}}/docs_check/ClickHouse
EOF
- name: Download changed images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.TEMP_PATH }}
- name: Clear repository
run: |
sudo rm -rf "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Docs Check
run: |
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -166,11 +161,10 @@ jobs:
- DocsCheck
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Finish label
run: |
cd "$GITHUB_WORKSPACE/tests/ci"


@@ -17,39 +17,38 @@ concurrency:
- 'docs/**'
- 'utils/list-versions/version_date.tsv'
- 'website/**'
- 'utils/check-style/aspell-ignore/**'
workflow_dispatch:
jobs:
DockerHubPushAarch64:
runs-on: [self-hosted, style-checker-aarch64]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
DockerHubPushAmd64:
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix amd64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
@@ -57,18 +56,17 @@ jobs:
needs: [DockerHubPushAmd64, DockerHubPushAarch64]
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Download changed aarch64 images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}
- name: Download changed amd64 images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}
@@ -77,7 +75,7 @@ jobs:
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: changed_images
path: ${{ runner.temp }}/changed_images.json
@@ -96,13 +94,12 @@ jobs:
${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
RCSK
EOF
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Download changed images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.TEMP_PATH }}


@@ -19,12 +19,10 @@ jobs:
TEMP_PATH=${{runner.temp}}/keeper_jepsen
REPO_COPY=${{runner.temp}}/keeper_jepsen/ClickHouse
EOF
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0
- name: Jepsen Test
run: |
@@ -50,12 +48,10 @@ jobs:
# TEMP_PATH=${{runner.temp}}/server_jepsen
# REPO_COPY=${{runner.temp}}/server_jepsen/ClickHouse
# EOF
# - name: Clear repository
# run: |
# sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
# - name: Check out repository code
# uses: actions/checkout@v2
# uses: ClickHouse/checkout@v1
# with:
# clear-repository: true
# fetch-depth: 0
# - name: Jepsen Test
# run: |

File diff suppressed because it is too large.


@@ -16,34 +16,32 @@ jobs:
DockerHubPushAarch64:
runs-on: [self-hosted, style-checker-aarch64]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix aarch64 --all
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
DockerHubPushAmd64:
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix amd64 --all
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
@@ -51,18 +49,17 @@ jobs:
needs: [DockerHubPushAmd64, DockerHubPushAarch64]
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Download changed aarch64 images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}
- name: Download changed amd64 images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}
@@ -71,7 +68,7 @@ jobs:
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: changed_images
path: ${{ runner.temp }}/changed_images.json
@@ -90,22 +87,17 @@ jobs:
EOF
echo "COVERITY_TOKEN=${{ secrets.COVERITY_TOKEN }}" >> "$GITHUB_ENV"
- name: Download changed images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
id: coverity-checkout
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
fetch-depth: 0 # otherwise we will have no info about contributors
clear-repository: true
submodules: true
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -134,8 +126,10 @@ jobs:
CC: clang-15
CXX: clang++-15
steps:
- uses: actions/checkout@v2
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0 # Shallow clones should be disabled for a better relevancy of analysis
submodules: true
- name: Set up JDK 11

File diff suppressed because it is too large.


@@ -20,7 +20,7 @@ jobs:
REPO_COPY=${{runner.temp}}/release_packages/ClickHouse
EOF
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
# Always use the most recent script version
ref: master
@@ -50,12 +50,10 @@ jobs:
DockerServerImages:
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0 # otherwise we will have no version info
- name: Check docker clickhouse/clickhouse-server building
run: |

File diff suppressed because it is too large.


@@ -34,7 +34,7 @@ jobs:
run: |
echo "GITHUB_TAG=${GITHUB_REF#refs/tags/}" >> "$GITHUB_ENV"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
ref: master
fetch-depth: 0
@@ -51,7 +51,7 @@ jobs:
--gh-user-or-token="$GITHUB_TOKEN" --jobs=5 \
--output="/ClickHouse/docs/changelogs/${GITHUB_TAG}.md" "${GITHUB_TAG}"
git add "./docs/changelogs/${GITHUB_TAG}.md"
python ./utils/security-generator/generate_security.py > SECURITY.md
python3 ./utils/security-generator/generate_security.py > SECURITY.md
git diff HEAD
- name: Create Pull Request
uses: peter-evans/create-pull-request@v3
@@ -61,6 +61,7 @@ jobs:
committer: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
commit-message: Update version_date.tsv and changelogs after ${{ env.GITHUB_TAG }}
branch: auto/${{ env.GITHUB_TAG }}
assignees: ${{ github.event.sender.login }} # assign the PR to the tag pusher
delete-branch: true
title: Update version_date.tsv and changelogs after ${{ env.GITHUB_TAG }}
labels: do not test


@@ -21,12 +21,10 @@ jobs:
REPO_COPY=${{runner.temp}}/codebrowser/ClickHouse
IMAGES_PATH=${{runner.temp}}/images_path
EOF
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: 'true'
- name: Codebrowser
run: |

.gitignore

@@ -17,6 +17,7 @@
# logs
*.log
*.debuglog
*.stderr
*.stdout
@@ -157,3 +158,9 @@ website/package-lock.json
# temporary test files
tests/queries/0_stateless/test_*
tests/queries/0_stateless/*.binary
tests/queries/0_stateless/*.generated-expect
# rust
/rust/**/target
# It is autogenerated from *.in
/rust/**/.cargo/config.toml

.gitmodules

@@ -104,13 +104,13 @@
url = https://github.com/ClickHouse/aws-sdk-cpp.git
[submodule "aws-c-event-stream"]
path = contrib/aws-c-event-stream
url = https://github.com/ClickHouse/aws-c-event-stream.git
url = https://github.com/awslabs/aws-c-event-stream.git
[submodule "aws-c-common"]
path = contrib/aws-c-common
url = https://github.com/ClickHouse/aws-c-common.git
[submodule "aws-checksums"]
path = contrib/aws-checksums
url = https://github.com/ClickHouse/aws-checksums.git
url = https://github.com/awslabs/aws-checksums.git
[submodule "contrib/curl"]
path = contrib/curl
url = https://github.com/curl/curl.git
@@ -269,9 +269,6 @@
[submodule "contrib/vectorscan"]
path = contrib/vectorscan
url = https://github.com/VectorCamp/vectorscan.git
[submodule "contrib/liburing"]
path = contrib/liburing
url = https://github.com/axboe/liburing.git
[submodule "contrib/c-ares"]
path = contrib/c-ares
url = https://github.com/ClickHouse/c-ares
@@ -287,3 +284,43 @@
[submodule "contrib/xxHash"]
path = contrib/xxHash
url = https://github.com/Cyan4973/xxHash.git
[submodule "contrib/openssl"]
path = contrib/openssl
url = https://github.com/openssl/openssl
branch = openssl-3.0
[submodule "contrib/google-benchmark"]
path = contrib/google-benchmark
url = https://github.com/google/benchmark.git
[submodule "contrib/libdivide"]
path = contrib/libdivide
url = https://github.com/ridiculousfish/libdivide.git
[submodule "contrib/aws-crt-cpp"]
path = contrib/aws-crt-cpp
url = https://github.com/ClickHouse/aws-crt-cpp.git
[submodule "contrib/aws-c-io"]
path = contrib/aws-c-io
url = https://github.com/ClickHouse/aws-c-io.git
[submodule "contrib/aws-c-mqtt"]
path = contrib/aws-c-mqtt
url = https://github.com/awslabs/aws-c-mqtt.git
[submodule "contrib/aws-c-auth"]
path = contrib/aws-c-auth
url = https://github.com/awslabs/aws-c-auth.git
[submodule "contrib/aws-c-cal"]
path = contrib/aws-c-cal
url = https://github.com/ClickHouse/aws-c-cal.git
[submodule "contrib/aws-c-sdkutils"]
path = contrib/aws-c-sdkutils
url = https://github.com/awslabs/aws-c-sdkutils.git
[submodule "contrib/aws-c-http"]
path = contrib/aws-c-http
url = https://github.com/awslabs/aws-c-http.git
[submodule "contrib/aws-c-s3"]
path = contrib/aws-c-s3
url = https://github.com/awslabs/aws-c-s3.git
[submodule "contrib/aws-c-compression"]
path = contrib/aws-c-compression
url = https://github.com/awslabs/aws-c-compression.git
[submodule "contrib/aws-s2n-tls"]
path = contrib/aws-s2n-tls
url = https://github.com/aws/s2n-tls.git

.vimrc (deleted file)

@@ -1,2 +0,0 @@
au BufRead,BufNewFile ./* set tabstop=4 softtabstop=0 expandtab shiftwidth=4 smarttab tags=tags,../tags


@@ -1,4 +1,5 @@
### Table of Contents
**[ClickHouse release v22.12, 2022-12-15](#2212)**<br/>
**[ClickHouse release v22.11, 2022-11-17](#2211)**<br/>
**[ClickHouse release v22.10, 2022-10-25](#2210)**<br/>
**[ClickHouse release v22.9, 2022-09-22](#229)**<br/>
@@ -12,6 +13,124 @@
**[ClickHouse release v22.1, 2022-01-18](#221)**<br/>
**[Changelog for 2021](https://clickhouse.com/docs/en/whats-new/changelog/2021/)**<br/>
# 2022 Changelog
### <a id="2212"></a> ClickHouse release 22.12, 2022-12-15
#### Backward Incompatible Change
* Add `GROUP BY ALL` syntax: [#37631](https://github.com/ClickHouse/ClickHouse/issues/37631). [#42265](https://github.com/ClickHouse/ClickHouse/pull/42265) ([刘陶峰](https://github.com/taofengliu)). If you have a column or an alias named `all` and write `GROUP BY all` without the intention of grouping by all the columns, the query will have different semantics. To keep the old semantics, put `all` into backticks or double quotes `"all"` to make it an identifier instead of a keyword.
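A minimal sketch of the new behavior (table and column names are hypothetical, not from the PR):

```sql
-- Groups by every non-aggregate expression in the SELECT list:
SELECT user_id, event_type, count() FROM events GROUP BY ALL;
-- equivalent to: GROUP BY user_id, event_type

-- A column literally named `all` must be quoted to keep the old semantics:
SELECT `all`, count() FROM t GROUP BY `all`;
```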
#### Upgrade Notes
* Fixed backward incompatibility in (de)serialization of states of `min`, `max`, `any*`, `argMin`, `argMax` aggregate functions with `String` argument. The incompatibility affects 22.9, 22.10 and 22.11 branches (fixed since 22.9.6, 22.10.4 and 22.11.2 respectively). Some minor releases of 22.3, 22.7 and 22.8 branches are also affected: 22.3.13...22.3.14 (fixed since 22.3.15), 22.8.6...22.8.9 (fixed since 22.8.10), 22.7.6 and newer (will not be fixed in 22.7, we recommend upgrading from 22.7.* to 22.8.10 or newer). This release note does not concern users that have never used affected versions. Incompatible versions append an extra `'\0'` to strings when reading states of the aggregate functions mentioned above. For example, if an older version saved state of `anyState('foobar')` to `state_column` then the incompatible version will print `'foobar\0'` on `anyMerge(state_column)`. Also incompatible versions write states of the aggregate functions without trailing `'\0'`. Newer versions (that have the fix) can correctly read data written by all versions including incompatible versions, except one corner case. If an incompatible version saved a state with a string that actually ends with a null character, then newer versions will trim the trailing `'\0'` when reading the state of the affected aggregate function. For example, if an incompatible version saved state of `anyState('abrac\0dabra\0')` to `state_column` then newer versions will print `'abrac\0dabra'` on `anyMerge(state_column)`. The issue also affects distributed queries when an incompatible version works in a cluster together with older or newer versions. [#43038](https://github.com/ClickHouse/ClickHouse/pull/43038) ([Alexander Tokmakov](https://github.com/tavplubix), [Raúl Marín](https://github.com/Algunenano)). Note: all the official ClickHouse builds already include the patches. This is not necessarily true for unofficial third-party builds that should be avoided.
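A sketch of the behavior described above, assuming a hypothetical table `t` with an `AggregateFunction(any, String)` column `state_column`:

```sql
-- State originally written as anyState('foobar').
-- An affected version appends a spurious null byte when reading it back:
SELECT anyMerge(state_column) FROM t;  -- prints 'foobar\0' on affected versions
```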
#### New Feature
* Add `BSONEachRow` input/output format. In this format, ClickHouse formats/parses each row as a separate BSON document and each column is formatted/parsed as a single BSON field with the column name as the key. [#42033](https://github.com/ClickHouse/ClickHouse/pull/42033) ([mark-polokhov](https://github.com/mark-polokhov)).
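For illustration, the format name is used like any other; the target table here is hypothetical:

```sql
SELECT number, toString(number) AS s FROM system.numbers LIMIT 3 FORMAT BSONEachRow;
-- and for input: INSERT INTO t FORMAT BSONEachRow <BSON documents>
```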
* Add `grace_hash` JOIN algorithm, it can be enabled with `SET join_algorithm = 'grace_hash'`. [#38191](https://github.com/ClickHouse/ClickHouse/pull/38191) ([BigRedEye](https://github.com/BigRedEye), [Vladimir C](https://github.com/vdimir)).
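The entry gives the exact setting; a usage sketch with hypothetical tables:

```sql
SET join_algorithm = 'grace_hash';
SELECT l.key, r.value
FROM big_left AS l
JOIN big_right AS r ON l.key = r.key;
```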
* Allow configuring password complexity rules and checks for creating and changing users. [#43719](https://github.com/ClickHouse/ClickHouse/pull/43719) ([Nikolay Degterinsky](https://github.com/evillique)).
* Mask sensitive information in logs; mask secret parts in the output of queries `SHOW CREATE TABLE` and `SELECT FROM system.tables`. Also resolves [#41418](https://github.com/ClickHouse/ClickHouse/issues/41418). [#43227](https://github.com/ClickHouse/ClickHouse/pull/43227) ([Vitaly Baranov](https://github.com/vitlibar)).
* Add `GROUP BY ALL` syntax: [#37631](https://github.com/ClickHouse/ClickHouse/issues/37631). [#42265](https://github.com/ClickHouse/ClickHouse/pull/42265) ([刘陶峰](https://github.com/taofengliu)).
* Add `FROM table SELECT column` syntax. [#41095](https://github.com/ClickHouse/ClickHouse/pull/41095) ([Nikolay Degterinsky](https://github.com/evillique)).
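A quick illustration of the FROM-first form:

```sql
FROM system.numbers SELECT number LIMIT 3;
```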
* Added function `concatWithSeparator` and `concat_ws` as an alias for Spark SQL compatibility. A function `concatWithSeparatorAssumeInjective` added as a variant to enable GROUP BY optimization, similarly to `concatAssumeInjective`. [#43749](https://github.com/ClickHouse/ClickHouse/pull/43749) ([李扬](https://github.com/taiyang-li)).
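A sketch of both spellings:

```sql
SELECT concatWithSeparator('-', 'ab', 'cd') AS x,  -- 'ab-cd'
       concat_ws('-', 'ab', 'cd') AS y;            -- Spark SQL-style alias, same result
```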
* Added `multiplyDecimal` and `divideDecimal` functions for decimal operations with fixed precision. [#42438](https://github.com/ClickHouse/ClickHouse/pull/42438) ([Andrey Zvonov](https://github.com/zvonand)).
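A sketch (the exact argument spellings below are an assumption; the functions operate on decimal values):

```sql
SELECT multiplyDecimal(toDecimal64(2.5, 2), toDecimal64(4.2, 2)) AS product,
       divideDecimal(toDecimal64(4.2, 2), toDecimal64(2.5, 2)) AS quotient;
```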
* Added `system.moves` table with list of currently moving parts. [#42660](https://github.com/ClickHouse/ClickHouse/pull/42660) ([Sergei Trifonov](https://github.com/serxa)).
* Add support for embedded Prometheus endpoint for ClickHouse Keeper. [#43087](https://github.com/ClickHouse/ClickHouse/pull/43087) ([Antonio Andelic](https://github.com/antonio2368)).
* Support numeric literals with `_` as the separator, for example, `1_000_000`. [#43925](https://github.com/ClickHouse/ClickHouse/pull/43925) ([jh0x](https://github.com/jh0x)).
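For example:

```sql
SELECT 1_000_000 AS million;  -- parsed the same as 1000000
```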
* Added possibility to use an array as a second parameter for `cutURLParameter` function. It will cut multiple parameters. Close [#6827](https://github.com/ClickHouse/ClickHouse/issues/6827). [#43788](https://github.com/ClickHouse/ClickHouse/pull/43788) ([Roman Vasin](https://github.com/rvasin)).
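A sketch of the array form (the URL is made up):

```sql
SELECT cutURLParameter('http://example.com/?a=1&b=2&c=3', ['a', 'c']);
-- expected result: 'http://example.com/?b=2'
```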
* Add a column with the expression of the index in the `system.data_skipping_indices` table. [#43308](https://github.com/ClickHouse/ClickHouse/pull/43308) ([Guillaume Tassery](https://github.com/YiuRULE)).
* Add column `engine_full` to system table `databases` so that users can access the entire engine definition of a database via system tables. [#43468](https://github.com/ClickHouse/ClickHouse/pull/43468) ([凌涛](https://github.com/lingtaolf)).
* New hash function [xxh3](https://github.com/Cyan4973/xxHash) added. Also, the performance of `xxHash32` and `xxHash64` are improved on ARM thanks to a library update. [#43411](https://github.com/ClickHouse/ClickHouse/pull/43411) ([Nikita Taranov](https://github.com/nickitat)).
* Added support to define constraints for merge tree settings. For example you can forbid overriding the `storage_policy` by users. [#43903](https://github.com/ClickHouse/ClickHouse/pull/43903) ([Sergei Trifonov](https://github.com/serxa)).
* Add a new setting `input_format_json_read_objects_as_strings` that allows the parsing of nested JSON objects into Strings in all JSON input formats. This setting is disabled by default. [#44052](https://github.com/ClickHouse/ClickHouse/pull/44052) ([Kruglov Pavel](https://github.com/Avogar)).
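A sketch of the intended effect (table and input are hypothetical):

```sql
SET input_format_json_read_objects_as_strings = 1;
-- The nested object can then be read into a String column:
INSERT INTO t (id, payload) FORMAT JSONEachRow {"id": 1, "payload": {"x": 1}}
```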
#### Experimental Feature
* Support deduplication for asynchronous inserts. Before this change, async inserts did not support deduplication, because multiple small inserts coexisted in one inserted batch. Closes [#38075](https://github.com/ClickHouse/ClickHouse/issues/38075). [#43304](https://github.com/ClickHouse/ClickHouse/pull/43304) ([Han Fei](https://github.com/hanfei1991)).
* Add support for cosine distance for the experimental Annoy (vector similarity search) index. [#42778](https://github.com/ClickHouse/ClickHouse/pull/42778) ([Filatenkov Artur](https://github.com/FArthur-cmd)).
* Add `CREATE / ALTER / DROP NAMED COLLECTION` queries. [#43252](https://github.com/ClickHouse/ClickHouse/pull/43252) ([Kseniia Sumarokova](https://github.com/kssenii)). This feature is under development and the queries are not effective as of version 22.12. This changelog entry is added only to avoid confusion. Restrict default access to named collections to the user defined in config. This requires that `show_named_collections = 1` is set to be able to see them. [#43325](https://github.com/ClickHouse/ClickHouse/pull/43325) ([Kseniia Sumarokova](https://github.com/kssenii)). The `system.named_collections` table is introduced [#43147](https://github.com/ClickHouse/ClickHouse/pull/43147) ([Kseniia Sumarokova](https://github.com/kssenii)).
#### Performance Improvement
* Add settings `max_streams_for_merge_tree_reading` and `allow_asynchronous_read_from_io_pool_for_merge_tree`. Setting `max_streams_for_merge_tree_reading` limits the number of reading streams for MergeTree tables. Setting `allow_asynchronous_read_from_io_pool_for_merge_tree` enables a background I/O pool to read from `MergeTree` tables. This may increase performance for I/O bound queries if used together with `max_streams_to_max_threads_ratio` or `max_streams_for_merge_tree_reading`. [#43260](https://github.com/ClickHouse/ClickHouse/pull/43260) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). This improves performance by up to 100 times in the case of high-latency storage, a low number of CPUs, and a high number of data parts.
* Settings `merge_tree_min_rows_for_concurrent_read_for_remote_filesystem/merge_tree_min_bytes_for_concurrent_read_for_remote_filesystem` did not respect adaptive granularity. Fat rows did not decrease the number of read rows (as was done for `merge_tree_min_rows_for_concurrent_read/merge_tree_min_bytes_for_concurrent_read`), which could lead to high memory usage when using remote filesystems. [#43965](https://github.com/ClickHouse/ClickHouse/pull/43965) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Optimized the number of list requests to ZooKeeper or ClickHouse Keeper when selecting a part to merge. Previously it could produce thousands of requests in some cases. Fixes [#43647](https://github.com/ClickHouse/ClickHouse/issues/43647). [#43675](https://github.com/ClickHouse/ClickHouse/pull/43675) ([Alexander Tokmakov](https://github.com/tavplubix)).
* The optimization is now skipped if `max_size_to_preallocate_for_aggregation` has too small a value. The default value of this setting was increased to `10^8`. [#43945](https://github.com/ClickHouse/ClickHouse/pull/43945) ([Nikita Taranov](https://github.com/nickitat)).
* Speed up server shutdown by avoiding the cleanup of old data parts, because it is unnecessary after https://github.com/ClickHouse/ClickHouse/pull/41145. [#43760](https://github.com/ClickHouse/ClickHouse/pull/43760) ([Sema Checherinda](https://github.com/CheSema)).
* Merging on initiator now uses the same memory bound approach as merging of local aggregation results if `enable_memory_bound_merging_of_aggregation_results` is set. [#40879](https://github.com/ClickHouse/ClickHouse/pull/40879) ([Nikita Taranov](https://github.com/nickitat)).
* Keeper improvement: try syncing logs to disk in parallel with replication. [#43450](https://github.com/ClickHouse/ClickHouse/pull/43450) ([Antonio Andelic](https://github.com/antonio2368)).
* Keeper improvement: requests are batched more often. The batching can be controlled with the new setting `max_requests_quick_batch_size`. [#43686](https://github.com/ClickHouse/ClickHouse/pull/43686) ([Antonio Andelic](https://github.com/antonio2368)).
#### Improvement
* Implement referential dependencies and use them to create tables in the correct order while restoring from a backup. [#43834](https://github.com/ClickHouse/ClickHouse/pull/43834) ([Vitaly Baranov](https://github.com/vitlibar)).
* Substitute UDFs in `CREATE` query to avoid failures during loading at startup. Additionally, UDFs can now be used as `DEFAULT` expressions for columns. [#43539](https://github.com/ClickHouse/ClickHouse/pull/43539) ([Antonio Andelic](https://github.com/antonio2368)).
* Change how the following queries delete parts: TRUNCATE TABLE, ALTER TABLE DROP PART, ALTER TABLE DROP PARTITION. Now, these queries make empty parts which cover the old parts. This makes the TRUNCATE query work without an exclusive lock, which means concurrent reads aren't locked. Durability is also achieved in all those queries: if the request succeeds, then no resurrected parts appear later. Note that atomicity is achieved only with transaction scope. [#41145](https://github.com/ClickHouse/ClickHouse/pull/41145) ([Sema Checherinda](https://github.com/CheSema)).
* `SET param_x` query no longer requires manual string serialization for the value of the parameter. For example, query `SET param_a = '[\'a\', \'b\']'` can now be written like `SET param_a = ['a', 'b']`. [#41874](https://github.com/ClickHouse/ClickHouse/pull/41874) ([Nikolay Degterinsky](https://github.com/evillique)).
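Before and after, with the parameter consumed via the usual substitution syntax:

```sql
SET param_a = '[\'a\', \'b\']';  -- old, manually serialized
SET param_a = ['a', 'b'];        -- new
SELECT {a:Array(String)};
```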
* Show read rows in the progress indication while reading from STDIN from client. Closes [#43423](https://github.com/ClickHouse/ClickHouse/issues/43423). [#43442](https://github.com/ClickHouse/ClickHouse/pull/43442) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Show progress bar while reading from s3 table function / engine. [#43454](https://github.com/ClickHouse/ClickHouse/pull/43454) ([Kseniia Sumarokova](https://github.com/kssenii)).
* `filesystemAvailable` and related functions support one optional argument with disk name, and change `filesystemFree` to `filesystemUnreserved`. Closes [#35076](https://github.com/ClickHouse/ClickHouse/issues/35076). [#42064](https://github.com/ClickHouse/ClickHouse/pull/42064) ([flynn](https://github.com/ucasfl)).
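A sketch, assuming a disk named 'default':

```sql
SELECT filesystemAvailable('default') AS available,
       filesystemUnreserved('default') AS unreserved;  -- the disk argument is optional
```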
* Integration with LDAP: increased the default value of search_limit to 256, and added an LDAP server config option to change it to an arbitrary value. Closes: [#42276](https://github.com/ClickHouse/ClickHouse/issues/42276). [#42461](https://github.com/ClickHouse/ClickHouse/pull/42461) ([Vasily Nemkov](https://github.com/Enmk)).
* Allow the removal of sensitive information (see the `query_masking_rules` in the configuration file) from the exception messages as well. Resolves [#41418](https://github.com/ClickHouse/ClickHouse/issues/41418). [#42940](https://github.com/ClickHouse/ClickHouse/pull/42940) ([filimonov](https://github.com/filimonov)).
* Support queries like `SHOW FULL TABLES ...` for MySQL compatibility. [#43910](https://github.com/ClickHouse/ClickHouse/pull/43910) ([Filatenkov Artur](https://github.com/FArthur-cmd)).
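For example (MySQL-style syntax):

```sql
SHOW FULL TABLES FROM default;
```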
* Keeper improvement: Add 4lw command `rqld` which can manually assign a node as leader. [#43026](https://github.com/ClickHouse/ClickHouse/pull/43026) ([JackyWoo](https://github.com/JackyWoo)).
* Apply connection timeout settings for Distributed async INSERT from the query. [#43156](https://github.com/ClickHouse/ClickHouse/pull/43156) ([Azat Khuzhin](https://github.com/azat)).
* The `unhex` function now supports `FixedString` arguments (issue [#42369](https://github.com/ClickHouse/ClickHouse/issues/42369)). [#43207](https://github.com/ClickHouse/ClickHouse/pull/43207) ([DR](https://github.com/freedomDR)).
* Priority is given to deleting completely expired parts according to the TTL rules, see [#42869](https://github.com/ClickHouse/ClickHouse/issues/42869). [#43222](https://github.com/ClickHouse/ClickHouse/pull/43222) ([zhongyuankai](https://github.com/zhongyuankai)).
* More precise and reactive CPU load indication in clickhouse-client. [#43307](https://github.com/ClickHouse/ClickHouse/pull/43307) ([Sergei Trifonov](https://github.com/serxa)).
* Support reading of subcolumns of nested types from storage `S3` and table function `s3` with formats `Parquet`, `Arrow` and `ORC`. [#43329](https://github.com/ClickHouse/ClickHouse/pull/43329) ([chen](https://github.com/xiedeyantu)).
* Add `table_uuid` column to the `system.parts` table. [#43404](https://github.com/ClickHouse/ClickHouse/pull/43404) ([Azat Khuzhin](https://github.com/azat)).
* Added client option to display the number of locally processed rows in non-interactive mode (`--print-num-processed-rows`). [#43407](https://github.com/ClickHouse/ClickHouse/pull/43407) ([jh0x](https://github.com/jh0x)).
* Implement `aggregation-in-order` optimization on top of a query plan. It is enabled by default (but works only together with `optimize_aggregation_in_order`, which is disabled by default). Set `query_plan_aggregation_in_order = 0` to use the previous AST-based version. [#43592](https://github.com/ClickHouse/ClickHouse/pull/43592) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Allow to collect profile events with `trace_type = 'ProfileEvent'` to `system.trace_log` on each increment with current stack, profile event name and value of the increment. It can be enabled by the setting `trace_profile_events` and used to investigate performance of queries. [#43639](https://github.com/ClickHouse/ClickHouse/pull/43639) ([Anton Popov](https://github.com/CurtizJ)).
* Add a new setting `input_format_max_binary_string_size` to limit string size in RowBinary format. [#43842](https://github.com/ClickHouse/ClickHouse/pull/43842) ([Kruglov Pavel](https://github.com/Avogar)).
* When ClickHouse requests a remote HTTP server, and it returns an error, the numeric HTTP code was not displayed correctly in the exception message. Closes [#43919](https://github.com/ClickHouse/ClickHouse/issues/43919). [#43920](https://github.com/ClickHouse/ClickHouse/pull/43920) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Correctly report errors in queries even when multiple JOINs optimization is taking place. [#43583](https://github.com/ClickHouse/ClickHouse/pull/43583) ([Salvatore](https://github.com/tbsal)).
#### Build/Testing/Packaging Improvement
* Systemd integration now correctly notifies systemd that the service is really started and is ready to serve requests. [#43400](https://github.com/ClickHouse/ClickHouse/pull/43400) ([Коренберг Марк](https://github.com/socketpair)).
* Added the option to build ClickHouse with OpenSSL using the [OpenSSL FIPS Module](https://www.openssl.org/docs/man3.0/man7/fips_module.html). This build type has not been tested to validate security and is not supported. [#43991](https://github.com/ClickHouse/ClickHouse/pull/43991) ([Boris Kuschel](https://github.com/bkuschel)).
* Upgrade to the new `DeflateQpl` compression codec which has been implemented in a previous PR (details: https://github.com/ClickHouse/ClickHouse/pull/39494). This patch improves the codec in the following aspects: 1. Upgrade from QPL v0.2.0 to QPL v0.3.0 [Intel® Query Processing Library (QPL)](https://github.com/intel/qpl). 2. Improve the CMake file to fix QPL build issues for QPL v0.3.0. 3. Link the QPL library with libaccel-config at build time instead of runtime loading on QPL v0.2.0 (dlopen). 4. Fix a log print issue in CompressionCodecDeflateQpl.cpp. [#44024](https://github.com/ClickHouse/ClickHouse/pull/44024) ([jasperzhu](https://github.com/jinjunzh)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Fixed bug which could lead to deadlock while using asynchronous inserts. [#43233](https://github.com/ClickHouse/ClickHouse/pull/43233) ([Anton Popov](https://github.com/CurtizJ)).
* Fix some incorrect logic in AST level optimization `optimize_normalize_count_variants`. [#43873](https://github.com/ClickHouse/ClickHouse/pull/43873) ([Duc Canh Le](https://github.com/canhld94)).
* Fix a case when mutations are not making progress when checksums do not match between replicas (e.g. caused by a change in data format on an upgrade). [#36877](https://github.com/ClickHouse/ClickHouse/pull/36877) ([nvartolomei](https://github.com/nvartolomei)).
* Fix the `skip_unavailable_shards` optimization which did not work with the `hdfsCluster` table function. [#43236](https://github.com/ClickHouse/ClickHouse/pull/43236) ([chen](https://github.com/xiedeyantu)).
* Fix `s3` support for the `?` wildcard. Closes [#42731](https://github.com/ClickHouse/ClickHouse/issues/42731). [#43253](https://github.com/ClickHouse/ClickHouse/pull/43253) ([chen](https://github.com/xiedeyantu)).
* Fix functions `arrayFirstOrNull` and `arrayLastOrNull` when the array contains `Nullable` elements. [#43274](https://github.com/ClickHouse/ClickHouse/pull/43274) ([Duc Canh Le](https://github.com/canhld94)).
* Fix incorrect `UserTimeMicroseconds`/`SystemTimeMicroseconds` accounting related to Kafka tables. [#42791](https://github.com/ClickHouse/ClickHouse/pull/42791) ([Azat Khuzhin](https://github.com/azat)).
* Do not suppress exceptions in `web` disks. Fix retries for the `web` disk. [#42800](https://github.com/ClickHouse/ClickHouse/pull/42800) ([Azat Khuzhin](https://github.com/azat)).
* Fixed a (logical) race condition between inserts and dropping materialized views. A race condition happened when a Materialized View was dropped at the same time as an INSERT, where the MVs were present as a dependency of the insert at the beginning of the execution, but the table had been dropped by the time the insert chain tried to access it, producing either an `UNKNOWN_TABLE` or `TABLE_IS_DROPPED` exception and stopping the insertion. After this change, we avoid these exceptions and just continue with the insert if the dependency is gone. [#43161](https://github.com/ClickHouse/ClickHouse/pull/43161) ([AlfVII](https://github.com/AlfVII)).
* Fix undefined behavior in the `quantiles` function, which might lead to uninitialized memory. Found by fuzzer. This closes [#44066](https://github.com/ClickHouse/ClickHouse/issues/44066). [#44067](https://github.com/ClickHouse/ClickHouse/pull/44067) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Additional check on zero uncompressed size is added to `CompressionCodecDelta`. [#43255](https://github.com/ClickHouse/ClickHouse/pull/43255) ([Nikita Taranov](https://github.com/nickitat)).
* Flatten arrays from Parquet to avoid an issue with inconsistent data in arrays. These incorrect files can be generated by Apache Iceberg. [#43297](https://github.com/ClickHouse/ClickHouse/pull/43297) ([Arthur Passos](https://github.com/arthurpassos)).
* Fix bad cast from `LowCardinality` column when using short circuit function execution. [#43311](https://github.com/ClickHouse/ClickHouse/pull/43311) ([Kruglov Pavel](https://github.com/Avogar)).
* Fixed queries with `SAMPLE BY` with prewhere optimization on tables using `Merge` engine. [#43315](https://github.com/ClickHouse/ClickHouse/pull/43315) ([Antonio Andelic](https://github.com/antonio2368)).
* Check and compare the content of the `format_version` file in `MergeTreeData` so that tables can be loaded even if the storage policy was changed. [#43328](https://github.com/ClickHouse/ClickHouse/pull/43328) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix possible (very unlikely) "No column to rollback" logical error during INSERT into `Buffer` tables. [#43336](https://github.com/ClickHouse/ClickHouse/pull/43336) ([Azat Khuzhin](https://github.com/azat)).
* Fix a bug that allowed the parser to parse an unlimited number of round brackets into one function if `allow_function_parameters` is set. [#43350](https://github.com/ClickHouse/ClickHouse/pull/43350) ([Nikolay Degterinsky](https://github.com/evillique)).
* `MaterializeMySQL` (experimental feature) now supports the DDL `drop table t1, t2` and is compatible with most of MySQL's DROP DDL. [#43366](https://github.com/ClickHouse/ClickHouse/pull/43366) ([zzsmdfj](https://github.com/zzsmdfj)).
* `session_log` (experimental feature): Fixed the inability to log in (because of a failure to create the session_log entry) in a very rare case of misconfigured settings profiles. [#42641](https://github.com/ClickHouse/ClickHouse/pull/42641) ([Vasily Nemkov](https://github.com/Enmk)).
* Fix possible `Cannot create non-empty column with type Nothing` in functions `if`/`multiIf`. Closes [#43356](https://github.com/ClickHouse/ClickHouse/issues/43356). [#43368](https://github.com/ClickHouse/ClickHouse/pull/43368) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix a bug when a row level filter uses the default value of a column. [#43387](https://github.com/ClickHouse/ClickHouse/pull/43387) ([Alexander Gololobov](https://github.com/davenger)).
* Fixed a bug where a query with `DISTINCT` + `LIMIT BY` + `LIMIT` could return fewer rows than expected. Fixes [#43377](https://github.com/ClickHouse/ClickHouse/issues/43377). [#43410](https://github.com/ClickHouse/ClickHouse/pull/43410) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix `sumMap` for `Nullable(Decimal(...))`. [#43414](https://github.com/ClickHouse/ClickHouse/pull/43414) ([Azat Khuzhin](https://github.com/azat)).
* Fix `date_diff` for hour/minute on macOS. Close [#42742](https://github.com/ClickHouse/ClickHouse/issues/42742). [#43466](https://github.com/ClickHouse/ClickHouse/pull/43466) ([zzsmdfj](https://github.com/zzsmdfj)).
* Fix incorrect memory accounting because of merges/mutations. [#43516](https://github.com/ClickHouse/ClickHouse/pull/43516) ([Azat Khuzhin](https://github.com/azat)).
* Fixed primary key analysis with conditions involving `toString(enum)`. [#43596](https://github.com/ClickHouse/ClickHouse/pull/43596) ([Nikita Taranov](https://github.com/nickitat)). This error was found by @tisonkun.
* Ensure consistency when `clickhouse-copier` updates status and `attach_is_done` in Keeper after partition attach is done. [#43602](https://github.com/ClickHouse/ClickHouse/pull/43602) ([lzydmxy](https://github.com/lzydmxy)).
* During the recovery of a lost replica of a `Replicated` database (experimental feature), there could be a situation where two table names need to be swapped atomically (using EXCHANGE). Previously we tried to use two RENAME queries, which obviously failed and, moreover, failed the whole recovery process of the database replica. [#43628](https://github.com/ClickHouse/ClickHouse/pull/43628) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix the case when the `s3Cluster` function throws `NOT_FOUND_COLUMN_IN_BLOCK` error. Closes [#43534](https://github.com/ClickHouse/ClickHouse/issues/43534). [#43629](https://github.com/ClickHouse/ClickHouse/pull/43629) ([chen](https://github.com/xiedeyantu)).
* Fix possible logical error `Array sizes mismatched` while parsing a JSON object with arrays that have the same key names but different nesting levels. Closes [#43569](https://github.com/ClickHouse/ClickHouse/issues/43569). [#43693](https://github.com/ClickHouse/ClickHouse/pull/43693) ([Kruglov Pavel](https://github.com/Avogar)).
* Fixed possible exception in the case of distributed `GROUP BY` with an `ALIAS` column among aggregation keys. [#43709](https://github.com/ClickHouse/ClickHouse/pull/43709) ([Nikita Taranov](https://github.com/nickitat)).
* Fix bug which can lead to broken projections if zero-copy replication (experimental feature) is enabled and used. [#43764](https://github.com/ClickHouse/ClickHouse/pull/43764) ([alesapin](https://github.com/alesapin)).
* Fix multipart upload for very large objects in AWS S3. [#43824](https://github.com/ClickHouse/ClickHouse/pull/43824) ([ianton-ru](https://github.com/ianton-ru)).
* Fixed `ALTER ... RESET SETTING` with `ON CLUSTER`. It could have been applied to one replica only. Fixes [#43843](https://github.com/ClickHouse/ClickHouse/issues/43843). [#43848](https://github.com/ClickHouse/ClickHouse/pull/43848) ([Elena Torró](https://github.com/elenatorro)).
* Fix a logical error in JOIN with the `Join` table engine on the right-hand side, if `USING` is used. [#43963](https://github.com/ClickHouse/ClickHouse/pull/43963) ([Vladimir C](https://github.com/vdimir)). Fix a bug with wrong order of keys in the `Join` table engine. [#44012](https://github.com/ClickHouse/ClickHouse/pull/44012) ([Vladimir C](https://github.com/vdimir)).
* Keeper fix: throw if the interserver port for Raft is already in use. [#43984](https://github.com/ClickHouse/ClickHouse/pull/43984) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix ORDER BY positional argument (example: `ORDER BY 1, 2`) in case of unneeded columns pruning from subqueries. Closes [#43964](https://github.com/ClickHouse/ClickHouse/issues/43964). [#43987](https://github.com/ClickHouse/ClickHouse/pull/43987) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fixed exception when a subquery contains HAVING but doesn't contain an actual aggregation. [#44051](https://github.com/ClickHouse/ClickHouse/pull/44051) ([Nikita Taranov](https://github.com/nickitat)).
* Fix a race in S3 multipart upload. This race could cause the error `Part number must be an integer between 1 and 10000, inclusive. (S3_ERROR)` while restoring from a backup. [#44065](https://github.com/ClickHouse/ClickHouse/pull/44065) ([Vitaly Baranov](https://github.com/vitlibar)).
### <a id="2211"></a> ClickHouse release 22.11, 2022-11-17
#### Backward Incompatible Change
@ -534,30 +653,30 @@
* Add counters (ProfileEvents) for cases when a query complexity limitation has been set and has been reached (a separate counter for `overflow_mode` = `break` and `throw`). For example, if you have set up `max_rows_to_read` with `read_overflow_mode = 'break'`, looking at the value of the `OverflowBreak` counter will allow distinguishing incomplete results. [#40205](https://github.com/ClickHouse/ClickHouse/pull/40205) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix memory accounting in case of "Memory limit exceeded" errors (previously [peak] memory usage took failed allocations into account). [#40249](https://github.com/ClickHouse/ClickHouse/pull/40249) ([Azat Khuzhin](https://github.com/azat)).
* Add metrics for filesystem cache: `FilesystemCacheSize` and `FilesystemCacheElements`. [#40260](https://github.com/ClickHouse/ClickHouse/pull/40260) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Support hadoop secure RPC transfer (hadoop.rpc.protection=privacy and hadoop.rpc.protection=integrity). [#39411](https://github.com/ClickHouse/ClickHouse/pull/39411) ([michael1589](https://github.com/michael1589)).
* Support Hadoop secure RPC transfer (hadoop.rpc.protection=privacy and hadoop.rpc.protection=integrity). [#39411](https://github.com/ClickHouse/ClickHouse/pull/39411) ([michael1589](https://github.com/michael1589)).
* Avoid continuously growing memory consumption of pattern cache when using functions multi(Fuzzy)Match(Any|AllIndices|AnyIndex)(). [#40264](https://github.com/ClickHouse/ClickHouse/pull/40264) ([Robert Schulze](https://github.com/rschu1ze)).
* Add cache for schema inference for file/s3/hdfs/url table functions. Now, schema inference will be performed only on the first query to the file, all subsequent queries to the same file will use the schema from cache if data wasn't changed. Add system table system.schema_inference_cache with all current schemas in cache and system queries SYSTEM DROP SCHEMA CACHE [FOR FILE/S3/HDFS/URL] to drop schemas from cache. [#38286](https://github.com/ClickHouse/ClickHouse/pull/38286) ([Kruglov Pavel](https://github.com/Avogar)).
* Add cache for schema inference for file/s3/hdfs/url table functions. Now, schema inference will be performed only on the first query to the file, all subsequent queries to the same file will use the schema from the cache if data has not changed. Add system table system.schema_inference_cache with all current schemas in cache and system queries SYSTEM DROP SCHEMA CACHE [FOR FILE/S3/HDFS/URL] to drop schemas from cache. [#38286](https://github.com/ClickHouse/ClickHouse/pull/38286) ([Kruglov Pavel](https://github.com/Avogar)).
* Add support for LARGE_BINARY/LARGE_STRING with Arrow (Closes [#32401](https://github.com/ClickHouse/ClickHouse/issues/32401)). [#40293](https://github.com/ClickHouse/ClickHouse/pull/40293) ([Josh Taylor](https://github.com/joshuataylor)).
#### Build/Testing/Packaging Improvement
* [ClickFiddle](https://fiddle.clickhouse.com/): A new tool for testing ClickHouse versions in read/write mode (**Igor Baliuk**).
* ClickHouse binary is made self-extracting [#35775](https://github.com/ClickHouse/ClickHouse/pull/35775) ([Yakov Olkhovskiy, Arthur Filatenkov](https://github.com/yakov-olkhovskiy)).
* Update tzdata to 2022b to support the new timezone changes. See https://github.com/google/cctz/pull/226. Chile's 2022 DST start is delayed from September 4 to September 11. Iran plans to stop observing DST permanently, after it falls back on 2022-09-21. There are corrections of the historical time zone of Asia/Tehran in the year 1977: Iran adopted standard time in 1935, not 1946. In 1977 it observed DST from 03-21 23:00 to 10-20 24:00; its 1978 transitions were on 03-24 and 08-05, not 03-20 and 10-20; and its spring 1979 transition was on 05-27, not 03-21 (https://data.iana.org/time-zones/tzdb/NEWS). ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Former packages used to install systemd.service file to `/etc`. The files there are marked as `conf` and are not cleaned out, and not updated automatically. This PR cleans them out. [#39323](https://github.com/ClickHouse/ClickHouse/pull/39323) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Update `tzdata` to 2022b to support the new timezone changes. See https://github.com/google/cctz/pull/226. Chile's 2022 DST start is delayed from September 4 to September 11. Iran plans to stop observing DST permanently after it falls back on 2022-09-21. There are corrections to the historical time zone of Asia/Tehran in the year 1977: Iran adopted standard time in 1935, not 1946. In 1977 it observed DST from 03-21 23:00 to 10-20 24:00; its 1978 transitions were on 03-24 and 08-05, not 03-20 and 10-20; and its spring 1979 transition was on 05-27, not 03-21 (https://data.iana.org/time-zones/tzdb/NEWS). ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Former packages used to install systemd.service file to `/etc`. The files there are marked as `conf` and are not cleaned out, and are not updated automatically. This PR cleans them out. [#39323](https://github.com/ClickHouse/ClickHouse/pull/39323) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Ensure LSan is effective. [#39430](https://github.com/ClickHouse/ClickHouse/pull/39430) ([Azat Khuzhin](https://github.com/azat)).
* TSAN has issues with clang-14 (https://github.com/google/sanitizers/issues/1552, https://github.com/google/sanitizers/issues/1540), so here we build the TSAN binaries with clang-15. [#39450](https://github.com/ClickHouse/ClickHouse/pull/39450) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Remove the option to build ClickHouse tools as separate executable programs. This fixes [#37847](https://github.com/ClickHouse/ClickHouse/issues/37847). [#39520](https://github.com/ClickHouse/ClickHouse/pull/39520) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Small preparations for build on s390x (which is big-endian). [#39627](https://github.com/ClickHouse/ClickHouse/pull/39627) ([Harry Lee](https://github.com/HarryLeeIBM)). [#39656](https://github.com/ClickHouse/ClickHouse/pull/39656) ([Harry Lee](https://github.com/HarryLeeIBM)). Fixed Endian issue in BitHelpers for s390x. [#39656](https://github.com/ClickHouse/ClickHouse/pull/39656) ([Harry Lee](https://github.com/HarryLeeIBM)). Implement a piece of code related to SipHash for s390x architecture (which is not supported by ClickHouse). [#39732](https://github.com/ClickHouse/ClickHouse/pull/39732) ([Harry Lee](https://github.com/HarryLeeIBM)). Fixed an Endian issue in Coordination snapshot code for s390x architecture (which is not supported by ClickHouse). [#39931](https://github.com/ClickHouse/ClickHouse/pull/39931) ([Harry Lee](https://github.com/HarryLeeIBM)). Fixed Endian issues in Codec code for s390x architecture (which is not supported by ClickHouse). [#40008](https://github.com/ClickHouse/ClickHouse/pull/40008) ([Harry Lee](https://github.com/HarryLeeIBM)). Fixed Endian issues in reading/writing BigEndian binary data in ReadHelpers and WriteHelpers code for s390x architecture (which is not supported by ClickHouse). [#40179](https://github.com/ClickHouse/ClickHouse/pull/40179) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Small preparations for build on s390x (which is big-endian). [#39627](https://github.com/ClickHouse/ClickHouse/pull/39627) ([Harry Lee](https://github.com/HarryLeeIBM)). [#39656](https://github.com/ClickHouse/ClickHouse/pull/39656) ([Harry Lee](https://github.com/HarryLeeIBM)). Fixed Endian issue in BitHelpers for s390x. [#39656](https://github.com/ClickHouse/ClickHouse/pull/39656) ([Harry Lee](https://github.com/HarryLeeIBM)). Implement a piece of code related to SipHash for s390x architecture (which is not supported by ClickHouse). [#39732](https://github.com/ClickHouse/ClickHouse/pull/39732) ([Harry Lee](https://github.com/HarryLeeIBM)). Fixed an Endian issue in the Coordination snapshot code for s390x architecture (which is not supported by ClickHouse). [#39931](https://github.com/ClickHouse/ClickHouse/pull/39931) ([Harry Lee](https://github.com/HarryLeeIBM)). Fixed Endian issues in Codec code for s390x architecture (which is not supported by ClickHouse). [#40008](https://github.com/ClickHouse/ClickHouse/pull/40008) ([Harry Lee](https://github.com/HarryLeeIBM)). Fixed Endian issues in reading/writing BigEndian binary data in ReadHelpers and WriteHelpers code for s390x architecture (which is not supported by ClickHouse). [#40179](https://github.com/ClickHouse/ClickHouse/pull/40179) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Support build with `clang-16` (trunk). This closes [#39949](https://github.com/ClickHouse/ClickHouse/issues/39949). [#40181](https://github.com/ClickHouse/ClickHouse/pull/40181) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Prepare RISC-V 64 build to run in CI. This is for [#40141](https://github.com/ClickHouse/ClickHouse/issues/40141). [#40197](https://github.com/ClickHouse/ClickHouse/pull/40197) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Simplified the function registration macro interface (`FUNCTION_REGISTER*`) to eliminate the step of adding and calling an extern function in registerFunctions.cpp; this also makes incremental builds of a new function faster. [#38615](https://github.com/ClickHouse/ClickHouse/pull/38615) ([Li Yin](https://github.com/liyinsg)).
* Docker: Now entrypoint.sh in docker image creates and executes chown for all folders it found in config for multidisk setup [#17717](https://github.com/ClickHouse/ClickHouse/issues/17717). [#39121](https://github.com/ClickHouse/ClickHouse/pull/39121) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Docker: Now entrypoint.sh in docker image creates and executes chown for all folders it finds in the config for multidisk setup [#17717](https://github.com/ClickHouse/ClickHouse/issues/17717). [#39121](https://github.com/ClickHouse/ClickHouse/pull/39121) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
#### Bug Fix
* Fix possible segfault in `CapnProto` input format. This bug was found and send through ClickHouse bug-bounty [program](https://github.com/ClickHouse/ClickHouse/issues/38986) by *kiojj*. [#40241](https://github.com/ClickHouse/ClickHouse/pull/40241) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix a very rare case of incorrect behavior of array subscript operator. This closes [#28720](https://github.com/ClickHouse/ClickHouse/issues/28720). [#40185](https://github.com/ClickHouse/ClickHouse/pull/40185) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix possible segfault in `CapnProto` input format. This bug was found and sent in through the ClickHouse bug-bounty [program](https://github.com/ClickHouse/ClickHouse/issues/38986) by *kiojj*. [#40241](https://github.com/ClickHouse/ClickHouse/pull/40241) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix a very rare case of incorrect behavior of the array subscript operator. This closes [#28720](https://github.com/ClickHouse/ClickHouse/issues/28720). [#40185](https://github.com/ClickHouse/ClickHouse/pull/40185) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix insufficient argument check for encryption functions (found by query fuzzer). This closes [#39987](https://github.com/ClickHouse/ClickHouse/issues/39987). [#40194](https://github.com/ClickHouse/ClickHouse/pull/40194) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix the case when the order of columns can be incorrect if the `IN` operator is used with a table with `ENGINE = Set` containing multiple columns. This fixes [#13014](https://github.com/ClickHouse/ClickHouse/issues/13014). [#40225](https://github.com/ClickHouse/ClickHouse/pull/40225) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix seeking while reading from encrypted disk. This PR fixes [#38381](https://github.com/ClickHouse/ClickHouse/issues/38381). [#39687](https://github.com/ClickHouse/ClickHouse/pull/39687) ([Vitaly Baranov](https://github.com/vitlibar)).

View File

@ -73,22 +73,7 @@ message (STATUS "CMAKE_BUILD_TYPE: ${CMAKE_BUILD_TYPE}")
string (TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UC)
option(USE_STATIC_LIBRARIES "Disable to use shared libraries" ON)
# DEVELOPER ONLY.
# Faster linking if turned on.
option(SPLIT_SHARED_LIBRARIES "Keep all internal libraries as separate .so files" OFF)
if (USE_STATIC_LIBRARIES AND SPLIT_SHARED_LIBRARIES)
message(FATAL_ERROR "SPLIT_SHARED_LIBRARIES=1 must not be used together with USE_STATIC_LIBRARIES=1")
endif()
if (NOT USE_STATIC_LIBRARIES AND SPLIT_SHARED_LIBRARIES)
set(BUILD_SHARED_LIBS 1 CACHE INTERNAL "")
endif ()
if (USE_STATIC_LIBRARIES)
list(REVERSE CMAKE_FIND_LIBRARY_SUFFIXES)
endif ()
list(REVERSE CMAKE_FIND_LIBRARY_SUFFIXES)
option (ENABLE_FUZZING "Fuzzy testing using libfuzzer" OFF)
@ -111,6 +96,7 @@ if (ENABLE_FUZZING)
set (ENABLE_JEMALLOC 0)
set (ENABLE_CHECK_HEAVY_BUILDS 1)
set (GLIBC_COMPATIBILITY OFF)
set (ENABLE_BENCHMARKS 0)
# For codegen_select_fuzzer
set (ENABLE_PROTOBUF 1)
@ -168,8 +154,9 @@ endif ()
option(ENABLE_TESTS "Provide unit_test_dbms target with Google.Test unit tests" ON)
option(ENABLE_EXAMPLES "Build all example programs in 'examples' subdirectories" OFF)
option(ENABLE_BENCHMARKS "Build all benchmark programs in 'benchmarks' subdirectories" OFF)
if (OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64) AND USE_STATIC_LIBRARIES AND NOT SPLIT_SHARED_LIBRARIES AND NOT USE_MUSL)
if (OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64) AND NOT USE_MUSL)
# Only for Linux, x86_64 or aarch64.
option(GLIBC_COMPATIBILITY "Enable compatibility with older glibc libraries." ON)
elseif(GLIBC_COMPATIBILITY)
@ -375,15 +362,15 @@ set (DEBUG_INFO_FLAGS "-g -gdwarf-4")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMPILER_FLAGS}")
set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} -fno-inline ${CMAKE_CXX_FLAGS_ADD}")
set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMPILER_FLAGS} ${CMAKE_C_FLAGS_ADD}")
set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}")
set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} -fno-inline ${CMAKE_C_FLAGS_ADD}")
set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}")
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} ${COMPILER_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
set (CMAKE_ASM_FLAGS_RELWITHDEBINFO "${CMAKE_ASM_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} -fno-inline ${CMAKE_ASM_FLAGS_ADD}")
set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
if (COMPILER_CLANG)
if (OS_DARWIN)
@ -446,12 +433,7 @@ else()
link_libraries(global-group)
endif ()
if (NOT (OS_LINUX OR OS_DARWIN))
# Using system libs can cause a lot of warnings in includes (on macro expansion).
option(WERROR "Enable -Werror compiler option" OFF)
else ()
option(WERROR "Enable -Werror compiler option" ON)
endif ()
option(WERROR "Enable -Werror compiler option" ON)
if (WERROR)
# Don't pollute CMAKE_CXX_FLAGS with -Werror as it will break some CMake checks.
@ -470,22 +452,13 @@ endif ()
set (CMAKE_POSTFIX_VARIABLE "CMAKE_${CMAKE_BUILD_TYPE_UC}_POSTFIX")
if (USE_STATIC_LIBRARIES)
set (CMAKE_POSITION_INDEPENDENT_CODE OFF)
if (OS_LINUX AND NOT ARCH_AARCH64)
# Slightly more efficient code can be generated
# It's disabled for ARM because otherwise ClickHouse cannot run on Android.
set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -fno-pie")
set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -fno-pie")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -no-pie -Wl,-no-pie")
endif ()
else ()
set (CMAKE_POSITION_INDEPENDENT_CODE ON)
# This is required for clang on Arch linux, that uses PIE by default.
# See enable-SSP-and-PIE-by-default.patch [1].
#
# [1]: https://github.com/archlinux/svntogit-packages/blob/6e681aa860e65ad46a1387081482eb875c2200f2/trunk/enable-SSP-and-PIE-by-default.patch
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -no-pie")
set (CMAKE_POSITION_INDEPENDENT_CODE OFF)
if (OS_LINUX AND NOT ARCH_AARCH64)
# Slightly more efficient code can be generated
# It's disabled for ARM because otherwise ClickHouse cannot run on Android.
set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -fno-pie")
set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -fno-pie")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -no-pie -Wl,-no-pie")
endif ()
if (ENABLE_TESTS)
@ -496,13 +469,9 @@ endif ()
enable_testing() # Enable for tests without binary
option(ENABLE_EXTERNAL_OPENSSL "This option is insecure and not recommended for any occasions. If it is enabled, it allows building with alternative OpenSSL library. By default, ClickHouse is using BoringSSL, which is better. Do not use this option." OFF)
option(ENABLE_OPENSSL "This option performs a build with OpenSSL. NOTE! This option is insecure and should never be used. By default, ClickHouse uses and only supports BoringSSL" OFF)
if (ENABLE_EXTERNAL_OPENSSL)
message (STATUS "Build and uses OpenSSL library instead of BoringSSL. This is strongly discouraged. Your build of ClickHouse will be unsupported.")
set(ENABLE_SSL 1)
target_compile_options(global-group INTERFACE "-Wno-deprecated-declarations")
endif ()
option(ENABLE_OPENSSL_DYNAMIC "This option removes SSL from ClickHouse and will link to the OpenSSL version supplied by OS." OFF)
# when installing to /usr - place configs to /etc but for /usr/local place to /usr/local/etc
if (CMAKE_INSTALL_PREFIX STREQUAL "/usr")
@ -511,10 +480,7 @@ else ()
set (CLICKHOUSE_ETC_DIR "${CMAKE_INSTALL_PREFIX}/etc")
endif ()
message (STATUS
"Building for: ${CMAKE_SYSTEM} ${CMAKE_SYSTEM_PROCESSOR} ${CMAKE_LIBRARY_ARCHITECTURE} ;
USE_STATIC_LIBRARIES=${USE_STATIC_LIBRARIES}
SPLIT_SHARED_LIBRARIES=${SPLIT_SHARED_LIBRARIES}")
message (STATUS "Building for: ${CMAKE_SYSTEM} ${CMAKE_SYSTEM_PROCESSOR} ${CMAKE_LIBRARY_ARCHITECTURE}")
include (GNUInstallDirs)
@ -560,7 +526,7 @@ macro (clickhouse_add_executable target)
# - _je_zone_register due to JEMALLOC_PRIVATE_NAMESPACE=je_ under OS X.
# - but jemalloc-cmake does not run private_namespace.sh
# so symbol name should be _zone_register
if (ENABLE_JEMALLOC AND USE_STATIC_LIBRARIES AND OS_DARWIN)
if (ENABLE_JEMALLOC AND OS_DARWIN)
set_property(TARGET ${target} APPEND PROPERTY LINK_OPTIONS -u_zone_register)
endif()
endif()
@ -616,6 +582,8 @@ if (NATIVE_BUILD_TARGETS
"-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}"
"-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}"
"-DENABLE_CCACHE=${ENABLE_CCACHE}"
# Avoid overriding .cargo/config.toml with native toolchain.
"-DENABLE_RUST=OFF"
"-DENABLE_CLICKHOUSE_SELF_EXTRACTING=${ENABLE_CLICKHOUSE_SELF_EXTRACTING}"
${CMAKE_SOURCE_DIR}
WORKING_DIRECTORY "${NATIVE_BUILD_DIR}"

View File

@ -1,4 +1,4 @@
Copyright 2016-2022 ClickHouse, Inc.
Copyright 2016-2023 ClickHouse, Inc.
Apache License
Version 2.0, January 2004
@ -188,7 +188,7 @@ Copyright 2016-2022 ClickHouse, Inc.
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016-2022 ClickHouse, Inc.
Copyright 2016-2023 ClickHouse, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@ -16,8 +16,6 @@ ClickHouse® is an open-source column-oriented database management system that a
* [Contacts](https://clickhouse.com/company/contact) can help to get your questions answered if there are any.
## Upcoming events
* [**v22.11 Release Webinar**](https://clickhouse.com/company/events/v22-11-release-webinar) Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release, provide live demos, and share vision into what is coming in the roadmap.
* [**ClickHouse Meetup at the RELEX Solutions office in Stockholm**](https://www.meetup.com/clickhouse-stockholm-user-group/events/289492084/) - Dec 1 - Formulate by RELEX is a Swedish promotion planning and analytics company. They will share why they chose ClickHouse for their real-time analytics and forecasting solution. The ClickHouse team will then present how ClickHouse is used for real-time financial data analytics, including tick data, trade analytics and risk management.
* [**ClickHouse Meetup at the Deutsche Bank office in Berlin**](https://www.meetup.com/clickhouse-berlin-user-group/events/289311596/) - Dec 5 - Hear from Deutsche Bank on why they chose ClickHouse for big sensitive data in a regulated environment. The ClickHouse team will then present how ClickHouse is used for real time financial data analytics, including tick data, trade analytics and risk management.
* [**ClickHouse Meetup at the Rokt offices in Manhattan**](https://www.meetup.com/clickhouse-new-york-user-group/events/289403909/) - Dec 6 - We are very excited to be holding our next in-person ClickHouse meetup at the Rokt offices in Manhattan. Featuring talks from Bloomberg, Disney Streaming, Prequel, Rokt, and ClickHouse
* **Recording available**: [**v22.12 Release Webinar**](https://www.youtube.com/watch?v=sREupr6uc2k) 22.12 is the ClickHouse Christmas release. There are plenty of gifts (a new JOIN algorithm among them) and we adopted something from MongoDB. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release.
* [**ClickHouse Meetup at the CHEQ office in Tel Aviv**](https://www.meetup.com/clickhouse-tel-aviv-user-group/events/289599423/) - Jan 16 - We are very excited to be holding our next in-person ClickHouse meetup at the CHEQ office in Tel Aviv! Hear from CHEQ, ServiceNow and Contentsquare, as well as a deep dive presentation from ClickHouse CTO Alexey Milovidov. Join us for a fun evening of talks, food and discussion!
* [**ClickHouse Meetup at Microsoft Office in Seattle**](https://www.meetup.com/clickhouse-seattle-user-group/events/290310025/) - Jan 18 - Keep an eye on this space as we will be announcing speakers soon!

View File

@ -13,9 +13,10 @@ The following versions of ClickHouse server are currently being supported with s
| Version | Supported |
|:-|:-|
| 22.12 | ✔️ |
| 22.11 | ✔️ |
| 22.10 | ✔️ |
| 22.9 | ✔️ |
| 22.9 | ❌ |
| 22.8 | ✔️ |
| 22.7 | ❌ |
| 22.6 | ❌ |

View File

@ -10,7 +10,7 @@
#include <base/MoveOrCopyIfThrow.h>
/** Pool for limited size objects that cannot be used from different threads simultaneously.
* The main use case is to have fixed size of objects that can be reused in difference threads during their lifetime
* The main use case is to have fixed size of objects that can be reused in different threads during their lifetime
* and have to be initialized on demand.
* Two main properties of pool are allocated objects size and borrowed objects size.
* Allocated objects size is size of objects that are currently allocated by the pool.
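*
* A minimal usage sketch (illustrative only; the method names `borrowObject`
* and `returnObject` and the constructor argument are assumptions made for
* this example, not taken from this diff):
*
*     BorrowedObjectPool<Connection> pool(4); /// pool of at most 4 objects
*     Connection connection;
*     pool.borrowObject(connection, [] { return Connection(); }); /// initialized on demand
*     /// ... use `connection` from a single thread ...
*     pool.returnObject(std::move(connection)); /// back to the pool for reuse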

View File

@ -8,16 +8,13 @@ set (SRCS
getPageSize.cpp
getThreadId.cpp
JSON.cpp
LineReader.cpp
mremap.cpp
phdr_cache.cpp
preciseExp10.cpp
setTerminalEcho.cpp
shift10.cpp
sleep.cpp
terminalColors.cpp
errnoToString.cpp
ReplxxLineReader.cpp
StringRef.cpp
safeExit.cpp
throwError.cpp
@ -42,10 +39,6 @@ endif ()
target_include_directories(common PUBLIC .. "${CMAKE_CURRENT_BINARY_DIR}/..")
if (OS_DARWIN AND NOT USE_STATIC_LIBRARIES)
target_link_libraries(common PUBLIC -Wl,-U,_inside_main)
endif()
target_link_libraries (common
PUBLIC
ch_contrib::cityhash

View File

@ -1,28 +0,0 @@
#include <base/setTerminalEcho.h>
#include <base/errnoToString.h>
#include <stdexcept>
#include <cstring>
#include <string>
#include <termios.h>
#include <unistd.h>
void setTerminalEcho(bool enable)
{
/// Obtain terminal attributes,
/// toggle the ECHO flag
/// and set them back.
struct termios tty{};
if (0 != tcgetattr(STDIN_FILENO, &tty))
throw std::runtime_error(std::string("setTerminalEcho failed get: ") + errnoToString());
if (enable)
tty.c_lflag |= ECHO;
else
tty.c_lflag &= ~ECHO;
if (0 != tcsetattr(STDIN_FILENO, TCSANOW, &tty))
throw std::runtime_error(std::string("setTerminalEcho failed set: ") + errnoToString());
}

View File

@ -1,4 +0,0 @@
#pragma once
/// Enable or disable echoing of typed characters. Throws std::runtime_error on error.
void setTerminalEcho(bool enable);

View File

@ -187,8 +187,20 @@ struct integer<Bits, Signed>::_impl
static_assert(Bits % base_bits == 0);
/// Simple iteration in both directions
static constexpr unsigned little(unsigned idx) { return idx; }
static constexpr unsigned big(unsigned idx) { return item_count - 1 - idx; }
static constexpr unsigned little(unsigned idx)
{
if constexpr (std::endian::native == std::endian::little)
return idx;
else
return item_count - 1 - idx;
}
static constexpr unsigned big(unsigned idx)
{
if constexpr (std::endian::native == std::endian::little)
return item_count - 1 - idx;
else
return idx;
}
static constexpr unsigned any(unsigned idx) { return idx; }
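/// Worked example (illustrative, not part of the change): for a 256-bit integer
/// with a 64-bit base_type, item_count == 4. On a little-endian host,
/// little(i) == i and big(i) == 3 - i, so little(0) addresses the least
/// significant item. On a big-endian host the mappings swap: little(0) == 3 and
/// big(0) == 0. Code that always indexes through little()/big() therefore reads
/// the same logical item regardless of host byte order, which is what the
/// index fixes below rely on.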
template <class T>
@ -240,20 +252,20 @@ struct integer<Bits, Signed>::_impl
{
static_assert(sizeof(Integral) <= sizeof(base_type));
self.items[0] = _impl::to_Integral(rhs);
self.items[little(0)] = _impl::to_Integral(rhs);
if constexpr (std::is_signed_v<Integral>)
{
if (rhs < 0)
{
for (size_t i = 1; i < item_count; ++i)
self.items[i] = -1;
for (unsigned i = 1; i < item_count; ++i)
self.items[little(i)] = -1;
return;
}
}
for (size_t i = 1; i < item_count; ++i)
self.items[i] = 0;
for (unsigned i = 1; i < item_count; ++i)
self.items[little(i)] = 0;
}
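/// Example (illustrative): with a 256-bit integer and 64-bit items, assigning
/// Int64(-1) writes 0xFFFFFFFFFFFFFFFF into items[little(0)] and sign-extends
/// by filling items[little(1)]..items[little(3)] with -1 as well, while
/// assigning UInt64(7) writes 7 into the least significant item and zeroes the
/// remaining items.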
template <typename TupleLike, size_t i = 0>
@ -348,7 +360,7 @@ struct integer<Bits, Signed>::_impl
constexpr const unsigned to_copy = min_bits / base_bits;
for (unsigned i = 0; i < to_copy; ++i)
self.items[i] = rhs.items[i];
self.items[little(i)] = rhs.items[little(i)];
if constexpr (Bits > Bits2)
{
@ -357,13 +369,13 @@ struct integer<Bits, Signed>::_impl
if (rhs < 0)
{
for (unsigned i = to_copy; i < item_count; ++i)
self.items[i] = -1;
self.items[little(i)] = -1;
return;
}
}
for (unsigned i = to_copy; i < item_count; ++i)
self.items[i] = 0;
self.items[little(i)] = 0;
}
}
@ -454,7 +466,7 @@ private:
{
if constexpr (sizeof(T) <= sizeof(base_type))
{
if (0 == idx)
if (little(0) == idx)
return static_cast<base_type>(x);
}
else if (idx * sizeof(base_type) < sizeof(T))
@ -475,7 +487,7 @@ private:
for (unsigned i = 0; i < op_items; ++i)
{
base_type rhs_item = get_item(rhs, i);
base_type rhs_item = get_item(rhs, little(i));
base_type & res_item = res.items[little(i)];
underflows[i] = res_item < rhs_item;
@ -508,7 +520,7 @@ private:
for (unsigned i = 0; i < op_items; ++i)
{
base_type rhs_item = get_item(rhs, i);
base_type rhs_item = get_item(rhs, little(i));
base_type & res_item = res.items[little(i)];
res_item += rhs_item;
@ -580,12 +592,12 @@ private:
else if constexpr (Bits == 128 && sizeof(base_type) == 8)
{
using CompilerUInt128 = unsigned __int128;
CompilerUInt128 a = (CompilerUInt128(lhs.items[1]) << 64) + lhs.items[0]; // NOLINT(clang-analyzer-core.UndefinedBinaryOperatorResult)
CompilerUInt128 b = (CompilerUInt128(rhs.items[1]) << 64) + rhs.items[0]; // NOLINT(clang-analyzer-core.UndefinedBinaryOperatorResult)
CompilerUInt128 a = (CompilerUInt128(lhs.items[little(1)]) << 64) + lhs.items[little(0)]; // NOLINT(clang-analyzer-core.UndefinedBinaryOperatorResult)
CompilerUInt128 b = (CompilerUInt128(rhs.items[little(1)]) << 64) + rhs.items[little(0)]; // NOLINT(clang-analyzer-core.UndefinedBinaryOperatorResult)
CompilerUInt128 c = a * b;
integer<Bits, Signed> res;
res.items[0] = c;
res.items[1] = c >> 64;
res.items[little(0)] = c;
res.items[little(1)] = c >> 64;
return res;
}
else
@ -597,7 +609,7 @@ private:
#endif
for (unsigned i = 0; i < item_count; ++i)
{
base_type rhs_item = get_item(rhs, i);
base_type rhs_item = get_item(rhs, little(i));
unsigned pos = i * base_bits;
while (rhs_item)
@ -792,7 +804,7 @@ public:
integer<Bits, Signed> res;
for (unsigned i = 0; i < item_count; ++i)
res.items[little(i)] = lhs.items[little(i)] | get_item(rhs, i);
res.items[little(i)] = lhs.items[little(i)] | get_item(rhs, little(i));
return res;
}
else
@ -810,7 +822,7 @@ public:
integer<Bits, Signed> res;
for (unsigned i = 0; i < item_count; ++i)
res.items[little(i)] = lhs.items[little(i)] & get_item(rhs, i);
res.items[little(i)] = lhs.items[little(i)] & get_item(rhs, little(i));
return res;
}
else
@ -845,17 +857,17 @@ public:
{
using CompilerUInt128 = unsigned __int128;
CompilerUInt128 a = (CompilerUInt128(numerator.items[1]) << 64) + numerator.items[0]; // NOLINT(clang-analyzer-core.UndefinedBinaryOperatorResult)
CompilerUInt128 b = (CompilerUInt128(denominator.items[1]) << 64) + denominator.items[0]; // NOLINT(clang-analyzer-core.UndefinedBinaryOperatorResult)
CompilerUInt128 a = (CompilerUInt128(numerator.items[little(1)]) << 64) + numerator.items[little(0)]; // NOLINT(clang-analyzer-core.UndefinedBinaryOperatorResult)
CompilerUInt128 b = (CompilerUInt128(denominator.items[little(1)]) << 64) + denominator.items[little(0)]; // NOLINT(clang-analyzer-core.UndefinedBinaryOperatorResult)
CompilerUInt128 c = a / b; // NOLINT
integer<Bits, Signed> res;
res.items[0] = c;
res.items[1] = c >> 64;
res.items[little(0)] = c;
res.items[little(1)] = c >> 64;
CompilerUInt128 remainder = a - b * c;
numerator.items[0] = remainder;
numerator.items[1] = remainder >> 64;
numerator.items[little(0)] = remainder;
numerator.items[little(1)] = remainder >> 64;
return res;
}
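/// Worked example (illustrative): for a 128-bit numerator a = 3 * 2^64 + 11 and
/// denominator b = 2, the compiler's __int128 division yields
/// c = 2^64 + 2^63 + 5 and remainder a - b * c = 1. The quotient is split back
/// into 64-bit items as res.items[little(1)] = 1 (high half) and
/// res.items[little(0)] = 2^63 + 5 (low half), and the remainder overwrites the
/// numerator items in the same layout.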
@ -1039,15 +1051,15 @@ constexpr integer<Bits, Signed>::integer(std::initializer_list<T> il) noexcept
else
{
auto it = il.begin();
for (size_t i = 0; i < _impl::item_count; ++i)
for (unsigned i = 0; i < _impl::item_count; ++i)
{
if (it < il.end())
{
items[i] = *it;
items[_impl::little(i)] = *it;
++it;
}
else
items[i] = 0;
items[_impl::little(i)] = 0;
}
}
}
@ -1208,7 +1220,7 @@ constexpr integer<Bits, Signed>::operator T() const noexcept
UnsignedT res{};
for (unsigned i = 0; i < _impl::item_count && i < (sizeof(T) + sizeof(base_type) - 1) / sizeof(base_type); ++i)
res += UnsignedT(items[i]) << (sizeof(base_type) * 8 * i); // NOLINT(clang-analyzer-core.UndefinedBinaryOperatorResult)
res += UnsignedT(items[_impl::little(i)]) << (sizeof(base_type) * 8 * i); // NOLINT(clang-analyzer-core.UndefinedBinaryOperatorResult)
return res;
}
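/// Example (illustrative): converting a 256-bit value whose 64-bit items are
/// {7, 1, 0, 0} (least significant first) to unsigned __int128 iterates
/// i = 0, 1 and accumulates 7 + (UnsignedT(1) << 64); items beyond sizeof(T)
/// are simply dropped, i.e. the conversion truncates just like conversions
/// between built-in integer types.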

View File

@ -37,7 +37,7 @@ if (GLIBC_COMPATIBILITY)
target_include_directories(glibc-compatibility PRIVATE libcxxabi ${musl_arch_include_dir})
if (NOT USE_STATIC_LIBRARIES AND NOT USE_STATIC_LIBRARIES)
if (ENABLE_OPENSSL_DYNAMIC)
target_compile_options(glibc-compatibility PRIVATE -fPIC)
endif ()

View File

@ -153,7 +153,7 @@ restart:
/* Restore old terminal settings and signals. */
if (memcmp(&term, &oterm, sizeof(term)) != 0) {
const int sigttou = signo[SIGTTOU];
const int sigttou = (int)signo[SIGTTOU];
/* Ignore SIGTTOU generated when we are not the fg pgrp. */
while (tcsetattr(input, TCSAFLUSH|TCSASOFT, &oterm) == -1 &&

View File

@ -5,7 +5,7 @@ if (CMAKE_SYSTEM_PROCESSOR MATCHES "amd64|x86_64")
set (ARCH_AMD64 1)
elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64.*|AARCH64.*|arm64.*|ARM64.*)")
set (ARCH_AARCH64 1)
elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "^(ppc64le.*|PPC64LE.*)")
elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc64le.*|ppc64le.*|PPC64LE.*)")
set (ARCH_PPC64LE 1)
elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "riscv64")
set (ARCH_RISCV64 1)

View File

@ -2,11 +2,11 @@
# NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION,
# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
SET(VERSION_REVISION 54469)
SET(VERSION_REVISION 54470)
SET(VERSION_MAJOR 22)
SET(VERSION_MINOR 12)
SET(VERSION_MINOR 13)
SET(VERSION_PATCH 1)
SET(VERSION_GITHASH 0d211ed19849fe44b0e43fdebe2c15d76d560a77)
SET(VERSION_DESCRIBE v22.12.1.1-testing)
SET(VERSION_STRING 22.12.1.1)
SET(VERSION_GITHASH 688e488e930c83eefeac4f87c4cc029cc5b231e3)
SET(VERSION_DESCRIBE v22.13.1.1-testing)
SET(VERSION_STRING 22.13.1.1)
# end of autochange

View File

@ -5,21 +5,21 @@ if (ENABLE_CLANG_TIDY)
find_program (CLANG_TIDY_CACHE_PATH NAMES "clang-tidy-cache")
if (CLANG_TIDY_CACHE_PATH)
find_program (_CLANG_TIDY_PATH NAMES "clang-tidy" "clang-tidy-15" "clang-tidy-14" "clang-tidy-13" "clang-tidy-12")
find_program (_CLANG_TIDY_PATH NAMES "clang-tidy-15" "clang-tidy-14" "clang-tidy-13" "clang-tidy-12" "clang-tidy")
# Why do we use ';' here?
# It's a cmake black magic: https://cmake.org/cmake/help/latest/prop_tgt/LANG_CLANG_TIDY.html#prop_tgt:%3CLANG%3E_CLANG_TIDY
# The CLANG_TIDY_PATH is passed to CMAKE_CXX_CLANG_TIDY, which follows CXX_CLANG_TIDY syntax.
set (CLANG_TIDY_PATH "${CLANG_TIDY_CACHE_PATH};${_CLANG_TIDY_PATH}" CACHE STRING "A combined command to run clang-tidy with caching wrapper")
else ()
find_program (CLANG_TIDY_PATH NAMES "clang-tidy" "clang-tidy-15" "clang-tidy-14" "clang-tidy-13" "clang-tidy-12")
find_program (CLANG_TIDY_PATH NAMES "clang-tidy-15" "clang-tidy-14" "clang-tidy-13" "clang-tidy-12" "clang-tidy")
endif ()
if (CLANG_TIDY_PATH)
message (STATUS
"Using clang-tidy: ${CLANG_TIDY_PATH}.
The checks will be run during build process.
See the .clang-tidy file at the root directory to configure the checks.")
The checks will be run during the build process.
See the .clang-tidy file in the root directory to configure the checks.")
set (USE_CLANG_TIDY ON)

View File

@ -102,6 +102,11 @@ elseif (ARCH_AMD64)
SET(ENABLE_AVX512_FOR_SPEC_OP 0)
endif()
# ClickHouse can be cross-compiled (e.g. on an ARM host for x86) but it is also possible to build ClickHouse on x86 w/o AVX for x86 w/
# AVX. We only check that the compiler can emit certain SIMD instructions, we don't care if the host system is able to run the binary.
# Therefore, use check_cxx_source_compiles (= does the code compile+link?) instead of check_cxx_source_runs (= does the code
# compile+link+run).
set (TEST_FLAG "-mssse3")
set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
check_cxx_source_compiles("

View File

@ -1,3 +1,6 @@
# See linux/toolchain-x86_64.cmake for details about multiple load of toolchain file.
include_guard(GLOBAL)
set (CMAKE_SYSTEM_NAME "Darwin")
set (CMAKE_SYSTEM_PROCESSOR "aarch64")
set (CMAKE_C_COMPILER_TARGET "aarch64-apple-darwin")

View File

@ -1,3 +1,6 @@
# See linux/toolchain-x86_64.cmake for details about multiple load of toolchain file.
include_guard(GLOBAL)
set (CMAKE_SYSTEM_NAME "Darwin")
set (CMAKE_SYSTEM_PROCESSOR "x86_64")
set (CMAKE_C_COMPILER_TARGET "x86_64-apple-darwin")

View File

@ -1,3 +1,6 @@
# See linux/toolchain-x86_64.cmake for details about multiple load of toolchain file.
include_guard(GLOBAL)
set (CMAKE_SYSTEM_NAME "FreeBSD")
set (CMAKE_SYSTEM_PROCESSOR "aarch64")
set (CMAKE_C_COMPILER_TARGET "aarch64-unknown-freebsd12")

View File

@ -0,0 +1,21 @@
# See linux/toolchain-x86_64.cmake for details about multiple load of toolchain file.
include_guard(GLOBAL)
set (CMAKE_SYSTEM_NAME "FreeBSD")
set (CMAKE_SYSTEM_PROCESSOR "ppc64le")
set (CMAKE_C_COMPILER_TARGET "powerpc64le-unknown-freebsd13")
set (CMAKE_CXX_COMPILER_TARGET "powerpc64le-unknown-freebsd13")
set (CMAKE_ASM_COMPILER_TARGET "powerpc64le-unknown-freebsd13")
set (CMAKE_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/freebsd-ppc64le")
set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake
# Will be changed later, but somehow needed to be set here.
set (CMAKE_AR "ar")
set (CMAKE_RANLIB "ranlib")
set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
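# A typical configure step with this toolchain file would look like the
# following (illustrative; the build directory name is arbitrary and the path
# assumes the file's location under cmake/freebsd/):
#
#   cmake -S . -B build-freebsd-ppc64le \
#         -DCMAKE_TOOLCHAIN_FILE=cmake/freebsd/toolchain-ppc64le.cmake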

View File

@ -1,3 +1,6 @@
# See linux/toolchain-x86_64.cmake for details about multiple load of toolchain file.
include_guard(GLOBAL)
set (CMAKE_SYSTEM_NAME "FreeBSD")
set (CMAKE_SYSTEM_PROCESSOR "x86_64")
set (CMAKE_C_COMPILER_TARGET "x86_64-pc-freebsd11")

View File

@ -1,3 +1,6 @@
# See linux/toolchain-x86_64.cmake for details about multiple load of toolchain file.
include_guard(GLOBAL)
set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)
set (CMAKE_SYSTEM_NAME "Linux")

View File

@ -1,3 +1,6 @@
# See linux/toolchain-x86_64.cmake for details about multiple load of toolchain file.
include_guard(GLOBAL)
set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)
set (CMAKE_SYSTEM_NAME "Linux")

View File

@ -1,3 +1,6 @@
# See linux/toolchain-x86_64.cmake for details about multiple load of toolchain file.
include_guard(GLOBAL)
set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)
set (CMAKE_SYSTEM_NAME "Linux")

View File

@ -1,3 +1,6 @@
# See linux/toolchain-x86_64.cmake for details about multiple load of toolchain file.
include_guard(GLOBAL)
set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)
set (CMAKE_SYSTEM_NAME "Linux")

View File

@ -1,18 +1,15 @@
if (_CLICKHOUSE_TOOLCHAIN_FILE_LOADED)
# During first run of cmake the toolchain file will be loaded twice,
# - /usr/share/cmake-3.23/Modules/CMakeDetermineSystem.cmake
# - /bld/CMakeFiles/3.23.2/CMakeSystem.cmake
#
# But once you already have non-empty cmake cache it will be loaded only
# once:
# - /bld/CMakeFiles/3.23.2/CMakeSystem.cmake
#
# This has no harm except for double load of toolchain will add
# --gcc-toolchain multiple times that will not allow ccache to reuse the
# cache.
return()
endif()
set (_CLICKHOUSE_TOOLCHAIN_FILE_LOADED ON)
# During first run of cmake the toolchain file will be loaded twice,
# - /usr/share/cmake-3.23/Modules/CMakeDetermineSystem.cmake
# - /bld/CMakeFiles/3.23.2/CMakeSystem.cmake
#
# But once you already have non-empty cmake cache it will be loaded only
# once:
# - /bld/CMakeFiles/3.23.2/CMakeSystem.cmake
#
# This has no harm except for double load of toolchain will add
# --gcc-toolchain multiple times that will not allow ccache to reuse the
# cache.
include_guard(GLOBAL)
set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)
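# Illustration (not part of the build): include_guard(GLOBAL) makes a second
# load of this file a no-op for the whole CMake run, e.g.
#
#   include(cmake/linux/toolchain-x86_64.cmake)  # processed
#   include(cmake/linux/toolchain-x86_64.cmake)  # skipped, guard already set
#
# which is exactly what prevents --gcc-toolchain from being appended twice.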

View File

@ -25,7 +25,7 @@ if (SANITIZE)
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${ASAN_FLAGS}")
endif()
if (USE_STATIC_LIBRARIES AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libasan")
endif ()
if (COMPILER_GCC)
@ -50,7 +50,7 @@ if (SANITIZE)
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=memory")
endif()
if (USE_STATIC_LIBRARIES AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libmsan")
endif ()
@ -71,7 +71,7 @@ if (SANITIZE)
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=thread")
endif()
if (USE_STATIC_LIBRARIES AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libtsan")
endif ()
if (COMPILER_GCC)
@ -103,7 +103,7 @@ if (SANITIZE)
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=undefined")
endif()
if (USE_STATIC_LIBRARIES AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libubsan")
endif ()
if (COMPILER_GCC)

View File

@ -21,12 +21,12 @@ set (APPLE_CLANG_MINIMUM_VERSION 12.0.0)
set (GCC_MINIMUM_VERSION 11)
if (COMPILER_GCC)
message (FATAL_ERROR "Compilation with GCC is unsupported. Please use Clang instead.")
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${GCC_MINIMUM_VERSION})
message (FATAL_ERROR "Compilation with GCC version ${CMAKE_CXX_COMPILER_VERSION} is unsupported, the minimum required version is ${GCC_MINIMUM_VERSION}.")
endif ()
message (WARNING "Compilation with GCC is unsupported. Please use Clang instead.")
elseif (COMPILER_CLANG)
if (CMAKE_CXX_COMPILER_ID MATCHES "AppleClang")
# (Experimental!) Specify "-DALLOW_APPLECLANG=ON" when running CMake configuration step, if you want to experiment with using it.
@ -83,7 +83,7 @@ if ((OS_LINUX OR OS_DARWIN) AND NOT LINKER_NAME)
if (NOT LINKER_NAME)
if (GOLD_PATH)
message (WARNING "Linking with gold is not recommended. Please use lld.")
message (FATAL_ERROR "Linking with gold is unsupported. Please use lld.")
if (COMPILER_GCC)
set (LINKER_NAME "gold")
else ()

View File

@ -48,7 +48,11 @@ function(add_contrib cmake_folder)
message(STATUS "Adding contrib module ${base_folders} (configuring with ${cmake_folder})")
add_subdirectory (${cmake_folder})
endfunction()
if (ENABLE_OPENSSL OR ENABLE_OPENSSL_DYNAMIC)
add_contrib (openssl-cmake openssl)
else ()
add_contrib (boringssl-cmake boringssl)
endif ()
add_contrib (miniselect-cmake miniselect)
add_contrib (pdqsort-cmake pdqsort)
add_contrib (sparsehash-c11-cmake sparsehash-c11)
@ -61,7 +65,7 @@ add_contrib (dragonbox-cmake dragonbox)
add_contrib (vectorscan-cmake vectorscan)
add_contrib (jemalloc-cmake jemalloc)
add_contrib (libcpuid-cmake libcpuid)
add_contrib (libdivide)
add_contrib (libdivide-cmake)
add_contrib (libmetrohash)
add_contrib (lz4-cmake lz4)
add_contrib (murmurhash)
@ -74,11 +78,6 @@ add_contrib (re2-cmake re2)
add_contrib (xz-cmake xz)
add_contrib (brotli-cmake brotli)
add_contrib (double-conversion-cmake double-conversion)
if (NOT ENABLE_EXTERNAL_OPENSSL)
add_contrib (boringssl-cmake boringssl)
else ()
add_contrib (openssl-cmake openssl)
endif ()
add_contrib (poco-cmake poco)
add_contrib (croaring-cmake croaring)
add_contrib (zstd-cmake zstd)
@ -116,12 +115,25 @@ endif()
add_contrib (llvm-project-cmake llvm-project)
add_contrib (libfuzzer-cmake llvm-project)
add_contrib (libxml2-cmake libxml2)
add_contrib (aws-s3-cmake
add_contrib (aws-cmake
aws
aws-c-auth
aws-c-cal
aws-c-common
aws-c-compression
aws-c-event-stream
aws-c-http
aws-c-io
aws-c-mqtt
aws-c-s3
aws-c-sdkutils
aws-s2n-tls
aws-checksums
aws-crt-cpp
aws-cmake
)
add_contrib (base64-cmake base64)
add_contrib (simdjson-cmake simdjson)
add_contrib (rapidjson-cmake rapidjson)
@ -171,6 +183,8 @@ add_contrib (annoy-cmake annoy)
add_contrib (xxHash-cmake xxHash)
add_contrib (google-benchmark-cmake google-benchmark)
# Put all targets defined here and in subdirectories under "contrib/<immediate-subdir>" folders in GUI-based IDEs.
# Some of third-party projects may override CMAKE_FOLDER or FOLDER property of their targets, so they would not appear
# in "contrib/..." as originally planned, so we workaround this by fixing FOLDER properties of all targets manually,

2
contrib/NuRaft vendored

@ -1 +1 @@
Subproject commit e4e746a24eb56861a86f3672771e3308d8c40722
Subproject commit afc36dfa9b0beb45bc4cd935060631cc80ba04a5

View File

@ -78,23 +78,14 @@ set(FLATBUFFERS_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/flatbuffers")
set(FLATBUFFERS_INCLUDE_DIR "${FLATBUFFERS_SRC_DIR}/include")
# set flatbuffers CMake options
if (USE_STATIC_LIBRARIES)
set(FLATBUFFERS_BUILD_FLATLIB ON CACHE BOOL "Enable the build of the flatbuffers library")
set(FLATBUFFERS_BUILD_SHAREDLIB OFF CACHE BOOL "Disable the build of the flatbuffers shared library")
else ()
set(FLATBUFFERS_BUILD_SHAREDLIB ON CACHE BOOL "Enable the build of the flatbuffers shared library")
set(FLATBUFFERS_BUILD_FLATLIB OFF CACHE BOOL "Disable the build of the flatbuffers library")
endif ()
set(FLATBUFFERS_BUILD_FLATLIB ON CACHE BOOL "Enable the build of the flatbuffers library")
set(FLATBUFFERS_BUILD_SHAREDLIB OFF CACHE BOOL "Disable the build of the flatbuffers shared library")
set(FLATBUFFERS_BUILD_TESTS OFF CACHE BOOL "Skip flatbuffers tests")
add_subdirectory(${FLATBUFFERS_SRC_DIR} "${FLATBUFFERS_BINARY_DIR}")
add_library(_flatbuffers INTERFACE)
if(USE_STATIC_LIBRARIES)
target_link_libraries(_flatbuffers INTERFACE flatbuffers)
else()
target_link_libraries(_flatbuffers INTERFACE flatbuffers_shared)
endif()
target_link_libraries(_flatbuffers INTERFACE flatbuffers)
target_include_directories(_flatbuffers INTERFACE ${FLATBUFFERS_INCLUDE_DIR})
# === hdfs

2
contrib/aws vendored

@ -1 +1 @@
Subproject commit 00b03604543367d7e310cb0993973fdcb723ea79
Subproject commit 4a12641211d4dbc8e2fdb2dd0f1eea0927db9252

1
contrib/aws-c-auth vendored Submodule

@ -0,0 +1 @@
Subproject commit 30df6c407e2df43bd244e2c34c9b4a4b87372bfb

1
contrib/aws-c-cal vendored Submodule

@ -0,0 +1 @@
Subproject commit 85dd7664b786a389c6fb1a6f031ab4bb2282133d

@ -1 +1 @@
Subproject commit 736a82d1697c108b04a277e66438a7f4e19b6857
Subproject commit 324fd1d973ccb25c813aa747bf1759cfde5121c5

1
contrib/aws-c-compression vendored Submodule

@ -0,0 +1 @@
Subproject commit b517b7decd0dac30be2162f5186c250221c53aff

@ -1 +1 @@
Subproject commit 3bc33662f9ccff4f4cbcf9509cc78c26e022fde0
Subproject commit 39bfa94a14b7126bf0c1330286ef8db452d87e66

1
contrib/aws-c-http vendored Submodule

@ -0,0 +1 @@
Subproject commit 2c5a2a7d5556600b9782ffa6c9d7e09964df1abc

1
contrib/aws-c-io vendored Submodule

@ -0,0 +1 @@
Subproject commit 5d32c453560d0823df521a686bf7fbacde7f9be3

1
contrib/aws-c-mqtt vendored Submodule

@ -0,0 +1 @@
Subproject commit 882c689561a3db1466330ccfe3b63637e0a575d3

1
contrib/aws-c-s3 vendored Submodule

@ -0,0 +1 @@
Subproject commit a41255ece72a7c887bba7f9d998ca3e14f4c8a1b

1
contrib/aws-c-sdkutils vendored Submodule

@ -0,0 +1 @@
Subproject commit 25bf5cf225f977c3accc6a05a0a7a181ef2a4a30

@ -1 +1 @@
Subproject commit 519d6d9093819b6cf89ffff589a27ef8f83d0f65
Subproject commit 48e7c0e01479232f225c8044d76c84e74192889d

View File

@ -0,0 +1,114 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
include(CheckCSourceRuns)
option(USE_CPU_EXTENSIONS "Whenever possible, use functions optimized for CPUs with specific extensions (ex: SSE, AVX)." ON)
# In the current (11/2/21) state of mingw64, the packaged gcc is not capable of emitting properly aligned avx2 instructions under certain circumstances.
# This leads to crashes for windows builds using mingw64 when invoking the avx2-enabled versions of certain functions. Until we can find a better
# work-around, disable avx2 (and all other extensions) in mingw builds.
#
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=54412
#
if (MINGW)
message(STATUS "MINGW detected! Disabling avx2 and other CPU extensions")
set(USE_CPU_EXTENSIONS OFF)
endif()
if(NOT CMAKE_CROSSCOMPILING)
check_c_source_runs("
#include <stdbool.h>
bool foo(int a, int b, int *c) {
return __builtin_mul_overflow(a, b, c);
}
int main() {
int out;
if (foo(1, 2, &out)) {
return 0;
}
return 0;
}" AWS_HAVE_GCC_OVERFLOW_MATH_EXTENSIONS)
if (USE_CPU_EXTENSIONS)
check_c_source_runs("
int main() {
int foo = 42;
_mulx_u32(1, 2, &foo);
return foo != 2;
}" AWS_HAVE_MSVC_MULX)
endif()
endif()
check_c_source_compiles("
#include <Windows.h>
#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
int main() {
return 0;
}
#else
it's not windows desktop
#endif
" AWS_HAVE_WINAPI_DESKTOP)
check_c_source_compiles("
int main() {
#if !(defined(__x86_64__) || defined(__i386__) || defined(_M_X64) || defined(_M_IX86))
# error \"not intel\"
#endif
return 0;
}
" AWS_ARCH_INTEL)
check_c_source_compiles("
int main() {
#if !(defined(__aarch64__) || defined(_M_ARM64))
# error \"not arm64\"
#endif
return 0;
}
" AWS_ARCH_ARM64)
check_c_source_compiles("
int main() {
#if !(defined(__arm__) || defined(_M_ARM))
# error \"not arm\"
#endif
return 0;
}
" AWS_ARCH_ARM32)
check_c_source_compiles("
int main() {
int foo = 42, bar = 24;
__asm__ __volatile__(\"\":\"=r\"(foo):\"r\"(bar):\"memory\");
}" AWS_HAVE_GCC_INLINE_ASM)
check_c_source_compiles("
#include <sys/auxv.h>
int main() {
#ifdef __linux__
getauxval(AT_HWCAP);
getauxval(AT_HWCAP2);
#endif
return 0;
}" AWS_HAVE_AUXV)
string(REGEX MATCH "^(aarch64|arm)" ARM_CPU "${CMAKE_SYSTEM_PROCESSOR}")
if(NOT LEGACY_COMPILER_SUPPORT OR ARM_CPU)
check_c_source_compiles("
#include <execinfo.h>
int main() {
backtrace(NULL, 0);
return 0;
}" AWS_HAVE_EXECINFO)
endif()
check_c_source_compiles("
#include <linux/if_link.h>
int main() {
return 1;
}" AWS_HAVE_LINUX_IF_LINK_H)

View File

@ -0,0 +1,74 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
include(CheckCCompilerFlag)
include(CheckIncludeFile)
if (USE_CPU_EXTENSIONS)
if (MSVC)
check_c_compiler_flag("/arch:AVX2" HAVE_M_AVX2_FLAG)
if (HAVE_M_AVX2_FLAG)
set(AVX2_CFLAGS "/arch:AVX2")
endif()
else()
check_c_compiler_flag(-mavx2 HAVE_M_AVX2_FLAG)
if (HAVE_M_AVX2_FLAG)
set(AVX2_CFLAGS "-mavx -mavx2")
endif()
endif()
cmake_push_check_state()
set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${AVX2_CFLAGS}")
check_c_source_compiles("
#include <immintrin.h>
#include <emmintrin.h>
#include <string.h>
int main() {
__m256i vec;
memset(&vec, 0, sizeof(vec));
_mm256_shuffle_epi8(vec, vec);
_mm256_set_epi32(1,2,3,4,5,6,7,8);
_mm256_permutevar8x32_epi32(vec, vec);
return 0;
}" HAVE_AVX2_INTRINSICS)
check_c_source_compiles("
#include <immintrin.h>
#include <string.h>
int main() {
__m256i vec;
memset(&vec, 0, sizeof(vec));
return (int)_mm256_extract_epi64(vec, 2);
}" HAVE_MM256_EXTRACT_EPI64)
cmake_pop_check_state()
endif() # USE_CPU_EXTENSIONS
macro(simd_add_definition_if target definition)
if(${definition})
target_compile_definitions(${target} PRIVATE -D${definition})
endif(${definition})
endmacro(simd_add_definition_if)
# Configure private preprocessor definitions for SIMD-related features
# Does not set any processor feature codegen flags
function(simd_add_definitions target)
simd_add_definition_if(${target} HAVE_AVX2_INTRINSICS)
simd_add_definition_if(${target} HAVE_MM256_EXTRACT_EPI64)
endfunction(simd_add_definitions)
# Adds source files only if AVX2 is supported. These files will be built with
# avx2 intrinsics enabled.
# Usage: simd_add_source_avx2(target file1.c file2.c ...)
function(simd_add_source_avx2 target)
foreach(file ${ARGN})
target_sources(${target} PRIVATE ${file})
set_source_files_properties(${file} PROPERTIES COMPILE_FLAGS "${AVX2_CFLAGS}")
endforeach()
endfunction(simd_add_source_avx2)
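A hypothetical usage sketch of the two helpers above (my_lib and the source file names are placeholders, not part of this tree):
add_library(my_lib source/encode.c)                   # plain sources
simd_add_definitions(my_lib)                          # private HAVE_* defines, no codegen flags
simd_add_source_avx2(my_lib source/encode_avx2.c)     # this file alone is built with ${AVX2_CFLAGS}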


@ -0,0 +1,50 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
include(CheckSymbolExists)
# Check if the platform supports setting thread affinity
# (important for hitting full NIC entitlement on NUMA architectures)
function(aws_set_thread_affinity_method target)
# Non-POSIX, Android, and Apple platforms do not support thread affinity.
if (NOT UNIX OR ANDROID OR APPLE)
target_compile_definitions(${target} PRIVATE
-DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_NONE)
return()
endif()
cmake_push_check_state()
list(APPEND CMAKE_REQUIRED_DEFINITIONS -D_GNU_SOURCE)
list(APPEND CMAKE_REQUIRED_LIBRARIES pthread)
set(headers "pthread.h")
# BSDs put nonportable pthread declarations in a separate header.
if(CMAKE_SYSTEM_NAME MATCHES BSD)
set(headers "${headers};pthread_np.h")
endif()
# Using pthread attrs is the preferred method, but is glibc-specific.
check_symbol_exists(pthread_attr_setaffinity_np "${headers}" USE_PTHREAD_ATTR_SETAFFINITY)
if (USE_PTHREAD_ATTR_SETAFFINITY)
target_compile_definitions(${target} PRIVATE
-DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_PTHREAD_ATTR)
return()
endif()
# This method is still nonportable, but is supported by musl and BSDs.
check_symbol_exists(pthread_setaffinity_np "${headers}" USE_PTHREAD_SETAFFINITY)
if (USE_PTHREAD_SETAFFINITY)
target_compile_definitions(${target} PRIVATE
-DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_PTHREAD)
return()
endif()
# If we got here, we expected thread affinity support but didn't find it.
# We still build with degraded NUMA performance, but show a warning.
message(WARNING "No supported method for setting thread affinity")
target_compile_definitions(${target} PRIVATE
-DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_NONE)
cmake_pop_check_state()
endfunction()
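A usage sketch (the target name is hypothetical; in this tree the function is invoked once on the _aws target defined later in this diff):
add_library(my_io_lib main.c)               # placeholder target
aws_set_thread_affinity_method(my_io_lib)   # injects exactly one -DAWS_AFFINITY_METHOD=... definition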


@ -0,0 +1,61 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
include(CheckSymbolExists)
# Check how the platform supports setting thread name
function(aws_set_thread_name_method target)
if (WINDOWS)
# On Windows we do a runtime check instead of a compile-time check
return()
elseif (APPLE)
# All Apple platforms we support have the same function, so there is no need for a compile-time check.
return()
endif()
cmake_push_check_state()
list(APPEND CMAKE_REQUIRED_DEFINITIONS -D_GNU_SOURCE)
list(APPEND CMAKE_REQUIRED_LIBRARIES pthread)
# The start of the test program
set(c_source_start "
#define _GNU_SOURCE
#include <pthread.h>
#if defined(__FreeBSD__) || defined(__NETBSD__)
#include <pthread_np.h>
#endif
int main() {
pthread_t thread_id;
")
# The end of the test program
set(c_source_end "}")
# pthread_setname_np() usually takes 2 args
check_c_source_compiles("
${c_source_start}
pthread_setname_np(thread_id, \"asdf\");
${c_source_end}"
PTHREAD_SETNAME_TAKES_2ARGS)
if (PTHREAD_SETNAME_TAKES_2ARGS)
target_compile_definitions(${target} PRIVATE -DAWS_PTHREAD_SETNAME_TAKES_2ARGS)
return()
endif()
# But on NetBSD it takes 3!
check_c_source_compiles("
${c_source_start}
pthread_setname_np(thread_id, \"asdf\", NULL);
${c_source_end}
" PTHREAD_SETNAME_TAKES_3ARGS)
if (PTHREAD_SETNAME_TAKES_3ARGS)
target_compile_definitions(${target} PRIVATE -DAWS_PTHREAD_SETNAME_TAKES_3ARGS)
return()
endif()
# And on many older/weirder platforms it's just not supported
cmake_pop_check_state()
endfunction()
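A usage sketch mirroring the affinity helper above (hypothetical target; the real call site is aws_set_thread_name_method(_aws) later in this diff):
add_library(my_io_lib main.c)             # placeholder target
aws_set_thread_name_method(my_io_lib)     # may add -DAWS_PTHREAD_SETNAME_TAKES_2ARGS or ..._3ARGS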


@ -0,0 +1,376 @@
set(ENABLE_AWS_S3_DEFAULT OFF)
if(ENABLE_LIBRARIES AND (OS_LINUX OR OS_DARWIN) AND TARGET OpenSSL::Crypto)
set(ENABLE_AWS_S3_DEFAULT ON)
endif()
option(ENABLE_AWS_S3 "Enable AWS S3" ${ENABLE_AWS_S3_DEFAULT})
if(ENABLE_AWS_S3)
if(NOT TARGET OpenSSL::Crypto)
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use AWS SDK without OpenSSL")
elseif(NOT (OS_LINUX OR OS_DARWIN))
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use AWS SDK with platform ${CMAKE_SYSTEM_NAME}")
endif()
endif()
if(NOT ENABLE_AWS_S3)
message(STATUS "Not using AWS S3")
return()
endif()
# Utilities.
include("${ClickHouse_SOURCE_DIR}/contrib/aws-cmake/AwsFeatureTests.cmake")
include("${ClickHouse_SOURCE_DIR}/contrib/aws-cmake/AwsThreadAffinity.cmake")
include("${ClickHouse_SOURCE_DIR}/contrib/aws-cmake/AwsThreadName.cmake")
include("${ClickHouse_SOURCE_DIR}/contrib/aws-cmake/AwsSIMD.cmake")
# Gather sources and options.
set(AWS_SOURCES)
set(AWS_PUBLIC_INCLUDES)
set(AWS_PRIVATE_INCLUDES)
set(AWS_PUBLIC_COMPILE_DEFS)
set(AWS_PRIVATE_COMPILE_DEFS)
set(AWS_PRIVATE_LIBS)
if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
list(APPEND AWS_PRIVATE_COMPILE_DEFS "-DDEBUG_BUILD")
endif()
set(ENABLE_OPENSSL_ENCRYPTION ON)
if (ENABLE_OPENSSL_ENCRYPTION)
list(APPEND AWS_PRIVATE_COMPILE_DEFS "-DENABLE_OPENSSL_ENCRYPTION")
endif()
set(USE_S2N ON)
if (USE_S2N)
list(APPEND AWS_PRIVATE_COMPILE_DEFS "-DUSE_S2N")
endif()
# Directories.
SET(AWS_SDK_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws")
SET(AWS_SDK_CORE_DIR "${AWS_SDK_DIR}/aws-cpp-sdk-core")
SET(AWS_SDK_S3_DIR "${AWS_SDK_DIR}/aws-cpp-sdk-s3")
SET(AWS_AUTH_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-auth")
SET(AWS_CAL_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-cal")
SET(AWS_CHECKSUMS_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-checksums")
SET(AWS_COMMON_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-common")
SET(AWS_COMPRESSION_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-compression")
SET(AWS_CRT_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-crt-cpp")
SET(AWS_EVENT_STREAM_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-event-stream")
SET(AWS_HTTP_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-http")
SET(AWS_IO_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-io")
SET(AWS_MQTT_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-mqtt")
SET(AWS_S2N_TLS_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-s2n-tls")
SET(AWS_S3_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-s3")
SET(AWS_SDKUTILS_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-sdkutils")
# aws-cpp-sdk-core
file(GLOB AWS_SDK_CORE_SRC
"${AWS_SDK_CORE_DIR}/source/*.cpp"
"${AWS_SDK_CORE_DIR}/source/auth/*.cpp"
"${AWS_SDK_CORE_DIR}/source/auth/bearer-token-provider/*.cpp"
"${AWS_SDK_CORE_DIR}/source/auth/signer/*.cpp"
"${AWS_SDK_CORE_DIR}/source/auth/signer-provider/*.cpp"
"${AWS_SDK_CORE_DIR}/source/client/*.cpp"
"${AWS_SDK_CORE_DIR}/source/config/*.cpp"
"${AWS_SDK_CORE_DIR}/source/config/defaults/*.cpp"
"${AWS_SDK_CORE_DIR}/source/endpoint/*.cpp"
"${AWS_SDK_CORE_DIR}/source/endpoint/internal/*.cpp"
"${AWS_SDK_CORE_DIR}/source/external/cjson/*.cpp"
"${AWS_SDK_CORE_DIR}/source/external/tinyxml2/*.cpp"
"${AWS_SDK_CORE_DIR}/source/http/*.cpp"
"${AWS_SDK_CORE_DIR}/source/http/standard/*.cpp"
"${AWS_SDK_CORE_DIR}/source/internal/*.cpp"
"${AWS_SDK_CORE_DIR}/source/monitoring/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/base64/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/crypto/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/crypto/openssl/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/crypto/factory/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/event/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/json/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/logging/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/memory/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/memory/stl/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/stream/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/threading/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/xml/*.cpp"
)
if(OS_LINUX OR OS_DARWIN)
file(GLOB AWS_SDK_CORE_NET_SRC "${AWS_SDK_CORE_DIR}/source/net/linux-shared/*.cpp")
file(GLOB AWS_SDK_CORE_PLATFORM_SRC "${AWS_SDK_CORE_DIR}/source/platform/linux-shared/*.cpp")
else()
file(GLOB AWS_SDK_CORE_NET_SRC "${AWS_SDK_CORE_DIR}/source/net/*.cpp")
set(AWS_SDK_CORE_PLATFORM_SRC)
endif()
OPTION(USE_AWS_MEMORY_MANAGEMENT "Aws memory management" OFF)
configure_file("${AWS_SDK_CORE_DIR}/include/aws/core/SDKConfig.h.in"
"${CMAKE_CURRENT_BINARY_DIR}/include/aws/core/SDKConfig.h" @ONLY)
list(APPEND AWS_PUBLIC_COMPILE_DEFS "-DAWS_SDK_VERSION_MAJOR=1")
list(APPEND AWS_PUBLIC_COMPILE_DEFS "-DAWS_SDK_VERSION_MINOR=10")
list(APPEND AWS_PUBLIC_COMPILE_DEFS "-DAWS_SDK_VERSION_PATCH=36")
list(APPEND AWS_SOURCES ${AWS_SDK_CORE_SRC} ${AWS_SDK_CORE_NET_SRC} ${AWS_SDK_CORE_PLATFORM_SRC})
list(APPEND AWS_PUBLIC_INCLUDES
"${AWS_SDK_CORE_DIR}/include/"
"${CMAKE_CURRENT_BINARY_DIR}/include"
)
# aws-cpp-sdk-s3
file(GLOB AWS_SDK_S3_SRC
"${AWS_SDK_S3_DIR}/source/*.cpp"
"${AWS_SDK_S3_DIR}/source/model/*.cpp"
)
list(APPEND AWS_SOURCES ${AWS_SDK_S3_SRC})
list(APPEND AWS_PUBLIC_INCLUDES "${AWS_SDK_S3_DIR}/include/")
# aws-c-auth
file(GLOB AWS_AUTH_SRC
"${AWS_AUTH_DIR}/source/*.c"
)
list(APPEND AWS_SOURCES ${AWS_AUTH_SRC})
list(APPEND AWS_PUBLIC_INCLUDES "${AWS_AUTH_DIR}/include/")
# aws-c-cal
file(GLOB AWS_CAL_SRC
"${AWS_CAL_DIR}/source/*.c"
)
if (ENABLE_OPENSSL_ENCRYPTION)
file(GLOB AWS_CAL_OS_SRC
"${AWS_CAL_DIR}/source/unix/*.c"
)
list(APPEND AWS_PRIVATE_LIBS OpenSSL::Crypto)
endif()
list(APPEND AWS_SOURCES ${AWS_CAL_SRC} ${AWS_CAL_OS_SRC})
list(APPEND AWS_PRIVATE_INCLUDES "${AWS_CAL_DIR}/include/")
# aws-c-event-stream
file(GLOB AWS_EVENT_STREAM_SRC
"${AWS_EVENT_STREAM_DIR}/source/*.c"
)
list(APPEND AWS_SOURCES ${AWS_EVENT_STREAM_SRC})
list(APPEND AWS_PRIVATE_INCLUDES "${AWS_EVENT_STREAM_DIR}/include/")
# aws-c-common
file(GLOB AWS_COMMON_SRC
"${AWS_COMMON_DIR}/source/*.c"
"${AWS_COMMON_DIR}/source/external/*.c"
"${AWS_COMMON_DIR}/source/posix/*.c"
)
file(GLOB AWS_COMMON_ARCH_SRC
"${AWS_COMMON_DIR}/source/arch/generic/*.c"
)
if (AWS_ARCH_INTEL)
file(GLOB AWS_COMMON_ARCH_SRC
"${AWS_COMMON_DIR}/source/arch/intel/cpuid.c"
"${AWS_COMMON_DIR}/source/arch/intel/asm/*.c"
)
elseif (AWS_ARCH_ARM64 OR AWS_ARCH_ARM32)
if (AWS_HAVE_AUXV)
file(GLOB AWS_COMMON_ARCH_SRC
"${AWS_COMMON_DIR}/source/arch/arm/asm/*.c"
)
endif()
endif()
set(AWS_COMMON_AVX2_SRC)
if (HAVE_AVX2_INTRINSICS)
list(APPEND AWS_PRIVATE_COMPILE_DEFS "-DUSE_SIMD_ENCODING")
set(AWS_COMMON_AVX2_SRC "${AWS_COMMON_DIR}/source/arch/intel/encoding_avx2.c")
set_source_files_properties(${AWS_COMMON_AVX2_SRC} PROPERTIES COMPILE_FLAGS "${AVX2_CFLAGS}")
endif()
configure_file("${AWS_COMMON_DIR}/include/aws/common/config.h.in"
"${CMAKE_CURRENT_BINARY_DIR}/include/aws/common/config.h" @ONLY)
list(APPEND AWS_SOURCES ${AWS_COMMON_SRC} ${AWS_COMMON_ARCH_SRC} ${AWS_COMMON_AVX2_SRC})
list(APPEND AWS_PUBLIC_INCLUDES
"${AWS_COMMON_DIR}/include/"
"${CMAKE_CURRENT_BINARY_DIR}/include"
)
# aws-checksums
file(GLOB AWS_CHECKSUMS_SRC
"${AWS_CHECKSUMS_DIR}/source/*.c"
"${AWS_CHECKSUMS_DIR}/source/intel/*.c"
"${AWS_CHECKSUMS_DIR}/source/intel/asm/*.c"
"${AWS_CHECKSUMS_DIR}/source/arm/*.c"
)
if(AWS_ARCH_INTEL AND AWS_HAVE_GCC_INLINE_ASM)
file(GLOB AWS_CHECKSUMS_ARCH_SRC
"${AWS_CHECKSUMS_DIR}/source/intel/asm/*.c"
)
endif()
if (AWS_ARCH_ARM64)
file(GLOB AWS_CHECKSUMS_ARCH_SRC
"${AWS_CHECKSUMS_DIR}/source/arm/*.c"
)
set_source_files_properties("${AWS_CHECKSUMS_DIR}/source/arm/crc32c_arm.c" PROPERTIES COMPILE_FLAGS -march=armv8-a+crc)
elseif (AWS_ARCH_ARM32)
if (AWS_ARM32_CRC)
file(GLOB AWS_CHECKSUMS_ARCH_SRC
"${AWS_CHECKSUMS_DIR}/source/arm/*.c"
"${AWS_CHECKSUMS_DIR}/source/arm/asm/*.c"
)
set_source_files_properties("${AWS_CHECKSUMS_DIR}/source/arm/crc32c_arm.c" PROPERTIES COMPILE_FLAGS -march=armv8-a+crc)
endif()
endif()
list(APPEND AWS_SOURCES ${AWS_CHECKSUMS_SRC} ${AWS_CHECKSUMS_ARCH_SRC})
list(APPEND AWS_PRIVATE_INCLUDES "${AWS_CHECKSUMS_DIR}/include/")
# aws-c-io
file(GLOB AWS_IO_SRC
"${AWS_IO_DIR}/source/*.c"
)
if (OS_LINUX)
file(GLOB AWS_IO_OS_SRC
"${AWS_IO_DIR}/source/linux/*.c"
"${AWS_IO_DIR}/source/posix/*.c"
)
elseif (OS_DARWIN)
file(GLOB AWS_IO_OS_SRC
"${AWS_IO_DIR}/source/bsd/*.c"
"${AWS_IO_DIR}/source/posix/*.c"
)
endif()
set(AWS_IO_TLS_SRC)
if (USE_S2N)
file(GLOB AWS_IO_TLS_SRC
"${AWS_IO_DIR}/source/s2n/*.c"
)
endif()
list(APPEND AWS_SOURCES ${AWS_IO_SRC} ${AWS_IO_OS_SRC} ${AWS_IO_TLS_SRC})
list(APPEND AWS_PUBLIC_INCLUDES "${AWS_IO_DIR}/include/")
# aws-s2n-tls
if (USE_S2N)
file(GLOB AWS_S2N_TLS_SRC
"${AWS_S2N_TLS_DIR}/crypto/*.c"
"${AWS_S2N_TLS_DIR}/error/*.c"
"${AWS_S2N_TLS_DIR}/stuffer/*.c"
"${AWS_S2N_TLS_DIR}/pq-crypto/*.c"
"${AWS_S2N_TLS_DIR}/pq-crypto/kyber_r3/*.c"
"${AWS_S2N_TLS_DIR}/tls/*.c"
"${AWS_S2N_TLS_DIR}/tls/extensions/*.c"
"${AWS_S2N_TLS_DIR}/utils/*.c"
)
list(APPEND AWS_SOURCES ${AWS_S2N_TLS_SRC})
list(APPEND AWS_PRIVATE_INCLUDES
"${AWS_S2N_TLS_DIR}/"
"${AWS_S2N_TLS_DIR}/api/"
)
endif()
# aws-crt-cpp
file(GLOB AWS_CRT_SRC
"${AWS_CRT_DIR}/source/*.cpp"
"${AWS_CRT_DIR}/source/auth/*.cpp"
"${AWS_CRT_DIR}/source/crypto/*.cpp"
"${AWS_CRT_DIR}/source/endpoints/*.cpp"
"${AWS_CRT_DIR}/source/external/*.cpp"
"${AWS_CRT_DIR}/source/http/*.cpp"
"${AWS_CRT_DIR}/source/io/*.cpp"
)
list(APPEND AWS_SOURCES ${AWS_CRT_SRC})
list(APPEND AWS_PUBLIC_INCLUDES "${AWS_CRT_DIR}/include/")
# aws-c-mqtt
file(GLOB AWS_MQTT_SRC
"${AWS_MQTT_DIR}/source/*.c"
)
list(APPEND AWS_SOURCES ${AWS_MQTT_SRC})
list(APPEND AWS_PUBLIC_INCLUDES "${AWS_MQTT_DIR}/include/")
# aws-c-http
file(GLOB AWS_HTTP_SRC
"${AWS_HTTP_DIR}/source/*.c"
)
list(APPEND AWS_SOURCES ${AWS_HTTP_SRC})
list(APPEND AWS_PRIVATE_INCLUDES "${AWS_HTTP_DIR}/include/")
# aws-c-compression
file(GLOB AWS_COMPRESSION_SRC
"${AWS_COMPRESSION_DIR}/source/*.c"
)
list(APPEND AWS_SOURCES ${AWS_COMPRESSION_SRC})
list(APPEND AWS_PRIVATE_INCLUDES "${AWS_COMPRESSION_DIR}/include/")
# aws-c-s3
file(GLOB AWS_S3_SRC
"${AWS_S3_DIR}/source/*.c"
)
list(APPEND AWS_SOURCES ${AWS_S3_SRC})
list(APPEND AWS_PRIVATE_INCLUDES "${AWS_S3_DIR}/include/")
# aws-c-sdkutils
file(GLOB AWS_SDKUTILS_SRC
"${AWS_SDKUTILS_DIR}/source/*.c"
)
list(APPEND AWS_SOURCES ${AWS_SDKUTILS_SRC})
list(APPEND AWS_PUBLIC_INCLUDES "${AWS_SDKUTILS_DIR}/include/")
# Add library.
add_library(_aws ${AWS_SOURCES})
target_include_directories(_aws SYSTEM BEFORE PUBLIC ${AWS_PUBLIC_INCLUDES})
target_include_directories(_aws SYSTEM BEFORE PRIVATE ${AWS_PRIVATE_INCLUDES})
target_compile_definitions(_aws PUBLIC ${AWS_PUBLIC_COMPILE_DEFS})
target_compile_definitions(_aws PRIVATE ${AWS_PRIVATE_COMPILE_DEFS})
target_link_libraries(_aws PRIVATE ${AWS_PRIVATE_LIBS})
aws_set_thread_affinity_method(_aws)
aws_set_thread_name_method(_aws)
# The library is large - avoid bloat.
if (OMIT_HEAVY_DEBUG_SYMBOLS)
target_compile_options (_aws PRIVATE -g0)
endif()
add_library(ch_contrib::aws_s3 ALIAS _aws)
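A hedged consumer sketch (the dependent target name is a placeholder); ch_contrib::aws_s3 is the only name other CMake code should link against:
if (TARGET ch_contrib::aws_s3)
    target_link_libraries(my_consumer PRIVATE ch_contrib::aws_s3)   # my_consumer is hypothetical
endif()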

1 contrib/aws-crt-cpp vendored Submodule

@ -0,0 +1 @@
Subproject commit ec0bea288f451d884c0d80d534bc5c66241c39a4

1 contrib/aws-s2n-tls vendored Submodule

@ -0,0 +1 @@
Subproject commit 15d534e8a9ca1eda6bacee514e37d08b4f38a526


@ -1,122 +0,0 @@
if(NOT OS_FREEBSD)
option(ENABLE_S3 "Enable S3" ${ENABLE_LIBRARIES})
elseif(ENABLE_S3)
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use S3 on FreeBSD")
endif()
if(NOT ENABLE_S3)
message(STATUS "Not using S3")
return()
endif()
SET(AWS_S3_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws/aws-cpp-sdk-s3")
SET(AWS_CORE_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws/aws-cpp-sdk-core")
SET(AWS_CHECKSUMS_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-checksums")
SET(AWS_COMMON_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-common")
SET(AWS_EVENT_STREAM_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-event-stream")
OPTION(USE_AWS_MEMORY_MANAGEMENT "Aws memory management" OFF)
configure_file("${AWS_CORE_LIBRARY_DIR}/include/aws/core/SDKConfig.h.in"
"${CMAKE_CURRENT_BINARY_DIR}/include/aws/core/SDKConfig.h" @ONLY)
configure_file("${AWS_COMMON_LIBRARY_DIR}/include/aws/common/config.h.in"
"${CMAKE_CURRENT_BINARY_DIR}/include/aws/common/config.h" @ONLY)
file(GLOB AWS_CORE_SOURCES
"${AWS_CORE_LIBRARY_DIR}/source/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/auth/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/client/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/http/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/http/standard/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/config/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/external/cjson/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/external/tinyxml2/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/internal/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/monitoring/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/net/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/linux-shared/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/platform/linux-shared/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/base64/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/event/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/crypto/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/crypto/openssl/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/crypto/factory/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/json/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/logging/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/memory/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/memory/stl/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/stream/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/threading/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/xml/*.cpp"
)
file(GLOB AWS_S3_SOURCES
"${AWS_S3_LIBRARY_DIR}/source/*.cpp"
)
file(GLOB AWS_S3_MODEL_SOURCES
"${AWS_S3_LIBRARY_DIR}/source/model/*.cpp"
)
file(GLOB AWS_EVENT_STREAM_SOURCES
"${AWS_EVENT_STREAM_LIBRARY_DIR}/source/*.c"
)
file(GLOB AWS_COMMON_SOURCES
"${AWS_COMMON_LIBRARY_DIR}/source/*.c"
"${AWS_COMMON_LIBRARY_DIR}/source/posix/*.c"
)
file(GLOB AWS_CHECKSUMS_SOURCES
"${AWS_CHECKSUMS_LIBRARY_DIR}/source/*.c"
"${AWS_CHECKSUMS_LIBRARY_DIR}/source/intel/*.c"
"${AWS_CHECKSUMS_LIBRARY_DIR}/source/arm/*.c"
)
file(GLOB S3_UNIFIED_SRC
${AWS_EVENT_STREAM_SOURCES}
${AWS_COMMON_SOURCES}
${AWS_S3_SOURCES}
${AWS_S3_MODEL_SOURCES}
${AWS_CORE_SOURCES}
)
set(S3_INCLUDES
"${AWS_COMMON_LIBRARY_DIR}/include/"
"${AWS_EVENT_STREAM_LIBRARY_DIR}/include/"
"${AWS_S3_LIBRARY_DIR}/include/"
"${AWS_CORE_LIBRARY_DIR}/include/"
"${CMAKE_CURRENT_BINARY_DIR}/include/"
)
add_library(_aws_s3_checksums ${AWS_CHECKSUMS_SOURCES})
target_include_directories(_aws_s3_checksums SYSTEM PUBLIC "${AWS_CHECKSUMS_LIBRARY_DIR}/include/")
if(CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
target_compile_definitions(_aws_s3_checksums PRIVATE "-DDEBUG_BUILD")
endif()
set_target_properties(_aws_s3_checksums PROPERTIES LINKER_LANGUAGE C)
set_property(TARGET _aws_s3_checksums PROPERTY C_STANDARD 99)
add_library(_aws_s3 ${S3_UNIFIED_SRC})
target_compile_definitions(_aws_s3 PUBLIC "AWS_SDK_VERSION_MAJOR=1")
target_compile_definitions(_aws_s3 PUBLIC "AWS_SDK_VERSION_MINOR=7")
target_compile_definitions(_aws_s3 PUBLIC "AWS_SDK_VERSION_PATCH=231")
target_include_directories(_aws_s3 SYSTEM BEFORE PUBLIC ${S3_INCLUDES})
if (TARGET OpenSSL::SSL)
target_compile_definitions(_aws_s3 PUBLIC -DENABLE_OPENSSL_ENCRYPTION)
target_link_libraries(_aws_s3 PRIVATE OpenSSL::Crypto OpenSSL::SSL)
endif()
target_link_libraries(_aws_s3 PRIVATE _aws_s3_checksums)
# The library is large - avoid bloat.
if (OMIT_HEAVY_DEBUG_SYMBOLS)
target_compile_options (_aws_s3 PRIVATE -g0)
target_compile_options (_aws_s3_checksums PRIVATE -g0)
endif()
add_library(ch_contrib::aws_s3 ALIAS _aws_s3)


@ -1,4 +1,4 @@
if(ARCH_AMD64 OR ARCH_AARCH64 OR ARCH_PPC64LE)
if(ARCH_AMD64 OR ARCH_AARCH64 OR ARCH_PPC64LE OR ARCH_S390X)
option (ENABLE_BASE64 "Enable base64" ${ENABLE_LIBRARIES})
elseif(ENABLE_BASE64)
message (${RECONFIGURE_MESSAGE_LEVEL} "base64 library is only supported on x86_64, aarch64, ppc64le and s390x")


@ -139,13 +139,6 @@ if(NOT OPENSSL_NO_ASM)
endif()
endif()
if(BUILD_SHARED_LIBS)
add_definitions(-DBORINGSSL_SHARED_LIBRARY)
# Enable position-independent code globally. This is needed because
# some library targets are OBJECT libraries.
set(CMAKE_POSITION_INDEPENDENT_CODE TRUE)
endif()
set(
CRYPTO_ios_aarch64_SOURCES


@ -63,13 +63,8 @@ SET(SRCS
"${LIBRARY_DIR}/src/lib/windows_port.c"
)
if (USE_STATIC_LIBRARIES)
add_library(_c-ares STATIC ${SRCS})
target_compile_definitions(_c-ares PUBLIC CARES_STATICLIB)
else()
add_library(_c-ares SHARED ${SRCS})
target_compile_definitions(_c-ares PUBLIC CARES_BUILDING_LIBRARY)
endif()
add_library(_c-ares STATIC ${SRCS})
target_compile_definitions(_c-ares PUBLIC CARES_STATICLIB)
target_compile_definitions(_c-ares PRIVATE HAVE_CONFIG_H=1)

2 contrib/cctz vendored

@ -1 +1 @@
Subproject commit 5c8528fb35e89ee0b3a7157490423fba0d4dd7b5
Subproject commit 7c78edd52b4d65acc103c2f195818ffcabe6fe0d


@ -10,9 +10,6 @@ else()
endif()
option(ENABLE_RUST "Enable rust" ${DEFAULT_ENABLE_RUST})
message(STATUS ${ENABLE_RUST})
if(NOT ENABLE_RUST)
message(STATUS "Not using rust")
return()
@ -42,5 +39,7 @@ endif()
message(STATUS "Switched Rust target to ${Rust_CARGO_TARGET}")
# FindRust.cmake
list(APPEND CMAKE_MODULE_PATH "${ClickHouse_SOURCE_DIR}/contrib/corrosion/cmake")
# Define function corrosion_import_crate()
include ("${ClickHouse_SOURCE_DIR}/contrib/corrosion/cmake/Corrosion.cmake")

1 contrib/google-benchmark vendored Submodule

@ -0,0 +1 @@
Subproject commit 2257fa4d6afb8e5a2ccd510a70f38fe7fcdf1edf


@ -0,0 +1,34 @@
set (SRC_DIR "${ClickHouse_SOURCE_DIR}/contrib/google-benchmark/src")
set (SRCS
"${SRC_DIR}/benchmark.cc"
"${SRC_DIR}/benchmark_api_internal.cc"
"${SRC_DIR}/benchmark_name.cc"
"${SRC_DIR}/benchmark_register.cc"
"${SRC_DIR}/benchmark_runner.cc"
"${SRC_DIR}/check.cc"
"${SRC_DIR}/colorprint.cc"
"${SRC_DIR}/commandlineflags.cc"
"${SRC_DIR}/complexity.cc"
"${SRC_DIR}/console_reporter.cc"
"${SRC_DIR}/counter.cc"
"${SRC_DIR}/csv_reporter.cc"
"${SRC_DIR}/json_reporter.cc"
"${SRC_DIR}/perf_counters.cc"
"${SRC_DIR}/reporter.cc"
"${SRC_DIR}/sleep.cc"
"${SRC_DIR}/statistics.cc"
"${SRC_DIR}/string_util.cc"
"${SRC_DIR}/sysinfo.cc"
"${SRC_DIR}/timers.cc")
add_library(google_benchmark "${SRCS}")
target_include_directories(google_benchmark SYSTEM PUBLIC "${SRC_DIR}/../include")
add_library(google_benchmark_main "${SRC_DIR}/benchmark_main.cc")
target_link_libraries(google_benchmark_main PUBLIC google_benchmark)
add_library(google_benchmark_all INTERFACE)
target_link_libraries(google_benchmark_all INTERFACE google_benchmark google_benchmark_main)
add_library(ch_contrib::gbenchmark_all ALIAS google_benchmark_all)
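A usage sketch with a hypothetical benchmark executable:
add_executable(my_bench my_bench.cpp)                                # placeholder target
target_link_libraries(my_bench PRIVATE ch_contrib::gbenchmark_all)   # links benchmark and benchmark_main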

2 contrib/googletest vendored

@ -1 +1 @@
Subproject commit e7e591764baba0a0c3c9ad0014430e7a27331d16
Subproject commit 71140c3ca7a87bb1b5b9c9f1500fea8858cce344


@ -0,0 +1,435 @@
/* include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */
#ifndef JEMALLOC_INTERNAL_DEFS_H_
#define JEMALLOC_INTERNAL_DEFS_H_
/*
* If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
* public APIs to be prefixed. This makes it possible, with some care, to use
* multiple allocators simultaneously.
*/
/* #undef JEMALLOC_PREFIX */
/* #undef JEMALLOC_CPREFIX */
/*
* Define overrides for non-standard allocator-related functions if they are
* present on the system.
*/
/* #undef JEMALLOC_OVERRIDE___LIBC_CALLOC */
/* #undef JEMALLOC_OVERRIDE___LIBC_FREE */
/* #undef JEMALLOC_OVERRIDE___LIBC_MALLOC */
/* #undef JEMALLOC_OVERRIDE___LIBC_MEMALIGN */
/* #undef JEMALLOC_OVERRIDE___LIBC_REALLOC */
/* #undef JEMALLOC_OVERRIDE___LIBC_VALLOC */
/* #undef JEMALLOC_OVERRIDE___LIBC_PVALLOC */
#define JEMALLOC_OVERRIDE___POSIX_MEMALIGN
/*
* JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
* For shared libraries, symbol visibility mechanisms prevent these symbols
* from being exported, but for static libraries, naming collisions are a real
* possibility.
*/
#define JEMALLOC_PRIVATE_NAMESPACE je_
/*
* Hyper-threaded CPUs may need a special instruction inside spin loops in
* order to yield to another virtual CPU.
*/
#define CPU_SPINWAIT
/* 1 if CPU_SPINWAIT is defined, 0 otherwise. */
#define HAVE_CPU_SPINWAIT 0
/*
* Number of significant bits in virtual addresses. This may be less than the
* total number of bits in a pointer, e.g. on x64, for which the uppermost 16
* bits are the same as bit 47.
*/
#define LG_VADDR 64
/* Defined if C11 atomics are available. */
#define JEMALLOC_C11_ATOMICS
/* Defined if GCC __atomic atomics are available. */
#define JEMALLOC_GCC_ATOMIC_ATOMICS
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS
/* Defined if GCC __sync atomics are available. */
#define JEMALLOC_GCC_SYNC_ATOMICS
/* and the 8-bit variant support. */
#define JEMALLOC_GCC_U8_SYNC_ATOMICS
/*
* Defined if __builtin_clz() and __builtin_clzl() are available.
*/
#define JEMALLOC_HAVE_BUILTIN_CLZ
/*
* Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
*/
/* #undef JEMALLOC_OS_UNFAIR_LOCK */
/* Defined if syscall(2) is usable. */
#define JEMALLOC_USE_SYSCALL
/*
* Defined if secure_getenv(3) is available.
*/
/* #undef JEMALLOC_HAVE_SECURE_GETENV */
/*
* Defined if issetugid(2) is available.
*/
#define JEMALLOC_HAVE_ISSETUGID
/* Defined if pthread_atfork(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_ATFORK
/* Defined if pthread_setname_np(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_SETNAME_NP
/* Defined if pthread_getname_np(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_GETNAME_NP
/* Defined if pthread_get_name_np(3) is available. */
#define JEMALLOC_HAVE_PTHREAD_GET_NAME_NP
/*
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
*/
#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
/*
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
*/
#define JEMALLOC_HAVE_CLOCK_MONOTONIC
/*
* Defined if mach_absolute_time() is available.
*/
/* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */
/*
* Defined if clock_gettime(CLOCK_REALTIME, ...) is available.
*/
#define JEMALLOC_HAVE_CLOCK_REALTIME
/*
* Defined if _malloc_thread_cleanup() exists. At least in the case of
* FreeBSD, pthread_key_create() allocates, which if used during malloc
* bootstrapping will cause recursion into the pthreads library. Therefore, if
* _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
* malloc_tsd.
*/
#define JEMALLOC_MALLOC_THREAD_CLEANUP
/*
* Defined if threaded initialization is known to be safe on this platform.
* Among other things, it must be possible to initialize a mutex without
* triggering allocation in order for threaded allocation to be safe.
*/
/* #undef JEMALLOC_THREADED_INIT */
/*
* Defined if the pthreads implementation defines
* _pthread_mutex_init_calloc_cb(), in which case the function is used in order
* to avoid recursive allocation during mutex initialization.
*/
#define JEMALLOC_MUTEX_INIT_CB
/* Non-empty if the tls_model attribute is supported. */
#define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))
/*
* JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
* inline functions.
*/
/* #undef JEMALLOC_DEBUG */
/* JEMALLOC_STATS enables statistics calculation. */
#define JEMALLOC_STATS
/* JEMALLOC_EXPERIMENTAL_SMALLOCX_API enables experimental smallocx API. */
/* #undef JEMALLOC_EXPERIMENTAL_SMALLOCX_API */
/* JEMALLOC_PROF enables allocation profiling. */
/* #undef JEMALLOC_PROF */
/* Use libunwind for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_LIBUNWIND */
/* Use libgcc for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_LIBGCC */
/* Use gcc intrinsics for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_GCC */
/* JEMALLOC_PAGEID enables page id */
/* #undef JEMALLOC_PAGEID */
/* JEMALLOC_HAVE_PRCTL checks prctl */
/* #undef JEMALLOC_HAVE_PRCTL */
/*
* JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
* segment (DSS).
*/
#define JEMALLOC_DSS
/* Support memory filling (junk/zero). */
#define JEMALLOC_FILL
/* Support utrace(2)-based tracing. */
/* #undef JEMALLOC_UTRACE */
/* Support utrace(2)-based tracing (label based signature). */
/* #undef JEMALLOC_UTRACE_LABEL */
/* Support optional abort() on OOM. */
/* #undef JEMALLOC_XMALLOC */
/* Support lazy locking (avoid locking unless a second thread is launched). */
#define JEMALLOC_LAZY_LOCK
/*
* Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
* classes).
*/
/* #undef LG_QUANTUM */
/* One page is 2^LG_PAGE bytes. */
#define LG_PAGE 12
/* Maximum number of regions in a slab. */
/* #undef CONFIG_LG_SLAB_MAXREGS */
/*
* One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the
* system does not explicitly support huge pages; system calls that require
* explicit huge page support are separately configured.
*/
#define LG_HUGEPAGE 21
/*
* If defined, adjacent virtual memory mappings with identical attributes
* automatically coalesce, and they fragment when changes are made to subranges.
* This is the normal order of things for mmap()/munmap(), but on Windows
* VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
* mappings do *not* coalesce/fragment.
*/
#define JEMALLOC_MAPS_COALESCE
/*
* If defined, retain memory for later reuse by default rather than using e.g.
* munmap() to unmap freed extents. This is enabled on 64-bit Linux because
* common sequences of mmap()/munmap() calls will cause virtual memory map
* holes.
*/
/* #undef JEMALLOC_RETAIN */
/* TLS is used to map arenas and magazine caches to threads. */
#define JEMALLOC_TLS
/*
* Used to mark unreachable code to quiet "end of non-void" compiler warnings.
* Don't use this directly; instead use unreachable() from util.h
*/
#define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable
/*
* ffs*() functions to use for bitmapping. Don't use these directly; instead,
* use ffs_*() from util.h.
*/
#define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll
#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl
#define JEMALLOC_INTERNAL_FFS __builtin_ffs
/*
* popcount*() functions to use for bitmapping.
*/
#define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl
#define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount
/*
* If defined, explicitly attempt to more uniformly distribute large allocation
* pointer alignments across all cache indices.
*/
#define JEMALLOC_CACHE_OBLIVIOUS
/*
* If defined, enable logging facilities. We make this a configure option to
* avoid taking extra branches everywhere.
*/
/* #undef JEMALLOC_LOG */
/*
* If defined, use readlinkat() (instead of readlink()) to follow
* /etc/malloc_conf.
*/
/* #undef JEMALLOC_READLINKAT */
/*
* Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
*/
/* #undef JEMALLOC_ZONE */
/*
* Methods for determining whether the OS overcommits.
* JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
* /proc/sys/vm/overcommit_memory file.
* JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
*/
#define JEMALLOC_SYSCTL_VM_OVERCOMMIT
/* #undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY */
/* Defined if madvise(2) is available. */
#define JEMALLOC_HAVE_MADVISE
/*
* Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
* arguments to madvise(2).
*/
/* #undef JEMALLOC_HAVE_MADVISE_HUGE */
/*
* Methods for purging unused pages differ between operating systems.
*
* madvise(..., MADV_FREE) : This marks pages as being unused, such that they
* will be discarded rather than swapped out.
* madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is
* defined, this immediately discards pages,
* such that new pages will be demand-zeroed if
* the address region is later touched;
* otherwise this behaves similarly to
* MADV_FREE, though typically with higher
* system overhead.
*/
#define JEMALLOC_PURGE_MADVISE_FREE
#define JEMALLOC_PURGE_MADVISE_DONTNEED
/* #undef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS */
/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */
/* #undef JEMALLOC_DEFINE_MADVISE_FREE */
/*
* Defined if MADV_DO[NT]DUMP is supported as an argument to madvise.
*/
/* #undef JEMALLOC_MADVISE_DONTDUMP */
/*
* Defined if MADV_[NO]CORE is supported as an argument to madvise.
*/
#define JEMALLOC_MADVISE_NOCORE
/* Defined if mprotect(2) is available. */
#define JEMALLOC_HAVE_MPROTECT
/*
* Defined if transparent huge pages (THPs) are supported via the
* MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
*/
/* #undef JEMALLOC_THP */
/* Defined if posix_madvise is available. */
/* #undef JEMALLOC_HAVE_POSIX_MADVISE */
/*
* Method for purging unused pages using posix_madvise.
*
* posix_madvise(..., POSIX_MADV_DONTNEED)
*/
/* #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED */
/* #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS */
/*
* Defined if memcntl page admin call is supported
*/
/* #undef JEMALLOC_HAVE_MEMCNTL */
/*
* Defined if malloc_size is supported
*/
/* #undef JEMALLOC_HAVE_MALLOC_SIZE */
/* Define if operating system has alloca.h header. */
/* #undef JEMALLOC_HAS_ALLOCA_H */
/* C99 restrict keyword supported. */
#define JEMALLOC_HAS_RESTRICT
/* For use by hash code. */
/* #undef JEMALLOC_BIG_ENDIAN */
/* sizeof(int) == 2^LG_SIZEOF_INT. */
#define LG_SIZEOF_INT 2
/* sizeof(long) == 2^LG_SIZEOF_LONG. */
#define LG_SIZEOF_LONG 3
/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
#define LG_SIZEOF_LONG_LONG 3
/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#define LG_SIZEOF_INTMAX_T 3
/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
/* #undef JEMALLOC_GLIBC_MALLOC_HOOK */
/* glibc memalign hook. */
/* #undef JEMALLOC_GLIBC_MEMALIGN_HOOK */
/* pthread support */
#define JEMALLOC_HAVE_PTHREAD
/* dlsym() support */
#define JEMALLOC_HAVE_DLSYM
/* Adaptive mutex support in pthreads. */
#define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP
/* GNU specific sched_getcpu support */
#define JEMALLOC_HAVE_SCHED_GETCPU
/* GNU specific sched_setaffinity support */
#define JEMALLOC_HAVE_SCHED_SETAFFINITY
/*
* If defined, all the features necessary for background threads are present.
*/
#define JEMALLOC_BACKGROUND_THREAD
/*
* If defined, jemalloc symbols are not exported (doesn't work when
* JEMALLOC_PREFIX is not defined).
*/
/* #undef JEMALLOC_EXPORT */
/* config.malloc_conf options string. */
#define JEMALLOC_CONFIG_MALLOC_CONF ""
/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
#define JEMALLOC_IS_MALLOC
/*
* Defined if strerror_r returns char * if _GNU_SOURCE is defined.
*/
/* #undef JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE */
/* Performs additional safety checks when defined. */
/* #undef JEMALLOC_OPT_SAFETY_CHECKS */
/* Is C++ support being built? */
#define JEMALLOC_ENABLE_CXX
/* Performs additional size checks when defined. */
/* #undef JEMALLOC_OPT_SIZE_CHECKS */
/* Allows sampled junk and stash for checking use-after-free when defined. */
/* #undef JEMALLOC_UAF_DETECTION */
/* Darwin VM_MAKE_TAG support */
/* #undef JEMALLOC_HAVE_VM_MAKE_TAG */
/* If defined, realloc(ptr, 0) defaults to "free" instead of "alloc". */
/* #undef JEMALLOC_ZERO_REALLOC_DEFAULT_FREE */
#endif /* JEMALLOC_INTERNAL_DEFS_H_ */


@ -578,7 +578,7 @@ if(CMAKE_SYSTEM_NAME MATCHES "Darwin")
list(APPEND ALL_SRCS "${CMAKE_CURRENT_BINARY_DIR}/include_private/kcmrpc.c")
endif()
if (ENABLE_EXTERNAL_OPENSSL)
if (ENABLE_OPENSSL OR ENABLE_OPENSSL_DYNAMIC)
list(REMOVE_ITEM ALL_SRCS "${KRB5_SOURCE_DIR}/lib/crypto/openssl/enc_provider/aes.c")
list(APPEND ALL_SRCS "${CMAKE_CURRENT_SOURCE_DIR}/aes.c")
endif ()

302 contrib/krb5-cmake/aes.c Normal file

@ -0,0 +1,302 @@
/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/* lib/crypto/openssl/enc_provider/aes.c */
/*
* Copyright (C) 2003, 2007, 2008, 2009 by the Massachusetts Institute of Technology.
* All rights reserved.
*
* Export of this software from the United States of America may
* require a specific license from the United States Government.
* It is the responsibility of any person or organization contemplating
* export to obtain such a license before exporting.
*
* WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
* distribute this software and its documentation for any purpose and
* without fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright notice and
* this permission notice appear in supporting documentation, and that
* the name of M.I.T. not be used in advertising or publicity pertaining
* to distribution of the software without specific, written prior
* permission. Furthermore if you modify this software you must label
* your software as modified software and not distribute it in such a
* fashion that it might be confused with the original M.I.T. software.
* M.I.T. makes no representations about the suitability of
* this software for any purpose. It is provided "as is" without express
* or implied warranty.
*/
#include "crypto_int.h"
#include <openssl/evp.h>
#include <openssl/aes.h>
/* prototypes */
static krb5_error_code
cbc_enc(krb5_key key, const krb5_data *ivec, krb5_crypto_iov *data,
size_t num_data);
static krb5_error_code
cbc_decr(krb5_key key, const krb5_data *ivec, krb5_crypto_iov *data,
size_t num_data);
static krb5_error_code
cts_encr(krb5_key key, const krb5_data *ivec, krb5_crypto_iov *data,
size_t num_data, size_t dlen);
static krb5_error_code
cts_decr(krb5_key key, const krb5_data *ivec, krb5_crypto_iov *data,
size_t num_data, size_t dlen);
#define BLOCK_SIZE 16
#define NUM_BITS 8
#define IV_CTS_BUF_SIZE 16 /* 16 - hardcoded in CRYPTO_cts128_en/decrypt */
static const EVP_CIPHER *
map_mode(unsigned int len)
{
if (len==16)
return EVP_aes_128_cbc();
if (len==32)
return EVP_aes_256_cbc();
else
return NULL;
}
/* Encrypt one block using CBC. */
static krb5_error_code
cbc_enc(krb5_key key, const krb5_data *ivec, krb5_crypto_iov *data,
size_t num_data)
{
int ret, olen = BLOCK_SIZE;
unsigned char iblock[BLOCK_SIZE], oblock[BLOCK_SIZE];
EVP_CIPHER_CTX *ctx;
struct iov_cursor cursor;
ctx = EVP_CIPHER_CTX_new();
if (ctx == NULL)
return ENOMEM;
ret = EVP_EncryptInit_ex(ctx, map_mode(key->keyblock.length),
NULL, key->keyblock.contents, (ivec) ? (unsigned char*)ivec->data : NULL);
if (ret == 0) {
EVP_CIPHER_CTX_free(ctx);
return KRB5_CRYPTO_INTERNAL;
}
k5_iov_cursor_init(&cursor, data, num_data, BLOCK_SIZE, FALSE);
k5_iov_cursor_get(&cursor, iblock);
EVP_CIPHER_CTX_set_padding(ctx,0);
ret = EVP_EncryptUpdate(ctx, oblock, &olen, iblock, BLOCK_SIZE);
if (ret == 1)
k5_iov_cursor_put(&cursor, oblock);
EVP_CIPHER_CTX_free(ctx);
zap(iblock, BLOCK_SIZE);
zap(oblock, BLOCK_SIZE);
return (ret == 1) ? 0 : KRB5_CRYPTO_INTERNAL;
}
/* Decrypt one block using CBC. */
static krb5_error_code
cbc_decr(krb5_key key, const krb5_data *ivec, krb5_crypto_iov *data,
size_t num_data)
{
int ret = 0, olen = BLOCK_SIZE;
unsigned char iblock[BLOCK_SIZE], oblock[BLOCK_SIZE];
EVP_CIPHER_CTX *ctx;
struct iov_cursor cursor;
ctx = EVP_CIPHER_CTX_new();
if (ctx == NULL)
return ENOMEM;
ret = EVP_DecryptInit_ex(ctx, map_mode(key->keyblock.length),
NULL, key->keyblock.contents, (ivec) ? (unsigned char*)ivec->data : NULL);
if (ret == 0) {
EVP_CIPHER_CTX_free(ctx);
return KRB5_CRYPTO_INTERNAL;
}
k5_iov_cursor_init(&cursor, data, num_data, BLOCK_SIZE, FALSE);
k5_iov_cursor_get(&cursor, iblock);
EVP_CIPHER_CTX_set_padding(ctx,0);
ret = EVP_DecryptUpdate(ctx, oblock, &olen, iblock, BLOCK_SIZE);
if (ret == 1)
k5_iov_cursor_put(&cursor, oblock);
EVP_CIPHER_CTX_free(ctx);
zap(iblock, BLOCK_SIZE);
zap(oblock, BLOCK_SIZE);
return (ret == 1) ? 0 : KRB5_CRYPTO_INTERNAL;
}
static krb5_error_code
cts_encr(krb5_key key, const krb5_data *ivec, krb5_crypto_iov *data,
size_t num_data, size_t dlen)
{
int ret = 0;
size_t size = 0;
unsigned char *oblock = NULL, *dbuf = NULL;
unsigned char iv_cts[IV_CTS_BUF_SIZE];
struct iov_cursor cursor;
AES_KEY enck;
memset(iv_cts,0,sizeof(iv_cts));
if (ivec && ivec->data){
if (ivec->length != sizeof(iv_cts))
return KRB5_CRYPTO_INTERNAL;
memcpy(iv_cts, ivec->data,ivec->length);
}
oblock = OPENSSL_malloc(dlen);
if (!oblock){
return ENOMEM;
}
dbuf = OPENSSL_malloc(dlen);
if (!dbuf){
OPENSSL_free(oblock);
return ENOMEM;
}
k5_iov_cursor_init(&cursor, data, num_data, dlen, FALSE);
k5_iov_cursor_get(&cursor, dbuf);
AES_set_encrypt_key(key->keyblock.contents,
NUM_BITS * key->keyblock.length, &enck);
size = CRYPTO_cts128_encrypt((unsigned char *)dbuf, oblock, dlen, &enck,
iv_cts, AES_cbc_encrypt);
if (size <= 0)
ret = KRB5_CRYPTO_INTERNAL;
else
k5_iov_cursor_put(&cursor, oblock);
if (!ret && ivec && ivec->data)
memcpy(ivec->data, iv_cts, sizeof(iv_cts));
zap(oblock, dlen);
zap(dbuf, dlen);
OPENSSL_free(oblock);
OPENSSL_free(dbuf);
return ret;
}
static krb5_error_code
cts_decr(krb5_key key, const krb5_data *ivec, krb5_crypto_iov *data,
size_t num_data, size_t dlen)
{
int ret = 0;
size_t size = 0;
unsigned char *oblock = NULL;
unsigned char *dbuf = NULL;
unsigned char iv_cts[IV_CTS_BUF_SIZE];
struct iov_cursor cursor;
AES_KEY deck;
memset(iv_cts,0,sizeof(iv_cts));
if (ivec && ivec->data){
if (ivec->length != sizeof(iv_cts))
return KRB5_CRYPTO_INTERNAL;
memcpy(iv_cts, ivec->data,ivec->length);
}
oblock = OPENSSL_malloc(dlen);
if (!oblock)
return ENOMEM;
dbuf = OPENSSL_malloc(dlen);
if (!dbuf){
OPENSSL_free(oblock);
return ENOMEM;
}
AES_set_decrypt_key(key->keyblock.contents,
NUM_BITS * key->keyblock.length, &deck);
k5_iov_cursor_init(&cursor, data, num_data, dlen, FALSE);
k5_iov_cursor_get(&cursor, dbuf);
size = CRYPTO_cts128_decrypt((unsigned char *)dbuf, oblock,
dlen, &deck,
iv_cts, AES_cbc_encrypt);
if (size <= 0)
ret = KRB5_CRYPTO_INTERNAL;
else
k5_iov_cursor_put(&cursor, oblock);
if (!ret && ivec && ivec->data)
memcpy(ivec->data, iv_cts, sizeof(iv_cts));
zap(oblock, dlen);
zap(dbuf, dlen);
OPENSSL_free(oblock);
OPENSSL_free(dbuf);
return ret;
}
krb5_error_code
krb5int_aes_encrypt(krb5_key key, const krb5_data *ivec,
krb5_crypto_iov *data, size_t num_data)
{
int ret = 0;
size_t input_length, nblocks;
input_length = iov_total_length(data, num_data, FALSE);
nblocks = (input_length + BLOCK_SIZE - 1) / BLOCK_SIZE;
if (nblocks == 1) {
if (input_length != BLOCK_SIZE)
return KRB5_BAD_MSIZE;
ret = cbc_enc(key, ivec, data, num_data);
} else if (nblocks > 1) {
ret = cts_encr(key, ivec, data, num_data, input_length);
}
return ret;
}
krb5_error_code
krb5int_aes_decrypt(krb5_key key, const krb5_data *ivec,
krb5_crypto_iov *data, size_t num_data)
{
int ret = 0;
size_t input_length, nblocks;
input_length = iov_total_length(data, num_data, FALSE);
nblocks = (input_length + BLOCK_SIZE - 1) / BLOCK_SIZE;
if (nblocks == 1) {
if (input_length != BLOCK_SIZE)
return KRB5_BAD_MSIZE;
ret = cbc_decr(key, ivec, data, num_data);
} else if (nblocks > 1) {
ret = cts_decr(key, ivec, data, num_data, input_length);
}
return ret;
}
static krb5_error_code
krb5int_aes_init_state (const krb5_keyblock *key, krb5_keyusage usage,
krb5_data *state)
{
state->length = 16;
state->data = (void *) malloc(16);
if (state->data == NULL)
return ENOMEM;
memset(state->data, 0, state->length);
return 0;
}
const struct krb5_enc_provider krb5int_enc_aes128 = {
16,
16, 16,
krb5int_aes_encrypt,
krb5int_aes_decrypt,
NULL,
krb5int_aes_init_state,
krb5int_default_free_state
};
const struct krb5_enc_provider krb5int_enc_aes256 = {
16,
32, 32,
krb5int_aes_encrypt,
krb5int_aes_decrypt,
NULL,
krb5int_aes_init_state,
krb5int_default_free_state
};

1 contrib/libdivide vendored Submodule

@ -0,0 +1 @@
Subproject commit 3bd34388573681ce563348cdf04fe15d24770d04


@ -0,0 +1,7 @@
set(LIBDIVIDE_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libdivide")
add_library (_libdivide INTERFACE)
# for libdivide.h
target_include_directories (_libdivide SYSTEM BEFORE INTERFACE ${LIBDIVIDE_SOURCE_DIR})
# for libdivide-config.h
target_include_directories (_libdivide SYSTEM BEFORE INTERFACE .)
add_library (ch_contrib::libdivide ALIAS _libdivide)
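A usage sketch with a hypothetical consumer; the INTERFACE library contributes only include paths, so linking it just makes libdivide.h and libdivide-config.h visible:
add_library(my_math my_math.cpp)                               # placeholder target
target_link_libraries(my_math PRIVATE ch_contrib::libdivide)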


@ -0,0 +1,9 @@
#if defined(__SSE2__)
# define LIBDIVIDE_SSE2
#elif defined(__AVX512F__) || defined(__AVX512BW__) || defined(__AVX512VL__)
# define LIBDIVIDE_AVX512
#elif defined(__AVX2__)
# define LIBDIVIDE_AVX2
#elif defined(__aarch64__) && defined(__ARM_NEON)
# define LIBDIVIDE_NEON
#endif


@ -1,3 +0,0 @@
add_library (_libdivide INTERFACE)
target_include_directories (_libdivide SYSTEM BEFORE INTERFACE .)
add_library (ch_contrib::libdivide ALIAS _libdivide)


@ -1,20 +0,0 @@
libdivide
Copyright (C) 2010 ridiculous_fish
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
libdivide@ridiculousfish.com


@ -1,2 +0,0 @@
https://github.com/ridiculousfish/libdivide
http://libdivide.com/

File diff suppressed because it is too large.


@ -59,7 +59,7 @@ set(SRCS
add_library(_libpq ${SRCS})
if (ENABLE_EXTERNAL_OPENSSL)
if (ENABLE_OPENSSL OR ENABLE_OPENSSL_DYNAMIC)
add_definitions(-DHAVE_BIO_METH_NEW)
add_definitions(-DHAVE_HMAC_CTX_NEW)
add_definitions(-DHAVE_HMAC_CTX_FREE)


@ -43,7 +43,10 @@ set_target_properties(unwind PROPERTIES FOLDER "contrib/libunwind-cmake")
target_include_directories(unwind SYSTEM BEFORE PUBLIC $<BUILD_INTERFACE:${LIBUNWIND_SOURCE_DIR}/include>)
target_compile_definitions(unwind PRIVATE -D_LIBUNWIND_NO_HEAP=1 -D_DEBUG -D_LIBUNWIND_IS_NATIVE_ONLY)
target_compile_options(unwind PRIVATE -fno-exceptions -funwind-tables -fno-sanitize=all $<$<COMPILE_LANGUAGE:CXX>:-nostdinc++ -fno-rtti>)
# We should enable optimizations (otherwise it will be too slow in debug)
# and disable sanitizers (otherwise an infinite loop may happen)
target_compile_options(unwind PRIVATE -O3 -fno-exceptions -funwind-tables -fno-sanitize=all $<$<COMPILE_LANGUAGE:CXX>:-nostdinc++ -fno-rtti>)
check_c_compiler_flag(-Wunused-but-set-variable HAVE_WARNING_UNUSED_BUT_SET_VARIABLE)
if (HAVE_WARNING_UNUSED_BUT_SET_VARIABLE)


@ -136,11 +136,6 @@ add_library(ch_contrib::uv ALIAS _uv)
target_compile_definitions(_uv PRIVATE ${uv_defines})
target_include_directories(_uv SYSTEM PUBLIC ${SOURCE_DIR}/include PRIVATE ${SOURCE_DIR}/src)
target_link_libraries(_uv ${uv_libraries})
if (NOT USE_STATIC_LIBRARIES)
target_compile_definitions(_uv
INTERFACE USING_UV_SHARED=1
PRIVATE BUILDING_UV_SHARED=1)
endif()
if(UNIX)
# Now for some gibbering horrors from beyond the stars...


@ -6,8 +6,6 @@ endif()
option (ENABLE_EMBEDDED_COMPILER "Enable support for 'compile_expressions' option for query execution" ${ENABLE_EMBEDDED_COMPILER_DEFAULT})
# If USE_STATIC_LIBRARIES=0 was passed to CMake, we'll still build LLVM statically to keep complexity minimal.
if (NOT ENABLE_EMBEDDED_COMPILER)
message(STATUS "Not using LLVM")
return()


@ -0,0 +1,63 @@
/* include/lber_types.h. Generated from lber_types.hin by configure. */
/* $OpenLDAP$ */
/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
*
* Copyright 1998-2020 The OpenLDAP Foundation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted only as authorized by the OpenLDAP
* Public License.
*
* A copy of this license is available in file LICENSE in the
* top-level directory of the distribution or, alternatively, at
* <http://www.OpenLDAP.org/license.html>.
*/
/*
* LBER types
*/
#ifndef _LBER_TYPES_H
#define _LBER_TYPES_H
#include <ldap_cdefs.h>
LDAP_BEGIN_DECL
/* LBER boolean, enum, integers (32 bits or larger) */
#define LBER_INT_T int
/* LBER tags (32 bits or larger) */
#define LBER_TAG_T long
/* LBER socket descriptor */
#define LBER_SOCKET_T int
/* LBER lengths (32 bits or larger) */
#define LBER_LEN_T long
/* ------------------------------------------------------------ */
/* booleans, enumerations, and integers */
typedef LBER_INT_T ber_int_t;
/* signed and unsigned versions */
typedef signed LBER_INT_T ber_sint_t;
typedef unsigned LBER_INT_T ber_uint_t;
/* tags */
typedef unsigned LBER_TAG_T ber_tag_t;
/* "socket" descriptors */
typedef LBER_SOCKET_T ber_socket_t;
/* lengths */
typedef unsigned LBER_LEN_T ber_len_t;
/* signed lengths */
typedef signed LBER_LEN_T ber_slen_t;
LDAP_END_DECL
#endif /* _LBER_TYPES_H */


@ -0,0 +1,74 @@
/* include/ldap_config.h. Generated from ldap_config.hin by configure. */
/* $OpenLDAP$ */
/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
*
* Copyright 1998-2020 The OpenLDAP Foundation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted only as authorized by the OpenLDAP
* Public License.
*
* A copy of this license is available in file LICENSE in the
* top-level directory of the distribution or, alternatively, at
* <http://www.OpenLDAP.org/license.html>.
*/
/*
* This file works in conjunction with OpenLDAP configure system.
* If you do not like the values below, adjust your configure options.
*/
#ifndef _LDAP_CONFIG_H
#define _LDAP_CONFIG_H
/* directory separator */
#ifndef LDAP_DIRSEP
#ifndef _WIN32
#define LDAP_DIRSEP "/"
#else
#define LDAP_DIRSEP "\\"
#endif
#endif
/* directory for temporary files */
#if defined(_WIN32)
# define LDAP_TMPDIR "C:\\." /* we don't have much of a choice */
#elif defined( _P_tmpdir )
# define LDAP_TMPDIR _P_tmpdir
#elif defined( P_tmpdir )
# define LDAP_TMPDIR P_tmpdir
#elif defined( _PATH_TMPDIR )
# define LDAP_TMPDIR _PATH_TMPDIR
#else
# define LDAP_TMPDIR LDAP_DIRSEP "tmp"
#endif
/* directories */
#ifndef LDAP_BINDIR
#define LDAP_BINDIR "/tmp/ldap-prefix/bin"
#endif
#ifndef LDAP_SBINDIR
#define LDAP_SBINDIR "/tmp/ldap-prefix/sbin"
#endif
#ifndef LDAP_DATADIR
#define LDAP_DATADIR "/tmp/ldap-prefix/share/openldap"
#endif
#ifndef LDAP_SYSCONFDIR
#define LDAP_SYSCONFDIR "/tmp/ldap-prefix/etc/openldap"
#endif
#ifndef LDAP_LIBEXECDIR
#define LDAP_LIBEXECDIR "/tmp/ldap-prefix/libexec"
#endif
#ifndef LDAP_MODULEDIR
#define LDAP_MODULEDIR "/tmp/ldap-prefix/libexec/openldap"
#endif
#ifndef LDAP_RUNDIR
#define LDAP_RUNDIR "/tmp/ldap-prefix/var"
#endif
#ifndef LDAP_LOCALEDIR
#define LDAP_LOCALEDIR ""
#endif
#endif /* _LDAP_CONFIG_H */


@ -0,0 +1,61 @@
/* include/ldap_features.h. Generated from ldap_features.hin by configure. */
/* $OpenLDAP$ */
/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
*
* Copyright 1998-2020 The OpenLDAP Foundation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted only as authorized by the OpenLDAP
* Public License.
*
* A copy of this license is available in file LICENSE in the
* top-level directory of the distribution or, alternatively, at
* <http://www.OpenLDAP.org/license.html>.
*/
/*
* LDAP Features
*/
#ifndef _LDAP_FEATURES_H
#define _LDAP_FEATURES_H 1
/* OpenLDAP API version macros */
#define LDAP_VENDOR_VERSION 20501
#define LDAP_VENDOR_VERSION_MAJOR 2
#define LDAP_VENDOR_VERSION_MINOR 5
#define LDAP_VENDOR_VERSION_PATCH X
/*
** WORK IN PROGRESS!
**
** OpenLDAP reentrancy/thread-safeness should be dynamically
** checked using ldap_get_option().
**
** The -lldap implementation is not thread-safe.
**
** The -lldap_r implementation is:
** LDAP_API_FEATURE_THREAD_SAFE (basic thread safety)
** but may also be:
** LDAP_API_FEATURE_SESSION_THREAD_SAFE
** LDAP_API_FEATURE_OPERATION_THREAD_SAFE
**
** The preprocessor flag LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE
** can be used to determine if -lldap_r is available at compile
** time. You must define LDAP_THREAD_SAFE if and only if you
** link with -lldap_r.
**
** If you fail to define LDAP_THREAD_SAFE when linking with
** -lldap_r or define LDAP_THREAD_SAFE when linking with -lldap,
** provided header definitions and declarations may be incorrect.
**
*/
/* is -lldap_r available or not */
#define LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE 1
/* LDAP v2 Referrals */
/* #undef LDAP_API_FEATURE_X_OPENLDAP_V2_REFERRALS */
#endif /* _LDAP_FEATURES_H */

File diff suppressed because it is too large.

1 contrib/openssl vendored Submodule

@ -0,0 +1 @@
Subproject commit 19cc035b6c6f2283573d29c7ea7f7d675cf750ce

File diff suppressed because it is too large.


@ -0,0 +1,160 @@
/*
* WARNING: do not edit!
* Generated by Makefile from providers/common/der/der_digests_gen.c.in
*
* Copyright 2020-2021 The OpenSSL Project Authors. All Rights Reserved.
*
* Licensed under the Apache License 2.0 (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
* in the file LICENSE in the source distribution or at
* https://www.openssl.org/source/license.html
*/
#include "prov/der_digests.h"
/* Well known OIDs precompiled */
/*
* sigAlgs OBJECT IDENTIFIER ::= { nistAlgorithms 3 }
*/
const unsigned char ossl_der_oid_sigAlgs[DER_OID_SZ_sigAlgs] = {
DER_OID_V_sigAlgs
};
/*
* id-sha1 OBJECT IDENTIFIER ::= { iso(1)
* identified-organization(3) oiw(14)
* secsig(3) algorithms(2) 26 }
*/
const unsigned char ossl_der_oid_id_sha1[DER_OID_SZ_id_sha1] = {
DER_OID_V_id_sha1
};
/*
* id-md2 OBJECT IDENTIFIER ::= {
* iso(1) member-body(2) us(840) rsadsi(113549) digestAlgorithm(2) 2 }
*/
const unsigned char ossl_der_oid_id_md2[DER_OID_SZ_id_md2] = {
DER_OID_V_id_md2
};
/*
* id-md5 OBJECT IDENTIFIER ::= {
* iso(1) member-body(2) us(840) rsadsi(113549) digestAlgorithm(2) 5 }
*/
const unsigned char ossl_der_oid_id_md5[DER_OID_SZ_id_md5] = {
DER_OID_V_id_md5
};
/*
* id-sha256 OBJECT IDENTIFIER ::= { hashAlgs 1 }
*/
const unsigned char ossl_der_oid_id_sha256[DER_OID_SZ_id_sha256] = {
DER_OID_V_id_sha256
};
/*
* id-sha384 OBJECT IDENTIFIER ::= { hashAlgs 2 }
*/
const unsigned char ossl_der_oid_id_sha384[DER_OID_SZ_id_sha384] = {
DER_OID_V_id_sha384
};
/*
* id-sha512 OBJECT IDENTIFIER ::= { hashAlgs 3 }
*/
const unsigned char ossl_der_oid_id_sha512[DER_OID_SZ_id_sha512] = {
DER_OID_V_id_sha512
};
/*
* id-sha224 OBJECT IDENTIFIER ::= { hashAlgs 4 }
*/
const unsigned char ossl_der_oid_id_sha224[DER_OID_SZ_id_sha224] = {
DER_OID_V_id_sha224
};
/*
* id-sha512-224 OBJECT IDENTIFIER ::= { hashAlgs 5 }
*/
const unsigned char ossl_der_oid_id_sha512_224[DER_OID_SZ_id_sha512_224] = {
DER_OID_V_id_sha512_224
};
/*
* id-sha512-256 OBJECT IDENTIFIER ::= { hashAlgs 6 }
*/
const unsigned char ossl_der_oid_id_sha512_256[DER_OID_SZ_id_sha512_256] = {
DER_OID_V_id_sha512_256
};
/*
* id-sha3-224 OBJECT IDENTIFIER ::= { hashAlgs 7 }
*/
const unsigned char ossl_der_oid_id_sha3_224[DER_OID_SZ_id_sha3_224] = {
DER_OID_V_id_sha3_224
};
/*
* id-sha3-256 OBJECT IDENTIFIER ::= { hashAlgs 8 }
*/
const unsigned char ossl_der_oid_id_sha3_256[DER_OID_SZ_id_sha3_256] = {
DER_OID_V_id_sha3_256
};
/*
* id-sha3-384 OBJECT IDENTIFIER ::= { hashAlgs 9 }
*/
const unsigned char ossl_der_oid_id_sha3_384[DER_OID_SZ_id_sha3_384] = {
DER_OID_V_id_sha3_384
};
/*
* id-sha3-512 OBJECT IDENTIFIER ::= { hashAlgs 10 }
*/
const unsigned char ossl_der_oid_id_sha3_512[DER_OID_SZ_id_sha3_512] = {
DER_OID_V_id_sha3_512
};
/*
* id-shake128 OBJECT IDENTIFIER ::= { hashAlgs 11 }
*/
const unsigned char ossl_der_oid_id_shake128[DER_OID_SZ_id_shake128] = {
DER_OID_V_id_shake128
};
/*
* id-shake256 OBJECT IDENTIFIER ::= { hashAlgs 12 }
*/
const unsigned char ossl_der_oid_id_shake256[DER_OID_SZ_id_shake256] = {
DER_OID_V_id_shake256
};
/*
* id-shake128-len OBJECT IDENTIFIER ::= { hashAlgs 17 }
*/
const unsigned char ossl_der_oid_id_shake128_len[DER_OID_SZ_id_shake128_len] = {
DER_OID_V_id_shake128_len
};
/*
* id-shake256-len OBJECT IDENTIFIER ::= { hashAlgs 18 }
*/
const unsigned char ossl_der_oid_id_shake256_len[DER_OID_SZ_id_shake256_len] = {
DER_OID_V_id_shake256_len
};
/*
* id-KMACWithSHAKE128 OBJECT IDENTIFIER ::={hashAlgs 19}
*/
const unsigned char ossl_der_oid_id_KMACWithSHAKE128[DER_OID_SZ_id_KMACWithSHAKE128] = {
DER_OID_V_id_KMACWithSHAKE128
};
/*
* id-KMACWithSHAKE256 OBJECT IDENTIFIER ::={ hashAlgs 20}
*/
const unsigned char ossl_der_oid_id_KMACWithSHAKE256[DER_OID_SZ_id_KMACWithSHAKE256] = {
DER_OID_V_id_KMACWithSHAKE256
};


@ -0,0 +1,94 @@
/*
* WARNING: do not edit!
* Generated by Makefile from providers/common/der/der_dsa_gen.c.in
*
* Copyright 2020-2021 The OpenSSL Project Authors. All Rights Reserved.
*
* Licensed under the Apache License 2.0 (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
* in the file LICENSE in the source distribution or at
* https://www.openssl.org/source/license.html
*/
/*
* DSA low level APIs are deprecated for public use, but still ok for
* internal use.
*/
#include "internal/deprecated.h"
#include "prov/der_dsa.h"
/* Well known OIDs precompiled */
/*
* id-dsa OBJECT IDENTIFIER ::= {
* iso(1) member-body(2) us(840) x9-57(10040) x9algorithm(4) 1 }
*/
const unsigned char ossl_der_oid_id_dsa[DER_OID_SZ_id_dsa] = {
DER_OID_V_id_dsa
};
/*
* id-dsa-with-sha1 OBJECT IDENTIFIER ::= {
* iso(1) member-body(2) us(840) x9-57 (10040) x9algorithm(4) 3 }
*/
const unsigned char ossl_der_oid_id_dsa_with_sha1[DER_OID_SZ_id_dsa_with_sha1] = {
DER_OID_V_id_dsa_with_sha1
};
/*
* id-dsa-with-sha224 OBJECT IDENTIFIER ::= { sigAlgs 1 }
*/
const unsigned char ossl_der_oid_id_dsa_with_sha224[DER_OID_SZ_id_dsa_with_sha224] = {
DER_OID_V_id_dsa_with_sha224
};
/*
* id-dsa-with-sha256 OBJECT IDENTIFIER ::= { sigAlgs 2 }
*/
const unsigned char ossl_der_oid_id_dsa_with_sha256[DER_OID_SZ_id_dsa_with_sha256] = {
DER_OID_V_id_dsa_with_sha256
};
/*
* id-dsa-with-sha384 OBJECT IDENTIFIER ::= { sigAlgs 3 }
*/
const unsigned char ossl_der_oid_id_dsa_with_sha384[DER_OID_SZ_id_dsa_with_sha384] = {
DER_OID_V_id_dsa_with_sha384
};
/*
* id-dsa-with-sha512 OBJECT IDENTIFIER ::= { sigAlgs 4 }
*/
const unsigned char ossl_der_oid_id_dsa_with_sha512[DER_OID_SZ_id_dsa_with_sha512] = {
DER_OID_V_id_dsa_with_sha512
};
/*
* id-dsa-with-sha3-224 OBJECT IDENTIFIER ::= { sigAlgs 5 }
*/
const unsigned char ossl_der_oid_id_dsa_with_sha3_224[DER_OID_SZ_id_dsa_with_sha3_224] = {
DER_OID_V_id_dsa_with_sha3_224
};
/*
* id-dsa-with-sha3-256 OBJECT IDENTIFIER ::= { sigAlgs 6 }
*/
const unsigned char ossl_der_oid_id_dsa_with_sha3_256[DER_OID_SZ_id_dsa_with_sha3_256] = {
DER_OID_V_id_dsa_with_sha3_256
};
/*
* id-dsa-with-sha3-384 OBJECT IDENTIFIER ::= { sigAlgs 7 }
*/
const unsigned char ossl_der_oid_id_dsa_with_sha3_384[DER_OID_SZ_id_dsa_with_sha3_384] = {
DER_OID_V_id_dsa_with_sha3_384
};
/*
* id-dsa-with-sha3-512 OBJECT IDENTIFIER ::= { sigAlgs 8 }
*/
const unsigned char ossl_der_oid_id_dsa_with_sha3_512[DER_OID_SZ_id_dsa_with_sha3_512] = {
DER_OID_V_id_dsa_with_sha3_512
};

Some files were not shown because too many files have changed in this diff.