Merge branch 'master' into analyzer-fuzz-4

Alexey Milovidov 2024-06-05 04:14:17 +02:00
commit adc5677313
9,524 changed files with 418,999 additions and 403,443 deletions

@@ -89,7 +89,7 @@ PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 60
RemoveBracesLLVM: true
RemoveBracesLLVM: false
SpaceAfterCStyleCast: false
SpaceBeforeAssignmentOperators: true
SpaceBeforeParens: ControlStatements
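
For context, RemoveBracesLLVM is the clang-format option that strips optional braces from single-statement control-flow bodies; flipping it to false leaves them as written. A minimal C++ illustration (hypothetical code, not from the repository):

    #include <cstdio>

    int sign(int x)
    {
        // With RemoveBracesLLVM: true, clang-format would strip the braces
        // around this single-statement body; with false they are kept.
        if (x > 0)
        {
            std::printf("positive\n");
        }
        return x;
    }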

@@ -5,128 +5,138 @@
# a) the new check is not controversial (this includes many checks in readability-* and google-*) or
# b) too noisy (checks with > 100 new warnings are considered noisy, this includes e.g. cppcoreguidelines-*).
# TODO: Once clang(-tidy) 17 is the minimum, we can convert this list to YAML
# See https://releases.llvm.org/17.0.1/tools/clang/tools/extra/docs/ReleaseNotes.html#improvements-to-clang-tidy
HeaderFilterRegex: '^.*/(base|src|programs|utils)/.*(h|hpp)$'
# TODO Let clang-tidy check headers in further directories
# --> HeaderFilterRegex: '^.*/(src|base|programs|utils)/.*(h|hpp)$'
HeaderFilterRegex: '^.*/(base)/.*(h|hpp)$'
Checks: [
'*',
Checks: '*,
-abseil-*,
'-abseil-*',
-altera-*,
'-altera-*',
-android-*,
'-android-*',
-bugprone-assignment-in-if-condition,
-bugprone-branch-clone,
-bugprone-easily-swappable-parameters,
-bugprone-exception-escape,
-bugprone-implicit-widening-of-multiplication-result,
-bugprone-narrowing-conversions,
-bugprone-not-null-terminated-result,
-bugprone-reserved-identifier, # useful but too slow, TODO retry when https://reviews.llvm.org/rG1c282052624f9d0bd273bde0b47b30c96699c6c7 is merged
-bugprone-unchecked-optional-access,
'-bugprone-assignment-in-if-condition',
'-bugprone-branch-clone',
'-bugprone-easily-swappable-parameters',
'-bugprone-exception-escape',
'-bugprone-forward-declaration-namespace',
'-bugprone-implicit-widening-of-multiplication-result',
'-bugprone-multi-level-implicit-pointer-conversion',
'-bugprone-narrowing-conversions',
'-bugprone-not-null-terminated-result',
'-bugprone-reserved-identifier', # useful but too slow, TODO retry when https://reviews.llvm.org/rG1c282052624f9d0bd273bde0b47b30c96699c6c7 is merged
'-bugprone-unchecked-optional-access',
-cert-dcl16-c,
-cert-dcl37-c,
-cert-dcl51-cpp,
-cert-err58-cpp,
-cert-msc32-c,
-cert-msc51-cpp,
-cert-oop54-cpp,
-cert-oop57-cpp,
'-cert-dcl16-c',
'-cert-dcl37-c',
'-cert-dcl51-cpp',
'-cert-err58-cpp',
'-cert-msc32-c',
'-cert-msc51-cpp',
'-cert-oop54-cpp',
'-cert-oop57-cpp',
-clang-analyzer-unix.Malloc,
'-clang-analyzer-optin.core.EnumCastOutOfRange', # https://github.com/abseil/abseil-cpp/issues/1667
'-clang-analyzer-optin.performance.Padding',
-cppcoreguidelines-*, # impractical in a codebase as large as ClickHouse, also slow
'-clang-analyzer-unix.Malloc',
-darwin-*,
'-cppcoreguidelines-*', # impractical in a codebase as large as ClickHouse, also slow
-fuchsia-*,
'-darwin-*',
-google-build-using-namespace,
-google-readability-braces-around-statements,
-google-readability-casting,
-google-readability-function-size,
-google-readability-namespace-comments,
-google-readability-todo,
'-fuchsia-*',
-hicpp-avoid-c-arrays,
-hicpp-avoid-goto,
-hicpp-braces-around-statements,
-hicpp-explicit-conversions,
-hicpp-function-size,
-hicpp-member-init,
-hicpp-move-const-arg,
-hicpp-multiway-paths-covered,
-hicpp-named-parameter,
-hicpp-no-array-decay,
-hicpp-no-assembler,
-hicpp-no-malloc,
-hicpp-signed-bitwise,
-hicpp-special-member-functions,
-hicpp-uppercase-literal-suffix,
-hicpp-use-auto,
-hicpp-use-emplace,
-hicpp-vararg,
'-google-build-using-namespace',
'-google-readability-braces-around-statements',
'-google-readability-casting',
'-google-readability-function-size',
'-google-readability-namespace-comments',
'-google-readability-todo',
-linuxkernel-*,
'-hicpp-avoid-c-arrays',
'-hicpp-avoid-goto',
'-hicpp-braces-around-statements',
'-hicpp-explicit-conversions',
'-hicpp-function-size',
'-hicpp-member-init',
'-hicpp-move-const-arg',
'-hicpp-multiway-paths-covered',
'-hicpp-named-parameter',
'-hicpp-no-array-decay',
'-hicpp-no-assembler',
'-hicpp-no-malloc',
'-hicpp-signed-bitwise',
'-hicpp-special-member-functions',
'-hicpp-uppercase-literal-suffix',
'-hicpp-use-auto',
'-hicpp-use-emplace',
'-hicpp-vararg',
-llvm-*,
'-linuxkernel-*',
-llvmlibc-*,
'-llvm-*',
-openmp-*,
'-llvmlibc-*',
-misc-const-correctness,
-misc-include-cleaner, # useful but far too many occurrences
-misc-no-recursion,
-misc-non-private-member-variables-in-classes,
-misc-confusable-identifiers, # useful but slooow
-misc-use-anonymous-namespace,
'-openmp-*',
-modernize-avoid-c-arrays,
-modernize-concat-nested-namespaces,
-modernize-macro-to-enum,
-modernize-pass-by-value,
-modernize-return-braced-init-list,
-modernize-use-auto,
-modernize-use-default-member-init,
-modernize-use-emplace,
-modernize-use-nodiscard,
-modernize-use-override,
-modernize-use-trailing-return-type,
'-misc-const-correctness',
'-misc-include-cleaner', # useful but far too many occurrences
'-misc-no-recursion',
'-misc-non-private-member-variables-in-classes',
'-misc-confusable-identifiers', # useful but slooow
'-misc-use-anonymous-namespace',
-performance-inefficient-string-concatenation,
-performance-no-int-to-ptr,
-performance-avoid-endl,
-performance-unnecessary-value-param,
'-modernize-avoid-c-arrays',
'-modernize-concat-nested-namespaces',
'-modernize-macro-to-enum',
'-modernize-pass-by-value',
'-modernize-return-braced-init-list',
'-modernize-use-auto',
'-modernize-use-constraints', # This is a good check, but clang-tidy crashes, see https://github.com/llvm/llvm-project/issues/91872
'-modernize-use-default-member-init',
'-modernize-use-emplace',
'-modernize-use-nodiscard',
'-modernize-use-trailing-return-type',
-portability-simd-intrinsics,
'-performance-enum-size',
'-performance-inefficient-string-concatenation',
'-performance-no-int-to-ptr',
'-performance-avoid-endl',
'-performance-unnecessary-value-param',
-readability-avoid-unconditional-preprocessor-if,
-readability-braces-around-statements,
-readability-convert-member-functions-to-static,
-readability-else-after-return,
-readability-function-cognitive-complexity,
-readability-function-size,
-readability-identifier-length,
-readability-identifier-naming, # useful but too slow
-readability-implicit-bool-conversion,
-readability-isolate-declaration,
-readability-magic-numbers,
-readability-named-parameter,
-readability-redundant-declaration,
-readability-simplify-boolean-expr,
-readability-static-accessed-through-instance,
-readability-suspicious-call-argument,
-readability-uppercase-literal-suffix,
-readability-use-anyofallof,
'-portability-simd-intrinsics',
-zircon-*,
'
'-readability-avoid-nested-conditional-operator',
'-readability-avoid-unconditional-preprocessor-if',
'-readability-braces-around-statements',
'-readability-convert-member-functions-to-static',
'-readability-else-after-return',
'-readability-function-cognitive-complexity',
'-readability-function-size',
'-readability-identifier-length',
'-readability-identifier-naming', # useful but too slow
'-readability-implicit-bool-conversion',
'-readability-isolate-declaration',
'-readability-magic-numbers',
'-readability-named-parameter',
'-readability-redundant-declaration',
'-readability-redundant-inline-specifier', # useful but incompatible with __attribute__((always_inline)) (aka. ALWAYS_INLINE, base/base/defines.h).
# ALWAYS_INLINE only has an effect if combined with `inline`: https://godbolt.org/z/Eefd74qdM (see the sketch after this file)
'-readability-redundant-member-init', # Useful but triggers another problem. Imagine a struct S with multiple String members. Structs are often instantiated via designated
# initializer S s{.s1 = [...], .s2 = [...], [...]}. In this case, compiler warning `missing-field-initializers` requires specifying all members that are not in-struct
# initialized (example: s1 in struct S { String s1; String s2{};}; is not in-struct initialized, therefore it must be specified at instantiation time). As explicitly
# specifying all members is tedious for large structs, `missing-field-initializers` makes programmers initialize as many members as possible in-struct. Clang-tidy
# warning `readability-redundant-member-init` does the opposite, so the two are incompatible (see the sketch after this file).
'-readability-simplify-boolean-expr',
'-readability-suspicious-call-argument',
'-readability-uppercase-literal-suffix',
'-readability-use-anyofallof',
'-zircon-*'
]
WarningsAsErrors: '*'
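
Three illustrative C++ sketches follow (hypothetical code, not from the repository) for the settings above.

First, the two HeaderFilterRegex values near the top of this file can be sanity-checked against sample paths; the narrowed pattern only admits headers under base/:

    #include <iostream>
    #include <regex>

    int main()
    {
        const std::regex narrowed(R"(^.*/(base)/.*(h|hpp)$)");                  // current value
        const std::regex broad(R"(^.*/(base|src|programs|utils)/.*(h|hpp)$)");  // value from the TODO comment

        // Illustrative paths, not real repository layout guarantees.
        for (const char * path : {"repo/base/base/defines.h", "repo/src/Core/Field.h"})
            std::cout << path << ": narrowed=" << std::regex_match(path, narrowed)
                      << " broad=" << std::regex_match(path, broad) << '\n';
    }

Second, for readability-redundant-inline-specifier: per the comment (and the godbolt link), the `inline` keyword looks redundant to the check but is required for the attribute to take effect. A minimal sketch, assuming ALWAYS_INLINE is defined as in base/base/defines.h:

    // Assumption: this matches the definition in base/base/defines.h.
    #define ALWAYS_INLINE __attribute__((__always_inline__))

    // The check would flag 'inline' as redundant, yet removing it would
    // disable the effect of the attribute, so the check is turned off.
    inline ALWAYS_INLINE int twice(int x) { return 2 * x; }

Third, for readability-redundant-member-init: a sketch of the struct S scenario from the comment, showing why the check and -Wmissing-field-initializers pull in opposite directions:

    #include <string>
    using String = std::string;

    struct S
    {
        String s1;   // not in-struct initialized: -Wmissing-field-initializers
                     // demands it be specified at every instantiation
        String s2{}; // in-struct initialized and therefore omittable below, but
                     // the '{}' is what readability-redundant-member-init flags
    };

    S s{.s1 = "value"}; // designated initializer; s2 may be left out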

@@ -10,3 +10,11 @@ assignees: ''
> Make sure to check documentation https://clickhouse.com/docs/en/ first. If the question is concise and probably has a short answer, asking it in [community Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-1gh9ds7f4-PgDhJAaF8ad5RbWBAAjzFg) is probably the fastest way to find the answer. For more complicated questions, consider asking them on StackOverflow with "clickhouse" tag https://stackoverflow.com/questions/tagged/clickhouse
> If you still prefer GitHub issues, remove all this text and ask your question here.
**Company or project name**
Put your company name or project description here
**Question**
Your question

@@ -9,6 +9,10 @@ assignees: ''
> (you don't have to strictly follow this form)
**Company or project name**
> Put your company name or project description here
**Use case**
> A clear and concise description of what the intended usage scenario is.

@@ -9,6 +9,10 @@ assignees: ''
(you don't have to strictly follow this form)
**Company or project name**
Put your company name or project description here
**Describe the unexpected behaviour**
A clear and concise description of what does not work as it is supposed to.

@@ -9,6 +9,10 @@ assignees: ''
(you don't have to strictly follow this form)
**Company or project name**
Put your company name or project description here
**Describe the unexpected behaviour**
A clear and concise description of what does not work as it is supposed to.

@@ -9,6 +9,9 @@ assignees: ''
(you don't have to strictly follow this form)
**Company or project name**
Put your company name or project description here
**Describe the issue**
A clear and concise description of what does not work as it is supposed to.

@@ -9,6 +9,10 @@ assignees: ''
> Make sure that `git diff` result is empty and you've just pulled fresh master. Try cleaning up cmake cache. Just in case, official build instructions are published here: https://clickhouse.com/docs/en/development/build/
**Company or project name**
> Put your company name or project description here
**Operating system**
> OS kind or distribution, specific version/release, non-standard kernel if any. If you are trying to build inside virtual machine, please mention it too.

@@ -8,6 +8,9 @@ labels: comp-documentation
(you don't have to strictly follow this form)
**Company or project name**
Put your company name or project description here
**Describe the issue**
A clear and concise description of what's wrong in the documentation.

@@ -9,6 +9,9 @@ assignees: ''
(you don't have to strictly follow this form)
**Company or project name**
Put your company name or project description here
**Describe the situation**
What exactly works slower than expected?

@@ -9,6 +9,9 @@ assignees: ''
(you don't have to strictly follow this form)
**Company or project name**
Put your company name or project description here
**Describe the issue**
A clear and concise description of what does not work as it is supposed to.

@@ -11,13 +11,17 @@ assignees: ''
> You have to provide the following information whenever possible.
**Company or project name**
> Put your company name or project description here
**Describe what's wrong**
> A clear and concise description of what does not work as it is supposed to.
> A link to reproducer in [https://fiddle.clickhouse.com/](https://fiddle.clickhouse.com/).
**Does it reproduce on recent release?**
**Does it reproduce on the most recent release?**
[The list of releases](https://github.com/ClickHouse/ClickHouse/blob/master/utils/list-versions/version_date.tsv)
@@ -34,11 +38,11 @@ assignees: ''
**How to reproduce**
* Which ClickHouse server version to use
* Which interface to use, if matters
* Which interface to use, if it matters
* Non-default settings, if any
* `CREATE TABLE` statements for all tables involved
* Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary
* Queries to run that lead to unexpected result
* Queries to run that lead to an unexpected result
**Expected behavior**

@@ -7,6 +7,10 @@ assignees: ''
---
**Company or project name**
Put your company name or project description here
**I have tried the following solutions**: https://clickhouse.com/docs/en/faq/troubleshooting/#troubleshooting-installation-errors
**Installation type**

@@ -11,7 +11,9 @@ tests/ci/cancel_and_rerun_workflow_lambda/app.py
- Backward Incompatible Change
- Build/Testing/Packaging Improvement
- Documentation (changelog entry is not required)
- Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)
- Bug Fix (user-visible misbehavior in an official stable release)
- CI Fix or Improvement (changelog entry is not required)
- Not for changelog (changelog entry is not required)
@@ -39,3 +41,28 @@ At a minimum, the following information should be added (but add more as needed)
> Information about CI checks: https://clickhouse.com/docs/en/development/continuous-integration/
#### CI Settings (Only check the boxes if you know what you are doing):
- [ ] <!---ci_set_required--> Allow: All Required Checks
- [ ] <!---ci_include_stateless--> Allow: Stateless tests
- [ ] <!---ci_include_stateful--> Allow: Stateful tests
- [ ] <!---ci_include_integration--> Allow: Integration Tests
- [ ] <!---ci_include_performance--> Allow: Performance tests
- [ ] <!---ci_set_non_required--> Allow: All NOT Required Checks
- [ ] <!---batch_0_1--> Allow: batch 1, 2 for multi-batch jobs
- [ ] <!---batch_2_3--> Allow: batch 3, 4, 5, 6 for multi-batch jobs
---
- [ ] <!---ci_exclude_style--> Exclude: Style check
- [ ] <!---ci_exclude_fast--> Exclude: Fast test
- [ ] <!---ci_exclude_integration--> Exclude: Integration Tests
- [ ] <!---ci_exclude_stateless--> Exclude: Stateless tests
- [ ] <!---ci_exclude_stateful--> Exclude: Stateful tests
- [ ] <!---ci_exclude_performance--> Exclude: Performance tests
- [ ] <!---ci_exclude_asan--> Exclude: All with ASAN
- [ ] <!---ci_exclude_aarch64--> Exclude: All with Aarch64
- [ ] <!---ci_exclude_tsan|msan|ubsan|coverage--> Exclude: All with TSAN, MSAN, UBSAN, Coverage
---
- [ ] <!---do_not_test--> Do not test
- [ ] <!---upload_all--> Upload binaries for special builds
- [ ] <!---no_merge_commit--> Disable merge-commit
- [ ] <!---no_ci_cache--> Disable CI cache

@@ -18,9 +18,6 @@ runs:
echo "Setup the common ENV variables"
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/${{inputs.job_type}}
REPO_COPY=${{runner.temp}}/${{inputs.job_type}}/git-repo-copy
IMAGES_PATH=${{runner.temp}}/images_path
REPORTS_PATH=${{runner.temp}}/reports_dir
EOF
if [ -z "${{env.GITHUB_JOB_OVERRIDDEN}}" ] && [ "true" == "${{inputs.nested_job}}" ]; then
echo "The GITHUB_JOB_OVERRIDDEN ENV is unset, and must be set for the nested jobs"
@@ -30,6 +27,11 @@ runs:
shell: bash
run: |
# to remove any leftovers
sudo rm -fr "$TEMP_PATH"
mkdir -p "$REPO_COPY"
cp -a "$GITHUB_WORKSPACE"/. "$REPO_COPY"/
sudo rm -fr "$TEMP_PATH" && mkdir -p "$TEMP_PATH"
- name: Tune vm.mmap_rnd_bits for sanitizers
shell: bash
run: |
sudo sysctl vm.mmap_rnd_bits
# https://github.com/google/sanitizers/issues/856
echo "Tune vm.mmap_rnd_bits for sanitizers"
sudo sysctl vm.mmap_rnd_bits=28

@@ -9,28 +9,28 @@ on: # yamllint disable-line rule:truthy
push:
branches:
- 'backport/**'
# Cancel the previous workflow run in PRs.
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
CheckLabels:
runs-on: [self-hosted, style-checker]
# Run the first check always, even if the CI is cancelled
if: ${{ always() }}
RunConfig:
runs-on: [self-hosted, style-checker-aarch64]
outputs:
data: ${{ steps.runconfig.outputs.CI_DATA }}
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
clear-repository: true # to ensure correct digests
fetch-depth: 0 # to get version
filter: tree:0
- name: Labels check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 run_check.py
PythonUnitTests:
runs-on: [self-hosted, style-checker]
needs: CheckLabels
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Python unit tests
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
@@ -40,273 +40,222 @@ jobs:
echo "Testing $dir"
python3 -m unittest discover -s "$dir" -p 'test_*.py'
done
DockerHubPushAarch64:
runs-on: [self-hosted, style-checker-aarch64]
needs: CheckLabels
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
- name: PrepareRunConfig
id: runconfig
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
DockerHubPushAmd64:
runs-on: [self-hosted, style-checker]
needs: CheckLabels
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
echo "::group::configure CI run"
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --outfile ${{ runner.temp }}/ci_run_data.json
echo "::endgroup::"
echo "::group::CI run configure results"
python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
echo "::endgroup::"
{
echo 'CI_DATA<<EOF'
cat ${{ runner.temp }}/ci_run_data.json
echo 'EOF'
} >> "$GITHUB_OUTPUT"
- name: Re-create GH statuses for skipped jobs if any
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix amd64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
DockerHubPush:
needs: [DockerHubPushAmd64, DockerHubPushAarch64, PythonUnitTests]
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0 # to find ancestor merge commits necessary for finding proper docker tags
filter: tree:0
- name: Download changed aarch64 images
uses: actions/download-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}
- name: Download changed amd64 images
uses: actions/download-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images
path: ${{ runner.temp }}/changed_images.json
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ runner.temp }}/ci_run_data.json --update-gh-statuses
BuildDockers:
needs: [RunConfig]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_docker.yml
with:
data: ${{ needs.RunConfig.outputs.data }}
CompatibilityCheckX86:
needs: [BuilderDebRelease]
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Compatibility check X86
test_name: Compatibility check (amd64)
runner_type: style-checker
run_command: |
cd "$REPO_COPY/tests/ci"
python3 compatibility_check.py --check-name "Compatibility check (amd64)" --check-glibc --check-distributions
data: ${{ needs.RunConfig.outputs.data }}
CompatibilityCheckAarch64:
needs: [BuilderDebAarch64]
needs: [RunConfig, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Compatibility check X86
test_name: Compatibility check (aarch64)
runner_type: style-checker
run_command: |
cd "$REPO_COPY/tests/ci"
python3 compatibility_check.py --check-name "Compatibility check (aarch64)" --check-glibc
data: ${{ needs.RunConfig.outputs.data }}
#########################################################################################
#################################### ORDINARY BUILDS ####################################
#########################################################################################
BuilderDebRelease:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_release
checkout_depth: 0
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebAarch64:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_aarch64
checkout_depth: 0
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebAsan:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_asan
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebTsan:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_tsan
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebDebug:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_debug
data: ${{ needs.RunConfig.outputs.data }}
BuilderBinDarwin:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: binary_darwin
data: ${{ needs.RunConfig.outputs.data }}
checkout_depth: 0
BuilderBinDarwinAarch64:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: binary_darwin_aarch64
data: ${{ needs.RunConfig.outputs.data }}
checkout_depth: 0
############################################################################################
##################################### Docker images #######################################
############################################################################################
DockerServerImages:
needs:
- BuilderDebRelease
- BuilderDebAarch64
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0 # It MUST BE THE SAME for all dependencies and the job itself
filter: tree:0
- name: Check docker clickhouse/clickhouse-server building
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-server --image-path docker/server
python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
DockerServerImage:
needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Docker server image
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
DockerKeeperImage:
needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Docker keeper image
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
############################################################################################
##################################### BUILD REPORTER #######################################
############################################################################################
BuilderReport:
if: ${{ success() || failure() }}
# run report check for failed builds to indicate the CI error
if: ${{ !cancelled() }}
needs:
- BuilderDebRelease
- RunConfig
- BuilderDebAarch64
- BuilderDebAsan
- BuilderDebTsan
- BuilderDebDebug
- BuilderDebRelease
- BuilderDebTsan
uses: ./.github/workflows/reusable_test.yml
with:
test_name: ClickHouse build check
runner_type: style-checker
additional_envs: |
NEEDS_DATA<<NDENV
${{ toJSON(needs) }}
NDENV
run_command: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 build_report_check.py "$CHECK_NAME"
runner_type: style-checker-aarch64
data: ${{ needs.RunConfig.outputs.data }}
BuilderSpecialReport:
if: ${{ success() || failure() }}
# run report check for failed builds to indicate the CI error
if: ${{ !cancelled() }}
needs:
- RunConfig
- BuilderBinDarwin
- BuilderBinDarwinAarch64
uses: ./.github/workflows/reusable_test.yml
with:
test_name: ClickHouse special build check
runner_type: style-checker
additional_envs: |
NEEDS_DATA<<NDENV
${{ toJSON(needs) }}
NDENV
run_command: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 build_report_check.py "$CHECK_NAME"
runner_type: style-checker-aarch64
data: ${{ needs.RunConfig.outputs.data }}
############################################################################################
#################################### INSTALL PACKAGES ######################################
############################################################################################
InstallPackagesTestRelease:
needs: [BuilderDebRelease]
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Install packages (amd64)
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
cd "$REPO_COPY/tests/ci"
python3 install_check.py "$CHECK_NAME"
InstallPackagesTestAarch64:
needs: [BuilderDebAarch64]
needs: [RunConfig, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Install packages (arm64)
runner_type: style-checker-aarch64
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
cd "$REPO_COPY/tests/ci"
python3 install_check.py "$CHECK_NAME"
##############################################################################################
########################### FUNCTIONAL STATELESS TESTS #######################################
##############################################################################################
FunctionalStatelessTestAsan:
needs: [BuilderDebAsan]
needs: [RunConfig, BuilderDebAsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (asan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=10800
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
##############################################################################################
############################ FUNCTIONAL STATEFUL TESTS #######################################
##############################################################################################
FunctionalStatefulTestDebug:
needs: [BuilderDebDebug]
needs: [RunConfig, BuilderDebDebug]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (debug)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
##############################################################################################
######################################### STRESS TESTS #######################################
##############################################################################################
StressTestTsan:
needs: [BuilderDebTsan]
needs: [RunConfig, BuilderDebTsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (tsan)
runner_type: stress-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 stress_check.py "$CHECK_NAME"
data: ${{ needs.RunConfig.outputs.data }}
#############################################################################################
############################# INTEGRATION TESTS #############################################
#############################################################################################
IntegrationTestsRelease:
needs: [BuilderDebRelease]
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (release)
runner_type: stress-tester
batches: 4
run_command: |
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
data: ${{ needs.RunConfig.outputs.data }}
FinishCheck:
if: ${{ !failure() && !cancelled() }}
needs:
- DockerHubPush
- DockerServerImages
- BuilderReport
- BuilderSpecialReport
- FunctionalStatelessTestAsan

@@ -1,19 +0,0 @@
name: Cancel
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
on: # yamllint disable-line rule:truthy
workflow_run:
workflows: ["PullRequestCI", "ReleaseBranchCI", "DocsCheck", "BackportPR"]
types:
- requested
jobs:
cancel:
runs-on: [self-hosted, style-checker]
steps:
- uses: styfle/cancel-workflow-action@0.9.1
with:
all_but_latest: true
workflow_id: ${{ github.event.workflow.id }}

@@ -1,11 +0,0 @@
# The CI for each commit, prints envs and content of GITHUB_EVENT_PATH
name: Debug
'on':
[push, pull_request, pull_request_review, release, workflow_dispatch, workflow_call]
jobs:
DebugInfo:
runs-on: ubuntu-latest
steps:
- uses: hmarr/debug-action@a701ed95a46e6f2fb0df25e1a558c16356fae35a

@@ -1,138 +0,0 @@
name: DocsCheck
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
on: # yamllint disable-line rule:truthy
pull_request:
types:
- synchronize
- reopened
- opened
branches:
- master
paths:
- '**.md'
- 'docker/docs/**'
- 'docs/**'
- 'utils/check-style/aspell-ignore/**'
- 'tests/ci/docs_check.py'
- '.github/workflows/docs_check.yml'
jobs:
CheckLabels:
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Labels check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 run_check.py
DockerHubPushAarch64:
needs: CheckLabels
runs-on: [self-hosted, style-checker-aarch64]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
DockerHubPushAmd64:
needs: CheckLabels
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix amd64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
DockerHubPush:
needs: [DockerHubPushAmd64, DockerHubPushAarch64]
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0 # to find ancestor merge commits necessary for finding proper docker tags
filter: tree:0
- name: Download changed aarch64 images
uses: actions/download-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}
- name: Download changed amd64 images
uses: actions/download-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images
path: ${{ runner.temp }}/changed_images.json
StyleCheck:
needs: DockerHubPush
# The additional `&& ! cancelled()` is needed so that the job can still be cancelled
if: ${{ success() || failure() || ( always() && ! cancelled() ) }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Style check
runner_type: style-checker
run_command: |
cd "$REPO_COPY/tests/ci"
python3 style_check.py
secrets:
secret_envs: |
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
RCSK
DocsCheck:
needs: DockerHubPush
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Docs check
runner_type: func-tester-aarch64
additional_envs: |
run_command: |
cd "$REPO_COPY/tests/ci"
python3 docs_check.py
FinishCheck:
needs:
- StyleCheck
- DockerHubPush
- DocsCheck
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Finish label
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 finish_check.py
python3 merge_pr.py --check-approved

@@ -8,19 +8,17 @@ on: # yamllint disable-line rule:truthy
schedule:
- cron: '0 */6 * * *'
workflow_dispatch:
workflow_call:
jobs:
KeeperJepsenRelease:
uses: ./.github/workflows/reusable_test.yml
uses: ./.github/workflows/reusable_simple_job.yml
with:
test_name: Jepsen keeper check
runner_type: style-checker
report_required: true
run_command: |
cd "$REPO_COPY/tests/ci"
python3 jepsen_check.py keeper
# ServerJepsenRelease:
# runs-on: [self-hosted, style-checker]
# uses: ./.github/workflows/reusable_test.yml
# uses: ./.github/workflows/reusable_simple_job.yml
# with:
# test_name: Jepsen server check
# runner_type: style-checker

@@ -8,19 +8,26 @@ on: # yamllint disable-line rule:truthy
# schedule:
# - cron: '0 0 2 31 1' # never for now
workflow_call:
inputs:
data:
description: json ci data
type: string
required: true
jobs:
BuilderFuzzers:
uses: ./.github/workflows/reusable_build.yml
with:
build_name: fuzzers
data: ${{ inputs.data }}
libFuzzerTest:
needs: [BuilderFuzzers]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: libFuzzer tests
runner_type: func-tester
data: ${{ inputs.data }}
additional_envs: |
KILL_TIMEOUT=10800
run_command: |
cd "$REPO_COPY/tests/ci"
python3 libfuzzer_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"

@@ -10,830 +10,165 @@ on: # yamllint disable-line rule:truthy
branches:
- 'master'
jobs:
PythonUnitTests:
runs-on: [self-hosted, style-checker]
RunConfig:
runs-on: [self-hosted, style-checker-aarch64]
outputs:
data: ${{ steps.runconfig.outputs.CI_DATA }}
steps:
- name: DebugInfo
uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Python unit tests
clear-repository: true # to ensure correct digests
fetch-depth: 0 # to get version
filter: tree:0
- name: Merge sync PR
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
echo "Testing the main ci directory"
python3 -m unittest discover -s . -p 'test_*.py'
for dir in *_lambda/; do
echo "Testing $dir"
python3 -m unittest discover -s "$dir" -p 'test_*.py'
done
DockerHubPushAarch64:
python3 sync_pr.py --merge || :
# Runs in MQ:
# - name: Python unit tests
# run: |
# cd "$GITHUB_WORKSPACE/tests/ci"
# echo "Testing the main ci directory"
# python3 -m unittest discover -s . -p 'test_*.py'
# for dir in *_lambda/; do
# echo "Testing $dir"
# python3 -m unittest discover -s "$dir" -p 'test_*.py'
# done
- name: PrepareRunConfig
id: runconfig
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --outfile ${{ runner.temp }}/ci_run_data.json
echo "::group::CI configuration"
python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
echo "::endgroup::"
{
echo 'CI_DATA<<EOF'
cat ${{ runner.temp }}/ci_run_data.json
echo 'EOF'
} >> "$GITHUB_OUTPUT"
- name: Re-create GH statuses for skipped jobs if any
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ runner.temp }}/ci_run_data.json --update-gh-statuses
# Runs in MQ:
# BuildDockers:
# needs: [RunConfig]
# if: ${{ !failure() && !cancelled() }}
# uses: ./.github/workflows/reusable_docker.yml
# with:
# data: ${{ needs.RunConfig.outputs.data }}
# StyleCheck:
# needs: [RunConfig, BuildDockers]
# if: ${{ !failure() && !cancelled() }}
# uses: ./.github/workflows/reusable_test.yml
# with:
# test_name: Style check
# runner_type: style-checker
# data: ${{ needs.RunConfig.outputs.data }}
# run_command: |
# python3 style_check.py --no-push
################################# Main stages #################################
# for main CI chain
#
Builds_1:
needs: [RunConfig]
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Builds_1') }}
# using callable wf (reusable_stage.yml) allows grouping all nested jobs under a tab
uses: ./.github/workflows/reusable_build_stage.yml
with:
stage: Builds_1
data: ${{ needs.RunConfig.outputs.data }}
Tests_1:
needs: [RunConfig, Builds_1]
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_1') }}
uses: ./.github/workflows/reusable_test_stage.yml
with:
stage: Tests_1
data: ${{ needs.RunConfig.outputs.data }}
Builds_2:
needs: [RunConfig, Builds_1]
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Builds_2') }}
uses: ./.github/workflows/reusable_build_stage.yml
with:
stage: Builds_2
data: ${{ needs.RunConfig.outputs.data }}
Tests_2:
needs: [RunConfig, Builds_2]
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_2') }}
uses: ./.github/workflows/reusable_test_stage.yml
with:
stage: Tests_2
data: ${{ needs.RunConfig.outputs.data }}
# stage for jobs that do not prohibit merge
Tests_3:
needs: [RunConfig, Builds_1]
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_3') }}
uses: ./.github/workflows/reusable_test_stage.yml
with:
stage: Tests_3
data: ${{ needs.RunConfig.outputs.data }}
################################# Reports #################################
# Reports should run even if Builds_1/2 failed; keep them separate in the workflow (not in Tests_1/2)
Builds_1_Report:
# run report check for failed builds to indicate the CI error
if: ${{ !cancelled() && needs.RunConfig.result == 'success' && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'ClickHouse build check') }}
needs: [RunConfig, Builds_1]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: ClickHouse build check
runner_type: style-checker-aarch64
data: ${{ needs.RunConfig.outputs.data }}
Builds_2_Report:
# run report check for failed builds to indicate the CI error
if: ${{ !cancelled() && needs.RunConfig.result == 'success' && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'ClickHouse special build check') }}
needs: [RunConfig, Builds_2]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: ClickHouse special build check
runner_type: style-checker-aarch64
data: ${{ needs.RunConfig.outputs.data }}
MarkReleaseReady:
if: ${{ !failure() && !cancelled() }}
needs: [RunConfig, Builds_1, Builds_2]
runs-on: [self-hosted, style-checker-aarch64]
steps:
- name: Debug
run: |
echo need with different filters
cat << 'EOF'
${{ toJSON(needs) }}
${{ toJSON(needs.*.result) }}
no failures ${{ !contains(needs.*.result, 'failure') }}
no skips ${{ !contains(needs.*.result, 'skipped') }}
no both ${{ !(contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
EOF
- name: Not ready
# fail the job to be able to restart it
if: ${{ contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure') }}
run: exit 1
- name: Check out repository code
if: ${{ ! (contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
uses: ClickHouse/checkout@v1
- name: Mark Commit Release Ready
if: ${{ ! (contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 mark_release_ready.py
FinishCheck:
if: ${{ !cancelled() }}
needs: [RunConfig, Builds_1, Builds_2, Builds_1_Report, Builds_2_Report, Tests_1, Tests_2, Tests_3]
runs-on: [self-hosted, style-checker-aarch64]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
DockerHubPushAmd64:
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix amd64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
DockerHubPush:
needs: [DockerHubPushAmd64, DockerHubPushAarch64, PythonUnitTests]
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0 # to find ancestor merge commits necessary for finding proper docker tags
filter: tree:0
- name: Download changed aarch64 images
uses: actions/download-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}
- name: Download changed amd64 images
uses: actions/download-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images
path: ${{ runner.temp }}/changed_images.json
StyleCheck:
needs: DockerHubPush
if: ${{ success() || failure() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Style check
runner_type: style-checker
run_command: |
cd "$REPO_COPY/tests/ci"
python3 style_check.py --no-push
CompatibilityCheckX86:
needs: [BuilderDebRelease]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Compatibility check X86
runner_type: style-checker
run_command: |
cd "$REPO_COPY/tests/ci"
python3 compatibility_check.py --check-name "Compatibility check (amd64)" --check-glibc --check-distributions
CompatibilityCheckAarch64:
needs: [BuilderDebAarch64]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Compatibility check X86
runner_type: style-checker
run_command: |
cd "$REPO_COPY/tests/ci"
python3 compatibility_check.py --check-name "Compatibility check (aarch64)" --check-glibc
#########################################################################################
#################################### ORDINARY BUILDS ####################################
#########################################################################################
BuilderDebRelease:
needs: [DockerHubPush]
uses: ./.github/workflows/reusable_build.yml
with:
checkout_depth: 0
build_name: package_release
BuilderDebAarch64:
needs: [DockerHubPush]
uses: ./.github/workflows/reusable_build.yml
with:
checkout_depth: 0
build_name: package_aarch64
BuilderBinRelease:
needs: [DockerHubPush]
uses: ./.github/workflows/reusable_build.yml
with:
checkout_depth: 0
build_name: binary_release
BuilderDebAsan:
needs: [DockerHubPush]
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_asan
BuilderDebUBsan:
needs: [DockerHubPush]
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_ubsan
BuilderDebTsan:
needs: [DockerHubPush]
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_tsan
BuilderDebMsan:
needs: [DockerHubPush]
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_msan
BuilderDebDebug:
needs: [DockerHubPush]
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_debug
##########################################################################################
##################################### SPECIAL BUILDS #####################################
##########################################################################################
BuilderBinClangTidy:
needs: [DockerHubPush]
uses: ./.github/workflows/reusable_build.yml
with:
build_name: binary_tidy
BuilderBinDarwin:
needs: [DockerHubPush]
uses: ./.github/workflows/reusable_build.yml
with:
build_name: binary_darwin
checkout_depth: 0
BuilderBinAarch64:
needs: [DockerHubPush]
uses: ./.github/workflows/reusable_build.yml
with:
build_name: binary_aarch64
checkout_depth: 0
BuilderBinFreeBSD:
needs: [DockerHubPush]
uses: ./.github/workflows/reusable_build.yml
with:
build_name: binary_freebsd
checkout_depth: 0
BuilderBinDarwinAarch64:
needs: [DockerHubPush]
uses: ./.github/workflows/reusable_build.yml
with:
build_name: binary_darwin_aarch64
checkout_depth: 0
BuilderBinPPC64:
needs: [DockerHubPush]
uses: ./.github/workflows/reusable_build.yml
with:
build_name: binary_ppc64le
checkout_depth: 0
BuilderBinAmd64Compat:
needs: [DockerHubPush]
uses: ./.github/workflows/reusable_build.yml
with:
build_name: binary_amd64_compat
checkout_depth: 0
BuilderBinAarch64V80Compat:
needs: [DockerHubPush]
uses: ./.github/workflows/reusable_build.yml
with:
build_name: binary_aarch64_v80compat
checkout_depth: 0
BuilderBinRISCV64:
needs: [DockerHubPush]
uses: ./.github/workflows/reusable_build.yml
with:
build_name: binary_riscv64
checkout_depth: 0
BuilderBinS390X:
needs: [DockerHubPush]
uses: ./.github/workflows/reusable_build.yml
with:
build_name: binary_s390x
checkout_depth: 0
############################################################################################
##################################### Docker images #######################################
############################################################################################
DockerServerImages:
needs:
- BuilderDebRelease
- BuilderDebAarch64
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0 # It MUST BE THE SAME for all dependencies and the job itself
filter: tree:0
- name: Check docker clickhouse/clickhouse-server building
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_server.py --release-type head \
--image-repo clickhouse/clickhouse-server --image-path docker/server
python3 docker_server.py --release-type head \
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
############################################################################################
##################################### BUILD REPORTER #######################################
############################################################################################
BuilderReport:
if: ${{ success() || failure() }}
needs:
- BuilderBinRelease
- BuilderDebAarch64
- BuilderDebAsan
- BuilderDebDebug
- BuilderDebMsan
- BuilderDebRelease
- BuilderDebTsan
- BuilderDebUBsan
uses: ./.github/workflows/reusable_test.yml
with:
test_name: ClickHouse build check
runner_type: style-checker
additional_envs: |
NEEDS_DATA<<NDENV
${{ toJSON(needs) }}
NDENV
run_command: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 build_report_check.py "$CHECK_NAME"
BuilderSpecialReport:
if: ${{ success() || failure() }}
needs:
- BuilderBinAarch64
- BuilderBinDarwin
- BuilderBinDarwinAarch64
- BuilderBinFreeBSD
- BuilderBinPPC64
- BuilderBinRISCV64
- BuilderBinS390X
- BuilderBinAmd64Compat
- BuilderBinAarch64V80Compat
- BuilderBinClangTidy
uses: ./.github/workflows/reusable_test.yml
with:
test_name: ClickHouse special build check
runner_type: style-checker
additional_envs: |
NEEDS_DATA<<NDENV
${{ toJSON(needs) }}
NDENV
run_command: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 build_report_check.py "$CHECK_NAME"
MarkReleaseReady:
needs:
- BuilderBinDarwin
- BuilderBinDarwinAarch64
- BuilderDebRelease
- BuilderDebAarch64
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Mark Commit Release Ready
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 mark_release_ready.py
############################################################################################
#################################### INSTALL PACKAGES ######################################
############################################################################################
InstallPackagesTestRelease:
needs: [BuilderDebRelease]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Install packages (amd64)
runner_type: style-checker
run_command: |
cd "$REPO_COPY/tests/ci"
python3 install_check.py "$CHECK_NAME"
InstallPackagesTestAarch64:
needs: [BuilderDebAarch64]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Install packages (arm64)
runner_type: style-checker-aarch64
run_command: |
cd "$REPO_COPY/tests/ci"
python3 install_check.py "$CHECK_NAME"
##############################################################################################
########################### FUNCTIONAL STATELESS TESTS #######################################
##############################################################################################
FunctionalStatelessTestRelease:
needs: [BuilderDebRelease]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (release)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=10800
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
FunctionalStatelessTestReleaseDatabaseOrdinary:
needs: [BuilderDebRelease]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (release, DatabaseOrdinary)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=10800
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
FunctionalStatelessTestReleaseDatabaseReplicated:
needs: [BuilderDebRelease]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (release, DatabaseReplicated)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=10800
batches: 4
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
FunctionalStatelessTestReleaseS3:
needs: [BuilderDebRelease]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (release, s3 storage)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=10800
batches: 2
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
FunctionalStatelessTestReleaseAnalyzer:
needs: [BuilderDebRelease]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (release, analyzer)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=10800
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
FunctionalStatelessTestAarch64:
needs: [BuilderDebAarch64]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (aarch64)
runner_type: func-tester-aarch64
additional_envs: |
KILL_TIMEOUT=10800
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
FunctionalStatelessTestAsan:
needs: [BuilderDebAsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (asan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=10800
batches: 4
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
FunctionalStatelessTestTsan:
needs: [BuilderDebTsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (tsan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=10800
batches: 5
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
FunctionalStatelessTestUBsan:
needs: [BuilderDebUBsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (ubsan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=10800
batches: 2
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
FunctionalStatelessTestMsan:
needs: [BuilderDebMsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (msan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=10800
batches: 6
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
FunctionalStatelessTestDebug:
needs: [BuilderDebDebug]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (debug)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=10800
batches: 5
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
##############################################################################################
############################ FUNCTIONAL STATEFUL TESTS #######################################
##############################################################################################
FunctionalStatefulTestRelease:
needs: [BuilderDebRelease]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (release)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
FunctionalStatefulTestAarch64:
needs: [BuilderDebAarch64]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (aarch64)
runner_type: func-tester-aarch64
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
FunctionalStatefulTestAsan:
needs: [BuilderDebAsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (asan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
FunctionalStatefulTestTsan:
needs: [BuilderDebTsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (tsan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
FunctionalStatefulTestMsan:
needs: [BuilderDebMsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (msan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
FunctionalStatefulTestUBsan:
needs: [BuilderDebUBsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (ubsan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
FunctionalStatefulTestDebug:
needs: [BuilderDebDebug]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (debug)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
##############################################################################################
######################################### STRESS TESTS #######################################
##############################################################################################
StressTestAsan:
needs: [BuilderDebAsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (asan)
runner_type: stress-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 stress_check.py "$CHECK_NAME"
StressTestTsan:
needs: [BuilderDebTsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (tsan)
runner_type: stress-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 stress_check.py "$CHECK_NAME"
StressTestMsan:
needs: [BuilderDebMsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (msan)
runner_type: stress-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 stress_check.py "$CHECK_NAME"
StressTestUBsan:
needs: [BuilderDebUBsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (ubsan)
runner_type: stress-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 stress_check.py "$CHECK_NAME"
StressTestDebug:
needs: [BuilderDebDebug]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (debug)
runner_type: stress-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 stress_check.py "$CHECK_NAME"
#############################################################################################
############################# INTEGRATION TESTS #############################################
#############################################################################################
IntegrationTestsAsan:
needs: [BuilderDebAsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (asan)
runner_type: stress-tester
batches: 4
run_command: |
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
IntegrationTestsAnalyzerAsan:
needs: [BuilderDebAsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (asan, analyzer)
runner_type: stress-tester
batches: 6
run_command: |
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
IntegrationTestsTsan:
needs: [BuilderDebTsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (tsan)
runner_type: stress-tester
batches: 6
run_command: |
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
IntegrationTestsRelease:
needs: [BuilderDebRelease]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (release)
runner_type: stress-tester
batches: 4
run_command: |
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
##############################################################################################
##################################### AST FUZZERS ############################################
##############################################################################################
ASTFuzzerTestAsan:
needs: [BuilderDebAsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: AST fuzzer (asan)
runner_type: fuzzer-unit-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 ast_fuzzer_check.py "$CHECK_NAME"
ASTFuzzerTestTsan:
needs: [BuilderDebTsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: AST fuzzer (tsan)
runner_type: fuzzer-unit-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 ast_fuzzer_check.py "$CHECK_NAME"
ASTFuzzerTestUBSan:
needs: [BuilderDebUBsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: AST fuzzer (ubsan)
runner_type: fuzzer-unit-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 ast_fuzzer_check.py "$CHECK_NAME"
ASTFuzzerTestMSan:
needs: [BuilderDebMsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: AST fuzzer (msan)
runner_type: fuzzer-unit-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 ast_fuzzer_check.py "$CHECK_NAME"
ASTFuzzerTestDebug:
needs: [BuilderDebDebug]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: AST fuzzer (debug)
runner_type: fuzzer-unit-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 ast_fuzzer_check.py "$CHECK_NAME"
#############################################################################################
#################################### UNIT TESTS #############################################
#############################################################################################
UnitTestsAsan:
needs: [BuilderDebAsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Unit tests (asan)
runner_type: fuzzer-unit-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 unit_tests_check.py "$CHECK_NAME"
UnitTestsReleaseClang:
needs: [BuilderBinRelease]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Unit tests (release)
runner_type: fuzzer-unit-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 unit_tests_check.py "$CHECK_NAME"
UnitTestsTsan:
needs: [BuilderDebTsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Unit tests (tsan)
runner_type: fuzzer-unit-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 unit_tests_check.py "$CHECK_NAME"
UnitTestsMsan:
needs: [BuilderDebMsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Unit tests (msan)
runner_type: fuzzer-unit-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 unit_tests_check.py "$CHECK_NAME"
UnitTestsUBsan:
needs: [BuilderDebUBsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Unit tests (ubsan)
runner_type: fuzzer-unit-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 unit_tests_check.py "$CHECK_NAME"
#############################################################################################
#################################### PERFORMANCE TESTS ######################################
#############################################################################################
PerformanceComparisonX86:
needs: [BuilderDebRelease]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Performance Comparison
runner_type: stress-tester
batches: 4
run_command: |
cd "$REPO_COPY/tests/ci"
python3 performance_comparison_check.py "$CHECK_NAME"
PerformanceComparisonAarch:
needs: [BuilderDebAarch64]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Performance Comparison Aarch64
runner_type: func-tester-aarch64
batches: 4
run_command: |
cd "$REPO_COPY/tests/ci"
python3 performance_comparison_check.py "$CHECK_NAME"
##############################################################################################
###################################### SQLANCER FUZZERS ######################################
##############################################################################################
SQLancerTestRelease:
needs: [BuilderDebRelease]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: SQLancer (release)
runner_type: fuzzer-unit-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 sqlancer_check.py "$CHECK_NAME"
SQLancerTestDebug:
needs: [BuilderDebDebug]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: SQLancer (debug)
runner_type: fuzzer-unit-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 sqlancer_check.py "$CHECK_NAME"
FinishCheck:
needs:
- DockerHubPush
- BuilderReport
- BuilderSpecialReport
- MarkReleaseReady
- FunctionalStatelessTestDebug
- FunctionalStatelessTestRelease
- FunctionalStatelessTestReleaseDatabaseOrdinary
- FunctionalStatelessTestReleaseDatabaseReplicated
- FunctionalStatelessTestReleaseAnalyzer
- FunctionalStatelessTestReleaseS3
- FunctionalStatelessTestAarch64
- FunctionalStatelessTestAsan
- FunctionalStatelessTestTsan
- FunctionalStatelessTestMsan
- FunctionalStatelessTestUBsan
- FunctionalStatefulTestDebug
- FunctionalStatefulTestRelease
- FunctionalStatefulTestAarch64
- FunctionalStatefulTestAsan
- FunctionalStatefulTestTsan
- FunctionalStatefulTestMsan
- FunctionalStatefulTestUBsan
- StressTestDebug
- StressTestAsan
- StressTestTsan
- StressTestMsan
- StressTestUBsan
- IntegrationTestsAsan
- IntegrationTestsAnalyzerAsan
- IntegrationTestsTsan
- IntegrationTestsRelease
- PerformanceComparisonX86
- PerformanceComparisonAarch
- CompatibilityCheckX86
- CompatibilityCheckAarch64
- ASTFuzzerTestDebug
- ASTFuzzerTestAsan
- ASTFuzzerTestTsan
- ASTFuzzerTestMSan
- ASTFuzzerTestUBSan
- UnitTestsAsan
- UnitTestsTsan
- UnitTestsMsan
- UnitTestsUBsan
- UnitTestsReleaseClang
- SQLancerTestRelease
- SQLancerTestDebug
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Finish label
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
.github/workflows/merge_queue.yml (new file)
View File
@ -0,0 +1,115 @@
# yamllint disable rule:comments-indentation
name: MergeQueueCI
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
on: # yamllint disable-line rule:truthy
merge_group:
jobs:
RunConfig:
runs-on: [self-hosted, style-checker-aarch64]
outputs:
data: ${{ steps.runconfig.outputs.CI_DATA }}
steps:
- name: DebugInfo
uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true # to ensure correct digests
fetch-depth: 0 # to get a version
filter: tree:0
- name: Cancel PR workflow
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --cancel-previous-run
- name: Python unit tests
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
echo "Testing the main ci directory"
python3 -m unittest discover -s . -p 'test_*.py'
for dir in *_lambda/; do
echo "Testing $dir"
python3 -m unittest discover -s "$dir" -p 'test_*.py'
done
- name: PrepareRunConfig
id: runconfig
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --outfile ${{ runner.temp }}/ci_run_data.json
echo "::group::CI configuration"
python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
echo "::endgroup::"
{
echo 'CI_DATA<<EOF'
cat ${{ runner.temp }}/ci_run_data.json
echo 'EOF'
} >> "$GITHUB_OUTPUT"
BuildDockers:
needs: [RunConfig]
if: ${{ !failure() && !cancelled() && toJson(fromJson(needs.RunConfig.outputs.data).docker_data.missing_multi) != '[]' }}
uses: ./.github/workflows/reusable_docker.yml
with:
data: ${{ needs.RunConfig.outputs.data }}
StyleCheck:
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Style check')}}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Style check
runner_type: style-checker-aarch64
run_command: |
python3 style_check.py
data: ${{ needs.RunConfig.outputs.data }}
secrets:
secret_envs: |
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
RCSK
FastTest:
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Fast test') }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Fast test
runner_type: builder
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
python3 fast_test_check.py
Builds_1:
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Builds_1') }}
# using callable wf (reusable_stage.yml) allows grouping all nested jobs under a tab
uses: ./.github/workflows/reusable_build_stage.yml
with:
stage: Builds_1
data: ${{ needs.RunConfig.outputs.data }}
Tests_1:
needs: [RunConfig, Builds_1]
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_1') }}
uses: ./.github/workflows/reusable_test_stage.yml
with:
stage: Tests_1
data: ${{ needs.RunConfig.outputs.data }}
################################# Stage Final #################################
#
FinishCheck:
if: ${{ !failure() && !cancelled() }}
needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Tests_1]
runs-on: [self-hosted, style-checker-aarch64]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
- name: Check sync status
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 sync_pr.py --status
- name: Finish label
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 finish_check.py ${{ (contains(needs.*.result, 'failure') && github.event_name == 'merge_group') && '--pipeline-failure' || '' }}
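The `CI_DATA<<EOF` block in RunConfig above uses GitHub Actions' multiline-output syntax. A minimal standalone sketch of the mechanism (the step id and file path here are illustrative):
# inside a step with `id: runconfig`
{
  echo 'CI_DATA<<EOF'                    # open a multiline value under the key CI_DATA
  cat "$RUNNER_TEMP/ci_run_data.json"    # the value itself, any number of lines
  echo 'EOF'                             # close it with the matching delimiter
} >> "$GITHUB_OUTPUT"
# the step output is then exposed as ${{ steps.runconfig.outputs.CI_DATA }},
# surfaced as the job output `data` and parsed downstream with fromJson()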
View File
@ -10,126 +10,37 @@ env:
workflow_dispatch:
jobs:
Debug:
    # This job preserves the ENV and event.json for later investigation
uses: ./.github/workflows/debug.yml
DockerHubPushAarch64:
RunConfig:
runs-on: [self-hosted, style-checker-aarch64]
outputs:
data: ${{ steps.runconfig.outputs.CI_DATA }}
steps:
- name: DebugInfo
uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix aarch64 --all
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
DockerHubPushAmd64:
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix amd64 --all
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
DockerHubPush:
needs: [DockerHubPushAmd64, DockerHubPushAarch64]
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0 # to find ancestor merge commits necessary for finding proper docker tags
clear-repository: true # to ensure correct digests
fetch-depth: 0 # to get version
filter: tree:0
- name: Download changed aarch64 images
uses: actions/download-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}
- name: Download changed amd64 images
uses: actions/download-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}
- name: Images check
- name: PrepareRunConfig
id: runconfig
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images
path: ${{ runner.temp }}/changed_images.json
SonarCloud:
runs-on: [self-hosted, builder]
env:
SONAR_SCANNER_VERSION: 4.8.0.2856
SONAR_SERVER_URL: "https://sonarcloud.io"
BUILD_WRAPPER_OUT_DIR: build_wrapper_output_directory # Directory where build-wrapper output will be placed
CC: clang-17
CXX: clang++-17
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0 # Shallow clones should be disabled for a better relevancy of analysis
filter: tree:0
submodules: true
- name: Set up JDK 11
uses: actions/setup-java@v1
with:
java-version: 11
- name: Download and set up sonar-scanner
env:
SONAR_SCANNER_DOWNLOAD_URL: https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-${{ env.SONAR_SCANNER_VERSION }}-linux.zip
run: |
mkdir -p "$HOME/.sonar"
curl -sSLo "$HOME/.sonar/sonar-scanner.zip" "${{ env.SONAR_SCANNER_DOWNLOAD_URL }}"
unzip -o "$HOME/.sonar/sonar-scanner.zip" -d "$HOME/.sonar/"
echo "$HOME/.sonar/sonar-scanner-${{ env.SONAR_SCANNER_VERSION }}-linux/bin" >> "$GITHUB_PATH"
- name: Download and set up build-wrapper
env:
BUILD_WRAPPER_DOWNLOAD_URL: ${{ env.SONAR_SERVER_URL }}/static/cpp/build-wrapper-linux-x86.zip
run: |
curl -sSLo "$HOME/.sonar/build-wrapper-linux-x86.zip" "${{ env.BUILD_WRAPPER_DOWNLOAD_URL }}"
unzip -o "$HOME/.sonar/build-wrapper-linux-x86.zip" -d "$HOME/.sonar/"
echo "$HOME/.sonar/build-wrapper-linux-x86" >> "$GITHUB_PATH"
- name: Set Up Build Tools
run: |
sudo apt-get update
sudo apt-get install -yq git cmake ccache ninja-build python3 yasm nasm
sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
- name: Run build-wrapper
run: |
mkdir build
cd build
cmake ..
cd ..
build-wrapper-linux-x86-64 --out-dir ${{ env.BUILD_WRAPPER_OUT_DIR }} cmake --build build/
- name: Run sonar-scanner
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
run: |
sonar-scanner \
--define sonar.host.url="${{ env.SONAR_SERVER_URL }}" \
--define sonar.cfamily.build-wrapper-output="${{ env.BUILD_WRAPPER_OUT_DIR }}" \
--define sonar.projectKey="ClickHouse_ClickHouse" \
--define sonar.organization="clickhouse-java" \
--define sonar.cfamily.cpp23.enabled=true \
--define sonar.exclusions="**/*.java,**/*.ts,**/*.js,**/*.css,**/*.sql"
echo "::group::configure CI run"
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --skip-jobs --outfile ${{ runner.temp }}/ci_run_data.json
echo "::endgroup::"
echo "::group::CI run configure results"
python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
echo "::endgroup::"
{
echo 'CI_DATA<<EOF'
cat ${{ runner.temp }}/ci_run_data.json
echo 'EOF'
} >> "$GITHUB_OUTPUT"
BuildDockers:
needs: [RunConfig]
uses: ./.github/workflows/reusable_docker.yml
with:
data: "${{ needs.RunConfig.outputs.data }}"
set_latest: true
File diff suppressed because it is too large
View File
@ -1,23 +0,0 @@
name: PullRequestApprovedCI
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
on: # yamllint disable-line rule:truthy
pull_request_review:
types:
- submitted
jobs:
MergeOnApproval:
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Merge approved PR
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 merge_pr.py --check-approved

View File

@ -54,10 +54,13 @@ jobs:
- name: Check docker clickhouse/clickhouse-server building
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_server.py --release-type auto --version "$GITHUB_TAG" \
--image-repo clickhouse/clickhouse-server --image-path docker/server
python3 docker_server.py --release-type auto --version "$GITHUB_TAG" \
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
export CHECK_NAME="Docker server image"
python3 docker_server.py --release-type auto --version "$GITHUB_TAG" --check-name "$CHECK_NAME" --push
- name: Check docker clickhouse/clickhouse-keeper building
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
export CHECK_NAME="Docker keeper image"
python3 docker_server.py --release-type auto --version "$GITHUB_TAG" --check-name "$CHECK_NAME" --push
- name: Cleanup
if: always()
run: |

View File

@ -13,171 +13,174 @@ on: # yamllint disable-line rule:truthy
- '2[1-9].[1-9]'
jobs:
DockerHubPushAarch64:
RunConfig:
runs-on: [self-hosted, style-checker-aarch64]
outputs:
data: ${{ steps.runconfig.outputs.CI_DATA }}
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
DockerHubPushAmd64:
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix amd64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
DockerHubPush:
needs: [DockerHubPushAmd64, DockerHubPushAarch64]
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0 # to find ancestor merge commits necessary for finding proper docker tags
clear-repository: true # to ensure correct digests
fetch-depth: 0 # to get version
filter: tree:0
- name: Download changed aarch64 images
uses: actions/download-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}
- name: Download changed amd64 images
uses: actions/download-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}
- name: Images check
- name: Labels check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images
path: ${{ runner.temp }}/changed_images.json
python3 run_check.py
- name: Python unit tests
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
echo "Testing the main ci directory"
python3 -m unittest discover -s . -p 'test_*.py'
for dir in *_lambda/; do
echo "Testing $dir"
python3 -m unittest discover -s "$dir" -p 'test_*.py'
done
- name: PrepareRunConfig
id: runconfig
run: |
echo "::group::configure CI run"
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --outfile ${{ runner.temp }}/ci_run_data.json
echo "::endgroup::"
echo "::group::CI run configure results"
python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
echo "::endgroup::"
{
echo 'CI_DATA<<EOF'
cat ${{ runner.temp }}/ci_run_data.json
echo 'EOF'
} >> "$GITHUB_OUTPUT"
- name: Re-create GH statuses for skipped jobs if any
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ runner.temp }}/ci_run_data.json --update-gh-statuses
BuildDockers:
needs: [RunConfig]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_docker.yml
with:
data: ${{ needs.RunConfig.outputs.data }}
CompatibilityCheckX86:
needs: [BuilderDebRelease]
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Compatibility check X86
test_name: Compatibility check (amd64)
runner_type: style-checker
run_command: |
cd "$REPO_COPY/tests/ci"
python3 compatibility_check.py --check-name "Compatibility check (amd64)" --check-glibc --check-distributions
data: ${{ needs.RunConfig.outputs.data }}
CompatibilityCheckAarch64:
needs: [BuilderDebAarch64]
needs: [RunConfig, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Compatibility check X86
test_name: Compatibility check (aarch64)
runner_type: style-checker
run_command: |
cd "$REPO_COPY/tests/ci"
python3 compatibility_check.py --check-name "Compatibility check (aarch64)" --check-glibc
data: ${{ needs.RunConfig.outputs.data }}
#########################################################################################
#################################### ORDINARY BUILDS ####################################
#########################################################################################
BuilderDebRelease:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_release
checkout_depth: 0
data: ${{ needs.RunConfig.outputs.data }}
# always rebuild on release branches to be able to publish from any commit
force: true
BuilderDebAarch64:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_aarch64
checkout_depth: 0
data: ${{ needs.RunConfig.outputs.data }}
# always rebuild on release branches to be able to publish from any commit
force: true
BuilderDebAsan:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_asan
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebUBsan:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_ubsan
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebTsan:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_tsan
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebMsan:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_msan
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebDebug:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_debug
data: ${{ needs.RunConfig.outputs.data }}
BuilderBinDarwin:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: binary_darwin
checkout_depth: 0
data: ${{ needs.RunConfig.outputs.data }}
# always rebuild on release branches to be able to publish from any commit
force: true
BuilderBinDarwinAarch64:
needs: [DockerHubPush]
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: binary_darwin_aarch64
checkout_depth: 0
data: ${{ needs.RunConfig.outputs.data }}
# always rebuild on release branches to be able to publish from any commit
force: true
############################################################################################
##################################### Docker images #######################################
############################################################################################
DockerServerImages:
needs:
- BuilderDebRelease
- BuilderDebAarch64
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0 # It MUST BE THE SAME for all dependencies and the job itself
filter: tree:0
- name: Check docker clickhouse/clickhouse-server building
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-server --image-path docker/server
python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
DockerServerImage:
needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Docker server image
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
DockerKeeperImage:
needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Docker keeper image
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
############################################################################################
##################################### BUILD REPORTER #######################################
############################################################################################
BuilderReport:
if: ${{ success() || failure() }}
# run report check for failed builds to indicate the CI error
if: ${{ !cancelled() }}
needs:
- RunConfig
- BuilderDebRelease
- BuilderDebAarch64
- BuilderDebAsan
@ -188,43 +191,48 @@ jobs:
uses: ./.github/workflows/reusable_test.yml
with:
test_name: ClickHouse build check
runner_type: style-checker
additional_envs: |
NEEDS_DATA<<NDENV
${{ toJSON(needs) }}
NDENV
run_command: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 build_report_check.py "$CHECK_NAME"
runner_type: style-checker-aarch64
data: ${{ needs.RunConfig.outputs.data }}
BuilderSpecialReport:
if: ${{ success() || failure() }}
# run report check for failed builds to indicate the CI error
if: ${{ !cancelled() }}
needs:
- RunConfig
- BuilderBinDarwin
- BuilderBinDarwinAarch64
uses: ./.github/workflows/reusable_test.yml
with:
test_name: ClickHouse special build check
runner_type: style-checker
additional_envs: |
NEEDS_DATA<<NDENV
${{ toJSON(needs) }}
NDENV
run_command: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 build_report_check.py "$CHECK_NAME"
runner_type: style-checker-aarch64
data: ${{ needs.RunConfig.outputs.data }}
MarkReleaseReady:
if: ${{ !failure() && !cancelled() }}
needs:
- BuilderBinDarwin
- BuilderBinDarwinAarch64
- BuilderDebRelease
- BuilderDebAarch64
runs-on: [self-hosted, style-checker]
runs-on: [self-hosted, style-checker-aarch64]
steps:
- name: Debug
run: |
echo need with different filters
cat << 'EOF'
${{ toJSON(needs) }}
${{ toJSON(needs.*.result) }}
no failures ${{ !contains(needs.*.result, 'failure') }}
no skips ${{ !contains(needs.*.result, 'skipped') }}
no both ${{ !(contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
EOF
- name: Not ready
      # fail the job to be able to restart it
if: ${{ contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure') }}
run: exit 1
- name: Check out repository code
if: ${{ ! (contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Mark Commit Release Ready
if: ${{ ! (contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 mark_release_ready.py
@ -232,283 +240,226 @@ jobs:
#################################### INSTALL PACKAGES ######################################
############################################################################################
InstallPackagesTestRelease:
needs: [BuilderDebRelease]
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Install packages (amd64)
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
cd "$REPO_COPY/tests/ci"
python3 install_check.py "$CHECK_NAME"
InstallPackagesTestAarch64:
needs: [BuilderDebAarch64]
needs: [RunConfig, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Install packages (arm64)
runner_type: style-checker-aarch64
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
cd "$REPO_COPY/tests/ci"
python3 install_check.py "$CHECK_NAME"
##############################################################################################
########################### FUNCTIONAL STATELESS TESTS #######################################
##############################################################################################
FunctionalStatelessTestRelease:
needs: [BuilderDebRelease]
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (release)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=10800
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatelessTestAarch64:
needs: [BuilderDebAarch64]
needs: [RunConfig, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (aarch64)
runner_type: func-tester-aarch64
additional_envs: |
KILL_TIMEOUT=10800
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatelessTestAsan:
needs: [BuilderDebAsan]
needs: [RunConfig, BuilderDebAsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (asan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=10800
batches: 4
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatelessTestTsan:
needs: [BuilderDebTsan]
needs: [RunConfig, BuilderDebTsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (tsan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=10800
batches: 5
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
FunctionalStatelessTestUBsan:
needs: [BuilderDebUBsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (ubsan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=10800
batches: 2
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatelessTestMsan:
needs: [BuilderDebMsan]
needs: [RunConfig, BuilderDebMsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (msan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=10800
batches: 6
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatelessTestUBsan:
needs: [RunConfig, BuilderDebUBsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (ubsan)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatelessTestDebug:
needs: [BuilderDebDebug]
needs: [RunConfig, BuilderDebDebug]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (debug)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=10800
batches: 5
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
##############################################################################################
############################ FUNCTIONAL STATEFUL TESTS #######################################
##############################################################################################
FunctionalStatefulTestRelease:
needs: [BuilderDebRelease]
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (release)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatefulTestAarch64:
needs: [BuilderDebAarch64]
needs: [RunConfig, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (aarch64)
runner_type: func-tester-aarch64
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatefulTestAsan:
needs: [BuilderDebAsan]
needs: [RunConfig, BuilderDebAsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (asan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatefulTestTsan:
needs: [BuilderDebTsan]
needs: [RunConfig, BuilderDebTsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (tsan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatefulTestMsan:
needs: [BuilderDebMsan]
needs: [RunConfig, BuilderDebMsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (msan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatefulTestUBsan:
needs: [BuilderDebUBsan]
needs: [RunConfig, BuilderDebUBsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (ubsan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatefulTestDebug:
needs: [BuilderDebDebug]
needs: [RunConfig, BuilderDebDebug]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (debug)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
data: ${{ needs.RunConfig.outputs.data }}
##############################################################################################
######################################### STRESS TESTS #######################################
##############################################################################################
StressTestAsan:
needs: [BuilderDebAsan]
needs: [RunConfig, BuilderDebAsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (asan)
runner_type: stress-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 stress_check.py "$CHECK_NAME"
data: ${{ needs.RunConfig.outputs.data }}
StressTestTsan:
needs: [BuilderDebTsan]
needs: [RunConfig, BuilderDebTsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (tsan)
runner_type: stress-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 stress_check.py "$CHECK_NAME"
data: ${{ needs.RunConfig.outputs.data }}
StressTestMsan:
needs: [BuilderDebMsan]
needs: [RunConfig, BuilderDebMsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (msan)
runner_type: stress-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 stress_check.py "$CHECK_NAME"
data: ${{ needs.RunConfig.outputs.data }}
StressTestUBsan:
needs: [BuilderDebUBsan]
needs: [RunConfig, BuilderDebUBsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (ubsan)
runner_type: stress-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 stress_check.py "$CHECK_NAME"
data: ${{ needs.RunConfig.outputs.data }}
StressTestDebug:
needs: [BuilderDebDebug]
needs: [RunConfig, BuilderDebDebug]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (debug)
runner_type: stress-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 stress_check.py "$CHECK_NAME"
data: ${{ needs.RunConfig.outputs.data }}
#############################################################################################
############################# INTEGRATION TESTS #############################################
#############################################################################################
IntegrationTestsAsan:
needs: [BuilderDebAsan]
needs: [RunConfig, BuilderDebAsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (asan)
runner_type: stress-tester
batches: 4
run_command: |
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
data: ${{ needs.RunConfig.outputs.data }}
IntegrationTestsAnalyzerAsan:
needs: [BuilderDebAsan]
needs: [RunConfig, BuilderDebAsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (asan, analyzer)
test_name: Integration tests (asan, old analyzer)
runner_type: stress-tester
batches: 6
run_command: |
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
data: ${{ needs.RunConfig.outputs.data }}
IntegrationTestsTsan:
needs: [BuilderDebTsan]
needs: [RunConfig, BuilderDebTsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (tsan)
runner_type: stress-tester
batches: 6
run_command: |
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
data: ${{ needs.RunConfig.outputs.data }}
IntegrationTestsRelease:
needs: [BuilderDebRelease]
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (release)
runner_type: stress-tester
batches: 4
run_command: |
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
data: ${{ needs.RunConfig.outputs.data }}
FinishCheck:
if: ${{ !failure() && !cancelled() }}
needs:
- DockerHubPush
- DockerServerImages
- DockerServerImage
- DockerKeeperImage
- BuilderReport
- BuilderSpecialReport
- MarkReleaseReady
View File
@ -22,13 +22,26 @@ name: Build ClickHouse
description: the label of runner to use
default: builder
type: string
data:
description: json ci data
type: string
required: true
force:
description: disallow job skipping
type: boolean
default: false
additional_envs:
description: additional ENV variables to setup the job
type: string
secrets:
secret_envs:
description: if given, it's passed to the environments
required: false
jobs:
Build:
name: Build-${{inputs.build_name}}
if: ${{ contains(fromJson(inputs.data).jobs_data.jobs_to_do, inputs.build_name) || inputs.force }}
env:
GITHUB_JOB_OVERRIDDEN: Build-${{inputs.build_name}}
runs-on: [self-hosted, '${{inputs.runner_type}}']
@ -37,6 +50,7 @@ jobs:
uses: ClickHouse/checkout@v1
with:
clear-repository: true
ref: ${{ fromJson(inputs.data).git_ref }}
submodules: true
fetch-depth: ${{inputs.checkout_depth}}
filter: tree:0
@ -44,11 +58,16 @@ jobs:
run: |
cat >> "$GITHUB_ENV" << 'EOF'
${{inputs.additional_envs}}
${{secrets.secret_envs}}
DOCKER_TAG<<DOCKER_JSON
${{ toJson(fromJson(inputs.data).docker_data.images) }}
DOCKER_JSON
EOF
python3 "$GITHUB_WORKSPACE"/tests/ci/ci_config.py --build-name "${{inputs.build_name}}" >> "$GITHUB_ENV"
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
# This step is done in GITHUB_WORKSPACE,
# because it's broken in REPO_COPY for some reason
# See also update-submodules.sh
if: ${{ env.BUILD_SPARSE_CHECKOUT == 'true' }}
run: |
rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
@ -60,20 +79,25 @@ jobs:
uses: ./.github/actions/common_setup
with:
job_type: build_check
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Build
- name: Pre
run: |
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --pre --job-name '${{inputs.build_name}}'
- name: Run
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" \
--infile ${{ toJson(inputs.data) }} \
--job-name "$BUILD_NAME" \
--run \
${{ inputs.force && '--force' || '' }}
- name: Post
        # there may still be a build report to upload for a failed build job
if: ${{ !cancelled() }}
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --post --job-name '${{inputs.build_name}}'
- name: Mark as done
if: ${{ !cancelled() }}
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --mark-success --job-name '${{inputs.build_name}}'
- name: Clean
if: always()
uses: ./.github/actions/clean
View File
@ -0,0 +1,38 @@
### FIXME: merge reusable_test.yml and reusable_build.yml as they are almost identical
# and then merge reusable_build_stage.yml and reusable_test_stage.yml
name: BuildStageWF
'on':
workflow_call:
inputs:
stage:
description: stage name
type: string
required: true
data:
description: ci data
type: string
required: true
secrets:
secret_envs:
description: if given, it's passed to the environments
required: false
jobs:
s:
if: ${{ !failure() && !cancelled() }}
strategy:
fail-fast: false
matrix:
job_name_and_runner_type: ${{ fromJson(inputs.data).stages_data[inputs.stage] }}
uses: ./.github/workflows/reusable_build.yml
with:
build_name: ${{ matrix.job_name_and_runner_type.job_name }}
runner_type: ${{ matrix.job_name_and_runner_type.runner_type }}
      # don't forget to pass the force flag (no ci cache/no reuse) once it's needed
force: false
      # for now let's do a deep checkout for builds
checkout_depth: 0
data: ${{ inputs.data }}
secrets:
secret_envs: ${{ secrets.secret_envs }}
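The matrix above destructures `job_name`/`runner_type` pairs from `stages_data`; the diff does not show that structure, but from the expressions it is presumably a JSON object shaped roughly like this (job and runner names are illustrative):
{
  "stages_data": {
    "Builds_1": [
      {"job_name": "package_release", "runner_type": "builder"},
      {"job_name": "package_aarch64", "runner_type": "builder"}
    ]
  }
}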
.github/workflows/reusable_docker.yml (new file)
View File
@ -0,0 +1,66 @@
name: Build docker images
'on':
workflow_call:
inputs:
data:
description: json with ci data from todo job
required: true
type: string
set_latest:
description: set latest tag for resulting multiarch manifest
required: false
type: boolean
default: false
jobs:
DockerBuildAarch64:
runs-on: [self-hosted, style-checker-aarch64]
if: |
!failure() && !cancelled() && toJson(fromJson(inputs.data).docker_data.missing_aarch64) != '[]'
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
ref: ${{ fromJson(inputs.data).git_ref }}
- name: Build images
run: |
python3 "${GITHUB_WORKSPACE}/tests/ci/docker_images_check.py" \
--suffix aarch64 \
--image-tags '${{ toJson(fromJson(inputs.data).docker_data.images) }}' \
--missing-images '${{ toJson(fromJson(inputs.data).docker_data.missing_aarch64) }}'
DockerBuildAmd64:
runs-on: [self-hosted, style-checker]
if: |
!failure() && !cancelled() && toJson(fromJson(inputs.data).docker_data.missing_amd64) != '[]'
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
ref: ${{ fromJson(inputs.data).git_ref }}
- name: Build images
run: |
python3 "${GITHUB_WORKSPACE}/tests/ci/docker_images_check.py" \
--suffix amd64 \
--image-tags '${{ toJson(fromJson(inputs.data).docker_data.images) }}' \
--missing-images '${{ toJson(fromJson(inputs.data).docker_data.missing_amd64) }}'
DockerMultiArchManifest:
needs: [DockerBuildAmd64, DockerBuildAarch64]
runs-on: [self-hosted, style-checker]
if: |
!failure() && !cancelled() && (toJson(fromJson(inputs.data).docker_data.missing_multi) != '[]' || inputs.set_latest)
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
ref: ${{ fromJson(inputs.data).git_ref }}
- name: Build images
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
FLAG_LATEST=''
if [ "${{ inputs.set_latest }}" == "true" ]; then
FLAG_LATEST='--set-latest'
echo "latest tag will be set for resulting manifests"
fi
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64 \
--image-tags '${{ toJson(fromJson(inputs.data).docker_data.images) }}' \
--missing-images '${{ toJson(fromJson(inputs.data).docker_data.missing_multi) }}' \
$FLAG_LATEST
View File
@ -0,0 +1,96 @@
### For the pure soul who wishes to move it to another place
# https://github.com/orgs/community/discussions/9050
name: Simple job
'on':
workflow_call:
inputs:
test_name:
description: the value of test type from tests/ci/ci_config.py, ends up as $CHECK_NAME ENV
required: true
type: string
runner_type:
description: the label of runner to use
required: true
type: string
run_command:
description: the command to launch the check
default: ""
required: false
type: string
checkout_depth:
description: the value of the git shallow checkout
required: false
type: number
default: 1
submodules:
description: if the submodules should be checked out
required: false
type: boolean
default: false
additional_envs:
description: additional ENV variables to setup the job
type: string
working-directory:
description: sets custom working directory
type: string
default: "$GITHUB_WORKSPACE/tests/ci"
git_ref:
description: commit to use, merge commit for pr or head
required: false
type: string
default: ${{ github.event.after }} # no merge commit
report_required:
description: set to true if job report with the commit status required
type: boolean
default: false
secrets:
secret_envs:
description: if given, it's passed to the environments
required: false
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
CHECK_NAME: ${{inputs.test_name}}
jobs:
Test:
runs-on: [self-hosted, '${{inputs.runner_type}}']
name: ${{inputs.test_name}}
env:
GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}
steps:
- name: DebugInfo
uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
ref: ${{ inputs.git_ref }}
submodules: ${{inputs.submodules}}
fetch-depth: ${{inputs.checkout_depth}}
filter: tree:0
- name: Set build envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
CHECK_NAME=${{ inputs.test_name }}
${{inputs.additional_envs}}
${{secrets.secret_envs}}
EOF
- name: Common setup
uses: ./.github/actions/common_setup
with:
job_type: test
- name: Run
run: |
cd "${{ inputs.working-directory }}"
${{ inputs.run_command }}
- name: Post
if: ${{ inputs.report_required }}
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --post --job-name '${{inputs.test_name}}'
- name: Clean
if: always()
uses: ./.github/actions/clean
View File
@ -14,13 +14,10 @@ name: Testing workflow
required: true
type: string
run_command:
description: the command to launch the check. Usually starts with `cd '$REPO_COPY/tests/ci'`
required: true
description: the command to launch the check
default: ""
required: false
type: string
batches:
description: how many batches for the test will be launched
default: 1
type: number
checkout_depth:
description: the value of the git shallow checkout
required: false
@ -34,80 +31,85 @@ name: Testing workflow
additional_envs:
description: additional ENV variables to setup the job
type: string
data:
description: ci data
type: string
required: true
working-directory:
description: sets custom working directory
type: string
default: "$GITHUB_WORKSPACE/tests/ci"
secrets:
secret_envs:
description: if given, it's passed to the environments
required: false
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
CHECK_NAME: ${{inputs.test_name}}
jobs:
PrepareStrategy:
    # batches < 1 is a misconfiguration,
# and we need this step only for batches > 1
if: ${{ inputs.batches > 1 }}
runs-on: [self-hosted, style-checker-aarch64]
outputs:
batches: ${{steps.batches.outputs.batches}}
steps:
- name: Calculate batches
id: batches
run: |
batches_output=$(python3 -c 'import json; print(json.dumps(list(range(${{inputs.batches}}))))')
echo "batches=${batches_output}" >> "$GITHUB_OUTPUT"
Test:
# If PrepareStrategy is skipped for batches == 1,
# we still need to launch the test.
    # `! failure()` is mandatory here to launch on a skipped Job
    # `&& !cancelled()` to allow it to be cancellable
if: ${{ ( !failure() && !cancelled() ) && inputs.batches > 0 }}
    # Do not add `-0` to the end if there's only one batch
name: ${{inputs.test_name}}${{ inputs.batches > 1 && format('-{0}',matrix.batch) || '' }}
env:
GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}${{ inputs.batches > 1 && format('-{0}',matrix.batch) || '' }}
runs-on: [self-hosted, '${{inputs.runner_type}}']
needs: [PrepareStrategy]
if: ${{ !failure() && !cancelled() && contains(fromJson(inputs.data).jobs_data.jobs_to_do, inputs.test_name) }}
name: ${{inputs.test_name}}${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 && format('-{0}',matrix.batch) || '' }}
env:
GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 && format('-{0}',matrix.batch) || '' }}
strategy:
fail-fast: false # we always wait for entire matrix
fail-fast: false # we always wait for the entire matrix
matrix:
# if PrepareStrategy does not have batches, we use 0
batch: ${{ needs.PrepareStrategy.outputs.batches
&& fromJson(needs.PrepareStrategy.outputs.batches)
|| fromJson('[0]')}}
batch: ${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].batches }}
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
ref: ${{ fromJson(inputs.data).git_ref }}
submodules: ${{inputs.submodules}}
fetch-depth: ${{inputs.checkout_depth}}
filter: tree:0
- name: Set build envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
CHECK_NAME=${{ inputs.test_name }}
${{inputs.additional_envs}}
${{secrets.secret_envs}}
DOCKER_TAG<<DOCKER_JSON
${{ toJson(fromJson(inputs.data).docker_data.images) }}
DOCKER_JSON
EOF
- name: Common setup
uses: ./.github/actions/common_setup
with:
job_type: test
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Setup batch
if: ${{ inputs.batches > 1}}
if: ${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 }}
run: |
cat >> "$GITHUB_ENV" << 'EOF'
RUN_BY_HASH_NUM=${{matrix.batch}}
RUN_BY_HASH_TOTAL=${{inputs.batches}}
RUN_BY_HASH_TOTAL=${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches }}
EOF
- name: Run test
run: ${{inputs.run_command}}
- name: Pre run
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --pre --job-name '${{inputs.test_name}}'
- name: Run
run: |
cd "${{ inputs.working-directory }}"
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" \
--infile ${{ toJson(inputs.data) }} \
--job-name '${{inputs.test_name}}' \
--run \
--run-command '''${{inputs.run_command}}'''
- name: Post run
if: ${{ !cancelled() }}
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --post --job-name '${{inputs.test_name}}'
- name: Mark as done
if: ${{ !cancelled() }}
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --mark-success --job-name '${{inputs.test_name}}' --batch ${{matrix.batch}}
- name: Clean
if: always()
uses: ./.github/actions/clean
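From the `num_batches` and `batches` expressions above, each entry of `jobs_data.jobs_params` is presumably shaped roughly as follows (the check name and counts are illustrative); `matrix.batch` then iterates over `batches`, and the "Setup batch" step maps it to `RUN_BY_HASH_NUM`/`RUN_BY_HASH_TOTAL`:
{
  "jobs_data": {
    "jobs_params": {
      "Stateless tests (asan)": {"num_batches": 4, "batches": [0, 1, 2, 3]}
    }
  }
}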
View File
@ -0,0 +1,31 @@
name: StageWF
'on':
workflow_call:
inputs:
stage:
description: stage name
type: string
required: true
data:
description: ci data
type: string
required: true
secrets:
secret_envs:
description: if given, it's passed to the environments
required: false
jobs:
s:
if: ${{ !failure() && !cancelled() }}
strategy:
fail-fast: false
matrix:
job_name_and_runner_type: ${{ fromJson(inputs.data).stages_data[inputs.stage] }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: ${{ matrix.job_name_and_runner_type.job_name }}
runner_type: ${{ matrix.job_name_and_runner_type.runner_type }}
data: ${{ inputs.data }}
secrets:
secret_envs: ${{ secrets.secret_envs }}
View File
@ -55,7 +55,7 @@ jobs:
python3 ./utils/security-generator/generate_security.py > SECURITY.md
git diff HEAD
- name: Create Pull Request
uses: peter-evans/create-pull-request@v3
uses: peter-evans/create-pull-request@v6
with:
author: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
token: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
.gitignore
View File
@ -21,6 +21,9 @@
*.stderr
*.stdout
# llvm-xray logs
xray-log.*
/docs/build
/docs/publish
/docs/edit
@ -164,8 +167,11 @@ tests/queries/0_stateless/*.generated-expect
tests/queries/0_stateless/*.expect.history
tests/integration/**/_gen
# pytest --pdb history
.pdb_history
# rust
/rust/**/target
/rust/**/target*
# It is autogenerated from *.in
/rust/**/.cargo/config.toml
/rust/**/vendor
.gitmodules
View File
@ -6,7 +6,7 @@
url = https://github.com/facebook/zstd
[submodule "contrib/lz4"]
path = contrib/lz4
url = https://github.com/ClickHouse/lz4
url = https://github.com/lz4/lz4
[submodule "contrib/librdkafka"]
path = contrib/librdkafka
url = https://github.com/ClickHouse/librdkafka
@ -22,9 +22,6 @@
[submodule "contrib/capnproto"]
path = contrib/capnproto
url = https://github.com/ClickHouse/capnproto
[submodule "contrib/double-conversion"]
path = contrib/double-conversion
url = https://github.com/google/double-conversion
[submodule "contrib/re2"]
path = contrib/re2
url = https://github.com/google/re2
@ -99,7 +96,7 @@
url = https://github.com/awslabs/aws-c-event-stream
[submodule "aws-c-common"]
path = contrib/aws-c-common
url = https://github.com/ClickHouse/aws-c-common
url = https://github.com/awslabs/aws-c-common.git
[submodule "aws-checksums"]
path = contrib/aws-checksums
url = https://github.com/awslabs/aws-checksums
@ -176,9 +173,6 @@
[submodule "contrib/libpq"]
path = contrib/libpq
url = https://github.com/ClickHouse/libpq
[submodule "contrib/boringssl"]
path = contrib/boringssl
url = https://github.com/ClickHouse/boringssl
[submodule "contrib/NuRaft"]
path = contrib/NuRaft
url = https://github.com/ClickHouse/NuRaft
@ -245,6 +239,12 @@
[submodule "contrib/idxd-config"]
path = contrib/idxd-config
url = https://github.com/intel/idxd-config
[submodule "contrib/QAT-ZSTD-Plugin"]
path = contrib/QAT-ZSTD-Plugin
url = https://github.com/intel/QAT-ZSTD-Plugin
[submodule "contrib/qatlib"]
path = contrib/qatlib
url = https://github.com/intel/qatlib
[submodule "contrib/wyhash"]
path = contrib/wyhash
url = https://github.com/wangyi-fudan/wyhash
@ -272,9 +272,6 @@
[submodule "contrib/crc32-s390x"]
path = contrib/crc32-s390x
url = https://github.com/linux-on-ibm-z/crc32-s390x
[submodule "contrib/openssl"]
path = contrib/openssl
url = https://github.com/openssl/openssl
[submodule "contrib/google-benchmark"]
path = contrib/google-benchmark
url = https://github.com/google/benchmark
@ -320,6 +317,9 @@
[submodule "contrib/crc32-vpmsum"]
path = contrib/crc32-vpmsum
url = https://github.com/antonblanchard/crc32-vpmsum.git
[submodule "contrib/expected"]
path = contrib/expected
url = https://github.com/TartanLlama/expected
[submodule "contrib/liburing"]
path = contrib/liburing
url = https://github.com/axboe/liburing
@ -357,3 +357,18 @@
[submodule "contrib/pocketfft"]
path = contrib/pocketfft
url = https://github.com/mreineck/pocketfft.git
[submodule "contrib/sqids-cpp"]
path = contrib/sqids-cpp
url = https://github.com/sqids/sqids-cpp.git
[submodule "contrib/idna"]
path = contrib/idna
url = https://github.com/ada-url/idna.git
[submodule "contrib/rust_vendor"]
path = contrib/rust_vendor
url = https://github.com/ClickHouse/rust_vendor.git
[submodule "contrib/openssl"]
path = contrib/openssl
url = https://github.com/ClickHouse/openssl.git
[submodule "contrib/double-conversion"]
path = contrib/double-conversion
url = https://github.com/ClickHouse/double-conversion.git

View File

@ -1,43 +0,0 @@
# vim: ft=config
[BASIC]
max-module-lines=2000
# due to SQL
max-line-length=200
# Drop/decrease them one day:
max-branches=50
max-nested-blocks=10
max-statements=200
[FORMAT]
ignore-long-lines = (# )?<?https?://\S+>?$
[MESSAGES CONTROL]
disable = missing-docstring,
too-few-public-methods,
invalid-name,
too-many-arguments,
keyword-arg-before-vararg,
too-many-locals,
too-many-instance-attributes,
cell-var-from-loop,
fixme,
too-many-public-methods,
wildcard-import,
unused-wildcard-import,
singleton-comparison,
# pytest.mark.parametrize is not callable (not-callable)
not-callable,
# https://github.com/PyCQA/pylint/issues/3882
# [Python 3.9] Value 'Optional' is unsubscriptable (unsubscriptable-object) (also Union)
unsubscriptable-object,
# Drop them one day:
redefined-outer-name,
broad-except,
bare-except,
no-else-return,
global-statement
[SIMILARITIES]
# due to SQL
min-similarity-lines=1000

File diff suppressed because it is too large

View File

@ -56,14 +56,22 @@ option(ENABLE_CHECK_HEAVY_BUILDS "Don't allow C++ translation units to compile t
if (ENABLE_CHECK_HEAVY_BUILDS)
# set DATA (since RSS does not work since 2.6.x+) to 5G
set (RLIMIT_DATA 5000000000)
# set VIRT (RLIMIT_AS) to 10G (DATA*10)
# set VIRT (RLIMIT_AS) to 10G (DATA*2)
set (RLIMIT_AS 10000000000)
# set CPU time limit to 1000 seconds
set (RLIMIT_CPU 1000)
# -fsanitize=memory is too heavy
if (SANITIZE STREQUAL "memory")
set (RLIMIT_DATA 10000000000) # 10G
# Sanitizers are too heavy. Some architectures too.
if (SANITIZE OR SANITIZE_COVERAGE OR WITH_COVERAGE OR ARCH_RISCV64 OR ARCH_LOONGARCH64)
# Twice as large
set (RLIMIT_DATA 10000000000)
set (RLIMIT_AS 20000000000)
endif()
# For some files, building for RISCV64/LOONGARCH64 might currently be too slow.
# TODO: Improve compilation times per file
if (ARCH_RISCV64 OR ARCH_LOONGARCH64)
set (RLIMIT_CPU 1800)
endif()
set (CMAKE_CXX_COMPILER_LAUNCHER prlimit --as=${RLIMIT_AS} --data=${RLIMIT_DATA} --cpu=${RLIMIT_CPU} ${CMAKE_CXX_COMPILER_LAUNCHER})
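The prlimit wrapper above caps each compiler invocation's data segment, address space, and CPU time. To see what these caps buy, here is a minimal standalone sketch that applies the same RLIMIT_AS cap via setrlimit(2) directly (Linux-only; the 10 GB/20 GB values mirror the numbers above, the allocation size is arbitrary):

#include <sys/resource.h>
#include <cstdio>
#include <cstdlib>

int main()
{
    /// Cap the address space at 10 GB, as the build does for compiler processes.
    rlimit as_limit{.rlim_cur = 10000000000ULL, .rlim_max = 10000000000ULL};
    if (setrlimit(RLIMIT_AS, &as_limit) != 0)
    {
        perror("setrlimit");
        return 1;
    }

    /// An allocation beyond the cap now fails instead of exhausting the machine.
    void * p = malloc(20000000000ULL); /// 20 GB > 10 GB cap
    printf("20 GB allocation %s\n", p ? "succeeded" : "failed");
    free(p);
    return 0;
}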
@ -102,6 +110,8 @@ if (ENABLE_FUZZING)
# For codegen_select_fuzzer
set (ENABLE_PROTOBUF 1)
add_compile_definitions(FUZZING_MODE=1)
endif()
# Global libraries
@ -110,13 +120,10 @@ endif()
# - sanitize.cmake
add_library(global-libs INTERFACE)
# We don't want to instrument everything with fuzzer, but only specific targets (see below),
# also, since we build our own llvm, we specifically don't want to instrument
# libFuzzer library itself - it would result in infinite recursion
#include (cmake/fuzzer.cmake)
include (cmake/sanitize.cmake)
include (cmake/instrument.cmake)
option(ENABLE_COLORED_BUILD "Enable colors in compiler output" ON)
set (CMAKE_COLOR_MAKEFILE ${ENABLE_COLORED_BUILD}) # works only for the makefile generator
@ -133,23 +140,21 @@ endif ()
include (cmake/check_flags.cmake)
include (cmake/add_warning.cmake)
if (COMPILER_CLANG)
# generate ranges for fast "addr2line" search
if (NOT CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE")
# NOTE: clang has a bug: it does not emit .debug_aranges
# with ThinLTO, so a custom ld.lld wrapper is shipped in docker images.
set(COMPILER_FLAGS "${COMPILER_FLAGS} -gdwarf-aranges")
endif ()
# See https://blog.llvm.org/posts/2021-04-05-constructor-homing-for-debug-info/
if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG" OR CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Xclang -fuse-ctor-homing")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Xclang -fuse-ctor-homing")
endif()
no_warning(enum-constexpr-conversion) # breaks Protobuf in clang-16
# generate ranges for fast "addr2line" search
if (NOT CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE")
# NOTE: clang has a bug: it does not emit .debug_aranges
# with ThinLTO, so a custom ld.lld wrapper is shipped in docker images.
set(COMPILER_FLAGS "${COMPILER_FLAGS} -gdwarf-aranges")
endif ()
# See https://blog.llvm.org/posts/2021-04-05-constructor-homing-for-debug-info/
if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG" OR CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Xclang -fuse-ctor-homing")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Xclang -fuse-ctor-homing")
endif()
no_warning(enum-constexpr-conversion) # breaks Protobuf in clang-16
option(ENABLE_TESTS "Provide unit_test_dbms target with Google.Test unit tests" ON)
option(ENABLE_EXAMPLES "Build all example programs in 'examples' subdirectories" OFF)
option(ENABLE_BENCHMARKS "Build all benchmark programs in 'benchmarks' subdirectories" OFF)
@ -205,8 +210,6 @@ option(OMIT_HEAVY_DEBUG_SYMBOLS
"Do not generate debugger info for heavy modules (ClickHouse functions and dictionaries, some contrib)"
${OMIT_HEAVY_DEBUG_SYMBOLS_DEFAULT})
option(USE_DEBUG_HELPERS "Enable debug helpers" ${USE_DEBUG_HELPERS})
option(BUILD_STANDALONE_KEEPER "Build keeper as small standalone binary" OFF)
if (NOT BUILD_STANDALONE_KEEPER)
option(CREATE_KEEPER_SYMLINK "Create symlink for clickhouse-keeper to main server binary" ON)
@ -254,10 +257,17 @@ endif()
include(cmake/cpu_features.cmake)
# Asynchronous unwind tables are needed for Query Profiler.
# They are already by default on some platforms but possibly not on all platforms.
# Enable it explicitly.
set (COMPILER_FLAGS "${COMPILER_FLAGS} -fasynchronous-unwind-tables")
# Query Profiler doesn't work on MacOS for several reasons
# - PHDR cache is not available
# - We use native functionality to get stacktraces which is not async signal safe
# and thus we don't need to generate asynchronous unwind tables
if (NOT OS_DARWIN)
# Asynchronous unwind tables are needed for Query Profiler.
# They are already by default on some platforms but possibly not on all platforms.
# Enable it explicitly.
set (COMPILER_FLAGS "${COMPILER_FLAGS} -fasynchronous-unwind-tables")
endif()
# Reproducible builds.
if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
@ -275,16 +285,12 @@ endif ()
option (ENABLE_BUILD_PROFILING "Enable profiling of build time" OFF)
if (ENABLE_BUILD_PROFILING)
if (COMPILER_CLANG)
set (COMPILER_FLAGS "${COMPILER_FLAGS} -ftime-trace")
set (COMPILER_FLAGS "${COMPILER_FLAGS} -ftime-trace")
if (LINKER_NAME MATCHES "lld")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--time-trace")
set (CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -Wl,--time-trace")
endif ()
else ()
message (${RECONFIGURE_MESSAGE_LEVEL} "Build profiling is only available with CLang")
endif ()
if (LINKER_NAME MATCHES "lld")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--time-trace")
set (CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -Wl,--time-trace")
endif ()
endif ()
set (CMAKE_CXX_STANDARD 23)
@ -295,24 +301,23 @@ set (CMAKE_C_STANDARD 11)
set (CMAKE_C_EXTENSIONS ON) # required by most contribs written in C
set (CMAKE_C_STANDARD_REQUIRED ON)
if (COMPILER_CLANG)
# Enable C++14 sized global deallocation functions. It should be enabled by setting -std=c++14 but I'm not sure.
# See https://reviews.llvm.org/D112921
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsized-deallocation")
# Enable C++14 sized global deallocation functions. It should be enabled by setting -std=c++14 but I'm not sure.
# See https://reviews.llvm.org/D112921
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsized-deallocation")
# falign-functions=32 prevents random performance regressions when the code changes, thus providing more stable
# benchmarks.
set(COMPILER_FLAGS "${COMPILER_FLAGS} -falign-functions=32")
# falign-functions=32 prevents random performance regressions when the code changes, thus providing more stable
# benchmarks.
set(COMPILER_FLAGS "${COMPILER_FLAGS} -falign-functions=32")
if (ARCH_AMD64)
# align branches within a 32-byte boundary to avoid potential performance loss when the code layout changes,
# which makes benchmark results more stable.
set(BRANCHES_WITHIN_32B_BOUNDARIES "-mbranches-within-32B-boundaries")
set(COMPILER_FLAGS "${COMPILER_FLAGS} ${BRANCHES_WITHIN_32B_BOUNDARIES}")
endif()
endif ()
if (ARCH_AMD64)
# align branches within a 32-byte boundary to avoid potential performance loss when the code layout changes,
# which makes benchmark results more stable.
set(BRANCHES_WITHIN_32B_BOUNDARIES "-mbranches-within-32B-boundaries")
set(COMPILER_FLAGS "${COMPILER_FLAGS} ${BRANCHES_WITHIN_32B_BOUNDARIES}")
endif()
set (COMPILER_FLAGS "${COMPILER_FLAGS}")
# Disable floating-point expression contraction in order to get consistent floating point calculation results across platforms
set (COMPILER_FLAGS "${COMPILER_FLAGS} -ffp-contract=off")
# Our built-in unwinder only supports DWARF version up to 4.
set (DEBUG_INFO_FLAGS "-g")
@ -338,39 +343,34 @@ set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} ${COMPILER_FLAGS} $
set (CMAKE_ASM_FLAGS_RELWITHDEBINFO "${CMAKE_ASM_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
if (COMPILER_CLANG)
if (OS_DARWIN)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-U,_inside_main")
endif()
if (OS_DARWIN)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-U,_inside_main")
endif()
# Display absolute paths in error messages. Otherwise KDevelop fails to navigate to the correct file and opens a new file instead.
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fdiagnostics-absolute-paths")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fdiagnostics-absolute-paths")
# Display absolute paths in error messages. Otherwise KDevelop fails to navigate to the correct file and opens a new file instead.
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fdiagnostics-absolute-paths")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fdiagnostics-absolute-paths")
if (NOT ENABLE_TESTS AND NOT SANITIZE AND OS_LINUX)
# https://clang.llvm.org/docs/ThinLTO.html
# Applies to clang and linux only.
# Disabled when building with tests or sanitizers.
option(ENABLE_THINLTO "Clang-specific link time optimization" ON)
endif()
if (NOT ENABLE_TESTS AND NOT SANITIZE AND NOT SANITIZE_COVERAGE AND OS_LINUX)
# https://clang.llvm.org/docs/ThinLTO.html
# Applies to clang and linux only.
# Disabled when building with tests or sanitizers.
option(ENABLE_THINLTO "Clang-specific link time optimization" ON)
endif()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fstrict-vtable-pointers")
# We cannot afford to use LTO when compiling unit tests, and it's not enough
# to only supply -fno-lto at the final linking stage. So we disable it
# completely.
if (ENABLE_THINLTO AND NOT ENABLE_TESTS AND NOT SANITIZE)
# Link time optimization
set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -flto=thin -fwhole-program-vtables")
set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -flto=thin -fwhole-program-vtables")
set (CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} -flto=thin -fwhole-program-vtables")
elseif (ENABLE_THINLTO)
message (${RECONFIGURE_MESSAGE_LEVEL} "Cannot enable ThinLTO")
endif ()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fstrict-vtable-pointers")
# We cannot afford to use LTO when compiling unit tests, and it's not enough
# to only supply -fno-lto at the final linking stage. So we disable it
# completely.
if (ENABLE_THINLTO AND NOT ENABLE_TESTS AND NOT SANITIZE)
# Link time optimization
set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -flto=thin -fwhole-program-vtables")
set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -flto=thin -fwhole-program-vtables")
set (CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} -flto=thin -fwhole-program-vtables")
elseif (ENABLE_THINLTO)
message (${RECONFIGURE_MESSAGE_LEVEL} "ThinLTO is only available with Clang")
message (${RECONFIGURE_MESSAGE_LEVEL} "Cannot enable ThinLTO")
endif ()
# Turns on all external libs like s3, kafka, ODBC, ...
@ -445,8 +445,6 @@ endif ()
enable_testing() # Enable for tests without binary
option(ENABLE_OPENSSL "This option performs a build with OpenSSL. NOTE! This option is insecure and should never be used. By default, ClickHouse uses and only supports BoringSSL" OFF)
if (ARCH_S390X)
set(ENABLE_OPENSSL_DYNAMIC_DEFAULT ON)
else ()
@ -546,7 +544,9 @@ if (ENABLE_RUST)
endif()
endif()
if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND NOT SANITIZE AND OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64))
if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO"
AND NOT SANITIZE AND NOT SANITIZE_COVERAGE AND NOT ENABLE_FUZZING
AND OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64))
set(CHECK_LARGE_OBJECT_SIZES_DEFAULT ON)
else ()
set(CHECK_LARGE_OBJECT_SIZES_DEFAULT OFF)
@ -569,9 +569,6 @@ if (FUZZER)
if (NOT(target_type STREQUAL "INTERFACE_LIBRARY" OR target_type STREQUAL "UTILITY"))
target_compile_options(${target} PRIVATE "-fsanitize=fuzzer-no-link")
endif()
# clickhouse fuzzer isn't working correctly
# initial PR https://github.com/ClickHouse/ClickHouse/pull/27526
#if (target MATCHES ".+_fuzzer" OR target STREQUAL "clickhouse")
if (target_type STREQUAL "EXECUTABLE" AND target MATCHES ".+_fuzzer")
message(STATUS "${target} instrumented with fuzzer")
target_link_libraries(${target} PUBLIC ch_contrib::fuzzer)
@ -581,6 +578,12 @@ if (FUZZER)
get_target_property(target_bin_dir ${target} BINARY_DIR)
add_custom_command(TARGET fuzzers POST_BUILD COMMAND mv "${target_bin_dir}/${target_bin_name}" "${CMAKE_CURRENT_BINARY_DIR}/programs/" VERBATIM)
endif()
if (target STREQUAL "clickhouse")
message(STATUS "${target} instrumented with fuzzer")
target_link_libraries(${target} PUBLIC ch_contrib::fuzzer_no_main)
# Add to fuzzers bundle
add_dependencies(fuzzers ${target})
endif()
endif()
endforeach()
add_custom_command(TARGET fuzzers POST_BUILD COMMAND SRC=${CMAKE_SOURCE_DIR} BIN=${CMAKE_BINARY_DIR} OUT=${CMAKE_BINARY_DIR}/programs ${CMAKE_SOURCE_DIR}/tests/fuzz/build.sh VERBATIM)

View File

@ -1,4 +1,4 @@
Copyright 2016-2023 ClickHouse, Inc.
Copyright 2016-2024 ClickHouse, Inc.
Apache License
Version 2.0, January 2004
@ -188,7 +188,7 @@ Copyright 2016-2023 ClickHouse, Inc.
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016-2023 ClickHouse, Inc.
Copyright 2016-2024 ClickHouse, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@ -93,6 +93,8 @@ if (OS MATCHES "Linux"
set (CMAKE_TOOLCHAIN_FILE "cmake/linux/toolchain-ppc64le.cmake" CACHE INTERNAL "")
elseif (ARCH MATCHES "^(s390x.*|S390X.*)")
set (CMAKE_TOOLCHAIN_FILE "cmake/linux/toolchain-s390x.cmake" CACHE INTERNAL "")
elseif (ARCH MATCHES "^(loongarch64.*|LOONGARCH64.*)")
set (CMAKE_TOOLCHAIN_FILE "cmake/linux/toolchain-loongarch64.cmake" CACHE INTERNAL "")
else ()
message (FATAL_ERROR "Unsupported architecture: ${ARCH}")
endif ()

View File

@ -28,22 +28,31 @@ curl https://clickhouse.com/ | sh
* [Slack](https://clickhouse.com/slack) and [Telegram](https://telegram.me/clickhouse_en) allow chatting with ClickHouse users in real-time.
* [Blog](https://clickhouse.com/blog/) contains various ClickHouse-related articles, as well as announcements and reports about events.
* [Code Browser (github.dev)](https://github.dev/ClickHouse/ClickHouse) with syntax highlighting, powered by github.dev.
* [Static Analysis (SonarCloud)](https://sonarcloud.io/project/issues?resolved=false&id=ClickHouse_ClickHouse) proposes C++ quality improvements.
* [Contacts](https://clickhouse.com/company/contact) can help to get your questions answered if there are any.
## Monthly Release & Community Call
Every month we get together with the community (users, contributors, customers, those interested in learning more about ClickHouse) to discuss what is coming in the latest release. If you are interested in sharing what you've built on ClickHouse, let us know.
* [v24.5 Community Call](https://clickhouse.com/company/events/v24-5-community-release-call) - May 30
## Upcoming Events
* [**ClickHouse Meetup in Berlin**](https://www.meetup.com/clickhouse-berlin-user-group/events/296488501/) - Nov 30
* [**ClickHouse Meetup in NYC**](https://www.meetup.com/clickhouse-new-york-user-group/events/296488779/) - Dec 11
* [**ClickHouse Meetup in Boston**](https://www.meetup.com/clickhouse-boston-user-group/events/296488840/) - Dec 12
Keep an eye out for upcoming meetups and events around the world. Somewhere else you want us to be? Please feel free to reach out to tyler `<at>` clickhouse `<dot>` com. You can also peruse [ClickHouse Events](https://clickhouse.com/company/news-events) for a list of all upcoming trainings, meetups, speaking engagements, etc.
Also, keep an eye out for upcoming meetups around the world. Somewhere else you want us to be? Please feel free to reach out to tyler <at> clickhouse <dot> com.
* [ClickHouse Happy Hour @ Tom's Watch Bar - Los Angeles](https://www.meetup.com/clickhouse-los-angeles-user-group/events/300740584/) - May 22
* [ClickHouse & Confluent Meetup in Dubai](https://www.meetup.com/clickhouse-dubai-meetup-group/events/299629189/) - May 28
* [ClickHouse Meetup in Stockholm](https://www.meetup.com/clickhouse-stockholm-user-group/events/299752651/) - Jun 3
* [ClickHouse Meetup @ Cloudflare - San Francisco](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/300523061/) - Jun 4
* [ClickHouse (クリックハウス) Meetup Tokyo](https://www.meetup.com/clickhouse-tokyo-user-group/events/300798053/) - Jun 5
* [ClickHouse Meetup in Amsterdam](https://www.meetup.com/clickhouse-netherlands-user-group/events/300781068/) - Jun 27
* [ClickHouse Meetup in Paris](https://www.meetup.com/clickhouse-france-user-group/events/300783448/) - Jul 9
* [ClickHouse Meetup @ Ramp - New York City](https://www.meetup.com/clickhouse-new-york-user-group/events/300595845/) - Jul 9
* [ClickHouse Meetup @ Klaviyo - Boston](https://www.meetup.com/clickhouse-boston-user-group/events/300907870) - Jul 11
## Recent Recordings
* **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible, recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Currently featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"
* **Recording available**: [**v23.10 Release Webinar**](https://www.youtube.com/watch?v=PGQS6uPb970) All the features of 23.10, one convenient video! Watch it now!
* **All release webinar recordings**: [YouTube playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3jAlSy1JxyP8zluvXaN3nxU)
* **Recording available**: [**v24.4 Release Call**](https://www.youtube.com/watch?v=dtUqgcfOGmE) All the features of 24.4, one convenient video! Watch it now!
## Interested in joining ClickHouse and making it your full-time job?

View File

@ -2,25 +2,30 @@
the file is autogenerated by utils/security-generator/generate_security.py
-->
# Security Policy
# ClickHouse Security Vulnerability Response Policy
## Security Announcements
Security fixes will be announced by posting them in the [security changelog](https://clickhouse.com/docs/en/whats-new/security-changelog/).
## Security Change Log and Support
## Scope and Supported Versions
Details regarding security fixes are publicly reported in our [security changelog](https://clickhouse.com/docs/en/whats-new/security-changelog/). A summary of known security vulnerabilities is shown at the bottom of this page.
The following versions of ClickHouse server are currently being supported with security updates:
Vulnerability notifications pre-release or during embargo periods are available to open source users and support customers registered for vulnerability alerts. Refer to our [Embargo Policy](#embargo-policy) below.
The following versions of ClickHouse server are currently supported with security updates:
| Version | Supported |
|:-|:-|
| 23.10 | ✔️ |
| 23.9 | ✔️ |
| 24.5 | ✔️ |
| 24.4 | ✔️ |
| 24.3 | ✔️ |
| 24.2 | ❌ |
| 24.1 | ❌ |
| 23.* | ❌ |
| 23.8 | ✔️ |
| 23.7 | ❌ |
| 23.6 | ❌ |
| 23.5 | ❌ |
| 23.4 | ❌ |
| 23.3 | ✔️ |
| 23.3 | ✔️ |
| 23.2 | ❌ |
| 23.1 | ❌ |
| 22.* | ❌ |
@ -34,7 +39,7 @@ The following versions of ClickHouse server are currently being supported with s
We're extremely grateful for security researchers and users who report vulnerabilities to the ClickHouse Open Source Community. All reports are thoroughly investigated by developers.
To report a potential vulnerability in ClickHouse please send the details about it to [security@clickhouse.com](mailto:security@clickhouse.com). We do not offer any financial rewards for reporting issues to us using this method. Alternatively, you can also submit your findings through our public bug bounty program hosted by [Bugcrowd](https://bugcrowd.com/clickhouse) and be rewarded for it as per the program scope and rules of engagement.
To report a potential vulnerability in ClickHouse please send the details about it through our public bug bounty program hosted by [Bugcrowd](https://bugcrowd.com/clickhouse) and be rewarded for it as per the program scope and rules of engagement.
### When Should I Report a Vulnerability?
@ -56,3 +61,21 @@ As the security issue moves from triage, to identified fix, to release planning
A public disclosure date is negotiated by the ClickHouse maintainers and the bug submitter. We prefer to fully disclose the bug as soon as possible once a user mitigation is available. It is reasonable to delay disclosure when the bug or the fix is not yet fully understood, the solution is not well-tested, or for vendor coordination. The timeframe for disclosure is from immediate (especially if it's already publicly known) to 90 days. For a vulnerability with a straightforward mitigation, we expect the time from report date to disclosure date to be on the order of 7 days.
## Embargo Policy
Open source users and support customers may subscribe to receive alerts during the embargo period by visiting [https://trust.clickhouse.com/?product=clickhouseoss](https://trust.clickhouse.com/?product=clickhouseoss), requesting access and subscribing for alerts. Subscribers agree not to make these notifications public, issue communications, share this information with others, or issue public patches before the disclosure date. Accidental disclosures must be reported immediately to trust@clickhouse.com. Failure to follow this policy or repeated leaks may result in removal from the subscriber list.
Participation criteria:
1. Be a current open source user or support customer with a valid corporate email domain (no @gmail.com, @azure.com, etc.).
1. Sign up to the ClickHouse OSS Trust Center at [https://trust.clickhouse.com](https://trust.clickhouse.com).
1. Accept the ClickHouse Security Vulnerability Response Policy as outlined above.
1. Subscribe to ClickHouse OSS Trust Center alerts.
Removal criteria:
1. Members may be removed for failure to follow this policy or repeated leaks.
1. Members may be removed for bounced messages (mail delivery failure).
1. Members may unsubscribe at any time.
Notification process:
ClickHouse will post notifications within our OSS Trust Center and notify subscribers. Subscribers must log in to the Trust Center to download the notification. The notification will include the timeframe for public disclosure.

View File

@ -86,7 +86,7 @@ public:
}
/// Return object into pool. Client must return same object that was borrowed.
inline void returnObject(T && object_to_return)
void returnObject(T && object_to_return)
{
{
std::lock_guard lock(objects_mutex);
@ -99,20 +99,20 @@ public:
}
/// Max pool size
inline size_t maxSize() const
size_t maxSize() const
{
return max_size;
}
/// Allocated objects size by the pool. If allocatedObjectsSize == maxSize then pool is full.
inline size_t allocatedObjectsSize() const
size_t allocatedObjectsSize() const
{
std::lock_guard lock(objects_mutex);
return allocated_objects_size;
}
/// Returns allocatedObjectsSize == maxSize
inline bool isFull() const
bool isFull() const
{
std::lock_guard lock(objects_mutex);
return allocated_objects_size == max_size;
@ -120,7 +120,7 @@ public:
/// Borrowed objects size. If borrowedObjectsSize == allocatedObjectsSize and pool is full.
/// Then client will wait during borrowObject function call.
inline size_t borrowedObjectsSize() const
size_t borrowedObjectsSize() const
{
std::lock_guard lock(objects_mutex);
return borrowed_objects_size;
@ -129,7 +129,7 @@ public:
private:
template <typename FactoryFunc>
inline T allocateObjectForBorrowing(const std::unique_lock<std::mutex> &, FactoryFunc && func)
T allocateObjectForBorrowing(const std::unique_lock<std::mutex> &, FactoryFunc && func)
{
++allocated_objects_size;
++borrowed_objects_size;
@ -137,7 +137,7 @@ private:
return std::forward<FactoryFunc>(func)();
}
inline T borrowFromObjects(const std::unique_lock<std::mutex> &)
T borrowFromObjects(const std::unique_lock<std::mutex> &)
{
T dst;
detail::moveOrCopyIfThrow(std::move(objects.back()), dst);
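The hunk above drops redundant `inline` specifiers from a borrow/return object pool. To illustrate the borrow/return idea in isolation, here is a minimal standalone sketch (simplified: no blocking and no size cap; the SimplePool name and API are illustrative, not the class above):

#include <mutex>
#include <vector>
#include <string>
#include <cassert>

/// Illustrative pool: objects are created on demand and recycled on return.
template <typename T>
class SimplePool
{
public:
    template <typename FactoryFunc>
    T borrowObject(FactoryFunc && factory)
    {
        std::lock_guard lock(mutex);
        if (!objects.empty())
        {
            T object = std::move(objects.back());
            objects.pop_back();
            return object;
        }
        return factory();
    }

    /// Client must return the same object that was borrowed.
    void returnObject(T && object)
    {
        std::lock_guard lock(mutex);
        objects.push_back(std::move(object));
    }

private:
    std::mutex mutex;
    std::vector<T> objects;
};

int main()
{
    SimplePool<std::string> pool;
    auto s = pool.borrowObject([] { return std::string(1024, 'x'); });
    pool.returnObject(std::move(s));
    /// The second borrow reuses the buffer instead of allocating a new one.
    auto s2 = pool.borrowObject([] { return std::string(); });
    assert(s2.size() == 1024);
}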

View File

@ -10,13 +10,17 @@ set (CMAKE_CXX_STANDARD 20)
set (SRCS
argsToConfig.cpp
cgroupsv2.cpp
coverage.cpp
demangle.cpp
Decimal.cpp
getAvailableMemoryAmount.cpp
getFQDNOrHostName.cpp
getMemoryAmount.cpp
getPageSize.cpp
getThreadId.cpp
int8_to_string.cpp
itoa.cpp
JSON.cpp
mremap.cpp
phdr_cache.cpp
@ -30,15 +34,6 @@ set (SRCS
throwError.cpp
)
if (USE_DEBUG_HELPERS)
get_target_property(MAGIC_ENUM_INCLUDE_DIR ch_contrib::magic_enum INTERFACE_INCLUDE_DIRECTORIES)
# CMake generator expression will do insane quoting when it encounters special characters like quotes, spaces, etc.
# Prefixing "SHELL:" will force it to use the original text.
set (INCLUDE_DEBUG_HELPERS "SHELL:-I\"${MAGIC_ENUM_INCLUDE_DIR}\" -include \"${ClickHouse_SOURCE_DIR}/base/base/iostream_debug_helpers.h\"")
# Use generator expression as we don't want to pollute CMAKE_CXX_FLAGS, which will interfere with CMake check system.
add_compile_options($<$<COMPILE_LANGUAGE:CXX>:${INCLUDE_DEBUG_HELPERS}>)
endif ()
add_library (common ${SRCS})
if (WITH_COVERAGE)

87 base/base/Decimal.cpp Normal file
View File

@ -0,0 +1,87 @@
#include <base/Decimal.h>
#include <base/extended_types.h>
namespace DB
{
/// Explicit template instantiations.
#define FOR_EACH_UNDERLYING_DECIMAL_TYPE(M) \
M(Int32) \
M(Int64) \
M(Int128) \
M(Int256)
#define FOR_EACH_UNDERLYING_DECIMAL_TYPE_PASS(M, X) \
M(Int32, X) \
M(Int64, X) \
M(Int128, X) \
M(Int256, X)
template <typename T> const Decimal<T> & Decimal<T>::operator += (const T & x) { value += x; return *this; }
template <typename T> const Decimal<T> & Decimal<T>::operator -= (const T & x) { value -= x; return *this; }
template <typename T> const Decimal<T> & Decimal<T>::operator *= (const T & x) { value *= x; return *this; }
template <typename T> const Decimal<T> & Decimal<T>::operator /= (const T & x) { value /= x; return *this; }
template <typename T> const Decimal<T> & Decimal<T>::operator %= (const T & x) { value %= x; return *this; }
template <typename T> void NO_SANITIZE_UNDEFINED Decimal<T>::addOverflow(const T & x) { value += x; }
/// Maybe this explicit instantiation affects performance since operators cannot be inlined.
template <typename T> template <typename U> const Decimal<T> & Decimal<T>::operator += (const Decimal<U> & x) { value += static_cast<T>(x.value); return *this; }
template <typename T> template <typename U> const Decimal<T> & Decimal<T>::operator -= (const Decimal<U> & x) { value -= static_cast<T>(x.value); return *this; }
template <typename T> template <typename U> const Decimal<T> & Decimal<T>::operator *= (const Decimal<U> & x) { value *= static_cast<T>(x.value); return *this; }
template <typename T> template <typename U> const Decimal<T> & Decimal<T>::operator /= (const Decimal<U> & x) { value /= static_cast<T>(x.value); return *this; }
template <typename T> template <typename U> const Decimal<T> & Decimal<T>::operator %= (const Decimal<U> & x) { value %= static_cast<T>(x.value); return *this; }
#define DISPATCH(TYPE_T, TYPE_U) \
template const Decimal<TYPE_T> & Decimal<TYPE_T>::operator += (const Decimal<TYPE_U> & x); \
template const Decimal<TYPE_T> & Decimal<TYPE_T>::operator -= (const Decimal<TYPE_U> & x); \
template const Decimal<TYPE_T> & Decimal<TYPE_T>::operator *= (const Decimal<TYPE_U> & x); \
template const Decimal<TYPE_T> & Decimal<TYPE_T>::operator /= (const Decimal<TYPE_U> & x); \
template const Decimal<TYPE_T> & Decimal<TYPE_T>::operator %= (const Decimal<TYPE_U> & x);
#define INVOKE(X) FOR_EACH_UNDERLYING_DECIMAL_TYPE_PASS(DISPATCH, X)
FOR_EACH_UNDERLYING_DECIMAL_TYPE(INVOKE);
#undef INVOKE
#undef DISPATCH
#define DISPATCH(TYPE) template struct Decimal<TYPE>;
FOR_EACH_UNDERLYING_DECIMAL_TYPE(DISPATCH)
#undef DISPATCH
template <typename T> bool operator< (const Decimal<T> & x, const Decimal<T> & y) { return x.value < y.value; }
template <typename T> bool operator> (const Decimal<T> & x, const Decimal<T> & y) { return x.value > y.value; }
template <typename T> bool operator<= (const Decimal<T> & x, const Decimal<T> & y) { return x.value <= y.value; }
template <typename T> bool operator>= (const Decimal<T> & x, const Decimal<T> & y) { return x.value >= y.value; }
template <typename T> bool operator== (const Decimal<T> & x, const Decimal<T> & y) { return x.value == y.value; }
template <typename T> bool operator!= (const Decimal<T> & x, const Decimal<T> & y) { return x.value != y.value; }
#define DISPATCH(TYPE) \
template bool operator< (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
template bool operator> (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
template bool operator<= (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
template bool operator>= (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
template bool operator== (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
template bool operator!= (const Decimal<TYPE> & x, const Decimal<TYPE> & y);
FOR_EACH_UNDERLYING_DECIMAL_TYPE(DISPATCH)
#undef DISPATCH
template <typename T> Decimal<T> operator+ (const Decimal<T> & x, const Decimal<T> & y) { return x.value + y.value; }
template <typename T> Decimal<T> operator- (const Decimal<T> & x, const Decimal<T> & y) { return x.value - y.value; }
template <typename T> Decimal<T> operator* (const Decimal<T> & x, const Decimal<T> & y) { return x.value * y.value; }
template <typename T> Decimal<T> operator/ (const Decimal<T> & x, const Decimal<T> & y) { return x.value / y.value; }
template <typename T> Decimal<T> operator- (const Decimal<T> & x) { return -x.value; }
#define DISPATCH(TYPE) \
template Decimal<TYPE> operator+ (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
template Decimal<TYPE> operator- (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
template Decimal<TYPE> operator* (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
template Decimal<TYPE> operator/ (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
template Decimal<TYPE> operator- (const Decimal<TYPE> & x);
FOR_EACH_UNDERLYING_DECIMAL_TYPE(DISPATCH)
#undef DISPATCH
#undef FOR_EACH_UNDERLYING_DECIMAL_TYPE_PASS
#undef FOR_EACH_UNDERLYING_DECIMAL_TYPE
}
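The FOR_EACH macros above drive explicit template instantiation: the Decimal operators are compiled once in this translation unit, and the header (next diff) marks them extern template so other translation units don't re-instantiate them. A minimal sketch of the pattern, with hypothetical file and type names (Value is not the Decimal API):

// value.h (hypothetical) ------------------------------------------------
template <typename T>
struct Value
{
    T twice(T x);
};

extern template struct Value<int>;  // promise: the instantiation lives in value.cpp

// value.cpp --------------------------------------------------------------
template <typename T>
T Value<T>::twice(T x) { return x + x; }

template struct Value<int>;  // the single explicit instantiation

// main.cpp ---------------------------------------------------------------
int main() { return Value<int>{}.twice(21) == 42 ? 0 : 1; }

The trade-off is noted in the comment above: moving the operator bodies out of the header can hurt inlining, but it shrinks compile times because each instantiation is emitted exactly once.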

View File

@ -1,20 +1,28 @@
#pragma once
#include <base/extended_types.h>
#include <base/Decimal_fwd.h>
#include <base/types.h>
#include <base/defines.h>
#if !defined(NO_SANITIZE_UNDEFINED)
#if defined(__clang__)
#define NO_SANITIZE_UNDEFINED __attribute__((__no_sanitize__("undefined")))
#else
#define NO_SANITIZE_UNDEFINED
#endif
#endif
namespace DB
{
template <class> struct Decimal;
class DateTime64;
#define FOR_EACH_UNDERLYING_DECIMAL_TYPE(M) \
M(Int32) \
M(Int64) \
M(Int128) \
M(Int256)
#define FOR_EACH_UNDERLYING_DECIMAL_TYPE_PASS(M, X) \
M(Int32, X) \
M(Int64, X) \
M(Int128, X) \
M(Int256, X)
using Decimal32 = Decimal<Int32>;
using Decimal64 = Decimal<Int64>;
using Decimal128 = Decimal<Int128>;
@ -55,36 +63,73 @@ struct Decimal
return static_cast<U>(value);
}
const Decimal<T> & operator += (const T & x) { value += x; return *this; }
const Decimal<T> & operator -= (const T & x) { value -= x; return *this; }
const Decimal<T> & operator *= (const T & x) { value *= x; return *this; }
const Decimal<T> & operator /= (const T & x) { value /= x; return *this; }
const Decimal<T> & operator %= (const T & x) { value %= x; return *this; }
const Decimal<T> & operator += (const T & x);
const Decimal<T> & operator -= (const T & x);
const Decimal<T> & operator *= (const T & x);
const Decimal<T> & operator /= (const T & x);
const Decimal<T> & operator %= (const T & x);
template <typename U> const Decimal<T> & operator += (const Decimal<U> & x) { value += x.value; return *this; }
template <typename U> const Decimal<T> & operator -= (const Decimal<U> & x) { value -= x.value; return *this; }
template <typename U> const Decimal<T> & operator *= (const Decimal<U> & x) { value *= x.value; return *this; }
template <typename U> const Decimal<T> & operator /= (const Decimal<U> & x) { value /= x.value; return *this; }
template <typename U> const Decimal<T> & operator %= (const Decimal<U> & x) { value %= x.value; return *this; }
template <typename U> const Decimal<T> & operator += (const Decimal<U> & x);
template <typename U> const Decimal<T> & operator -= (const Decimal<U> & x);
template <typename U> const Decimal<T> & operator *= (const Decimal<U> & x);
template <typename U> const Decimal<T> & operator /= (const Decimal<U> & x);
template <typename U> const Decimal<T> & operator %= (const Decimal<U> & x);
/// This is to avoid UB for sumWithOverflow()
void NO_SANITIZE_UNDEFINED addOverflow(const T & x) { value += x; }
void NO_SANITIZE_UNDEFINED addOverflow(const T & x);
T value;
};
template <typename T> inline bool operator< (const Decimal<T> & x, const Decimal<T> & y) { return x.value < y.value; }
template <typename T> inline bool operator> (const Decimal<T> & x, const Decimal<T> & y) { return x.value > y.value; }
template <typename T> inline bool operator<= (const Decimal<T> & x, const Decimal<T> & y) { return x.value <= y.value; }
template <typename T> inline bool operator>= (const Decimal<T> & x, const Decimal<T> & y) { return x.value >= y.value; }
template <typename T> inline bool operator== (const Decimal<T> & x, const Decimal<T> & y) { return x.value == y.value; }
template <typename T> inline bool operator!= (const Decimal<T> & x, const Decimal<T> & y) { return x.value != y.value; }
#define DISPATCH(TYPE) extern template struct Decimal<TYPE>;
FOR_EACH_UNDERLYING_DECIMAL_TYPE(DISPATCH)
#undef DISPATCH
template <typename T> inline Decimal<T> operator+ (const Decimal<T> & x, const Decimal<T> & y) { return x.value + y.value; }
template <typename T> inline Decimal<T> operator- (const Decimal<T> & x, const Decimal<T> & y) { return x.value - y.value; }
template <typename T> inline Decimal<T> operator* (const Decimal<T> & x, const Decimal<T> & y) { return x.value * y.value; }
template <typename T> inline Decimal<T> operator/ (const Decimal<T> & x, const Decimal<T> & y) { return x.value / y.value; }
template <typename T> inline Decimal<T> operator- (const Decimal<T> & x) { return -x.value; }
#define DISPATCH(TYPE_T, TYPE_U) \
extern template const Decimal<TYPE_T> & Decimal<TYPE_T>::operator += (const Decimal<TYPE_U> & x); \
extern template const Decimal<TYPE_T> & Decimal<TYPE_T>::operator -= (const Decimal<TYPE_U> & x); \
extern template const Decimal<TYPE_T> & Decimal<TYPE_T>::operator *= (const Decimal<TYPE_U> & x); \
extern template const Decimal<TYPE_T> & Decimal<TYPE_T>::operator /= (const Decimal<TYPE_U> & x); \
extern template const Decimal<TYPE_T> & Decimal<TYPE_T>::operator %= (const Decimal<TYPE_U> & x);
#define INVOKE(X) FOR_EACH_UNDERLYING_DECIMAL_TYPE_PASS(DISPATCH, X)
FOR_EACH_UNDERLYING_DECIMAL_TYPE(INVOKE);
#undef INVOKE
#undef DISPATCH
template <typename T> bool operator< (const Decimal<T> & x, const Decimal<T> & y);
template <typename T> bool operator> (const Decimal<T> & x, const Decimal<T> & y);
template <typename T> bool operator<= (const Decimal<T> & x, const Decimal<T> & y);
template <typename T> bool operator>= (const Decimal<T> & x, const Decimal<T> & y);
template <typename T> bool operator== (const Decimal<T> & x, const Decimal<T> & y);
template <typename T> bool operator!= (const Decimal<T> & x, const Decimal<T> & y);
#define DISPATCH(TYPE) \
extern template bool operator< (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
extern template bool operator> (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
extern template bool operator<= (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
extern template bool operator>= (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
extern template bool operator== (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
extern template bool operator!= (const Decimal<TYPE> & x, const Decimal<TYPE> & y);
FOR_EACH_UNDERLYING_DECIMAL_TYPE(DISPATCH)
#undef DISPATCH
template <typename T> Decimal<T> operator+ (const Decimal<T> & x, const Decimal<T> & y);
template <typename T> Decimal<T> operator- (const Decimal<T> & x, const Decimal<T> & y);
template <typename T> Decimal<T> operator* (const Decimal<T> & x, const Decimal<T> & y);
template <typename T> Decimal<T> operator/ (const Decimal<T> & x, const Decimal<T> & y);
template <typename T> Decimal<T> operator- (const Decimal<T> & x);
#define DISPATCH(TYPE) \
extern template Decimal<TYPE> operator+ (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
extern template Decimal<TYPE> operator- (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
extern template Decimal<TYPE> operator* (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
extern template Decimal<TYPE> operator/ (const Decimal<TYPE> & x, const Decimal<TYPE> & y); \
extern template Decimal<TYPE> operator- (const Decimal<TYPE> & x);
FOR_EACH_UNDERLYING_DECIMAL_TYPE(DISPATCH)
#undef DISPATCH
#undef FOR_EACH_UNDERLYING_DECIMAL_TYPE_PASS
#undef FOR_EACH_UNDERLYING_DECIMAL_TYPE
/// Distinguishable type to allow function resolution/deduction based on value type,
/// but also relatively easy to convert to/from Decimal64.
@ -99,7 +144,7 @@ public:
};
}
constexpr DB::UInt64 max_uint_mask = std::numeric_limits<DB::UInt64>::max();
constexpr UInt64 max_uint_mask = std::numeric_limits<UInt64>::max();
namespace std
{
@ -114,8 +159,8 @@ namespace std
{
size_t operator()(const DB::Decimal128 & x) const
{
return std::hash<DB::Int64>()(x.value >> 64)
^ std::hash<DB::Int64>()(x.value & max_uint_mask);
return std::hash<Int64>()(x.value >> 64)
^ std::hash<Int64>()(x.value & max_uint_mask);
}
};
@ -134,8 +179,8 @@ namespace std
size_t operator()(const DB::Decimal256 & x) const
{
// FIXME temp solution
return std::hash<DB::Int64>()(static_cast<DB::Int64>(x.value >> 64 & max_uint_mask))
^ std::hash<DB::Int64>()(static_cast<DB::Int64>(x.value & max_uint_mask));
return std::hash<Int64>()(static_cast<Int64>(x.value >> 64 & max_uint_mask))
^ std::hash<Int64>()(static_cast<Int64>(x.value & max_uint_mask));
}
};
}
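The std::hash specializations above fold a 128-bit (or 256-bit) value into size_t by hashing each 64-bit half and XOR-ing the results. The same trick in isolation, using the compiler extension unsigned __int128 instead of the project's Int128:

#include <cstdint>
#include <cstdio>
#include <functional>

/// Hash a 128-bit integer by hashing each 64-bit half and XOR-ing the results.
/// Note: identical halves cancel to the same hash; the "FIXME temp solution"
/// comment above acknowledges this is a temporary scheme.
size_t hash128(unsigned __int128 x)
{
    constexpr uint64_t max_uint_mask = UINT64_MAX;
    return std::hash<uint64_t>()(static_cast<uint64_t>(x >> 64))
        ^ std::hash<uint64_t>()(static_cast<uint64_t>(x & max_uint_mask));
}

int main()
{
    unsigned __int128 x = (static_cast<unsigned __int128>(0x0123456789abcdefULL) << 64) | 0xfedcba9876543210ULL;
    printf("%zu\n", hash128(x));
}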

View File

@ -1,6 +1,7 @@
#pragma once
#include <base/types.h>
#include <base/extended_types.h>
namespace wide
{
@ -43,4 +44,13 @@ concept is_over_big_int =
|| std::is_same_v<T, UInt256>
|| std::is_same_v<T, Decimal128>
|| std::is_same_v<T, Decimal256>;
template <class T>
concept is_over_big_decimal = is_decimal<T> && is_over_big_int<typename T::NativeType>;
}
template <> struct is_signed<DB::Decimal32> { static constexpr bool value = true; };
template <> struct is_signed<DB::Decimal64> { static constexpr bool value = true; };
template <> struct is_signed<DB::Decimal128> { static constexpr bool value = true; };
template <> struct is_signed<DB::Decimal256> { static constexpr bool value = true; };

View File

@ -51,11 +51,9 @@ struct DecomposedFloat
/// Returns 0 for both +0. and -0.
int sign() const
{
return (exponent() == 0 && mantissa() == 0)
? 0
: (isNegative()
? -1
: 1);
if (exponent() == 0 && mantissa() == 0)
return 0;
return isNegative() ? -1 : 1;
}
uint16_t exponent() const
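The refactored sign() treats both +0.0 and -0.0 as 0 by checking that exponent and mantissa are zero. A self-contained sketch of the same decomposition via std::bit_cast (float32 only; floatSign is an illustrative name, not the class above):

#include <bit>
#include <cstdint>
#include <cstdio>

/// Decompose an IEEE-754 float: sign is 0 for +0.0 and -0.0, else -1/+1.
int floatSign(float x)
{
    uint32_t bits = std::bit_cast<uint32_t>(x);
    uint32_t exponent = (bits >> 23) & 0xFF;   /// 8 exponent bits
    uint32_t mantissa = bits & 0x7FFFFF;       /// 23 mantissa bits
    bool is_negative = bits >> 31;             /// 1 sign bit

    if (exponent == 0 && mantissa == 0)
        return 0;
    return is_negative ? -1 : 1;
}

int main()
{
    printf("%d %d %d %d\n", floatSign(-0.0f), floatSign(0.0f), floatSign(-2.5f), floatSign(42.0f));
    /// prints: 0 0 -1 1
}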

View File

@ -11,7 +11,7 @@ namespace detail
template <is_enum E, class F, size_t ...I>
constexpr void static_for(F && f, std::index_sequence<I...>)
{
(std::forward<F>(f)(std::integral_constant<E, magic_enum::enum_value<E>(I)>()) , ...);
(f(std::integral_constant<E, magic_enum::enum_value<E>(I)>()) , ...);
}
}
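static_for expands a fold expression over every enumerator, invoking f with each value wrapped in an integral_constant, so the value is usable as a compile-time constant inside the callback. A standalone sketch without magic_enum, assuming a dense enum whose size is known:

#include <cstddef>
#include <cstdio>
#include <utility>

enum class Color { Red, Green, Blue, COUNT };

template <typename E, typename F, std::size_t... I>
constexpr void static_for_impl(F && f, std::index_sequence<I...>)
{
    /// Fold expression: invoke f once per enumerator, as a compile-time constant.
    (f(std::integral_constant<E, static_cast<E>(I)>()), ...);
}

template <typename E, std::size_t N, typename F>
constexpr void static_for(F && f)
{
    static_for_impl<E>(std::forward<F>(f), std::make_index_sequence<N>());
}

int main()
{
    static_for<Color, static_cast<std::size_t>(Color::COUNT)>([](auto constant)
    {
        printf("enumerator %d\n", static_cast<int>(constant.value));
    });
}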

View File

@ -1,8 +1,7 @@
#pragma once
#include <base/strong_typedef.h>
#include <base/extended_types.h>
#include <Common/formatIPv6.h>
#include <base/strong_typedef.h>
#include <Common/memcmpSmall.h>
namespace DB
@ -62,7 +61,8 @@ namespace std
{
size_t operator()(const DB::IPv6 & x) const
{
return std::hash<std::string_view>{}(std::string_view(reinterpret_cast<const char*>(&x.toUnderType()), IPV6_BINARY_LENGTH));
return std::hash<std::string_view>{}(
std::string_view(reinterpret_cast<const char *>(&x.toUnderType()), sizeof(DB::IPv6::UnderlyingType)));
}
};

View File

@ -10,14 +10,10 @@
#define JSON_MAX_DEPTH 100
#ifdef __clang__
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wdeprecated-dynamic-exception-spec"
#endif
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-dynamic-exception-spec"
POCO_IMPLEMENT_EXCEPTION(JSONException, Poco::Exception, "JSONException") // NOLINT(cert-err60-cpp, modernize-use-noexcept, hicpp-use-noexcept)
#ifdef __clang__
# pragma clang diagnostic pop
#endif
#pragma clang diagnostic pop
/// Read unsigned integer in a simple form from a non-0-terminated string.
@ -655,7 +651,9 @@ std::string_view JSON::getRawString() const
Pos s = ptr_begin;
if (*s != '"')
throw JSONException(std::string("JSON: expected \", got ") + *s);
while (++s != ptr_end && *s != '"');
++s;
while (s != ptr_end && *s != '"')
++s;
if (s != ptr_end)
return std::string_view(ptr_begin + 1, s - ptr_begin - 1);
throw JSONException("JSON: incorrect syntax (expected end of string, found end of JSON).");
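The rewrite above replaces an empty-body while with an explicit scan: skip the opening quote, then advance until the closing quote or the buffer end. The same scan in isolation, over a non-null-terminated buffer (the function name and empty-view error handling are illustrative; the real code throws):

#include <cstdio>
#include <string_view>

/// Extract the contents of a double-quoted string from [begin, end).
std::string_view getRawString(const char * begin, const char * end)
{
    const char * s = begin;
    if (s == end || *s != '"')
        return {};
    ++s;                            /// skip the opening quote
    while (s != end && *s != '"')   /// scan to the closing quote or the buffer end
        ++s;
    if (s != end)
        return std::string_view(begin + 1, s - begin - 1);
    return {};                      /// unterminated string
}

int main()
{
    char buf[] = {'"', 'h', 'i', '"', 'x'};
    std::string_view v = getRawString(buf, buf + sizeof(buf));
    printf("%.*s\n", static_cast<int>(v.size()), v.data());  /// prints: hi
}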

View File

@ -39,14 +39,10 @@
// NOLINTBEGIN(google-explicit-constructor)
#ifdef __clang__
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wdeprecated-dynamic-exception-spec"
#endif
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-dynamic-exception-spec"
POCO_DECLARE_EXCEPTION(Foundation_API, JSONException, Poco::Exception)
#ifdef __clang__
# pragma clang diagnostic pop
#endif
#pragma clang diagnostic pop
// NOLINTEND(google-explicit-constructor)
class JSON
@ -78,7 +74,7 @@ public:
const char * data() const { return ptr_begin; }
const char * dataEnd() const { return ptr_end; }
enum ElementType
enum ElementType : uint8_t
{
TYPE_OBJECT,
TYPE_ARRAY,

View File

@ -185,7 +185,8 @@ inline bool memequalWide(const char * p1, const char * p2, size_t size)
{
case 3: if (!compare8(p1 + 32, p2 + 32)) return false; [[fallthrough]];
case 2: if (!compare8(p1 + 16, p2 + 16)) return false; [[fallthrough]];
case 1: if (!compare8(p1, p2)) return false;
case 1: if (!compare8(p1, p2)) return false; [[fallthrough]];
default: ;
}
return compare8(p1 + size - 16, p2 + size - 16);
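The switch handles whole 16-byte chunks, and the final compare8(p1 + size - 16, ...) covers the tail with one overlapping load, so no byte-granular loop is needed; the added [[fallthrough]] and default cases just silence the warnings the diff addresses. A scalar sketch of the same overlap trick, with memcmp standing in for the SIMD compare8 (the function name is illustrative):

#include <cstring>
#include <cassert>

/// Compare [16, 64) bytes using fixed 16-byte blocks plus one overlapping tail block.
bool memequalMedium(const char * p1, const char * p2, size_t size)
{
    assert(size >= 16 && size < 64);
    switch (size / 16)
    {
        case 3: if (memcmp(p1 + 32, p2 + 32, 16) != 0) return false; [[fallthrough]];
        case 2: if (memcmp(p1 + 16, p2 + 16, 16) != 0) return false; [[fallthrough]];
        case 1: if (memcmp(p1, p2, 16) != 0) return false; [[fallthrough]];
        default: ;
    }
    /// Overlapping tail: re-checks up to 15 already-compared bytes, but avoids a byte loop.
    return memcmp(p1 + size - 16, p2 + size - 16, 16) == 0;
}

int main()
{
    const char a[] = "0123456789abcdef0123456789abcdef0123456";  /// 39 bytes
    const char b[] = "0123456789abcdef0123456789abcdef0123456";
    assert(memequalMedium(a, b, 39));
}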

View File

@ -27,7 +27,7 @@ namespace TypeListUtils /// In some contexts it's more handy to use functions in
constexpr Root<Args...> changeRoot(TypeList<Args...>) { return {}; }
template <typename F, typename ...Args>
constexpr void forEach(TypeList<Args...>, F && f) { (std::forward<F>(f)(TypeList<Args>{}), ...); }
constexpr void forEach(TypeList<Args...>, F && f) { (f(TypeList<Args>{}), ...); }
}
template <typename TypeListLeft, typename TypeListRight>

View File

@ -1,5 +1,6 @@
#pragma once
#include <bit>
#include <cstring>
#include <algorithm>
#include <type_traits>

70 base/base/cgroupsv2.cpp Normal file
View File

@ -0,0 +1,70 @@
#include <base/cgroupsv2.h>
#include <base/defines.h>
#include <fstream>
#include <sstream>
bool cgroupsV2Enabled()
{
#if defined(OS_LINUX)
try
{
/// This file exists iff the host has cgroups v2 enabled.
auto controllers_file = default_cgroups_mount / "cgroup.controllers";
if (!std::filesystem::exists(controllers_file))
return false;
return true;
}
catch (const std::filesystem::filesystem_error &) /// all "underlying OS API errors", typically: permission denied
{
return false; /// not logging the exception as most callers fall back to cgroups v1
}
#else
return false;
#endif
}
bool cgroupsV2MemoryControllerEnabled()
{
#if defined(OS_LINUX)
chassert(cgroupsV2Enabled());
/// According to https://docs.kernel.org/admin-guide/cgroup-v2.html, file "cgroup.controllers" defines which controllers are available
/// for the current + child cgroups. The set of available controllers can be restricted from level to level using file
/// "cgroups.subtree_control". It is therefore sufficient to check the bottom-most nested "cgroup.controllers" file.
std::string cgroup = cgroupV2OfProcess();
auto cgroup_dir = cgroup.empty() ? default_cgroups_mount : (default_cgroups_mount / cgroup);
std::ifstream controllers_file(cgroup_dir / "cgroup.controllers");
if (!controllers_file.is_open())
return false;
std::string controllers;
std::getline(controllers_file, controllers);
return controllers.find("memory") != std::string::npos;
#else
return false;
#endif
}
std::string cgroupV2OfProcess()
{
#if defined(OS_LINUX)
chassert(cgroupsV2Enabled());
/// All PIDs assigned to a cgroup are in /sys/fs/cgroups/{cgroup_name}/cgroup.procs
/// A simpler way to get the membership is:
std::ifstream cgroup_name_file("/proc/self/cgroup");
if (!cgroup_name_file.is_open())
return "";
/// With cgroups v2, there will be a *single* line with prefix "0::/"
/// (see https://docs.kernel.org/admin-guide/cgroup-v2.html)
std::string cgroup;
std::getline(cgroup_name_file, cgroup);
static const std::string v2_prefix = "0::/";
if (!cgroup.starts_with(v2_prefix))
return "";
cgroup = cgroup.substr(v2_prefix.length());
return cgroup;
#else
return "";
#endif
}

22 base/base/cgroupsv2.h Normal file
View File

@ -0,0 +1,22 @@
#pragma once
#include <filesystem>
#include <string>
#if defined(OS_LINUX)
/// I think it is possible to mount the cgroups hierarchy somewhere else (e.g. when in containers).
/// /sys/fs/cgroup was still symlinked to the actual mount in the cases that I have seen.
static inline const std::filesystem::path default_cgroups_mount = "/sys/fs/cgroup";
#endif
/// Is cgroups v2 enabled on the system?
bool cgroupsV2Enabled();
/// Is the memory controller of cgroups v2 enabled on the system?
/// Assumes that cgroupsV2Enabled() returns true.
bool cgroupsV2MemoryControllerEnabled();
/// Which cgroup does the process belong to?
/// Returns an empty string if the cgroup cannot be determined.
/// Assumes that cgroupsV2Enabled() returns true.
std::string cgroupV2OfProcess();
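The header documents a call-order contract: cgroupsV2Enabled() must be checked before calling the other two functions, since they assert it. A hypothetical caller respecting that contract:

#include <base/cgroupsv2.h>
#include <cstdio>

int main()
{
    /// Guard first: the other functions assert that cgroups v2 is enabled.
    if (!cgroupsV2Enabled())
    {
        printf("cgroups v2 not enabled, falling back to cgroups v1\n");
        return 0;
    }

    if (cgroupsV2MemoryControllerEnabled())
        printf("memory controller available for cgroup '%s'\n", cgroupV2OfProcess().c_str());
    else
        printf("memory controller not enabled\n");
}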

View File

@ -21,7 +21,7 @@ bool func_wrapper(Func && func, Arg && arg)
template <typename T, T Begin, typename Func, T... Is>
constexpr bool static_for_impl(Func && f, std::integer_sequence<T, Is...>)
{
return (func_wrapper(std::forward<Func>(f), std::integral_constant<T, Begin + Is>{}) || ...);
return (func_wrapper(f, std::integral_constant<T, Begin + Is>{}) || ...);
}
template <auto Begin, decltype(Begin) End, typename Func>

View File

@ -1,6 +1,7 @@
#include "coverage.h"
#include <sys/mman.h>
#pragma GCC diagnostic ignored "-Wreserved-identifier"
#pragma clang diagnostic ignored "-Wreserved-identifier"
/// WITH_COVERAGE enables the default implementation of code coverage,
@ -12,11 +13,7 @@
#include <unistd.h>
# if defined(__clang__)
extern "C" void __llvm_profile_dump(); // NOLINT
# elif defined(__GNUC__) || defined(__GNUG__)
extern "C" void __gcov_exit();
# endif
#endif
@ -27,12 +24,7 @@ void dumpCoverageReportIfPossible()
static std::mutex mutex;
std::lock_guard lock(mutex);
# if defined(__clang__)
__llvm_profile_dump(); // NOLINT
# elif defined(__GNUC__) || defined(__GNUG__)
__gcov_exit();
# endif
#endif
}
@ -52,11 +44,21 @@ namespace
uint32_t * guards_start = nullptr;
uint32_t * guards_end = nullptr;
uintptr_t * coverage_array = nullptr;
uintptr_t * current_coverage_array = nullptr;
uintptr_t * cumulative_coverage_array = nullptr;
size_t coverage_array_size = 0;
uintptr_t * all_addresses_array = nullptr;
size_t all_addresses_array_size = 0;
uintptr_t * allocate(size_t size)
{
/// Note: mmap returns zero-initialized memory, and we count on that.
void * map = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (MAP_FAILED == map)
return nullptr;
return static_cast<uintptr_t*>(map);
}
}
extern "C"
@ -79,7 +81,8 @@ void __sanitizer_cov_trace_pc_guard_init(uint32_t * start, uint32_t * stop)
coverage_array_size = stop - start;
/// Note: we will leak this.
coverage_array = static_cast<uintptr_t*>(malloc(sizeof(uintptr_t) * coverage_array_size));
current_coverage_array = allocate(sizeof(uintptr_t) * coverage_array_size);
cumulative_coverage_array = allocate(sizeof(uintptr_t) * coverage_array_size);
resetCoverage();
}
@ -92,8 +95,8 @@ void __sanitizer_cov_pcs_init(const uintptr_t * pcs_begin, const uintptr_t * pcs
return;
pc_table_initialized = true;
all_addresses_array = static_cast<uintptr_t*>(malloc(sizeof(uintptr_t) * coverage_array_size));
all_addresses_array_size = pcs_end - pcs_begin;
all_addresses_array = allocate(sizeof(uintptr_t) * all_addresses_array_size);
/// They are not real pointers: they also contain a flag in the most significant bit,
/// which we are not interested in for now. Reset it.
@ -115,17 +118,24 @@ void __sanitizer_cov_trace_pc_guard(uint32_t * guard)
/// The values of `*guard` are as you set them in
/// __sanitizer_cov_trace_pc_guard_init and so you can make them consecutive
/// and use them to dereference an array or a bit vector.
void * pc = __builtin_return_address(0);
uintptr_t pc = reinterpret_cast<uintptr_t>(__builtin_return_address(0));
coverage_array[guard - guards_start] = reinterpret_cast<uintptr_t>(pc);
current_coverage_array[guard - guards_start] = pc;
cumulative_coverage_array[guard - guards_start] = pc;
}
}
__attribute__((no_sanitize("coverage")))
std::span<const uintptr_t> getCoverage()
std::span<const uintptr_t> getCurrentCoverage()
{
return {coverage_array, coverage_array_size};
return {current_coverage_array, coverage_array_size};
}
__attribute__((no_sanitize("coverage")))
std::span<const uintptr_t> getCumulativeCoverage()
{
return {cumulative_coverage_array, coverage_array_size};
}
__attribute__((no_sanitize("coverage")))
@ -137,7 +147,7 @@ std::span<const uintptr_t> getAllInstrumentedAddresses()
__attribute__((no_sanitize("coverage")))
void resetCoverage()
{
memset(coverage_array, 0, coverage_array_size * sizeof(*coverage_array));
memset(current_coverage_array, 0, coverage_array_size * sizeof(*current_coverage_array));
/// The guard defines whether the __sanitizer_cov_trace_pc_guard should be called.
/// For example, you can unset it after first invocation to prevent excessive work.

View File

@ -15,7 +15,10 @@ void dumpCoverageReportIfPossible();
/// Get accumulated unique program addresses of the instrumented parts of the code,
/// seen so far after program startup or after previous reset.
/// The returned span will be represented as a sparse map, containing mostly zeros, which you should filter away.
std::span<const uintptr_t> getCoverage();
std::span<const uintptr_t> getCurrentCoverage();
/// Similar but not being reset.
std::span<const uintptr_t> getCumulativeCoverage();
/// Get all instrumented addresses that could be in the coverage.
std::span<const uintptr_t> getAllInstrumentedAddresses();
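getCurrentCoverage() returns a sparse array that is mostly zeros. A hypothetical consumer filters the zeros away and, for example, computes how much of the instrumented code has been hit since the last reset (resetCoverage() is the reset function defined in the .cpp above):

#include <base/coverage.h>
#include <cstddef>
#include <cstdint>
#include <cstdio>

int main()
{
    size_t hit = 0;
    for (uintptr_t addr : getCurrentCoverage())
        if (addr != 0)   /// zero entries are edges not yet executed
            ++hit;

    size_t total = getAllInstrumentedAddresses().size();
    printf("covered %zu of %zu instrumented points\n", hit, total);

    resetCoverage();     /// start a fresh measurement window
}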

View File

@ -11,7 +11,7 @@
/// including <base/defines.h>
/// - it should not have fallback to 0,
/// since this may create false-positive detection (common problem)
#if defined(__clang__) && defined(__has_feature)
#if defined(__has_feature)
# define ch_has_feature __has_feature
#endif
@ -28,8 +28,8 @@
#define NO_INLINE __attribute__((__noinline__))
#define MAY_ALIAS __attribute__((__may_alias__))
#if !defined(__x86_64__) && !defined(__aarch64__) && !defined(__PPC__) && !defined(__s390x__) && !(defined(__riscv) && (__riscv_xlen == 64))
# error "The only supported platforms are x86_64 and AArch64, PowerPC (work in progress), s390x (work in progress) and RISC-V 64 (experimental)"
#if !defined(__x86_64__) && !defined(__aarch64__) && !defined(__PPC__) && !defined(__s390x__) && !(defined(__loongarch64)) && !(defined(__riscv) && (__riscv_xlen == 64))
# error "The only supported platforms are x86_64 and AArch64, PowerPC (work in progress), s390x (work in progress), loongarch64 (experimental) and RISC-V 64 (experimental)"
#endif
/// Check for presence of address sanitizer
@ -76,24 +76,11 @@
/// Explicitly allow undefined behaviour for certain functions. Use it as a function attribute.
/// It is useful in case when compiler cannot see (and exploit) it, but UBSan can.
/// Example: multiplication of signed integers with possibility of overflow when both sides are from user input.
#if defined(__clang__)
# define NO_SANITIZE_UNDEFINED __attribute__((__no_sanitize__("undefined")))
# define NO_SANITIZE_ADDRESS __attribute__((__no_sanitize__("address")))
# define NO_SANITIZE_THREAD __attribute__((__no_sanitize__("thread")))
# define ALWAYS_INLINE_NO_SANITIZE_UNDEFINED __attribute__((__always_inline__, __no_sanitize__("undefined")))
#else /// It does not work in GCC. GCC 7 cannot recognize this attribute and GCC 8 simply ignores it.
# define NO_SANITIZE_UNDEFINED
# define NO_SANITIZE_ADDRESS
# define NO_SANITIZE_THREAD
# define ALWAYS_INLINE_NO_SANITIZE_UNDEFINED ALWAYS_INLINE
#endif
#if defined(__clang__) && defined(__clang_major__) && __clang_major__ >= 14
# define DISABLE_SANITIZER_INSTRUMENTATION __attribute__((disable_sanitizer_instrumentation))
#else
# define DISABLE_SANITIZER_INSTRUMENTATION
#endif
#define NO_SANITIZE_UNDEFINED __attribute__((__no_sanitize__("undefined")))
#define NO_SANITIZE_ADDRESS __attribute__((__no_sanitize__("address")))
#define NO_SANITIZE_THREAD __attribute__((__no_sanitize__("thread")))
#define ALWAYS_INLINE_NO_SANITIZE_UNDEFINED __attribute__((__always_inline__, __no_sanitize__("undefined")))
#define DISABLE_SANITIZER_INSTRUMENTATION __attribute__((disable_sanitizer_instrumentation))
#if !__has_include(<sanitizer/asan_interface.h>) || !defined(ADDRESS_SANITIZER)
# define ASAN_UNPOISON_MEMORY_REGION(a, b)
@ -121,68 +108,53 @@
{
[[noreturn]] void abortOnFailedAssertion(const String & description);
}
#define chassert(x) do { static_cast<bool>(x) ? void(0) : ::DB::abortOnFailedAssertion(#x); } while (0)
#define chassert_1(x, ...) do { static_cast<bool>(x) ? void(0) : ::DB::abortOnFailedAssertion(#x); } while (0)
#define chassert_2(x, comment, ...) do { static_cast<bool>(x) ? void(0) : ::DB::abortOnFailedAssertion(comment); } while (0)
#define UNREACHABLE() abort()
// clang-format off
#else
/// Here sizeof() trick is used to suppress unused warning for result,
/// since simple "(void)x" will evaluate the expression, while
/// "sizeof(!(x))" will not.
#define chassert(x) (void)sizeof(!(x))
#define chassert_1(x, ...) (void)sizeof(!(x))
#define chassert_2(x, comment, ...) (void)sizeof(!(x))
#define UNREACHABLE() __builtin_unreachable()
#endif
#define CHASSERT_DISPATCH(_1,_2, N,...) N(_1, _2)
#define CHASSERT_INVOKE(tuple) CHASSERT_DISPATCH tuple
#define chassert(...) CHASSERT_INVOKE((__VA_ARGS__, chassert_2, chassert_1))
#endif
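The dispatch above works by appending the candidate macro names to __VA_ARGS__ and letting the argument count decide which name lands in position N: one argument selects chassert_1, two select chassert_2. The same counting trick in miniature, with hypothetical LOG macros:

#include <cstdio>

/// Pick LOG_2 when called with two arguments, LOG_1 with one.
#define LOG_1(fmt, ...) printf(fmt "\n")
#define LOG_2(fmt, arg, ...) printf(fmt "\n", arg)
#define LOG_DISPATCH(_1, _2, N, ...) N(_1, _2)
#define LOG_INVOKE(tuple) LOG_DISPATCH tuple
#define LOG(...) LOG_INVOKE((__VA_ARGS__, LOG_2, LOG_1))

int main()
{
    LOG("plain message");     /// expands to LOG_1("plain message", LOG_2)
    LOG("value = %d", 42);    /// expands to LOG_2("value = %d", 42)
}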
/// Macros for Clang Thread Safety Analysis (TSA). They can be safely ignored by other compilers.
/// Feel free to extend, but please stay close to https://clang.llvm.org/docs/ThreadSafetyAnalysis.html#mutexheader
#if defined(__clang__)
# define TSA_GUARDED_BY(...) __attribute__((guarded_by(__VA_ARGS__))) /// data is protected by given capability
# define TSA_PT_GUARDED_BY(...) __attribute__((pt_guarded_by(__VA_ARGS__))) /// pointed-to data is protected by the given capability
# define TSA_REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__))) /// thread needs exclusive possession of given capability
# define TSA_REQUIRES_SHARED(...) __attribute__((requires_shared_capability(__VA_ARGS__))) /// thread needs shared possession of given capability
# define TSA_ACQUIRED_AFTER(...) __attribute__((acquired_after(__VA_ARGS__))) /// annotated lock must be locked after given lock
# define TSA_NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis)) /// disable TSA for a function
# define TSA_CAPABILITY(...) __attribute__((capability(__VA_ARGS__))) /// object of a class can be used as capability
# define TSA_ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__))) /// function acquires a capability, but does not release it
# define TSA_TRY_ACQUIRE(...) __attribute__((try_acquire_capability(__VA_ARGS__))) /// function tries to acquire a capability and returns a boolean value indicating success or failure
# define TSA_RELEASE(...) __attribute__((release_capability(__VA_ARGS__))) /// function releases the given capability
# define TSA_ACQUIRE_SHARED(...) __attribute__((acquire_shared_capability(__VA_ARGS__))) /// function acquires a shared capability, but does not release it
# define TSA_TRY_ACQUIRE_SHARED(...) __attribute__((try_acquire_shared_capability(__VA_ARGS__))) /// function tries to acquire a shared capability and returns a boolean value indicating success or failure
# define TSA_RELEASE_SHARED(...) __attribute__((release_shared_capability(__VA_ARGS__))) /// function releases the given shared capability
# define TSA_SCOPED_LOCKABLE __attribute__((scoped_lockable)) /// object of a class has scoped lockable capability
#define TSA_GUARDED_BY(...) __attribute__((guarded_by(__VA_ARGS__))) /// data is protected by given capability
#define TSA_PT_GUARDED_BY(...) __attribute__((pt_guarded_by(__VA_ARGS__))) /// pointed-to data is protected by the given capability
#define TSA_REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__))) /// thread needs exclusive possession of given capability
#define TSA_REQUIRES_SHARED(...) __attribute__((requires_shared_capability(__VA_ARGS__))) /// thread needs shared possession of given capability
#define TSA_ACQUIRED_AFTER(...) __attribute__((acquired_after(__VA_ARGS__))) /// annotated lock must be locked after given lock
#define TSA_NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis)) /// disable TSA for a function
#define TSA_CAPABILITY(...) __attribute__((capability(__VA_ARGS__))) /// object of a class can be used as capability
#define TSA_ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__))) /// function acquires a capability, but does not release it
#define TSA_TRY_ACQUIRE(...) __attribute__((try_acquire_capability(__VA_ARGS__))) /// function tries to acquire a capability and returns a boolean value indicating success or failure
#define TSA_RELEASE(...) __attribute__((release_capability(__VA_ARGS__))) /// function releases the given capability
#define TSA_ACQUIRE_SHARED(...) __attribute__((acquire_shared_capability(__VA_ARGS__))) /// function acquires a shared capability, but does not release it
#define TSA_TRY_ACQUIRE_SHARED(...) __attribute__((try_acquire_shared_capability(__VA_ARGS__))) /// function tries to acquire a shared capability and returns a boolean value indicating success or failure
#define TSA_RELEASE_SHARED(...) __attribute__((release_shared_capability(__VA_ARGS__))) /// function releases the given shared capability
#define TSA_SCOPED_LOCKABLE __attribute__((scoped_lockable)) /// object of a class has scoped lockable capability
/// Macros for suppressing TSA warnings for specific reads/writes (instead of suppressing them for the whole function)
/// They use a lambda function to apply the function attribute to a single statement. This enables us to suppress warnings locally
/// instead of suppressing them for the whole function
/// Consider adding a comment when using these macros.
# define TSA_SUPPRESS_WARNING_FOR_READ(x) ([&]() TSA_NO_THREAD_SAFETY_ANALYSIS -> const auto & { return (x); }())
# define TSA_SUPPRESS_WARNING_FOR_WRITE(x) ([&]() TSA_NO_THREAD_SAFETY_ANALYSIS -> auto & { return (x); }())
#define TSA_SUPPRESS_WARNING_FOR_READ(x) ([&]() TSA_NO_THREAD_SAFETY_ANALYSIS -> const auto & { return (x); }())
#define TSA_SUPPRESS_WARNING_FOR_WRITE(x) ([&]() TSA_NO_THREAD_SAFETY_ANALYSIS -> auto & { return (x); }())
/// This macro is useful when only one thread writes to a member
/// and you want to read this member from the same thread without locking a mutex.
/// It's safe (because no concurrent writes are possible), but TSA generates a warning.
/// (Seems like there's no way to verify it, but it makes sense to distinguish it from TSA_SUPPRESS_WARNING_FOR_READ for readability)
# define TSA_READ_ONE_THREAD(x) TSA_SUPPRESS_WARNING_FOR_READ(x)
#else
# define TSA_GUARDED_BY(...)
# define TSA_PT_GUARDED_BY(...)
# define TSA_REQUIRES(...)
# define TSA_REQUIRES_SHARED(...)
# define TSA_NO_THREAD_SAFETY_ANALYSIS
# define TSA_CAPABILITY(...)
# define TSA_ACQUIRE(...)
# define TSA_TRY_ACQUIRE(...)
# define TSA_RELEASE(...)
# define TSA_ACQUIRE_SHARED(...)
# define TSA_TRY_ACQUIRE_SHARED(...)
# define TSA_RELEASE_SHARED(...)
# define TSA_SCOPED_LOCKABLE
# define TSA_SUPPRESS_WARNING_FOR_READ(x) (x)
# define TSA_SUPPRESS_WARNING_FOR_WRITE(x) (x)
# define TSA_READ_ONE_THREAD(x) TSA_SUPPRESS_WARNING_FOR_READ(x)
#endif
#define TSA_READ_ONE_THREAD(x) TSA_SUPPRESS_WARNING_FOR_READ(x)
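/// A hypothetical usage sketch (ours, not part of the diff); `value` and `mutex` are assumed members,
/// with `value` declared as e.g. `UInt64 value TSA_GUARDED_BY(mutex);` and written only by this thread:
///
///     UInt64 readFromWriterThread() const
///     {
///         return TSA_READ_ONE_THREAD(value); /// read without locking `mutex`; safe with a single writer
///     }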
/// A template function for suppressing warnings about unused variables or function results.
template <typename... Args>

View File

@ -64,6 +64,44 @@ template <> struct is_arithmetic<UInt256> { static constexpr bool value = true;
template <typename T>
inline constexpr bool is_arithmetic_v = is_arithmetic<T>::value;
#define FOR_EACH_ARITHMETIC_TYPE(M) \
M(DataTypeDate) \
M(DataTypeDate32) \
M(DataTypeDateTime) \
M(DataTypeInt8) \
M(DataTypeUInt8) \
M(DataTypeInt16) \
M(DataTypeUInt16) \
M(DataTypeInt32) \
M(DataTypeUInt32) \
M(DataTypeInt64) \
M(DataTypeUInt64) \
M(DataTypeInt128) \
M(DataTypeUInt128) \
M(DataTypeInt256) \
M(DataTypeUInt256) \
M(DataTypeFloat32) \
M(DataTypeFloat64)
#define FOR_EACH_ARITHMETIC_TYPE_PASS(M, X) \
M(DataTypeDate, X) \
M(DataTypeDate32, X) \
M(DataTypeDateTime, X) \
M(DataTypeInt8, X) \
M(DataTypeUInt8, X) \
M(DataTypeInt16, X) \
M(DataTypeUInt16, X) \
M(DataTypeInt32, X) \
M(DataTypeUInt32, X) \
M(DataTypeInt64, X) \
M(DataTypeUInt64, X) \
M(DataTypeInt128, X) \
M(DataTypeUInt128, X) \
M(DataTypeInt256, X) \
M(DataTypeUInt256, X) \
M(DataTypeFloat32, X) \
M(DataTypeFloat64, X)
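/// A hypothetical usage sketch (ours, not part of the diff) of the X-macro pattern above,
/// instantiating a template for every listed arithmetic type; `Serializer` is an assumed name:
///
///     #define INSTANTIATE(TYPE) template class Serializer<TYPE>;
///     FOR_EACH_ARITHMETIC_TYPE(INSTANTIATE)
///     #undef INSTANTIATE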
template <typename T>
struct make_unsigned // NOLINT(readability-identifier-naming)
{

View File

@ -147,7 +147,7 @@ constexpr uint16_t maybe_negate(uint16_t x)
return ~x;
}
enum class ReturnMode
enum class ReturnMode : uint8_t
{
End,
Nullptr,

View File

@ -1,19 +1,55 @@
#include <stdexcept>
#include <fstream>
#include <base/getMemoryAmount.h>
#include <base/cgroupsv2.h>
#include <base/getPageSize.h>
#include <fstream>
#include <stdexcept>
#include <unistd.h>
#include <sys/types.h>
#include <sys/param.h>
#if defined(BSD)
#include <sys/sysctl.h>
#endif
namespace
{
std::optional<uint64_t> getCgroupsV2MemoryLimit()
{
#if defined(OS_LINUX)
if (!cgroupsV2Enabled())
return {};
if (!cgroupsV2MemoryControllerEnabled())
return {};
std::string cgroup = cgroupV2OfProcess();
auto current_cgroup = cgroup.empty() ? default_cgroups_mount : (default_cgroups_mount / cgroup);
/// Open the bottom-most nested memory limit setting file. If there is no such file at the current
/// level, try again at the parent level as memory settings are inherited.
while (current_cgroup != default_cgroups_mount.parent_path())
{
std::ifstream setting_file(current_cgroup / "memory.max");
if (setting_file.is_open())
{
uint64_t value;
if (setting_file >> value)
return {value};
else
return {}; /// e.g. the cgroups default "max"
}
current_cgroup = current_cgroup.parent_path();
}
return {};
#else
return {};
#endif
}
}
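/// An illustrative walk-through (ours, not part of the diff), assuming the process runs in
/// cgroup "user.slice/app" under the default mount: the loop tries, in order,
/// /sys/fs/cgroup/user.slice/app/memory.max, then /sys/fs/cgroup/user.slice/memory.max,
/// then /sys/fs/cgroup/memory.max; the first file that opens decides the limit
/// (or yields no limit if it contains the non-numeric default "max").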
/** Returns the size of physical memory (RAM) in bytes.
* Returns 0 on unsupported platform
*/
uint64_t getMemoryAmountOrZero()
{
int64_t num_pages = sysconf(_SC_PHYS_PAGES);
@ -26,34 +62,26 @@ uint64_t getMemoryAmountOrZero()
uint64_t memory_amount = num_pages * page_size;
#if defined(OS_LINUX)
// Try to look up the cgroup limit
// CGroups v2
std::ifstream cgroupv2_limit("/sys/fs/cgroup/memory.max");
if (cgroupv2_limit.is_open())
{
uint64_t memory_limit = 0;
cgroupv2_limit >> memory_limit;
if (memory_limit > 0 && memory_limit < memory_amount)
memory_amount = memory_limit;
}
/// Respect the memory limit set by cgroups v2.
auto limit_v2 = getCgroupsV2MemoryLimit();
if (limit_v2.has_value() && *limit_v2 < memory_amount)
memory_amount = *limit_v2;
else
{
// CGroups v1
std::ifstream cgroup_limit("/sys/fs/cgroup/memory/memory.limit_in_bytes");
if (cgroup_limit.is_open())
/// Cgroups v1 were replaced by v2 in 2015. The only reason we keep supporting v1 is that the transition to v2
/// has been slow. Caveat: hierarchical groups as in v2 are not supported for v1; the location of the memory
/// limit (virtual) file is hard-coded.
/// TODO: check at the end of 2024 if we can get rid of v1.
std::ifstream limit_file_v1("/sys/fs/cgroup/memory/memory.limit_in_bytes");
if (limit_file_v1.is_open())
{
uint64_t memory_limit = 0; // in case of read error
cgroup_limit >> memory_limit;
if (memory_limit > 0 && memory_limit < memory_amount)
memory_amount = memory_limit;
uint64_t limit_v1;
if (limit_file_v1 >> limit_v1)
memory_amount = std::min(memory_amount, limit_v1);
}
}
#endif
return memory_amount;
}

View File

@ -2,11 +2,10 @@
#include <cstdint>
/** Returns the size of physical memory (RAM) in bytes.
* Returns 0 on unsupported platform or if it cannot determine the size of physical memory.
*/
/// Returns the size in bytes of physical memory (RAM) available to the process. The value can
/// be smaller than the total RAM available to the system due to cgroup settings.
/// Returns 0 on unsupported platform or if it cannot determine the size of physical memory.
uint64_t getMemoryAmountOrZero();
/** Throws exception if it cannot determine the size of physical memory.
*/
/// Throws exception if it cannot determine the size of physical memory.
uint64_t getMemoryAmount();
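/// A hypothetical usage sketch (ours, not part of the diff):
///
///     uint64_t ram = getMemoryAmountOrZero();
///     if (ram == 0)
///         ram = 4ULL << 30; /// assumed fallback of 4 GiB on unsupported platforms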

View File

@ -146,7 +146,7 @@ namespace impl
TUInt res;
if constexpr (sizeof(TUInt) == 1)
{
res = static_cast<UInt8>(unhexDigit(data[0])) * 0x10 + static_cast<UInt8>(unhexDigit(data[1]));
res = unhexDigit(data[0]) * 0x10 + unhexDigit(data[1]);
}
else if constexpr (sizeof(TUInt) == 2)
{
@ -176,17 +176,19 @@ namespace impl
};
/// Helper template class to convert a value of any supported type to hexadecimal representation and back.
template <typename T, typename SFINAE = void>
template <typename T>
struct HexConversion;
template <typename TUInt>
struct HexConversion<TUInt, std::enable_if_t<std::is_integral_v<TUInt>>> : public HexConversionUInt<TUInt> {};
requires(std::is_integral_v<TUInt>)
struct HexConversion<TUInt> : public HexConversionUInt<TUInt> {};
template <size_t Bits, typename Signed>
struct HexConversion<wide::integer<Bits, Signed>> : public HexConversionUInt<wide::integer<Bits, Signed>> {};
template <typename CityHashUInt128> /// Partial specialization here allows us not to include <city.h> in this header.
struct HexConversion<CityHashUInt128, std::enable_if_t<std::is_same_v<CityHashUInt128, typename CityHash_v1_0_2::uint128>>>
requires(std::is_same_v<CityHashUInt128, typename CityHash_v1_0_2::uint128>)
struct HexConversion<CityHashUInt128>
{
static const constexpr size_t num_hex_digits = 32;

View File

@ -0,0 +1,9 @@
#include <base/int8_to_string.h>
namespace std
{
std::string to_string(Int8 v) /// NOLINT (cert-dcl58-cpp)
{
return to_string(int8_t{v});
}
}

View File

@ -0,0 +1,17 @@
#pragma once
#include <base/defines.h>
#include <base/types.h>
#include <fmt/format.h>
template <>
struct fmt::formatter<Int8> : fmt::formatter<int8_t>
{
};
namespace std
{
std::string to_string(Int8 v); /// NOLINT (cert-dcl58-cpp)
}
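/// A hypothetical usage sketch (ours, not part of the diff), assuming Int8 is the
/// signed _BitInt(8) alias from base/types.h:
///
///     Int8 x{-7};
///     std::string s = std::to_string(x);      /// "-7", via the overload declared above
///     std::string f = fmt::format("{}", x);   /// "-7", via fmt::formatter<Int8> above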

View File

@ -1,185 +0,0 @@
#pragma once
#include "demangle.h"
#include "getThreadId.h"
#include <type_traits>
#include <tuple>
#include <iomanip>
#include <iostream>
#include <magic_enum.hpp>
/** Usage:
*
* DUMP(variable...)
*/
template <typename Out, typename T>
Out & dumpValue(Out &, T &&);
/// Catch-all case.
template <int priority, typename Out, typename T>
std::enable_if_t<priority == -1, Out> & dumpImpl(Out & out, T &&) // NOLINT(cppcoreguidelines-missing-std-forward)
{
return out << "{...}";
}
/// An object, that could be output with operator <<.
template <int priority, typename Out, typename T>
std::enable_if_t<priority == 0, Out> & dumpImpl(Out & out, T && x, std::decay_t<decltype(std::declval<Out &>() << std::declval<T>())> * = nullptr) // NOLINT(cppcoreguidelines-missing-std-forward)
{
return out << x;
}
/// A pointer-like object.
template <int priority, typename Out, typename T>
std::enable_if_t<priority == 1
/// Protect from the case when operator * do effectively nothing (function pointer).
&& !std::is_same_v<std::decay_t<T>, std::decay_t<decltype(*std::declval<T>())>>
, Out> & dumpImpl(Out & out, T && x, std::decay_t<decltype(*std::declval<T>())> * = nullptr) // NOLINT(cppcoreguidelines-missing-std-forward)
{
if (!x)
return out << "nullptr";
return dumpValue(out, *x);
}
/// Container.
template <int priority, typename Out, typename T>
std::enable_if_t<priority == 2, Out> & dumpImpl(Out & out, T && x, std::decay_t<decltype(std::begin(std::declval<T>()))> * = nullptr) // NOLINT(cppcoreguidelines-missing-std-forward)
{
bool first = true;
out << "{";
for (const auto & elem : x)
{
if (first)
first = false;
else
out << ", ";
dumpValue(out, elem);
}
return out << "}";
}
template <int priority, typename Out, typename T>
std::enable_if_t<priority == 3 && std::is_enum_v<std::decay_t<T>>, Out> &
dumpImpl(Out & out, T && x) // NOLINT(cppcoreguidelines-missing-std-forward)
{
return out << magic_enum::enum_name(x);
}
/// string and const char * - output not as container or pointer.
template <int priority, typename Out, typename T>
std::enable_if_t<priority == 3 && (std::is_same_v<std::decay_t<T>, std::string> || std::is_same_v<std::decay_t<T>, const char *>), Out> &
dumpImpl(Out & out, T && x) // NOLINT(cppcoreguidelines-missing-std-forward)
{
return out << std::quoted(x);
}
/// UInt8 - output as number, not char.
template <int priority, typename Out, typename T>
std::enable_if_t<priority == 3 && std::is_same_v<std::decay_t<T>, unsigned char>, Out> &
dumpImpl(Out & out, T && x) // NOLINT(cppcoreguidelines-missing-std-forward)
{
return out << int(x);
}
/// Tuple, pair
template <size_t N, typename Out, typename T>
Out & dumpTupleImpl(Out & out, T && x) // NOLINT(cppcoreguidelines-missing-std-forward)
{
if constexpr (N == 0)
out << "{";
else
out << ", ";
dumpValue(out, std::get<N>(x));
if constexpr (N + 1 == std::tuple_size_v<std::decay_t<T>>)
out << "}";
else
dumpTupleImpl<N + 1>(out, x);
return out;
}
template <int priority, typename Out, typename T>
std::enable_if_t<priority == 4, Out> & dumpImpl(Out & out, T && x, std::decay_t<decltype(std::get<0>(std::declval<T>()))> * = nullptr) // NOLINT(cppcoreguidelines-missing-std-forward)
{
return dumpTupleImpl<0>(out, x);
}
template <int priority, typename Out, typename T>
Out & dumpDispatchPriorities(Out & out, T && x, std::decay_t<decltype(dumpImpl<priority>(std::declval<Out &>(), std::declval<T>()))> *) // NOLINT(cppcoreguidelines-missing-std-forward)
{
return dumpImpl<priority>(out, x);
}
// NOLINTNEXTLINE(google-explicit-constructor)
struct LowPriority { LowPriority(void *) {} };
template <int priority, typename Out, typename T>
Out & dumpDispatchPriorities(Out & out, T && x, LowPriority) // NOLINT(cppcoreguidelines-missing-std-forward)
{
return dumpDispatchPriorities<priority - 1>(out, x, nullptr);
}
template <typename Out, typename T>
Out & dumpValue(Out & out, T && x) // NOLINT(cppcoreguidelines-missing-std-forward)
{
return dumpDispatchPriorities<5>(out, x, nullptr);
}
template <typename Out, typename T>
Out & dump(Out & out, const char * name, T && x) // NOLINT(cppcoreguidelines-missing-std-forward)
{
// Dumping string literal, printing name and demangled type is irrelevant.
if constexpr (std::is_same_v<const char *, std::decay_t<std::remove_reference_t<T>>>)
{
const auto name_len = strlen(name);
const auto value_len = strlen(x);
// `name` is the same as quoted `x`
if (name_len > 2 && value_len > 0 && name[0] == '"' && name[name_len - 1] == '"'
&& strncmp(name + 1, x, std::min(value_len, name_len) - 1) == 0)
return out << x;
}
out << demangle(typeid(x).name()) << " " << name << " = ";
return dumpValue(out, x) << "; ";
}
#ifdef __clang__
#pragma clang diagnostic ignored "-Wgnu-zero-variadic-macro-arguments"
#endif
#define DUMPVAR(VAR) ::dump(std::cerr, #VAR, (VAR));
#define DUMPHEAD std::cerr << __FILE__ << ':' << __LINE__ << " [ " << getThreadId() << " ] ";
#define DUMPTAIL std::cerr << '\n';
#define DUMP1(V1) do { DUMPHEAD DUMPVAR(V1) DUMPTAIL } while(0)
#define DUMP2(V1, V2) do { DUMPHEAD DUMPVAR(V1) DUMPVAR(V2) DUMPTAIL } while(0)
#define DUMP3(V1, V2, V3) do { DUMPHEAD DUMPVAR(V1) DUMPVAR(V2) DUMPVAR(V3) DUMPTAIL } while(0)
#define DUMP4(V1, V2, V3, V4) do { DUMPHEAD DUMPVAR(V1) DUMPVAR(V2) DUMPVAR(V3) DUMPVAR(V4) DUMPTAIL } while(0)
#define DUMP5(V1, V2, V3, V4, V5) do { DUMPHEAD DUMPVAR(V1) DUMPVAR(V2) DUMPVAR(V3) DUMPVAR(V4) DUMPVAR(V5) DUMPTAIL } while(0)
#define DUMP6(V1, V2, V3, V4, V5, V6) do { DUMPHEAD DUMPVAR(V1) DUMPVAR(V2) DUMPVAR(V3) DUMPVAR(V4) DUMPVAR(V5) DUMPVAR(V6) DUMPTAIL } while(0)
#define DUMP7(V1, V2, V3, V4, V5, V6, V7) do { DUMPHEAD DUMPVAR(V1) DUMPVAR(V2) DUMPVAR(V3) DUMPVAR(V4) DUMPVAR(V5) DUMPVAR(V6) DUMPVAR(V7) DUMPTAIL } while(0)
#define DUMP8(V1, V2, V3, V4, V5, V6, V7, V8) do { DUMPHEAD DUMPVAR(V1) DUMPVAR(V2) DUMPVAR(V3) DUMPVAR(V4) DUMPVAR(V5) DUMPVAR(V6) DUMPVAR(V7) DUMPVAR(V8) DUMPTAIL } while(0)
#define DUMP9(V1, V2, V3, V4, V5, V6, V7, V8, V9) do { DUMPHEAD DUMPVAR(V1) DUMPVAR(V2) DUMPVAR(V3) DUMPVAR(V4) DUMPVAR(V5) DUMPVAR(V6) DUMPVAR(V7) DUMPVAR(V8) DUMPVAR(V9) DUMPTAIL } while(0)
/// https://groups.google.com/forum/#!searchin/kona-dev/variadic$20macro%7Csort:date/kona-dev/XMA-lDOqtlI/GCzdfZsD41sJ
#define VA_NUM_ARGS_IMPL(x1, x2, x3, x4, x5, x6, x7, x8, x9, N, ...) N
#define VA_NUM_ARGS(...) VA_NUM_ARGS_IMPL(__VA_ARGS__, 9, 8, 7, 6, 5, 4, 3, 2, 1)
#define MAKE_VAR_MACRO_IMPL_CONCAT(PREFIX, NUM_ARGS) PREFIX ## NUM_ARGS
#define MAKE_VAR_MACRO_IMPL(PREFIX, NUM_ARGS) MAKE_VAR_MACRO_IMPL_CONCAT(PREFIX, NUM_ARGS)
#define MAKE_VAR_MACRO(PREFIX, ...) MAKE_VAR_MACRO_IMPL(PREFIX, VA_NUM_ARGS(__VA_ARGS__))
#define DUMP(...) MAKE_VAR_MACRO(DUMP, __VA_ARGS__)(__VA_ARGS__)

base/base/itoa.cpp Normal file
View File

@ -0,0 +1,505 @@
// Based on https://github.com/amdn/itoa and combined with our optimizations
//
//=== itoa.cpp - Fast integer to ascii conversion --*- C++ -*-//
//
// The MIT License (MIT)
// Copyright (c) 2016 Arturo Martin-de-Nicolas
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//===----------------------------------------------------------------------===//
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <type_traits>
#include <base/defines.h>
#include <base/extended_types.h>
#include <base/itoa.h>
namespace
{
template <typename T>
ALWAYS_INLINE inline constexpr T pow10(size_t x)
{
return x ? 10 * pow10<T>(x - 1) : 1;
}
// Division by a power of 10 is implemented using a multiplicative inverse.
// This strength reduction is also done by optimizing compilers, but
// presently the fastest results are produced by using the values
// for the multiplication and the shift as given by the algorithm
// described by Agner Fog in "Optimizing Subroutines in Assembly Language"
//
// http://www.agner.org/optimize/optimizing_assembly.pdf
//
// "Integer division by a constant (all processors)
// A floating point number can be divided by a constant by multiplying
// with the reciprocal. If we want to do the same with integers, we have
// to scale the reciprocal by 2n and then shift the product to the right
// by n. There are various algorithms for finding a suitable value of n
// and compensating for rounding errors. The algorithm described below
// was invented by Terje Mathisen, Norway, and not published elsewhere."
/// Division by constant is performed by:
/// 1. Adding 1 if needed;
/// 2. Multiplying by another constant;
/// 3. Shifting right by another constant.
template <typename UInt, bool add_, UInt multiplier_, unsigned shift_>
struct Division
{
static constexpr bool add{add_};
static constexpr UInt multiplier{multiplier_};
static constexpr unsigned shift{shift_};
};
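/// A worked example (ours, not part of the diff) for the uint8_t instance below,
/// Division<uint8_t, false, 205U, 11>, i.e. dividing x in [0, 255] by 10:
///
///     q = (205 * uint16_t(x)) >> 11;   /// e.g. x = 255: (205 * 255) >> 11 = 52275 >> 11 = 25
///
/// which matches 255 / 10 == 25; the multiplication happens in the next-wider type, as in split().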
/// Select a type with appropriate number of bytes from the list of types.
/// First parameter is the number of bytes requested. Then goes a list of types with 1, 2, 4, ... number of bytes.
/// Example: SelectType<4, uint8_t, uint16_t, uint32_t, uint64_t> will select uint32_t.
template <size_t N, typename T, typename... Ts>
struct SelectType
{
using Result = typename SelectType<N / 2, Ts...>::Result;
};
template <typename T, typename... Ts>
struct SelectType<1, T, Ts...>
{
using Result = T;
};
/// Division by 10^N where N is the size of the type.
template <size_t N>
using DivisionBy10PowN = typename SelectType<
N,
Division<uint8_t, false, 205U, 11>, /// divide by 10
Division<uint16_t, true, 41943U, 22>, /// divide by 100
Division<uint32_t, false, 3518437209U, 45>, /// divide by 10000
Division<uint64_t, false, 12379400392853802749ULL, 90> /// divide by 100000000
>::Result;
template <size_t N>
using UnsignedOfSize = typename SelectType<N, uint8_t, uint16_t, uint32_t, uint64_t, __uint128_t>::Result;
/// Holds the result of dividing an unsigned N-byte variable by 10^N, resulting in a quotient and a remainder.
template <size_t N>
struct QuotientAndRemainder
{
UnsignedOfSize<N> quotient; // quotient with fewer than 2*N decimal digits
UnsignedOfSize<N / 2> remainder; // remainder with at most N decimal digits
};
template <size_t N>
QuotientAndRemainder<N> inline split(UnsignedOfSize<N> value)
{
constexpr DivisionBy10PowN<N> division;
UnsignedOfSize<N> quotient = (division.multiplier * (UnsignedOfSize<2 * N>(value) + division.add)) >> division.shift;
UnsignedOfSize<N / 2> remainder = static_cast<UnsignedOfSize<N / 2>>(value - quotient * pow10<UnsignedOfSize<N / 2>>(N));
return {quotient, remainder};
}
ALWAYS_INLINE inline char * outDigit(char * p, uint8_t value)
{
*p = '0' + value;
++p;
return p;
}
// Using a lookup table to convert binary numbers from 0 to 99
// into ascii characters as described by Andrei Alexandrescu in
// https://www.facebook.com/notes/facebook-engineering/three-optimization-tips-for-c/10151361643253920/
const char digits[201] = "00010203040506070809"
"10111213141516171819"
"20212223242526272829"
"30313233343536373839"
"40414243444546474849"
"50515253545556575859"
"60616263646566676869"
"70717273747576777879"
"80818283848586878889"
"90919293949596979899";
ALWAYS_INLINE inline char * outTwoDigits(char * p, uint8_t value)
{
memcpy(p, &digits[value * 2], 2);
p += 2;
return p;
}
namespace convert
{
template <typename UInt, size_t N = sizeof(UInt)>
char * head(char * p, UInt u);
template <typename UInt, size_t N = sizeof(UInt)>
char * tail(char * p, UInt u);
//===----------------------------------------------------------===//
// head: find most significant digit, skip leading zeros
//===----------------------------------------------------------===//
// "x" contains quotient and remainder after division by 10^N
// quotient is less than 10^N
template <size_t N>
ALWAYS_INLINE inline char * head(char * p, QuotientAndRemainder<N> x)
{
p = head(p, UnsignedOfSize<N / 2>(x.quotient));
p = tail(p, x.remainder);
return p;
}
// "u" is less than 10^2*N
template <typename UInt, size_t N>
ALWAYS_INLINE inline char * head(char * p, UInt u)
{
return u < pow10<UnsignedOfSize<N>>(N) ? head(p, UnsignedOfSize<N / 2>(u)) : head<N>(p, split<N>(u));
}
// recursion base case, selected when "u" is one byte
template <>
ALWAYS_INLINE inline char * head<UnsignedOfSize<1>, 1>(char * p, UnsignedOfSize<1> u)
{
return u < 10 ? outDigit(p, u) : outTwoDigits(p, u);
}
//===----------------------------------------------------------===//
// tail: produce all digits including leading zeros
//===----------------------------------------------------------===//
// recursive step, "u" is less than 10^2*N
template <typename UInt, size_t N>
ALWAYS_INLINE inline char * tail(char * p, UInt u)
{
QuotientAndRemainder<N> x = split<N>(u);
p = tail(p, UnsignedOfSize<N / 2>(x.quotient));
p = tail(p, x.remainder);
return p;
}
// recursion base case, selected when "u" is one byte
template <>
ALWAYS_INLINE inline char * tail<UnsignedOfSize<1>, 1>(char * p, UnsignedOfSize<1> u)
{
return outTwoDigits(p, u);
}
//===----------------------------------------------------------===//
// large values are >= 10^2*N
// where x contains quotient and remainder after division by 10^N
//===----------------------------------------------------------===//
template <size_t N>
ALWAYS_INLINE inline char * large(char * p, QuotientAndRemainder<N> x)
{
QuotientAndRemainder<N> y = split<N>(x.quotient);
p = head(p, UnsignedOfSize<N / 2>(y.quotient));
p = tail(p, y.remainder);
p = tail(p, x.remainder);
return p;
}
//===----------------------------------------------------------===//
// handle values of "u" that might be >= 10^2*N
// where N is the size of "u" in bytes
//===----------------------------------------------------------===//
template <typename UInt, size_t N = sizeof(UInt)>
ALWAYS_INLINE inline char * uitoa(char * p, UInt u)
{
if (u < pow10<UnsignedOfSize<N>>(N))
return head(p, UnsignedOfSize<N / 2>(u));
QuotientAndRemainder<N> x = split<N>(u);
return u < pow10<UnsignedOfSize<N>>(2 * N) ? head<N>(p, x) : large<N>(p, x);
}
// selected when "u" is one byte
template <>
ALWAYS_INLINE inline char * uitoa<UnsignedOfSize<1>, 1>(char * p, UnsignedOfSize<1> u)
{
if (u < 10)
return outDigit(p, u);
else if (u < 100)
return outTwoDigits(p, u);
else
{
p = outDigit(p, u / 100);
p = outTwoDigits(p, u % 100);
return p;
}
}
//===----------------------------------------------------------===//
// handle unsigned and signed integral operands
//===----------------------------------------------------------===//
// itoa: handle unsigned integral operands (selected by SFINAE)
template <typename U>
requires(!std::is_signed_v<U> && std::is_integral_v<U>)
ALWAYS_INLINE inline char * itoa(U u, char * p)
{
return convert::uitoa(p, u);
}
// itoa: handle signed integral operands (selected by SFINAE)
template <typename I, size_t N = sizeof(I)>
requires(std::is_signed_v<I> && std::is_integral_v<I>)
ALWAYS_INLINE inline char * itoa(I i, char * p)
{
// Need "mask" to be filled with a copy of the sign bit.
// If "i" is a negative value, then the result of "operator >>"
// is implementation-defined, though usually it is an arithmetic
// right shift that replicates the sign bit.
// Use a conditional expression to be portable,
// a good optimizing compiler generates an arithmetic right shift
// and avoids the conditional branch.
UnsignedOfSize<N> mask = i < 0 ? ~UnsignedOfSize<N>(0) : 0;
// Now get the absolute value of "i" and cast to unsigned type UnsignedOfSize<N>.
// Cannot use std::abs() because the result is undefined
// in 2's complement systems for the most-negative value.
// Want to avoid conditional branch for performance reasons since
// CPU branch prediction will be ineffective when negative values
// occur randomly.
// Let "u" be "i" cast to unsigned type UnsignedOfSize<N>.
// Subtract "u" from 2*u if "i" is positive or 0 if "i" is negative.
// This yields the absolute value with the desired type without
// using a conditional branch and without invoking undefined or
// implementation defined behavior:
UnsignedOfSize<N> u = ((2 * UnsignedOfSize<N>(i)) & ~mask) - UnsignedOfSize<N>(i);
// Unconditionally store a minus sign when producing digits
// in a forward direction and increment the pointer only if
// the value is in fact negative.
// This avoids a conditional branch and is safe because we will
// always produce at least one digit and it will overwrite the
// minus sign when the value is not negative.
*p = '-';
p += (mask & 1);
p = convert::uitoa(p, u);
return p;
}
}
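/// A worked example (ours, not part of the diff) of the branch-free absolute value above,
/// for int8_t i = -5 (so N = 1):
///
///     mask = 0xFF                                   /// i < 0
///     UnsignedOfSize<1>(i) = 251                    /// two's complement of -5
///     u = ((2 * 251) & ~0xFF) - 251 = 256 - 251 = 5
///
/// '-' is stored unconditionally; p += (mask & 1) advances past it only for negative values.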
const uint64_t max_multiple_of_hundred_that_fits_in_64_bits = 1'00'00'00'00'00'00'00'00'00ull;
const int max_multiple_of_hundred_blocks = 9;
static_assert(max_multiple_of_hundred_that_fits_in_64_bits % 100 == 0);
ALWAYS_INLINE inline char * writeUIntText(UInt128 _x, char * p)
{
/// If the highest 64-bit item is empty, we can print just the lowest item as a u64
if (_x.items[UInt128::_impl::little(1)] == 0)
return convert::itoa(_x.items[UInt128::_impl::little(0)], p);
/// Doing operations using __int128 is faster and we already rely on this feature
using T = unsigned __int128;
T x = (T(_x.items[UInt128::_impl::little(1)]) << 64) + T(_x.items[UInt128::_impl::little(0)]);
/// We are going to accumulate blocks of 2 digits to print until the number is small enough to be printed as a u64
/// To do this we could do: x / 100, x % 100
/// But that would mean many iterations with long integers, so instead we divide by a much larger integer,
/// a multiple of 100 (100^9), and then get the blocks out of it (as u64)
/// Once we reach u64::max we can stop and use the fast method to print that in the front
static const T large_divisor = max_multiple_of_hundred_that_fits_in_64_bits;
static const T largest_uint64 = std::numeric_limits<uint64_t>::max();
uint8_t two_values[20] = {0}; // 39 Max characters / 2
int current_block = 0;
while (x > largest_uint64)
{
uint64_t u64_remainder = uint64_t(x % large_divisor);
x /= large_divisor;
int pos = current_block;
while (u64_remainder)
{
two_values[pos] = uint8_t(u64_remainder % 100);
pos++;
u64_remainder /= 100;
}
current_block += max_multiple_of_hundred_blocks;
}
char * highest_part_print = convert::itoa(uint64_t(x), p);
for (int i = 0; i < current_block; i++)
{
outTwoDigits(highest_part_print, two_values[current_block - 1 - i]);
highest_part_print += 2;
}
return highest_part_print;
}
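/// An illustrative note (ours, not part of the diff) on the block scheme above: each
/// x % large_divisor strips 18 decimal digits, saved least-significant-first as 9 two-digit
/// values; once x fits in a u64, convert::itoa prints the leading digits, and the saved
/// blocks are then emitted most-significant-first, two digits at a time.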
ALWAYS_INLINE inline char * writeUIntText(UInt256 _x, char * p)
{
/// If possible, treat it as a smaller integer, since those are much faster to print
if (_x.items[UInt256::_impl::little(3)] == 0 && _x.items[UInt256::_impl::little(2)] == 0)
return writeUIntText(UInt128{_x.items[UInt256::_impl::little(0)], _x.items[UInt256::_impl::little(1)]}, p);
/// If available (x86), we transform from our custom class to _BitInt(256), which has better support in the compiler
/// and produces better code
using T =
#if defined(__x86_64__)
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wbit-int-extension"
unsigned _BitInt(256)
# pragma clang diagnostic pop
#else
UInt256
#endif
;
#if defined(__x86_64__)
T x = (T(_x.items[UInt256::_impl::little(3)]) << 192) + (T(_x.items[UInt256::_impl::little(2)]) << 128)
+ (T(_x.items[UInt256::_impl::little(1)]) << 64) + T(_x.items[UInt256::_impl::little(0)]);
#else
T x = _x;
#endif
/// Similar to writeUIntText(UInt128), except that in this case we stop as soon as we reach the largest u128
/// and switch to that function
uint8_t two_values[39] = {0}; // 78 Max characters / 2
int current_pos = 0;
static const T large_divisor = max_multiple_of_hundred_that_fits_in_64_bits;
static const T largest_uint128 = T(std::numeric_limits<uint64_t>::max()) << 64 | T(std::numeric_limits<uint64_t>::max());
while (x > largest_uint128)
{
uint64_t u64_remainder = uint64_t(x % large_divisor);
x /= large_divisor;
int pos = current_pos;
while (u64_remainder)
{
two_values[pos] = uint8_t(u64_remainder % 100);
pos++;
u64_remainder /= 100;
}
current_pos += max_multiple_of_hundred_blocks;
}
#if defined(__x86_64__)
UInt128 pending{uint64_t(x), uint64_t(x >> 64)};
#else
UInt128 pending{x.items[UInt256::_impl::little(0)], x.items[UInt256::_impl::little(1)]};
#endif
char * highest_part_print = writeUIntText(pending, p);
for (int i = 0; i < current_pos; i++)
{
outTwoDigits(highest_part_print, two_values[current_pos - 1 - i]);
highest_part_print += 2;
}
return highest_part_print;
}
ALWAYS_INLINE inline char * writeLeadingMinus(char * pos)
{
*pos = '-';
return pos + 1;
}
template <typename T>
ALWAYS_INLINE inline char * writeSIntText(T x, char * pos)
{
static_assert(std::is_same_v<T, Int128> || std::is_same_v<T, Int256>);
using UnsignedT = make_unsigned_t<T>;
static constexpr T min_int = UnsignedT(1) << (sizeof(T) * 8 - 1);
if (unlikely(x == min_int))
{
if constexpr (std::is_same_v<T, Int128>)
{
const char * res = "-170141183460469231731687303715884105728";
memcpy(pos, res, strlen(res));
return pos + strlen(res);
}
else if constexpr (std::is_same_v<T, Int256>)
{
const char * res = "-57896044618658097711785492504343953926634992332820282019728792003956564819968";
memcpy(pos, res, strlen(res));
return pos + strlen(res);
}
}
if (x < 0)
{
x = -x;
pos = writeLeadingMinus(pos);
}
return writeUIntText(UnsignedT(x), pos);
}
}
char * itoa(UInt8 i, char * p)
{
return convert::itoa(uint8_t(i), p);
}
char * itoa(Int8 i, char * p)
{
return convert::itoa(int8_t(i), p);
}
char * itoa(UInt128 i, char * p)
{
return writeUIntText(i, p);
}
char * itoa(Int128 i, char * p)
{
return writeSIntText(i, p);
}
char * itoa(UInt256 i, char * p)
{
return writeUIntText(i, p);
}
char * itoa(Int256 i, char * p)
{
return writeSIntText(i, p);
}
#define DEFAULT_ITOA(T) \
char * itoa(T i, char * p) \
{ \
return convert::itoa(i, p); \
}
#define FOR_MISSING_INTEGER_TYPES(M) \
M(uint8_t) \
M(UInt16) \
M(UInt32) \
M(UInt64) \
M(int8_t) \
M(Int16) \
M(Int32) \
M(Int64)
FOR_MISSING_INTEGER_TYPES(DEFAULT_ITOA)
#if defined(OS_DARWIN)
DEFAULT_ITOA(unsigned long)
DEFAULT_ITOA(long)
#endif
#undef FOR_MISSING_INTEGER_TYPES
#undef DEFAULT_ITOA

View File

@ -1,446 +1,30 @@
#pragma once
// Based on https://github.com/amdn/itoa and combined with our optimizations
//
//=== itoa.h - Fast integer to ascii conversion --*- C++ -*-//
//
// The MIT License (MIT)
// Copyright (c) 2016 Arturo Martin-de-Nicolas
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//===----------------------------------------------------------------------===//
#include <cstdint>
#include <cstddef>
#include <cstring>
#include <type_traits>
#include <base/extended_types.h>
#define FOR_INTEGER_TYPES(M) \
M(uint8_t) \
M(UInt8) \
M(UInt16) \
M(UInt32) \
M(UInt64) \
M(UInt128) \
M(UInt256) \
M(int8_t) \
M(Int8) \
M(Int16) \
M(Int32) \
M(Int64) \
M(Int128) \
M(Int256)
template <typename T>
inline int digits10(T x)
{
if (x < 10ULL)
return 1;
if (x < 100ULL)
return 2;
if (x < 1000ULL)
return 3;
if (x < 1000000000000ULL)
{
if (x < 100000000ULL)
{
if (x < 1000000ULL)
{
if (x < 10000ULL)
return 4;
else
return 5 + (x >= 100000ULL);
}
return 7 + (x >= 10000000ULL);
}
if (x < 10000000000ULL)
return 9 + (x >= 1000000000ULL);
return 11 + (x >= 100000000000ULL);
}
return 12 + digits10(x / 1000000000000ULL);
}
#define INSTANTIATION(T) char * itoa(T i, char * p);
FOR_INTEGER_TYPES(INSTANTIATION)
#if defined(OS_DARWIN)
INSTANTIATION(unsigned long)
INSTANTIATION(long)
#endif
namespace impl
{
template <typename T>
static constexpr T pow10(size_t x)
{
return x ? 10 * pow10<T>(x - 1) : 1;
}
// Division by a power of 10 is implemented using a multiplicative inverse.
// This strength reduction is also done by optimizing compilers, but
// presently the fastest results are produced by using the values
// for the multiplication and the shift as given by the algorithm
// described by Agner Fog in "Optimizing Subroutines in Assembly Language"
//
// http://www.agner.org/optimize/optimizing_assembly.pdf
//
// "Integer division by a constant (all processors)
// A floating point number can be divided by a constant by multiplying
// with the reciprocal. If we want to do the same with integers, we have
// to scale the reciprocal by 2n and then shift the product to the right
// by n. There are various algorithms for finding a suitable value of n
// and compensating for rounding errors. The algorithm described below
// was invented by Terje Mathisen, Norway, and not published elsewhere."
/// Division by constant is performed by:
/// 1. Adding 1 if needed;
/// 2. Multiplying by another constant;
/// 3. Shifting right by another constant.
template <typename UInt, bool add_, UInt multiplier_, unsigned shift_>
struct Division
{
static constexpr bool add{add_};
static constexpr UInt multiplier{multiplier_};
static constexpr unsigned shift{shift_};
};
/// Select a type with appropriate number of bytes from the list of types.
/// First parameter is the number of bytes requested. Then goes a list of types with 1, 2, 4, ... number of bytes.
/// Example: SelectType<4, uint8_t, uint16_t, uint32_t, uint64_t> will select uint32_t.
template <size_t N, typename T, typename... Ts>
struct SelectType
{
using Result = typename SelectType<N / 2, Ts...>::Result;
};
template <typename T, typename... Ts>
struct SelectType<1, T, Ts...>
{
using Result = T;
};
/// Division by 10^N where N is the size of the type.
template <size_t N>
using DivisionBy10PowN = typename SelectType
<
N,
Division<uint8_t, false, 205U, 11>, /// divide by 10
Division<uint16_t, true, 41943U, 22>, /// divide by 100
Division<uint32_t, false, 3518437209U, 45>, /// divide by 10000
Division<uint64_t, false, 12379400392853802749ULL, 90> /// divide by 100000000
>::Result;
template <size_t N>
using UnsignedOfSize = typename SelectType
<
N,
uint8_t,
uint16_t,
uint32_t,
uint64_t,
__uint128_t
>::Result;
/// Holds the result of dividing an unsigned N-byte variable by 10^N, resulting in a quotient and a remainder.
template <size_t N>
struct QuotientAndRemainder
{
UnsignedOfSize<N> quotient; // quotient with fewer than 2*N decimal digits
UnsignedOfSize<N / 2> remainder; // remainder with at most N decimal digits
};
template <size_t N>
QuotientAndRemainder<N> static inline split(UnsignedOfSize<N> value)
{
constexpr DivisionBy10PowN<N> division;
UnsignedOfSize<N> quotient = (division.multiplier * (UnsignedOfSize<2 * N>(value) + division.add)) >> division.shift;
UnsignedOfSize<N / 2> remainder = static_cast<UnsignedOfSize<N / 2>>(value - quotient * pow10<UnsignedOfSize<N / 2>>(N));
return {quotient, remainder};
}
static inline char * outDigit(char * p, uint8_t value)
{
*p = '0' + value;
++p;
return p;
}
// Using a lookup table to convert binary numbers from 0 to 99
// into ascii characters as described by Andrei Alexandrescu in
// https://www.facebook.com/notes/facebook-engineering/three-optimization-tips-for-c/10151361643253920/
static const char digits[201] = "00010203040506070809"
"10111213141516171819"
"20212223242526272829"
"30313233343536373839"
"40414243444546474849"
"50515253545556575859"
"60616263646566676869"
"70717273747576777879"
"80818283848586878889"
"90919293949596979899";
static inline char * outTwoDigits(char * p, uint8_t value)
{
memcpy(p, &digits[value * 2], 2);
p += 2;
return p;
}
namespace convert
{
template <typename UInt, size_t N = sizeof(UInt)> static char * head(char * p, UInt u);
template <typename UInt, size_t N = sizeof(UInt)> static char * tail(char * p, UInt u);
//===----------------------------------------------------------===//
// head: find most significant digit, skip leading zeros
//===----------------------------------------------------------===//
// "x" contains quotient and remainder after division by 10^N
// quotient is less than 10^N
template <size_t N>
static inline char * head(char * p, QuotientAndRemainder<N> x)
{
p = head(p, UnsignedOfSize<N / 2>(x.quotient));
p = tail(p, x.remainder);
return p;
}
// "u" is less than 10^2*N
template <typename UInt, size_t N>
static inline char * head(char * p, UInt u)
{
return u < pow10<UnsignedOfSize<N>>(N)
? head(p, UnsignedOfSize<N / 2>(u))
: head<N>(p, split<N>(u));
}
// recursion base case, selected when "u" is one byte
template <>
inline char * head<UnsignedOfSize<1>, 1>(char * p, UnsignedOfSize<1> u)
{
return u < 10
? outDigit(p, u)
: outTwoDigits(p, u);
}
//===----------------------------------------------------------===//
// tail: produce all digits including leading zeros
//===----------------------------------------------------------===//
// recursive step, "u" is less than 10^2*N
template <typename UInt, size_t N>
static inline char * tail(char * p, UInt u)
{
QuotientAndRemainder<N> x = split<N>(u);
p = tail(p, UnsignedOfSize<N / 2>(x.quotient));
p = tail(p, x.remainder);
return p;
}
// recursion base case, selected when "u" is one byte
template <>
inline char * tail<UnsignedOfSize<1>, 1>(char * p, UnsignedOfSize<1> u)
{
return outTwoDigits(p, u);
}
//===----------------------------------------------------------===//
// large values are >= 10^2*N
// where x contains quotient and remainder after division by 10^N
//===----------------------------------------------------------===//
template <size_t N>
static inline char * large(char * p, QuotientAndRemainder<N> x)
{
QuotientAndRemainder<N> y = split<N>(x.quotient);
p = head(p, UnsignedOfSize<N / 2>(y.quotient));
p = tail(p, y.remainder);
p = tail(p, x.remainder);
return p;
}
//===----------------------------------------------------------===//
// handle values of "u" that might be >= 10^2*N
// where N is the size of "u" in bytes
//===----------------------------------------------------------===//
template <typename UInt, size_t N = sizeof(UInt)>
static inline char * uitoa(char * p, UInt u)
{
if (u < pow10<UnsignedOfSize<N>>(N))
return head(p, UnsignedOfSize<N / 2>(u));
QuotientAndRemainder<N> x = split<N>(u);
return u < pow10<UnsignedOfSize<N>>(2 * N)
? head<N>(p, x)
: large<N>(p, x);
}
// selected when "u" is one byte
template <>
inline char * uitoa<UnsignedOfSize<1>, 1>(char * p, UnsignedOfSize<1> u)
{
if (u < 10)
return outDigit(p, u);
else if (u < 100)
return outTwoDigits(p, u);
else
{
p = outDigit(p, u / 100);
p = outTwoDigits(p, u % 100);
return p;
}
}
//===----------------------------------------------------------===//
// handle unsigned and signed integral operands
//===----------------------------------------------------------===//
// itoa: handle unsigned integral operands (selected by SFINAE)
template <typename U, std::enable_if_t<!std::is_signed_v<U> && std::is_integral_v<U>> * = nullptr>
static inline char * itoa(U u, char * p)
{
return convert::uitoa(p, u);
}
// itoa: handle signed integral operands (selected by SFINAE)
template <typename I, size_t N = sizeof(I), std::enable_if_t<std::is_signed_v<I> && std::is_integral_v<I>> * = nullptr>
static inline char * itoa(I i, char * p)
{
// Need "mask" to be filled with a copy of the sign bit.
// If "i" is a negative value, then the result of "operator >>"
// is implementation-defined, though usually it is an arithmetic
// right shift that replicates the sign bit.
// Use a conditional expression to be portable,
// a good optimizing compiler generates an arithmetic right shift
// and avoids the conditional branch.
UnsignedOfSize<N> mask = i < 0 ? ~UnsignedOfSize<N>(0) : 0;
// Now get the absolute value of "i" and cast to unsigned type UnsignedOfSize<N>.
// Cannot use std::abs() because the result is undefined
// in 2's complement systems for the most-negative value.
// Want to avoid conditional branch for performance reasons since
// CPU branch prediction will be ineffective when negative values
// occur randomly.
// Let "u" be "i" cast to unsigned type UnsignedOfSize<N>.
// Subtract "u" from 2*u if "i" is positive or 0 if "i" is negative.
// This yields the absolute value with the desired type without
// using a conditional branch and without invoking undefined or
// implementation defined behavior:
UnsignedOfSize<N> u = ((2 * UnsignedOfSize<N>(i)) & ~mask) - UnsignedOfSize<N>(i);
// Unconditionally store a minus sign when producing digits
// in a forward direction and increment the pointer only if
// the value is in fact negative.
// This avoids a conditional branch and is safe because we will
// always produce at least one digit and it will overwrite the
// minus sign when the value is not negative.
*p = '-';
p += (mask & 1);
p = convert::uitoa(p, u);
return p;
}
}
template <typename T>
static inline char * writeUIntText(T x, char * p)
{
static_assert(is_unsigned_v<T>);
int len = digits10(x);
auto * pp = p + len;
while (x >= 100)
{
const auto i = x % 100;
x /= 100;
pp -= 2;
outTwoDigits(pp, i);
}
if (x < 10)
*p = '0' + x;
else
outTwoDigits(p, x);
return p + len;
}
static inline char * writeLeadingMinus(char * pos)
{
*pos = '-';
return pos + 1;
}
template <typename T>
static inline char * writeSIntText(T x, char * pos)
{
static_assert(std::is_same_v<T, Int128> || std::is_same_v<T, Int256>);
using UnsignedT = make_unsigned_t<T>;
static constexpr T min_int = UnsignedT(1) << (sizeof(T) * 8 - 1);
if (unlikely(x == min_int))
{
if constexpr (std::is_same_v<T, Int128>)
{
const char * res = "-170141183460469231731687303715884105728";
memcpy(pos, res, strlen(res));
return pos + strlen(res);
}
else if constexpr (std::is_same_v<T, Int256>)
{
const char * res = "-57896044618658097711785492504343953926634992332820282019728792003956564819968";
memcpy(pos, res, strlen(res));
return pos + strlen(res);
}
}
if (x < 0)
{
x = -x;
pos = writeLeadingMinus(pos);
}
return writeUIntText(UnsignedT(x), pos);
}
}
template <typename I>
char * itoa(I i, char * p)
{
return impl::convert::itoa(i, p);
}
template <>
inline char * itoa(char8_t i, char * p)
{
return impl::convert::itoa(uint8_t(i), p);
}
template <>
inline char * itoa(UInt128 i, char * p)
{
return impl::writeUIntText(i, p);
}
template <>
inline char * itoa(Int128 i, char * p)
{
return impl::writeSIntText(i, p);
}
template <>
inline char * itoa(UInt256 i, char * p)
{
return impl::writeUIntText(i, p);
}
template <>
inline char * itoa(Int256 i, char * p)
{
return impl::writeSIntText(i, p);
}
#undef FOR_INTEGER_TYPES
#undef INSTANTIATION

View File

@ -19,8 +19,8 @@ auto map(const Collection<Params...> & collection, Mapper && mapper)
using value_type = unqualified_t<decltype(mapper(*std::begin(collection)))>;
return Collection<value_type>(
boost::make_transform_iterator(std::begin(collection), std::forward<Mapper>(mapper)),
boost::make_transform_iterator(std::end(collection), std::forward<Mapper>(mapper)));
boost::make_transform_iterator(std::begin(collection), mapper),
boost::make_transform_iterator(std::end(collection), mapper));
}
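/// Our note (not part of the diff): passing `mapper` by lvalue here is deliberate;
/// forwarding an rvalue mapper into both transform_iterators would move from it twice.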
/** \brief Returns collection of specified container-type,
@ -33,8 +33,8 @@ auto map(const Collection & collection, Mapper && mapper)
using value_type = unqualified_t<decltype(mapper(*std::begin(collection)))>;
return ResultCollection<value_type>(
boost::make_transform_iterator(std::begin(collection), std::forward<Mapper>(mapper)),
boost::make_transform_iterator(std::end(collection), std::forward<Mapper>(mapper)));
boost::make_transform_iterator(std::begin(collection), mapper),
boost::make_transform_iterator(std::end(collection), mapper));
}
/** \brief Returns collection of specified type,
@ -45,8 +45,8 @@ template <typename ResultCollection, typename Collection, typename Mapper>
auto map(const Collection & collection, Mapper && mapper)
{
return ResultCollection(
boost::make_transform_iterator(std::begin(collection), std::forward<Mapper>(mapper)),
boost::make_transform_iterator(std::end(collection), std::forward<Mapper>(mapper)));
boost::make_transform_iterator(std::begin(collection), mapper),
boost::make_transform_iterator(std::end(collection), mapper));
}
}

View File

@ -11,10 +11,8 @@
/// Thread Sanitizer uses the dl_iterate_phdr function during initialization and fails if we provide our own.
#ifdef USE_PHDR_CACHE
#if defined(__clang__)
# pragma clang diagnostic ignored "-Wreserved-id-macro"
# pragma clang diagnostic ignored "-Wunused-macros"
#endif
#pragma clang diagnostic ignored "-Wreserved-id-macro"
#pragma clang diagnostic ignored "-Wunused-macros"
#define __msan_unpoison(X, Y) // NOLINT
#if defined(ch_has_feature)
@ -57,10 +55,6 @@ std::atomic<PHDRCache *> phdr_cache {};
extern "C"
#ifndef __clang__
[[gnu::visibility("default")]]
[[gnu::externally_visible]]
#endif
int dl_iterate_phdr(int (*callback) (dl_phdr_info * info, size_t size, void * data), void * data)
{
auto * current_phdr_cache = phdr_cache.load();

View File

@ -23,12 +23,10 @@ namespace internal
/// For loop adaptor which is used to iterate through a half-closed interval [begin, end).
/// The parameters `begin` and `end` can have any integral or enum types.
template <typename BeginType,
typename EndType,
typename = std::enable_if_t<
(std::is_integral_v<BeginType> || std::is_enum_v<BeginType>) &&
(std::is_integral_v<EndType> || std::is_enum_v<EndType>) &&
(!std::is_enum_v<BeginType> || !std::is_enum_v<EndType> || std::is_same_v<BeginType, EndType>), void>>
template <typename BeginType, typename EndType>
requires((std::is_integral_v<BeginType> || std::is_enum_v<BeginType>) &&
(std::is_integral_v<EndType> || std::is_enum_v<EndType>) &&
(!std::is_enum_v<BeginType> || !std::is_enum_v<EndType> || std::is_same_v<BeginType, EndType>))
inline auto range(BeginType begin, EndType end)
{
if constexpr (std::is_integral_v<BeginType> && std::is_integral_v<EndType>)
@ -50,8 +48,8 @@ inline auto range(BeginType begin, EndType end)
/// For loop adaptor which is used to iterate through a half-closed interval [0, end).
/// The parameter `end` can have any integral or enum type.
/// The same as range(0, end).
template <typename Type,
typename = std::enable_if_t<std::is_integral_v<Type> || std::is_enum_v<Type>, void>>
template <typename Type>
requires(std::is_integral_v<Type> || std::is_enum_v<Type>)
inline auto range(Type end)
{
if constexpr (std::is_integral_v<Type>)

View File

@ -29,11 +29,13 @@ public:
requires std::is_convertible_v<G, F>
constexpr BasicScopeGuard & operator=(BasicScopeGuard<G> && src) // NOLINT(cppcoreguidelines-rvalue-reference-param-not-moved, cppcoreguidelines-noexcept-move-operations)
{
if (this != &src)
if constexpr (std::is_same_v<G, F>)
{
invoke();
function = src.release();
if (this == &src)
return *this;
}
invoke();
function = src.release();
return *this;
}

View File

@ -2,6 +2,7 @@
#include <ctime>
#include <cerrno>
#include <system_error>
#if defined(OS_DARWIN)
#include <mach/mach.h>
@ -34,7 +35,8 @@ void sleepForNanoseconds(uint64_t nanoseconds)
constexpr auto clock_type = CLOCK_MONOTONIC;
struct timespec current_time;
clock_gettime(clock_type, &current_time);
if (0 != clock_gettime(clock_type, &current_time))
throw std::system_error(std::error_code(errno, std::system_category()));
constexpr uint64_t resolution = 1'000'000'000;
struct timespec finish_time = current_time;

View File

@ -59,24 +59,19 @@ using ComparatorWrapper = Comparator;
#endif
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wold-style-cast"
#include <miniselect/floyd_rivest_select.h>
template <typename RandomIt>
void nth_element(RandomIt first, RandomIt nth, RandomIt last)
template <typename RandomIt, typename Compare>
void nth_element(RandomIt first, RandomIt nth, RandomIt last, Compare compare)
{
using value_type = typename std::iterator_traits<RandomIt>::value_type;
using comparator = std::less<value_type>;
comparator compare;
ComparatorWrapper<comparator> compare_wrapper = compare;
#ifndef NDEBUG
::shuffle(first, last);
#endif
ComparatorWrapper<Compare> compare_wrapper = compare;
::miniselect::floyd_rivest_select(first, nth, last, compare_wrapper);
#ifndef NDEBUG
@ -87,6 +82,15 @@ void nth_element(RandomIt first, RandomIt nth, RandomIt last)
#endif
}
template <typename RandomIt>
void nth_element(RandomIt first, RandomIt nth, RandomIt last)
{
using value_type = typename std::iterator_traits<RandomIt>::value_type;
using comparator = std::less<value_type>;
::nth_element(first, nth, last, comparator());
}
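/// A hypothetical usage sketch (ours, not part of the diff) of the comparator overload:
///
///     std::vector<int> v{5, 1, 4, 2, 3};
///     ::nth_element(v.begin(), v.begin() + 2, v.end(), std::greater<int>());
///     /// v[2] == 3: the element that would be at index 2 in descending order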
template <typename RandomIt, typename Compare>
void partial_sort(RandomIt first, RandomIt middle, RandomIt last, Compare compare)
{
@ -111,7 +115,7 @@ void partial_sort(RandomIt first, RandomIt middle, RandomIt last)
::partial_sort(first, middle, last, comparator());
}
#pragma GCC diagnostic pop
#pragma clang diagnostic pop
template <typename RandomIt, typename Compare>
void sort(RandomIt first, RandomIt last, Compare compare)

View File

@ -1,2 +0,0 @@
clickhouse_add_executable (dump_variable dump_variable.cpp)
target_link_libraries (dump_variable PRIVATE clickhouse_common_io)

View File

@ -1,70 +0,0 @@
#include <base/iostream_debug_helpers.h>
#include <iostream>
#include <memory>
#include <vector>
#include <map>
#include <set>
#include <tuple>
#include <array>
#include <utility>
struct S1;
struct S2 {};
struct S3
{
std::set<const char *> m1;
};
std::ostream & operator<<(std::ostream & stream, const S3 & what)
{
stream << "S3 {m1=";
dumpValue(stream, what.m1) << "}";
return stream;
}
int main(int, char **)
{
int x = 1;
DUMP(x);
DUMP(x, 1, &x);
DUMP(std::make_unique<int>(1));
DUMP(std::make_shared<int>(1));
std::vector<int> vec{1, 2, 3};
DUMP(vec);
auto pair = std::make_pair(1, 2);
DUMP(pair);
auto tuple = std::make_tuple(1, 2, 3);
DUMP(tuple);
std::map<int, std::string> map{{1, "hello"}, {2, "world"}};
DUMP(map);
std::initializer_list<const char *> list{"hello", "world"};
DUMP(list);
std::array<const char *, 2> arr{{"hello", "world"}};
DUMP(arr);
//DUMP([]{});
S1 * s = nullptr;
DUMP(s);
DUMP(S2());
std::set<const char *> variants = {"hello", "world"};
DUMP(variants);
S3 s3 {{"hello", "world"}};
DUMP(s3);
return 0;
}

View File

@ -3,40 +3,34 @@
#include <cstdint>
#include <string>
using Int8 = int8_t;
using Int16 = int16_t;
using Int32 = int32_t;
using Int64 = int64_t;
#ifndef __cpp_char8_t
using char8_t = unsigned char;
#endif
/// This is needed for more strict aliasing. https://godbolt.org/z/xpJBSb https://stackoverflow.com/a/57453713
/// Using char8_t for stricter aliasing (https://stackoverflow.com/a/57453713)
using UInt8 = char8_t;
/// Same for using signed _BitInt(8) (there isn't a signed char8_t, which would be more convenient)
/// See https://godbolt.org/z/fafnWEnnf
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wbit-int-extension"
using Int8 = signed _BitInt(8);
#pragma clang diagnostic pop
namespace std
{
template <>
struct hash<Int8> /// NOLINT (cert-dcl58-cpp)
{
size_t operator()(const Int8 x) const { return std::hash<int8_t>()(int8_t{x}); }
};
}
using UInt16 = uint16_t;
using UInt32 = uint32_t;
using UInt64 = uint64_t;
using String = std::string;
namespace DB
{
using UInt8 = ::UInt8;
using UInt16 = ::UInt16;
using UInt32 = ::UInt32;
using UInt64 = ::UInt64;
using Int8 = ::Int8;
using Int16 = ::Int16;
using Int32 = ::Int32;
using Int64 = ::Int64;
using Int16 = int16_t;
using Int32 = int32_t;
using Int64 = int64_t;
using Float32 = float;
using Float64 = double;
using String = std::string;
}

View File

@ -111,7 +111,8 @@ public:
constexpr explicit operator bool() const noexcept;
template <typename T, typename = std::enable_if_t<std::is_arithmetic_v<T>, T>>
template <typename T>
requires(std::is_arithmetic_v<T>)
constexpr operator T() const noexcept;
constexpr operator long double() const noexcept;
@ -208,12 +209,14 @@ constexpr integer<Bits, Signed> operator<<(const integer<Bits, Signed> & lhs, in
template <size_t Bits, typename Signed>
constexpr integer<Bits, Signed> operator>>(const integer<Bits, Signed> & lhs, int n) noexcept;
template <size_t Bits, typename Signed, typename Int, typename = std::enable_if_t<!std::is_same_v<Int, int>>>
template <size_t Bits, typename Signed, typename Int>
requires(!std::is_same_v<Int, int>)
constexpr integer<Bits, Signed> operator<<(const integer<Bits, Signed> & lhs, Int n) noexcept
{
return lhs << int(n);
}
template <size_t Bits, typename Signed, typename Int, typename = std::enable_if_t<!std::is_same_v<Int, int>>>
template <size_t Bits, typename Signed, typename Int>
requires(!std::is_same_v<Int, int>)
constexpr integer<Bits, Signed> operator>>(const integer<Bits, Signed> & lhs, Int n) noexcept
{
return lhs >> int(n);
@ -262,4 +265,3 @@ struct hash<wide::integer<Bits, Signed>>;
// NOLINTEND(*)
#include "wide_integer_impl.h"

View File

@ -6,14 +6,13 @@
#include "throwError.h"
#include <bit>
#include <cmath>
#include <cfloat>
#include <cassert>
#include <tuple>
#include <limits>
#include <boost/math/special_functions/fpclassify.hpp>
// NOLINTBEGIN(*)
/// Use same extended double for all platforms
@ -21,6 +20,7 @@
#define CONSTEXPR_FROM_DOUBLE constexpr
using FromDoubleIntermediateType = long double;
#else
#include <boost/math/special_functions/fpclassify.hpp>
#include <boost/multiprecision/cpp_bin_float.hpp>
/// `wide_integer_from_builtin` can't be constexpr with non-literal `cpp_bin_float_double_extended`
#define CONSTEXPR_FROM_DOUBLE
@ -308,6 +308,13 @@ struct integer<Bits, Signed>::_impl
constexpr uint64_t max_int = std::numeric_limits<uint64_t>::max();
static_assert(std::is_same_v<T, double> || std::is_same_v<T, FromDoubleIntermediateType>);
/// Implementation-specific behaviour on overflow (if we don't check here, a stack overflow will be triggered in bigint_cast).
#if (LDBL_MANT_DIG == 64)
if (!std::isfinite(t))
{
self = 0;
return;
}
#else
if constexpr (std::is_same_v<T, double>)
{
if (!std::isfinite(t))
@ -324,6 +331,7 @@ struct integer<Bits, Signed>::_impl
return;
}
}
#endif
const T alpha = t / static_cast<T>(max_int);
@ -1238,7 +1246,8 @@ constexpr integer<Bits, Signed>::operator bool() const noexcept
}
template <size_t Bits, typename Signed>
template <class T, class>
template <class T>
requires(std::is_arithmetic_v<T>)
constexpr integer<Bits, Signed>::operator T() const noexcept
{
static_assert(std::numeric_limits<T>::is_integer);
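
A self-contained sketch (hypothetical helper, not the actual `_impl` code) of the guard this hunk adds: non-finite and out-of-range inputs must be rejected before the conversion, otherwise the downstream cast can misbehave, up to the stack overflow mentioned in the comment.

#include <cmath>
#include <cstdint>

bool from_double_checked(double t, int64_t & out)
{
    if (!std::isfinite(t))  // NaN and +/-Inf: define the result as 0 instead of crashing
    {
        out = 0;
        return false;
    }
    if (t >= 9223372036854775808.0 || t < -9223372036854775808.0)  // outside int64 range
    {
        out = 0;
        return false;
    }
    out = static_cast<int64_t>(t);
    return true;
}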

View File

@ -30,7 +30,6 @@ int __gai_sigqueue(int sig, const union sigval val, pid_t caller_pid)
}
#include <sys/select.h>
#include <stdlib.h>
#include <features.h>

View File

@ -2,6 +2,7 @@
.hidden __syscall
.type __syscall,%function
__syscall:
.cfi_startproc
uxtw x8,w0
mov x0,x1
mov x1,x2
@ -12,3 +13,4 @@ __syscall:
mov x6,x7
svc 0
ret
.cfi_endproc

View File

@ -20,11 +20,7 @@
/// Suppress TSan since it is possible for this code to be called from multiple threads,
/// and it is safe to perform the initialization multiple times from multiple threads.
#if defined(__clang__)
# define NO_SANITIZE_THREAD __attribute__((__no_sanitize__("thread")))
#else
# define NO_SANITIZE_THREAD
#endif
#define NO_SANITIZE_THREAD __attribute__((__no_sanitize__("thread")))
// We don't have libc struct available here.
// Compute aux vector manually (from /proc/self/auxv).
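
Since the codebase is now clang-only, the fallback definition is gone. A short sketch (hypothetical function) of how the annotation is used: it tells TSan not to instrument a function whose race is benign by construction, matching the comment above.

#define NO_SANITIZE_THREAD __attribute__((__no_sanitize__("thread")))

static long cached_value;  // may be written by several threads, always with the same value

NO_SANITIZE_THREAD static long get_cached_value()
{
    if (cached_value == 0)
        cached_value = 42;  // idempotent initialization: every writer stores the same value
    return cached_value;
}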

View File

@ -6,11 +6,7 @@
/// It is only enabled in debug build (its intended use is for CI checks).
#if !defined(NDEBUG)
#if defined(__clang__)
#pragma clang diagnostic ignored "-Wincompatible-library-redeclaration"
#else
#pragma GCC diagnostic ignored "-Wbuiltin-declaration-mismatch"
#endif
#pragma clang diagnostic ignored "-Wincompatible-library-redeclaration"
/// We cannot use libc headers here.
long write(int, const void *, unsigned long);

View File

@ -23,6 +23,9 @@
#include <openssl/conf.h>
#endif
#if __has_feature(address_sanitizer)
#include <sanitizer/lsan_interface.h>
#endif
using Poco::RandomInputStream;
using Poco::Thread;
@ -67,12 +70,18 @@ void OpenSSLInitializer::initialize()
SSL_library_init();
SSL_load_error_strings();
OpenSSL_add_all_algorithms();
char seed[SEEDSIZE];
RandomInputStream rnd;
rnd.read(seed, sizeof(seed));
RAND_seed(seed, SEEDSIZE);
{
# if __has_feature(address_sanitizer)
/// Leak sanitizer (part of address sanitizer) thinks that a few bytes of memory in OpenSSL are allocated during initialization but never released.
__lsan::ScopedDisabler lsan_disabler;
#endif
RAND_seed(seed, SEEDSIZE);
}
int nMutexes = CRYPTO_num_locks();
_mutexes = new Poco::FastMutex[nMutexes];
CRYPTO_set_locking_callback(&OpenSSLInitializer::lock);
@ -80,8 +89,8 @@ void OpenSSLInitializer::initialize()
// https://sourceforge.net/p/poco/bugs/110/
//
// From http://www.openssl.org/docs/crypto/threads.html :
// "If the application does not register such a callback using CRYPTO_THREADID_set_callback(),
// then a default implementation is used - on Windows and BeOS this uses the system's
// "If the application does not register such a callback using CRYPTO_THREADID_set_callback(),
// then a default implementation is used - on Windows and BeOS this uses the system's
// default thread identifying APIs"
CRYPTO_set_id_callback(&OpenSSLInitializer::id);
CRYPTO_set_dynlock_create_callback(&OpenSSLInitializer::dynlockCreate);
@ -100,7 +109,7 @@ void OpenSSLInitializer::uninitialize()
CRYPTO_set_locking_callback(0);
CRYPTO_set_id_callback(0);
delete [] _mutexes;
CONF_modules_free();
}
}
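
The suppression pattern above generalizes: allocations made while a `__lsan::ScopedDisabler` is alive are excluded from leak reports. A standalone sketch (the wrapper macro is hypothetical; `__lsan::ScopedDisabler` comes from `<sanitizer/lsan_interface.h>` and only takes effect under AddressSanitizer):

#if defined(__has_feature)
#    if __has_feature(address_sanitizer)
#        include <sanitizer/lsan_interface.h>
#        define LSAN_SCOPED_DISABLER __lsan::ScopedDisabler lsan_disabler
#    endif
#endif
#ifndef LSAN_SCOPED_DISABLER
#    define LSAN_SCOPED_DISABLER  // no-op without ASan
#endif

void initThirdPartyLibrary()
{
    LSAN_SCOPED_DISABLER;  // one-time allocations below are intentionally never freed
    // ... library setup that leaks a few bytes by design ...
}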

View File

@ -55,7 +55,6 @@ set (SRCS
src/DigestStream.cpp
src/DirectoryIterator.cpp
src/DirectoryIteratorStrategy.cpp
src/DirectoryWatcher.cpp
src/Environment.cpp
src/Error.cpp
src/ErrorHandler.cpp

View File

@ -26,6 +26,11 @@
#include "Poco/StreamUtil.h"
namespace DB
{
class ReadBufferFromIStream;
}
namespace Poco
{
@ -120,6 +125,8 @@ protected:
openmode getMode() const { return _mode; }
private:
friend class DB::ReadBufferFromIStream;
virtual int readFromDevice(char_type * /*buffer*/, std::streamsize /*length*/) { return 0; }
virtual int writeToDevice(const char_type * /*buffer*/, std::streamsize /*length*/) { return 0; }
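
A condensed sketch of the pattern this hunk enables (simplified names): a class forward-declared in another namespace is granted friendship so it can bypass the buffering layer and call the private device hook directly.

namespace DB { class ReadBufferFromIStream; }

class BufferedStreamBuf
{
    friend class DB::ReadBufferFromIStream;  // may call readFromDevice() directly

private:
    virtual int readFromDevice(char * /*buffer*/, long /*length*/) { return 0; }
};

namespace DB
{
class ReadBufferFromIStream
{
public:
    static int fill(BufferedStreamBuf & buf, char * dst, long n)
    {
        return buf.readFromDevice(dst, n);  // allowed because of the friend declaration
    }
};
}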

View File

@ -1,228 +0,0 @@
//
// DirectoryWatcher.h
//
// Library: Foundation
// Package: Filesystem
// Module: DirectoryWatcher
//
// Definition of the DirectoryWatcher class.
//
// Copyright (c) 2012, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#ifndef Foundation_DirectoryWatcher_INCLUDED
#define Foundation_DirectoryWatcher_INCLUDED
#include "Poco/Foundation.h"
#ifndef POCO_NO_INOTIFY
# include "Poco/AtomicCounter.h"
# include "Poco/BasicEvent.h"
# include "Poco/File.h"
# include "Poco/Runnable.h"
# include "Poco/Thread.h"
namespace Poco
{
class DirectoryWatcherStrategy;
class Foundation_API DirectoryWatcher : protected Runnable
/// This class is used to get notifications about changes
/// to the filesystem, more specifically, to a specific
/// directory. Changes to a directory are reported via
/// events.
///
/// A thread will be created that watches the specified
/// directory for changes. Events are reported in the context
/// of this thread.
///
/// Note that changes to files in subdirectories of the watched
/// directory are not reported. Separate DirectoryWatcher objects
/// must be created for these directories if they should be watched.
///
/// Changes to file attributes are not reported.
///
/// On Windows, this class is implemented using FindFirstChangeNotification()/FindNextChangeNotification().
/// On Linux, this class is implemented using inotify.
/// On FreeBSD and Darwin (Mac OS X, iOS), this class uses kevent/kqueue.
/// On all other platforms, the watched directory is periodically scanned
/// for changes. This can negatively affect performance if done too often.
/// Therefore, the interval in which scans are done can be specified in
/// the constructor. Note that periodic scanning will also be done on FreeBSD
/// and Darwin if events for changes to files (DW_ITEM_MODIFIED) are enabled.
///
/// DW_ITEM_MOVED_FROM and DW_ITEM_MOVED_TO events will only be reported
/// on Linux. On other platforms, a file rename or move operation
/// will be reported via a DW_ITEM_REMOVED and a DW_ITEM_ADDED event.
/// The order of these two events is not defined.
///
/// An event mask can be specified to enable only certain events.
{
public:
enum DirectoryEventType
{
DW_ITEM_ADDED = 1,
/// A new item has been created and added to the directory.
DW_ITEM_REMOVED = 2,
/// An item has been removed from the directory.
DW_ITEM_MODIFIED = 4,
/// An item has been modified.
DW_ITEM_MOVED_FROM = 8,
/// An item has been renamed or moved. This event delivers the old name.
DW_ITEM_MOVED_TO = 16
/// An item has been renamed or moved. This event delivers the new name.
};
enum DirectoryEventMask
{
DW_FILTER_ENABLE_ALL = 31,
/// Enables all event types.
DW_FILTER_DISABLE_ALL = 0
/// Disables all event types.
};
enum
{
DW_DEFAULT_SCAN_INTERVAL = 5 /// Default scan interval for platforms that don't provide a native notification mechanism.
};
struct DirectoryEvent
{
DirectoryEvent(const File & f, DirectoryEventType ev) : item(f), event(ev) { }
const File & item; /// The directory or file that has been changed.
DirectoryEventType event; /// The kind of event.
};
BasicEvent<const DirectoryEvent> itemAdded;
/// Fired when a file or directory has been created or added to the directory.
BasicEvent<const DirectoryEvent> itemRemoved;
/// Fired when a file or directory has been removed from the directory.
BasicEvent<const DirectoryEvent> itemModified;
/// Fired when a file or directory has been modified.
BasicEvent<const DirectoryEvent> itemMovedFrom;
/// Fired when a file or directory has been renamed. This event delivers the old name.
BasicEvent<const DirectoryEvent> itemMovedTo;
/// Fired when a file or directory has been moved. This event delivers the new name.
BasicEvent<const Exception> scanError;
/// Fired when an error occurs while scanning for changes.
DirectoryWatcher(const std::string & path, int eventMask = DW_FILTER_ENABLE_ALL, int scanInterval = DW_DEFAULT_SCAN_INTERVAL);
/// Creates a DirectoryWatcher for the directory given in path.
/// To enable only specific events, an eventMask can be specified by
/// OR-ing the desired event IDs (e.g., DW_ITEM_ADDED | DW_ITEM_MODIFIED).
/// On platforms where no native filesystem notifications are available,
/// scanInterval specifies the interval in seconds between scans
/// of the directory.
DirectoryWatcher(const File & directory, int eventMask = DW_FILTER_ENABLE_ALL, int scanInterval = DW_DEFAULT_SCAN_INTERVAL);
/// Creates a DirectoryWatcher for the specified directory.
/// To enable only specific events, an eventMask can be specified by
/// OR-ing the desired event IDs (e.g., DW_ITEM_ADDED | DW_ITEM_MODIFIED).
/// On platforms where no native filesystem notifications are available,
/// scanInterval specifies the interval in seconds between scans
/// of the directory.
~DirectoryWatcher();
/// Destroys the DirectoryWatcher.
void suspendEvents();
/// Suspends sending of events. Can be called multiple times, but every
/// call to suspendEvents() must be matched by a call to resumeEvents().
void resumeEvents();
/// Resumes events, after they have been suspended with a call to suspendEvents().
bool eventsSuspended() const;
/// Returns true iff events are suspended.
int eventMask() const;
/// Returns the value of the eventMask passed to the constructor.
int scanInterval() const;
/// Returns the scan interval in seconds.
const File & directory() const;
/// Returns the directory being watched.
bool supportsMoveEvents() const;
/// Returns true iff the platform supports DW_ITEM_MOVED_FROM/itemMovedFrom and
/// DW_ITEM_MOVED_TO/itemMovedTo events.
protected:
void init();
void stop();
void run();
private:
DirectoryWatcher();
DirectoryWatcher(const DirectoryWatcher &);
DirectoryWatcher & operator=(const DirectoryWatcher &);
Thread _thread;
File _directory;
int _eventMask;
AtomicCounter _eventsSuspended;
int _scanInterval;
DirectoryWatcherStrategy * _pStrategy;
};
//
// inlines
//
inline bool DirectoryWatcher::eventsSuspended() const
{
return _eventsSuspended.value() > 0;
}
inline int DirectoryWatcher::eventMask() const
{
return _eventMask;
}
inline int DirectoryWatcher::scanInterval() const
{
return _scanInterval;
}
inline const File & DirectoryWatcher::directory() const
{
return _directory;
}
} // namespace Poco
#endif // POCO_NO_INOTIFY
#endif // Foundation_DirectoryWatcher_INCLUDED
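
For reference, a typical use of the removed class, following its own documentation above (the path is illustrative; events are delivered on the watcher's internal thread):

#include "Poco/Delegate.h"
#include "Poco/DirectoryWatcher.h"
#include "Poco/Thread.h"
#include <iostream>

struct Handler
{
    void onItemAdded(const Poco::DirectoryWatcher::DirectoryEvent & ev)
    {
        std::cout << "added: " << ev.item.path() << '\n';
    }
};

int main()
{
    Handler handler;
    Poco::DirectoryWatcher watcher("/tmp/watched", Poco::DirectoryWatcher::DW_ITEM_ADDED);
    watcher.itemAdded += Poco::delegate(&handler, &Handler::onItemAdded);
    Poco::Thread::sleep(60000);  // receive events for a minute, then stop watching
    return 0;
}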

View File

@ -0,0 +1,75 @@
//
// FPEnvironment_SUN.h
//
// Library: Foundation
// Package: Core
// Module: FPEnvironment
//
// Definitions of class FPEnvironmentImpl for Solaris.
//
// Copyright (c) 2005-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#ifndef Foundation_FPEnvironment_SUN_INCLUDED
#define Foundation_FPEnvironment_SUN_INCLUDED
#include <ieeefp.h>
#include "Poco/Foundation.h"
namespace Poco
{
class FPEnvironmentImpl
{
protected:
enum RoundingModeImpl
{
FP_ROUND_DOWNWARD_IMPL = FP_RM,
FP_ROUND_UPWARD_IMPL = FP_RP,
FP_ROUND_TONEAREST_IMPL = FP_RN,
FP_ROUND_TOWARDZERO_IMPL = FP_RZ
};
enum FlagImpl
{
FP_DIVIDE_BY_ZERO_IMPL = FP_X_DZ,
FP_INEXACT_IMPL = FP_X_IMP,
FP_OVERFLOW_IMPL = FP_X_OFL,
FP_UNDERFLOW_IMPL = FP_X_UFL,
FP_INVALID_IMPL = FP_X_INV
};
FPEnvironmentImpl();
FPEnvironmentImpl(const FPEnvironmentImpl & env);
~FPEnvironmentImpl();
FPEnvironmentImpl & operator=(const FPEnvironmentImpl & env);
void keepCurrentImpl();
static void clearFlagsImpl();
static bool isFlagImpl(FlagImpl flag);
static void setRoundingModeImpl(RoundingModeImpl mode);
static RoundingModeImpl getRoundingModeImpl();
static bool isInfiniteImpl(float value);
static bool isInfiniteImpl(double value);
static bool isInfiniteImpl(long double value);
static bool isNaNImpl(float value);
static bool isNaNImpl(double value);
static bool isNaNImpl(long double value);
static float copySignImpl(float target, float source);
static double copySignImpl(double target, double source);
static long double copySignImpl(long double target, long double source);
private:
fp_rnd _rnd;
fp_except _exc;
};
} // namespace Poco
#endif // Foundation_FPEnvironment_SUN_INCLUDED

View File

@ -68,7 +68,7 @@ public:
typedef typename Bucket::iterator BucketIterator;
typedef typename BucketVec::iterator BucketVecIterator;
class ConstIterator : public std::iterator<std::forward_iterator_tag, Value>
class ConstIterator
{
public:
ConstIterator() : _initialized(false) { }
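
`std::iterator` was deprecated in C++17, which is why the base class is dropped above. A sketch of the modern equivalent, spelling out the five member typedefs the base used to inject (`Value` is a placeholder):

#include <cstddef>
#include <iterator>

using Value = int;  // stands in for the hash table's value type

class ConstIterator
{
public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = Value;
    using difference_type = std::ptrdiff_t;
    using pointer = const Value *;
    using reference = const Value &;

    ConstIterator() : _initialized(false) { }
    // ... increment, dereference and comparison operators as before ...

private:
    bool _initialized;
};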

View File

@ -22,6 +22,7 @@
#include <cstddef>
#include <map>
#include <vector>
#include "Poco/Channel.h"
#include "Poco/Format.h"
#include "Poco/Foundation.h"
@ -33,7 +34,8 @@ namespace Poco
class Exception;
class Logger;
using LoggerPtr = std::shared_ptr<Logger>;
class Foundation_API Logger : public Channel
/// Logger is a special Channel that acts as the main
@ -870,21 +872,21 @@ public:
/// If the Logger does not yet exist, it is created, based
/// on its parent logger.
static Logger & unsafeGet(const std::string & name);
/// Returns a reference to the Logger with the given name.
static LoggerPtr getShared(const std::string & name, bool should_be_owned_by_shared_ptr_if_created = true);
/// Returns a shared pointer to the Logger with the given name.
/// If the Logger does not yet exist, it is created, based
/// on its parent logger.
///
/// WARNING: This method is not thread safe. You should
/// probably use get() instead.
/// The only time this method should be used is during
/// program initialization, when only one thread is running.
static Logger & create(const std::string & name, Channel * pChannel, int level = Message::PRIO_INFORMATION);
/// Creates and returns a reference to a Logger with the
/// given name. The Logger's Channel and log level are set as
/// specified.
static LoggerPtr createShared(const std::string & name, Channel * pChannel, int level = Message::PRIO_INFORMATION);
/// Creates and returns a shared pointer to a Logger with the
/// given name. The Logger's Channel and log level are set as
/// specified.
static Logger & root();
/// Returns a reference to the root logger, which is the ultimate
/// ancestor of all Loggers.
@ -893,13 +895,6 @@ public:
/// Returns a pointer to the Logger with the given name if it
/// exists, or a null pointer otherwise.
static void destroy(const std::string & name);
/// Destroys the logger with the specified name. Does nothing
/// if the logger is not found.
///
/// After a logger has been destroyed, all references to it
/// become invalid.
static void shutdown();
/// Shuts down the logging framework and releases all
/// Loggers.
@ -928,9 +923,17 @@ public:
static const std::string ROOT; /// The name of the root logger ("").
protected:
typedef std::map<std::string, Logger *> LoggerMap;
public:
struct LoggerEntry
{
Poco::Logger * logger;
bool owned_by_shared_ptr = false;
};
using LoggerMap = std::unordered_map<std::string, LoggerEntry>;
using LoggerMapIterator = LoggerMap::iterator;
protected:
Logger(const std::string & name, Channel * pChannel, int level);
~Logger();
@ -938,11 +941,16 @@ protected:
void log(const std::string & text, Message::Priority prio, const char * file, int line);
static std::string format(const std::string & fmt, int argc, std::string argv[]);
static Logger & parent(const std::string & name);
static void add(Logger * pLogger);
static Logger * find(const std::string & name);
private:
static std::pair<Logger::LoggerMapIterator, bool> unsafeGet(const std::string & name, bool get_shared);
static Logger * unsafeGetRawPtr(const std::string & name);
static std::pair<LoggerMapIterator, bool> unsafeCreate(const std::string & name, Channel * pChannel, int level = Message::PRIO_INFORMATION);
static Logger & parent(const std::string & name);
static std::pair<LoggerMapIterator, bool> add(Logger * pLogger);
static std::optional<LoggerMapIterator> find(const std::string & name);
static Logger * findRawPtr(const std::string & name);
Logger();
Logger(const Logger &);
Logger & operator=(const Logger &);
@ -950,9 +958,6 @@ private:
std::string _name;
Channel * _pChannel;
std::atomic_int _level;
static LoggerMap * _pLoggerMap;
static Mutex _mapMtx;
};
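
A much-simplified sketch (hypothetical, not the actual implementation) of the shared-ownership scheme the `LoggerEntry`/`getShared` changes introduce: the registry caches a weak reference and hands out `shared_ptr`s, so concurrent callers share one instance while ownership stays with the returned pointers.

#include <memory>
#include <mutex>
#include <string>
#include <unordered_map>

struct Logger { std::string name; };

std::shared_ptr<Logger> getShared(const std::string & name)
{
    static std::mutex mtx;
    static std::unordered_map<std::string, std::weak_ptr<Logger>> registry;

    std::lock_guard lock(mtx);
    auto & weak = registry[name];
    if (auto existing = weak.lock())
        return existing;  // another owner already holds this logger

    auto created = std::make_shared<Logger>(Logger{name});
    weak = created;  // cache without extending the lifetime
    return created;
}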

View File

@ -66,9 +66,11 @@ public:
/// The thread and process ids are set.
Message(
const std::string & source, const std::string & text, Priority prio, const char * file, int line, std::string_view fmt_str = {});
const std::string & source, const std::string & text, Priority prio, const char * file, int line,
std::string_view fmt_str = {}, const std::vector<std::string> & fmt_str_args = {});
Message(
std::string && source, std::string && text, Priority prio, const char * file, int line, std::string_view fmt_str);
std::string && source, std::string && text, Priority prio, const char * file, int line,
std::string_view fmt_str, std::vector<std::string> && fmt_str_args);
/// Creates a Message with the given source, text, priority,
/// source file path and line.
///
@ -161,6 +163,9 @@ public:
std::string_view getFormatString() const;
void setFormatString(std::string_view fmt_str);
const std::vector<std::string> & getFormatStringArgs() const;
void setFormatStringArgs(const std::vector<std::string> & fmt_str_args);
int getSourceLine() const;
/// Returns the source file line of the statement
/// generating the log message. May be 0
@ -210,6 +215,7 @@ private:
int _line;
StringMap * _pMap;
std::string_view _fmt_str;
std::vector<std::string> _fmt_str_args;
};
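
The net effect of the hunks above, as a sketch of the data now carried by each message: alongside the formatted text, the original pattern and its rendered arguments travel with the record, presumably so downstream channels can consume logs in a structured form.

#include <string>
#include <string_view>
#include <vector>

struct LogRecord
{
    std::string text;                       // fully formatted message
    std::string_view fmt_str;               // static pattern, e.g. "read {} bytes from {}"
    std::vector<std::string> fmt_str_args;  // rendered arguments, e.g. {"4096", "data.bin"}
};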

View File

@ -120,6 +120,7 @@
#define POCO_ARCH_AARCH64 0x0f
#define POCO_ARCH_ARM64 0x0f // same as POCO_ARCH_AARCH64
#define POCO_ARCH_RISCV64 0x10
#define POCO_ARCH_LOONGARCH64 0x12
#if defined(__ALPHA) || defined(__alpha) || defined(__alpha__) || defined(_M_ALPHA)
@ -209,6 +210,9 @@
#elif defined(__riscv) && (__riscv_xlen == 64)
# define POCO_ARCH POCO_ARCH_RISCV64
# define POCO_ARCH_LITTLE_ENDIAN 1
#elif defined(__loongarch64)
# define POCO_ARCH POCO_ARCH_LOONGARCH64
# define POCO_ARCH_LITTLE_ENDIAN 1
#endif
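
The compiler-side contract the new branch relies on: GCC and Clang predefine `__loongarch64` when targeting 64-bit LoongArch, a little-endian architecture. A trivial compile-time check (sketch):

#if defined(__loongarch64)
static_assert(sizeof(void *) == 8, "LoongArch64 is a 64-bit target");
#endif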

View File

@ -38,15 +38,15 @@ public:
/// Creates the RefCountedObject.
/// The initial reference count is one.
void duplicate() const;
/// Increments the object's reference count.
size_t duplicate() const;
/// Increments the object's reference count and returns the count before the call.
void release() const throw();
size_t release() const throw();
/// Decrements the object's reference count
/// and deletes the object if the count
/// reaches zero.
/// reaches zero. Returns the reference count before the call.
int referenceCount() const;
size_t referenceCount() const;
/// Returns the reference count.
protected:
@ -57,36 +57,40 @@ private:
RefCountedObject(const RefCountedObject &);
RefCountedObject & operator=(const RefCountedObject &);
mutable AtomicCounter _counter;
mutable std::atomic<size_t> _counter;
};
//
// inlines
//
inline int RefCountedObject::referenceCount() const
inline size_t RefCountedObject::referenceCount() const
{
return _counter.value();
return _counter.load(std::memory_order_acquire);
}
inline void RefCountedObject::duplicate() const
inline size_t RefCountedObject::duplicate() const
{
++_counter;
return _counter.fetch_add(1, std::memory_order_acq_rel);
}
inline void RefCountedObject::release() const throw()
inline size_t RefCountedObject::release() const throw()
{
size_t reference_count_before = _counter.fetch_sub(1, std::memory_order_acq_rel);
try
{
if (--_counter == 0)
if (reference_count_before == 1)
delete this;
}
catch (...)
{
poco_unexpected();
}
return reference_count_before;
}
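
A condensed, self-contained sketch of the intrusive reference count after this change, using the same memory orders: `acq_rel` on the final decrement ensures the deleting thread observes all writes made through other references before `delete this` runs.

#include <atomic>
#include <cstddef>

class RefCounted
{
public:
    size_t duplicate() const { return _counter.fetch_add(1, std::memory_order_acq_rel); }

    size_t release() const
    {
        const size_t before = _counter.fetch_sub(1, std::memory_order_acq_rel);
        if (before == 1)
            delete this;  // this was the last reference
        return before;
    }

protected:
    virtual ~RefCounted() = default;

private:
    mutable std::atomic<size_t> _counter{1};
};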

View File

@ -69,6 +69,9 @@
// init() is called in the MyIOS constructor.
// Therefore we replace each call to init() with
// the poco_ios_init macro defined below.
//
// Also, this macro adjusts the exceptions() flags, since by default std::ios
// reports errors via state flags rather than throwing, while in ClickHouse it is better to pass them through as exceptions.
#if !defined(POCO_IOS_INIT_HACK)
@ -79,7 +82,10 @@
#if defined(POCO_IOS_INIT_HACK)
# define poco_ios_init(buf)
#else
# define poco_ios_init(buf) init(buf)
# define poco_ios_init(buf) do { \
init(buf); \
this->exceptions(std::ios::failbit | std::ios::badbit); \
} while (0)
#endif
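
What the reworked macro means in practice, shown on a plain standard stream (sketch): once `failbit` and `badbit` are added to `exceptions()`, stream errors surface as `std::ios_base::failure` instead of silently setting state flags.

#include <ios>
#include <sstream>

int main()
{
    std::istringstream in("not-a-number");
    in.exceptions(std::ios::failbit | std::ios::badbit);  // what poco_ios_init now adds

    int x = 0;
    try
    {
        in >> x;  // a parse failure now throws instead of just setting failbit
    }
    catch (const std::ios_base::failure &)
    {
        return 1;
    }
    return 0;
}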

View File

@ -70,6 +70,15 @@ public:
int queryConvert(const unsigned char * bytes, int length) const;
int sequenceLength(const unsigned char * bytes, int length) const;
protected:
static int safeToInt(Poco::UInt32 value)
{
if (value <= 0x10FFFF)
return static_cast<int>(value);
else
return -1;
}
private:
bool _flipBytes;
static const char * _names[];
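
The clamp in safeToInt() exists because 0x10FFFF is the largest valid Unicode code point; anything above it is reported as -1 ("invalid"). A quick standalone check (sketch):

#include <cassert>
#include <cstdint>

static int safeToInt(uint32_t value)
{
    return value <= 0x10FFFF ? static_cast<int>(value) : -1;
}

int main()
{
    assert(safeToInt(0x41) == 0x41);         // 'A'
    assert(safeToInt(0x10FFFF) == 0x10FFFF); // the last valid code point
    assert(safeToInt(0x110000) == -1);       // beyond the Unicode range
    return 0;
}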

Some files were not shown because too many files have changed in this diff.