Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-10 01:25:21 +00:00

Commit ee589f8a8b: Merge remote-tracking branch 'origin/master' into TableEngineGrant_version2

.clang-tidy (200 changes)
@@ -5,128 +5,128 @@
 # a) the new check is not controversial (this includes many checks in readability-* and google-*) or
 # b) too noisy (checks with > 100 new warnings are considered noisy, this includes e.g. cppcoreguidelines-*).
 
-# TODO: Once clang(-tidy) 17 is the minimum, we can convert this list to YAML
-# See https://releases.llvm.org/17.0.1/tools/clang/tools/extra/docs/ReleaseNotes.html#improvements-to-clang-tidy
-HeaderFilterRegex: '^.*/(base|src|programs|utils)/.*(h|hpp)$'
+# TODO Let clang-tidy check headers in further directories
+# --> HeaderFilterRegex: '^.*/(src|base|programs|utils)/.*(h|hpp)$'
+HeaderFilterRegex: '^.*/(base|programs|utils)/.*(h|hpp)$'
 
-Checks: '*,
-    -abseil-*,
+Checks: [
+    '*',
+
+    '-abseil-*',
 
-    -altera-*,
+    '-altera-*',
 
-    -android-*,
+    '-android-*',
 
-    -bugprone-assignment-in-if-condition,
-    -bugprone-branch-clone,
-    -bugprone-easily-swappable-parameters,
-    -bugprone-exception-escape,
-    -bugprone-implicit-widening-of-multiplication-result,
-    -bugprone-narrowing-conversions,
-    -bugprone-not-null-terminated-result,
-    -bugprone-reserved-identifier, # useful but too slow, TODO retry when https://reviews.llvm.org/rG1c282052624f9d0bd273bde0b47b30c96699c6c7 is merged
-    -bugprone-unchecked-optional-access,
+    '-bugprone-assignment-in-if-condition',
+    '-bugprone-branch-clone',
+    '-bugprone-easily-swappable-parameters',
+    '-bugprone-exception-escape',
+    '-bugprone-forward-declaration-namespace',
+    '-bugprone-implicit-widening-of-multiplication-result',
+    '-bugprone-narrowing-conversions',
+    '-bugprone-not-null-terminated-result',
+    '-bugprone-reserved-identifier', # useful but too slow, TODO retry when https://reviews.llvm.org/rG1c282052624f9d0bd273bde0b47b30c96699c6c7 is merged
+    '-bugprone-unchecked-optional-access',
 
-    -cert-dcl16-c,
-    -cert-dcl37-c,
-    -cert-dcl51-cpp,
-    -cert-err58-cpp,
-    -cert-msc32-c,
-    -cert-msc51-cpp,
-    -cert-oop54-cpp,
-    -cert-oop57-cpp,
+    '-cert-dcl16-c',
+    '-cert-dcl37-c',
+    '-cert-dcl51-cpp',
+    '-cert-err58-cpp',
+    '-cert-msc32-c',
+    '-cert-msc51-cpp',
+    '-cert-oop54-cpp',
+    '-cert-oop57-cpp',
 
-    -clang-analyzer-unix.Malloc,
+    '-clang-analyzer-optin.performance.Padding',
+
+    '-clang-analyzer-unix.Malloc',
 
-    -cppcoreguidelines-*, # impractical in a codebase as large as ClickHouse, also slow
+    '-cppcoreguidelines-*', # impractical in a codebase as large as ClickHouse, also slow
 
-    -darwin-*,
+    '-darwin-*',
 
-    -fuchsia-*,
+    '-fuchsia-*',
 
-    -google-build-using-namespace,
-    -google-readability-braces-around-statements,
-    -google-readability-casting,
-    -google-readability-function-size,
-    -google-readability-namespace-comments,
-    -google-readability-todo,
+    '-google-build-using-namespace',
+    '-google-readability-braces-around-statements',
+    '-google-readability-casting',
+    '-google-readability-function-size',
+    '-google-readability-namespace-comments',
+    '-google-readability-todo',
 
-    -hicpp-avoid-c-arrays,
-    -hicpp-avoid-goto,
-    -hicpp-braces-around-statements,
-    -hicpp-explicit-conversions,
-    -hicpp-function-size,
-    -hicpp-member-init,
-    -hicpp-move-const-arg,
-    -hicpp-multiway-paths-covered,
-    -hicpp-named-parameter,
-    -hicpp-no-array-decay,
-    -hicpp-no-assembler,
-    -hicpp-no-malloc,
-    -hicpp-signed-bitwise,
-    -hicpp-special-member-functions,
-    -hicpp-uppercase-literal-suffix,
-    -hicpp-use-auto,
-    -hicpp-use-emplace,
-    -hicpp-vararg,
+    '-hicpp-avoid-c-arrays',
+    '-hicpp-avoid-goto',
+    '-hicpp-braces-around-statements',
+    '-hicpp-explicit-conversions',
+    '-hicpp-function-size',
+    '-hicpp-member-init',
+    '-hicpp-move-const-arg',
+    '-hicpp-multiway-paths-covered',
+    '-hicpp-named-parameter',
+    '-hicpp-no-array-decay',
+    '-hicpp-no-assembler',
+    '-hicpp-no-malloc',
+    '-hicpp-signed-bitwise',
+    '-hicpp-special-member-functions',
+    '-hicpp-uppercase-literal-suffix',
+    '-hicpp-use-auto',
+    '-hicpp-use-emplace',
+    '-hicpp-vararg',
 
-    -linuxkernel-*,
+    '-linuxkernel-*',
 
-    -llvm-*,
+    '-llvm-*',
 
-    -llvmlibc-*,
+    '-llvmlibc-*',
 
-    -openmp-*,
+    '-openmp-*',
 
-    -misc-const-correctness,
-    -misc-include-cleaner, # useful but far too many occurrences
-    -misc-no-recursion,
-    -misc-non-private-member-variables-in-classes,
-    -misc-confusable-identifiers, # useful but slooow
-    -misc-use-anonymous-namespace,
+    '-misc-const-correctness',
+    '-misc-include-cleaner', # useful but far too many occurrences
+    '-misc-no-recursion',
+    '-misc-non-private-member-variables-in-classes',
+    '-misc-confusable-identifiers', # useful but slooow
+    '-misc-use-anonymous-namespace',
 
-    -modernize-avoid-c-arrays,
-    -modernize-concat-nested-namespaces,
-    -modernize-macro-to-enum,
-    -modernize-pass-by-value,
-    -modernize-return-braced-init-list,
-    -modernize-use-auto,
-    -modernize-use-default-member-init,
-    -modernize-use-emplace,
-    -modernize-use-nodiscard,
-    -modernize-use-override,
-    -modernize-use-trailing-return-type,
+    '-modernize-avoid-c-arrays',
+    '-modernize-concat-nested-namespaces',
+    '-modernize-macro-to-enum',
+    '-modernize-pass-by-value',
+    '-modernize-return-braced-init-list',
+    '-modernize-use-auto',
+    '-modernize-use-default-member-init',
+    '-modernize-use-emplace',
+    '-modernize-use-nodiscard',
+    '-modernize-use-override',
+    '-modernize-use-trailing-return-type',
 
-    -performance-inefficient-string-concatenation,
-    -performance-no-int-to-ptr,
-    -performance-avoid-endl,
-    -performance-unnecessary-value-param,
+    '-performance-inefficient-string-concatenation',
+    '-performance-no-int-to-ptr',
+    '-performance-avoid-endl',
+    '-performance-unnecessary-value-param',
 
-    -portability-simd-intrinsics,
+    '-portability-simd-intrinsics',
 
-    -readability-avoid-unconditional-preprocessor-if,
-    -readability-braces-around-statements,
-    -readability-convert-member-functions-to-static,
-    -readability-else-after-return,
-    -readability-function-cognitive-complexity,
-    -readability-function-size,
-    -readability-identifier-length,
-    -readability-identifier-naming, # useful but too slow
-    -readability-implicit-bool-conversion,
-    -readability-isolate-declaration,
-    -readability-magic-numbers,
-    -readability-named-parameter,
-    -readability-redundant-declaration,
-    -readability-simplify-boolean-expr,
-    -readability-static-accessed-through-instance,
-    -readability-suspicious-call-argument,
-    -readability-uppercase-literal-suffix,
-    -readability-use-anyofallof,
+    '-readability-avoid-unconditional-preprocessor-if',
+    '-readability-braces-around-statements',
+    '-readability-convert-member-functions-to-static',
+    '-readability-else-after-return',
+    '-readability-function-cognitive-complexity',
+    '-readability-function-size',
+    '-readability-identifier-length',
+    '-readability-identifier-naming', # useful but too slow
+    '-readability-implicit-bool-conversion',
+    '-readability-isolate-declaration',
+    '-readability-magic-numbers',
+    '-readability-named-parameter',
+    '-readability-redundant-declaration',
+    '-readability-simplify-boolean-expr',
+    '-readability-static-accessed-through-instance',
+    '-readability-suspicious-call-argument',
+    '-readability-uppercase-literal-suffix',
+    '-readability-use-anyofallof',
 
-    -zircon-*,
-'
+    '-zircon-*'
+]
 
 WarningsAsErrors: '*'
.github/PULL_REQUEST_TEMPLATE.md (vendored, 42 changes)

@@ -40,3 +40,45 @@ At a minimum, the following information should be added (but add more as needed)
 
 > Information about CI checks: https://clickhouse.com/docs/en/development/continuous-integration/
+
+---
+### Modify your CI run:
+**NOTE:** If you merge the PR with modified CI you **MUST KNOW** what you are doing
+**NOTE:** Checked options will be applied if set before the CI RunConfig/PrepareRunConfig step
+
+#### Include tests (required builds will be added automatically):
+- [ ] <!---ci_include_fast--> Fast test
+- [ ] <!---ci_include_integration--> Integration Tests
+- [ ] <!---ci_include_stateless--> Stateless tests
+- [ ] <!---ci_include_stateful--> Stateful tests
+- [ ] <!---ci_include_unit--> Unit tests
+- [ ] <!---ci_include_performance--> Performance tests
+- [ ] <!---ci_include_asan--> All with ASAN
+- [ ] <!---ci_include_tsan--> All with TSAN
+- [ ] <!---ci_include_analyzer--> All with Analyzer
+- [ ] <!---ci_include_KEYWORD--> Add your option here
+
+#### Exclude tests:
+- [ ] <!---ci_exclude_fast--> Fast test
+- [ ] <!---ci_exclude_integration--> Integration Tests
+- [ ] <!---ci_exclude_stateless--> Stateless tests
+- [ ] <!---ci_exclude_stateful--> Stateful tests
+- [ ] <!---ci_exclude_performance--> Performance tests
+- [ ] <!---ci_exclude_asan--> All with ASAN
+- [ ] <!---ci_exclude_tsan--> All with TSAN
+- [ ] <!---ci_exclude_msan--> All with MSAN
+- [ ] <!---ci_exclude_ubsan--> All with UBSAN
+- [ ] <!---ci_exclude_coverage--> All with Coverage
+- [ ] <!---ci_exclude_aarch64--> All with Aarch64
+- [ ] <!---ci_exclude_KEYWORD--> Add your option here
+
+#### Extra options:
+- [ ] <!---do_not_test--> do not test (only style check)
+- [ ] <!---no_merge_commit--> disable merge-commit (no merge from master before tests)
+- [ ] <!---no_ci_cache--> disable CI cache (job reuse)
+
+#### Only specified batches in multi-batch jobs:
+- [ ] <!---batch_0--> 1
+- [ ] <!---batch_1--> 2
+- [ ] <!---batch_2--> 3
+- [ ] <!---batch_3--> 4
.github/workflows/master.yml (vendored, 4 changes)

@@ -374,7 +374,7 @@ jobs:
     if: ${{ !failure() && !cancelled() }}
     uses: ./.github/workflows/reusable_test.yml
     with:
-      test_name: Stateless tests (release, analyzer, s3, DatabaseReplicated)
+      test_name: Stateless tests (release, old analyzer, s3, DatabaseReplicated)
       runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
   FunctionalStatelessTestS3Debug:
@@ -632,7 +632,7 @@ jobs:
     if: ${{ !failure() && !cancelled() }}
     uses: ./.github/workflows/reusable_test.yml
     with:
-      test_name: Integration tests (asan, analyzer)
+      test_name: Integration tests (asan, old analyzer)
       runner_type: stress-tester
       data: ${{ needs.RunConfig.outputs.data }}
   IntegrationTestsTsan:
.github/workflows/nightly.yml (vendored, 59 changes)

@@ -45,62 +45,3 @@ jobs:
     with:
       data: "${{ needs.RunConfig.outputs.data }}"
       set_latest: true
-  SonarCloud:
-    runs-on: [self-hosted, builder]
-    env:
-      SONAR_SCANNER_VERSION: 4.8.0.2856
-      SONAR_SERVER_URL: "https://sonarcloud.io"
-      BUILD_WRAPPER_OUT_DIR: build_wrapper_output_directory # Directory where build-wrapper output will be placed
-      CC: clang-17
-      CXX: clang++-17
-    steps:
-      - name: Check out repository code
-        uses: ClickHouse/checkout@v1
-        with:
-          clear-repository: true
-          fetch-depth: 0 # Shallow clones should be disabled for a better relevancy of analysis
-          filter: tree:0
-          submodules: true
-      - name: Set up JDK 11
-        uses: actions/setup-java@v1
-        with:
-          java-version: 11
-      - name: Download and set up sonar-scanner
-        env:
-          SONAR_SCANNER_DOWNLOAD_URL: https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-${{ env.SONAR_SCANNER_VERSION }}-linux.zip
-        run: |
-          mkdir -p "$HOME/.sonar"
-          curl -sSLo "$HOME/.sonar/sonar-scanner.zip" "${{ env.SONAR_SCANNER_DOWNLOAD_URL }}"
-          unzip -o "$HOME/.sonar/sonar-scanner.zip" -d "$HOME/.sonar/"
-          echo "$HOME/.sonar/sonar-scanner-${{ env.SONAR_SCANNER_VERSION }}-linux/bin" >> "$GITHUB_PATH"
-      - name: Download and set up build-wrapper
-        env:
-          BUILD_WRAPPER_DOWNLOAD_URL: ${{ env.SONAR_SERVER_URL }}/static/cpp/build-wrapper-linux-x86.zip
-        run: |
-          curl -sSLo "$HOME/.sonar/build-wrapper-linux-x86.zip" "${{ env.BUILD_WRAPPER_DOWNLOAD_URL }}"
-          unzip -o "$HOME/.sonar/build-wrapper-linux-x86.zip" -d "$HOME/.sonar/"
-          echo "$HOME/.sonar/build-wrapper-linux-x86" >> "$GITHUB_PATH"
-      - name: Set Up Build Tools
-        run: |
-          sudo apt-get update
-          sudo apt-get install -yq git cmake ccache ninja-build python3 yasm nasm
-          sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
-      - name: Run build-wrapper
-        run: |
-          mkdir build
-          cd build
-          cmake ..
-          cd ..
-          build-wrapper-linux-x86-64 --out-dir ${{ env.BUILD_WRAPPER_OUT_DIR }} cmake --build build/
-      - name: Run sonar-scanner
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
-        run: |
-          sonar-scanner \
-            --define sonar.host.url="${{ env.SONAR_SERVER_URL }}" \
-            --define sonar.cfamily.build-wrapper-output="${{ env.BUILD_WRAPPER_OUT_DIR }}" \
-            --define sonar.projectKey="ClickHouse_ClickHouse" \
-            --define sonar.organization="clickhouse-java" \
-            --define sonar.cfamily.cpp23.enabled=true \
-            --define sonar.exclusions="**/*.java,**/*.ts,**/*.js,**/*.css,**/*.sql"
.github/workflows/pull_request.yml (vendored, 12 changes)

@@ -6,6 +6,7 @@ env:
   PYTHONUNBUFFERED: 1
 
 on: # yamllint disable-line rule:truthy
+  merge_group:
   pull_request:
     types:
       - synchronize
@@ -29,6 +30,7 @@ jobs:
           fetch-depth: 0 # to get version
           filter: tree:0
       - name: Labels check
+        if: ${{ github.event_name != 'merge_group' }}
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
           python3 run_check.py
@@ -56,16 +58,9 @@ jobs:
             echo 'EOF'
           } >> "$GITHUB_OUTPUT"
       - name: Re-create GH statuses for skipped jobs if any
+        if: ${{ github.event_name != 'merge_group' }}
         run: |
           python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ runner.temp }}/ci_run_data.json --update-gh-statuses
-      - name: Style check early
-        # hack to run style check before the docker build job if possible (style-check image not changed)
-        if: contains(fromJson(steps.runconfig.outputs.CI_DATA).jobs_data.jobs_to_do, 'Style check early')
-        run: |
-          DOCKER_TAG=$(echo '${{ toJson(fromJson(steps.runconfig.outputs.CI_DATA).docker_data.images) }}' | tr -d '\n')
-          export DOCKER_TAG=$DOCKER_TAG
-          python3 ./tests/ci/style_check.py --no-push
-          python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ runner.temp }}/ci_run_data.json --post --job-name 'Style check'
   BuildDockers:
     needs: [RunConfig]
     if: ${{ !failure() && !cancelled() && toJson(fromJson(needs.RunConfig.outputs.data).docker_data.missing_multi) != '[]' }}
@@ -172,6 +167,7 @@ jobs:
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
           python3 finish_check.py
+          python3 merge_pr.py --check-approved
 
 
 #############################################################################################
.github/workflows/release_branches.yml (vendored, 2 changes)

@@ -436,7 +436,7 @@ jobs:
     if: ${{ !failure() && !cancelled() }}
     uses: ./.github/workflows/reusable_test.yml
     with:
-      test_name: Integration tests (asan, analyzer)
+      test_name: Integration tests (asan, old analyzer)
       runner_type: stress-tester
       data: ${{ needs.RunConfig.outputs.data }}
   IntegrationTestsTsan:
.github/workflows/reusable_build.yml (vendored, 3 changes)

@@ -43,8 +43,7 @@ jobs:
     runs-on: [self-hosted, '${{inputs.runner_type}}']
     steps:
       - name: Check out repository code
-        # WIP: temporary try commit with limited perallelization of checkout
-        uses: ClickHouse/checkout@0be3f7b3098bae494d3ef5d29d2e0676fb606232
+        uses: ClickHouse/checkout@v1
         with:
           clear-repository: true
           ref: ${{ fromJson(inputs.data).git_ref }}
.gitignore (vendored, 3 changes)

@@ -164,6 +164,9 @@ tests/queries/0_stateless/*.generated-expect
 tests/queries/0_stateless/*.expect.history
 tests/integration/**/_gen
 
+# pytest --pdb history
+.pdb_history
+
 # rust
 /rust/**/target*
 # It is autogenerated from *.in

@@ -26,4 +26,4 @@
 
 ## To run only specified batches for multi-batch job(s)
 #batch_2
-#btach_1_2_3
+#batch_1_2_3
CHANGELOG.md (173 changes)

@@ -1,10 +1,183 @@
### Table of Contents
**[ClickHouse release v24.3 LTS, 2024-03-26](#243)**<br/>
**[ClickHouse release v24.2, 2024-02-29](#242)**<br/>
**[ClickHouse release v24.1, 2024-01-30](#241)**<br/>
**[Changelog for 2023](https://clickhouse.com/docs/en/whats-new/changelog/2023/)**<br/>

# 2024 Changelog

### <a id="243"></a> ClickHouse release 24.3 LTS, 2024-03-27
#### Upgrade Notes
* The setting `allow_experimental_analyzer` is enabled by default and switches query analysis to a new implementation, which has better compatibility and feature completeness. The "analyzer" feature is now considered beta instead of experimental. You can revert to the old behavior by setting `compatibility` to `24.2` or by disabling the `allow_experimental_analyzer` setting (see the sketch after this list). Watch the [video on YouTube](https://www.youtube.com/watch?v=zhrOYQpgvkk).
* ClickHouse allows arbitrary binary data in the String data type, which is typically UTF-8. Parquet/ORC/Arrow Strings only support UTF-8. That's why you can choose which Arrow data type to use for the ClickHouse String data type - String or Binary. This is controlled by the settings `output_format_parquet_string_as_string`, `output_format_orc_string_as_string`, and `output_format_arrow_string_as_string`. While Binary would be more correct and compatible, using String by default will correspond to user expectations in most cases. Parquet/ORC/Arrow supports many compression methods, including lz4 and zstd. ClickHouse supports each and every compression method. Some inferior tools lack support for the faster `lz4` compression method, which is why we set `zstd` by default. This is controlled by the settings `output_format_parquet_compression_method`, `output_format_orc_compression_method`, and `output_format_arrow_compression_method`. We changed the default to `zstd` for Parquet and ORC, but not Arrow (it is emphasized for low-level usages). [#61817](https://github.com/ClickHouse/ClickHouse/pull/61817) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* In the new ClickHouse version, the functions `geoDistance`, `greatCircleDistance`, and `greatCircleAngle` will use 64-bit double precision floating point for internal calculations and the return type if all the arguments are Float64. This closes [#58476](https://github.com/ClickHouse/ClickHouse/issues/58476). In previous versions, the functions always used Float32. You can switch to the old behavior by setting `geo_distance_returns_float64_on_float64_arguments` to `false` or setting `compatibility` to `24.2` or earlier. [#61848](https://github.com/ClickHouse/ClickHouse/pull/61848) ([Alexey Milovidov](https://github.com/alexey-milovidov)). Co-authored with [Geet Patel](https://github.com/geetptl).
* The obsolete in-memory data parts have been deprecated since version 23.5 and have not been supported since version 23.10. Now the remaining code is removed. Continuation of [#55186](https://github.com/ClickHouse/ClickHouse/issues/55186) and [#45409](https://github.com/ClickHouse/ClickHouse/issues/45409). It is unlikely that you have used in-memory data parts because they were available only before version 23.5 and only when you enabled them manually by specifying the corresponding SETTINGS for a MergeTree table. To check if you have in-memory data parts, run the following query: `SELECT part_type, count() FROM system.parts GROUP BY part_type ORDER BY part_type`. To disable the usage of in-memory data parts, do `ALTER TABLE ... MODIFY SETTING min_bytes_for_compact_part = DEFAULT, min_rows_for_compact_part = DEFAULT`. Before upgrading from old ClickHouse releases, first check that you don't have in-memory data parts. If there are in-memory data parts, disable them first, then wait until there are no in-memory data parts, and continue the upgrade. [#61127](https://github.com/ClickHouse/ClickHouse/pull/61127) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Changed the column name from `duration_ms` to `duration_microseconds` in the `system.zookeeper` table to reflect the reality that the duration is in microsecond resolution. [#60774](https://github.com/ClickHouse/ClickHouse/pull/60774) ([Duc Canh Le](https://github.com/canhld94)).
* Reject incoming INSERT queries when the query-level settings `async_insert` and `deduplicate_blocks_in_dependent_materialized_views` are enabled together. This behaviour is controlled by the setting `throw_if_deduplication_in_dependent_materialized_views_enabled_with_async_insert` and enabled by default. This is a continuation of https://github.com/ClickHouse/ClickHouse/pull/59699 needed to unblock https://github.com/ClickHouse/ClickHouse/pull/59915. [#60888](https://github.com/ClickHouse/ClickHouse/pull/60888) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Utility `clickhouse-copier` is moved to a separate repository on GitHub: https://github.com/ClickHouse/copier. It is no longer included in the bundle but is still available as a separate download. This closes: [#60734](https://github.com/ClickHouse/ClickHouse/issues/60734) This closes: [#60540](https://github.com/ClickHouse/ClickHouse/issues/60540) This closes: [#60250](https://github.com/ClickHouse/ClickHouse/issues/60250) This closes: [#52917](https://github.com/ClickHouse/ClickHouse/issues/52917) This closes: [#51140](https://github.com/ClickHouse/ClickHouse/issues/51140) This closes: [#47517](https://github.com/ClickHouse/ClickHouse/issues/47517) This closes: [#47189](https://github.com/ClickHouse/ClickHouse/issues/47189) This closes: [#46598](https://github.com/ClickHouse/ClickHouse/issues/46598) This closes: [#40257](https://github.com/ClickHouse/ClickHouse/issues/40257) This closes: [#36504](https://github.com/ClickHouse/ClickHouse/issues/36504) This closes: [#35485](https://github.com/ClickHouse/ClickHouse/issues/35485) This closes: [#33702](https://github.com/ClickHouse/ClickHouse/issues/33702) This closes: [#26702](https://github.com/ClickHouse/ClickHouse/issues/26702).
* To increase compatibility with MySQL, the compatibility alias `locate` now accepts arguments `(needle, haystack[, start_pos])` by default. The previous behavior `(haystack, needle[, start_pos])` can be restored by setting `function_locate_has_mysql_compatible_argument_order = 0` (see the sketch after this list). [#61092](https://github.com/ClickHouse/ClickHouse/pull/61092) ([Robert Schulze](https://github.com/rschu1ze)).
* Forbid `SimpleAggregateFunction` in `ORDER BY` of `MergeTree` tables by default, in the same way as `AggregateFunction` is forbidden (but that one is forbidden because it is not comparable); use `allow_suspicious_primary_key` to allow them. [#61399](https://github.com/ClickHouse/ClickHouse/pull/61399) ([Azat Khuzhin](https://github.com/azat)).
* The `Ordinary` database engine is deprecated. You will receive a warning in clickhouse-client if your server is using it. This closes [#52229](https://github.com/ClickHouse/ClickHouse/issues/52229). [#56942](https://github.com/ClickHouse/ClickHouse/pull/56942) ([shabroo](https://github.com/shabroo)).
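For illustration, a minimal SQL sketch of reverting the two behavior changes called out above (an editor's example, not part of the changelog; the setting names are taken verbatim from the entries):

```sql
-- Revert query analysis to the pre-24.3 implementation, either via the
-- compatibility setting or by disabling the analyzer directly:
SET compatibility = '24.2';
SET allow_experimental_analyzer = 0;

-- locate() now uses the MySQL-compatible (needle, haystack) argument order;
-- restore the previous (haystack, needle) order with:
SET function_locate_has_mysql_compatible_argument_order = 0;
```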
#### New Feature
* Support reading and writing backups as `tar` (in addition to `zip`). [#59535](https://github.com/ClickHouse/ClickHouse/pull/59535) ([josh-hildred](https://github.com/josh-hildred)).
* Implemented support for S3 Express buckets. [#59965](https://github.com/ClickHouse/ClickHouse/pull/59965) ([Nikita Taranov](https://github.com/nickitat)).
* Allow attaching parts from a different disk (using copy instead of hard link). [#60112](https://github.com/ClickHouse/ClickHouse/pull/60112) ([Unalian](https://github.com/Unalian)).
* Size-capped `Memory` tables, controlled by their settings `min_bytes_to_keep`, `max_bytes_to_keep`, `min_rows_to_keep`, and `max_rows_to_keep` (see the sketch after this list). [#60612](https://github.com/ClickHouse/ClickHouse/pull/60612) ([Jake Bamrah](https://github.com/JakeBamrah)).
* Separate limits on the number of waiting and executing queries. Added a new server setting `max_waiting_queries` that limits the number of queries waiting due to `async_load_databases`. Existing limits on the number of executing queries no longer count waiting queries. [#61053](https://github.com/ClickHouse/ClickHouse/pull/61053) ([Sergei Trifonov](https://github.com/serxa)).
* Added a table `system.keywords` which contains all the keywords from the parser. Mostly needed and will be used for better fuzzing and syntax highlighting. [#51808](https://github.com/ClickHouse/ClickHouse/pull/51808) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Add support for `ATTACH PARTITION ALL`. [#61107](https://github.com/ClickHouse/ClickHouse/pull/61107) ([Kirill Nikiforov](https://github.com/allmazz)).
* Add a new function, `getClientHTTPHeader`. This closes [#54665](https://github.com/ClickHouse/ClickHouse/issues/54665). Co-authored with @lingtaolf. [#61820](https://github.com/ClickHouse/ClickHouse/pull/61820) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add `generate_series` as a table function (a PostgreSQL-compatibility alias for the existing `numbers` function). It generates a table with an arithmetic progression of natural numbers (see the sketch after this list). [#59390](https://github.com/ClickHouse/ClickHouse/pull/59390) ([divanik](https://github.com/divanik)).
* Add a mode for `topK`/`topKWeighted` that returns the count of values and its error. [#54508](https://github.com/ClickHouse/ClickHouse/pull/54508) ([UnamedRus](https://github.com/UnamedRus)).
* Added function `toMillisecond`, which returns the millisecond component for values of type `DateTime` or `DateTime64`. [#60281](https://github.com/ClickHouse/ClickHouse/pull/60281) ([Shaun Struwig](https://github.com/Blargian)).
* Allow configuring HTTP redirect handlers for clickhouse-server. For example, you can make `/` redirect to the Play UI. [#60390](https://github.com/ClickHouse/ClickHouse/pull/60390) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
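A few of the entries above, sketched in SQL (an editor's illustration using the names stated in the entries; `cache` is a hypothetical table and the exact retention behavior of the `Memory` caps is assumed, not verified here):

```sql
-- generate_series: inclusive bounds, PostgreSQL-style
SELECT * FROM generate_series(1, 5);

-- millisecond component of a DateTime64 value
SELECT toMillisecond(toDateTime64('2024-03-27 12:00:00.456', 3));

-- a size-capped Memory table: old data is dropped once a cap is exceeded
CREATE TABLE cache (k UInt64, v String) ENGINE = Memory
SETTINGS max_bytes_to_keep = 104857600, max_rows_to_keep = 1000000;
```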
#### Performance Improvement
* Optimized function `dotProduct` to omit unnecessary and expensive memory copies. [#60928](https://github.com/ClickHouse/ClickHouse/pull/60928) ([Robert Schulze](https://github.com/rschu1ze)).
* 30x faster printing for 256-bit integers. [#61100](https://github.com/ClickHouse/ClickHouse/pull/61100) ([Raúl Marín](https://github.com/Algunenano)).
* If the table's primary key contains mostly useless columns, don't keep them in memory. This is controlled by a new setting `primary_key_ratio_of_unique_prefix_values_to_skip_suffix_columns` with the value `0.9` by default, which means: for a composite primary key, if a column changes its value for at least 0.9 of all the times, the columns after it will not be loaded. [#60255](https://github.com/ClickHouse/ClickHouse/pull/60255) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Improve the performance of the serialized aggregation method when involving multiple `Nullable` columns. [#55809](https://github.com/ClickHouse/ClickHouse/pull/55809) ([Amos Bird](https://github.com/amosbird)).
* Build JSON output lazily to improve the performance of ALL JOIN. [#58278](https://github.com/ClickHouse/ClickHouse/pull/58278) ([LiuNeng](https://github.com/liuneng1994)).
* Make HTTP/HTTPS connections to external services, such as AWS S3, reusable for all use cases, even when the response is 3xx or 4xx. [#58845](https://github.com/ClickHouse/ClickHouse/pull/58845) ([Sema Checherinda](https://github.com/CheSema)).
* Improvements to aggregate functions `argMin` / `argMax` / `any` / `anyLast` / `anyHeavy`, as well as `ORDER BY {u8/u16/u32/u64/i8/i16/i32/i64} LIMIT 1` queries. [#58640](https://github.com/ClickHouse/ClickHouse/pull/58640) ([Raúl Marín](https://github.com/Algunenano)).
* Trivial optimization for column filters. Peak memory can be reduced to 44% of the original in some cases. [#59698](https://github.com/ClickHouse/ClickHouse/pull/59698) ([李扬](https://github.com/taiyang-li)).
* Execute the `multiIf` function in a columnar fashion when the result type's underlying type is a number. [#60384](https://github.com/ClickHouse/ClickHouse/pull/60384) ([李扬](https://github.com/taiyang-li)).
* Faster (almost 2x) mutexes. [#60823](https://github.com/ClickHouse/ClickHouse/pull/60823) ([Azat Khuzhin](https://github.com/azat)).
* Drain multiple connections in parallel when a distributed query is finishing. [#60845](https://github.com/ClickHouse/ClickHouse/pull/60845) ([lizhuoyu5](https://github.com/lzydmxy)).
* Optimize data movement between columns of a Nullable number or a Nullable string, which improves some micro-benchmarks. [#60846](https://github.com/ClickHouse/ClickHouse/pull/60846) ([李扬](https://github.com/taiyang-li)).
* Operations with the filesystem cache will suffer less from lock contention. [#61066](https://github.com/ClickHouse/ClickHouse/pull/61066) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Optimize array join and other JOINs by preventing a wrong compiler optimization. Close [#61074](https://github.com/ClickHouse/ClickHouse/issues/61074). [#61075](https://github.com/ClickHouse/ClickHouse/pull/61075) ([李扬](https://github.com/taiyang-li)).
* If a query with a syntax error contained the `COLUMNS` matcher with a regular expression, the regular expression was compiled each time during the parser's backtracking instead of being compiled once. This was a fundamental error. The compiled regexp was put into the AST. But the letter A in AST means "abstract", which means it should not contain heavyweight objects. Parts of the AST can be created and discarded during parsing, including during a large number of backtracking steps. This led to slowness on the parsing side and consequently allowed a DoS by a readonly user. But the main problem is that it prevented progress in fuzzers. [#61543](https://github.com/ClickHouse/ClickHouse/pull/61543) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a new analyzer pass to optimize the IN operator for a single value. [#61564](https://github.com/ClickHouse/ClickHouse/pull/61564) ([LiuNeng](https://github.com/liuneng1994)).
* DNSResolver shuffles the set of resolved IPs, which is needed to uniformly utilize multiple endpoints of AWS S3. [#60965](https://github.com/ClickHouse/ClickHouse/pull/60965) ([Sema Checherinda](https://github.com/CheSema)).
#### Experimental Feature
* Support parallel reading for Azure blob storage. This improves the performance of the experimental Azure object storage. [#61503](https://github.com/ClickHouse/ClickHouse/pull/61503) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Add an asynchronous WriteBuffer for Azure blob storage, similar to S3. This improves the performance of the experimental Azure object storage. [#59929](https://github.com/ClickHouse/ClickHouse/pull/59929) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Use managed identity for backups IO when using Azure Blob Storage. Add a setting to prevent ClickHouse from attempting to create a non-existent container, which requires permissions at the storage account level. [#61785](https://github.com/ClickHouse/ClickHouse/pull/61785) ([Daniel Pozo Escalona](https://github.com/danipozo)).
* Add a setting `parallel_replicas_allow_in_with_subquery = 1` which allows subqueries for IN to work with parallel replicas (see the sketch after this list). [#60950](https://github.com/ClickHouse/ClickHouse/pull/60950) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* A change for the "zero-copy" replication: all zero-copy locks related to a table have to be dropped when the table is dropped. The directory which contains these locks has to be removed as well. [#57575](https://github.com/ClickHouse/ClickHouse/pull/57575) ([Sema Checherinda](https://github.com/CheSema)).
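A minimal sketch of the parallel-replicas setting above (an editor's example; the setting name comes from the entry, while `big_table` and `dims` are hypothetical tables):

```sql
SET parallel_replicas_allow_in_with_subquery = 1;

-- IN (subquery) can now participate in a query executed with parallel replicas
SELECT count()
FROM big_table
WHERE id IN (SELECT id FROM dims WHERE region = 'EU');
```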
#### Improvement
* Use `MergeTree` as a default table engine. [#60524](https://github.com/ClickHouse/ClickHouse/pull/60524) ([Alexey Milovidov](https://github.com/alexey-milovidov))
* Enable `output_format_pretty_row_numbers` by default. It is better for usability. [#61791](https://github.com/ClickHouse/ClickHouse/pull/61791) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* In the previous version, some numbers in Pretty formats were not pretty enough. [#61794](https://github.com/ClickHouse/ClickHouse/pull/61794) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* A long value in Pretty formats won't be cut if it is the single value in the result set, such as in the result of the `SHOW CREATE TABLE` query. [#61795](https://github.com/ClickHouse/ClickHouse/pull/61795) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Similarly to `clickhouse-local`, `clickhouse-client` will accept the `--output-format` option as a synonym for the `--format` option. This closes [#59848](https://github.com/ClickHouse/ClickHouse/issues/59848). [#61797](https://github.com/ClickHouse/ClickHouse/pull/61797) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* If stdout is a terminal and the output format is not specified, `clickhouse-client` and similar tools will use `PrettyCompact` by default, similarly to the interactive mode. `clickhouse-client` and `clickhouse-local` will handle command line arguments for input and output formats in a unified fashion. This closes [#61272](https://github.com/ClickHouse/ClickHouse/issues/61272). [#61800](https://github.com/ClickHouse/ClickHouse/pull/61800) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Underscore digit groups in Pretty formats for better readability. This is controlled by a new setting, `output_format_pretty_highlight_digit_groups`. [#61802](https://github.com/ClickHouse/ClickHouse/pull/61802) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add the ability to override initial INSERT settings via `SYSTEM FLUSH DISTRIBUTED`. [#61832](https://github.com/ClickHouse/ClickHouse/pull/61832) ([Azat Khuzhin](https://github.com/azat)).
* Enable processors profiling (time spent and in/out bytes for sorting, aggregation, ...) by default. [#61096](https://github.com/ClickHouse/ClickHouse/pull/61096) ([Azat Khuzhin](https://github.com/azat)).
* Support files without a format extension in the Filesystem database. [#60795](https://github.com/ClickHouse/ClickHouse/pull/60795) ([Kruglov Pavel](https://github.com/Avogar)).
* Make all format names case-insensitive, like Tsv, or TSV, or tsv, or even rowbinary (this and a few other entries are illustrated in the SQL sketch after this list). [#60420](https://github.com/ClickHouse/ClickHouse/pull/60420) ([豪肥肥](https://github.com/HowePa)). I'd appreciate it if you continue to write it correctly, e.g., `JSON` 😇, not `Json` 🤮, but we don't mind if you spell it as you prefer.
* Added `none_only_active` mode for the `distributed_ddl_output_mode` setting. [#60340](https://github.com/ClickHouse/ClickHouse/pull/60340) ([Alexander Tokmakov](https://github.com/tavplubix)).
* The advanced dashboard has slightly better colors for multi-line graphs. [#60391](https://github.com/ClickHouse/ClickHouse/pull/60391) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The advanced dashboard now has controls always visible on scrolling. This allows you to add a new chart without scrolling up. [#60692](https://github.com/ClickHouse/ClickHouse/pull/60692) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* While running the `MODIFY COLUMN` query for materialized views, check the inner table's structure to ensure every column exists. [#47427](https://github.com/ClickHouse/ClickHouse/pull/47427) ([sunny](https://github.com/sunny19930321)).
* String types and Enums can be used in the same context, such as arrays, UNION queries, and conditional expressions. This closes [#60726](https://github.com/ClickHouse/ClickHouse/issues/60726). [#60727](https://github.com/ClickHouse/ClickHouse/pull/60727) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Allow declaring Enums in the structure of external data for query processing (this is an immediate temporary table that you can provide for your query). [#57857](https://github.com/ClickHouse/ClickHouse/pull/57857) ([Duc Canh Le](https://github.com/canhld94)).
* Consider lightweight deleted rows when selecting parts to merge, so the disk size of the resulting part will be estimated better. [#58223](https://github.com/ClickHouse/ClickHouse/pull/58223) ([Zhuo Qiu](https://github.com/jewelzqiu)).
* Added comments for columns for more system tables. Continuation of https://github.com/ClickHouse/ClickHouse/pull/58356. [#59016](https://github.com/ClickHouse/ClickHouse/pull/59016) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Now we can use virtual columns in PREWHERE. It's worthwhile for non-const virtual columns like `_part_offset`. [#59033](https://github.com/ClickHouse/ClickHouse/pull/59033) ([Amos Bird](https://github.com/amosbird)). Improved overall usability of virtual columns: now built-in documentation is available for virtual columns as a column comment in a `DESCRIBE` query with the setting `describe_include_virtual_columns` enabled. [#60205](https://github.com/ClickHouse/ClickHouse/pull/60205) ([Anton Popov](https://github.com/CurtizJ)).
* Instead of using a constant key, object storage now generates a key for determining the remove-objects capability. [#59495](https://github.com/ClickHouse/ClickHouse/pull/59495) ([Sema Checherinda](https://github.com/CheSema)).
* Allow "local" as the object storage type instead of "local_blob_storage". [#60165](https://github.com/ClickHouse/ClickHouse/pull/60165) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Parallel flush of pending INSERT blocks of the Distributed engine on `DETACH`/server shutdown and `SYSTEM FLUSH DISTRIBUTED` (parallelism will work only if you have a multi-disk policy for the table, like everything in the Distributed engine right now). [#60225](https://github.com/ClickHouse/ClickHouse/pull/60225) ([Azat Khuzhin](https://github.com/azat)).
* Add a setting to force read-through cache for merges. [#60308](https://github.com/ClickHouse/ClickHouse/pull/60308) ([Kseniia Sumarokova](https://github.com/kssenii)).
* An improvement for the MySQL compatibility protocol. The issue [#57598](https://github.com/ClickHouse/ClickHouse/issues/57598) mentions a variant behaviour regarding transaction handling: an issued COMMIT/ROLLBACK when no transaction is active was reported as an error, contrary to MySQL behaviour. [#60338](https://github.com/ClickHouse/ClickHouse/pull/60338) ([PapaToemmsn](https://github.com/PapaToemmsn)).
* Function `substring` now has a new alias `byteSlice`. [#60494](https://github.com/ClickHouse/ClickHouse/pull/60494) ([Robert Schulze](https://github.com/rschu1ze)).
* Renamed server setting `dns_cache_max_size` to `dns_cache_max_entries` to reduce ambiguity. [#60500](https://github.com/ClickHouse/ClickHouse/pull/60500) ([Kirill Nikiforov](https://github.com/allmazz)).
* `SHOW INDEX | INDEXES | INDICES | KEYS` no longer sorts by the primary key columns (which was unintuitive). [#60514](https://github.com/ClickHouse/ClickHouse/pull/60514) ([Robert Schulze](https://github.com/rschu1ze)).
* Keeper improvement: abort during startup if an invalid snapshot is detected, to avoid data loss. [#60537](https://github.com/ClickHouse/ClickHouse/pull/60537) ([Antonio Andelic](https://github.com/antonio2368)).
* Update tzdata to 2024a. [#60768](https://github.com/ClickHouse/ClickHouse/pull/60768) ([Raúl Marín](https://github.com/Algunenano)).
* Keeper improvement: support `leadership_expiry_ms` in Keeper's settings. [#60806](https://github.com/ClickHouse/ClickHouse/pull/60806) ([Brokenice0415](https://github.com/Brokenice0415)).
* Always infer exponential numbers in JSON formats, regardless of the setting `input_format_try_infer_exponent_floats`. Add a setting `input_format_json_use_string_type_for_ambiguous_paths_in_named_tuples_inference_from_objects` that allows using the String type for ambiguous paths instead of throwing an exception during named Tuple inference from JSON objects. [#60808](https://github.com/ClickHouse/ClickHouse/pull/60808) ([Kruglov Pavel](https://github.com/Avogar)).
* Add support for the `START TRANSACTION` syntax typically used in MySQL, resolving https://github.com/ClickHouse/ClickHouse/discussions/60865. [#60886](https://github.com/ClickHouse/ClickHouse/pull/60886) ([Zach Naimon](https://github.com/ArctypeZach)).
* Add a flag for the full-sorting merge join algorithm to treat null as the biggest/smallest value, so the behavior can be compatible with other SQL systems, like Apache Spark. [#60896](https://github.com/ClickHouse/ClickHouse/pull/60896) ([loudongfeng](https://github.com/loudongfeng)).
* Support detecting the output format by file extension in `clickhouse-client` and `clickhouse-local`. [#61036](https://github.com/ClickHouse/ClickHouse/pull/61036) ([豪肥肥](https://github.com/HowePa)).
* Update the memory limit at runtime when the Linux cgroups value changes. [#61049](https://github.com/ClickHouse/ClickHouse/pull/61049) ([Han Fei](https://github.com/hanfei1991)).
* Add the function `toUInt128OrZero`, which was missed by mistake (the mistake is related to https://github.com/ClickHouse/ClickHouse/pull/945). The compatibility aliases `FROM_UNIXTIME` and `DATE_FORMAT` (they are not ClickHouse-native and only exist for MySQL compatibility) have been made case-insensitive, as expected for SQL-compatibility aliases. [#61114](https://github.com/ClickHouse/ClickHouse/pull/61114) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Improvements for the access checks, allowing revocation of unpossessed rights in case the target user doesn't have the revoking grants either. Example: `GRANT SELECT ON *.* TO user1; REVOKE SELECT ON system.* FROM user1;`. [#61115](https://github.com/ClickHouse/ClickHouse/pull/61115) ([pufit](https://github.com/pufit)).
* Fix the `has()` function with a `Nullable` column (fixes [#60214](https://github.com/ClickHouse/ClickHouse/issues/60214)). [#61249](https://github.com/ClickHouse/ClickHouse/pull/61249) ([Mikhail Koviazin](https://github.com/mkmkme)).
* Now it's possible to specify the attribute `merge="true"` in config substitutions for subtrees `<include from_zk="/path" merge="true">`. If this attribute is specified, ClickHouse will merge the subtree with the existing configuration; otherwise the default behavior is to append the new content to the configuration. [#61299](https://github.com/ClickHouse/ClickHouse/pull/61299) ([alesapin](https://github.com/alesapin)).
* Add async metrics for virtual memory mappings: `VMMaxMapCount` & `VMNumMaps`. Closes [#60662](https://github.com/ClickHouse/ClickHouse/issues/60662). [#61354](https://github.com/ClickHouse/ClickHouse/pull/61354) ([Tuan Pham Anh](https://github.com/tuanpavn)).
* Use the `temporary_files_codec` setting in all places where we create temporary data, for example external memory sorting and external memory GROUP BY. Previously it worked only for the `partial_merge` JOIN algorithm. [#61456](https://github.com/ClickHouse/ClickHouse/pull/61456) ([Maksim Kita](https://github.com/kitaisreal)).
* Add a new setting `max_parser_backtracks` to limit the complexity of query parsing. [#61502](https://github.com/ClickHouse/ClickHouse/pull/61502) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Less contention during dynamic resize of the filesystem cache. [#61524](https://github.com/ClickHouse/ClickHouse/pull/61524) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Disallow the sharded mode of the StorageS3 queue, because it will be rewritten. [#61537](https://github.com/ClickHouse/ClickHouse/pull/61537) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fixed typo: from `use_leagcy_max_level` to `use_legacy_max_level`. [#61545](https://github.com/ClickHouse/ClickHouse/pull/61545) ([William Schoeffel](https://github.com/wiledusc)).
* Remove some duplicate entries in `system.blob_storage_log`. [#61622](https://github.com/ClickHouse/ClickHouse/pull/61622) ([YenchangChan](https://github.com/YenchangChan)).
* Added the `current_user` function as a compatibility alias for MySQL. [#61770](https://github.com/ClickHouse/ClickHouse/pull/61770) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Fix inconsistent floating point aggregate function states in mixed x86-64 / ARM clusters. [#60610](https://github.com/ClickHouse/ClickHouse/pull/60610) ([Harry Lee](https://github.com/HarryLeeIBM)).
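Several of the improvements above in one SQL sketch (an editor's illustration; `t` is a hypothetical table, everything else is named in the entries and the results shown in comments are expected, not verified here):

```sql
-- format names are now case-insensitive
SELECT 1 FORMAT jsoneachrow;

-- byteSlice is a new alias of substring
SELECT byteSlice('ClickHouse', 6, 5);    -- 'House'

-- built-in documentation for virtual columns
DESCRIBE TABLE t SETTINGS describe_include_virtual_columns = 1;

-- the previously missing toUInt128OrZero
SELECT toUInt128OrZero('not a number');  -- 0
```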
#### Build/Testing/Packaging Improvement
|
||||
* The real-time query profiler now works on AArch64. In previous versions, it worked only when a program didn't spend time inside a syscall. [#60807](https://github.com/ClickHouse/ClickHouse/pull/60807) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* ClickHouse version has been added to docker labels. Closes [#54224](https://github.com/ClickHouse/ClickHouse/issues/54224). [#60949](https://github.com/ClickHouse/ClickHouse/pull/60949) ([Nikolay Monkov](https://github.com/nikmonkov)).
|
||||
* Upgrade `prqlc` to 0.11.3. [#60616](https://github.com/ClickHouse/ClickHouse/pull/60616) ([Maximilian Roos](https://github.com/max-sixty)).
|
||||
* Add generic query text fuzzer in `clickhouse-local`. [#61508](https://github.com/ClickHouse/ClickHouse/pull/61508) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
|
||||
#### Bug Fix (user-visible misbehavior in an official stable release)
|
||||
* Fix finished_mutations_to_keep=0 for MergeTree (as docs says 0 is to keep everything) [#60031](https://github.com/ClickHouse/ClickHouse/pull/60031) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Something was wrong with the FINAL optimization, here is how the author describes it: "PartsSplitter invalid ranges for the same part". [#60041](https://github.com/ClickHouse/ClickHouse/pull/60041) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
* Something was wrong with Apache Hive, which is experimental and not supported. [#60262](https://github.com/ClickHouse/ClickHouse/pull/60262) ([shanfengp](https://github.com/Aed-p)).
|
||||
* An improvement for experimental parallel replicas: force reanalysis if parallel replicas changed [#60362](https://github.com/ClickHouse/ClickHouse/pull/60362) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Fix usage of plain metadata type with new disks configuration option [#60396](https://github.com/ClickHouse/ClickHouse/pull/60396) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Try to fix logical error 'Cannot capture column because it has incompatible type' in mapContainsKeyLike [#60451](https://github.com/ClickHouse/ClickHouse/pull/60451) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Avoid calculation of scalar subqueries for CREATE TABLE. [#60464](https://github.com/ClickHouse/ClickHouse/pull/60464) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Fix deadlock in parallel parsing when lots of rows are skipped due to errors [#60516](https://github.com/ClickHouse/ClickHouse/pull/60516) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Something was wrong with experimental KQL (Kusto) support: fix `max_query_size_for_kql_compound_operator`: [#60534](https://github.com/ClickHouse/ClickHouse/pull/60534) ([Yong Wang](https://github.com/kashwy)).
|
||||
* Keeper fix: add timeouts when waiting for commit logs [#60544](https://github.com/ClickHouse/ClickHouse/pull/60544) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Don't output number tips for date types [#60577](https://github.com/ClickHouse/ClickHouse/pull/60577) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Fix reading from MergeTree with non-deterministic functions in filter [#60586](https://github.com/ClickHouse/ClickHouse/pull/60586) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix logical error on bad compatibility setting value type [#60596](https://github.com/ClickHouse/ClickHouse/pull/60596) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* fix(prql): Robust panic handler [#60615](https://github.com/ClickHouse/ClickHouse/pull/60615) ([Maximilian Roos](https://github.com/max-sixty)).
|
||||
* Fix `intDiv` for decimal and date arguments [#60672](https://github.com/ClickHouse/ClickHouse/pull/60672) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
|
||||
* Fix: expand CTE in alter modify query [#60682](https://github.com/ClickHouse/ClickHouse/pull/60682) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||
* Fix system.parts for non-Atomic/Ordinary database engine (i.e. Memory) [#60689](https://github.com/ClickHouse/ClickHouse/pull/60689) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix "Invalid storage definition in metadata file" for parameterized views [#60708](https://github.com/ClickHouse/ClickHouse/pull/60708) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix buffer overflow in CompressionCodecMultiple [#60731](https://github.com/ClickHouse/ClickHouse/pull/60731) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Remove nonsense from SQL/JSON [#60738](https://github.com/ClickHouse/ClickHouse/pull/60738) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Remove wrong assertion in aggregate function quantileGK [#60740](https://github.com/ClickHouse/ClickHouse/pull/60740) ([李扬](https://github.com/taiyang-li)).
|
||||
* Fix insert-select + insert_deduplication_token bug by setting streams to 1 [#60745](https://github.com/ClickHouse/ClickHouse/pull/60745) ([Jordi Villar](https://github.com/jrdi)).
|
||||
* Prevent setting custom metadata headers on unsupported multipart upload operations [#60748](https://github.com/ClickHouse/ClickHouse/pull/60748) ([Francisco J. Jurado Moreno](https://github.com/Beetelbrox)).
|
||||
* Fix toStartOfInterval [#60763](https://github.com/ClickHouse/ClickHouse/pull/60763) ([Andrey Zvonov](https://github.com/zvonand)).
|
||||
* Fix crash in arrayEnumerateRanked [#60764](https://github.com/ClickHouse/ClickHouse/pull/60764) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Fix crash when using input() in INSERT SELECT JOIN [#60765](https://github.com/ClickHouse/ClickHouse/pull/60765) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix crash with different allow_experimental_analyzer value in subqueries [#60770](https://github.com/ClickHouse/ClickHouse/pull/60770) ([Dmitry Novik](https://github.com/novikd)).
|
||||
* Remove recursion when reading from S3 [#60849](https://github.com/ClickHouse/ClickHouse/pull/60849) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Fix possible stuck on error in HashedDictionaryParallelLoader [#60926](https://github.com/ClickHouse/ClickHouse/pull/60926) ([vdimir](https://github.com/vdimir)).
|
||||
* Fix async RESTORE with Replicated database (experimental feature) [#60934](https://github.com/ClickHouse/ClickHouse/pull/60934) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix deadlock in async inserts to `Log` tables via native protocol [#61055](https://github.com/ClickHouse/ClickHouse/pull/61055) ([Anton Popov](https://github.com/CurtizJ)).
* Fix lazy execution of default argument in dictGetOrDefault for RangeHashedDictionary [#61196](https://github.com/ClickHouse/ClickHouse/pull/61196) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix multiple bugs in groupArraySorted [#61203](https://github.com/ClickHouse/ClickHouse/pull/61203) ([Raúl Marín](https://github.com/Algunenano)).
* Fix Keeper reconfig for standalone binary [#61233](https://github.com/ClickHouse/ClickHouse/pull/61233) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix usage of session_token in S3 engine [#61234](https://github.com/ClickHouse/ClickHouse/pull/61234) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix possible incorrect result of aggregate function `uniqExact` [#61257](https://github.com/ClickHouse/ClickHouse/pull/61257) ([Anton Popov](https://github.com/CurtizJ)).
* Fix bugs in `SHOW DATABASE` [#61269](https://github.com/ClickHouse/ClickHouse/pull/61269) ([Raúl Marín](https://github.com/Algunenano)).
* Fix logical error in RabbitMQ storage with MATERIALIZED columns [#61320](https://github.com/ClickHouse/ClickHouse/pull/61320) ([vdimir](https://github.com/vdimir)).
* Fix CREATE OR REPLACE DICTIONARY [#61356](https://github.com/ClickHouse/ClickHouse/pull/61356) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix ATTACH query with external ON CLUSTER [#61365](https://github.com/ClickHouse/ClickHouse/pull/61365) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix consecutive keys optimization for nullable keys [#61393](https://github.com/ClickHouse/ClickHouse/pull/61393) ([Anton Popov](https://github.com/CurtizJ)).
* Fix issue of actions DAG split [#61458](https://github.com/ClickHouse/ClickHouse/pull/61458) ([Raúl Marín](https://github.com/Algunenano)).
* Fix finishing a failed RESTORE [#61466](https://github.com/ClickHouse/ClickHouse/pull/61466) ([Vitaly Baranov](https://github.com/vitlibar)).
* Disable async_insert_use_adaptive_busy_timeout correctly with compatibility settings [#61468](https://github.com/ClickHouse/ClickHouse/pull/61468) ([Raúl Marín](https://github.com/Algunenano)).
* Allow queuing in restore pool [#61475](https://github.com/ClickHouse/ClickHouse/pull/61475) ([Nikita Taranov](https://github.com/nickitat)).
* Fix an inconsistency when reading system.parts using UUID [#61479](https://github.com/ClickHouse/ClickHouse/pull/61479) ([Dan Wu](https://github.com/wudanzy)).
* Fix ALTER QUERY MODIFY SQL SECURITY [#61480](https://github.com/ClickHouse/ClickHouse/pull/61480) ([pufit](https://github.com/pufit)).
* Fix a crash in window view (experimental feature) [#61526](https://github.com/ClickHouse/ClickHouse/pull/61526) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix `repeat` with non-native integers [#61527](https://github.com/ClickHouse/ClickHouse/pull/61527) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix client's `-s` argument [#61530](https://github.com/ClickHouse/ClickHouse/pull/61530) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix crash in arrayPartialReverseSort [#61539](https://github.com/ClickHouse/ClickHouse/pull/61539) ([Raúl Marín](https://github.com/Algunenano)).
* Fix string search with const position [#61547](https://github.com/ClickHouse/ClickHouse/pull/61547) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix `addDays` causing an error when used with `DateTime64` [#61561](https://github.com/ClickHouse/ClickHouse/pull/61561) ([Shuai li](https://github.com/loneylee)).
* Disallow LowCardinality input type for JSONExtract [#61617](https://github.com/ClickHouse/ClickHouse/pull/61617) ([Julia Kartseva](https://github.com/jkartseva)).
* Fix `system.part_log` for async insert with deduplication [#61620](https://github.com/ClickHouse/ClickHouse/pull/61620) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix a `Non-ready set` exception for system.parts [#61666](https://github.com/ClickHouse/ClickHouse/pull/61666) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix actual_part_name for REPLACE_RANGE (`Entry actual part isn't empty yet`) [#61675](https://github.com/ClickHouse/ClickHouse/pull/61675) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix a sanitizer report in `multiSearchAllPositionsCaseInsensitiveUTF8` for incorrect UTF-8 [#61749](https://github.com/ClickHouse/ClickHouse/pull/61749) ([pufit](https://github.com/pufit)).
* Fix the RANGE frame not being supported for Nullable columns [#61766](https://github.com/ClickHouse/ClickHouse/pull/61766) ([YuanLiu](https://github.com/ditgittube)).
### <a id="242"></a> ClickHouse release 24.2, 2024-02-29
#### Backward Incompatible Change
@@ -56,16 +56,21 @@ option(ENABLE_CHECK_HEAVY_BUILDS "Don't allow C++ translation units to compile t
if (ENABLE_CHECK_HEAVY_BUILDS)
# set DATA (since RSS does not work since 2.6.x+) to 5G
set (RLIMIT_DATA 5000000000)
# set VIRT (RLIMIT_AS) to 10G (DATA*10)
# set VIRT (RLIMIT_AS) to 10G (DATA*2)
set (RLIMIT_AS 10000000000)
# set CPU time limit to 1000 seconds
set (RLIMIT_CPU 1000)

# -fsanitize=memory and address are too heavy
if (SANITIZE)
# Sanitizers are too heavy
if (SANITIZE OR SANITIZE_COVERAGE OR WITH_COVERAGE)
set (RLIMIT_DATA 10000000000) # 10G
endif()

# For some files currently building RISCV64 might be too slow. TODO: Improve compilation times per file
if (ARCH_RISCV64)
set (RLIMIT_CPU 1800)
endif()

set (CMAKE_CXX_COMPILER_LAUNCHER prlimit --as=${RLIMIT_AS} --data=${RLIMIT_DATA} --cpu=${RLIMIT_CPU} ${CMAKE_CXX_COMPILER_LAUNCHER})
endif ()

@@ -102,6 +107,8 @@ if (ENABLE_FUZZING)

# For codegen_select_fuzzer
set (ENABLE_PROTOBUF 1)

add_compile_definitions(FUZZING_MODE=1)
endif()

# Global libraries
@@ -110,11 +117,6 @@ endif()
# - sanitize.cmake
add_library(global-libs INTERFACE)

# We don't want to instrument everything with fuzzer, but only specific targets (see below),
# also, since we build our own llvm, we specifically don't want to instrument
# libFuzzer library itself - it would result in infinite recursion
#include (cmake/fuzzer.cmake)

include (cmake/sanitize.cmake)

option(ENABLE_COLORED_BUILD "Enable colors in compiler output" ON)
@@ -554,7 +556,9 @@ if (ENABLE_RUST)
endif()
endif()

if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND NOT SANITIZE AND NOT SANITIZE_COVERAGE AND OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64))
if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO"
AND NOT SANITIZE AND NOT SANITIZE_COVERAGE AND NOT ENABLE_FUZZING
AND OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64))
set(CHECK_LARGE_OBJECT_SIZES_DEFAULT ON)
else ()
set(CHECK_LARGE_OBJECT_SIZES_DEFAULT OFF)
@@ -577,9 +581,6 @@ if (FUZZER)
if (NOT(target_type STREQUAL "INTERFACE_LIBRARY" OR target_type STREQUAL "UTILITY"))
target_compile_options(${target} PRIVATE "-fsanitize=fuzzer-no-link")
endif()
# clickhouse fuzzer isn't working correctly
# initial PR https://github.com/ClickHouse/ClickHouse/pull/27526
#if (target MATCHES ".+_fuzzer" OR target STREQUAL "clickhouse")
if (target_type STREQUAL "EXECUTABLE" AND target MATCHES ".+_fuzzer")
message(STATUS "${target} instrumented with fuzzer")
target_link_libraries(${target} PUBLIC ch_contrib::fuzzer)
@@ -589,6 +590,12 @@ if (FUZZER)
get_target_property(target_bin_dir ${target} BINARY_DIR)
add_custom_command(TARGET fuzzers POST_BUILD COMMAND mv "${target_bin_dir}/${target_bin_name}" "${CMAKE_CURRENT_BINARY_DIR}/programs/" VERBATIM)
endif()
if (target STREQUAL "clickhouse")
message(STATUS "${target} instrumented with fuzzer")
target_link_libraries(${target} PUBLIC ch_contrib::fuzzer_no_main)
# Add to fuzzers bundle
add_dependencies(fuzzers ${target})
endif()
endif()
endforeach()
add_custom_command(TARGET fuzzers POST_BUILD COMMAND SRC=${CMAKE_SOURCE_DIR} BIN=${CMAKE_BINARY_DIR} OUT=${CMAKE_BINARY_DIR}/programs ${CMAKE_SOURCE_DIR}/tests/fuzz/build.sh VERBATIM)
@@ -28,7 +28,6 @@ curl https://clickhouse.com/ | sh
* [Slack](https://clickhouse.com/slack) and [Telegram](https://telegram.me/clickhouse_en) allow chatting with ClickHouse users in real-time.
* [Blog](https://clickhouse.com/blog/) contains various ClickHouse-related articles, as well as announcements and reports about events.
* [Code Browser (github.dev)](https://github.dev/ClickHouse/ClickHouse) with syntax highlighting, powered by github.dev.
* [Static Analysis (SonarCloud)](https://sonarcloud.io/project/issues?resolved=false&id=ClickHouse_ClickHouse) proposes C++ quality improvements.
* [Contacts](https://clickhouse.com/company/contact) can help to get your questions answered if there are any.

## Monthly Release & Community Call
@@ -13,18 +13,16 @@ The following versions of ClickHouse server are currently being supported with s

| Version | Supported |
|:-|:-|
| 24.3 | ✔️ |
| 24.2 | ✔️ |
| 24.1 | ✔️ |
| 23.12 | ✔️ |
| 23.11 | ❌ |
| 23.10 | ❌ |
| 23.9 | ❌ |
| 23.* | ❌ |
| 23.8 | ✔️ |
| 23.7 | ❌ |
| 23.6 | ❌ |
| 23.5 | ❌ |
| 23.4 | ❌ |
| 23.3 | ✔️ |
| 23.3 | ❌ |
| 23.2 | ❌ |
| 23.1 | ❌ |
| 22.* | ❌ |
@@ -20,6 +20,7 @@ set (SRCS
getPageSize.cpp
getThreadId.cpp
int8_to_string.cpp
itoa.cpp
JSON.cpp
mremap.cpp
phdr_cache.cpp
@@ -1,8 +1,7 @@
#pragma once

#include <base/strong_typedef.h>
#include <base/extended_types.h>
#include <Common/formatIPv6.h>
#include <base/strong_typedef.h>
#include <Common/memcmpSmall.h>

namespace DB
@@ -62,7 +61,8 @@ namespace std
{
size_t operator()(const DB::IPv6 & x) const
{
return std::hash<std::string_view>{}(std::string_view(reinterpret_cast<const char*>(&x.toUnderType()), IPV6_BINARY_LENGTH));
return std::hash<std::string_view>{}(
std::string_view(reinterpret_cast<const char *>(&x.toUnderType()), sizeof(DB::IPv6::UnderlyingType)));
}
};
@@ -1,7 +1,7 @@
#include "coverage.h"
#include <sys/mman.h>

#pragma GCC diagnostic ignored "-Wreserved-identifier"
#pragma clang diagnostic ignored "-Wreserved-identifier"


/// WITH_COVERAGE enables the default implementation of code coverage,
@@ -50,9 +50,6 @@ std::optional<uint64_t> getCgroupsV2MemoryLimit()

}

/** Returns the size of physical memory (RAM) in bytes.
 * Returns 0 on unsupported platform
 */
uint64_t getMemoryAmountOrZero()
{
int64_t num_pages = sysconf(_SC_PHYS_PAGES);

@@ -2,11 +2,10 @@

#include <cstdint>

/** Returns the size of physical memory (RAM) in bytes.
 * Returns 0 on unsupported platform or if it cannot determine the size of physical memory.
 */
/// Returns the size in bytes of physical memory (RAM) available to the process. The value can
/// be smaller than the total available RAM available to the system due to cgroups settings.
/// Returns 0 on unsupported platform or if it cannot determine the size of physical memory.
uint64_t getMemoryAmountOrZero();

/** Throws exception if it cannot determine the size of physical memory.
 */
/// Throws exception if it cannot determine the size of physical memory.
uint64_t getMemoryAmount();
base/base/itoa.cpp (new file, 503 lines)
@@ -0,0 +1,503 @@
// Based on https://github.com/amdn/itoa and combined with our optimizations
//
//=== itoa.cpp - Fast integer to ascii conversion --*- C++ -*-//
//
// The MIT License (MIT)
// Copyright (c) 2016 Arturo Martin-de-Nicolas
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//===----------------------------------------------------------------------===//

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <type_traits>
#include <base/defines.h>
#include <base/extended_types.h>
#include <base/itoa.h>

namespace
{
template <typename T>
ALWAYS_INLINE inline constexpr T pow10(size_t x)
{
return x ? 10 * pow10<T>(x - 1) : 1;
}

// Division by a power of 10 is implemented using a multiplicative inverse.
// This strength reduction is also done by optimizing compilers, but
// presently the fastest results are produced by using the values
// for the multiplication and the shift as given by the algorithm
// described by Agner Fog in "Optimizing Subroutines in Assembly Language"
//
// http://www.agner.org/optimize/optimizing_assembly.pdf
//
// "Integer division by a constant (all processors)
// A floating point number can be divided by a constant by multiplying
// with the reciprocal. If we want to do the same with integers, we have
// to scale the reciprocal by 2^n and then shift the product to the right
// by n. There are various algorithms for finding a suitable value of n
// and compensating for rounding errors. The algorithm described below
// was invented by Terje Mathisen, Norway, and not published elsewhere."

/// Division by constant is performed by:
/// 1. Adding 1 if needed;
/// 2. Multiplying by another constant;
/// 3. Shifting right by another constant.
template <typename UInt, bool add_, UInt multiplier_, unsigned shift_>
struct Division
{
static constexpr bool add{add_};
static constexpr UInt multiplier{multiplier_};
static constexpr unsigned shift{shift_};
};

/// Select a type with appropriate number of bytes from the list of types.
/// First parameter is the number of bytes requested. Then goes a list of types with 1, 2, 4, ... number of bytes.
/// Example: SelectType<4, uint8_t, uint16_t, uint32_t, uint64_t> will select uint32_t.
template <size_t N, typename T, typename... Ts>
struct SelectType
{
using Result = typename SelectType<N / 2, Ts...>::Result;
};

template <typename T, typename... Ts>
struct SelectType<1, T, Ts...>
{
using Result = T;
};
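
/// Illustrative compile-time checks (an editor's sketch for exposition, not part
/// of the original file): requesting 4 bytes halves N twice (4 -> 2 -> 1), so the
/// third type in the list is selected.
static_assert(std::is_same_v<SelectType<4, uint8_t, uint16_t, uint32_t, uint64_t>::Result, uint32_t>);
static_assert(std::is_same_v<SelectType<2, uint8_t, uint16_t, uint32_t>::Result, uint16_t>);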


/// Division by 10^N where N is the size of the type.
template <size_t N>
using DivisionBy10PowN = typename SelectType<
N,
Division<uint8_t, false, 205U, 11>, /// divide by 10
Division<uint16_t, true, 41943U, 22>, /// divide by 100
Division<uint32_t, false, 3518437209U, 45>, /// divide by 10000
Division<uint64_t, false, 12379400392853802749ULL, 90> /// divide by 100000000
>::Result;
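
/// Sketch of the multiply-and-shift trick encoded above (illustrative, not part
/// of the original file): for the 8-bit entry Division<uint8_t, false, 205U, 11>,
/// (205 * x) >> 11 equals x / 10 for every uint8_t value, because 205 / 2048
/// approximates 1/10 closely enough over that range. A quick exhaustive check,
/// done at compile time so it costs nothing at runtime:
constexpr bool divisionBy10ConstantIsExact() /// sketch helper, hypothetical name
{
    for (unsigned x = 0; x < 256; ++x)
        if (((205U * x) >> 11) != x / 10)
            return false;
    return true;
}
static_assert(divisionBy10ConstantIsExact());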

template <size_t N>
using UnsignedOfSize = typename SelectType<N, uint8_t, uint16_t, uint32_t, uint64_t, __uint128_t>::Result;

/// Holds the result of dividing an unsigned N-byte variable by 10^N resulting in
template <size_t N>
struct QuotientAndRemainder
{
UnsignedOfSize<N> quotient; // quotient with fewer than 2*N decimal digits
UnsignedOfSize<N / 2> remainder; // remainder with at most N decimal digits
};

template <size_t N>
QuotientAndRemainder<N> inline split(UnsignedOfSize<N> value)
{
constexpr DivisionBy10PowN<N> division;

UnsignedOfSize<N> quotient = (division.multiplier * (UnsignedOfSize<2 * N>(value) + division.add)) >> division.shift;
UnsignedOfSize<N / 2> remainder = static_cast<UnsignedOfSize<N / 2>>(value - quotient * pow10<UnsignedOfSize<N / 2>>(N));

return {quotient, remainder};
}

ALWAYS_INLINE inline char * outDigit(char * p, uint8_t value)
{
*p = '0' + value;
++p;
return p;
}

// Using a lookup table to convert binary numbers from 0 to 99
// into ascii characters as described by Andrei Alexandrescu in
// https://www.facebook.com/notes/facebook-engineering/three-optimization-tips-for-c/10151361643253920/

const char digits[201] = "00010203040506070809"
"10111213141516171819"
"20212223242526272829"
"30313233343536373839"
"40414243444546474849"
"50515253545556575859"
"60616263646566676869"
"70717273747576777879"
"80818283848586878889"
"90919293949596979899";

ALWAYS_INLINE inline char * outTwoDigits(char * p, uint8_t value)
{
memcpy(p, &digits[value * 2], 2);
p += 2;
return p;
}
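
/// Usage sketch for the table above (illustrative, not part of the original
/// file): formatting 42 copies the two bytes at digits[84..85] ("42") in one
/// memcpy, trading 200 bytes of read-only data for a division and a remainder.
[[maybe_unused]] void outTwoDigitsExample(char * buf)
{
    char * end = outTwoDigits(buf, 42); /// buf now starts with "42"
    (void)end;
}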

namespace convert
{
template <typename UInt, size_t N = sizeof(UInt)>
char * head(char * p, UInt u);
template <typename UInt, size_t N = sizeof(UInt)>
char * tail(char * p, UInt u);

//===----------------------------------------------------------===//
// head: find most significant digit, skip leading zeros
//===----------------------------------------------------------===//

// "x" contains quotient and remainder after division by 10^N
// quotient is less than 10^N
template <size_t N>
ALWAYS_INLINE inline char * head(char * p, QuotientAndRemainder<N> x)
{
p = head(p, UnsignedOfSize<N / 2>(x.quotient));
p = tail(p, x.remainder);
return p;
}

// "u" is less than 10^2*N
template <typename UInt, size_t N>
ALWAYS_INLINE inline char * head(char * p, UInt u)
{
return u < pow10<UnsignedOfSize<N>>(N) ? head(p, UnsignedOfSize<N / 2>(u)) : head<N>(p, split<N>(u));
}

// recursion base case, selected when "u" is one byte
template <>
ALWAYS_INLINE inline char * head<UnsignedOfSize<1>, 1>(char * p, UnsignedOfSize<1> u)
{
return u < 10 ? outDigit(p, u) : outTwoDigits(p, u);
}

//===----------------------------------------------------------===//
// tail: produce all digits including leading zeros
//===----------------------------------------------------------===//

// recursive step, "u" is less than 10^2*N
template <typename UInt, size_t N>
ALWAYS_INLINE inline char * tail(char * p, UInt u)
{
QuotientAndRemainder<N> x = split<N>(u);
p = tail(p, UnsignedOfSize<N / 2>(x.quotient));
p = tail(p, x.remainder);
return p;
}

// recursion base case, selected when "u" is one byte
template <>
ALWAYS_INLINE inline char * tail<UnsignedOfSize<1>, 1>(char * p, UnsignedOfSize<1> u)
{
return outTwoDigits(p, u);
}

//===----------------------------------------------------------===//
// large values are >= 10^2*N
// where x contains quotient and remainder after division by 10^N
//===----------------------------------------------------------===//
template <size_t N>
ALWAYS_INLINE inline char * large(char * p, QuotientAndRemainder<N> x)
{
QuotientAndRemainder<N> y = split<N>(x.quotient);
p = head(p, UnsignedOfSize<N / 2>(y.quotient));
p = tail(p, y.remainder);
p = tail(p, x.remainder);
return p;
}

//===----------------------------------------------------------===//
// handle values of "u" that might be >= 10^2*N
// where N is the size of "u" in bytes
//===----------------------------------------------------------===//
template <typename UInt, size_t N = sizeof(UInt)>
ALWAYS_INLINE inline char * uitoa(char * p, UInt u)
{
if (u < pow10<UnsignedOfSize<N>>(N))
return head(p, UnsignedOfSize<N / 2>(u));
QuotientAndRemainder<N> x = split<N>(u);

return u < pow10<UnsignedOfSize<N>>(2 * N) ? head<N>(p, x) : large<N>(p, x);
}

// selected when "u" is one byte
template <>
ALWAYS_INLINE inline char * uitoa<UnsignedOfSize<1>, 1>(char * p, UnsignedOfSize<1> u)
{
if (u < 10)
return outDigit(p, u);
else if (u < 100)
return outTwoDigits(p, u);
else
{
p = outDigit(p, u / 100);
p = outTwoDigits(p, u % 100);
return p;
}
}

//===----------------------------------------------------------===//
// handle unsigned and signed integral operands
//===----------------------------------------------------------===//

// itoa: handle unsigned integral operands (selected by SFINAE)
template <typename U, std::enable_if_t<!std::is_signed_v<U> && std::is_integral_v<U>> * = nullptr>
ALWAYS_INLINE inline char * itoa(U u, char * p)
{
return convert::uitoa(p, u);
}

// itoa: handle signed integral operands (selected by SFINAE)
template <typename I, size_t N = sizeof(I), std::enable_if_t<std::is_signed_v<I> && std::is_integral_v<I>> * = nullptr>
ALWAYS_INLINE inline char * itoa(I i, char * p)
{
// Need "mask" to be filled with a copy of the sign bit.
// If "i" is a negative value, then the result of "operator >>"
// is implementation-defined, though usually it is an arithmetic
// right shift that replicates the sign bit.
// Use a conditional expression to be portable,
// a good optimizing compiler generates an arithmetic right shift
// and avoids the conditional branch.
UnsignedOfSize<N> mask = i < 0 ? ~UnsignedOfSize<N>(0) : 0;
// Now get the absolute value of "i" and cast to unsigned type UnsignedOfSize<N>.
// Cannot use std::abs() because the result is undefined
// in 2's complement systems for the most-negative value.
// Want to avoid conditional branch for performance reasons since
// CPU branch prediction will be ineffective when negative values
// occur randomly.
// Let "u" be "i" cast to unsigned type UnsignedOfSize<N>.
// Subtract "u" from 2*u if "i" is positive or 0 if "i" is negative.
// This yields the absolute value with the desired type without
// using a conditional branch and without invoking undefined or
// implementation defined behavior:
UnsignedOfSize<N> u = ((2 * UnsignedOfSize<N>(i)) & ~mask) - UnsignedOfSize<N>(i);
// Unconditionally store a minus sign when producing digits
// in a forward direction and increment the pointer only if
// the value is in fact negative.
// This avoids a conditional branch and is safe because we will
// always produce at least one digit and it will overwrite the
// minus sign when the value is not negative.
*p = '-';
p += (mask & 1);
p = convert::uitoa(p, u);
return p;
}
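
// Worked example of the branch-free sign handling above (illustrative, added
// for exposition). For a 32-bit i = -42:
//   mask = 0xFFFFFFFF, so ~mask = 0 and
//   u = ((2 * 0xFFFFFFD6) & 0) - 0xFFFFFFD6 = 0 - 0xFFFFFFD6 = 42 (mod 2^32),
//   and p += (mask & 1) keeps the unconditionally written '-'.
// For i = +42: mask = 0, u = (2 * 42) - 42 = 42, and the '-' is overwritten
// by the first digit.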
}

const uint64_t max_multiple_of_hundred_that_fits_in_64_bits = 1'00'00'00'00'00'00'00'00'00ull;
const int max_multiple_of_hundred_blocks = 9;
static_assert(max_multiple_of_hundred_that_fits_in_64_bits % 100 == 0);

ALWAYS_INLINE inline char * writeUIntText(UInt128 _x, char * p)
{
/// If the highest 64bit item is empty, we can print just the lowest item as u64
if (_x.items[UInt128::_impl::little(1)] == 0)
return convert::itoa(_x.items[UInt128::_impl::little(0)], p);

/// Doing operations using __int128 is faster and we already rely on this feature
using T = unsigned __int128;
T x = (T(_x.items[UInt128::_impl::little(1)]) << 64) + T(_x.items[UInt128::_impl::little(0)]);

/// We are going to accumulate blocks of 2 digits to print until the number is small enough to be printed as u64
/// To do this we could do: x / 100, x % 100
/// But these would mean doing many iterations with long integers, so instead we divide by a much longer integer
/// multiple of 100 (100^9) and then get the blocks out of it (as u64)
/// Once we reach u64::max we can stop and use the fast method to print that in the front
static const T large_divisor = max_multiple_of_hundred_that_fits_in_64_bits;
static const T largest_uint64 = std::numeric_limits<uint64_t>::max();
uint8_t two_values[20] = {0}; // 39 Max characters / 2

int current_block = 0;
while (x > largest_uint64)
{
uint64_t u64_remainder = uint64_t(x % large_divisor);
x /= large_divisor;

int pos = current_block;
while (u64_remainder)
{
two_values[pos] = uint8_t(u64_remainder % 100);
pos++;
u64_remainder /= 100;
}
current_block += max_multiple_of_hundred_blocks;
}

char * highest_part_print = convert::itoa(uint64_t(x), p);
for (int i = 0; i < current_block; i++)
{
outTwoDigits(highest_part_print, two_values[current_block - 1 - i]);
highest_part_print += 2;
}

return highest_part_print;
}
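
/// Worked example of the block strategy above (illustrative, added for
/// exposition). For the 29-digit value x = 12345678901234567890123456789:
///   x / 10^18 = 12345678901          (fits in u64, printed with convert::itoa)
///   x % 10^18 = 234567890123456789   (peeled off as nine two-digit blocks
///                                     89, 67, 45, 23, 01, 89, 67, 45, 23,
///                                     stored little-endian and printed in
///                                     reverse after the high part)
/// so a single wide division by 10^18 replaces nine wide divisions by 100.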

ALWAYS_INLINE inline char * writeUIntText(UInt256 _x, char * p)
{
/// If possible, treat it as a smaller integer as they are much faster to print
if (_x.items[UInt256::_impl::little(3)] == 0 && _x.items[UInt256::_impl::little(2)] == 0)
return writeUIntText(UInt128{_x.items[UInt256::_impl::little(0)], _x.items[UInt256::_impl::little(1)]}, p);

/// If available (x86) we transform from our custom class to _BitInt(256) which has better support in the compiler
/// and produces better code
using T =
#if defined(__x86_64__)
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wbit-int-extension"
unsigned _BitInt(256)
# pragma clang diagnostic pop
#else
UInt256
#endif
;

#if defined(__x86_64__)
T x = (T(_x.items[UInt256::_impl::little(3)]) << 192) + (T(_x.items[UInt256::_impl::little(2)]) << 128)
+ (T(_x.items[UInt256::_impl::little(1)]) << 64) + T(_x.items[UInt256::_impl::little(0)]);
#else
T x = _x;
#endif

/// Similar to writeUIntText(UInt128) only that in this case we will stop as soon as we reach the largest u128
/// and switch to that function
uint8_t two_values[39] = {0}; // 78 Max characters / 2
int current_pos = 0;

static const T large_divisor = max_multiple_of_hundred_that_fits_in_64_bits;
static const T largest_uint128 = T(std::numeric_limits<uint64_t>::max()) << 64 | T(std::numeric_limits<uint64_t>::max());

while (x > largest_uint128)
{
uint64_t u64_remainder = uint64_t(x % large_divisor);
x /= large_divisor;

int pos = current_pos;
while (u64_remainder)
{
two_values[pos] = uint8_t(u64_remainder % 100);
pos++;
u64_remainder /= 100;
}
current_pos += max_multiple_of_hundred_blocks;
}

#if defined(__x86_64__)
UInt128 pending{uint64_t(x), uint64_t(x >> 64)};
#else
UInt128 pending{x.items[UInt256::_impl::little(0)], x.items[UInt256::_impl::little(1)]};
#endif

char * highest_part_print = writeUIntText(pending, p);
for (int i = 0; i < current_pos; i++)
{
outTwoDigits(highest_part_print, two_values[current_pos - 1 - i]);
highest_part_print += 2;
}

return highest_part_print;
}

ALWAYS_INLINE inline char * writeLeadingMinus(char * pos)
{
*pos = '-';
return pos + 1;
}

template <typename T>
ALWAYS_INLINE inline char * writeSIntText(T x, char * pos)
{
static_assert(std::is_same_v<T, Int128> || std::is_same_v<T, Int256>);

using UnsignedT = make_unsigned_t<T>;
static constexpr T min_int = UnsignedT(1) << (sizeof(T) * 8 - 1);

if (unlikely(x == min_int))
{
if constexpr (std::is_same_v<T, Int128>)
{
const char * res = "-170141183460469231731687303715884105728";
memcpy(pos, res, strlen(res));
return pos + strlen(res);
}
else if constexpr (std::is_same_v<T, Int256>)
{
const char * res = "-57896044618658097711785492504343953926634992332820282019728792003956564819968";
memcpy(pos, res, strlen(res));
return pos + strlen(res);
}
}

if (x < 0)
{
x = -x;
pos = writeLeadingMinus(pos);
}
return writeUIntText(UnsignedT(x), pos);
}
}

char * itoa(UInt8 i, char * p)
{
return convert::itoa(uint8_t(i), p);
}

char * itoa(Int8 i, char * p)
{
return convert::itoa(int8_t(i), p);
}

char * itoa(UInt128 i, char * p)
{
return writeUIntText(i, p);
}

char * itoa(Int128 i, char * p)
{
return writeSIntText(i, p);
}

char * itoa(UInt256 i, char * p)
{
return writeUIntText(i, p);
}

char * itoa(Int256 i, char * p)
{
return writeSIntText(i, p);
}

#define DEFAULT_ITOA(T) \
char * itoa(T i, char * p) \
{ \
    return convert::itoa(i, p); \
}

#define FOR_MISSING_INTEGER_TYPES(M) \
    M(uint8_t) \
    M(UInt16) \
    M(UInt32) \
    M(UInt64) \
    M(int8_t) \
    M(Int16) \
    M(Int32) \
    M(Int64)

FOR_MISSING_INTEGER_TYPES(DEFAULT_ITOA)
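
/// What a single expansion of the X-macro above produces (illustrative):
///
///     char * itoa(UInt16 i, char * p)
///     {
///         return convert::itoa(i, p);
///     }
///
/// i.e. each remaining plain integer type gets a thin out-of-line wrapper over
/// the header-declared itoa overloads, keeping the templates private to this
/// translation unit.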

#if defined(OS_DARWIN)
DEFAULT_ITOA(unsigned long)
DEFAULT_ITOA(long)
#endif

#undef FOR_MISSING_INTEGER_TYPES
#undef DEFAULT_ITOA
base/base/itoa.h (462 lines shown)
@@ -1,446 +1,30 @@
#pragma once

// Based on https://github.com/amdn/itoa and combined with our optimizations
//
//=== itoa.h - Fast integer to ascii conversion --*- C++ -*-//
//
// The MIT License (MIT)
// Copyright (c) 2016 Arturo Martin-de-Nicolas
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//===----------------------------------------------------------------------===//

#include <cstdint>
#include <cstddef>
#include <cstring>
#include <type_traits>
#include <base/extended_types.h>

#define FOR_INTEGER_TYPES(M) \
    M(uint8_t) \
    M(UInt8) \
    M(UInt16) \
    M(UInt32) \
    M(UInt64) \
    M(UInt128) \
    M(UInt256) \
    M(int8_t) \
    M(Int8) \
    M(Int16) \
    M(Int32) \
    M(Int64) \
    M(Int128) \
    M(Int256)

template <typename T>
inline int digits10(T x)
{
if (x < 10ULL)
return 1;
if (x < 100ULL)
return 2;
if (x < 1000ULL)
return 3;
#define INSTANTIATION(T) char * itoa(T i, char * p);
FOR_INTEGER_TYPES(INSTANTIATION)

if (x < 1000000000000ULL)
{
if (x < 100000000ULL)
{
if (x < 1000000ULL)
{
if (x < 10000ULL)
return 4;
else
return 5 + (x >= 100000ULL);
}
#if defined(OS_DARWIN)
INSTANTIATION(unsigned long)
INSTANTIATION(long)
#endif

return 7 + (x >= 10000000ULL);
}

if (x < 10000000000ULL)
return 9 + (x >= 1000000000ULL);

return 11 + (x >= 100000000000ULL);
}

return 12 + digits10(x / 1000000000000ULL);
}
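
/// Boundary sketch for digits10 (illustrative, not part of the original header):
///
///     assert(digits10(9ULL) == 1);                  // x < 10
///     assert(digits10(10ULL) == 2);                 // 10 <= x < 100
///     assert(digits10(999'999'999'999ULL) == 12);   // largest value handled without recursion
///     assert(digits10(1'000'000'000'000ULL) == 13); // 12 + digits10(1)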

namespace impl
{

template <typename T>
static constexpr T pow10(size_t x)
{
return x ? 10 * pow10<T>(x - 1) : 1;
}

// Division by a power of 10 is implemented using a multiplicative inverse.
// This strength reduction is also done by optimizing compilers, but
// presently the fastest results are produced by using the values
// for the multiplication and the shift as given by the algorithm
// described by Agner Fog in "Optimizing Subroutines in Assembly Language"
//
// http://www.agner.org/optimize/optimizing_assembly.pdf
//
// "Integer division by a constant (all processors)
// A floating point number can be divided by a constant by multiplying
// with the reciprocal. If we want to do the same with integers, we have
// to scale the reciprocal by 2^n and then shift the product to the right
// by n. There are various algorithms for finding a suitable value of n
// and compensating for rounding errors. The algorithm described below
// was invented by Terje Mathisen, Norway, and not published elsewhere."

/// Division by constant is performed by:
/// 1. Adding 1 if needed;
/// 2. Multiplying by another constant;
/// 3. Shifting right by another constant.
template <typename UInt, bool add_, UInt multiplier_, unsigned shift_>
struct Division
{
static constexpr bool add{add_};
static constexpr UInt multiplier{multiplier_};
static constexpr unsigned shift{shift_};
};

/// Select a type with appropriate number of bytes from the list of types.
/// First parameter is the number of bytes requested. Then goes a list of types with 1, 2, 4, ... number of bytes.
/// Example: SelectType<4, uint8_t, uint16_t, uint32_t, uint64_t> will select uint32_t.
template <size_t N, typename T, typename... Ts>
struct SelectType
{
using Result = typename SelectType<N / 2, Ts...>::Result;
};

template <typename T, typename... Ts>
struct SelectType<1, T, Ts...>
{
using Result = T;
};


/// Division by 10^N where N is the size of the type.
template <size_t N>
using DivisionBy10PowN = typename SelectType
<
N,
Division<uint8_t, false, 205U, 11>, /// divide by 10
Division<uint16_t, true, 41943U, 22>, /// divide by 100
Division<uint32_t, false, 3518437209U, 45>, /// divide by 10000
Division<uint64_t, false, 12379400392853802749ULL, 90> /// divide by 100000000
>::Result;

template <size_t N>
using UnsignedOfSize = typename SelectType
<
N,
uint8_t,
uint16_t,
uint32_t,
uint64_t,
__uint128_t
>::Result;

/// Holds the result of dividing an unsigned N-byte variable by 10^N resulting in
template <size_t N>
struct QuotientAndRemainder
{
UnsignedOfSize<N> quotient; // quotient with fewer than 2*N decimal digits
UnsignedOfSize<N / 2> remainder; // remainder with at most N decimal digits
};

template <size_t N>
QuotientAndRemainder<N> static inline split(UnsignedOfSize<N> value)
{
constexpr DivisionBy10PowN<N> division;

UnsignedOfSize<N> quotient = (division.multiplier * (UnsignedOfSize<2 * N>(value) + division.add)) >> division.shift;
UnsignedOfSize<N / 2> remainder = static_cast<UnsignedOfSize<N / 2>>(value - quotient * pow10<UnsignedOfSize<N / 2>>(N));

return {quotient, remainder};
}


static inline char * outDigit(char * p, uint8_t value)
{
*p = '0' + value;
++p;
return p;
}

// Using a lookup table to convert binary numbers from 0 to 99
// into ascii characters as described by Andrei Alexandrescu in
// https://www.facebook.com/notes/facebook-engineering/three-optimization-tips-for-c/10151361643253920/

static const char digits[201] = "00010203040506070809"
"10111213141516171819"
"20212223242526272829"
"30313233343536373839"
"40414243444546474849"
"50515253545556575859"
"60616263646566676869"
"70717273747576777879"
"80818283848586878889"
"90919293949596979899";

static inline char * outTwoDigits(char * p, uint8_t value)
{
memcpy(p, &digits[value * 2], 2);
p += 2;
return p;
}


namespace convert
{
template <typename UInt, size_t N = sizeof(UInt)> static char * head(char * p, UInt u);
template <typename UInt, size_t N = sizeof(UInt)> static char * tail(char * p, UInt u);

//===----------------------------------------------------------===//
// head: find most significant digit, skip leading zeros
//===----------------------------------------------------------===//

// "x" contains quotient and remainder after division by 10^N
// quotient is less than 10^N
template <size_t N>
static inline char * head(char * p, QuotientAndRemainder<N> x)
{
p = head(p, UnsignedOfSize<N / 2>(x.quotient));
p = tail(p, x.remainder);
return p;
}

// "u" is less than 10^2*N
template <typename UInt, size_t N>
static inline char * head(char * p, UInt u)
{
return u < pow10<UnsignedOfSize<N>>(N)
? head(p, UnsignedOfSize<N / 2>(u))
: head<N>(p, split<N>(u));
}

// recursion base case, selected when "u" is one byte
template <>
inline char * head<UnsignedOfSize<1>, 1>(char * p, UnsignedOfSize<1> u)
{
return u < 10
? outDigit(p, u)
: outTwoDigits(p, u);
}

//===----------------------------------------------------------===//
// tail: produce all digits including leading zeros
//===----------------------------------------------------------===//

// recursive step, "u" is less than 10^2*N
template <typename UInt, size_t N>
static inline char * tail(char * p, UInt u)
{
QuotientAndRemainder<N> x = split<N>(u);
p = tail(p, UnsignedOfSize<N / 2>(x.quotient));
p = tail(p, x.remainder);
return p;
}

// recursion base case, selected when "u" is one byte
template <>
inline char * tail<UnsignedOfSize<1>, 1>(char * p, UnsignedOfSize<1> u)
{
return outTwoDigits(p, u);
}

//===----------------------------------------------------------===//
// large values are >= 10^2*N
// where x contains quotient and remainder after division by 10^N
//===----------------------------------------------------------===//

template <size_t N>
static inline char * large(char * p, QuotientAndRemainder<N> x)
{
QuotientAndRemainder<N> y = split<N>(x.quotient);
p = head(p, UnsignedOfSize<N / 2>(y.quotient));
p = tail(p, y.remainder);
p = tail(p, x.remainder);
return p;
}

//===----------------------------------------------------------===//
// handle values of "u" that might be >= 10^2*N
// where N is the size of "u" in bytes
//===----------------------------------------------------------===//

template <typename UInt, size_t N = sizeof(UInt)>
static inline char * uitoa(char * p, UInt u)
{
if (u < pow10<UnsignedOfSize<N>>(N))
return head(p, UnsignedOfSize<N / 2>(u));
QuotientAndRemainder<N> x = split<N>(u);

return u < pow10<UnsignedOfSize<N>>(2 * N)
? head<N>(p, x)
: large<N>(p, x);
}

// selected when "u" is one byte
template <>
inline char * uitoa<UnsignedOfSize<1>, 1>(char * p, UnsignedOfSize<1> u)
{
if (u < 10)
return outDigit(p, u);
else if (u < 100)
return outTwoDigits(p, u);
else
{
p = outDigit(p, u / 100);
p = outTwoDigits(p, u % 100);
return p;
}
}

//===----------------------------------------------------------===//
// handle unsigned and signed integral operands
//===----------------------------------------------------------===//

// itoa: handle unsigned integral operands (selected by SFINAE)
template <typename U, std::enable_if_t<!std::is_signed_v<U> && std::is_integral_v<U>> * = nullptr>
static inline char * itoa(U u, char * p)
{
return convert::uitoa(p, u);
}

// itoa: handle signed integral operands (selected by SFINAE)
template <typename I, size_t N = sizeof(I), std::enable_if_t<std::is_signed_v<I> && std::is_integral_v<I>> * = nullptr>
static inline char * itoa(I i, char * p)
{
// Need "mask" to be filled with a copy of the sign bit.
// If "i" is a negative value, then the result of "operator >>"
// is implementation-defined, though usually it is an arithmetic
// right shift that replicates the sign bit.
// Use a conditional expression to be portable,
// a good optimizing compiler generates an arithmetic right shift
// and avoids the conditional branch.
UnsignedOfSize<N> mask = i < 0 ? ~UnsignedOfSize<N>(0) : 0;
// Now get the absolute value of "i" and cast to unsigned type UnsignedOfSize<N>.
// Cannot use std::abs() because the result is undefined
// in 2's complement systems for the most-negative value.
// Want to avoid conditional branch for performance reasons since
// CPU branch prediction will be ineffective when negative values
// occur randomly.
// Let "u" be "i" cast to unsigned type UnsignedOfSize<N>.
// Subtract "u" from 2*u if "i" is positive or 0 if "i" is negative.
// This yields the absolute value with the desired type without
// using a conditional branch and without invoking undefined or
// implementation defined behavior:
UnsignedOfSize<N> u = ((2 * UnsignedOfSize<N>(i)) & ~mask) - UnsignedOfSize<N>(i);
// Unconditionally store a minus sign when producing digits
// in a forward direction and increment the pointer only if
// the value is in fact negative.
// This avoids a conditional branch and is safe because we will
// always produce at least one digit and it will overwrite the
// minus sign when the value is not negative.
*p = '-';
p += (mask & 1);
p = convert::uitoa(p, u);
return p;
}
}


template <typename T>
static inline char * writeUIntText(T x, char * p)
{
static_assert(is_unsigned_v<T>);

int len = digits10(x);
auto * pp = p + len;
while (x >= 100)
{
const auto i = x % 100;
x /= 100;
pp -= 2;
outTwoDigits(pp, i);
}
if (x < 10)
*p = '0' + x;
else
outTwoDigits(p, x);
return p + len;
}

static inline char * writeLeadingMinus(char * pos)
{
*pos = '-';
return pos + 1;
}

template <typename T>
static inline char * writeSIntText(T x, char * pos)
{
static_assert(std::is_same_v<T, Int128> || std::is_same_v<T, Int256>);

using UnsignedT = make_unsigned_t<T>;
static constexpr T min_int = UnsignedT(1) << (sizeof(T) * 8 - 1);

if (unlikely(x == min_int))
{
if constexpr (std::is_same_v<T, Int128>)
{
const char * res = "-170141183460469231731687303715884105728";
memcpy(pos, res, strlen(res));
return pos + strlen(res);
}
else if constexpr (std::is_same_v<T, Int256>)
{
const char * res = "-57896044618658097711785492504343953926634992332820282019728792003956564819968";
memcpy(pos, res, strlen(res));
return pos + strlen(res);
}
}

if (x < 0)
{
x = -x;
pos = writeLeadingMinus(pos);
}
return writeUIntText(UnsignedT(x), pos);
}

}

template <typename I>
char * itoa(I i, char * p)
{
return impl::convert::itoa(i, p);
}

template <>
inline char * itoa(char8_t i, char * p)
{
return impl::convert::itoa(uint8_t(i), p);
}

template <>
inline char * itoa(UInt128 i, char * p)
{
return impl::writeUIntText(i, p);
}

template <>
inline char * itoa(Int128 i, char * p)
{
return impl::writeSIntText(i, p);
}

template <>
inline char * itoa(UInt256 i, char * p)
{
return impl::writeUIntText(i, p);
}

template <>
inline char * itoa(Int256 i, char * p)
{
return impl::writeSIntText(i, p);
}
#undef FOR_INTEGER_TYPES
#undef INSTANTIATION
@@ -59,8 +59,8 @@ using ComparatorWrapper = Comparator;

#endif

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wold-style-cast"

#include <miniselect/floyd_rivest_select.h>

@@ -115,7 +115,7 @@ void partial_sort(RandomIt first, RandomIt middle, RandomIt last)
::partial_sort(first, middle, last, comparator());
}

#pragma GCC diagnostic pop
#pragma clang diagnostic pop

template <typename RandomIt, typename Compare>
void sort(RandomIt first, RandomIt last, Compare compare)
base/poco/Foundation/include/Poco/FPEnvironment_SUN.h (new file, 75 lines)
@@ -0,0 +1,75 @@
//
// FPEnvironment_SUN.h
//
// Library: Foundation
// Package: Core
// Module: FPEnvironment
//
// Definitions of class FPEnvironmentImpl for Solaris.
//
// Copyright (c) 2005-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//


#ifndef Foundation_FPEnvironment_SUN_INCLUDED
#define Foundation_FPEnvironment_SUN_INCLUDED


#include <ieeefp.h>
#include "Poco/Foundation.h"


namespace Poco
{


class FPEnvironmentImpl
{
protected:
enum RoundingModeImpl
{
FP_ROUND_DOWNWARD_IMPL = FP_RM,
FP_ROUND_UPWARD_IMPL = FP_RP,
FP_ROUND_TONEAREST_IMPL = FP_RN,
FP_ROUND_TOWARDZERO_IMPL = FP_RZ
};
enum FlagImpl
{
FP_DIVIDE_BY_ZERO_IMPL = FP_X_DZ,
FP_INEXACT_IMPL = FP_X_IMP,
FP_OVERFLOW_IMPL = FP_X_OFL,
FP_UNDERFLOW_IMPL = FP_X_UFL,
FP_INVALID_IMPL = FP_X_INV
};
FPEnvironmentImpl();
FPEnvironmentImpl(const FPEnvironmentImpl & env);
~FPEnvironmentImpl();
FPEnvironmentImpl & operator=(const FPEnvironmentImpl & env);
void keepCurrentImpl();
static void clearFlagsImpl();
static bool isFlagImpl(FlagImpl flag);
static void setRoundingModeImpl(RoundingModeImpl mode);
static RoundingModeImpl getRoundingModeImpl();
static bool isInfiniteImpl(float value);
static bool isInfiniteImpl(double value);
static bool isInfiniteImpl(long double value);
static bool isNaNImpl(float value);
static bool isNaNImpl(double value);
static bool isNaNImpl(long double value);
static float copySignImpl(float target, float source);
static double copySignImpl(double target, double source);
static long double copySignImpl(long double target, long double source);

private:
fp_rnd _rnd;
fp_except _exc;
};


} // namespace Poco


#endif // Foundation_FPEnvironment_SUN_INCLUDED
@@ -281,15 +281,15 @@ void EnvironmentImpl::nodeIdImpl(NodeId& id)
/// #include <sys/ioctl.h>
#if defined(sun) || defined(__sun)
#include <sys/sockio.h>
#include <netdb.h>
#include <net/if.h>
#include <net/if_arp.h>
#endif
/// #include <sys/socket.h>
/// #include <sys/types.h>
/// #include <netinet/in.h>
/// #include <net/if.h>
/// #include <arpa/inet.h>
/// #include <netdb.h>
/// #include <net/if.h>
/// #include <net/if_arp.h>
/// #include <unistd.h>
base/poco/Foundation/src/FPEnvironment_SUN.cpp (new file, 139 lines)
@@ -0,0 +1,139 @@
//
// FPEnvironment_SUN.cpp
//
// Library: Foundation
// Package: Core
// Module: FPEnvironment
//
// Copyright (c) 2005-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//


#include <math.h>
#include "Poco/FPEnvironment_SUN.h"


namespace Poco {


FPEnvironmentImpl::FPEnvironmentImpl()
{
_rnd = fpgetround();
_exc = fpgetmask();
}


FPEnvironmentImpl::FPEnvironmentImpl(const FPEnvironmentImpl& env)
{
_rnd = env._rnd;
_exc = env._exc;
}


FPEnvironmentImpl::~FPEnvironmentImpl()
{
fpsetround(_rnd);
fpsetmask(_exc);
}


FPEnvironmentImpl& FPEnvironmentImpl::operator = (const FPEnvironmentImpl& env)
{
_rnd = env._rnd;
_exc = env._exc;
return *this;
}


bool FPEnvironmentImpl::isInfiniteImpl(float value)
{
int cls = fpclass(value);
return cls == FP_PINF || cls == FP_NINF;
}


bool FPEnvironmentImpl::isInfiniteImpl(double value)
{
int cls = fpclass(value);
return cls == FP_PINF || cls == FP_NINF;
}


bool FPEnvironmentImpl::isInfiniteImpl(long double value)
{
int cls = fpclass(value);
return cls == FP_PINF || cls == FP_NINF;
}


bool FPEnvironmentImpl::isNaNImpl(float value)
{
return isnanf(value) != 0;
}


bool FPEnvironmentImpl::isNaNImpl(double value)
{
return isnan(value) != 0;
}


bool FPEnvironmentImpl::isNaNImpl(long double value)
{
return isnan((double) value) != 0;
}


float FPEnvironmentImpl::copySignImpl(float target, float source)
{
return (float) copysign(target, source);
}


double FPEnvironmentImpl::copySignImpl(double target, double source)
{
return copysign(target, source);
}


long double FPEnvironmentImpl::copySignImpl(long double target, long double source)
{
return (source > 0 && target > 0) || (source < 0 && target < 0) ? target : -target;
}


void FPEnvironmentImpl::keepCurrentImpl()
{
fpsetround(_rnd);
fpsetmask(_exc);
}


void FPEnvironmentImpl::clearFlagsImpl()
{
fpsetsticky(0);
}


bool FPEnvironmentImpl::isFlagImpl(FlagImpl flag)
{
return (fpgetsticky() & flag) != 0;
}


void FPEnvironmentImpl::setRoundingModeImpl(RoundingModeImpl mode)
{
fpsetround((fp_rnd) mode);
}


FPEnvironmentImpl::RoundingModeImpl FPEnvironmentImpl::getRoundingModeImpl()
{
return (FPEnvironmentImpl::RoundingModeImpl) fpgetround();
}


} // namespace Poco
@@ -31,7 +31,7 @@
namespace Poco {


#if (POCO_OS == POCO_OS_LINUX) || (POCO_OS == POCO_OS_ANDROID) || (POCO_OS == POCO_OS_CYGWIN) || (POCO_OS == POCO_OS_FREE_BSD)
#if (POCO_OS == POCO_OS_LINUX) || (POCO_OS == POCO_OS_ANDROID) || (POCO_OS == POCO_OS_CYGWIN) || (POCO_OS == POCO_OS_FREE_BSD) || (POCO_OS == POCO_OS_SOLARIS)
union semun
{
int val;

@@ -31,7 +31,7 @@
namespace Poco {


#if (POCO_OS == POCO_OS_LINUX) || (POCO_OS == POCO_OS_ANDROID) || (POCO_OS == POCO_OS_CYGWIN) || (POCO_OS == POCO_OS_FREE_BSD)
#if (POCO_OS == POCO_OS_LINUX) || (POCO_OS == POCO_OS_ANDROID) || (POCO_OS == POCO_OS_CYGWIN) || (POCO_OS == POCO_OS_FREE_BSD) || (POCO_OS == POCO_OS_SOLARIS)
union semun
{
int val;
@@ -314,13 +314,13 @@ static int read_unicode(json_stream *json)

if (l < 0xdc00 || l > 0xdfff) {
json_error(json, "invalid surrogate pair continuation \\u%04lx out "
"of range (dc00-dfff)", l);
"of range (dc00-dfff)", (unsigned long)l);
return -1;
}

cp = ((h - 0xd800) * 0x400) + ((l - 0xdc00) + 0x10000);
} else if (cp >= 0xdc00 && cp <= 0xdfff) {
json_error(json, "dangling surrogate \\u%04lx", cp);
json_error(json, "dangling surrogate \\u%04lx", (unsigned long)cp);
return -1;
}
@@ -9,6 +9,10 @@ elseif (OS_DARWIN OR OS_FREEBSD)
target_compile_definitions (_poco_net PUBLIC POCO_HAVE_FD_POLL)
endif ()

if (OS_SUNOS)
target_link_libraries (_poco_net PUBLIC socket nsl)
endif ()

# TODO: remove these warning exclusions
target_compile_options (_poco_net
PRIVATE
@@ -30,7 +30,6 @@ namespace Net


class HTTPServerRequest;
class HTTPServerResponse;
class HTTPRequestHandler;
@@ -93,7 +93,7 @@ void TCPServerDispatcher::release()

void TCPServerDispatcher::run()
{
AutoPtr<TCPServerDispatcher> guard(this, true); // ensure object stays alive
AutoPtr<TCPServerDispatcher> guard(this); // ensure object stays alive

int idleTime = (int) _pParams->getThreadIdleTime().totalMilliseconds();

@@ -149,11 +149,13 @@ void TCPServerDispatcher::enqueue(const StreamSocket& socket)
{
try
{
this->duplicate();
_threadPool.startWithPriority(_pParams->getThreadPriority(), *this, threadName);
++_currentThreads;
}
catch (Poco::Exception& exc)
{
this->release();
++_refusedConnections;
std::cerr << "Got exception while starting thread for connection. Error code: "
<< exc.code() << ", message: '" << exc.displayText() << "'" << std::endl;
@@ -2,11 +2,11 @@

# NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION,
# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
SET(VERSION_REVISION 54484)
SET(VERSION_REVISION 54485)
SET(VERSION_MAJOR 24)
SET(VERSION_MINOR 3)
SET(VERSION_MINOR 4)
SET(VERSION_PATCH 1)
SET(VERSION_GITHASH 891689a41506d00aa169548f5b4a8774351242c4)
SET(VERSION_DESCRIBE v24.3.1.1-testing)
SET(VERSION_STRING 24.3.1.1)
SET(VERSION_GITHASH 2c5c589a882ceec35439650337b92db3e76f0081)
SET(VERSION_DESCRIBE v24.4.1.1-testing)
SET(VERSION_STRING 24.4.1.1)
# end of autochange
@@ -1,17 +0,0 @@
# see ./CMakeLists.txt for variable declaration
if (FUZZER)
if (FUZZER STREQUAL "libfuzzer")
# NOTE: Eldar Zaitov decided to name it "libfuzzer" instead of "fuzzer" to keep in mind another possible fuzzer backends.
# NOTE: no-link means that all the targets are built with instrumentation for fuzzer, but only some of them
# (tests) have entry point for fuzzer and it's not checked.
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} -fsanitize=fuzzer-no-link -DFUZZER=1")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} -fsanitize=fuzzer-no-link -DFUZZER=1")

# NOTE: oss-fuzz can change LIB_FUZZING_ENGINE variable
if (NOT LIB_FUZZING_ENGINE)
set (LIB_FUZZING_ENGINE "-fsanitize=fuzzer")
endif ()
else ()
message (FATAL_ERROR "Unknown fuzzer type: ${FUZZER}")
endif ()
endif()
@@ -30,7 +30,7 @@ if (SANITIZE)
elseif (SANITIZE STREQUAL "thread")
set (TSAN_FLAGS "-fsanitize=thread")
if (COMPILER_CLANG)
set (TSAN_FLAGS "${TSAN_FLAGS} -fsanitize-blacklist=${PROJECT_SOURCE_DIR}/tests/tsan_suppressions.txt")
set (TSAN_FLAGS "${TSAN_FLAGS} -fsanitize-ignorelist=${PROJECT_SOURCE_DIR}/tests/tsan_ignorelist.txt")
endif()

set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} ${TSAN_FLAGS}")
@@ -48,7 +48,7 @@ if (SANITIZE)
set(UBSAN_FLAGS "${UBSAN_FLAGS} -fno-sanitize=unsigned-integer-overflow")
endif()
if (COMPILER_CLANG)
set (UBSAN_FLAGS "${UBSAN_FLAGS} -fsanitize-blacklist=${PROJECT_SOURCE_DIR}/tests/ubsan_suppressions.txt")
set (UBSAN_FLAGS "${UBSAN_FLAGS} -fsanitize-ignorelist=${PROJECT_SOURCE_DIR}/tests/ubsan_ignorelist.txt")
endif()

set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} ${UBSAN_FLAGS}")
2
contrib/NuRaft
vendored
@ -1 +1 @@
Subproject commit 4a12f99dfc9d47c687ff7700b927cc76856225d1
Subproject commit 08ac76ea80a37f89b12109c805eafe9f1dc9b991
@ -86,6 +86,8 @@ elseif (OS_DARWIN)
    target_compile_definitions(_c-ares PRIVATE -D_DARWIN_C_SOURCE)
elseif (OS_FREEBSD)
    target_include_directories(_c-ares SYSTEM PUBLIC "${ClickHouse_SOURCE_DIR}/contrib/c-ares-cmake/freebsd")
elseif (OS_SUNOS)
    target_include_directories(_c-ares SYSTEM PUBLIC "${ClickHouse_SOURCE_DIR}/contrib/c-ares-cmake/solaris")
endif()

add_library(ch_contrib::c-ares ALIAS _c-ares)

104
contrib/c-ares-cmake/solaris/ares_build.h
Normal file
@ -0,0 +1,104 @@
/* include/ares_build.h. Generated from ares_build.h.in by configure. */
#ifndef __CARES_BUILD_H
#define __CARES_BUILD_H


/* Copyright (C) 2009 - 2021 by Daniel Stenberg et al
 *
 * Permission to use, copy, modify, and distribute this software and its
 * documentation for any purpose and without fee is hereby granted, provided
 * that the above copyright notice appear in all copies and that both that
 * copyright notice and this permission notice appear in supporting
 * documentation, and that the name of M.I.T. not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission. M.I.T. makes no representations about the
 * suitability of this software for any purpose. It is provided "as is"
 * without express or implied warranty.
 */

/* ================================================================ */
/*               NOTES FOR CONFIGURE CAPABLE SYSTEMS                */
/* ================================================================ */

/*
 * NOTE 1:
 * -------
 *
 * Nothing in this file is intended to be modified or adjusted by the
 * c-ares library user nor by the c-ares library builder.
 *
 * If you think that something actually needs to be changed, adjusted
 * or fixed in this file, then, report it on the c-ares development
 * mailing list: http://lists.haxx.se/listinfo/c-ares/
 *
 * This header file shall only export symbols which are 'cares' or 'CARES'
 * prefixed, otherwise public name space would be polluted.
 *
 * NOTE 2:
 * -------
 *
 * Right now you might be staring at file ares_build.h.in or ares_build.h,
 * this is due to the following reason:
 *
 * On systems capable of running the configure script, the configure process
 * will overwrite the distributed ares_build.h file with one that is suitable
 * and specific to the library being configured and built, which is generated
 * from the ares_build.h.in template file.
 *
 */

/* ================================================================ */
/*  DEFINITION OF THESE SYMBOLS SHALL NOT TAKE PLACE ANYWHERE ELSE  */
/* ================================================================ */

#ifdef CARES_TYPEOF_ARES_SOCKLEN_T
#  error "CARES_TYPEOF_ARES_SOCKLEN_T shall not be defined except in ares_build.h"
   Error Compilation_aborted_CARES_TYPEOF_ARES_SOCKLEN_T_already_defined
#endif

#define CARES_HAVE_ARPA_NAMESER_H 1
#define CARES_HAVE_ARPA_NAMESER_COMPAT_H 1

/* ================================================================ */
/*    EXTERNAL INTERFACE SETTINGS FOR CONFIGURE CAPABLE SYSTEMS     */
/* ================================================================ */

/* Configure process defines this to 1 when it finds out that system  */
/* header file ws2tcpip.h must be included by the external interface. */
/* #undef CARES_PULL_WS2TCPIP_H */
#ifdef CARES_PULL_WS2TCPIP_H
#  ifndef WIN32_LEAN_AND_MEAN
#    define WIN32_LEAN_AND_MEAN
#  endif
#  include <windows.h>
#  include <winsock2.h>
#  include <ws2tcpip.h>
#endif

/* Configure process defines this to 1 when it finds out that system   */
/* header file sys/types.h must be included by the external interface. */
#define CARES_PULL_SYS_TYPES_H 1
#ifdef CARES_PULL_SYS_TYPES_H
#  include <sys/types.h>
#endif

/* Configure process defines this to 1 when it finds out that system    */
/* header file sys/socket.h must be included by the external interface. */
#define CARES_PULL_SYS_SOCKET_H 1
#ifdef CARES_PULL_SYS_SOCKET_H
#  include <sys/socket.h>
#endif

/* Integral data type used for ares_socklen_t. */
#define CARES_TYPEOF_ARES_SOCKLEN_T socklen_t

/* Data type definition of ares_socklen_t. */
typedef CARES_TYPEOF_ARES_SOCKLEN_T ares_socklen_t;

/* Integral data type used for ares_ssize_t. */
#define CARES_TYPEOF_ARES_SSIZE_T ssize_t

/* Data type definition of ares_ssize_t. */
typedef CARES_TYPEOF_ARES_SSIZE_T ares_ssize_t;

#endif /* __CARES_BUILD_H */
503
contrib/c-ares-cmake/solaris/ares_config.h
Normal file
@ -0,0 +1,503 @@
/* src/lib/ares_config.h. Generated from ares_config.h.in by configure. */
/* src/lib/ares_config.h.in. Generated from configure.ac by autoheader. */

/* Define if building universal (internal helper macro) */
/* #undef AC_APPLE_UNIVERSAL_BUILD */

/* define this if ares is built for a big endian system */
/* #undef ARES_BIG_ENDIAN */

/* Defined for build that exposes internal static functions for testing. */
/* #undef CARES_EXPOSE_STATICS */

/* a suitable file/device to read random data from */
#define CARES_RANDOM_FILE "/dev/urandom"

/* Defined for build with symbol hiding. */
#define CARES_SYMBOL_HIDING 1

/* Definition to make a library symbol externally visible. */
#define CARES_SYMBOL_SCOPE_EXTERN __attribute__ ((__visibility__ ("default")))

/* the signed version of size_t */
#define CARES_TYPEOF_ARES_SSIZE_T ssize_t

/* Use resolver library to configure cares */
/* #undef CARES_USE_LIBRESOLV */

/* if a /etc/inet dir is being used */
#define ETC_INET 1

/* Define to the type of arg 2 for gethostname. */
#define GETHOSTNAME_TYPE_ARG2 int

/* Define to the type qualifier of arg 1 for getnameinfo. */
#define GETNAMEINFO_QUAL_ARG1 const

/* Define to the type of arg 1 for getnameinfo. */
#define GETNAMEINFO_TYPE_ARG1 struct sockaddr *

/* Define to the type of arg 2 for getnameinfo. */
#define GETNAMEINFO_TYPE_ARG2 socklen_t

/* Define to the type of args 4 and 6 for getnameinfo. */
#define GETNAMEINFO_TYPE_ARG46 socklen_t

/* Define to the type of arg 7 for getnameinfo. */
#define GETNAMEINFO_TYPE_ARG7 int

/* Specifies the number of arguments to getservbyport_r */
#define GETSERVBYPORT_R_ARGS 5

/* Specifies the size of the buffer to pass to getservbyport_r */
#define GETSERVBYPORT_R_BUFSIZE 4096

/* Define to 1 if you have AF_INET6. */
#define HAVE_AF_INET6 1

/* Define to 1 if you have the <arpa/inet.h> header file. */
#define HAVE_ARPA_INET_H 1

/* Define to 1 if you have the <arpa/nameser_compat.h> header file. */
#define HAVE_ARPA_NAMESER_COMPAT_H 1

/* Define to 1 if you have the <arpa/nameser.h> header file. */
#define HAVE_ARPA_NAMESER_H 1

/* Define to 1 if you have the <assert.h> header file. */
#define HAVE_ASSERT_H 1

/* Define to 1 if you have the `bitncmp' function. */
/* #undef HAVE_BITNCMP */

/* Define to 1 if bool is an available type. */
#define HAVE_BOOL_T 1

/* Define to 1 if you have the clock_gettime function and monotonic timer. */
#define HAVE_CLOCK_GETTIME_MONOTONIC 1

/* Define to 1 if you have the closesocket function. */
/* #undef HAVE_CLOSESOCKET */

/* Define to 1 if you have the CloseSocket camel case function. */
/* #undef HAVE_CLOSESOCKET_CAMEL */

/* Define to 1 if you have the connect function. */
#define HAVE_CONNECT 1

/* define if the compiler supports basic C++11 syntax */
#define HAVE_CXX11 1

/* Define to 1 if you have the <dlfcn.h> header file. */
#define HAVE_DLFCN_H 1

/* Define to 1 if you have the <errno.h> header file. */
#define HAVE_ERRNO_H 1

/* Define to 1 if you have the fcntl function. */
#define HAVE_FCNTL 1

/* Define to 1 if you have the <fcntl.h> header file. */
#define HAVE_FCNTL_H 1

/* Define to 1 if you have a working fcntl O_NONBLOCK function. */
#define HAVE_FCNTL_O_NONBLOCK 1

/* Define to 1 if you have the freeaddrinfo function. */
#define HAVE_FREEADDRINFO 1

/* Define to 1 if you have a working getaddrinfo function. */
#define HAVE_GETADDRINFO 1

/* Define to 1 if the getaddrinfo function is threadsafe. */
#define HAVE_GETADDRINFO_THREADSAFE 1

/* Define to 1 if you have the getenv function. */
#define HAVE_GETENV 1

/* Define to 1 if you have the gethostbyaddr function. */
#define HAVE_GETHOSTBYADDR 1

/* Define to 1 if you have the gethostbyname function. */
#define HAVE_GETHOSTBYNAME 1

/* Define to 1 if you have the gethostname function. */
#define HAVE_GETHOSTNAME 1

/* Define to 1 if you have the getnameinfo function. */
#define HAVE_GETNAMEINFO 1

/* Define to 1 if you have the getservbyport_r function. */
#define HAVE_GETSERVBYPORT_R 1

/* Define to 1 if you have the `gettimeofday' function. */
#define HAVE_GETTIMEOFDAY 1

/* Define to 1 if you have the `if_indextoname' function. */
#define HAVE_IF_INDEXTONAME 1

/* Define to 1 if you have a IPv6 capable working inet_net_pton function. */
/* #undef HAVE_INET_NET_PTON */

/* Define to 1 if you have a IPv6 capable working inet_ntop function. */
#define HAVE_INET_NTOP 1

/* Define to 1 if you have a IPv6 capable working inet_pton function. */
#define HAVE_INET_PTON 1

/* Define to 1 if you have the <inttypes.h> header file. */
#define HAVE_INTTYPES_H 1

/* Define to 1 if you have the ioctl function. */
#define HAVE_IOCTL 1

/* Define to 1 if you have the ioctlsocket function. */
/* #undef HAVE_IOCTLSOCKET */

/* Define to 1 if you have the IoctlSocket camel case function. */
/* #undef HAVE_IOCTLSOCKET_CAMEL */

/* Define to 1 if you have a working IoctlSocket camel case FIONBIO function.
   */
/* #undef HAVE_IOCTLSOCKET_CAMEL_FIONBIO */

/* Define to 1 if you have a working ioctlsocket FIONBIO function. */
/* #undef HAVE_IOCTLSOCKET_FIONBIO */

/* Define to 1 if you have a working ioctl FIONBIO function. */
/* #undef HAVE_IOCTL_FIONBIO */

/* Define to 1 if you have a working ioctl SIOCGIFADDR function. */
/* #undef HAVE_IOCTL_SIOCGIFADDR */

/* Define to 1 if you have the `resolve' library (-lresolve). */
/* #undef HAVE_LIBRESOLVE */

/* Define to 1 if you have the <limits.h> header file. */
#define HAVE_LIMITS_H 1

/* if your compiler supports LL */
#define HAVE_LL 1

/* Define to 1 if the compiler supports the 'long long' data type. */
#define HAVE_LONGLONG 1

/* Define to 1 if you have the malloc.h header file. */
#define HAVE_MALLOC_H 1

/* Define to 1 if you have the memory.h header file. */
#define HAVE_MEMORY_H 1

/* Define to 1 if you have the MSG_NOSIGNAL flag. */
#define HAVE_MSG_NOSIGNAL 1

/* Define to 1 if you have the <netdb.h> header file. */
#define HAVE_NETDB_H 1

/* Define to 1 if you have the <netinet/in.h> header file. */
#define HAVE_NETINET_IN_H 1

/* Define to 1 if you have the <netinet/tcp.h> header file. */
#define HAVE_NETINET_TCP_H 1

/* Define to 1 if you have the <net/if.h> header file. */
#define HAVE_NET_IF_H 1

/* Define to 1 if you have PF_INET6. */
#define HAVE_PF_INET6 1

/* Define to 1 if you have the recv function. */
#define HAVE_RECV 1

/* Define to 1 if you have the recvfrom function. */
#define HAVE_RECVFROM 1

/* Define to 1 if you have the send function. */
#define HAVE_SEND 1

/* Define to 1 if you have the setsockopt function. */
#define HAVE_SETSOCKOPT 1

/* Define to 1 if you have a working setsockopt SO_NONBLOCK function. */
/* #undef HAVE_SETSOCKOPT_SO_NONBLOCK */

/* Define to 1 if you have the <signal.h> header file. */
#define HAVE_SIGNAL_H 1

/* Define to 1 if sig_atomic_t is an available typedef. */
#define HAVE_SIG_ATOMIC_T 1

/* Define to 1 if sig_atomic_t is already defined as volatile. */
/* #undef HAVE_SIG_ATOMIC_T_VOLATILE */

/* Define to 1 if your struct sockaddr_in6 has sin6_scope_id. */
#define HAVE_SOCKADDR_IN6_SIN6_SCOPE_ID 1

/* Define to 1 if you have the socket function. */
#define HAVE_SOCKET 1

/* Define to 1 if you have the <socket.h> header file. */
/* #undef HAVE_SOCKET_H */

/* Define to 1 if you have the <stdbool.h> header file. */
#define HAVE_STDBOOL_H 1

/* Define to 1 if you have the <stdint.h> header file. */
#define HAVE_STDINT_H 1

/* Define to 1 if you have the <stdio.h> header file. */
#define HAVE_STDIO_H 1

/* Define to 1 if you have the <stdlib.h> header file. */
#define HAVE_STDLIB_H 1

/* Define to 1 if you have the strcasecmp function. */
#define HAVE_STRCASECMP 1

/* Define to 1 if you have the strcmpi function. */
/* #undef HAVE_STRCMPI */

/* Define to 1 if you have the strdup function. */
#define HAVE_STRDUP 1

/* Define to 1 if you have the stricmp function. */
/* #undef HAVE_STRICMP */

/* Define to 1 if you have the <strings.h> header file. */
#define HAVE_STRINGS_H 1

/* Define to 1 if you have the <string.h> header file. */
#define HAVE_STRING_H 1

/* Define to 1 if you have the strncasecmp function. */
#define HAVE_STRNCASECMP 1

/* Define to 1 if you have the strncmpi function. */
/* #undef HAVE_STRNCMPI */

/* Define to 1 if you have the strnicmp function. */
/* #undef HAVE_STRNICMP */

/* Define to 1 if you have the <stropts.h> header file. */
#define HAVE_STROPTS_H 1

/* Define to 1 if you have struct addrinfo. */
#define HAVE_STRUCT_ADDRINFO 1

/* Define to 1 if you have struct in6_addr. */
#define HAVE_STRUCT_IN6_ADDR 1

/* Define to 1 if you have struct sockaddr_in6. */
#define HAVE_STRUCT_SOCKADDR_IN6 1

/* if struct sockaddr_storage is defined */
#define HAVE_STRUCT_SOCKADDR_STORAGE 1

/* Define to 1 if you have the timeval struct. */
#define HAVE_STRUCT_TIMEVAL 1

/* Define to 1 if you have the <sys/ioctl.h> header file. */
#define HAVE_SYS_IOCTL_H 1

/* Define to 1 if you have the <sys/param.h> header file. */
#define HAVE_SYS_PARAM_H 1

/* Define to 1 if you have the <sys/select.h> header file. */
#define HAVE_SYS_SELECT_H 1

/* Define to 1 if you have the <sys/socket.h> header file. */
#define HAVE_SYS_SOCKET_H 1

/* Define to 1 if you have the <sys/stat.h> header file. */
#define HAVE_SYS_STAT_H 1

/* Define to 1 if you have the <sys/time.h> header file. */
#define HAVE_SYS_TIME_H 1

/* Define to 1 if you have the <sys/types.h> header file. */
#define HAVE_SYS_TYPES_H 1

/* Define to 1 if you have the <sys/uio.h> header file. */
#define HAVE_SYS_UIO_H 1

/* Define to 1 if you have the <time.h> header file. */
#define HAVE_TIME_H 1

/* Define to 1 if you have the <unistd.h> header file. */
#define HAVE_UNISTD_H 1

/* Define to 1 if you have the windows.h header file. */
/* #undef HAVE_WINDOWS_H */

/* Define to 1 if you have the winsock2.h header file. */
/* #undef HAVE_WINSOCK2_H */

/* Define to 1 if you have the winsock.h header file. */
/* #undef HAVE_WINSOCK_H */

/* Define to 1 if you have the writev function. */
#define HAVE_WRITEV 1

/* Define to 1 if you have the ws2tcpip.h header file. */
/* #undef HAVE_WS2TCPIP_H */

/* Define if __system_property_get exists. */
/* #undef HAVE___SYSTEM_PROPERTY_GET */

/* Define to the sub-directory where libtool stores uninstalled libraries. */
#define LT_OBJDIR ".libs/"

/* Define to 1 if you need the malloc.h header file even with stdlib.h */
/* #undef NEED_MALLOC_H */

/* Define to 1 if you need the memory.h header file even with stdlib.h */
/* #undef NEED_MEMORY_H */

/* Define to 1 if _REENTRANT preprocessor symbol must be defined. */
#define NEED_REENTRANT 1

/* Define to 1 if _THREAD_SAFE preprocessor symbol must be defined. */
/* #undef NEED_THREAD_SAFE */

/* cpu-machine-OS */
#define OS "x86_64-pc-solaris2.11"

/* Name of package */
#define PACKAGE "c-ares"

/* Define to the address where bug reports for this package should be sent. */
#define PACKAGE_BUGREPORT "c-ares mailing list: http://lists.haxx.se/listinfo/c-ares"

/* Define to the full name of this package. */
#define PACKAGE_NAME "c-ares"

/* Define to the full name and version of this package. */
#define PACKAGE_STRING "c-ares 1.18.1"

/* Define to the one symbol short name of this package. */
#define PACKAGE_TARNAME "c-ares"

/* Define to the home page for this package. */
#define PACKAGE_URL ""

/* Define to the version of this package. */
#define PACKAGE_VERSION "1.18.1"

/* Define to the type qualifier pointed by arg 5 for recvfrom. */
#define RECVFROM_QUAL_ARG5

/* Define to the type of arg 1 for recvfrom. */
#define RECVFROM_TYPE_ARG1 int

/* Define to the type pointed by arg 2 for recvfrom. */
#define RECVFROM_TYPE_ARG2 void

/* Define to 1 if the type pointed by arg 2 for recvfrom is void. */
#define RECVFROM_TYPE_ARG2_IS_VOID 1

/* Define to the type of arg 3 for recvfrom. */
#define RECVFROM_TYPE_ARG3 size_t

/* Define to the type of arg 4 for recvfrom. */
#define RECVFROM_TYPE_ARG4 int

/* Define to the type pointed by arg 5 for recvfrom. */
#define RECVFROM_TYPE_ARG5 struct sockaddr

/* Define to 1 if the type pointed by arg 5 for recvfrom is void. */
/* #undef RECVFROM_TYPE_ARG5_IS_VOID */

/* Define to the type pointed by arg 6 for recvfrom. */
#define RECVFROM_TYPE_ARG6 void

/* Define to 1 if the type pointed by arg 6 for recvfrom is void. */
#define RECVFROM_TYPE_ARG6_IS_VOID 1

/* Define to the function return type for recvfrom. */
#define RECVFROM_TYPE_RETV ssize_t

/* Define to the type of arg 1 for recv. */
#define RECV_TYPE_ARG1 int

/* Define to the type of arg 2 for recv. */
#define RECV_TYPE_ARG2 void *

/* Define to the type of arg 3 for recv. */
#define RECV_TYPE_ARG3 size_t

/* Define to the type of arg 4 for recv. */
#define RECV_TYPE_ARG4 int

/* Define to the function return type for recv. */
#define RECV_TYPE_RETV ssize_t

/* Define as the return type of signal handlers (`int' or `void'). */
#define RETSIGTYPE void

/* Define to the type qualifier of arg 2 for send. */
#define SEND_QUAL_ARG2 const

/* Define to the type of arg 1 for send. */
#define SEND_TYPE_ARG1 int

/* Define to the type of arg 2 for send. */
#define SEND_TYPE_ARG2 void *

/* Define to the type of arg 3 for send. */
#define SEND_TYPE_ARG3 size_t

/* Define to the type of arg 4 for send. */
#define SEND_TYPE_ARG4 int

/* Define to the function return type for send. */
#define SEND_TYPE_RETV ssize_t

/* Define to 1 if all of the C90 standard headers exist (not just the ones
   required in a freestanding environment). This macro is provided for
   backward compatibility; new code need not use it. */
#define STDC_HEADERS 1

/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. This
   macro is obsolete. */
#define TIME_WITH_SYS_TIME 1

/* Define to disable non-blocking sockets. */
/* #undef USE_BLOCKING_SOCKETS */

/* Version number of package */
#define VERSION "1.18.1"

/* Define to avoid automatic inclusion of winsock.h */
/* #undef WIN32_LEAN_AND_MEAN */

/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
   significant byte first (like Motorola and SPARC, unlike Intel). */
#if defined AC_APPLE_UNIVERSAL_BUILD
# if defined __BIG_ENDIAN__
#  define WORDS_BIGENDIAN 1
# endif
#else
# ifndef WORDS_BIGENDIAN
/* #  undef WORDS_BIGENDIAN */
# endif
#endif

/* Define to 1 if OS is AIX. */
#ifndef _ALL_SOURCE
/* # undef _ALL_SOURCE */
#endif

/* Number of bits in a file offset, on hosts where this is settable. */
/* #undef _FILE_OFFSET_BITS */

/* Define for large files, on AIX-style hosts. */
/* #undef _LARGE_FILES */

/* Define to empty if `const' does not conform to ANSI C. */
/* #undef const */

/* Type to use in place of in_addr_t when system does not provide it. */
/* #undef in_addr_t */

/* Define to `unsigned int' if <sys/types.h> does not define. */
/* #undef size_t */
2
contrib/cppkafka
vendored
@ -1 +1 @@
Subproject commit 5a119f689f8a4d90d10a9635e7ee2bee5c127de1
Subproject commit 9c5ea0e332486961e612deacc6e3f0c1874c688d
@ -51,3 +51,8 @@
#define USE_OPENSSL
#define USE_THREADS_POSIX
#define USE_ARES

#ifdef __illumos__
#define HAVE_POSIX_STRERROR_R 1
#define HAVE_STRERROR_R 1
#endif
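The __illumos__ block above pins curl to the POSIX flavour of strerror_r. The POSIX and GNU variants have incompatible signatures, which is why the platform has to declare which one it provides. A short sketch of the POSIX form assumed here; print_errno_message is an illustrative helper, not part of curl:

// Sketch of the POSIX strerror_r form selected above (illustrative helper).
// The GNU variant instead returns a char * that may point to a static string,
// so the two flavours cannot be called the same way.
#include <cstdio>
#include <cstring>

void print_errno_message(int err) // hypothetical helper
{
    char buf[256];
    // POSIX variant: int strerror_r(int errnum, char * buf, size_t buflen);
    // returns 0 on success and fills buf. This is the form illumos provides.
    if (strerror_r(err, buf, sizeof(buf)) == 0)
        std::printf("errno %d: %s\n", err, buf);
}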
2
contrib/libhdfs3
vendored
@ -1 +1 @@
Subproject commit b9598e6016720a7c088bfe85ce1fa0410f9d2103
Subproject commit 0d04201c45359f0d0701fb1e8297d25eff7cfecf
@ -17,6 +17,8 @@
#ifndef METROHASH_METROHASH_128_H
#define METROHASH_METROHASH_128_H

// NOLINTBEGIN(readability-avoid-const-params-in-decls)

#include <stdint.h>

class MetroHash128

@ -68,5 +70,6 @@ private:
void metrohash128_1(const uint8_t * key, uint64_t len, uint32_t seed, uint8_t * out);
void metrohash128_2(const uint8_t * key, uint64_t len, uint32_t seed, uint8_t * out);

// NOLINTEND(readability-avoid-const-params-in-decls)

#endif // #ifndef METROHASH_METROHASH_128_H

@ -26,13 +26,13 @@ const uint8_t MetroHash64::test_seed_1[8] = { 0x3B, 0x0D, 0x48, 0x1C, 0xF4, 0x


MetroHash64::MetroHash64(const uint64_t seed)
MetroHash64::MetroHash64(uint64_t seed)
{
    Initialize(seed);
}


void MetroHash64::Initialize(const uint64_t seed)
void MetroHash64::Initialize(uint64_t seed)
{
    vseed = (static_cast<uint64_t>(seed) + k2) * k0;

@ -47,7 +47,7 @@ void MetroHash64::Initialize(const uint64_t seed)
}


void MetroHash64::Update(const uint8_t * const buffer, const uint64_t length)
void MetroHash64::Update(const uint8_t * const buffer, uint64_t length)
{
    const uint8_t * ptr = reinterpret_cast<const uint8_t*>(buffer);
    const uint8_t * const end = ptr + length;

@ -90,7 +90,7 @@ void MetroHash64::Update(const uint8_t * const buffer, const uint64_t length)
}


void MetroHash64::Finalize(uint8_t * const hash)
void MetroHash64::Finalize(uint8_t * hash)
{
    // finalize bulk loop, if used
    if (bytes >= 32)

@ -152,7 +152,7 @@ void MetroHash64::Finalize(uint8_t * const hash)
}


void MetroHash64::Hash(const uint8_t * buffer, const uint64_t length, uint8_t * const hash, const uint64_t seed)
void MetroHash64::Hash(const uint8_t * buffer, uint64_t length, uint8_t * const hash, uint64_t seed)
{
    const uint8_t * ptr = reinterpret_cast<const uint8_t*>(buffer);
    const uint8_t * const end = ptr + length;

@ -25,24 +25,24 @@ public:
    static const uint32_t bits = 64;

    // Constructor initializes the same as Initialize()
    explicit MetroHash64(const uint64_t seed=0);
    explicit MetroHash64(uint64_t seed=0);

    // Initializes internal state for new hash with optional seed
    void Initialize(const uint64_t seed=0);
    void Initialize(uint64_t seed=0);

    // Update the hash state with a string of bytes. If the length
    // is sufficiently long, the implementation switches to a bulk
    // hashing algorithm directly on the argument buffer for speed.
    void Update(const uint8_t * buffer, const uint64_t length);
    void Update(const uint8_t * buffer, uint64_t length);

    // Constructs the final hash and writes it to the argument buffer.
    // After a hash is finalized, this instance must be Initialized()-ed
    // again or the behavior of Update() and Finalize() is undefined.
    void Finalize(uint8_t * const hash);
    void Finalize(uint8_t * hash);

    // A non-incremental function implementation. This can be significantly
    // faster than the incremental implementation for some usage patterns.
    static void Hash(const uint8_t * buffer, const uint64_t length, uint8_t * const hash, const uint64_t seed=0);
    static void Hash(const uint8_t * buffer, uint64_t length, uint8_t * hash, uint64_t seed=0);

    // Does implementation correctly execute test vectors?
    static bool ImplementationVerified();
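A usage sketch based only on the declarations above: feeding the same bytes through the incremental Initialize()/Update()/Finalize() path should yield the same 8-byte digest (bits = 64) as the one-shot static Hash(). The example() function is illustrative; the sample string is the published MetroHash test vector:

// Usage sketch for the API declared above (illustrative).
#include <cassert>
#include <cstdint>
#include <cstring>

void example()
{
    const uint8_t data[] = "012345678901234567890123456789012345678901234567890123456789012";
    const uint64_t len = sizeof(data) - 1;

    uint8_t one_shot[8];
    MetroHash64::Hash(data, len, one_shot, /* seed = */ 42);

    MetroHash64 incremental(/* seed = */ 42);
    incremental.Update(data, 32);            // bytes may arrive in arbitrary chunks
    incremental.Update(data + 32, len - 32);
    uint8_t streamed[8];
    incremental.Finalize(streamed);          // re-Initialize() before reusing the instance

    assert(std::memcmp(one_shot, streamed, sizeof(streamed)) == 0);
}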
@ -32,6 +32,7 @@ set(SRCS
    "${LIBRARY_DIR}/src/handle_custom_notification.cxx"
    "${LIBRARY_DIR}/src/handle_vote.cxx"
    "${LIBRARY_DIR}/src/launcher.cxx"
    "${LIBRARY_DIR}/src/log_entry.cxx"
    "${LIBRARY_DIR}/src/srv_config.cxx"
    "${LIBRARY_DIR}/src/snapshot_sync_req.cxx"
    "${LIBRARY_DIR}/src/snapshot_sync_ctx.cxx"
2
contrib/xxHash
vendored
@ -1 +1 @@
Subproject commit 3078dc6039f8c0bffcb1904f81cfe6b2c3209435
Subproject commit bbb27a5efb85b92a0486cf361a8635715a53f6ba
@ -7,7 +7,7 @@ add_library(xxHash ${SRCS})
target_include_directories(xxHash SYSTEM BEFORE INTERFACE "${LIBRARY_DIR}")

# XXH_INLINE_ALL - Make all functions inline, with implementations being directly included within xxhash.h. Inlining functions is beneficial for speed on small keys.
# https://github.com/Cyan4973/xxHash/tree/v0.8.1#build-modifiers
# https://github.com/Cyan4973/xxHash/tree/v0.8.2#build-modifiers
target_compile_definitions(xxHash PUBLIC XXH_INLINE_ALL)

add_library(ch_contrib::xxHash ALIAS xxHash)
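Because XXH_INLINE_ALL is defined PUBLIC on the target above, every consumer linking ch_contrib::xxHash gets the whole implementation inlined from xxhash.h, with no separate xxHash objects to link. A sketch of a consumer; checksum() is illustrative, while XXH64 is part of the stable xxHash API:

// Sketch of a consumer target. XXH_INLINE_ALL is already injected by the
// target_compile_definitions(... PUBLIC ...) call above, so a plain include
// pulls in the full (inline) implementation.
#include <cstddef>
#include <cstdint>
#include <xxhash.h>

uint64_t checksum(const void * data, size_t size)
{
    return XXH64(data, size, /* seed = */ 0); // one-shot 64-bit hash of the buffer
}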
@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="24.2.1.2248"
ARG VERSION="24.3.1.2672"
ARG PACKAGES="clickhouse-keeper"
ARG DIRECT_DOWNLOAD_URLS=""

@ -44,7 +44,10 @@ ARG DIRECT_DOWNLOAD_URLS=""
# We do that in advance at the begining of Dockerfile before any packages will be
# installed to prevent picking those uid / gid by some unrelated software.
# The same uid / gid (101) is used both for alpine and ubuntu.

ARG DEFAULT_UID="101"
ARG DEFAULT_GID="101"
RUN addgroup -S -g "${DEFAULT_GID}" clickhouse && \
    adduser -S -h "/var/lib/clickhouse" -s /bin/bash -G clickhouse -g "ClickHouse keeper" -u "${DEFAULT_UID}" clickhouse

ARG TARGETARCH
RUN arch=${TARGETARCH:-amd64} \
@ -71,20 +74,21 @@ RUN arch=${TARGETARCH:-amd64} \
    fi \
    ; done \
    && rm /tmp/*.tgz /install -r \
    && addgroup -S -g 101 clickhouse \
    && adduser -S -h /var/lib/clickhouse -s /bin/bash -G clickhouse -g "ClickHouse keeper" -u 101 clickhouse \
    && mkdir -p /var/lib/clickhouse /var/log/clickhouse-keeper /etc/clickhouse-keeper \
    && chown clickhouse:clickhouse /var/lib/clickhouse \
    && chown root:clickhouse /var/log/clickhouse-keeper \
    && chmod +x /entrypoint.sh \
    && apk add --no-cache su-exec bash tzdata \
    && cp /usr/share/zoneinfo/UTC /etc/localtime \
    && echo "UTC" > /etc/timezone \
    && chmod ugo+Xrw -R /var/lib/clickhouse /var/log/clickhouse-keeper /etc/clickhouse-keeper
    && echo "UTC" > /etc/timezone

ARG DEFAULT_CONFIG_DIR="/etc/clickhouse-keeper"
ARG DEFAULT_DATA_DIR="/var/lib/clickhouse-keeper"
ARG DEFAULT_LOG_DIR="/var/log/clickhouse-keeper"
RUN mkdir -p "${DEFAULT_DATA_DIR}" "${DEFAULT_LOG_DIR}" "${DEFAULT_CONFIG_DIR}" \
    && chown clickhouse:clickhouse "${DEFAULT_DATA_DIR}" \
    && chown root:clickhouse "${DEFAULT_LOG_DIR}" \
    && chmod ugo+Xrw -R "${DEFAULT_DATA_DIR}" "${DEFAULT_LOG_DIR}" "${DEFAULT_CONFIG_DIR}"

# /var/lib/clickhouse is necessary due to the current default configuration for Keeper
VOLUME "${DEFAULT_DATA_DIR}" /var/lib/clickhouse
EXPOSE 2181 10181 44444 9181

VOLUME /var/lib/clickhouse /var/log/clickhouse-keeper /etc/clickhouse-keeper

ENTRYPOINT ["/entrypoint.sh"]

@ -80,7 +80,7 @@ if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
    # so the container can't be finished by ctrl+c
    export CLICKHOUSE_WATCHDOG_ENABLE

    cd /var/lib/clickhouse
    cd "${DATA_DIR}"

    # There is a config file. It is already tested with gosu (if it is readably by keeper user)
    if [ -f "$KEEPER_CONFIG" ]; then

@ -4,6 +4,9 @@ FROM clickhouse/fasttest:$FROM_TAG
ENV CC=clang-${LLVM_VERSION}
ENV CXX=clang++-${LLVM_VERSION}

# If the cctools is updated, then first build it in the CI, then update here in a different commit
COPY --from=clickhouse/cctools:d9e3596e706b /cctools /cctools

# Rust toolchain and libraries
ENV RUSTUP_HOME=/rust/rustup
ENV CARGO_HOME=/rust/cargo

@ -58,7 +61,7 @@ RUN arch=${TARGETARCH:-amd64} \
    && rm /tmp/nfpm.deb

ARG GO_VERSION=1.19.10
# We need go for clickhouse-diagnostics
# We needed go for clickhouse-diagnostics (it is not used anymore)
RUN arch=${TARGETARCH:-amd64} \
    && curl -Lo /tmp/go.tgz "https://go.dev/dl/go${GO_VERSION}.linux-${arch}.tar.gz" \
    && tar -xzf /tmp/go.tgz -C /usr/local/ \

@ -73,9 +76,6 @@ RUN curl -Lo /usr/bin/clang-tidy-cache \
    "https://raw.githubusercontent.com/matus-chochlik/ctcache/$CLANG_TIDY_SHA1/clang-tidy-cache" \
    && chmod +x /usr/bin/clang-tidy-cache

# If the cctools is updated, then first build it in the CI, then update here in a different commit
COPY --from=clickhouse/cctools:5a908f73878a /cctools /cctools

RUN mkdir /workdir && chmod 777 /workdir
WORKDIR /workdir

@ -36,22 +36,6 @@ rm -f CMakeCache.txt

if [ -n "$MAKE_DEB" ]; then
    rm -rf /build/packages/root
    # NOTE: this is for backward compatibility with previous releases,
    # that does not diagnostics tool (only script).
    if [ -d /build/programs/diagnostics ]; then
        if [ -z "$SANITIZER" ]; then
            # We need to check if clickhouse-diagnostics is fine and build it
            (
                cd /build/programs/diagnostics
                make test-no-docker
                GOARCH="${DEB_ARCH}" CGO_ENABLED=0 make VERSION="$VERSION_STRING" build
                mv clickhouse-diagnostics ..
            )
        else
            echo -e "#!/bin/sh\necho 'Not implemented for this type of package'" > /build/programs/clickhouse-diagnostics
            chmod +x /build/programs/clickhouse-diagnostics
        fi
    fi
fi

@ -121,8 +105,6 @@ if [ -n "$MAKE_DEB" ]; then
    # No quotes because I want it to expand to nothing if empty.
    # shellcheck disable=SC2086
    DESTDIR=/build/packages/root ninja $NINJA_FLAGS programs/install
    cp /build/programs/clickhouse-diagnostics /build/packages/root/usr/bin
    cp /build/programs/clickhouse-diagnostics /output
    bash -x /build/packages/build
fi

@ -2,7 +2,7 @@
# It's based on the assumption that we don't care of the cctools version so much
# It event does not depend on the clickhouse/fasttest in the `docker/images.json`
ARG FROM_TAG=latest
FROM clickhouse/fasttest:$FROM_TAG
FROM clickhouse/fasttest:$FROM_TAG as builder

ENV CC=clang-${LLVM_VERSION}
ENV CXX=clang++-${LLVM_VERSION}

@ -29,3 +29,6 @@ RUN git clone https://github.com/tpoechtrager/cctools-port.git \
    && make install -j$(nproc) \
    && cd ../.. \
    && rm -rf cctools-port

FROM scratch
COPY --from=builder /cctools /cctools

@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="24.2.1.2248"
ARG VERSION="24.3.1.2672"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
ARG DIRECT_DOWNLOAD_URLS=""

@ -42,6 +42,10 @@ ARG DIRECT_DOWNLOAD_URLS=""
# We do that in advance at the begining of Dockerfile before any packages will be
# installed to prevent picking those uid / gid by some unrelated software.
# The same uid / gid (101) is used both for alpine and ubuntu.
ARG DEFAULT_UID="101"
ARG DEFAULT_GID="101"
RUN addgroup -S -g "${DEFAULT_GID}" clickhouse && \
    adduser -S -h "/var/lib/clickhouse" -s /bin/bash -G clickhouse -g "ClickHouse server" -u "${DEFAULT_UID}" clickhouse

RUN arch=${TARGETARCH:-amd64} \
    && cd /tmp \
@ -66,23 +70,30 @@ RUN arch=${TARGETARCH:-amd64} \
    fi \
    ; done \
    && rm /tmp/*.tgz /install -r \
    && addgroup -S -g 101 clickhouse \
    && adduser -S -h /var/lib/clickhouse -s /bin/bash -G clickhouse -g "ClickHouse server" -u 101 clickhouse \
    && mkdir -p /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server/config.d /etc/clickhouse-server/users.d /etc/clickhouse-client /docker-entrypoint-initdb.d \
    && chown clickhouse:clickhouse /var/lib/clickhouse \
    && chown root:clickhouse /var/log/clickhouse-server \
    && chmod +x /entrypoint.sh \
    && apk add --no-cache bash tzdata \
    && cp /usr/share/zoneinfo/UTC /etc/localtime \
    && echo "UTC" > /etc/timezone \
    && chmod ugo+Xrw -R /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client
    && echo "UTC" > /etc/timezone

# we need to allow "others" access to clickhouse folder, because docker container
# can be started with arbitrary uid (openshift usecase)
ARG DEFAULT_CLIENT_CONFIG_DIR="/etc/clickhouse-client"
ARG DEFAULT_SERVER_CONFIG_DIR="/etc/clickhouse-server"
ARG DEFAULT_DATA_DIR="/var/lib/clickhouse"
ARG DEFAULT_LOG_DIR="/var/log/clickhouse-server"

# we need to allow "others" access to ClickHouse folders, because docker containers
# can be started with arbitrary uids (OpenShift usecase)
RUN mkdir -p \
    "${DEFAULT_DATA_DIR}" \
    "${DEFAULT_LOG_DIR}" \
    "${DEFAULT_CLIENT_CONFIG_DIR}" \
    "${DEFAULT_SERVER_CONFIG_DIR}/config.d" \
    "${DEFAULT_SERVER_CONFIG_DIR}/users.d" \
    /docker-entrypoint-initdb.d \
    && chown clickhouse:clickhouse "${DEFAULT_DATA_DIR}" \
    && chown root:clickhouse "${DEFAULT_LOG_DIR}" \
    && chmod ugo+Xrw -R "${DEFAULT_DATA_DIR}" "${DEFAULT_LOG_DIR}" "${DEFAULT_CLIENT_CONFIG_DIR}" "${DEFAULT_SERVER_CONFIG_DIR}"

VOLUME "${DEFAULT_DATA_DIR}"
EXPOSE 9000 8123 9009

VOLUME /var/lib/clickhouse \
    /var/log/clickhouse-server

ENTRYPOINT ["/entrypoint.sh"]

@ -27,7 +27,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list

ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
ARG VERSION="24.2.1.2248"
ARG VERSION="24.3.1.2672"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

# set non-empty deb_location_url url to create a docker image

@ -173,9 +173,23 @@ function fuzz

    mkdir -p /var/run/clickhouse-server

    # NOTE: we use process substitution here to preserve keep $! as a pid of clickhouse-server
    clickhouse-server --config-file db/config.xml --pid-file /var/run/clickhouse-server/clickhouse-server.pid -- --path db > server.log 2>&1 &
    server_pid=$!
    # server.log -> All server logs, including sanitizer
    # stderr.log -> Process logs (sanitizer) only
    clickhouse-server \
        --config-file db/config.xml \
        --pid-file /var/run/clickhouse-server/clickhouse-server.pid \
        -- --path db \
        --logger.console=0 \
        --logger.log=server.log 2>&1 | tee -a stderr.log >> server.log 2>&1 &
    for _ in {1..30}
    do
        if clickhouse-client --query "select 1"
        then
            break
        fi
        sleep 1
    done
    server_pid=$(cat /var/run/clickhouse-server/clickhouse-server.pid)

    kill -0 $server_pid

@ -427,6 +441,7 @@ p.links a { padding: 5px; margin: 3px; background: #FFF; line-height: 2; white-s
<a href="run.log">run.log</a>
<a href="fuzzer.log.zst">fuzzer.log.zst</a>
<a href="server.log.zst">server.log.zst</a>
<a href="stderr.log">stderr.log</a>
<a href="main.log">main.log</a>
<a href="dmesg.log">dmesg.log</a>
${CORE_LINK}

@ -126,7 +126,6 @@ RUN set -x \

COPY modprobe.sh /usr/local/bin/modprobe
COPY dockerd-entrypoint.sh /usr/local/bin/
COPY compose/ /compose/
COPY misc/ /misc/

@ -138,7 +138,7 @@ ENGINE = MergeTree
PARTITION BY toYYYYMM(EventDate)
ORDER BY (CounterID, EventDate, intHash32(UserID))
SAMPLE BY intHash32(UserID)
SETTINGS disk = disk(type = cache, path = '/var/lib/clickhouse/filesystem_caches/', max_size = '4G',
SETTINGS disk = disk(type = cache, path = '/var/lib/clickhouse/filesystem_caches/stateful/', max_size = '4G',
    disk = disk(type = web, endpoint = 'https://clickhouse-datasets-web.s3.us-east-1.amazonaws.com/'));

ATTACH TABLE datasets.visits_v1 UUID '5131f834-711f-4168-98a5-968b691a104b'

@ -329,5 +329,5 @@ ENGINE = CollapsingMergeTree(Sign)
PARTITION BY toYYYYMM(StartDate)
ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)
SAMPLE BY intHash32(UserID)
SETTINGS disk = disk(type = cache, path = '/var/lib/clickhouse/filesystem_caches/', max_size = '4G',
SETTINGS disk = disk(type = cache, path = '/var/lib/clickhouse/filesystem_caches/stateful/', max_size = '4G',
    disk = disk(type = web, endpoint = 'https://clickhouse-datasets-web.s3.us-east-1.amazonaws.com/'));

@ -25,7 +25,7 @@ azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log &
config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml

cache_policy=""
if [ $(( $(date +%-d) % 2 )) -eq 1 ]; then
if [ $(($RANDOM%2)) -eq 1 ]; then
    cache_policy="SLRU"
else
    cache_policy="LRU"

@ -51,22 +51,22 @@ fi
config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml

if [[ -n "$BUGFIX_VALIDATE_CHECK" ]] && [[ "$BUGFIX_VALIDATE_CHECK" -eq 1 ]]; then
    sudo cat /etc/clickhouse-server/config.d/zookeeper.xml \
        | sed "/<use_compression>1<\/use_compression>/d" \
        > /etc/clickhouse-server/config.d/zookeeper.xml.tmp
    sudo mv /etc/clickhouse-server/config.d/zookeeper.xml.tmp /etc/clickhouse-server/config.d/zookeeper.xml
    sudo sed -i "/<use_compression>1<\/use_compression>/d" /etc/clickhouse-server/config.d/zookeeper.xml

    # it contains some new settings, but we can safely remove it
    rm /etc/clickhouse-server/config.d/handlers.yaml
    rm /etc/clickhouse-server/users.d/s3_cache_new.xml
    rm /etc/clickhouse-server/config.d/zero_copy_destructive_operations.xml

    #todo: remove these after 24.3 released.
    sudo sed -i "s|<object_storage_type>azure<|<object_storage_type>azure_blob_storage<|" /etc/clickhouse-server/config.d/azure_storage_conf.xml

    #todo: remove these after 24.3 released.
    sudo sed -i "s|<object_storage_type>local<|<object_storage_type>local_blob_storage<|" /etc/clickhouse-server/config.d/storage_conf.xml

function remove_keeper_config()
{
    sudo cat /etc/clickhouse-server/config.d/keeper_port.xml \
        | sed "/<$1>$2<\/$1>/d" \
        > /etc/clickhouse-server/config.d/keeper_port.xml.tmp
    sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml
    sudo sed -i "/<$1>$2<\/$1>/d" /etc/clickhouse-server/config.d/keeper_port.xml
}
# commit_logs_cache_size_threshold setting doesn't exist on some older versions
remove_keeper_config "commit_logs_cache_size_threshold" "[[:digit:]]\+"
@ -77,7 +77,7 @@ fi
if [ "$NUM_TRIES" -gt "1" ]; then
    export THREAD_FUZZER_CPU_TIME_PERIOD_US=1000
    export THREAD_FUZZER_SLEEP_PROBABILITY=0.1
    export THREAD_FUZZER_SLEEP_TIME_US=100000
    export THREAD_FUZZER_SLEEP_TIME_US_MAX=100000

    export THREAD_FUZZER_pthread_mutex_lock_BEFORE_MIGRATE_PROBABILITY=1
    export THREAD_FUZZER_pthread_mutex_lock_AFTER_MIGRATE_PROBABILITY=1

@ -88,10 +88,10 @@ if [ "$NUM_TRIES" -gt "1" ]; then
    export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_PROBABILITY=0.001
    export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_PROBABILITY=0.001
    export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_PROBABILITY=0.001
    export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_TIME_US=10000
    export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_TIME_US=10000
    export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_TIME_US=10000
    export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_TIME_US=10000
    export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_TIME_US_MAX=10000
    export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_TIME_US_MAX=10000
    export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_TIME_US_MAX=10000
    export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_TIME_US_MAX=10000

    mkdir -p /var/run/clickhouse-server
    # simplest way to forward env variables to server
@ -101,25 +101,13 @@ else
fi

if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
    sudo cat /etc/clickhouse-server1/config.d/filesystem_caches_path.xml \
        | sed "s|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches/</filesystem_caches_path>|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches_1/</filesystem_caches_path>|" \
        > /etc/clickhouse-server1/config.d/filesystem_caches_path.xml.tmp
    mv /etc/clickhouse-server1/config.d/filesystem_caches_path.xml.tmp /etc/clickhouse-server1/config.d/filesystem_caches_path.xml
    sudo sed -i "s|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches/</filesystem_caches_path>|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches_1/</filesystem_caches_path>|" /etc/clickhouse-server1/config.d/filesystem_caches_path.xml

    sudo cat /etc/clickhouse-server2/config.d/filesystem_caches_path.xml \
        | sed "s|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches/</filesystem_caches_path>|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches_2/</filesystem_caches_path>|" \
        > /etc/clickhouse-server2/config.d/filesystem_caches_path.xml.tmp
    mv /etc/clickhouse-server2/config.d/filesystem_caches_path.xml.tmp /etc/clickhouse-server2/config.d/filesystem_caches_path.xml
    sudo sed -i "s|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches/</filesystem_caches_path>|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches_2/</filesystem_caches_path>|" /etc/clickhouse-server2/config.d/filesystem_caches_path.xml

    sudo cat /etc/clickhouse-server1/config.d/filesystem_caches_path.xml \
        | sed "s|<custom_cached_disks_base_directory replace=\"replace\">/var/lib/clickhouse/filesystem_caches/</custom_cached_disks_base_directory>|<custom_cached_disks_base_directory replace=\"replace\">/var/lib/clickhouse/filesystem_caches_1/</custom_cached_disks_base_directory>|" \
        > /etc/clickhouse-server1/config.d/filesystem_caches_path.xml.tmp
    mv /etc/clickhouse-server1/config.d/filesystem_caches_path.xml.tmp /etc/clickhouse-server1/config.d/filesystem_caches_path.xml
    sudo sed -i "s|<custom_cached_disks_base_directory replace=\"replace\">/var/lib/clickhouse/filesystem_caches/</custom_cached_disks_base_directory>|<custom_cached_disks_base_directory replace=\"replace\">/var/lib/clickhouse/filesystem_caches_1/</custom_cached_disks_base_directory>|" /etc/clickhouse-server1/config.d/filesystem_caches_path.xml

    sudo cat /etc/clickhouse-server2/config.d/filesystem_caches_path.xml \
        | sed "s|<custom_cached_disks_base_directory replace=\"replace\">/var/lib/clickhouse/filesystem_caches/</custom_cached_disks_base_directory>|<custom_cached_disks_base_directory replace=\"replace\">/var/lib/clickhouse/filesystem_caches_2/</custom_cached_disks_base_directory>|" \
        > /etc/clickhouse-server2/config.d/filesystem_caches_path.xml.tmp
    mv /etc/clickhouse-server2/config.d/filesystem_caches_path.xml.tmp /etc/clickhouse-server2/config.d/filesystem_caches_path.xml
    sudo sed -i "s|<custom_cached_disks_base_directory replace=\"replace\">/var/lib/clickhouse/filesystem_caches/</custom_cached_disks_base_directory>|<custom_cached_disks_base_directory replace=\"replace\">/var/lib/clickhouse/filesystem_caches_2/</custom_cached_disks_base_directory>|" /etc/clickhouse-server2/config.d/filesystem_caches_path.xml

    mkdir -p /var/run/clickhouse-server1
    sudo chown clickhouse:clickhouse /var/run/clickhouse-server1
@ -269,10 +257,10 @@ do
    echo "$err"
    [[ "0" != "${#err}" ]] && failed_to_save_logs=1
    if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
        err=$( { clickhouse-client -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst; } 2>&1 )
        err=$( { clickhouse-client --port 19000 -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst; } 2>&1 )
        echo "$err"
        [[ "0" != "${#err}" ]] && failed_to_save_logs=1
        err=$( { clickhouse-client -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.2.tsv.zst; } 2>&1 )
        err=$( { clickhouse-client --port 29000 -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.2.tsv.zst; } 2>&1 )
        echo "$err"
        [[ "0" != "${#err}" ]] && failed_to_save_logs=1
    fi

@ -215,7 +215,7 @@ function check_server_start()
function check_logs_for_critical_errors()
{
    # Sanitizer asserts
    sed -n '/WARNING:.*anitizer/,/^$/p' >> /test_output/tmp
    sed -n '/WARNING:.*anitizer/,/^$/p' /var/log/clickhouse-server/stderr.log >> /test_output/tmp
    rg -Fav -e "ASan doesn't fully support makecontext/swapcontext functions" -e "DB::Exception" /test_output/tmp > /dev/null \
        && echo -e "Sanitizer assert (in stderr.log)$FAIL$(head_escaped /test_output/tmp)" >> /test_output/test_results.tsv \
        || echo -e "No sanitizer asserts$OK" >> /test_output/test_results.tsv

@ -27,7 +27,7 @@ install_packages package_folder
# and find more potential issues.
export THREAD_FUZZER_CPU_TIME_PERIOD_US=1000
export THREAD_FUZZER_SLEEP_PROBABILITY=0.1
export THREAD_FUZZER_SLEEP_TIME_US=100000
export THREAD_FUZZER_SLEEP_TIME_US_MAX=100000

export THREAD_FUZZER_pthread_mutex_lock_BEFORE_MIGRATE_PROBABILITY=1
export THREAD_FUZZER_pthread_mutex_lock_AFTER_MIGRATE_PROBABILITY=1

@ -38,11 +38,11 @@ export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_PROBABILITY=0.001
export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_PROBABILITY=0.001
export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_PROBABILITY=0.001
export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_PROBABILITY=0.001
export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_TIME_US=10000
export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_TIME_US_MAX=10000

export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_TIME_US=10000
export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_TIME_US=10000
export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_TIME_US=10000
export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_TIME_US_MAX=10000
export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_TIME_US_MAX=10000
export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_TIME_US_MAX=10000

export THREAD_FUZZER_EXPLICIT_SLEEP_PROBABILITY=0.01
export THREAD_FUZZER_EXPLICIT_MEMORY_EXCEPTION_PROBABILITY=0.01

@ -72,7 +72,7 @@ mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/c

# Randomize cache policies.
cache_policy=""
if [ $(( $(date +%-d) % 2 )) -eq 1 ]; then
if [ $(($RANDOM%2)) -eq 1 ]; then
    cache_policy="SLRU"
else
    cache_policy="LRU"
@ -87,6 +87,25 @@ if [ "$cache_policy" = "SLRU" ]; then
    mv /etc/clickhouse-server/config.d/storage_conf.xml.tmp /etc/clickhouse-server/config.d/storage_conf.xml
fi

# Disable experimental WINDOW VIEW tests for stress tests, since they may be
# created with old analyzer and then, after server restart it will refuse to
# start.
# FIXME: remove once the support for WINDOW VIEW will be implemented in analyzer.
sudo cat /etc/clickhouse-server/users.d/stress_tests_overrides.xml <<EOL
<clickhouse>
    <profiles>
        <default>
            <allow_experimental_window_view>false</allow_experimental_window_view>
            <constraints>
                <allow_experimental_window_view>
                    <readonly/>
                </allow_experimental_window_view>
            </constraints>
        </default>
    </profiles>
</clickhouse>
EOL

start_server

clickhouse-client --query "SHOW TABLES FROM datasets"

@ -8,20 +8,22 @@ ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list

RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
    aspell \
    curl \
    git \
    file \
    libxml2-utils \
    moreutils \
    python3-fuzzywuzzy \
    python3-pip \
    yamllint \
    locales \
    && pip3 install black==23.12.0 boto3 codespell==2.2.1 mypy==1.8.0 PyGithub unidiff pylint==3.1.0 \
        requests types-requests \
    aspell \
    curl \
    git \
    file \
    libxml2-utils \
    moreutils \
    python3-fuzzywuzzy \
    python3-pip \
    yamllint \
    locales \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

# python-magic is the same version as in Ubuntu 22.04
RUN pip3 install black==23.12.0 boto3 codespell==2.2.1 mypy==1.8.0 PyGithub unidiff pylint==3.1.0 \
    python-magic==0.4.24 requests types-requests \
    && rm -rf /root/.cache/pip

RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen en_US.UTF-8

@ -67,10 +67,7 @@ configure

function remove_keeper_config()
{
    sudo cat /etc/clickhouse-server/config.d/keeper_port.xml \
        | sed "/<$1>$2<\/$1>/d" \
        > /etc/clickhouse-server/config.d/keeper_port.xml.tmp
    sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml
    sudo sed -i "/<$1>$2<\/$1>/d" /etc/clickhouse-server/config.d/keeper_port.xml
}

# async_replication setting doesn't exist on some older versions
@ -80,16 +77,10 @@ remove_keeper_config "async_replication" "1"
remove_keeper_config "create_if_not_exists" "[01]"

#todo: remove these after 24.3 released.
sudo cat /etc/clickhouse-server/config.d/azure_storage_conf.xml \
    | sed "s|<object_storage_type>azure|<object_storage_type>azure_blob_storage|" \
    > /etc/clickhouse-server/config.d/azure_storage_conf.xml.tmp
sudo mv /etc/clickhouse-server/config.d/azure_storage_conf.xml.tmp /etc/clickhouse-server/config.d/azure_storage_conf.xml
sudo sed -i "s|<object_storage_type>azure<|<object_storage_type>azure_blob_storage<|" /etc/clickhouse-server/config.d/azure_storage_conf.xml

#todo: remove these after 24.3 released.
sudo cat /etc/clickhouse-server/config.d/storage_conf.xml \
    | sed "s|<object_storage_type>local|<object_storage_type>local_blob_storage|" \
    > /etc/clickhouse-server/config.d/storage_conf.xml.tmp
sudo mv /etc/clickhouse-server/config.d/storage_conf.xml.tmp /etc/clickhouse-server/config.d/storage_conf.xml
sudo sed -i "s|<object_storage_type>local<|<object_storage_type>local_blob_storage<|" /etc/clickhouse-server/config.d/storage_conf.xml

# latest_logs_cache_size_threshold setting doesn't exist on some older versions
remove_keeper_config "latest_logs_cache_size_threshold" "[[:digit:]]\+"
@ -120,22 +111,13 @@ export ZOOKEEPER_FAULT_INJECTION=0

configure

# force_sync=false doesn't work correctly on some older versions
sudo cat /etc/clickhouse-server/config.d/keeper_port.xml \
    | sed "s|<force_sync>false</force_sync>|<force_sync>true</force_sync>|" \
    > /etc/clickhouse-server/config.d/keeper_port.xml.tmp
sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml
sudo sed -i "s|<force_sync>false</force_sync>|<force_sync>true</force_sync>|" /etc/clickhouse-server/config.d/keeper_port.xml

# TODO: remove these after 24.3 is released.
sudo cat /etc/clickhouse-server/config.d/azure_storage_conf.xml \
    | sed "s|<object_storage_type>azure|<object_storage_type>azure_blob_storage|" \
    > /etc/clickhouse-server/config.d/azure_storage_conf.xml.tmp
sudo mv /etc/clickhouse-server/config.d/azure_storage_conf.xml.tmp /etc/clickhouse-server/config.d/azure_storage_conf.xml
sudo sed -i "s|<object_storage_type>azure<|<object_storage_type>azure_blob_storage<|" /etc/clickhouse-server/config.d/azure_storage_conf.xml

# TODO: remove these after 24.3 is released.
sudo cat /etc/clickhouse-server/config.d/storage_conf.xml \
    | sed "s|<object_storage_type>local|<object_storage_type>local_blob_storage|" \
    > /etc/clickhouse-server/config.d/storage_conf.xml.tmp
sudo mv /etc/clickhouse-server/config.d/storage_conf.xml.tmp /etc/clickhouse-server/config.d/storage_conf.xml
sudo sed -i "s|<object_storage_type>local<|<object_storage_type>local_blob_storage<|" /etc/clickhouse-server/config.d/storage_conf.xml

# async_replication setting doesn't exist on some older versions
remove_keeper_config "async_replication" "1"
@ -150,10 +132,7 @@ remove_keeper_config "latest_logs_cache_size_threshold" "[[:digit:]]\+"

remove_keeper_config "commit_logs_cache_size_threshold" "[[:digit:]]\+"

# But we still need the default disk because some tables are loaded only into it
sudo cat /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml \
    | sed "s|<main><disk>s3</disk></main>|<main><disk>s3</disk></main><default><disk>default</disk></default>|" \
    > /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp
mv /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
sudo sed -i "s|<main><disk>s3</disk></main>|<main><disk>s3</disk></main><default><disk>default</disk></default>|" /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
sudo chown clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
sudo chgrp clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
@ -256,10 +235,7 @@ then

fi

# Just in case the previous version left some garbage in ZooKeeper
sudo cat /etc/clickhouse-server/config.d/lost_forever_check.xml \
    | sed "s|>1<|>0<|g" \
    > /etc/clickhouse-server/config.d/lost_forever_check.xml.tmp
sudo mv /etc/clickhouse-server/config.d/lost_forever_check.xml.tmp /etc/clickhouse-server/config.d/lost_forever_check.xml
sudo sed -i "s|>1<|>0<|g" /etc/clickhouse-server/config.d/lost_forever_check.xml
rm /etc/clickhouse-server/config.d/filesystem_caches_path.xml

start 500

64
docs/changelogs/v23.12.5.81-stable.md
Normal file
@ -0,0 +1,64 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v23.12.5.81-stable (a0fbe3ae813) FIXME as compared to v23.12.4.15-stable (4233d111d20)

#### Improvement
* Backported in [#60290](https://github.com/ClickHouse/ClickHouse/issues/60290): S3 file copy on GCP now falls back to buffer copy in case GCP returned `Internal Error` with `GATEWAY_TIMEOUT` HTTP error code. [#60164](https://github.com/ClickHouse/ClickHouse/pull/60164) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#60830](https://github.com/ClickHouse/ClickHouse/issues/60830): Update tzdata to 2024a. [#60768](https://github.com/ClickHouse/ClickHouse/pull/60768) ([Raúl Marín](https://github.com/Algunenano)).

#### Build/Testing/Packaging Improvement
* Backported in [#59883](https://github.com/ClickHouse/ClickHouse/issues/59883): If you want to run initdb scripts every time the ClickHouse container starts, set the environment variable `CLICKHOUSE_ALWAYS_RUN_INITDB_SCRIPTS`. [#59808](https://github.com/ClickHouse/ClickHouse/pull/59808) ([Alexander Nikolaev](https://github.com/AlexNik)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix_kql_issue_found_by_wingfuzz [#59626](https://github.com/ClickHouse/ClickHouse/pull/59626) ([Yong Wang](https://github.com/kashwy)).
* Fix error "Read beyond last offset" for AsynchronousBoundedReadBuffer [#59630](https://github.com/ClickHouse/ClickHouse/pull/59630) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix query start time on non-initial queries [#59662](https://github.com/ClickHouse/ClickHouse/pull/59662) ([Raúl Marín](https://github.com/Algunenano)).
* rabbitmq: fix having neither acked nor nacked messages [#59775](https://github.com/ClickHouse/ClickHouse/pull/59775) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix parsing of partition expressions surrounded by parens [#59901](https://github.com/ClickHouse/ClickHouse/pull/59901) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Fix optimize_uniq_to_count removing the column alias [#60026](https://github.com/ClickHouse/ClickHouse/pull/60026) ([Raúl Marín](https://github.com/Algunenano)).
* Fix cosineDistance crash with Nullable [#60150](https://github.com/ClickHouse/ClickHouse/pull/60150) ([Raúl Marín](https://github.com/Algunenano)).
* Hide sensitive info for s3queue [#60233](https://github.com/ClickHouse/ClickHouse/pull/60233) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix deadlock in parallel parsing when lots of rows are skipped due to errors [#60516](https://github.com/ClickHouse/ClickHouse/pull/60516) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix_max_query_size_for_kql_compound_operator: [#60534](https://github.com/ClickHouse/ClickHouse/pull/60534) ([Yong Wang](https://github.com/kashwy)).
* Reduce the number of read rows from `system.numbers` [#60546](https://github.com/ClickHouse/ClickHouse/pull/60546) ([JackyWoo](https://github.com/JackyWoo)).
* Fix buffer overflow in CompressionCodecMultiple [#60731](https://github.com/ClickHouse/ClickHouse/pull/60731) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove nonsense from SQL/JSON [#60738](https://github.com/ClickHouse/ClickHouse/pull/60738) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Prevent setting custom metadata headers on unsupported multipart upload operations [#60748](https://github.com/ClickHouse/ClickHouse/pull/60748) ([Francisco J. Jurado Moreno](https://github.com/Beetelbrox)).
* Fix crash in arrayEnumerateRanked [#60764](https://github.com/ClickHouse/ClickHouse/pull/60764) ([Raúl Marín](https://github.com/Algunenano)).
* Fix crash when using input() in INSERT SELECT JOIN [#60765](https://github.com/ClickHouse/ClickHouse/pull/60765) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix crash with different allow_experimental_analyzer value in subqueries [#60770](https://github.com/ClickHouse/ClickHouse/pull/60770) ([Dmitry Novik](https://github.com/novikd)).
* Remove recursion when reading from S3 [#60849](https://github.com/ClickHouse/ClickHouse/pull/60849) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix Keeper reconfig for standalone binary [#61233](https://github.com/ClickHouse/ClickHouse/pull/61233) ([Antonio Andelic](https://github.com/antonio2368)).

#### CI Fix or Improvement (changelog entry is not required)

* Backported in [#60767](https://github.com/ClickHouse/ClickHouse/issues/60767): Decoupled changes from [#60408](https://github.com/ClickHouse/ClickHouse/issues/60408). [#60553](https://github.com/ClickHouse/ClickHouse/pull/60553) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#60582](https://github.com/ClickHouse/ClickHouse/issues/60582): Arm and amd docker build jobs used similar job names and thus overwrote job reports; aarch64 and amd64 suffixes were added to fix this. [#60554](https://github.com/ClickHouse/ClickHouse/pull/60554) ([Max K.](https://github.com/maxknv)).
* Backported in [#61041](https://github.com/ClickHouse/ClickHouse/issues/61041): Debug and fix markreleaseready. [#60611](https://github.com/ClickHouse/ClickHouse/pull/60611) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#61030](https://github.com/ClickHouse/ClickHouse/issues/61030): ... [#61022](https://github.com/ClickHouse/ClickHouse/pull/61022) ([Max K.](https://github.com/maxknv)).
* Backported in [#61224](https://github.com/ClickHouse/ClickHouse/issues/61224): ... [#61183](https://github.com/ClickHouse/ClickHouse/pull/61183) ([Han Fei](https://github.com/hanfei1991)).
* Backported in [#61190](https://github.com/ClickHouse/ClickHouse/issues/61190): ... [#61185](https://github.com/ClickHouse/ClickHouse/pull/61185) ([Max K.](https://github.com/maxknv)).

#### NO CL ENTRY

* NO CL ENTRY: 'Revert "Backport [#59798](https://github.com/ClickHouse/ClickHouse/issues/59798) to 23.12: CI: do not reuse builds on release branches"'. [#59979](https://github.com/ClickHouse/ClickHouse/pull/59979) ([Max K.](https://github.com/maxknv)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* CI: move ci-specifics from job scripts to ci.py [#58516](https://github.com/ClickHouse/ClickHouse/pull/58516) ([Max K.](https://github.com/maxknv)).
* Make ZooKeeper actually sequentially consistent [#59735](https://github.com/ClickHouse/ClickHouse/pull/59735) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix special build reports in release branches [#59797](https://github.com/ClickHouse/ClickHouse/pull/59797) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* CI: do not reuse builds on release branches [#59798](https://github.com/ClickHouse/ClickHouse/pull/59798) ([Max K.](https://github.com/maxknv)).
* Fix mark release ready [#59994](https://github.com/ClickHouse/ClickHouse/pull/59994) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Ability to detect undead ZooKeeper sessions [#60044](https://github.com/ClickHouse/ClickHouse/pull/60044) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Detect io_uring in tests [#60373](https://github.com/ClickHouse/ClickHouse/pull/60373) ([Azat Khuzhin](https://github.com/azat)).
* Cancel PipelineExecutor properly in case of exception in spawnThreads [#60499](https://github.com/ClickHouse/ClickHouse/pull/60499) ([Kruglov Pavel](https://github.com/Avogar)).
* Remove broken test while we fix it [#60547](https://github.com/ClickHouse/ClickHouse/pull/60547) ([Raúl Marín](https://github.com/Algunenano)).
* Speed up cctools building [#61011](https://github.com/ClickHouse/ClickHouse/pull/61011) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

24
docs/changelogs/v23.12.6.19-stable.md
Normal file
@ -0,0 +1,24 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v23.12.6.19-stable (40080a3c2a4) FIXME as compared to v23.12.5.81-stable (a0fbe3ae813)

#### Bug Fix (user-visible misbehavior in an official stable release)

* Improve isolation of query cache entries under re-created users or role switches [#58611](https://github.com/ClickHouse/ClickHouse/pull/58611) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix possible incorrect result of aggregate function `uniqExact` [#61257](https://github.com/ClickHouse/ClickHouse/pull/61257) ([Anton Popov](https://github.com/CurtizJ)).
* Fix consecutive keys optimization for nullable keys [#61393](https://github.com/ClickHouse/ClickHouse/pull/61393) ([Anton Popov](https://github.com/CurtizJ)).
* Fix string search with const position [#61547](https://github.com/ClickHouse/ClickHouse/pull/61547) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix crash in `multiSearchAllPositionsCaseInsensitiveUTF8` for incorrect UTF-8 [#61749](https://github.com/ClickHouse/ClickHouse/pull/61749) ([pufit](https://github.com/pufit)).

#### CI Fix or Improvement (changelog entry is not required)

* Backported in [#61429](https://github.com/ClickHouse/ClickHouse/issues/61429):. [#61374](https://github.com/ClickHouse/ClickHouse/pull/61374) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#61486](https://github.com/ClickHouse/ClickHouse/issues/61486): ... [#61441](https://github.com/ClickHouse/ClickHouse/pull/61441) ([Max K.](https://github.com/maxknv)).
* Backported in [#61641](https://github.com/ClickHouse/ClickHouse/issues/61641):. [#61592](https://github.com/ClickHouse/ClickHouse/pull/61592) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#61811](https://github.com/ClickHouse/ClickHouse/issues/61811): ![Screenshot_20240323_025055](https://github.com/ClickHouse/ClickHouse/assets/18581488/ccaab212-a1d3-4dfb-8d56-b1991760b6bf). [#61801](https://github.com/ClickHouse/ClickHouse/pull/61801) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

24
docs/changelogs/v23.3.21.26-lts.md
Normal file
@ -0,0 +1,24 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v23.3.21.26-lts (d9672a3731f) FIXME as compared to v23.3.20.27-lts (cc974ba4f81)

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix reading from sparse columns after restart [#49660](https://github.com/ClickHouse/ClickHouse/pull/49660) ([Anton Popov](https://github.com/CurtizJ)).
* Fix buffer overflow in CompressionCodecMultiple [#60731](https://github.com/ClickHouse/ClickHouse/pull/60731) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove nonsense from SQL/JSON [#60738](https://github.com/ClickHouse/ClickHouse/pull/60738) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix crash in arrayEnumerateRanked [#60764](https://github.com/ClickHouse/ClickHouse/pull/60764) ([Raúl Marín](https://github.com/Algunenano)).
* Fix crash when using input() in INSERT SELECT JOIN [#60765](https://github.com/ClickHouse/ClickHouse/pull/60765) ([Kruglov Pavel](https://github.com/Avogar)).
* Remove recursion when reading from S3 [#60849](https://github.com/ClickHouse/ClickHouse/pull/60849) ([Antonio Andelic](https://github.com/antonio2368)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Cancel PipelineExecutor properly in case of exception in spawnThreads [#57104](https://github.com/ClickHouse/ClickHouse/pull/57104) ([Kruglov Pavel](https://github.com/Avogar)).
* Detect io_uring in tests [#60373](https://github.com/ClickHouse/ClickHouse/pull/60373) ([Azat Khuzhin](https://github.com/azat)).
* Cancel PipelineExecutor properly in case of exception in spawnThreads [#60499](https://github.com/ClickHouse/ClickHouse/pull/60499) ([Kruglov Pavel](https://github.com/Avogar)).

13
docs/changelogs/v23.3.22.3-lts.md
Normal file
@ -0,0 +1,13 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v23.3.22.3-lts (04075bf96a1) FIXME as compared to v23.3.21.26-lts (d9672a3731f)

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix crash in `multiSearchAllPositionsCaseInsensitiveUTF8` for incorrect UTF-8 [#61749](https://github.com/ClickHouse/ClickHouse/pull/61749) ([pufit](https://github.com/pufit)).

30
docs/changelogs/v23.8.11.28-lts.md
Normal file
@ -0,0 +1,30 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v23.8.11.28-lts (31879d2ab4c) FIXME as compared to v23.8.10.43-lts (a278225bba9)

#### Improvement
* Backported in [#60828](https://github.com/ClickHouse/ClickHouse/issues/60828): Update tzdata to 2024a. [#60768](https://github.com/ClickHouse/ClickHouse/pull/60768) ([Raúl Marín](https://github.com/Algunenano)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix buffer overflow in CompressionCodecMultiple [#60731](https://github.com/ClickHouse/ClickHouse/pull/60731) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove nonsense from SQL/JSON [#60738](https://github.com/ClickHouse/ClickHouse/pull/60738) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix crash in arrayEnumerateRanked [#60764](https://github.com/ClickHouse/ClickHouse/pull/60764) ([Raúl Marín](https://github.com/Algunenano)).
* Fix crash when using input() in INSERT SELECT JOIN [#60765](https://github.com/ClickHouse/ClickHouse/pull/60765) ([Kruglov Pavel](https://github.com/Avogar)).
* Remove recursion when reading from S3 [#60849](https://github.com/ClickHouse/ClickHouse/pull/60849) ([Antonio Andelic](https://github.com/antonio2368)).

#### NO CL ENTRY

* NO CL ENTRY: 'Use the current branch test-utils to build cctools'. [#61276](https://github.com/ClickHouse/ClickHouse/pull/61276) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Cancel PipelineExecutor properly in case of exception in spawnThreads [#57104](https://github.com/ClickHouse/ClickHouse/pull/57104) ([Kruglov Pavel](https://github.com/Avogar)).
* Detect io_uring in tests [#60373](https://github.com/ClickHouse/ClickHouse/pull/60373) ([Azat Khuzhin](https://github.com/azat)).
* Cancel PipelineExecutor properly in case of exception in spawnThreads [#60499](https://github.com/ClickHouse/ClickHouse/pull/60499) ([Kruglov Pavel](https://github.com/Avogar)).

20
docs/changelogs/v23.8.12.13-lts.md
Normal file
@ -0,0 +1,20 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v23.8.12.13-lts (bdbd0d87e5d) FIXME as compared to v23.8.11.28-lts (31879d2ab4c)

#### Bug Fix (user-visible misbehavior in an official stable release)

* Improve isolation of query cache entries under re-created users or role switches [#58611](https://github.com/ClickHouse/ClickHouse/pull/58611) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix string search with const position [#61547](https://github.com/ClickHouse/ClickHouse/pull/61547) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix crash in `multiSearchAllPositionsCaseInsensitiveUTF8` for incorrect UTF-8 [#61749](https://github.com/ClickHouse/ClickHouse/pull/61749) ([pufit](https://github.com/pufit)).

#### CI Fix or Improvement (changelog entry is not required)

* Backported in [#61428](https://github.com/ClickHouse/ClickHouse/issues/61428):. [#61374](https://github.com/ClickHouse/ClickHouse/pull/61374) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#61484](https://github.com/ClickHouse/ClickHouse/issues/61484): ... [#61441](https://github.com/ClickHouse/ClickHouse/pull/61441) ([Max K.](https://github.com/maxknv)).

26
docs/changelogs/v24.1.7.18-stable.md
Normal file
@ -0,0 +1,26 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.1.7.18-stable (90925babd78) FIXME as compared to v24.1.6.52-stable (fa09f677bc9)

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix deadlock in parallel parsing when lots of rows are skipped due to errors [#60516](https://github.com/ClickHouse/ClickHouse/pull/60516) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix_max_query_size_for_kql_compound_operator: [#60534](https://github.com/ClickHouse/ClickHouse/pull/60534) ([Yong Wang](https://github.com/kashwy)).
* Fix crash with different allow_experimental_analyzer value in subqueries [#60770](https://github.com/ClickHouse/ClickHouse/pull/60770) ([Dmitry Novik](https://github.com/novikd)).
* Fix Keeper reconfig for standalone binary [#61233](https://github.com/ClickHouse/ClickHouse/pull/61233) ([Antonio Andelic](https://github.com/antonio2368)).

#### CI Fix or Improvement (changelog entry is not required)

* Backported in [#61043](https://github.com/ClickHouse/ClickHouse/issues/61043): Debug and fix markreleaseready. [#60611](https://github.com/ClickHouse/ClickHouse/pull/60611) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#61168](https://github.com/ClickHouse/ClickHouse/issues/61168): Just a preparation for the merge queue support. [#61099](https://github.com/ClickHouse/ClickHouse/pull/61099) ([Max K.](https://github.com/maxknv)).
* Backported in [#61192](https://github.com/ClickHouse/ClickHouse/issues/61192): ... [#61185](https://github.com/ClickHouse/ClickHouse/pull/61185) ([Max K.](https://github.com/maxknv)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Cancel PipelineExecutor properly in case of exception in spawnThreads [#60499](https://github.com/ClickHouse/ClickHouse/pull/60499) ([Kruglov Pavel](https://github.com/Avogar)).

32
docs/changelogs/v24.1.8.22-stable.md
Normal file
@ -0,0 +1,32 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.1.8.22-stable (7fb8f96d3da) FIXME as compared to v24.1.7.18-stable (90925babd78)

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix possible incorrect result of aggregate function `uniqExact` [#61257](https://github.com/ClickHouse/ClickHouse/pull/61257) ([Anton Popov](https://github.com/CurtizJ)).
* Fix consecutive keys optimization for nullable keys [#61393](https://github.com/ClickHouse/ClickHouse/pull/61393) ([Anton Popov](https://github.com/CurtizJ)).
* Fix bug when reading system.parts using UUID (issue 61220). [#61479](https://github.com/ClickHouse/ClickHouse/pull/61479) ([Dan Wu](https://github.com/wudanzy)).
* Fix client `-s` argument [#61530](https://github.com/ClickHouse/ClickHouse/pull/61530) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix string search with const position [#61547](https://github.com/ClickHouse/ClickHouse/pull/61547) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix crash in `multiSearchAllPositionsCaseInsensitiveUTF8` for incorrect UTF-8 [#61749](https://github.com/ClickHouse/ClickHouse/pull/61749) ([pufit](https://github.com/pufit)).

#### CI Fix or Improvement (changelog entry is not required)

* Backported in [#61431](https://github.com/ClickHouse/ClickHouse/issues/61431):. [#61374](https://github.com/ClickHouse/ClickHouse/pull/61374) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#61488](https://github.com/ClickHouse/ClickHouse/issues/61488): ... [#61441](https://github.com/ClickHouse/ClickHouse/pull/61441) ([Max K.](https://github.com/maxknv)).
* Backported in [#61642](https://github.com/ClickHouse/ClickHouse/issues/61642):. [#61592](https://github.com/ClickHouse/ClickHouse/pull/61592) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### NO CL ENTRY

* NO CL ENTRY: 'Revert "Backport [#61479](https://github.com/ClickHouse/ClickHouse/issues/61479) to 24.1: Fix bug when reading system.parts using UUID (issue 61220)."'. [#61775](https://github.com/ClickHouse/ClickHouse/pull/61775) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Speed up cctools building [#61011](https://github.com/ClickHouse/ClickHouse/pull/61011) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

55
docs/changelogs/v24.2.2.71-stable.md
Normal file
@ -0,0 +1,55 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.2.2.71-stable (9293d361e72) FIXME as compared to v24.2.1.2248-stable (891689a4150)

#### Improvement
* Backported in [#60834](https://github.com/ClickHouse/ClickHouse/issues/60834): Update tzdata to 2024a. [#60768](https://github.com/ClickHouse/ClickHouse/pull/60768) ([Raúl Marín](https://github.com/Algunenano)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* PartsSplitter invalid ranges for the same part [#60041](https://github.com/ClickHouse/ClickHouse/pull/60041) ([Maksim Kita](https://github.com/kitaisreal)).
* Try to avoid calculation of scalar subqueries for CREATE TABLE. [#60464](https://github.com/ClickHouse/ClickHouse/pull/60464) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix deadlock in parallel parsing when lots of rows are skipped due to errors [#60516](https://github.com/ClickHouse/ClickHouse/pull/60516) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix_max_query_size_for_kql_compound_operator: [#60534](https://github.com/ClickHouse/ClickHouse/pull/60534) ([Yong Wang](https://github.com/kashwy)).
* Reduce the number of read rows from `system.numbers` [#60546](https://github.com/ClickHouse/ClickHouse/pull/60546) ([JackyWoo](https://github.com/JackyWoo)).
* Don't output number tips for date types [#60577](https://github.com/ClickHouse/ClickHouse/pull/60577) ([Raúl Marín](https://github.com/Algunenano)).
* Fix buffer overflow in CompressionCodecMultiple [#60731](https://github.com/ClickHouse/ClickHouse/pull/60731) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove nonsense from SQL/JSON [#60738](https://github.com/ClickHouse/ClickHouse/pull/60738) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Prevent setting custom metadata headers on unsupported multipart upload operations [#60748](https://github.com/ClickHouse/ClickHouse/pull/60748) ([Francisco J. Jurado Moreno](https://github.com/Beetelbrox)).
* Fix crash in arrayEnumerateRanked [#60764](https://github.com/ClickHouse/ClickHouse/pull/60764) ([Raúl Marín](https://github.com/Algunenano)).
* Fix crash when using input() in INSERT SELECT JOIN [#60765](https://github.com/ClickHouse/ClickHouse/pull/60765) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix crash with different allow_experimental_analyzer value in subqueries [#60770](https://github.com/ClickHouse/ClickHouse/pull/60770) ([Dmitry Novik](https://github.com/novikd)).
* Remove recursion when reading from S3 [#60849](https://github.com/ClickHouse/ClickHouse/pull/60849) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix multiple bugs in groupArraySorted [#61203](https://github.com/ClickHouse/ClickHouse/pull/61203) ([Raúl Marín](https://github.com/Algunenano)).
* Fix Keeper reconfig for standalone binary [#61233](https://github.com/ClickHouse/ClickHouse/pull/61233) ([Antonio Andelic](https://github.com/antonio2368)).

#### CI Fix or Improvement (changelog entry is not required)

* Backported in [#60758](https://github.com/ClickHouse/ClickHouse/issues/60758): Decoupled changes from [#60408](https://github.com/ClickHouse/ClickHouse/issues/60408). [#60553](https://github.com/ClickHouse/ClickHouse/pull/60553) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#60706](https://github.com/ClickHouse/ClickHouse/issues/60706): Eliminates the need to provide input args to docker server jobs to clean yml files. [#60602](https://github.com/ClickHouse/ClickHouse/pull/60602) ([Max K.](https://github.com/maxknv)).
* Backported in [#61045](https://github.com/ClickHouse/ClickHouse/issues/61045): Debug and fix markreleaseready. [#60611](https://github.com/ClickHouse/ClickHouse/pull/60611) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#60721](https://github.com/ClickHouse/ClickHouse/issues/60721): Fix build_report job so that it's defined by ci_config only (not yml file). [#60613](https://github.com/ClickHouse/ClickHouse/pull/60613) ([Max K.](https://github.com/maxknv)).
* Backported in [#60668](https://github.com/ClickHouse/ClickHouse/issues/60668): Do not await CI pending jobs on release branches; decrease wait timeout to fit into the GH job timeout. [#60652](https://github.com/ClickHouse/ClickHouse/pull/60652) ([Max K.](https://github.com/maxknv)).
* Backported in [#60863](https://github.com/ClickHouse/ClickHouse/issues/60863): Set limited number of builds for "special build check" report in backports. [#60850](https://github.com/ClickHouse/ClickHouse/pull/60850) ([Max K.](https://github.com/maxknv)).
* Backported in [#60946](https://github.com/ClickHouse/ClickHouse/issues/60946): ... [#60935](https://github.com/ClickHouse/ClickHouse/pull/60935) ([Max K.](https://github.com/maxknv)).
* Backported in [#60972](https://github.com/ClickHouse/ClickHouse/issues/60972): ... [#60952](https://github.com/ClickHouse/ClickHouse/pull/60952) ([Max K.](https://github.com/maxknv)).
* Backported in [#60980](https://github.com/ClickHouse/ClickHouse/issues/60980): ... [#60958](https://github.com/ClickHouse/ClickHouse/pull/60958) ([Max K.](https://github.com/maxknv)).
* Backported in [#61170](https://github.com/ClickHouse/ClickHouse/issues/61170): Just a preparation for the merge queue support. [#61099](https://github.com/ClickHouse/ClickHouse/pull/61099) ([Max K.](https://github.com/maxknv)).
* Backported in [#61181](https://github.com/ClickHouse/ClickHouse/issues/61181): ... [#61172](https://github.com/ClickHouse/ClickHouse/pull/61172) ([Max K.](https://github.com/maxknv)).
* Backported in [#61228](https://github.com/ClickHouse/ClickHouse/issues/61228): ... [#61183](https://github.com/ClickHouse/ClickHouse/pull/61183) ([Han Fei](https://github.com/hanfei1991)).
* Backported in [#61194](https://github.com/ClickHouse/ClickHouse/issues/61194): ... [#61185](https://github.com/ClickHouse/ClickHouse/pull/61185) ([Max K.](https://github.com/maxknv)).
* Backported in [#61244](https://github.com/ClickHouse/ClickHouse/issues/61244): ... [#61214](https://github.com/ClickHouse/ClickHouse/pull/61214) ([Max K.](https://github.com/maxknv)).
* Backported in [#61388](https://github.com/ClickHouse/ClickHouse/issues/61388):. [#61373](https://github.com/ClickHouse/ClickHouse/pull/61373) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* CI: make workflow yml abstract [#60421](https://github.com/ClickHouse/ClickHouse/pull/60421) ([Max K.](https://github.com/maxknv)).
* Cancel PipelineExecutor properly in case of exception in spawnThreads [#60499](https://github.com/ClickHouse/ClickHouse/pull/60499) ([Kruglov Pavel](https://github.com/Avogar)).
* General sanity in function `seriesOutliersDetectTukey` [#60535](https://github.com/ClickHouse/ClickHouse/pull/60535) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Speed up cctools building [#61011](https://github.com/ClickHouse/ClickHouse/pull/61011) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

537
docs/changelogs/v24.3.1.2672-lts.md
Normal file
@ -0,0 +1,537 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.3.1.2672-lts (2c5c589a882) FIXME as compared to v24.2.1.2248-stable (891689a4150)

#### Backward Incompatible Change
* Don't allow setting `max_parallel_replicas` to 0, as it doesn't make sense. Setting it to 0 could lead to unexpected logical errors. Closes [#60140](https://github.com/ClickHouse/ClickHouse/issues/60140). [#60430](https://github.com/ClickHouse/ClickHouse/pull/60430) ([Kruglov Pavel](https://github.com/Avogar)).
* Change the column name from `duration_ms` to `duration_microseconds` in the `system.zookeeper` table to reflect the reality that the duration is in the microsecond resolution. [#60774](https://github.com/ClickHouse/ClickHouse/pull/60774) ([Duc Canh Le](https://github.com/canhld94)).
* Reject incoming INSERT queries when the query-level settings `async_insert` and `deduplicate_blocks_in_dependent_materialized_views` are enabled together. This behaviour is controlled by the setting `throw_if_deduplication_in_dependent_materialized_views_enabled_with_async_insert` and enabled by default. This is a continuation of https://github.com/ClickHouse/ClickHouse/pull/59699 needed to unblock https://github.com/ClickHouse/ClickHouse/pull/59915. [#60888](https://github.com/ClickHouse/ClickHouse/pull/60888) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Utility `clickhouse-copier` is moved to a separate repository on GitHub: https://github.com/ClickHouse/copier. It is no longer included in the bundle but is still available as a separate download. This closes: [#60734](https://github.com/ClickHouse/ClickHouse/issues/60734) This closes: [#60540](https://github.com/ClickHouse/ClickHouse/issues/60540) This closes: [#60250](https://github.com/ClickHouse/ClickHouse/issues/60250) This closes: [#52917](https://github.com/ClickHouse/ClickHouse/issues/52917) This closes: [#51140](https://github.com/ClickHouse/ClickHouse/issues/51140) This closes: [#47517](https://github.com/ClickHouse/ClickHouse/issues/47517) This closes: [#47189](https://github.com/ClickHouse/ClickHouse/issues/47189) This closes: [#46598](https://github.com/ClickHouse/ClickHouse/issues/46598) This closes: [#40257](https://github.com/ClickHouse/ClickHouse/issues/40257) This closes: [#36504](https://github.com/ClickHouse/ClickHouse/issues/36504) This closes: [#35485](https://github.com/ClickHouse/ClickHouse/issues/35485) This closes: [#33702](https://github.com/ClickHouse/ClickHouse/issues/33702) This closes: [#26702](https://github.com/ClickHouse/ClickHouse/issues/26702). [#61058](https://github.com/ClickHouse/ClickHouse/pull/61058) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* To increase compatibility with MySQL, function `locate` now accepts arguments `(needle, haystack[, start_pos])` by default. The previous behavior `(haystack, needle[, start_pos])` can be restored by setting `function_locate_has_mysql_compatible_argument_order = 0` (see the usage sketch after this list). [#61092](https://github.com/ClickHouse/ClickHouse/pull/61092) ([Robert Schulze](https://github.com/rschu1ze)).
* The obsolete in-memory data parts have been deprecated since version 23.5 and have not been supported since version 23.10. Now the remaining code is removed. Continuation of [#55186](https://github.com/ClickHouse/ClickHouse/issues/55186) and [#45409](https://github.com/ClickHouse/ClickHouse/issues/45409). It is unlikely that you have used in-memory data parts because they were available only before version 23.5 and only when you enabled them manually by specifying the corresponding SETTINGS for a MergeTree table. To check if you have in-memory data parts, run the following query: `SELECT part_type, count() FROM system.parts GROUP BY part_type ORDER BY part_type`. To disable the usage of in-memory data parts, do `ALTER TABLE ... MODIFY SETTING min_bytes_for_compact_part = DEFAULT, min_rows_for_compact_part = DEFAULT`. Before upgrading from old ClickHouse releases, first check that you don't have in-memory data parts. If there are in-memory data parts, disable them first, then wait while there are no in-memory data parts and continue the upgrade. [#61127](https://github.com/ClickHouse/ClickHouse/pull/61127) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Forbid `SimpleAggregateFunction` in `ORDER BY` of `MergeTree` tables (like `AggregateFunction` is forbidden, though that one is forbidden because it is not comparable) by default (use `allow_suspicious_primary_key` to allow them). [#61399](https://github.com/ClickHouse/ClickHouse/pull/61399) ([Azat Khuzhin](https://github.com/azat)).
* ClickHouse allows arbitrary binary data in the String data type, which is typically UTF-8. Parquet/ORC/Arrow Strings only support UTF-8. That's why you can choose which Arrow data type to use for the ClickHouse String data type - String or Binary. This is controlled by the settings `output_format_parquet_string_as_string`, `output_format_orc_string_as_string`, `output_format_arrow_string_as_string`. While Binary would be more correct and compatible, using String by default will correspond to user expectations in most cases. Parquet/ORC/Arrow supports many compression methods, including lz4 and zstd. ClickHouse supports each and every compression method. Some inferior tools lack support for the faster `lz4` compression method, that's why we set `zstd` by default. This is controlled by the settings `output_format_parquet_compression_method`, `output_format_orc_compression_method`, and `output_format_arrow_compression_method`. We changed the default to `zstd` for Parquet and ORC, but not Arrow (it is emphasized for low-level usages). [#61817](https://github.com/ClickHouse/ClickHouse/pull/61817) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* In the new ClickHouse version, the functions `geoDistance`, `greatCircleDistance`, and `greatCircleAngle` will use 64-bit double precision floating point data type for internal calculations and return type if all the arguments are Float64. This closes [#58476](https://github.com/ClickHouse/ClickHouse/issues/58476). In previous versions, the function always used Float32. You can switch to the old behavior by setting `geo_distance_returns_float64_on_float64_arguments` to `false` or setting `compatibility` to `24.2` or earlier. [#61848](https://github.com/ClickHouse/ClickHouse/pull/61848) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
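Since the `locate` change above is backward incompatible, a short usage sketch may help; the return values assume 1-based positions, as for `position`:

```sql
-- New default (MySQL-compatible) argument order: needle first.
SELECT locate('ck', 'clickhouse');  -- 4

-- Restore the previous (haystack, needle[, start_pos]) order:
SET function_locate_has_mysql_compatible_argument_order = 0;
SELECT locate('clickhouse', 'ck');  -- 4 again, with the old order
```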

#### New Feature
* `topK`/`topKWeighted` support a mode that returns the count of values and its error. [#54508](https://github.com/ClickHouse/ClickHouse/pull/54508) ([UnamedRus](https://github.com/UnamedRus)).
* Add `generate_series` as a table function. This function generates a table with an arithmetic progression of natural numbers (see the sketch after this list). [#59390](https://github.com/ClickHouse/ClickHouse/pull/59390) ([divanik](https://github.com/divanik)).
* Support reading and writing backups as tar archives. [#59535](https://github.com/ClickHouse/ClickHouse/pull/59535) ([josh-hildred](https://github.com/josh-hildred)).
* Implemented support for S3Express buckets. [#59965](https://github.com/ClickHouse/ClickHouse/pull/59965) ([Nikita Taranov](https://github.com/nickitat)).
* Allow attaching parts from a different disk: attach a partition from a table on another disk using copy instead of hard link (such as an instant table), and attach a partition using copy when the hard link fails even on the same disk. [#60112](https://github.com/ClickHouse/ClickHouse/pull/60112) ([Unalian](https://github.com/Unalian)).
* Added function `toMillisecond` which returns the millisecond component for values of type `DateTime` or `DateTime64`. [#60281](https://github.com/ClickHouse/ClickHouse/pull/60281) ([Shaun Struwig](https://github.com/Blargian)).
* Make all format names case insensitive, like Tsv, or TSV, or tsv, or even rowbinary. [#60420](https://github.com/ClickHouse/ClickHouse/pull/60420) ([豪肥肥](https://github.com/HowePa)).
* Add four properties to the `StorageMemory` (memory-engine): `min_bytes_to_keep`, `max_bytes_to_keep`, `min_rows_to_keep` and `max_rows_to_keep`. Add tests to reflect the new changes. Update `memory.md` documentation. Add a table `context` property to `MemorySink` to enable access to table parameter bounds. [#60612](https://github.com/ClickHouse/ClickHouse/pull/60612) ([Jake Bamrah](https://github.com/JakeBamrah)).
* Added function `toMillisecond` which returns the millisecond component for values of type `DateTime` or `DateTime64`. [#60649](https://github.com/ClickHouse/ClickHouse/pull/60649) ([Robert Schulze](https://github.com/rschu1ze)).
* Separate limits on number of waiting and executing queries. Added new server setting `max_waiting_queries` that limits the number of queries waiting due to `async_load_databases`. Existing limits on the number of executing queries no longer count waiting queries. [#61053](https://github.com/ClickHouse/ClickHouse/pull/61053) ([Sergei Trifonov](https://github.com/serxa)).
* Add support for `ATTACH PARTITION ALL`. [#61107](https://github.com/ClickHouse/ClickHouse/pull/61107) ([Kirill Nikiforov](https://github.com/allmazz)).
* Add a new function, `getClientHTTPHeader`. This closes [#54665](https://github.com/ClickHouse/ClickHouse/issues/54665). Co-authored with @lingtaolf. [#61820](https://github.com/ClickHouse/ClickHouse/pull/61820) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
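A hedged sketch of two of the new functions above, `generate_series` and `toMillisecond`; the result column name for `generate_series` and the exact literals are illustrative assumptions, not taken from the PRs:

```sql
-- Table function producing an arithmetic progression of natural numbers.
SELECT * FROM generate_series(1, 5);      -- rows: 1, 2, 3, 4, 5
SELECT * FROM generate_series(0, 10, 2);  -- rows: 0, 2, 4, 6, 8, 10

-- Millisecond component of a DateTime64 value.
SELECT toMillisecond(toDateTime64('2024-03-01 12:00:00.123', 3));  -- 123
```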

#### Performance Improvement
* Improve the performance of the serialized aggregation method when involving multiple [nullable] columns. This is a general version of [#51399](https://github.com/ClickHouse/ClickHouse/issues/51399) that doesn't compromise on abstraction integrity. [#55809](https://github.com/ClickHouse/ClickHouse/pull/55809) ([Amos Bird](https://github.com/amosbird)).
* Lazily build join output to improve performance of ALL join. [#58278](https://github.com/ClickHouse/ClickHouse/pull/58278) ([LiuNeng](https://github.com/liuneng1994)).
* Improvements to aggregate functions ArgMin / ArgMax / any / anyLast / anyHeavy, as well as `ORDER BY {u8/u16/u32/u64/i8/i16/u32/i64} LIMIT 1` queries. [#58640](https://github.com/ClickHouse/ClickHouse/pull/58640) ([Raúl Marín](https://github.com/Algunenano)).
* Trivial optimization of column filtering: avoid filtering columns whose underlying data type is not a number with `result_size_hint = -1`. Peak memory can be reduced to 44% of the original in some cases. [#59698](https://github.com/ClickHouse/ClickHouse/pull/59698) ([李扬](https://github.com/taiyang-li)).
* If the table's primary key contains mostly useless columns, don't keep them in memory. This is controlled by a new setting `primary_key_ratio_of_unique_prefix_values_to_skip_suffix_columns` with the value `0.9` by default, which means: for a composite primary key, if a column changes its value for at least 0.9 of all the times, the next columns after it will not be loaded. [#60255](https://github.com/ClickHouse/ClickHouse/pull/60255) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Execute the multiIf function columnarly when the result type's underlying type is a number. [#60384](https://github.com/ClickHouse/ClickHouse/pull/60384) ([李扬](https://github.com/taiyang-li)).
* Faster (almost 2x) mutexes (they were slower due to ThreadFuzzer). [#60823](https://github.com/ClickHouse/ClickHouse/pull/60823) ([Azat Khuzhin](https://github.com/azat)).
* Move connection drain from prepare to work, and drain multiple connections in parallel. [#60845](https://github.com/ClickHouse/ClickHouse/pull/60845) ([lizhuoyu5](https://github.com/lzydmxy)).
* Optimize insertManyFrom of nullable number or nullable string. [#60846](https://github.com/ClickHouse/ClickHouse/pull/60846) ([李扬](https://github.com/taiyang-li)).
* Optimized function `dotProduct` to omit unnecessary and expensive memory copies. [#60928](https://github.com/ClickHouse/ClickHouse/pull/60928) ([Robert Schulze](https://github.com/rschu1ze)).
* Operations with the filesystem cache will suffer less from the lock contention. [#61066](https://github.com/ClickHouse/ClickHouse/pull/61066) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Optimize ColumnString::replicate and prevent memcpySmallAllowReadWriteOverflow15Impl from being optimized to built-in memcpy. Close [#61074](https://github.com/ClickHouse/ClickHouse/issues/61074). ColumnString::replicate speeds up by 2.46x on x86-64. [#61075](https://github.com/ClickHouse/ClickHouse/pull/61075) ([李扬](https://github.com/taiyang-li)).
* 30x faster printing for 256-bit integers. [#61100](https://github.com/ClickHouse/ClickHouse/pull/61100) ([Raúl Marín](https://github.com/Algunenano)).
* If a query with a syntax error contained a COLUMNS matcher with a regular expression, the regular expression was compiled each time during the parser's backtracking, instead of being compiled once. This was a fundamental error. The compiled regexp was put into the AST. But the letter A in AST means "abstract", which means it should not contain heavyweight objects. Parts of the AST can be created and discarded during parsing, including a large amount of backtracking. This leads to slowness on the parsing side and consequently allows DoS by a readonly user. But the main problem is that it prevents progress in fuzzers. [#61543](https://github.com/ClickHouse/ClickHouse/pull/61543) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a new analyzer pass to optimize `IN` for a single value. [#61564](https://github.com/ClickHouse/ClickHouse/pull/61564) ([LiuNeng](https://github.com/liuneng1994)).
#### Improvement
|
||||
* While running the MODIFY COLUMN query for materialized views, check the inner table's structure to ensure every column exists. [#47427](https://github.com/ClickHouse/ClickHouse/pull/47427) ([sunny](https://github.com/sunny19930321)).
|
||||
* Added table `system.keywords` which contains all the keywords from parser. Mostly needed and will be used for better fuzzing and syntax highlighting. [#51808](https://github.com/ClickHouse/ClickHouse/pull/51808) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||
* Ordinary database engine is deprecated. You will receive a warning in clickhouse-client if your server is using it. This closes [#52229](https://github.com/ClickHouse/ClickHouse/issues/52229). [#56942](https://github.com/ClickHouse/ClickHouse/pull/56942) ([shabroo](https://github.com/shabroo)).
|
||||
* All zero copy locks related to a table have to be dropped when the table is dropped. The directory which contains these locks has to be removed also. [#57575](https://github.com/ClickHouse/ClickHouse/pull/57575) ([Sema Checherinda](https://github.com/CheSema)).
|
||||
* Allow declaring enum in external table structure. [#57857](https://github.com/ClickHouse/ClickHouse/pull/57857) ([Duc Canh Le](https://github.com/canhld94)).
|
||||
* Consider lightweight deleted rows when selecting parts to merge. [#58223](https://github.com/ClickHouse/ClickHouse/pull/58223) ([Zhuo Qiu](https://github.com/jewelzqiu)).
|
||||
* This PR makes http/https connections reusable for all uses cases. Even when response is 3xx or 4xx. [#58845](https://github.com/ClickHouse/ClickHouse/pull/58845) ([Sema Checherinda](https://github.com/CheSema)).
|
||||
* Added comments for columns for more system tables. Continuation of https://github.com/ClickHouse/ClickHouse/pull/58356. [#59016](https://github.com/ClickHouse/ClickHouse/pull/59016) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||
* Now we can use virtual columns in PREWHERE. It's worthwhile for non-const virtual columns like `_part_offset`. [#59033](https://github.com/ClickHouse/ClickHouse/pull/59033) ([Amos Bird](https://github.com/amosbird)).
|
||||
* Add ability to skip read-only replicas for INSERT into Distributed engine (Controlled with `distributed_insert_skip_read_only_replicas` setting, by default OFF - backward compatible). [#59176](https://github.com/ClickHouse/ClickHouse/pull/59176) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Instead using a constant key, now object storage generates key for determining remove objects capability. [#59495](https://github.com/ClickHouse/ClickHouse/pull/59495) ([Sema Checherinda](https://github.com/CheSema)).
|
||||
* Add positional pread in libhdfs3. If you want to call positional read in libhdfs3, use the hdfsPread function in hdfs.h as follows. `tSize hdfsPread(hdfsFS fs, hdfsFile file, void * buffer, tSize length, tOffset position);`. [#59624](https://github.com/ClickHouse/ClickHouse/pull/59624) ([M1eyu](https://github.com/M1eyu2018)).
|
||||
* Add asynchronous WriteBuffer for AzureBlobStorage similar to S3. [#59929](https://github.com/ClickHouse/ClickHouse/pull/59929) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
|
||||
* Allow "local" as object storage type instead of "local_blob_storage". [#60165](https://github.com/ClickHouse/ClickHouse/pull/60165) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Improved overall usability of virtual columns. Now it is allowed to use virtual columns in `PREWHERE` (it's worthwhile for non-const virtual columns like `_part_offset`). Now a builtin documentation is available for virtual columns as a comment of column in `DESCRIBE` query with enabled setting `describe_include_virtual_columns`. [#60205](https://github.com/ClickHouse/ClickHouse/pull/60205) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Parallel flush of pending INSERT blocks of Distributed engine on `DETACH`/server shutdown and `SYSTEM FLUSH DISTRIBUTED` (Parallelism will work only if you have multi disk policy for table (like everything in Distributed engine right now)). [#60225](https://github.com/ClickHouse/ClickHouse/pull/60225) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Filter setting is improper in `joinRightColumnsSwitchNullability`, resolve [#59625](https://github.com/ClickHouse/ClickHouse/issues/59625). [#60259](https://github.com/ClickHouse/ClickHouse/pull/60259) ([lgbo](https://github.com/lgbo-ustc)).
|
||||
* Add a setting to force read-through cache for merges. [#60308](https://github.com/ClickHouse/ClickHouse/pull/60308) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Issue [#57598](https://github.com/ClickHouse/ClickHouse/issues/57598) mentions a variant behaviour regarding transaction handling. An issued COMMIT/ROLLBACK when no transaction is active is reported as an error contrary to MySQL behaviour. [#60338](https://github.com/ClickHouse/ClickHouse/pull/60338) ([PapaToemmsn](https://github.com/PapaToemmsn)).
|
||||
* Added `none_only_active` mode for `distributed_ddl_output_mode` setting. [#60340](https://github.com/ClickHouse/ClickHouse/pull/60340) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Allow configuring HTTP redirect handlers for clickhouse-server. For example, you can make `/` redirect to the Play UI. [#60390](https://github.com/ClickHouse/ClickHouse/pull/60390) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* The advanced dashboard has slightly better colors for multi-line graphs. [#60391](https://github.com/ClickHouse/ClickHouse/pull/60391) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Function `substring` now has a new alias `byteSlice`. [#60494](https://github.com/ClickHouse/ClickHouse/pull/60494) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Renamed server setting `dns_cache_max_size` to `dns_cache_max_entries` to reduce ambiguity. [#60500](https://github.com/ClickHouse/ClickHouse/pull/60500) ([Kirill Nikiforov](https://github.com/allmazz)).
|
||||
* `SHOW INDEX | INDEXES | INDICES | KEYS` no longer sorts by the primary key columns (which was unintuitive). [#60514](https://github.com/ClickHouse/ClickHouse/pull/60514) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Keeper improvement: abort during startup if an invalid snapshot is detected to avoid data loss. [#60537](https://github.com/ClickHouse/ClickHouse/pull/60537) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Added MergeTree read split ranges into intersecting and non intersecting fault injection using `merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_fault_probability` setting. [#60548](https://github.com/ClickHouse/ClickHouse/pull/60548) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
* The Advanced dashboard now has controls always visible on scrolling. This allows you to add a new chart without scrolling up. [#60692](https://github.com/ClickHouse/ClickHouse/pull/60692) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* String types and Enums can be used in the same context, such as: arrays, UNION queries, conditional expressions. This closes [#60726](https://github.com/ClickHouse/ClickHouse/issues/60726). [#60727](https://github.com/ClickHouse/ClickHouse/pull/60727) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Update tzdata to 2024a. [#60768](https://github.com/ClickHouse/ClickHouse/pull/60768) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Support files without format extension in Filesystem database. [#60795](https://github.com/ClickHouse/ClickHouse/pull/60795) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Keeper improvement: support `leadership_expiry_ms` in Keeper's settings. [#60806](https://github.com/ClickHouse/ClickHouse/pull/60806) ([Brokenice0415](https://github.com/Brokenice0415)).
|
||||
* Always infer exponential numbers in JSON formats regardless of the setting `input_format_try_infer_exponent_floats`. Add setting `input_format_json_use_string_type_for_ambiguous_paths_in_named_tuples_inference_from_objects` that allows to use String type for ambiguous paths instead of an exception during named Tuples inference from JSON objects. [#60808](https://github.com/ClickHouse/ClickHouse/pull/60808) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Add support for `START TRANSACTION` syntax typically used in MySQL syntax, resolving https://github.com/ClickHouse/ClickHouse/discussions/60865. [#60886](https://github.com/ClickHouse/ClickHouse/pull/60886) ([Zach Naimon](https://github.com/ArctypeZach)).
|
||||
* Add a flag for SMJ to treat null as biggest/smallest. So the behavior can be compitable with other SQL systems, like Apache Spark. [#60896](https://github.com/ClickHouse/ClickHouse/pull/60896) ([loudongfeng](https://github.com/loudongfeng)).
|
||||
* Clickhouse version has been added to docker labels. Closes [#54224](https://github.com/ClickHouse/ClickHouse/issues/54224). [#60949](https://github.com/ClickHouse/ClickHouse/pull/60949) ([Nikolay Monkov](https://github.com/nikmonkov)).
|
||||
* Add a setting `parallel_replicas_allow_in_with_subquery = 1` which allows subqueries for IN work with parallel replicas. [#60950](https://github.com/ClickHouse/ClickHouse/pull/60950) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* DNSResolver shuffles the set of resolved IPs. [#60965](https://github.com/ClickHouse/ClickHouse/pull/60965) ([Sema Checherinda](https://github.com/CheSema)).
* Support detecting the output format by file extension in `clickhouse-client` and `clickhouse-local`. [#61036](https://github.com/ClickHouse/ClickHouse/pull/61036) ([豪肥肥](https://github.com/HowePa)).
* Check for memory limit updates periodically. [#61049](https://github.com/ClickHouse/ClickHouse/pull/61049) ([Han Fei](https://github.com/hanfei1991)).
* Enable processors profiling (time spent, input and output bytes for sorting, aggregation, ...) by default. [#61096](https://github.com/ClickHouse/ClickHouse/pull/61096) ([Azat Khuzhin](https://github.com/azat)).
* Add the function `toUInt128OrZero`, which was missed by mistake (the mistake is related to https://github.com/ClickHouse/ClickHouse/pull/945). The compatibility aliases `FROM_UNIXTIME` and `DATE_FORMAT` (they are not ClickHouse-native and only exist for MySQL compatibility) have been made case-insensitive, as expected for SQL-compatibility aliases; see the example below. [#61114](https://github.com/ClickHouse/ClickHouse/pull/61114) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
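  A short illustration (the results in the comments are assumptions about the intended behavior):

  ```sql
  SELECT
      toUInt128OrZero('123'),  -- 123
      toUInt128OrZero('abc'),  -- 0 instead of an exception
      from_unixtime(0);        -- lowercase spelling now works as an alias of FROM_UNIXTIME
  ```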
* Improvements for the access checks: allow revoking rights that the revoker does not possess, as long as the target user doesn't have them either. Example: `GRANT SELECT ON *.* TO user1; REVOKE SELECT ON system.* FROM user1;`. [#61115](https://github.com/ClickHouse/ClickHouse/pull/61115) ([pufit](https://github.com/pufit)).
* Fix an error in the previous optimization (https://github.com/ClickHouse/ClickHouse/pull/59698): remove a `break` to make sure the first filtered column has the minimum size. [#61145](https://github.com/ClickHouse/ClickHouse/pull/61145) ([李扬](https://github.com/taiyang-li)).
* Fix the `has()` function with a `Nullable` column (fixes [#60214](https://github.com/ClickHouse/ClickHouse/issues/60214)). [#61249](https://github.com/ClickHouse/ClickHouse/pull/61249) ([Mikhail Koviazin](https://github.com/mkmkme)).
* It is now possible to specify the attribute `merge="true"` in config substitutions for subtrees: `<include from_zk="/path" merge="true">`. If this attribute is specified, ClickHouse merges the subtree with the existing configuration; otherwise, the default behavior is to append the new content to the configuration. [#61299](https://github.com/ClickHouse/ClickHouse/pull/61299) ([alesapin](https://github.com/alesapin)).
* Add async metrics for virtual memory mappings: `VMMaxMapCount` and `VMNumMaps`. Closes [#60662](https://github.com/ClickHouse/ClickHouse/issues/60662). [#61354](https://github.com/ClickHouse/ClickHouse/pull/61354) ([Tuan Pham Anh](https://github.com/tuanpavn)).
* Use the `temporary_files_codec` setting in all places where we create temporary data, for example, external memory sorting and external memory GROUP BY. Previously, it worked only for the `partial_merge` JOIN algorithm. [#61456](https://github.com/ClickHouse/ClickHouse/pull/61456) ([Maksim Kita](https://github.com/kitaisreal)).
* Remove the duplicated check `containing_part.empty()`; it's already checked here: https://github.com/ClickHouse/ClickHouse/blob/1296dac3c7e47670872c15e3f5e58f869e0bd2f2/src/Storages/MergeTree/MergeTreeData.cpp#L6141. [#61467](https://github.com/ClickHouse/ClickHouse/pull/61467) ([William Schoeffel](https://github.com/wiledusc)).
* Add a new setting `max_parser_backtracks` which allows limiting the complexity of query parsing. [#61502](https://github.com/ClickHouse/ClickHouse/pull/61502) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Support parallel reading for Azure Blob Storage. [#61503](https://github.com/ClickHouse/ClickHouse/pull/61503) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Less contention during dynamic resizing of the filesystem cache. [#61524](https://github.com/ClickHouse/ClickHouse/pull/61524) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Disallow the sharded mode of the StorageS3 queue, because it will be rewritten. [#61537](https://github.com/ClickHouse/ClickHouse/pull/61537) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fixed a typo: from `use_leagcy_max_level` to `use_legacy_max_level`. [#61545](https://github.com/ClickHouse/ClickHouse/pull/61545) ([William Schoeffel](https://github.com/wiledusc)).
* Remove some duplicate entries in `blob_storage_log`. [#61622](https://github.com/ClickHouse/ClickHouse/pull/61622) ([YenchangChan](https://github.com/YenchangChan)).
* Enable the `allow_experimental_analyzer` setting by default. [#61652](https://github.com/ClickHouse/ClickHouse/pull/61652) ([Dmitry Novik](https://github.com/novikd)).
* Added the `current_user` function as a compatibility alias for MySQL. [#61770](https://github.com/ClickHouse/ClickHouse/pull/61770) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Use managed identity for backups IO when using Azure Blob Storage. Add a setting to prevent ClickHouse from attempting to create a non-existent container, which requires permissions at the storage account level. [#61785](https://github.com/ClickHouse/ClickHouse/pull/61785) ([Daniel Pozo Escalona](https://github.com/danipozo)).
* Enable `output_format_pretty_row_numbers` by default. It is better for usability. [#61791](https://github.com/ClickHouse/ClickHouse/pull/61791) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* In the previous version, some numbers in Pretty formats were not pretty enough. [#61794](https://github.com/ClickHouse/ClickHouse/pull/61794) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* A long value in Pretty formats won't be cut if it is the single value in the result set, such as in the result of the `SHOW CREATE TABLE` query. [#61795](https://github.com/ClickHouse/ClickHouse/pull/61795) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Similarly to `clickhouse-local`, `clickhouse-client` will accept the `--output-format` option as a synonym for the `--format` option. This closes [#59848](https://github.com/ClickHouse/ClickHouse/issues/59848). [#61797](https://github.com/ClickHouse/ClickHouse/pull/61797) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* If stdout is a terminal and the output format is not specified, `clickhouse-client` and similar tools will use `PrettyCompact` by default, similarly to the interactive mode. `clickhouse-client` and `clickhouse-local` will handle command-line arguments for input and output formats in a unified fashion. This closes [#61272](https://github.com/ClickHouse/ClickHouse/issues/61272). [#61800](https://github.com/ClickHouse/ClickHouse/pull/61800) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Underscore digit groups in Pretty formats for better readability. This is controlled by a new setting, `output_format_pretty_highlight_digit_groups`; see the example below. [#61802](https://github.com/ClickHouse/ClickHouse/pull/61802) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
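  A quick way to try it (the exact rendering is terminal-dependent; treat this as a sketch):

  ```sql
  SET output_format_pretty_highlight_digit_groups = 1;
  SELECT 1234567890123 AS n FORMAT PrettyCompact;
  ```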
* Add the ability to override initial INSERT SETTINGS via SYSTEM FLUSH DISTRIBUTED, as sketched below. [#61832](https://github.com/ClickHouse/ClickHouse/pull/61832) ([Azat Khuzhin](https://github.com/azat)).
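  A hedged sketch (`dist_table` and the chosen setting are hypothetical; the clause shape follows the PR title and is not verified against the final syntax):

  ```sql
  -- Flush pending distributed inserts, overriding the settings
  -- the original INSERTs were queued with.
  SYSTEM FLUSH DISTRIBUTED dist_table SETTINGS max_insert_threads = 4;
  ```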
* Fixed grammar from "a" to "the" in the warning message. There is only one Atomic engine, so it should be "to the new Atomic engine" instead of "to a new Atomic engine". [#61952](https://github.com/ClickHouse/ClickHouse/pull/61952) ([shabroo](https://github.com/shabroo)).

#### Build/Testing/Packaging Improvement

* Update sccache to the latest version; significantly reduce image sizes by re-shaking the dependency trees; use the latest working ODBC driver. [#59953](https://github.com/ClickHouse/ClickHouse/pull/59953) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Update Python-related style checkers. Continues [#50174](https://github.com/ClickHouse/ClickHouse/issues/50174). [#60408](https://github.com/ClickHouse/ClickHouse/pull/60408) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Upgrade `prqlc` to 0.11.3. [#60616](https://github.com/ClickHouse/ClickHouse/pull/60616) ([Maximilian Roos](https://github.com/max-sixty)).
* Attach gdb to the running fuzzer process. [#60654](https://github.com/ClickHouse/ClickHouse/pull/60654) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Use explicit template instantiation more aggressively. Get rid of templates in favor of overloaded functions in some places. [#60730](https://github.com/ClickHouse/ClickHouse/pull/60730) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* The real-time query profiler now works on AArch64. In previous versions, it worked only when a program didn't spend time inside a syscall. [#60807](https://github.com/ClickHouse/ClickHouse/pull/60807) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix a too-large translation unit in `Aggregator`. [#61211](https://github.com/ClickHouse/ClickHouse/pull/61211) ([lgbo](https://github.com/lgbo-ustc)).
* Fixed flakiness of the 01603_insert_select_too_many_parts test. Closes [#61158](https://github.com/ClickHouse/ClickHouse/issues/61158). [#61259](https://github.com/ClickHouse/ClickHouse/pull/61259) ([Ilya Yatsishin](https://github.com/qoega)).
* It is now possible to use `chassert(expression, comment)` in the codebase. [#61263](https://github.com/ClickHouse/ClickHouse/pull/61263) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Teach the fuzzer to use other numeric types. [#61317](https://github.com/ClickHouse/ClickHouse/pull/61317) ([Raúl Marín](https://github.com/Algunenano)).
* Increase the memory limit for coverage builds. [#61405](https://github.com/ClickHouse/ClickHouse/pull/61405) ([Raúl Marín](https://github.com/Algunenano)).
* Add a generic query text fuzzer in `clickhouse-local`. [#61508](https://github.com/ClickHouse/ClickHouse/pull/61508) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix function execution over const and LowCardinality with GROUP BY const for analyzer [#59986](https://github.com/ClickHouse/ClickHouse/pull/59986) ([Azat Khuzhin](https://github.com/azat)).
* Fix `finished_mutations_to_keep=0` for MergeTree (as the docs say, 0 means keep everything) [#60031](https://github.com/ClickHouse/ClickHouse/pull/60031) ([Azat Khuzhin](https://github.com/azat)).
* Fix PartsSplitter producing invalid ranges for the same part [#60041](https://github.com/ClickHouse/ClickHouse/pull/60041) ([Maksim Kita](https://github.com/kitaisreal)).
* Azure Blob Storage: fix issues with endpoint and prefix [#60251](https://github.com/ClickHouse/ClickHouse/pull/60251) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix an LRUResourceCache bug (Hive cache) [#60262](https://github.com/ClickHouse/ClickHouse/pull/60262) ([shanfengp](https://github.com/Aed-p)).
* Force reanalysis if parallel replicas changed [#60362](https://github.com/ClickHouse/ClickHouse/pull/60362) ([Raúl Marín](https://github.com/Algunenano)).
* Fix usage of plain metadata type with the new disks configuration option [#60396](https://github.com/ClickHouse/ClickHouse/pull/60396) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Try to fix the logical error 'Cannot capture column because it has incompatible type' in mapContainsKeyLike [#60451](https://github.com/ClickHouse/ClickHouse/pull/60451) ([Kruglov Pavel](https://github.com/Avogar)).
* Try to avoid calculation of scalar subqueries for CREATE TABLE. [#60464](https://github.com/ClickHouse/ClickHouse/pull/60464) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix deadlock in parallel parsing when lots of rows are skipped due to errors [#60516](https://github.com/ClickHouse/ClickHouse/pull/60516) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix `max_query_size` for the KQL compound operator [#60534](https://github.com/ClickHouse/ClickHouse/pull/60534) ([Yong Wang](https://github.com/kashwy)).
* Keeper fix: add timeouts when waiting for commit logs [#60544](https://github.com/ClickHouse/ClickHouse/pull/60544) ([Antonio Andelic](https://github.com/antonio2368)).
* Reduce the number of read rows from `system.numbers` [#60546](https://github.com/ClickHouse/ClickHouse/pull/60546) ([JackyWoo](https://github.com/JackyWoo)).
* Don't output number tips for date types [#60577](https://github.com/ClickHouse/ClickHouse/pull/60577) ([Raúl Marín](https://github.com/Algunenano)).
* Fix reading from MergeTree with non-deterministic functions in the filter [#60586](https://github.com/ClickHouse/ClickHouse/pull/60586) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix logical error on a bad compatibility setting value type [#60596](https://github.com/ClickHouse/ClickHouse/pull/60596) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix inconsistent aggregate function states in mixed x86-64 / ARM clusters [#60610](https://github.com/ClickHouse/ClickHouse/pull/60610) ([Harry Lee](https://github.com/HarryLeeIBM)).
* fix(prql): Robust panic handler [#60615](https://github.com/ClickHouse/ClickHouse/pull/60615) ([Maximilian Roos](https://github.com/max-sixty)).
* Fix `intDiv` for decimal and date arguments [#60672](https://github.com/ClickHouse/ClickHouse/pull/60672) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Fix: expand CTEs in ALTER MODIFY QUERY [#60682](https://github.com/ClickHouse/ClickHouse/pull/60682) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix `system.parts` for non-Atomic/Ordinary database engines (i.e., Memory) [#60689](https://github.com/ClickHouse/ClickHouse/pull/60689) ([Azat Khuzhin](https://github.com/azat)).
* Fix "Invalid storage definition in metadata file" for parameterized views [#60708](https://github.com/ClickHouse/ClickHouse/pull/60708) ([Azat Khuzhin](https://github.com/azat)).
* Fix buffer overflow in CompressionCodecMultiple [#60731](https://github.com/ClickHouse/ClickHouse/pull/60731) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove nonsense from SQL/JSON [#60738](https://github.com/ClickHouse/ClickHouse/pull/60738) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove an incorrect sanitize check in the aggregate function quantileGK [#60740](https://github.com/ClickHouse/ClickHouse/pull/60740) ([李扬](https://github.com/taiyang-li)).
* Fix an insert-select + insert_deduplication_token bug by setting streams to 1 [#60745](https://github.com/ClickHouse/ClickHouse/pull/60745) ([Jordi Villar](https://github.com/jrdi)).
* Prevent setting custom metadata headers on unsupported multipart upload operations [#60748](https://github.com/ClickHouse/ClickHouse/pull/60748) ([Francisco J. Jurado Moreno](https://github.com/Beetelbrox)).
* Fix `toStartOfInterval` [#60763](https://github.com/ClickHouse/ClickHouse/pull/60763) ([Andrey Zvonov](https://github.com/zvonand)).
* Fix crash in arrayEnumerateRanked [#60764](https://github.com/ClickHouse/ClickHouse/pull/60764) ([Raúl Marín](https://github.com/Algunenano)).
* Fix crash when using `input()` in INSERT SELECT JOIN [#60765](https://github.com/ClickHouse/ClickHouse/pull/60765) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix crash with different `allow_experimental_analyzer` values in subqueries [#60770](https://github.com/ClickHouse/ClickHouse/pull/60770) ([Dmitry Novik](https://github.com/novikd)).
* Remove recursion when reading from S3 [#60849](https://github.com/ClickHouse/ClickHouse/pull/60849) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix possible hang on error in HashedDictionaryParallelLoader [#60926](https://github.com/ClickHouse/ClickHouse/pull/60926) ([vdimir](https://github.com/vdimir)).
* Fix async RESTORE with Replicated database [#60934](https://github.com/ClickHouse/ClickHouse/pull/60934) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix the CSV format not supporting tuples [#60994](https://github.com/ClickHouse/ClickHouse/pull/60994) ([shuai.xu](https://github.com/shuai-xu)).
* Fix deadlock in async inserts to `Log` tables via native protocol [#61055](https://github.com/ClickHouse/ClickHouse/pull/61055) ([Anton Popov](https://github.com/CurtizJ)).
* Fix lazy execution of the default argument in dictGetOrDefault for RangeHashedDictionary [#61196](https://github.com/ClickHouse/ClickHouse/pull/61196) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix multiple bugs in groupArraySorted [#61203](https://github.com/ClickHouse/ClickHouse/pull/61203) ([Raúl Marín](https://github.com/Algunenano)).
* Fix Keeper reconfig for the standalone binary [#61233](https://github.com/ClickHouse/ClickHouse/pull/61233) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix usage of session_token in the S3 engine [#61234](https://github.com/ClickHouse/ClickHouse/pull/61234) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix possible incorrect result of aggregate function `uniqExact` [#61257](https://github.com/ClickHouse/ClickHouse/pull/61257) ([Anton Popov](https://github.com/CurtizJ)).
* Fix bugs in SHOW DATABASE [#61269](https://github.com/ClickHouse/ClickHouse/pull/61269) ([Raúl Marín](https://github.com/Algunenano)).
* Fix logical error in RabbitMQ storage with MATERIALIZED columns [#61320](https://github.com/ClickHouse/ClickHouse/pull/61320) ([vdimir](https://github.com/vdimir)).
* Fix CREATE OR REPLACE DICTIONARY [#61356](https://github.com/ClickHouse/ClickHouse/pull/61356) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix crash in Object JSON when parsing an array with nulls [#61364](https://github.com/ClickHouse/ClickHouse/pull/61364) ([vdimir](https://github.com/vdimir)).
* Fix ATTACH query with external ON CLUSTER [#61365](https://github.com/ClickHouse/ClickHouse/pull/61365) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix consecutive keys optimization for nullable keys [#61393](https://github.com/ClickHouse/ClickHouse/pull/61393) ([Anton Popov](https://github.com/CurtizJ)).
* Fix an issue with ActionsDAG splitting [#61458](https://github.com/ClickHouse/ClickHouse/pull/61458) ([Raúl Marín](https://github.com/Algunenano)).
* Fix finishing a failed RESTORE [#61466](https://github.com/ClickHouse/ClickHouse/pull/61466) ([Vitaly Baranov](https://github.com/vitlibar)).
* Correctly disable `async_insert_use_adaptive_busy_timeout` with compatibility settings [#61468](https://github.com/ClickHouse/ClickHouse/pull/61468) ([Raúl Marín](https://github.com/Algunenano)).
* Allow queuing in the restore pool [#61475](https://github.com/ClickHouse/ClickHouse/pull/61475) ([Nikita Taranov](https://github.com/nickitat)).
* Fix a bug when reading system.parts using UUID (issue 61220). [#61479](https://github.com/ClickHouse/ClickHouse/pull/61479) ([Dan Wu](https://github.com/wudanzy)).
* Fix ALTER QUERY MODIFY SQL SECURITY [#61480](https://github.com/ClickHouse/ClickHouse/pull/61480) ([pufit](https://github.com/pufit)).
* Fix crash in window view [#61526](https://github.com/ClickHouse/ClickHouse/pull/61526) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix `repeat` with non-native integers [#61527](https://github.com/ClickHouse/ClickHouse/pull/61527) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix the client's `-s` argument [#61530](https://github.com/ClickHouse/ClickHouse/pull/61530) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Reset part level upon attach from disk on MergeTree [#61536](https://github.com/ClickHouse/ClickHouse/pull/61536) ([Arthur Passos](https://github.com/arthurpassos)).
* Fix crash in arrayPartialReverseSort [#61539](https://github.com/ClickHouse/ClickHouse/pull/61539) ([Raúl Marín](https://github.com/Algunenano)).
* Fix string search with a constant position [#61547](https://github.com/ClickHouse/ClickHouse/pull/61547) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix `addDays` causing an error when used with DateTime64 [#61561](https://github.com/ClickHouse/ClickHouse/pull/61561) ([Shuai li](https://github.com/loneylee)).
* Disallow the LowCardinality input type for JSONExtract [#61617](https://github.com/ClickHouse/ClickHouse/pull/61617) ([Julia Kartseva](https://github.com/jkartseva)).
* Fix `system.part_log` for async insert with deduplication [#61620](https://github.com/ClickHouse/ClickHouse/pull/61620) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix the `Non-ready set` error for `system.parts`. [#61666](https://github.com/ClickHouse/ClickHouse/pull/61666) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Don't allow the same expression in ORDER BY with and without WITH FILL [#61667](https://github.com/ClickHouse/ClickHouse/pull/61667) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix actual_part_name for REPLACE_RANGE (`Entry actual part isn't empty yet`) [#61675](https://github.com/ClickHouse/ClickHouse/pull/61675) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix columns after executing MODIFY QUERY for a materialized view with an internal table [#61734](https://github.com/ClickHouse/ClickHouse/pull/61734) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix crash in `multiSearchAllPositionsCaseInsensitiveUTF8` for incorrect UTF-8 [#61749](https://github.com/ClickHouse/ClickHouse/pull/61749) ([pufit](https://github.com/pufit)).
* Fix the 'RANGE frame is not supported for Nullable columns' error [#61766](https://github.com/ClickHouse/ClickHouse/pull/61766) ([YuanLiu](https://github.com/ditgittube)).
* Revert "Revert "Fix bug when reading system.parts using UUID (issue 61220)."" [#61779](https://github.com/ClickHouse/ClickHouse/pull/61779) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).

#### CI Fix or Improvement (changelog entry is not required)

* Decoupled changes from [#60408](https://github.com/ClickHouse/ClickHouse/issues/60408). [#60553](https://github.com/ClickHouse/ClickHouse/pull/60553) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Eliminates the need to provide input args to docker server jobs to clean yml files. [#60602](https://github.com/ClickHouse/ClickHouse/pull/60602) ([Max K.](https://github.com/maxknv)).
* Debug and fix markreleaseready. [#60611](https://github.com/ClickHouse/ClickHouse/pull/60611) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix the build_report job so that it's defined by ci_config only (not the yml file). [#60613](https://github.com/ClickHouse/ClickHouse/pull/60613) ([Max K.](https://github.com/maxknv)).
* Do not await pending CI jobs on release branches; decrease the wait timeout to fit into the GitHub job timeout. [#60652](https://github.com/ClickHouse/ClickHouse/pull/60652) ([Max K.](https://github.com/maxknv)).
* Set a limited number of builds for the "special build check" report in backports. [#60850](https://github.com/ClickHouse/ClickHouse/pull/60850) ([Max K.](https://github.com/maxknv)).
* ... [#60935](https://github.com/ClickHouse/ClickHouse/pull/60935) ([Max K.](https://github.com/maxknv)).
* ... [#60947](https://github.com/ClickHouse/ClickHouse/pull/60947) ([Max K.](https://github.com/maxknv)).
* ... [#60952](https://github.com/ClickHouse/ClickHouse/pull/60952) ([Max K.](https://github.com/maxknv)).
* ... [#60958](https://github.com/ClickHouse/ClickHouse/pull/60958) ([Max K.](https://github.com/maxknv)).
* ... [#61022](https://github.com/ClickHouse/ClickHouse/pull/61022) ([Max K.](https://github.com/maxknv)).
* Just a preparation for the merge queue support. [#61099](https://github.com/ClickHouse/ClickHouse/pull/61099) ([Max K.](https://github.com/maxknv)).
* ... [#61133](https://github.com/ClickHouse/ClickHouse/pull/61133) ([Max K.](https://github.com/maxknv)).
* In PRs: run typos and aspell checks always; run pylint and mypy only if Python file(s) changed; run the basic source file style check only if not all changes are in Python files. [#61148](https://github.com/ClickHouse/ClickHouse/pull/61148) ([Max K.](https://github.com/maxknv)).
* ... [#61172](https://github.com/ClickHouse/ClickHouse/pull/61172) ([Max K.](https://github.com/maxknv)).
* ... [#61183](https://github.com/ClickHouse/ClickHouse/pull/61183) ([Han Fei](https://github.com/hanfei1991)).
* ... [#61185](https://github.com/ClickHouse/ClickHouse/pull/61185) ([Max K.](https://github.com/maxknv)).
* TBD. [#61197](https://github.com/ClickHouse/ClickHouse/pull/61197) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* ... [#61214](https://github.com/ClickHouse/ClickHouse/pull/61214) ([Max K.](https://github.com/maxknv)).
* ... [#61441](https://github.com/ClickHouse/ClickHouse/pull/61441) ([Max K.](https://github.com/maxknv)).
* ![Screenshot_20240323_025055](https://github.com/ClickHouse/ClickHouse/assets/18581488/ccaab212-a1d3-4dfb-8d56-b1991760b6bf). [#61801](https://github.com/ClickHouse/ClickHouse/pull/61801) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* ... [#61877](https://github.com/ClickHouse/ClickHouse/pull/61877) ([Max K.](https://github.com/maxknv)).

#### NO CL ENTRY

* NO CL ENTRY: 'Revert "Revert "Use `MergeTree` as a default table engine""'. [#60524](https://github.com/ClickHouse/ClickHouse/pull/60524) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Revert "Support resource request canceling""'. [#60558](https://github.com/ClickHouse/ClickHouse/pull/60558) ([Sergei Trifonov](https://github.com/serxa)).
* NO CL ENTRY: 'Revert "Add `toMillisecond` function"'. [#60644](https://github.com/ClickHouse/ClickHouse/pull/60644) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Revert "Synchronize parsers"'. [#60759](https://github.com/ClickHouse/ClickHouse/pull/60759) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Revert "Fix wacky primary key sorting in `SHOW INDEX`"'. [#60898](https://github.com/ClickHouse/ClickHouse/pull/60898) ([Antonio Andelic](https://github.com/antonio2368)).
* NO CL ENTRY: 'Revert "CI: make style check faster"'. [#61142](https://github.com/ClickHouse/ClickHouse/pull/61142) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Don't allow to set max_parallel_replicas to 0 as it doesn't make sense"'. [#61200](https://github.com/ClickHouse/ClickHouse/pull/61200) ([Kruglov Pavel](https://github.com/Avogar)).
* NO CL ENTRY: 'Revert "Fix usage of session_token in S3 engine"'. [#61359](https://github.com/ClickHouse/ClickHouse/pull/61359) ([Antonio Andelic](https://github.com/antonio2368)).
* NO CL ENTRY: 'Revert "Revert "Fix usage of session_token in S3 engine""'. [#61362](https://github.com/ClickHouse/ClickHouse/pull/61362) ([Kruglov Pavel](https://github.com/Avogar)).
* NO CL ENTRY: 'Reorder hidden and shown checks in comment, change url of Mergeable check'. [#61373](https://github.com/ClickHouse/ClickHouse/pull/61373) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* NO CL ENTRY: 'Remove unnecessary layers from clickhouse/cctools'. [#61374](https://github.com/ClickHouse/ClickHouse/pull/61374) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* NO CL ENTRY: 'Revert "Updated format settings references in the docs (datetime.md)"'. [#61435](https://github.com/ClickHouse/ClickHouse/pull/61435) ([Kruglov Pavel](https://github.com/Avogar)).
* NO CL ENTRY: 'Revert "CI: ARM integration tests: disable tests with HDFS "'. [#61449](https://github.com/ClickHouse/ClickHouse/pull/61449) ([Max K.](https://github.com/maxknv)).
* NO CL ENTRY: 'Revert "Analyzer: Fix virtual columns in StorageMerge"'. [#61518](https://github.com/ClickHouse/ClickHouse/pull/61518) ([Antonio Andelic](https://github.com/antonio2368)).
* NO CL ENTRY: 'Revert "Revert "Analyzer: Fix virtual columns in StorageMerge""'. [#61528](https://github.com/ClickHouse/ClickHouse/pull/61528) ([Dmitry Novik](https://github.com/novikd)).
* NO CL ENTRY: 'Improve build_download_helper'. [#61592](https://github.com/ClickHouse/ClickHouse/pull/61592) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* NO CL ENTRY: 'Revert "Un-flake `test_undrop_query`"'. [#61668](https://github.com/ClickHouse/ClickHouse/pull/61668) ([Robert Schulze](https://github.com/rschu1ze)).
* NO CL ENTRY: 'Fix flaky tests (stateless, integration)'. [#61816](https://github.com/ClickHouse/ClickHouse/pull/61816) ([Nikita Fomichev](https://github.com/fm4v)).
* NO CL ENTRY: 'Better usability of "expect" tests: less trouble with running directly'. [#61818](https://github.com/ClickHouse/ClickHouse/pull/61818) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Fix flaky `02122_parallel_formatting_Template`"'. [#61868](https://github.com/ClickHouse/ClickHouse/pull/61868) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Revert "Add --now option to enable and start the service" #job_Install_packages_amd64'. [#61878](https://github.com/ClickHouse/ClickHouse/pull/61878) ([Max K.](https://github.com/maxknv)).
* NO CL ENTRY: 'Revert "disallow LowCardinality input type for JSONExtract"'. [#61960](https://github.com/ClickHouse/ClickHouse/pull/61960) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Improve query performance in case of very small blocks [#58879](https://github.com/ClickHouse/ClickHouse/pull/58879) ([Azat Khuzhin](https://github.com/azat)).
* Analyzer: fixes for JOIN columns resolution [#59007](https://github.com/ClickHouse/ClickHouse/pull/59007) ([vdimir](https://github.com/vdimir)).
* Fix race on `Context::async_insert_queue` [#59082](https://github.com/ClickHouse/ClickHouse/pull/59082) ([Alexander Tokmakov](https://github.com/tavplubix)).
* CI: support batch specification in commit message [#59738](https://github.com/ClickHouse/ClickHouse/pull/59738) ([Max K.](https://github.com/maxknv)).
* Update storing-data.md [#60024](https://github.com/ClickHouse/ClickHouse/pull/60024) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Make max_insert_delayed_streams_for_parallel_write actually work [#60079](https://github.com/ClickHouse/ClickHouse/pull/60079) ([alesapin](https://github.com/alesapin)).
* Analyzer: support join using column from select list [#60182](https://github.com/ClickHouse/ClickHouse/pull/60182) ([vdimir](https://github.com/vdimir)).
* Test for [#60223](https://github.com/ClickHouse/ClickHouse/issues/60223) [#60258](https://github.com/ClickHouse/ClickHouse/pull/60258) ([Denny Crane](https://github.com/den-crane)).
* Analyzer: Refactor execution name for ConstantNode [#60313](https://github.com/ClickHouse/ClickHouse/pull/60313) ([Dmitry Novik](https://github.com/novikd)).
* Fix database iterator waiting code [#60314](https://github.com/ClickHouse/ClickHouse/pull/60314) ([Sergei Trifonov](https://github.com/serxa)).
* QueryCache: Don't acquire the query count mutex if not necessary [#60348](https://github.com/ClickHouse/ClickHouse/pull/60348) ([zhongyuankai](https://github.com/zhongyuankai)).
* Fix bugfix check (due to unknown commit_logs_cache_size_threshold) [#60375](https://github.com/ClickHouse/ClickHouse/pull/60375) ([Azat Khuzhin](https://github.com/azat)).
* Enable testing with `io_uring` back [#60383](https://github.com/ClickHouse/ClickHouse/pull/60383) ([Nikita Taranov](https://github.com/nickitat)).
* Analyzer: improve hiding secret arguments. [#60386](https://github.com/ClickHouse/ClickHouse/pull/60386) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* CI: make workflow yml abstract [#60421](https://github.com/ClickHouse/ClickHouse/pull/60421) ([Max K.](https://github.com/maxknv)).
* Improve test test_reload_clusters_config [#60426](https://github.com/ClickHouse/ClickHouse/pull/60426) ([Kruglov Pavel](https://github.com/Avogar)).
* Revert "Revert "Merge pull request [#56864](https://github.com/ClickHouse/ClickHouse/issues/56864) from ClickHouse/broken-projections-better-handling"" [#60452](https://github.com/ClickHouse/ClickHouse/pull/60452) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Do not check to and from files existence in metadata_storage because it does not see uncommitted changes [#60462](https://github.com/ClickHouse/ClickHouse/pull/60462) ([Alexander Gololobov](https://github.com/davenger)).
* Fix an ambiguous option in `clickhouse-local` [#60475](https://github.com/ClickHouse/ClickHouse/pull/60475) ([豪肥肥](https://github.com/HowePa)).
* Fix: test_parallel_replicas_custom_key_load_balancing [#60485](https://github.com/ClickHouse/ClickHouse/pull/60485) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix: progress bar for *Cluster table functions [#60491](https://github.com/ClickHouse/ClickHouse/pull/60491) ([Igor Nikonov](https://github.com/devcrafter)).
* Analyzer: Support different ObjectJSON on shards [#60497](https://github.com/ClickHouse/ClickHouse/pull/60497) ([Dmitry Novik](https://github.com/novikd)).
* Cancel PipelineExecutor properly in case of exception in spawnThreads [#60499](https://github.com/ClickHouse/ClickHouse/pull/60499) ([Kruglov Pavel](https://github.com/Avogar)).
* Refactor StorageSystemOneBlock [#60510](https://github.com/ClickHouse/ClickHouse/pull/60510) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Simple cleanup while fixing progress bar [#60513](https://github.com/ClickHouse/ClickHouse/pull/60513) ([Igor Nikonov](https://github.com/devcrafter)).
* PullingAsyncPipelineExecutor cleanup [#60515](https://github.com/ClickHouse/ClickHouse/pull/60515) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix bad error message [#60518](https://github.com/ClickHouse/ClickHouse/pull/60518) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Synchronize Access [#60519](https://github.com/ClickHouse/ClickHouse/pull/60519) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Synchronize metrics and Keeper [#60520](https://github.com/ClickHouse/ClickHouse/pull/60520) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Enforce clang-tidy in `programs/` and `utils/` headers [#60521](https://github.com/ClickHouse/ClickHouse/pull/60521) ([Robert Schulze](https://github.com/rschu1ze)).
* Synchronize parsers [#60522](https://github.com/ClickHouse/ClickHouse/pull/60522) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix a bunch of clang-tidy warnings in headers [#60523](https://github.com/ClickHouse/ClickHouse/pull/60523) ([Robert Schulze](https://github.com/rschu1ze)).
* General sanity in function `seriesOutliersDetectTukey` [#60535](https://github.com/ClickHouse/ClickHouse/pull/60535) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Update Chinese documentation for max_query_size, max_parser_depth and optimize_functions_to_subcolumns [#60541](https://github.com/ClickHouse/ClickHouse/pull/60541) ([Alex Cheng](https://github.com/Alex-Cheng)).
* Userspace page cache again [#60552](https://github.com/ClickHouse/ClickHouse/pull/60552) ([Michael Kolupaev](https://github.com/al13n321)).
* Traverse the shadow directory for system.remote_data_paths [#60585](https://github.com/ClickHouse/ClickHouse/pull/60585) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Add a test for [#58906](https://github.com/ClickHouse/ClickHouse/issues/58906) [#60597](https://github.com/ClickHouse/ClickHouse/pull/60597) ([Raúl Marín](https://github.com/Algunenano)).
* Use python zipfile to have x-platform idempotent lambda packages [#60603](https://github.com/ClickHouse/ClickHouse/pull/60603) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* tests: suppress data-race in librdkafka statistics code [#60604](https://github.com/ClickHouse/ClickHouse/pull/60604) ([Azat Khuzhin](https://github.com/azat)).
* Update version after release [#60605](https://github.com/ClickHouse/ClickHouse/pull/60605) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Update version_date.tsv and changelogs after v24.2.1.2248-stable [#60607](https://github.com/ClickHouse/ClickHouse/pull/60607) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Addition to changelog [#60609](https://github.com/ClickHouse/ClickHouse/pull/60609) ([Anton Popov](https://github.com/CurtizJ)).
* internal: Refine rust prql code [#60617](https://github.com/ClickHouse/ClickHouse/pull/60617) ([Maximilian Roos](https://github.com/max-sixty)).
* fix(rust): Fix skim's panic handler [#60621](https://github.com/ClickHouse/ClickHouse/pull/60621) ([Maximilian Roos](https://github.com/max-sixty)).
* Resubmit "Analyzer: compute ALIAS columns right after reading" [#60641](https://github.com/ClickHouse/ClickHouse/pull/60641) ([vdimir](https://github.com/vdimir)).
* Analyzer: Fix bug with join_use_nulls and PREWHERE [#60655](https://github.com/ClickHouse/ClickHouse/pull/60655) ([vdimir](https://github.com/vdimir)).
* Add a test for [#59891](https://github.com/ClickHouse/ClickHouse/issues/59891) [#60657](https://github.com/ClickHouse/ClickHouse/pull/60657) ([Raúl Marín](https://github.com/Algunenano)).
* Fix missed entries in system.part_log in case of fetch preferred over merges/mutations [#60659](https://github.com/ClickHouse/ClickHouse/pull/60659) ([Azat Khuzhin](https://github.com/azat)).
* Always apply the first minmax index among available skip indices [#60675](https://github.com/ClickHouse/ClickHouse/pull/60675) ([Igor Nikonov](https://github.com/devcrafter)).
* Remove bad test `02152_http_external_tables_memory_tracking` [#60690](https://github.com/ClickHouse/ClickHouse/pull/60690) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix questionable behavior in the `parseDateTimeBestEffort` function. [#60691](https://github.com/ClickHouse/ClickHouse/pull/60691) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix flaky checks [#60694](https://github.com/ClickHouse/ClickHouse/pull/60694) ([Azat Khuzhin](https://github.com/azat)).
* Resubmit http_external_tables_memory_tracking test [#60695](https://github.com/ClickHouse/ClickHouse/pull/60695) ([Azat Khuzhin](https://github.com/azat)).
* Fix bugfix and upgrade checks (due to "Unknown handler type 'redirect'" error) [#60696](https://github.com/ClickHouse/ClickHouse/pull/60696) ([Azat Khuzhin](https://github.com/azat)).
* Fix test_grant_and_revoke/test.py::test_grant_all_on_table (after syncing with cloud) [#60699](https://github.com/ClickHouse/ClickHouse/pull/60699) ([Azat Khuzhin](https://github.com/azat)).
* Remove unit test for ColumnObject [#60709](https://github.com/ClickHouse/ClickHouse/pull/60709) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Improve unit tests [#60710](https://github.com/ClickHouse/ClickHouse/pull/60710) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix scheduler fairness test [#60712](https://github.com/ClickHouse/ClickHouse/pull/60712) ([Sergei Trifonov](https://github.com/serxa)).
* Do not retry queries if container is down in integration tests (resubmit) [#60714](https://github.com/ClickHouse/ClickHouse/pull/60714) ([Azat Khuzhin](https://github.com/azat)).
* Mark one setting as obsolete [#60715](https://github.com/ClickHouse/ClickHouse/pull/60715) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix a test with Analyzer [#60723](https://github.com/ClickHouse/ClickHouse/pull/60723) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Two tests are fixed with Analyzer [#60724](https://github.com/ClickHouse/ClickHouse/pull/60724) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove old code [#60728](https://github.com/ClickHouse/ClickHouse/pull/60728) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove more code from LIVE VIEW [#60729](https://github.com/ClickHouse/ClickHouse/pull/60729) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix `test_keeper_back_to_back/test.py::test_concurrent_watches` [#60749](https://github.com/ClickHouse/ClickHouse/pull/60749) ([Antonio Andelic](https://github.com/antonio2368)).
* Catch exceptions on finalize in `InterserverIOHTTPHandler` [#60769](https://github.com/ClickHouse/ClickHouse/pull/60769) ([Antonio Andelic](https://github.com/antonio2368)).
* Reduce flakiness of 02932_refreshable_materialized_views [#60771](https://github.com/ClickHouse/ClickHouse/pull/60771) ([Michael Kolupaev](https://github.com/al13n321)).
* Use 64-bit capabilities if available [#60775](https://github.com/ClickHouse/ClickHouse/pull/60775) ([Azat Khuzhin](https://github.com/azat)).
* Include multiline logs in fuzzer fatal.log report [#60796](https://github.com/ClickHouse/ClickHouse/pull/60796) ([Raúl Marín](https://github.com/Algunenano)).
* Add missing clone calls related to compression [#60810](https://github.com/ClickHouse/ClickHouse/pull/60810) ([Raúl Marín](https://github.com/Algunenano)).
* New private runners [#60811](https://github.com/ClickHouse/ClickHouse/pull/60811) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Move userspace page cache settings to the correct section of SettingsChangeHistory.h [#60812](https://github.com/ClickHouse/ClickHouse/pull/60812) ([Michael Kolupaev](https://github.com/al13n321)).
* Update version_date.tsv and changelogs after v23.8.10.43-lts [#60851](https://github.com/ClickHouse/ClickHouse/pull/60851) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Fix fuzzer report [#60853](https://github.com/ClickHouse/ClickHouse/pull/60853) ([Raúl Marín](https://github.com/Algunenano)).
* Update version_date.tsv and changelogs after v23.3.20.27-lts [#60857](https://github.com/ClickHouse/ClickHouse/pull/60857) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Refactor OptimizeDateOrDateTimeConverterWithPreimageVisitor [#60875](https://github.com/ClickHouse/ClickHouse/pull/60875) ([Zhiguo Zhou](https://github.com/ZhiguoZh)).
* Fix race in PageCache [#60878](https://github.com/ClickHouse/ClickHouse/pull/60878) ([Michael Kolupaev](https://github.com/al13n321)).
* Small changes in async inserts code [#60885](https://github.com/ClickHouse/ClickHouse/pull/60885) ([Nikita Taranov](https://github.com/nickitat)).
* Remove useless verbose logging from AWS library [#60921](https://github.com/ClickHouse/ClickHouse/pull/60921) ([alesapin](https://github.com/alesapin)).
* Throw on query timeout in ZooKeeperRetries [#60922](https://github.com/ClickHouse/ClickHouse/pull/60922) ([Antonio Andelic](https://github.com/antonio2368)).
* Bring clickhouse-test changes from private [#60924](https://github.com/ClickHouse/ClickHouse/pull/60924) ([Raúl Marín](https://github.com/Algunenano)).
* Add debug info to exceptions in `IMergeTreeDataPart::checkConsistency()` [#60981](https://github.com/ClickHouse/ClickHouse/pull/60981) ([Nikita Taranov](https://github.com/nickitat)).
* Fix a typo [#60987](https://github.com/ClickHouse/ClickHouse/pull/60987) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Replace some header includes with forward declarations [#61003](https://github.com/ClickHouse/ClickHouse/pull/61003) ([Amos Bird](https://github.com/amosbird)).
* Speed up cctools building [#61011](https://github.com/ClickHouse/ClickHouse/pull/61011) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix ASTRenameQuery::clone [#61013](https://github.com/ClickHouse/ClickHouse/pull/61013) ([vdimir](https://github.com/vdimir)).
* Update README.md [#61021](https://github.com/ClickHouse/ClickHouse/pull/61021) ([Tyler Hannan](https://github.com/tylerhannan)).
* Fix TableFunctionExecutable::skipAnalysisForArguments [#61037](https://github.com/ClickHouse/ClickHouse/pull/61037) ([Dmitry Novik](https://github.com/novikd)).
* Fix: parallel replicas with PREWHERE (ubsan) [#61052](https://github.com/ClickHouse/ClickHouse/pull/61052) ([Igor Nikonov](https://github.com/devcrafter)).
* Fast fix tests. [#61056](https://github.com/ClickHouse/ClickHouse/pull/61056) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix `test_placement_info` [#61057](https://github.com/ClickHouse/ClickHouse/pull/61057) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Fix: parallel replicas with CTEs, crash in EXPLAIN SYNTAX with analyzer [#61059](https://github.com/ClickHouse/ClickHouse/pull/61059) ([Igor Nikonov](https://github.com/devcrafter)).
* Debug fuzzer failures [#61062](https://github.com/ClickHouse/ClickHouse/pull/61062) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Add regression tests for fixed issues [#61076](https://github.com/ClickHouse/ClickHouse/pull/61076) ([Antonio Andelic](https://github.com/antonio2368)).
* Analyzer: Fix 01244_optimize_distributed_group_by_sharding_key [#61089](https://github.com/ClickHouse/ClickHouse/pull/61089) ([Dmitry Novik](https://github.com/novikd)).
* Use the global scalars cache with analyzer [#61104](https://github.com/ClickHouse/ClickHouse/pull/61104) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix removing the is_active node after re-creation [#61105](https://github.com/ClickHouse/ClickHouse/pull/61105) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Update 02962_system_sync_replica_lightweight_from_modifier.sh [#61110](https://github.com/ClickHouse/ClickHouse/pull/61110) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Simplify bridges [#61118](https://github.com/ClickHouse/ClickHouse/pull/61118) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Update cppkafka to v0.4.1 [#61119](https://github.com/ClickHouse/ClickHouse/pull/61119) ([Ilya Golshtein](https://github.com/ilejn)).
* CI: add wf class in ci_config [#61122](https://github.com/ClickHouse/ClickHouse/pull/61122) ([Max K.](https://github.com/maxknv)).
* QueryFuzzer: replace element randomly when AST part buffer is full [#61124](https://github.com/ClickHouse/ClickHouse/pull/61124) ([Tomer Shafir](https://github.com/tomershafir)).
* CI: make style check fast [#61125](https://github.com/ClickHouse/ClickHouse/pull/61125) ([Max K.](https://github.com/maxknv)).
* Better gitignore [#61128](https://github.com/ClickHouse/ClickHouse/pull/61128) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix something strange [#61129](https://github.com/ClickHouse/ClickHouse/pull/61129) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Update check-large-objects.sh to be language neutral [#61130](https://github.com/ClickHouse/ClickHouse/pull/61130) ([Dan Wu](https://github.com/wudanzy)).
* Throw memory limit exceptions to avoid OOM in some places [#61132](https://github.com/ClickHouse/ClickHouse/pull/61132) ([alesapin](https://github.com/alesapin)).
* Fix test_distributed_directory_monitor_split_batch_on_failure flakiness [#61136](https://github.com/ClickHouse/ClickHouse/pull/61136) ([Azat Khuzhin](https://github.com/azat)).
* Fix llvm symbolizer on CI [#61147](https://github.com/ClickHouse/ClickHouse/pull/61147) ([Azat Khuzhin](https://github.com/azat)).
* Some clang-tidy fixes [#61150](https://github.com/ClickHouse/ClickHouse/pull/61150) ([Robert Schulze](https://github.com/rschu1ze)).
* Revive "Less contention in the cache, part 2" [#61152](https://github.com/ClickHouse/ClickHouse/pull/61152) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Enable black back [#61159](https://github.com/ClickHouse/ClickHouse/pull/61159) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* CI: fix nightly job issue [#61160](https://github.com/ClickHouse/ClickHouse/pull/61160) ([Max K.](https://github.com/maxknv)).
* Split `RangeHashedDictionary` [#61162](https://github.com/ClickHouse/ClickHouse/pull/61162) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Remove a few templates from Aggregator.cpp [#61171](https://github.com/ClickHouse/ClickHouse/pull/61171) ([Raúl Marín](https://github.com/Algunenano)).
* Avoid some logical errors in experimental Object type [#61173](https://github.com/ClickHouse/ClickHouse/pull/61173) ([Kruglov Pavel](https://github.com/Avogar)).
* Update ReadSettings.h [#61174](https://github.com/ClickHouse/ClickHouse/pull/61174) ([Kseniia Sumarokova](https://github.com/kssenii)).
* CI: ARM integration tests: disable tests with HDFS [#61182](https://github.com/ClickHouse/ClickHouse/pull/61182) ([Max K.](https://github.com/maxknv)).
* Disable sanitizers with 02784_parallel_replicas_automatic_decision_join [#61184](https://github.com/ClickHouse/ClickHouse/pull/61184) ([Raúl Marín](https://github.com/Algunenano)).
* Fix `02887_mutations_subcolumns` test flakiness [#61198](https://github.com/ClickHouse/ClickHouse/pull/61198) ([Nikita Taranov](https://github.com/nickitat)).
* Make variant tests a bit faster [#61199](https://github.com/ClickHouse/ClickHouse/pull/61199) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix strange log message [#61206](https://github.com/ClickHouse/ClickHouse/pull/61206) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix 01603_insert_select_too_many_parts flakiness [#61218](https://github.com/ClickHouse/ClickHouse/pull/61218) ([Azat Khuzhin](https://github.com/azat)).
* Make every style-checker runner type scale out very quickly [#61231](https://github.com/ClickHouse/ClickHouse/pull/61231) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Improve `test_failed_mutations` [#61235](https://github.com/ClickHouse/ClickHouse/pull/61235) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Fix `test_merge_tree_load_parts/test.py::test_merge_tree_load_parts_corrupted` [#61236](https://github.com/ClickHouse/ClickHouse/pull/61236) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Fix the `forget_partition` test [#61237](https://github.com/ClickHouse/ClickHouse/pull/61237) ([Sergei Trifonov](https://github.com/serxa)).
* Print more info in `02572_system_logs_materialized_views_ignore_errors` to debug [#61246](https://github.com/ClickHouse/ClickHouse/pull/61246) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Fix runtime error in AST Fuzzer [#61248](https://github.com/ClickHouse/ClickHouse/pull/61248) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Add retries to `02908_many_requests_to_system_replicas` [#61253](https://github.com/ClickHouse/ClickHouse/pull/61253) ([Nikita Taranov](https://github.com/nickitat)).
* Follow-up fix for ASTRenameQuery::clone [#61254](https://github.com/ClickHouse/ClickHouse/pull/61254) ([vdimir](https://github.com/vdimir)).
* Disable test 02998_primary_key_skip_columns.sql in sanitizer builds as it can be slow [#61256](https://github.com/ClickHouse/ClickHouse/pull/61256) ([Kruglov Pavel](https://github.com/Avogar)).
* Update curl to a version with a data race fix [#61264](https://github.com/ClickHouse/ClickHouse/pull/61264) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix `01417_freeze_partition_verbose` [#61266](https://github.com/ClickHouse/ClickHouse/pull/61266) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Free memory earlier in inserts [#61267](https://github.com/ClickHouse/ClickHouse/pull/61267) ([Anton Popov](https://github.com/CurtizJ)).
* Fix test_build_sets_from_multiple_threads/test.py::test_set [#61286](https://github.com/ClickHouse/ClickHouse/pull/61286) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Analyzer: Fix virtual columns in StorageMerge [#61298](https://github.com/ClickHouse/ClickHouse/pull/61298) ([Dmitry Novik](https://github.com/novikd)).
* Fix 01952_optimize_distributed_group_by_sharding_key with analyzer. [#61301](https://github.com/ClickHouse/ClickHouse/pull/61301) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix a data race in the Poco TCP server [#61309](https://github.com/ClickHouse/ClickHouse/pull/61309) ([Sema Checherinda](https://github.com/CheSema)).
* Don't use the default cluster in test test_distibuted_settings [#61314](https://github.com/ClickHouse/ClickHouse/pull/61314) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix false positive assertion in cache [#61319](https://github.com/ClickHouse/ClickHouse/pull/61319) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix test test_input_format_parallel_parsing_memory_tracking [#61322](https://github.com/ClickHouse/ClickHouse/pull/61322) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix 01761_cast_to_enum_nullable with analyzer. [#61323](https://github.com/ClickHouse/ClickHouse/pull/61323) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Add zookeeper retries for the exists check in forcefullyRemoveBrokenOutdatedPartFromZooKeeper [#61324](https://github.com/ClickHouse/ClickHouse/pull/61324) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Minor changes in stress and fuzzer reports [#61333](https://github.com/ClickHouse/ClickHouse/pull/61333) ([Raúl Marín](https://github.com/Algunenano)).
* Un-flake `test_undrop_query` [#61348](https://github.com/ClickHouse/ClickHouse/pull/61348) ([Robert Schulze](https://github.com/rschu1ze)).
* Tiny improvement for replication.lib [#61361](https://github.com/ClickHouse/ClickHouse/pull/61361) ([alesapin](https://github.com/alesapin)).
* Fix bugfix check (due to "unknown object storage type: azure") [#61363](https://github.com/ClickHouse/ClickHouse/pull/61363) ([Azat Khuzhin](https://github.com/azat)).
* Fix the 3-minute wait in `01599_multiline_input_and_singleline_comments` [#61371](https://github.com/ClickHouse/ClickHouse/pull/61371) ([Sergei Trifonov](https://github.com/serxa)).
* Terminate EC2 on a spot event if the runner isn't running [#61377](https://github.com/ClickHouse/ClickHouse/pull/61377) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Try to fix the docs check [#61378](https://github.com/ClickHouse/ClickHouse/pull/61378) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix `heap-use-after-free` for a Merge table with an alias [#61380](https://github.com/ClickHouse/ClickHouse/pull/61380) ([Antonio Andelic](https://github.com/antonio2368)).
* Disable `optimize_rewrite_sum_if_to_count_if` if the return type is nullable (new analyzer) [#61389](https://github.com/ClickHouse/ClickHouse/pull/61389) ([Antonio Andelic](https://github.com/antonio2368)).
* Analyzer: Fix planner context for subquery in StorageMerge [#61392](https://github.com/ClickHouse/ClickHouse/pull/61392) ([Dmitry Novik](https://github.com/novikd)).
* Fix `test_failed_async_inserts` [#61394](https://github.com/ClickHouse/ClickHouse/pull/61394) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||
* Fix test test_system_clusters_actual_information flakiness [#61395](https://github.com/ClickHouse/ClickHouse/pull/61395) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Remove default cluster from default config from test config [#61396](https://github.com/ClickHouse/ClickHouse/pull/61396) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Enable clang-tidy in headers [#61406](https://github.com/ClickHouse/ClickHouse/pull/61406) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Add sanity check for poll_max_batch_size FileLog setting [#61408](https://github.com/ClickHouse/ClickHouse/pull/61408) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* ThreadFuzzer: randomize sleep time [#61410](https://github.com/ClickHouse/ClickHouse/pull/61410) ([Tomer Shafir](https://github.com/tomershafir)).
|
||||
* Update version_date.tsv and changelogs after v23.8.11.28-lts [#61416](https://github.com/ClickHouse/ClickHouse/pull/61416) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||
* Update version_date.tsv and changelogs after v23.3.21.26-lts [#61418](https://github.com/ClickHouse/ClickHouse/pull/61418) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||
* Update version_date.tsv and changelogs after v24.1.7.18-stable [#61419](https://github.com/ClickHouse/ClickHouse/pull/61419) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||
* Update version_date.tsv and changelogs after v24.2.2.71-stable [#61420](https://github.com/ClickHouse/ClickHouse/pull/61420) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||
* Update version_date.tsv and changelogs after v23.12.5.81-stable [#61421](https://github.com/ClickHouse/ClickHouse/pull/61421) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||
* Restore automerge for approved PRs [#61433](https://github.com/ClickHouse/ClickHouse/pull/61433) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
* Disable broken SonarCloud [#61434](https://github.com/ClickHouse/ClickHouse/pull/61434) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
* Fix `01599_multiline_input_and_singleline_comments` properly [#61440](https://github.com/ClickHouse/ClickHouse/pull/61440) ([Sergei Trifonov](https://github.com/serxa)).
|
||||
* Convert test 02998_system_dns_cache_table to smoke and mirrors [#61443](https://github.com/ClickHouse/ClickHouse/pull/61443) ([vdimir](https://github.com/vdimir)).
|
||||
* Check boundaries for some settings in parallel replicas [#61455](https://github.com/ClickHouse/ClickHouse/pull/61455) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||
* Use SHARD_LOAD_QUEUE_BACKLOG for dictionaries in tests [#61462](https://github.com/ClickHouse/ClickHouse/pull/61462) ([vdimir](https://github.com/vdimir)).
|
||||
* Split `02125_lz4_compression_bug` [#61465](https://github.com/ClickHouse/ClickHouse/pull/61465) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Correctly process last stacktrace in `postprocess-traces.pl` [#61470](https://github.com/ClickHouse/ClickHouse/pull/61470) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Fix test `test_polymorphic_parts` [#61477](https://github.com/ClickHouse/ClickHouse/pull/61477) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* A definitive guide to CAST [#61491](https://github.com/ClickHouse/ClickHouse/pull/61491) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Minor rename in FileCache [#61494](https://github.com/ClickHouse/ClickHouse/pull/61494) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Remove useless code [#61498](https://github.com/ClickHouse/ClickHouse/pull/61498) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Fix fuzzers [#61499](https://github.com/ClickHouse/ClickHouse/pull/61499) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Update jdbc.md [#61506](https://github.com/ClickHouse/ClickHouse/pull/61506) ([San](https://github.com/santrancisco)).
|
||||
* Fix error in clickhouse-client [#61507](https://github.com/ClickHouse/ClickHouse/pull/61507) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Fix clang-tidy build [#61519](https://github.com/ClickHouse/ClickHouse/pull/61519) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Fix infinite loop in function `hop` [#61523](https://github.com/ClickHouse/ClickHouse/pull/61523) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Improve tests 00159_parallel_formatting_* to avoid timeouts [#61532](https://github.com/ClickHouse/ClickHouse/pull/61532) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Refactoring of reading from compact parts [#61535](https://github.com/ClickHouse/ClickHouse/pull/61535) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Don't run 01459_manual_write_to_replicas in debug build as it's too slow [#61538](https://github.com/ClickHouse/ClickHouse/pull/61538) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* CI: ARM integration test - skip hdfs, kerberos, kafka [#61542](https://github.com/ClickHouse/ClickHouse/pull/61542) ([Max K.](https://github.com/maxknv)).
|
||||
* More logging for loading of tables [#61546](https://github.com/ClickHouse/ClickHouse/pull/61546) ([Sergei Trifonov](https://github.com/serxa)).
|
||||
* Fixing 01584_distributed_buffer_cannot_find_column with analyzer. [#61550](https://github.com/ClickHouse/ClickHouse/pull/61550) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Wait for done mutation with more logs and asserts [#61554](https://github.com/ClickHouse/ClickHouse/pull/61554) ([alesapin](https://github.com/alesapin)).
|
||||
* Fix read_rows count with external group by [#61555](https://github.com/ClickHouse/ClickHouse/pull/61555) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* queries-file should be used to specify file [#61557](https://github.com/ClickHouse/ClickHouse/pull/61557) ([danila-ermakov](https://github.com/danila-ermakov)).
|
||||
* Fix `02481_async_insert_dedup_token` [#61568](https://github.com/ClickHouse/ClickHouse/pull/61568) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Add a comment after [#61458](https://github.com/ClickHouse/ClickHouse/issues/61458) [#61580](https://github.com/ClickHouse/ClickHouse/pull/61580) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Fix clickhouse-test client option and CLICKHOUSE_URL_PARAMS interference [#61596](https://github.com/ClickHouse/ClickHouse/pull/61596) ([vdimir](https://github.com/vdimir)).
|
||||
* CI: remove compose files from integration test docker [#61597](https://github.com/ClickHouse/ClickHouse/pull/61597) ([Max K.](https://github.com/maxknv)).
|
||||
* Fix 01244_optimize_distributed_group_by_sharding_key by ordering output [#61602](https://github.com/ClickHouse/ClickHouse/pull/61602) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
* Remove some tests from analyzer_tech_debt [#61603](https://github.com/ClickHouse/ClickHouse/pull/61603) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Reduce header dependencies [#61604](https://github.com/ClickHouse/ClickHouse/pull/61604) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Remove some magic_enum from headers [#61606](https://github.com/ClickHouse/ClickHouse/pull/61606) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Fix configs for upgrade and bugfix [#61607](https://github.com/ClickHouse/ClickHouse/pull/61607) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
* Add tests for multiple fuzzer issues [#61614](https://github.com/ClickHouse/ClickHouse/pull/61614) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Try to fix `02908_many_requests_to_system_replicas` again [#61616](https://github.com/ClickHouse/ClickHouse/pull/61616) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
* Verbose error message about analyzer_compatibility_join_using_top_level_identifier [#61631](https://github.com/ClickHouse/ClickHouse/pull/61631) ([vdimir](https://github.com/vdimir)).
|
||||
* Fix 00223_shard_distributed_aggregation_memory_efficient with analyzer [#61649](https://github.com/ClickHouse/ClickHouse/pull/61649) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Better fuzzer logs [#61650](https://github.com/ClickHouse/ClickHouse/pull/61650) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Fix flaky `02122_parallel_formatting_Template` [#61651](https://github.com/ClickHouse/ClickHouse/pull/61651) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Fix Aggregator when data is empty [#61654](https://github.com/ClickHouse/ClickHouse/pull/61654) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Restore poco SUN files [#61655](https://github.com/ClickHouse/ClickHouse/pull/61655) ([Andy Fiddaman](https://github.com/citrus-it)).
|
||||
* Another fix for `SumIfToCountIfPass` [#61656](https://github.com/ClickHouse/ClickHouse/pull/61656) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Keeper: fix data race during snapshot destructor call [#61657](https://github.com/ClickHouse/ClickHouse/pull/61657) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* CI: integration tests: use runner as py module [#61658](https://github.com/ClickHouse/ClickHouse/pull/61658) ([Max K.](https://github.com/maxknv)).
|
||||
* Fix logging of autoscaling lambda, add test for effective_capacity [#61662](https://github.com/ClickHouse/ClickHouse/pull/61662) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
* Small change in `DatabaseOnDisk::iterateMetadataFiles()` [#61664](https://github.com/ClickHouse/ClickHouse/pull/61664) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
* Build improvements by removing magic enum from header and apply some explicit template instantiation [#61665](https://github.com/ClickHouse/ClickHouse/pull/61665) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
|
||||
* Update the dictionary for OSSFuzz [#61672](https://github.com/ClickHouse/ClickHouse/pull/61672) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||
* Inhibit randomization in some tests and exclude some long tests from debug runs [#61676](https://github.com/ClickHouse/ClickHouse/pull/61676) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Add a test for [#61669](https://github.com/ClickHouse/ClickHouse/issues/61669) [#61678](https://github.com/ClickHouse/ClickHouse/pull/61678) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Fix use-of-uninitialized-value in HedgedConnections [#61679](https://github.com/ClickHouse/ClickHouse/pull/61679) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||
* Remove clickhouse-diagnostics from the package [#61681](https://github.com/ClickHouse/ClickHouse/pull/61681) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Fix use-of-uninitialized-value in parseDateTimeBestEffort [#61694](https://github.com/ClickHouse/ClickHouse/pull/61694) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||
* poco foundation: add illumos support [#61701](https://github.com/ClickHouse/ClickHouse/pull/61701) ([Andy Fiddaman](https://github.com/citrus-it)).
|
||||
* contrib/c-ares: add illumos as a platform [#61702](https://github.com/ClickHouse/ClickHouse/pull/61702) ([Andy Fiddaman](https://github.com/citrus-it)).
|
||||
* contrib/curl: Add illumos support [#61704](https://github.com/ClickHouse/ClickHouse/pull/61704) ([Andy Fiddaman](https://github.com/citrus-it)).
|
||||
* Fuzzer: Try a different way to wait for the server [#61706](https://github.com/ClickHouse/ClickHouse/pull/61706) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Disable some tests for SMT [#61708](https://github.com/ClickHouse/ClickHouse/pull/61708) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Fix signal handler for sanitizer signals [#61709](https://github.com/ClickHouse/ClickHouse/pull/61709) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Avoid `IsADirectoryError: Is a directory contrib/azure` [#61710](https://github.com/ClickHouse/ClickHouse/pull/61710) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
* Analyzer: fix group_by_use_nulls [#61717](https://github.com/ClickHouse/ClickHouse/pull/61717) ([Dmitry Novik](https://github.com/novikd)).
|
||||
* Analyzer: Clear list of broken integration tests [#61718](https://github.com/ClickHouse/ClickHouse/pull/61718) ([Dmitry Novik](https://github.com/novikd)).
|
||||
* CI: modify CI from PR body [#61725](https://github.com/ClickHouse/ClickHouse/pull/61725) ([Max K.](https://github.com/maxknv)).
|
||||
* Add test for [#57820](https://github.com/ClickHouse/ClickHouse/issues/57820) [#61726](https://github.com/ClickHouse/ClickHouse/pull/61726) ([Dmitry Novik](https://github.com/novikd)).
|
||||
* Revert "Revert "Un-flake test_undrop_query"" [#61727](https://github.com/ClickHouse/ClickHouse/pull/61727) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
|
||||
* FunctionsConversion: Start simplifying templates [#61733](https://github.com/ClickHouse/ClickHouse/pull/61733) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* CI: modify it [#61735](https://github.com/ClickHouse/ClickHouse/pull/61735) ([Max K.](https://github.com/maxknv)).
|
||||
* Fix segfault in SquashingTransform [#61736](https://github.com/ClickHouse/ClickHouse/pull/61736) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||
* Fix DWARF format failing to skip DW_FORM_strx3 attributes [#61737](https://github.com/ClickHouse/ClickHouse/pull/61737) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||
* There is no such thing as broken tests [#61739](https://github.com/ClickHouse/ClickHouse/pull/61739) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Process removed files, decouple _check_mime [#61751](https://github.com/ClickHouse/ClickHouse/pull/61751) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
* Keeper fix: destroy `KeeperDispatcher` first [#61752](https://github.com/ClickHouse/ClickHouse/pull/61752) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Fix flaky `03014_async_with_dedup_part_log_rmt` [#61757](https://github.com/ClickHouse/ClickHouse/pull/61757) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* FunctionsConversion: Remove another batch of bad templates [#61773](https://github.com/ClickHouse/ClickHouse/pull/61773) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Revert "Fix bug when reading system.parts using UUID (issue 61220)." [#61774](https://github.com/ClickHouse/ClickHouse/pull/61774) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
|
||||
* CI: disable grpc tests on ARM [#61778](https://github.com/ClickHouse/ClickHouse/pull/61778) ([Max K.](https://github.com/maxknv)).
|
||||
* Fix more tests with virtual columns in StorageMerge. [#61787](https://github.com/ClickHouse/ClickHouse/pull/61787) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Remove already not flaky tests with analyzer. [#61788](https://github.com/ClickHouse/ClickHouse/pull/61788) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Analyzer: Fix assert in JOIN with Distributed table [#61789](https://github.com/ClickHouse/ClickHouse/pull/61789) ([vdimir](https://github.com/vdimir)).
|
||||
* A test can be slow in debug build [#61796](https://github.com/ClickHouse/ClickHouse/pull/61796) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Updated clang-19 to master. [#61798](https://github.com/ClickHouse/ClickHouse/pull/61798) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Fix test "00002_log_and_exception_messages_formatting" [#61821](https://github.com/ClickHouse/ClickHouse/pull/61821) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* A test is too slow for debug [#61822](https://github.com/ClickHouse/ClickHouse/pull/61822) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Remove DataStreams [#61824](https://github.com/ClickHouse/ClickHouse/pull/61824) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Better message for logging errors [#61827](https://github.com/ClickHouse/ClickHouse/pull/61827) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix sanitizers suppressions [#61828](https://github.com/ClickHouse/ClickHouse/pull/61828) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Remove unused code [#61830](https://github.com/ClickHouse/ClickHouse/pull/61830) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Remove DataStreams (2) [#61831](https://github.com/ClickHouse/ClickHouse/pull/61831) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Update xxhash to v0.8.2 [#61838](https://github.com/ClickHouse/ClickHouse/pull/61838) ([Shubham Ranjan](https://github.com/shubhamranjan)).
|
||||
* Fix: DISTINCT in subquery with analyzer [#61847](https://github.com/ClickHouse/ClickHouse/pull/61847) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||
* Analyzer: fix limit/offset on shards [#61849](https://github.com/ClickHouse/ClickHouse/pull/61849) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||
* Remove PoolBase::AllocateNewBypassingPool [#61866](https://github.com/ClickHouse/ClickHouse/pull/61866) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Try to fix 02901_parallel_replicas_rollup with analyzer. [#61875](https://github.com/ClickHouse/ClickHouse/pull/61875) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Add test for [#57808](https://github.com/ClickHouse/ClickHouse/issues/57808) [#61879](https://github.com/ClickHouse/ClickHouse/pull/61879) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||
* CI: merge queue support [#61881](https://github.com/ClickHouse/ClickHouse/pull/61881) ([Max K.](https://github.com/maxknv)).
|
||||
* Update create.sql [#61885](https://github.com/ClickHouse/ClickHouse/pull/61885) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* no smaller unit in date_trunc [#61888](https://github.com/ClickHouse/ClickHouse/pull/61888) ([jsc0218](https://github.com/jsc0218)).
|
||||
* Move KQL trash where it is supposed to be [#61903](https://github.com/ClickHouse/ClickHouse/pull/61903) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Changelog for 24.3 [#61909](https://github.com/ClickHouse/ClickHouse/pull/61909) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Update version_date.tsv and changelogs after v23.3.22.3-lts [#61914](https://github.com/ClickHouse/ClickHouse/pull/61914) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||
* Update version_date.tsv and changelogs after v23.8.12.13-lts [#61915](https://github.com/ClickHouse/ClickHouse/pull/61915) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||
* No "please" [#61916](https://github.com/ClickHouse/ClickHouse/pull/61916) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Update version_date.tsv and changelogs after v23.12.6.19-stable [#61917](https://github.com/ClickHouse/ClickHouse/pull/61917) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||
* Update version_date.tsv and changelogs after v24.1.8.22-stable [#61918](https://github.com/ClickHouse/ClickHouse/pull/61918) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||
* Fix flaky test_broken_projestions/test.py::test_broken_ignored_replic… [#61932](https://github.com/ClickHouse/ClickHouse/pull/61932) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Check if Rust is available for the build and, if not, suggest a way to disable Rust support [#61938](https://github.com/ClickHouse/ClickHouse/pull/61938) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* CI: new ci menu in PR body [#61948](https://github.com/ClickHouse/ClickHouse/pull/61948) ([Max K.](https://github.com/maxknv)).
|
||||
* Remove flaky test `01193_metadata_loading` [#61961](https://github.com/ClickHouse/ClickHouse/pull/61961) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
|
||||
#### Packaging Improvement
|
||||
|
||||
* Add the `--now` option to enable and start the service automatically when installing the database server. [#60656](https://github.com/ClickHouse/ClickHouse/pull/60656) ([Chun-Sheng, Li](https://github.com/peter279k)).
|
||||
|
@ -55,9 +55,7 @@ To build using Homebrew's vanilla Clang compiler (the only **recommended** way):
|
||||
cd ClickHouse
|
||||
mkdir build
|
||||
export PATH=$(brew --prefix llvm)/bin:$PATH
|
||||
export CC=$(brew --prefix llvm)/bin/clang
|
||||
export CXX=$(brew --prefix llvm)/bin/clang++
|
||||
cmake -G Ninja -DCMAKE_BUILD_TYPE=RelWithDebInfo -S . -B build
|
||||
cmake -G Ninja -DCMAKE_BUILD_TYPE=RelWithDebInfo -DCMAKE_C_COMPILER=$(brew --prefix llvm)/bin/clang -DCMAKE_CXX_COMPILER=$(brew --prefix llvm)/bin/clang++ -S . -B build
|
||||
cmake --build build
|
||||
# The resulting binary will be created at: build/programs/clickhouse
|
||||
```
|
||||
|
@ -153,6 +153,26 @@ Upon successful build you get an executable file `ClickHouse/<build_dir>/program
|
||||
|
||||
ls -l programs/clickhouse
|
||||
|
||||
### Advanced Building Process {#advanced-building-process}
|
||||
|
||||
#### Minimal Build {#minimal-build}
|
||||
|
||||
If you are not interested in functionality provided by third-party libraries, you can further speed up the build using `cmake` options
|
||||
|
||||
```
|
||||
cmake -DENABLE_LIBRARIES=OFF
|
||||
```
|
||||
|
||||
In case of problems with any of the development options, you are on your own!
|
||||
|
||||
#### Rust support {#rust-support}
|
||||
|
||||
Building the Rust components requires an internet connection. If you don't have one, you can disable Rust support:
|
||||
|
||||
```
|
||||
cmake -DENABLE_RUST=OFF
|
||||
```
|
||||
|
||||
## Running the Built Executable of ClickHouse {#running-the-built-executable-of-clickhouse}
|
||||
|
||||
To run the server under the current user you need to navigate to `ClickHouse/programs/server/` (located outside of `build`) and run:
|
||||
@ -250,10 +270,3 @@ Most probably some of the builds will fail at first times. This is due to the fa
|
||||
You can use GitHub integrated code browser [here](https://github.dev/ClickHouse/ClickHouse).
|
||||
|
||||
Also, you can browse sources on [GitHub](https://github.com/ClickHouse/ClickHouse) as usual.
|
||||
|
||||
If you are not interested in functionality provided by third-party libraries, you can further speed up the build using `cmake` options
|
||||
```
|
||||
-DENABLE_LIBRARIES=0
|
||||
```
|
||||
|
||||
In case of problems with any of the development options, you are on your own!
|
||||
|
@ -4,7 +4,11 @@ sidebar_position: 50
|
||||
sidebar_label: MySQL
|
||||
---
|
||||
|
||||
# MySQL
|
||||
import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge';
|
||||
|
||||
# MySQL Database Engine
|
||||
|
||||
<CloudNotSupportedBadge />
|
||||
|
||||
Allows connecting to databases on a remote MySQL server and performing `INSERT` and `SELECT` queries to exchange data between ClickHouse and MySQL.
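For example, a minimal sketch (the host, credentials, and database name below are placeholders, not values from this guide):

``` sql
-- Create a ClickHouse database that proxies a remote MySQL database
CREATE DATABASE mysql_db ENGINE = MySQL('mysql-host:3306', 'my_database', 'my_user', 'my_password');

-- Tables of the remote database become visible and queryable
SHOW TABLES FROM mysql_db;
SELECT * FROM mysql_db.some_table LIMIT 10;
```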
|
||||
|
||||
|
@ -6,6 +6,11 @@ sidebar_label: JDBC
|
||||
|
||||
# JDBC
|
||||
|
||||
:::note
|
||||
clickhouse-jdbc-bridge contains experimental code and is no longer supported. It may contain reliability issues and security vulnerabilities. Use it at your own risk.
|
||||
ClickHouse recommends using its built-in table functions, which provide a better alternative for ad-hoc querying scenarios (Postgres, MySQL, MongoDB, etc.).
|
||||
:::
|
||||
|
||||
Allows ClickHouse to connect to external databases via [JDBC](https://en.wikipedia.org/wiki/Java_Database_Connectivity).
|
||||
|
||||
To implement the JDBC connection, ClickHouse uses the separate program [clickhouse-jdbc-bridge](https://github.com/ClickHouse/clickhouse-jdbc-bridge) that should run as a daemon.
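As an illustration, a minimal sketch, assuming the bridge daemon is running on its default host and port and a MySQL table `test.some_table` is reachable from it (all names and credentials are placeholders):

``` sql
-- The first argument is the JDBC connection string handled by clickhouse-jdbc-bridge
CREATE TABLE jdbc_table
ENGINE = JDBC('jdbc:mysql://localhost:3306/?user=root&password=root', 'test', 'some_table');

SELECT * FROM jdbc_table LIMIT 10;
```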
|
||||
|
@ -4,7 +4,11 @@ sidebar_position: 138
|
||||
sidebar_label: MySQL
|
||||
---
|
||||
|
||||
# MySQL
|
||||
import CloudAvailableBadge from '@theme/badges/CloudAvailableBadge';
|
||||
|
||||
# MySQL Table Engine
|
||||
|
||||
<CloudAvailableBadge />
|
||||
|
||||
The MySQL engine allows you to perform `SELECT` and `INSERT` queries on data that is stored on a remote MySQL server.
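For example, a minimal sketch (the host, schema, and credentials are placeholders):

``` sql
CREATE TABLE mysql_table
(
    `id` UInt32,
    `name` String
)
ENGINE = MySQL('mysql-host:3306', 'my_database', 'my_table', 'my_user', 'my_password');

-- SELECT queries are forwarded to MySQL; INSERT queries append rows to the remote table
SELECT * FROM mysql_table WHERE id < 10;
INSERT INTO mysql_table VALUES (1, 'one');
```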
|
||||
|
||||
|
@ -8,6 +8,10 @@ sidebar_label: PostgreSQL
|
||||
|
||||
The PostgreSQL engine allows performing `SELECT` and `INSERT` queries on data that is stored on a remote PostgreSQL server.
|
||||
|
||||
:::note
|
||||
Currently, only PostgreSQL versions 12 and up are supported.
|
||||
:::
|
||||
|
||||
## Creating a Table {#creating-a-table}
|
||||
|
||||
``` sql
|
||||
|
@ -18,8 +18,8 @@ This engine allows integrating ClickHouse with [RabbitMQ](https://www.rabbitmq.c
|
||||
``` sql
|
||||
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
|
||||
(
|
||||
name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
|
||||
name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
|
||||
name1 [type1],
|
||||
name2 [type2],
|
||||
...
|
||||
) ENGINE = RabbitMQ SETTINGS
|
||||
rabbitmq_host_port = 'host:port' [or rabbitmq_address = 'amqp(s)://guest:guest@localhost/vhost'],
|
||||
@ -198,6 +198,10 @@ Additional virtual columns when `kafka_handle_error_mode='stream'`:
|
||||
|
||||
Note: the `_raw_message` and `_error` virtual columns are filled only when an exception occurs during parsing; they are always `NULL` when the message was parsed successfully.
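For illustration, a minimal sketch, assuming a streaming-engine table named `queue` that was created with the `stream` error handling mode (the table name is a placeholder):

``` sql
-- Rows that failed to parse carry the raw payload and the parsing error;
-- successfully parsed rows have NULL in both virtual columns
SELECT _raw_message, _error
FROM queue
WHERE _error IS NOT NULL;
```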
|
||||
|
||||
## Caveats {#caveats}
|
||||
|
||||
Even though you may specify [default column expressions](/docs/en/sql-reference/statements/create/table.md/#default_values) (such as `DEFAULT`, `MATERIALIZED`, `ALIAS`) in the table definition, these will be ignored. Instead, the columns will be filled with their respective default values for their types.
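A sketch of this behavior (the connection settings are placeholders):

``` sql
CREATE TABLE queue
(
    `id` UInt32,
    -- the DEFAULT expression below is ignored; missing values get the type default (0) instead
    `price` Float64 DEFAULT 1.0
)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'localhost:5672',
         rabbitmq_exchange_name = 'exchange1',
         rabbitmq_format = 'JSONEachRow';
```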
|
||||
|
||||
## Data formats support {#data-formats-support}
|
||||
|
||||
RabbitMQ engine supports all [formats](../../../interfaces/formats.md) supported in ClickHouse.
|
||||
|
@ -946,96 +946,6 @@ You could change storage policy after table creation with [ALTER TABLE ... MODIF
|
||||
|
||||
The number of threads performing background moves of data parts can be changed by the [background_move_pool_size](/docs/en/operations/server-configuration-parameters/settings.md/#background_move_pool_size) setting.
|
||||
|
||||
### Dynamic Storage
|
||||
|
||||
This example query shows how to attach a table stored at a URL and configure the
|
||||
remote storage within the query. The web storage is not configured in the ClickHouse
|
||||
configuration files; all the settings are in the CREATE/ATTACH query.
|
||||
|
||||
:::note
|
||||
The example uses `type=web`, but any disk type can be configured as dynamic, even a local disk. Local disks require the path argument to be inside the server config parameter `custom_local_disks_base_directory`, which has no default, so set that parameter as well when using a local disk.
|
||||
:::
|
||||
|
||||
#### Example dynamic web storage
|
||||
|
||||
:::tip
|
||||
A [demo dataset](https://github.com/ClickHouse/web-tables-demo) is hosted in GitHub. To prepare your own tables for web storage see the tool [clickhouse-static-files-uploader](/docs/en/operations/storing-data.md/#storing-data-on-webserver)
|
||||
:::
|
||||
|
||||
In this `ATTACH TABLE` query the `UUID` provided matches the directory name of the data, and the endpoint is the URL for the raw GitHub content.
|
||||
|
||||
```sql
|
||||
# highlight-next-line
|
||||
ATTACH TABLE uk_price_paid UUID 'cf712b4f-2ca8-435c-ac23-c4393efe52f7'
|
||||
(
|
||||
price UInt32,
|
||||
date Date,
|
||||
postcode1 LowCardinality(String),
|
||||
postcode2 LowCardinality(String),
|
||||
type Enum8('other' = 0, 'terraced' = 1, 'semi-detached' = 2, 'detached' = 3, 'flat' = 4),
|
||||
is_new UInt8,
|
||||
duration Enum8('unknown' = 0, 'freehold' = 1, 'leasehold' = 2),
|
||||
addr1 String,
|
||||
addr2 String,
|
||||
street LowCardinality(String),
|
||||
locality LowCardinality(String),
|
||||
town LowCardinality(String),
|
||||
district LowCardinality(String),
|
||||
county LowCardinality(String)
|
||||
)
|
||||
ENGINE = MergeTree
|
||||
ORDER BY (postcode1, postcode2, addr1, addr2)
|
||||
# highlight-start
|
||||
SETTINGS disk = disk(
|
||||
type=web,
|
||||
endpoint='https://raw.githubusercontent.com/ClickHouse/web-tables-demo/main/web/'
|
||||
);
|
||||
# highlight-end
|
||||
```
|
||||
|
||||
### Nested Dynamic Storage
|
||||
|
||||
This example query builds on the above dynamic disk configuration and shows how to
|
||||
use a local disk to cache data from a table stored at a URL. Neither the cache disk
|
||||
nor the web storage is configured in the ClickHouse configuration files; both are
|
||||
configured in the CREATE/ATTACH query settings.
|
||||
|
||||
In the settings highlighted below notice that the disk of `type=web` is nested within
|
||||
the disk of `type=cache`.
|
||||
|
||||
```sql
|
||||
ATTACH TABLE uk_price_paid UUID 'cf712b4f-2ca8-435c-ac23-c4393efe52f7'
|
||||
(
|
||||
price UInt32,
|
||||
date Date,
|
||||
postcode1 LowCardinality(String),
|
||||
postcode2 LowCardinality(String),
|
||||
type Enum8('other' = 0, 'terraced' = 1, 'semi-detached' = 2, 'detached' = 3, 'flat' = 4),
|
||||
is_new UInt8,
|
||||
duration Enum8('unknown' = 0, 'freehold' = 1, 'leasehold' = 2),
|
||||
addr1 String,
|
||||
addr2 String,
|
||||
street LowCardinality(String),
|
||||
locality LowCardinality(String),
|
||||
town LowCardinality(String),
|
||||
district LowCardinality(String),
|
||||
county LowCardinality(String)
|
||||
)
|
||||
ENGINE = MergeTree
|
||||
ORDER BY (postcode1, postcode2, addr1, addr2)
|
||||
# highlight-start
|
||||
SETTINGS disk = disk(
|
||||
type=cache,
|
||||
max_size='1Gi',
|
||||
path='/var/lib/clickhouse/custom_disk_cache/',
|
||||
disk=disk(
|
||||
type=web,
|
||||
endpoint='https://raw.githubusercontent.com/ClickHouse/web-tables-demo/main/web/'
|
||||
)
|
||||
);
|
||||
# highlight-end
|
||||
```
|
||||
|
||||
### Details {#details}
|
||||
|
||||
In the case of `MergeTree` tables, data reaches the disk in different ways:
|
||||
@ -1064,13 +974,11 @@ During this time, they are not moved to other volumes or disks. Therefore, until
|
||||
|
||||
Users can assign new big parts to different disks of a [JBOD](https://en.wikipedia.org/wiki/Non-RAID_drive_architectures) volume in a balanced way using the [min_bytes_to_rebalance_partition_over_jbod](/docs/en/operations/settings/merge-tree-settings.md/#min-bytes-to-rebalance-partition-over-jbod) setting.
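For example, a sketch of changing this setting for an existing table (the table name and threshold are illustrative):

``` sql
ALTER TABLE jbod_table
    MODIFY SETTING min_bytes_to_rebalance_partition_over_jbod = 10737418240; -- 10 GiB
```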
|
||||
|
||||
## Using S3 for Data Storage {#table_engine-mergetree-s3}
|
||||
## Using External Storage for Data Storage {#table_engine-mergetree-s3}
|
||||
|
||||
:::note
|
||||
Google Cloud Storage (GCS) is also supported using the type `s3`. See [GCS backed MergeTree](/docs/en/integrations/gcs).
|
||||
:::
|
||||
[MergeTree](/docs/en/engines/table-engines/mergetree-family/mergetree.md) family table engines can store data to `S3`, `AzureBlobStorage`, or `HDFS` using disks with types `s3`, `azure_blob_storage`, and `hdfs` respectively. See [configuring external storage options](/docs/en/operations/storing-data.md/#configuring-external-storage) for more details.
|
||||
|
||||
`MergeTree` family table engines can store data to [S3](https://aws.amazon.com/s3/) using a disk with type `s3`.
|
||||
An example of using [S3](https://aws.amazon.com/s3/) as external storage with a disk of type `s3` follows.
|
||||
|
||||
Configuration markup:
|
||||
``` xml
|
||||
@ -1112,253 +1020,12 @@ Configuration markup:
|
||||
</storage_configuration>
|
||||
```
|
||||
|
||||
Also see [configuring external storage options](/docs/en/operations/storing-data.md/#configuring-external-storage).
|
||||
|
||||
:::note cache configuration
|
||||
ClickHouse versions 22.3 through 22.7 use a different cache configuration, see [using local cache](/docs/en/operations/storing-data.md/#using-local-cache) if you are using one of those versions.
|
||||
:::
|
||||
|
||||
### Configuring the S3 disk
|
||||
|
||||
Required parameters:
|
||||
|
||||
- `endpoint` — S3 endpoint URL in `path` or `virtual hosted` [styles](https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html). Endpoint URL should contain a bucket and root path to store data.
|
||||
- `access_key_id` — S3 access key id.
|
||||
- `secret_access_key` — S3 secret access key.
|
||||
|
||||
Optional parameters:
|
||||
|
||||
- `region` — S3 region name.
|
||||
- `support_batch_delete` — Controls the check for whether batch deletes are supported. Set this to `false` when using Google Cloud Storage (GCS): GCS does not support batch deletes, and disabling the check avoids error messages in the logs.
|
||||
- `use_environment_credentials` — Reads AWS credentials from the environment variables `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, and `AWS_SESSION_TOKEN` if they exist. Default value is `false`.
|
||||
- `use_insecure_imds_request` — If set to `true`, S3 client will use insecure IMDS request while obtaining credentials from Amazon EC2 metadata. Default value is `false`.
|
||||
- `expiration_window_seconds` — Grace period for checking if expiration-based credentials have expired. Optional, default value is `120`.
|
||||
- `proxy` — Proxy configuration for S3 endpoint. Each `uri` element inside `proxy` block should contain a proxy URL.
|
||||
- `connect_timeout_ms` — Socket connect timeout in milliseconds. Default value is `10 seconds`.
|
||||
- `request_timeout_ms` — Request timeout in milliseconds. Default value is `5 seconds`.
|
||||
- `retry_attempts` — Number of retry attempts in case of failed request. Default value is `10`.
|
||||
- `single_read_retries` — Number of retry attempts in case of connection drop during read. Default value is `4`.
|
||||
- `min_bytes_for_seek` — Minimal number of bytes to use seek operation instead of sequential read. Default value is `1 Mb`.
|
||||
- `metadata_path` — Path on local FS to store metadata files for S3. Default value is `/var/lib/clickhouse/disks/<disk_name>/`.
|
||||
- `skip_access_check` — If true, disk access checks will not be performed on disk start-up. Default value is `false`.
|
||||
- `header` — Adds specified HTTP header to a request to given endpoint. Optional, can be specified multiple times.
|
||||
- `server_side_encryption_customer_key_base64` — If specified, required headers for accessing S3 objects with SSE-C encryption will be set.
|
||||
- `server_side_encryption_kms_key_id` - If specified, required headers for accessing S3 objects with [SSE-KMS encryption](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) will be set. If an empty string is specified, the AWS managed S3 key will be used. Optional.
|
||||
- `server_side_encryption_kms_encryption_context` - If specified alongside `server_side_encryption_kms_key_id`, the given encryption context header for SSE-KMS will be set. Optional.
|
||||
- `server_side_encryption_kms_bucket_key_enabled` - If specified alongside `server_side_encryption_kms_key_id`, the header to enable S3 bucket keys for SSE-KMS will be set. Optional, can be `true` or `false`, defaults to nothing (matches the bucket-level setting).
|
||||
- `s3_max_put_rps` — Maximum PUT requests per second rate before throttling. Default value is `0` (unlimited).
|
||||
- `s3_max_put_burst` — Max number of requests that can be issued simultaneously before hitting request per second limit. By default (`0` value) equals to `s3_max_put_rps`.
|
||||
- `s3_max_get_rps` — Maximum GET requests per second rate before throttling. Default value is `0` (unlimited).
|
||||
- `s3_max_get_burst` — Max number of requests that can be issued simultaneously before hitting request per second limit. By default (`0` value) equals to `s3_max_get_rps`.
|
||||
- `read_resource` — Resource name to be used for [scheduling](/docs/en/operations/workload-scheduling.md) of read requests to this disk. Default value is empty string (IO scheduling is not enabled for this disk).
|
||||
- `write_resource` — Resource name to be used for [scheduling](/docs/en/operations/workload-scheduling.md) of write requests to this disk. Default value is empty string (IO scheduling is not enabled for this disk).
|
||||
- `key_template` — Defines the format with which object keys are generated. By default, ClickHouse takes the `root path` from the `endpoint` option and adds a randomly generated suffix: a directory of 3 random symbols and a file name of 29 random symbols. With this option you have full control over how object keys are generated. Some usage scenarios require random symbols in the prefix or in the middle of the object key, for example: `[a-z]{3}-prefix-random/constant-part/random-middle-[a-z]{3}/random-suffix-[a-z]{29}`. The value is parsed with [`re2`](https://github.com/google/re2/wiki/Syntax); only a subset of the syntax is supported, so check that your preferred format is supported before using this option. The disk is not initialized if ClickHouse is unable to generate a key from the value of `key_template`. The option requires the feature flag [storage_metadata_write_full_object_key](/docs/en/operations/settings/settings#storage_metadata_write_full_object_key) to be enabled, forbids declaring the `root path` in the `endpoint` option, and requires the option `key_compatibility_prefix` to be defined.
|
||||
- `key_compatibility_prefix` — This option is required when `key_template` is in use. To be able to read object keys that were stored in metadata files with a metadata version lower than `VERSION_FULL_OBJECT_KEY`, the previous `root path` from the `endpoint` option should be set here.
|
||||
|
||||
### Configuring the cache
|
||||
|
||||
This is the cache configuration from above:
|
||||
```xml
|
||||
<s3_cache>
|
||||
<type>cache</type>
|
||||
<disk>s3</disk>
|
||||
<path>/var/lib/clickhouse/disks/s3_cache/</path>
|
||||
<max_size>10Gi</max_size>
|
||||
</s3_cache>
|
||||
```
|
||||
|
||||
These parameters define the cache layer:
|
||||
- `type` — If a disk is of type `cache` it caches mark and index files in memory.
|
||||
- `disk` — The name of the disk that will be cached.
|
||||
|
||||
Cache parameters:
|
||||
- `path` — The path where metadata for the cache is stored.
|
||||
- `max_size` — The size (amount of disk space) that the cache can grow to.
|
||||
|
||||
:::tip
|
||||
There are several other cache parameters that you can use to tune your storage, see [using local cache](/docs/en/operations/storing-data.md/#using-local-cache) for the details.
|
||||
:::
|
||||
|
||||
S3 disk can be configured as `main` or `cold` storage:
|
||||
``` xml
|
||||
<storage_configuration>
|
||||
...
|
||||
<disks>
|
||||
<s3>
|
||||
<type>s3</type>
|
||||
<endpoint>https://clickhouse-public-datasets.s3.amazonaws.com/my-bucket/root-path/</endpoint>
|
||||
<access_key_id>your_access_key_id</access_key_id>
|
||||
<secret_access_key>your_secret_access_key</secret_access_key>
|
||||
</s3>
|
||||
</disks>
|
||||
<policies>
|
||||
<s3_main>
|
||||
<volumes>
|
||||
<main>
|
||||
<disk>s3</disk>
|
||||
</main>
|
||||
</volumes>
|
||||
</s3_main>
|
||||
<s3_cold>
|
||||
<volumes>
|
||||
<main>
|
||||
<disk>default</disk>
|
||||
</main>
|
||||
<external>
|
||||
<disk>s3</disk>
|
||||
</external>
|
||||
</volumes>
|
||||
<move_factor>0.2</move_factor>
|
||||
</s3_cold>
|
||||
</policies>
|
||||
...
|
||||
</storage_configuration>
|
||||
```
|
||||
|
||||
With the `cold` option, data can be moved to S3 when the free space on the local disk becomes smaller than `move_factor * disk_size`, or by a TTL move rule.
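A table can then opt into one of the policies defined above at creation time, for example (a sketch reusing the `s3_cold` policy name from the markup):

``` sql
CREATE TABLE s3_cold_table
(
    `key` UInt64,
    `value` String
)
ENGINE = MergeTree
ORDER BY key
SETTINGS storage_policy = 's3_cold';
```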
|
||||
|
||||
## Using Azure Blob Storage for Data Storage {#table_engine-mergetree-azure-blob-storage}
|
||||
|
||||
`MergeTree` family table engines can store data to [Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/blobs/) using a disk with type `azure_blob_storage`.
|
||||
|
||||
As of February 2022, this feature is still a fresh addition, so expect that some Azure Blob Storage functionalities might be unimplemented.
|
||||
|
||||
Configuration markup:
|
||||
``` xml
|
||||
<storage_configuration>
|
||||
...
|
||||
<disks>
|
||||
<blob_storage_disk>
|
||||
<type>azure_blob_storage</type>
|
||||
<storage_account_url>http://account.blob.core.windows.net</storage_account_url>
|
||||
<container_name>container</container_name>
|
||||
<account_name>account</account_name>
|
||||
<account_key>pass123</account_key>
|
||||
<metadata_path>/var/lib/clickhouse/disks/blob_storage_disk/</metadata_path>
|
||||
<cache_path>/var/lib/clickhouse/disks/blob_storage_disk/cache/</cache_path>
|
||||
<skip_access_check>false</skip_access_check>
|
||||
</blob_storage_disk>
|
||||
</disks>
|
||||
...
|
||||
</storage_configuration>
|
||||
```
|
||||
|
||||
Connection parameters:
|
||||
* `endpoint` — AzureBlobStorage endpoint URL with container and prefix (`http://account.blob.core.windows.net:{port}/[account_name]{container_name}/{data_prefix}`); it can optionally contain `account_name` if the authentication method in use needs it. Alternatively, these parameters can be provided separately using `storage_account_url`, `account_name`, and `container_name`. For specifying a prefix, `endpoint` should be used.
|
||||
* `endpoint_contains_account_name` - This flag specifies whether the endpoint contains `account_name`, as it is only needed for certain authentication methods. (Default: `true`)
|
||||
* `storage_account_url` - Required if `endpoint` is not specified. The Azure Blob Storage account URL, like `http://account.blob.core.windows.net` or `http://azurite1:10000/devstoreaccount1`.
|
||||
* `container_name` - Target container name, defaults to `default-container`.
|
||||
* `container_already_exists` - If set to `false`, a new container `container_name` is created in the storage account, if set to `true`, disk connects to the container directly, and if left unset, disk connects to the account, checks if the container `container_name` exists, and creates it if it doesn't exist yet.
|
||||
|
||||
Authentication parameters (the disk will try all available methods **and** Managed Identity Credential):
|
||||
* `connection_string` - For authentication using a connection string.
|
||||
* `account_name` and `account_key` - For authentication using Shared Key.
|
||||
|
||||
Limit parameters (mainly for internal usage):
|
||||
* `s3_max_single_part_upload_size` - Limits the size of a single block upload to Blob Storage.
|
||||
* `min_bytes_for_seek` - Limits the size of a seekable region.
|
||||
* `max_single_read_retries` - Limits the number of attempts to read a chunk of data from Blob Storage.
|
||||
* `max_single_download_retries` - Limits the number of attempts to download a readable buffer from Blob Storage.
|
||||
* `thread_pool_size` - Limits the number of threads with which `IDiskRemote` is instantiated.
|
||||
* `s3_max_inflight_parts_for_one_file` - Limits the number of put requests that can be run concurrently for one object.
|
||||
|
||||
Other parameters:
|
||||
* `metadata_path` - Path on local FS to store metadata files for Blob Storage. Default value is `/var/lib/clickhouse/disks/<disk_name>/`.
|
||||
* `skip_access_check` - If true, disk access checks will not be performed on disk start-up. Default value is `false`.
|
||||
* `read_resource` — Resource name to be used for [scheduling](/docs/en/operations/workload-scheduling.md) of read requests to this disk. Default value is empty string (IO scheduling is not enabled for this disk).
|
||||
* `write_resource` — Resource name to be used for [scheduling](/docs/en/operations/workload-scheduling.md) of write requests to this disk. Default value is empty string (IO scheduling is not enabled for this disk).
|
||||
|
||||
Examples of working configurations can be found in integration tests directory (see e.g. [test_merge_tree_azure_blob_storage](https://github.com/ClickHouse/ClickHouse/blob/master/tests/integration/test_merge_tree_azure_blob_storage/configs/config.d/storage_conf.xml) or [test_azure_blob_storage_zero_copy_replication](https://github.com/ClickHouse/ClickHouse/blob/master/tests/integration/test_azure_blob_storage_zero_copy_replication/configs/config.d/storage_conf.xml)).
|
||||
|
||||
:::note Zero-copy replication is not ready for production
|
||||
Zero-copy replication is disabled by default in ClickHouse version 22.8 and higher. This feature is not recommended for production use.
|
||||
:::
|
||||
|
||||
## HDFS storage {#hdfs-storage}
|
||||
|
||||
In this sample configuration:
|
||||
- the disk is of type `hdfs`
|
||||
- the data is hosted at `hdfs://hdfs1:9000/clickhouse/`
|
||||
|
||||
```xml
|
||||
<clickhouse>
|
||||
<storage_configuration>
|
||||
<disks>
|
||||
<hdfs>
|
||||
<type>hdfs</type>
|
||||
<endpoint>hdfs://hdfs1:9000/clickhouse/</endpoint>
|
||||
<skip_access_check>true</skip_access_check>
|
||||
</hdfs>
|
||||
<hdd>
|
||||
<type>local</type>
|
||||
<path>/</path>
|
||||
</hdd>
|
||||
</disks>
|
||||
<policies>
|
||||
<hdfs>
|
||||
<volumes>
|
||||
<main>
|
||||
<disk>hdfs</disk>
|
||||
</main>
|
||||
<external>
|
||||
<disk>hdd</disk>
|
||||
</external>
|
||||
</volumes>
|
||||
</hdfs>
|
||||
</policies>
|
||||
</storage_configuration>
|
||||
</clickhouse>
|
||||
```
|
||||
|
||||
## Web storage (read-only) {#web-storage}
|
||||
|
||||
Web storage can be used for read-only purposes. An example use is for hosting sample
|
||||
data, or for migrating data.
|
||||
|
||||
:::tip
|
||||
Storage can also be configured temporarily within a query. If a web dataset is not expected
|
||||
to be used routinely, see [dynamic storage](#dynamic-storage) and skip editing the
|
||||
configuration file.
|
||||
:::
|
||||
|
||||
In this sample configuration:
|
||||
- the disk is of type `web`
|
||||
- the data is hosted at `http://nginx:80/test1/`
|
||||
- a cache on local storage is used
|
||||
|
||||
```xml
|
||||
<clickhouse>
|
||||
<storage_configuration>
|
||||
<disks>
|
||||
<web>
|
||||
<type>web</type>
|
||||
<endpoint>http://nginx:80/test1/</endpoint>
|
||||
</web>
|
||||
<cached_web>
|
||||
<type>cache</type>
|
||||
<disk>web</disk>
|
||||
<path>cached_web_cache/</path>
|
||||
<max_size>100000000</max_size>
|
||||
</cached_web>
|
||||
</disks>
|
||||
<policies>
|
||||
<web>
|
||||
<volumes>
|
||||
<main>
|
||||
<disk>web</disk>
|
||||
</main>
|
||||
</volumes>
|
||||
</web>
|
||||
<cached_web>
|
||||
<volumes>
|
||||
<main>
|
||||
<disk>cached_web</disk>
|
||||
</main>
|
||||
</volumes>
|
||||
</cached_web>
|
||||
</policies>
|
||||
</storage_configuration>
|
||||
</clickhouse>
|
||||
```
|
||||
|
||||
## Virtual Columns {#virtual-columns}
|
||||
|
||||
- `_part` — Name of a part.
|
||||
|
@ -21,3 +21,79 @@ When restarting a server, data disappears from the table and the table becomes e
|
||||
Normally, using this table engine is not justified. However, it can be used for tests, and for tasks where maximum speed is required on a relatively small number of rows (up to approximately 100,000,000).
|
||||
|
||||
The Memory engine is used by the system for temporary tables with external query data (see the section “External data for processing a query”), and for implementing `GLOBAL IN` (see the section “IN operators”).
|
||||
|
||||
Upper and lower bounds can be specified to limit Memory engine table size, effectively allowing it to act as a circular buffer (see [Engine Parameters](#engine-parameters)).
|
||||
|
||||
## Engine Parameters {#engine-parameters}
|
||||
|
||||
- `min_bytes_to_keep` — Minimum bytes to keep when memory table is size-capped.
|
||||
- Default value: `0`
|
||||
- Requires `max_bytes_to_keep`
|
||||
- `max_bytes_to_keep` — Maximum bytes to keep within the memory table, where the oldest rows are deleted on each insertion (i.e. a circular buffer). The byte count can exceed the stated limit if the oldest batch of rows to remove falls under the `min_bytes_to_keep` limit when adding a large block.
|
||||
- Default value: `0`
|
||||
- `min_rows_to_keep` — Minimum rows to keep when memory table is size-capped.
|
||||
- Default value: `0`
|
||||
- Requires `max_rows_to_keep`
|
||||
- `max_rows_to_keep` — Maximum rows to keep within the memory table, where the oldest rows are deleted on each insertion (i.e. a circular buffer). The row count can exceed the stated limit if the oldest batch of rows to remove falls under the `min_rows_to_keep` limit when adding a large block.
|
||||
- Default value: `0`
|
||||
|
||||
## Usage {#usage}
|
||||
|
||||
|
||||
**Initialize settings**
|
||||
``` sql
|
||||
CREATE TABLE memory (i UInt32) ENGINE = Memory SETTINGS min_rows_to_keep = 100, max_rows_to_keep = 1000;
|
||||
```
|
||||
|
||||
**Note:** Both the `bytes` and `rows` capping parameters can be set at the same time; however, the lower bounds of `max` and `min` will be adhered to.
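For example, a sketch capping the same table by both constraints (the values are illustrative):

``` sql
CREATE TABLE memory_capped (i UInt32)
ENGINE = Memory
SETTINGS min_bytes_to_keep = 4096, max_bytes_to_keep = 16384,
         min_rows_to_keep = 100, max_rows_to_keep = 1000;
```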
|
||||
|
||||
## Examples {#examples}
|
||||
``` sql
|
||||
CREATE TABLE memory (i UInt32) ENGINE = Memory SETTINGS min_bytes_to_keep = 4096, max_bytes_to_keep = 16384;
|
||||
|
||||
/* 1. testing oldest block doesn't get deleted due to min-threshold - 3000 rows */
|
||||
INSERT INTO memory SELECT * FROM numbers(0, 1600); -- 8'192 bytes
|
||||
|
||||
/* 2. adding block that doesn't get deleted */
|
||||
INSERT INTO memory SELECT * FROM numbers(1000, 100); -- 1'024 bytes
|
||||
|
||||
/* 3. testing oldest block gets deleted - 9216 bytes - 1100 */
|
||||
INSERT INTO memory SELECT * FROM numbers(9000, 1000); -- 8'192 bytes
|
||||
|
||||
/* 4. checking a very large block overrides all */
|
||||
INSERT INTO memory SELECT * FROM numbers(9000, 10000); -- 65'536 bytes
|
||||
|
||||
SELECT total_bytes, total_rows FROM system.tables WHERE name = 'memory' and database = currentDatabase();
|
||||
```
|
||||
|
||||
``` text
|
||||
┌─total_bytes─┬─total_rows─┐
|
||||
│ 65536 │ 10000 │
|
||||
└─────────────┴────────────┘
|
||||
```
|
||||
|
||||
Also, for rows:
|
||||
|
||||
``` sql
|
||||
CREATE TABLE memory (i UInt32) ENGINE = Memory SETTINGS min_rows_to_keep = 4000, max_rows_to_keep = 10000;
|
||||
|
||||
/* 1. testing oldest block doesn't get deleted due to min-threshold - 3000 rows */
|
||||
INSERT INTO memory SELECT * FROM numbers(0, 1600); -- 1'600 rows
|
||||
|
||||
/* 2. adding block that doesn't get deleted */
|
||||
INSERT INTO memory SELECT * FROM numbers(1000, 100); -- 100 rows
|
||||
|
||||
/* 3. testing oldest block gets deleted - 9216 bytes - 1100 */
|
||||
INSERT INTO memory SELECT * FROM numbers(9000, 1000); -- 1'000 rows
|
||||
|
||||
/* 4. checking a very large block overrides all */
|
||||
INSERT INTO memory SELECT * FROM numbers(9000, 10000); -- 10'000 rows
|
||||
|
||||
SELECT total_bytes, total_rows FROM system.tables WHERE name = 'memory' and database = currentDatabase();
|
||||
```
|
||||
|
||||
``` text
|
||||
┌─total_bytes─┬─total_rows─┐
|
||||
│ 65536 │ 10000 │
|
||||
└─────────────┴────────────┘
|
||||
```
|
||||
|
@ -75,14 +75,14 @@ This is the output of `DESCRIBE`. Down further in this guide the field type cho
|
||||
</TabItem>
|
||||
<TabItem value="selfmanaged" label="Self-managed">
|
||||
|
||||
1. Download the snapshot of the dataset from February 2021: [cell_towers.csv.xz](https://datasets.clickhouse.com/cell_towers.csv.xz) (729 MB).
|
||||
1. Download the snapshot of the dataset from February 2021: [cell_towers.csv.xz](https://datasets.clickhouse.com/cell_towers.csv.xz) (686 MB).
|
||||
|
||||
2. Validate the integrity (optional step):
|
||||
```bash
|
||||
md5sum cell_towers.csv.xz
|
||||
```
|
||||
```response
|
||||
8cf986f4a0d9f12c6f384a0e9192c908 cell_towers.csv.xz
|
||||
8a797f7bdb55faba93f6cbc37d47b037 cell_towers.csv.xz
|
||||
```
|
||||
|
||||
3. Decompress it with the following command:
|
||||
@ -132,7 +132,7 @@ SELECT radio, count() AS c FROM cell_towers GROUP BY radio ORDER BY c DESC
|
||||
┌─radio─┬────────c─┐
|
||||
│ UMTS │ 20686487 │
|
||||
│ LTE │ 12101148 │
|
||||
│ GSM │ 9931312 │
|
||||
│ GSM │ 9931304 │
|
||||
│ CDMA │ 556344 │
|
||||
│ NR │ 867 │
|
||||
└───────┴──────────┘
|
||||
|
@ -55,7 +55,7 @@ CREATE TABLE criteo_log (
|
||||
) ENGINE = Log;
|
||||
```
|
||||
|
||||
Download the data:
|
||||
Insert the data:
|
||||
|
||||
``` bash
|
||||
$ for i in {00..23}; do echo $i; zcat datasets/criteo/day_${i#0}.gz | sed -r 's/^/2000-01-'${i/00/24}'\t/' | clickhouse-client --host=example-perftest01j --query="INSERT INTO criteo_log FORMAT TabSeparated"; done
|
||||
|
@ -10,10 +10,14 @@ The embeddings and the metadata are stored in separate files in the raw data. A
|
||||
converts them to CSV and imports them into ClickHouse. You can use the following `download.sh` script for that:
|
||||
|
||||
```bash
|
||||
wget --tries=100 https://deploy.laion.ai/8f83b608504d46bb81708ec86e912220/embeddings/img_emb/img_emb_${1}.npy # download image embedding
|
||||
wget --tries=100 https://deploy.laion.ai/8f83b608504d46bb81708ec86e912220/embeddings/text_emb/text_emb_${1}.npy # download text embedding
|
||||
wget --tries=100 https://deploy.laion.ai/8f83b608504d46bb81708ec86e912220/embeddings/metadata/metadata_${1}.parquet # download metadata
|
||||
python3 process.py ${1} # merge files and convert to CSV
|
||||
number=${1}
|
||||
if [[ $number == '' ]]; then
|
||||
number=1
|
||||
fi;
|
||||
wget --tries=100 https://deploy.laion.ai/8f83b608504d46bb81708ec86e912220/embeddings/img_emb/img_emb_${number}.npy # download image embedding
|
||||
wget --tries=100 https://deploy.laion.ai/8f83b608504d46bb81708ec86e912220/embeddings/text_emb/text_emb_${number}.npy # download text embedding
|
||||
wget --tries=100 https://deploy.laion.ai/8f83b608504d46bb81708ec86e912220/embeddings/metadata/metadata_${number}.parquet # download metadata
|
||||
python3 process.py $number # merge files and convert to CSV
|
||||
```
|
||||
Script `process.py` is defined as follows:
|
||||
|
||||
|
@ -248,6 +248,9 @@ Some of the files might not download fully. Check the file sizes and re-download
|
||||
|
||||
``` bash
|
||||
$ curl -O https://datasets.clickhouse.com/trips_mergetree/partitions/trips_mergetree.tar
|
||||
# Validate the checksum
|
||||
$ md5sum trips_mergetree.tar
|
||||
# Checksum should be equal to: f3b8d469b41d9a82da064ded7245d12c
|
||||
$ tar xvf trips_mergetree.tar -C /var/lib/clickhouse # path to ClickHouse data directory
|
||||
$ # check permissions of unpacked data, fix if required
|
||||
$ sudo service clickhouse-server restart
|
||||
|
293
docs/en/getting-started/example-datasets/tw-weather.md
Normal file
293
docs/en/getting-started/example-datasets/tw-weather.md
Normal file
@ -0,0 +1,293 @@
|
||||
---
|
||||
slug: /en/getting-started/example-datasets/tw-weather
|
||||
sidebar_label: Taiwan Historical Weather Datasets
|
||||
sidebar_position: 1
|
||||
description: 131 million rows of weather observation data for the last 128 years
|
||||
---
|
||||
|
||||
# Taiwan Historical Weather Datasets
|
||||
|
||||
This dataset contains historical meteorological observations for the last 128 years. Each row is a measurement for a weather station at a point in time.
|
||||
|
||||
The origin of this dataset is available [here](https://github.com/Raingel/historical_weather) and the list of weather station numbers can be found [here](https://github.com/Raingel/weather_station_list).
|
||||
|
||||
> The sources of the meteorological datasets include the meteorological stations established by the Central Weather Administration (station codes beginning with C0, C1, and 4) and the agricultural meteorological stations belonging to the Council of Agriculture (station codes other than those mentioned above):
|
||||
|
||||
- StationId
|
||||
- MeasuredDate, the observation time
|
||||
- StnPres, the station air pressure
|
||||
- SeaPres, the sea level pressure
|
||||
- Td, the dew point temperature
|
||||
- RH, the relative humidity
|
||||
- Other elements where available
|
||||
|
||||
## Downloading the data
|
||||
|
||||
- A [pre-processed version](#pre-processed-data) of the data for ClickHouse, which has been cleaned, re-structured, and enriched. This dataset covers the years from 1896 to 2023.
|
||||
- [Download the original raw data](#original-raw-data) and convert it to the format required by ClickHouse. Users who want to add their own columns may wish to explore or extend this approach.
|
||||
|
||||
### Pre-processed data
|
||||
|
||||
The dataset has also been re-structured from a measurement per line to a row per weather station ID and measured date, i.e.
|
||||
|
||||
```csv
|
||||
StationId,MeasuredDate,StnPres,Tx,RH,WS,WD,WSGust,WDGust,Precp,GloblRad,TxSoil0cm,TxSoil5cm,TxSoil20cm,TxSoil50cm,TxSoil100cm,SeaPres,Td,PrecpHour,SunShine,TxSoil10cm,EvapA,Visb,UVI,Cloud Amount,TxSoil30cm,TxSoil200cm,TxSoil300cm,TxSoil500cm,VaporPressure
|
||||
C0X100,2016-01-01 01:00:00,1022.1,16.1,72,1.1,8.0,,,,,,,,,,,,,,,,,,,,,,,
|
||||
C0X100,2016-01-01 02:00:00,1021.6,16.0,73,1.2,358.0,,,,,,,,,,,,,,,,,,,,,,,
|
||||
C0X100,2016-01-01 03:00:00,1021.3,15.8,74,1.5,353.0,,,,,,,,,,,,,,,,,,,,,,,
|
||||
C0X100,2016-01-01 04:00:00,1021.2,15.8,74,1.7,8.0,,,,,,,,,,,,,,,,,,,,,,,
|
||||
```
|
||||
|
||||
This makes the table easy to query and less sparse, although some elements are NULL because they are not available to be measured at a given weather station.
|
||||
|
||||
This dataset is available in the following Google Cloud Storage location. Either download the dataset to your local filesystem (and insert it with the ClickHouse client) or insert it directly into ClickHouse (see [Inserting from URL](#inserting-from-url)).
|
||||
|
||||
To download:
|
||||
|
||||
```bash
|
||||
wget https://storage.googleapis.com/taiwan-weather-observaiton-datasets/preprocessed_weather_daily_1896_2023.tar.gz
|
||||
|
||||
# Option: Validate the checksum
|
||||
md5sum preprocessed_weather_daily_1896_2023.tar.gz
|
||||
# Checksum should be equal to: 11b484f5bd9ddafec5cfb131eb2dd008
|
||||
|
||||
tar -xzvf preprocessed_weather_daily_1896_2023.tar.gz
|
||||
daily_weather_preprocessed_1896_2023.csv
|
||||
|
||||
# Option: Validate the checksum
|
||||
md5sum daily_weather_preprocessed_1896_2023.csv
|
||||
# Checksum should be equal to: 1132248c78195c43d93f843753881754
|
||||
```
|
||||
|
||||
### Original raw data
|
||||
|
||||
The following steps describe how to download the original raw data so that you can transform and convert it as you want.
|
||||
|
||||
#### Download
|
||||
|
||||
To download the original raw data:
|
||||
|
||||
```bash
|
||||
mkdir tw_raw_weather_data && cd tw_raw_weather_data
|
||||
|
||||
wget https://storage.googleapis.com/taiwan-weather-observaiton-datasets/raw_data_weather_daily_1896_2023.tar.gz
|
||||
|
||||
# Option: Validate the checksum
|
||||
md5sum raw_data_weather_daily_1896_2023.tar.gz
|
||||
# Checksum should be equal to: b66b9f137217454d655e3004d7d1b51a
|
||||
|
||||
tar -xzvf raw_data_weather_daily_1896_2023.tar.gz
|
||||
466920_1928.csv
|
||||
466920_1929.csv
|
||||
466920_1930.csv
|
||||
466920_1931.csv
|
||||
...
|
||||
|
||||
# Option: Validate the checksum
|
||||
cat *.csv | md5sum
|
||||
# Checksum should be equal to: b26db404bf84d4063fac42e576464ce1
|
||||
```
|
||||
|
||||
#### Retrieve the Taiwan weather stations
|
||||
|
||||
```bash
|
||||
wget -O weather_sta_list.csv https://github.com/Raingel/weather_station_list/raw/main/data/weather_sta_list.csv
|
||||
|
||||
# Option: Convert the UTF-8-BOM to UTF-8 encoding
|
||||
sed -i '1s/^\xEF\xBB\xBF//' weather_sta_list.csv
|
||||
```
|
||||
|
||||

## Create table schema

Create the MergeTree table in ClickHouse (from the ClickHouse client).

```sql
CREATE TABLE tw_weather_data (
    StationId String null,
    MeasuredDate DateTime64,
    StnPres Float64 null,
    SeaPres Float64 null,
    Tx Float64 null,
    Td Float64 null,
    RH Float64 null,
    WS Float64 null,
    WD Float64 null,
    WSGust Float64 null,
    WDGust Float64 null,
    Precp Float64 null,
    PrecpHour Float64 null,
    SunShine Float64 null,
    GloblRad Float64 null,
    TxSoil0cm Float64 null,
    TxSoil5cm Float64 null,
    TxSoil10cm Float64 null,
    TxSoil20cm Float64 null,
    TxSoil50cm Float64 null,
    TxSoil100cm Float64 null,
    TxSoil30cm Float64 null,
    TxSoil200cm Float64 null,
    TxSoil300cm Float64 null,
    TxSoil500cm Float64 null,
    VaporPressure Float64 null,
    UVI Float64 null,
    "Cloud Amount" Float64 null,
    EvapA Float64 null,
    Visb Float64 null
)
ENGINE = MergeTree
ORDER BY (MeasuredDate);
```

## Inserting into ClickHouse

### Inserting from local file

Data can be inserted from a local file as follows (from the ClickHouse client):

```sql
INSERT INTO tw_weather_data FROM INFILE '/path/to/daily_weather_preprocessed_1896_2023.csv'
```

where `/path/to` represents the specific user path to the local file on the disk.

The sample response output after inserting the data into ClickHouse is as follows:

```response
Query id: 90e4b524-6e14-4855-817c-7e6f98fbeabb

Ok.
131985329 rows in set. Elapsed: 71.770 sec. Processed 131.99 million rows, 10.06 GB (1.84 million rows/s., 140.14 MB/s.)
Peak memory usage: 583.23 MiB.
```

### Inserting from URL

```sql
INSERT INTO tw_weather_data SELECT *
FROM url('https://storage.googleapis.com/taiwan-weather-observaiton-datasets/daily_weather_preprocessed_1896_2023.csv', 'CSVWithNames')
```

To learn how to speed this up, see our blog post on [tuning large data loads](https://clickhouse.com/blog/supercharge-your-clickhouse-data-loads-part2).

## Check data rows and sizes

1. Let's see how many rows were inserted:

```sql
SELECT formatReadableQuantity(count())
FROM tw_weather_data;
```

```response
┌─formatReadableQuantity(count())─┐
│ 131.99 million                  │
└─────────────────────────────────┘
```

2. Let's see how much disk space is used for this table:

```sql
SELECT
    formatReadableSize(sum(bytes)) AS disk_size,
    formatReadableSize(sum(data_uncompressed_bytes)) AS uncompressed_size
FROM system.parts
WHERE (`table` = 'tw_weather_data') AND active
```

```response
┌─disk_size─┬─uncompressed_size─┐
│ 2.13 GiB  │ 32.94 GiB         │
└───────────┴───────────────────┘
```

## Sample queries

### Q1: Retrieve the highest dew point temperature for each weather station in a specific year

```sql
SELECT
    StationId,
    max(Td) AS max_td
FROM tw_weather_data
WHERE (year(MeasuredDate) = 2023) AND (Td IS NOT NULL)
GROUP BY StationId

┌─StationId─┬─max_td─┐
│ 466940    │      1 │
│ 467300    │      1 │
│ 467540    │      1 │
│ 467490    │      1 │
│ 467080    │      1 │
│ 466910    │      1 │
│ 467660    │      1 │
│ 467270    │      1 │
│ 467350    │      1 │
│ 467571    │      1 │
│ 466920    │      1 │
│ 467650    │      1 │
│ 467550    │      1 │
│ 467480    │      1 │
│ 467610    │      1 │
│ 467050    │      1 │
│ 467590    │      1 │
│ 466990    │      1 │
│ 467060    │      1 │
│ 466950    │      1 │
│ 467620    │      1 │
│ 467990    │      1 │
│ 466930    │      1 │
│ 467110    │      1 │
│ 466881    │      1 │
│ 467410    │      1 │
│ 467441    │      1 │
│ 467420    │      1 │
│ 467530    │      1 │
│ 466900    │      1 │
└───────────┴────────┘

30 rows in set. Elapsed: 0.045 sec. Processed 6.41 million rows, 187.33 MB (143.92 million rows/s., 4.21 GB/s.)
```

### Q2: Fetch raw data for a specific time range, a set of fields, and a weather station

```sql
SELECT
    StnPres,
    SeaPres,
    Tx,
    Td,
    RH,
    WS,
    WD,
    WSGust,
    WDGust,
    Precp,
    PrecpHour
FROM tw_weather_data
WHERE (StationId = 'C0UB10') AND (MeasuredDate >= '2023-12-23') AND (MeasuredDate < '2023-12-24')
ORDER BY MeasuredDate ASC
LIMIT 10
```

```response
┌─StnPres─┬─SeaPres─┬───Tx─┬───Td─┬─RH─┬──WS─┬──WD─┬─WSGust─┬─WDGust─┬─Precp─┬─PrecpHour─┐
│  1029.5 │    ᴺᵁᴸᴸ │ 11.8 │ ᴺᵁᴸᴸ │ 78 │ 2.7 │ 271 │    5.5 │    275 │ -99.8 │     -99.8 │
│  1029.8 │    ᴺᵁᴸᴸ │ 12.3 │ ᴺᵁᴸᴸ │ 78 │ 2.7 │ 289 │    5.5 │    308 │ -99.8 │     -99.8 │
│  1028.6 │    ᴺᵁᴸᴸ │ 12.3 │ ᴺᵁᴸᴸ │ 79 │ 2.3 │ 251 │    6.1 │    289 │ -99.8 │     -99.8 │
│  1028.2 │    ᴺᵁᴸᴸ │   13 │ ᴺᵁᴸᴸ │ 75 │ 4.3 │ 312 │    7.5 │    316 │ -99.8 │     -99.8 │
│  1027.8 │    ᴺᵁᴸᴸ │ 11.1 │ ᴺᵁᴸᴸ │ 89 │ 7.1 │ 310 │   11.6 │    322 │ -99.8 │     -99.8 │
│  1027.8 │    ᴺᵁᴸᴸ │ 11.6 │ ᴺᵁᴸᴸ │ 90 │ 3.1 │ 269 │   10.7 │    295 │ -99.8 │     -99.8 │
│  1027.9 │    ᴺᵁᴸᴸ │ 12.3 │ ᴺᵁᴸᴸ │ 89 │ 4.7 │ 296 │    8.1 │    310 │ -99.8 │     -99.8 │
│  1028.2 │    ᴺᵁᴸᴸ │ 12.2 │ ᴺᵁᴸᴸ │ 94 │ 2.5 │ 246 │    7.1 │    283 │ -99.8 │     -99.8 │
│  1028.4 │    ᴺᵁᴸᴸ │ 12.5 │ ᴺᵁᴸᴸ │ 94 │ 3.1 │ 265 │    4.8 │    297 │ -99.8 │     -99.8 │
│  1028.3 │    ᴺᵁᴸᴸ │ 13.6 │ ᴺᵁᴸᴸ │ 91 │ 1.2 │ 273 │    4.4 │    256 │ -99.8 │     -99.8 │
└─────────┴─────────┴──────┴──────┴────┴─────┴─────┴────────┴────────┴───────┴───────────┘

10 rows in set. Elapsed: 0.009 sec. Processed 91.70 thousand rows, 2.33 MB (9.67 million rows/s., 245.31 MB/s.)
```

## Credits

We would like to acknowledge the efforts of the Central Weather Administration and the Agricultural Meteorological Observation Network (Station) of the Council of Agriculture for preparing, cleaning, and distributing this dataset. We appreciate their efforts.

Ou, J.-H., Kuo, C.-H., Wu, Y.-F., Lin, G.-C., Lee, M.-H., Chen, R.-K., Chou, H.-P., Wu, H.-Y., Chu, S.-C., Lai, Q.-J., Tsai, Y.-C., Lin, C.-C., Kuo, C.-C., Liao, C.-T., Chen, Y.-N., Chu, Y.-W., Chen, C.-Y., 2023. Application-oriented deep learning model for early warning of rice blast in Taiwan. Ecological Informatics 73, 101950. https://doi.org/10.1016/j.ecoinf.2022.101950 [13/12/2022]

@ -262,7 +262,7 @@ The required version can be downloaded with `curl` or `wget` from repository htt

After that, the downloaded archives should be unpacked and installed with the installation scripts. Example for the latest stable version:

``` bash
LATEST_VERSION=$(curl -s https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/utils/list-versions/version_date.tsv | \
    grep -Eo '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | sort -V -r | head -n 1)
export LATEST_VERSION
```

@ -178,7 +178,7 @@ You can pass parameters to `clickhouse-client` (all parameters have a default va

- `--password` – The password. Default value: empty string.
- `--ask-password` - Prompt the user to enter a password.
- `--query, -q` – The query to process when using non-interactive mode. `--query` can be specified multiple times, e.g. `--query "SELECT 1" --query "SELECT 2"`. Cannot be used simultaneously with `--queries-file`.
- `--queries-file` – File path with queries to execute. `--queries-file` can be specified multiple times, e.g. `--queries-file queries1.sql --queries-file queries2.sql`. Cannot be used simultaneously with `--query` (see the sketch after this list).
- `--multiquery, -n` – If specified, multiple queries separated by semicolons can be listed after the `--query` option. For convenience, it is also possible to omit `--query` and pass the queries directly after `--multiquery`.
- `--multiline, -m` – If specified, allow multiline queries (do not send the query on Enter).
- `--database, -d` – Select the current default database. Default value: the current database from the server settings (‘default’ by default).
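
For instance, a minimal sketch combining the flags described above (the file names are placeholders):

```bash
# two independent queries in one non-interactive invocation
clickhouse-client --query "SELECT 1" --query "SELECT 2"

# the same idea with query files; cannot be combined with --query
clickhouse-client --queries-file queries1.sql --queries-file queries2.sql
```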

@ -1270,12 +1270,13 @@ SELECT * FROM json_each_row_nested

- [input_format_json_read_arrays_as_strings](/docs/en/operations/settings/settings-formats.md/#input_format_json_read_arrays_as_strings) - allow parsing JSON arrays as strings in JSON input formats. Default value - `true`.
- [input_format_json_read_objects_as_strings](/docs/en/operations/settings/settings-formats.md/#input_format_json_read_objects_as_strings) - allow parsing JSON objects as strings in JSON input formats (see the sketch after this list). Default value - `true`.
- [input_format_json_named_tuples_as_objects](/docs/en/operations/settings/settings-formats.md/#input_format_json_named_tuples_as_objects) - parse named tuple columns as JSON objects. Default value - `true`.
- [input_format_json_try_infer_numbers_from_strings](/docs/en/operations/settings/settings-formats.md/#input_format_json_try_infer_numbers_from_strings) - try to infer numbers from string fields during schema inference. Default value - `false`.
- [input_format_json_try_infer_named_tuples_from_objects](/docs/en/operations/settings/settings-formats.md/#input_format_json_try_infer_named_tuples_from_objects) - try to infer named tuples from JSON objects during schema inference. Default value - `true`.
- [input_format_json_infer_incomplete_types_as_strings](/docs/en/operations/settings/settings-formats.md/#input_format_json_infer_incomplete_types_as_strings) - use type String for keys that contain only Nulls or empty objects/arrays during schema inference in JSON input formats. Default value - `true`.
- [input_format_json_defaults_for_missing_elements_in_named_tuple](/docs/en/operations/settings/settings-formats.md/#input_format_json_defaults_for_missing_elements_in_named_tuple) - insert default values for missing elements in a JSON object while parsing a named tuple. Default value - `true`.
- [input_format_json_ignore_unknown_keys_in_named_tuple](/docs/en/operations/settings/settings-formats.md/#input_format_json_ignore_unknown_keys_in_named_tuple) - ignore unknown keys in JSON objects for named tuples. Default value - `false`.
- [input_format_json_compact_allow_variable_number_of_columns](/docs/en/operations/settings/settings-formats.md/#input_format_json_compact_allow_variable_number_of_columns) - allow a variable number of columns in JSONCompact/JSONCompactEachRow formats, ignore extra columns and use default values for missing columns. Default value - `false`.
- [input_format_json_throw_on_bad_escape_sequence](/docs/en/operations/settings/settings-formats.md/#input_format_json_throw_on_bad_escape_sequence) - throw an exception if a JSON string contains a bad escape sequence. If disabled, bad escape sequences will remain as is in the data. Default value - `true`.
- [output_format_json_quote_64bit_integers](/docs/en/operations/settings/settings-formats.md/#output_format_json_quote_64bit_integers) - controls quoting of 64-bit integers in JSON output format. Default value - `true`.
- [output_format_json_quote_64bit_floats](/docs/en/operations/settings/settings-formats.md/#output_format_json_quote_64bit_floats) - controls quoting of 64-bit floats in JSON output format. Default value - `false`.
- [output_format_json_quote_denormals](/docs/en/operations/settings/settings-formats.md/#output_format_json_quote_denormals) - enables '+nan', '-nan', '+inf', '-inf' outputs in JSON output format. Default value - `false`.
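
As a quick illustration of one of these settings, a sketch using the `format` table function (the input string is made up, and an explicit structure argument is assumed to be supported by your version):

```sql
-- with input_format_json_read_objects_as_strings = 1 (the default),
-- a whole JSON object can be read into a String column
SET input_format_json_read_objects_as_strings = 1;
SELECT * FROM format(JSONEachRow, 'obj String', '{"obj" : {"a" : 1, "b" : "hello"}}');
```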

@ -2356,7 +2357,7 @@ You can select data from a ClickHouse table and save them into some file in the

$ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Arrow" > {filename.arrow}
```

### Arrow format settings {#arrow-format-settings}

- [output_format_arrow_low_cardinality_as_dictionary](/docs/en/operations/settings/settings-formats.md/#output_format_arrow_low_cardinality_as_dictionary) - enable output of the ClickHouse LowCardinality type as the Dictionary Arrow type. Default value - `false`.
- [output_format_arrow_use_64_bit_indexes_for_dictionary](/docs/en/operations/settings/settings-formats.md/#output_format_arrow_use_64_bit_indexes_for_dictionary) - use 64-bit integer type for Dictionary indexes. Default value - `false`.

@ -87,6 +87,7 @@ The BACKUP and RESTORE statements take a list of DATABASE and TABLE names, a des

- `structure_only`: if enabled, allows backing up or restoring only the CREATE statements, without the data of tables
- `storage_policy`: storage policy for the tables being restored (see the sketch under the usage examples below). See [Using Multiple Block Devices for Data Storage](../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes). This setting is only applicable to the `RESTORE` command. The specified storage policy applies only to tables with an engine from the `MergeTree` family.
- `s3_storage_class`: the storage class used for S3 backup. For example, `STANDARD`.
- `azure_attempt_to_create_container`: when using Azure Blob Storage, whether to try to create the specified container if it doesn't exist. Default: true.

### Usage examples
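
For instance, a sketch of a restore that applies a storage policy (the backup destination and all names are placeholders):

```sql
RESTORE TABLE data AS data_restored
    FROM Disk('backups', 'backup_name')
    SETTINGS storage_policy = 'policy_name'
```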

@ -95,9 +95,11 @@ which is equal to

## Substituting Configuration {#substitution}

The config can define substitutions. There are two types of substitutions:

- If an element has the `incl` attribute, the corresponding substitution from the file will be used as the value. By default, the path to the file with substitutions is `/etc/metrika.xml`. This can be changed in the [include_from](../operations/server-configuration-parameters/settings.md#server_configuration_parameters-include_from) element in the server config. The substitution values are specified in `/clickhouse/substitution_name` elements in this file. If a substitution specified in `incl` does not exist, it is recorded in the log. To prevent ClickHouse from logging missing substitutions, specify the `optional="true"` attribute (for example, settings for [macros](../operations/server-configuration-parameters/settings.md#macros)).

- If you want to replace an entire element with a substitution, use `include` as the element name. Substitutions can also be performed from ZooKeeper by specifying the attribute `from_zk = "/path/to/node"`. In this case, the element value is replaced with the contents of the ZooKeeper node at `/path/to/node`. This also works if you store an entire XML subtree as a ZooKeeper node; it will be fully inserted into the source element.

XML substitution example:

@ -114,7 +116,7 @@ XML substitution example:

</clickhouse>
```

If you want to merge the substituting content with the existing configuration instead of appending, you can use the attribute `merge="true"`, for example: `<include from_zk="/some_path" merge="true">`. In this case, the existing configuration will be merged with the content from the substitution, and the existing configuration settings will be replaced with values from the substitution.

## Encrypting and Hiding Configuration {#encryption}

@ -3,6 +3,7 @@ slug: /en/operations/monitoring

sidebar_position: 45
sidebar_label: Monitoring
description: You can monitor the utilization of hardware resources and also ClickHouse server metrics.
keywords: [monitoring, observability, advanced dashboard, dashboard, observability dashboard]
---

# Monitoring

@ -15,11 +16,11 @@ You can monitor:

- Utilization of hardware resources.
- ClickHouse server metrics.

## Built-in advanced observability dashboard

<img width="400" alt="Screenshot 2023-11-12 at 6 08 58 PM" src="https://github.com/ClickHouse/ClickHouse/assets/3936029/2bd10011-4a47-4b94-b836-d44557c7fdc1" />

ClickHouse comes with a built-in advanced observability dashboard feature which can be accessed at `$HOST:$PORT/dashboard` (requires a user and password) and shows the following metrics:
- Queries/second
- CPU usage (cores)
- Queries running

@ -379,6 +379,18 @@ Type: UInt64

Default: 0

## max_waiting_queries

Limit on the total number of concurrently waiting queries. Execution of a waiting query is blocked while required tables are loading asynchronously (see `async_load_databases`). Note that waiting queries are not counted when the `max_concurrent_queries`, `max_concurrent_insert_queries`, `max_concurrent_select_queries`, `max_concurrent_queries_for_user` and `max_concurrent_queries_for_all_users` limits are checked. This correction is done to avoid hitting these limits just after server startup. Zero means unlimited.

:::note
This setting can be modified at runtime and will take effect immediately. Queries that are already running will remain unchanged.
:::

Type: UInt64

Default: 0
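
A minimal sketch of setting this limit in the server configuration (the value is illustrative):

```xml
<clickhouse>
    <max_waiting_queries>50</max_waiting_queries>
</clickhouse>
```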

## max_connections

Max server connections.

@ -933,9 +945,9 @@ Hard limit is configured via system tools

## database_atomic_delay_before_drop_table_sec {#database_atomic_delay_before_drop_table_sec}

The delay during which a dropped table can be restored using the [UNDROP](/docs/en/sql-reference/statements/undrop.md) statement. If `DROP TABLE` ran with a `SYNC` modifier, the setting is ignored.

Default value: `480` (8 minutes).
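
For instance, a sketch (the table name is a placeholder):

```sql
DROP TABLE my_table;   -- data is kept for database_atomic_delay_before_drop_table_sec seconds
UNDROP TABLE my_table; -- restores the table if run within that window
```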

## database_catalog_unused_dir_hide_timeout_sec {#database_catalog_unused_dir_hide_timeout_sec}

@ -1342,6 +1354,7 @@ Keys:

- `count` – The number of archived log files that ClickHouse stores.
- `console` – Send `log` and `errorlog` to the console instead of a file. To enable, set to `1` or `true`.
- `stream_compress` – Compress `log` and `errorlog` with `lz4` stream compression. To enable, set to `1` or `true`.
- `formatting` – Specify the log format to be printed in the console log (currently only `json` is supported).

Both log and error log file names (only file names, not directories) support date and time format specifiers.

@ -1410,6 +1423,8 @@ Writing to the console can be configured. Config example:

</logger>
```

### syslog

Writing to the syslog is also supported. Config example:

``` xml

@ -1433,6 +1448,52 @@ Keys for syslog:

Default value: `LOG_USER` if `address` is specified, `LOG_DAEMON` otherwise.
- format – Message format. Possible values: `bsd` and `syslog`.

### Log formats

You can specify the log format that will be output in the console log. Currently, only JSON is supported. Here is an example of an output JSON log:

```json
{
  "date_time": "1650918987.180175",
  "thread_name": "#1",
  "thread_id": "254545",
  "level": "Trace",
  "query_id": "",
  "logger_name": "BaseDaemon",
  "message": "Received signal 2",
  "source_file": "../base/daemon/BaseDaemon.cpp; virtual void SignalListener::run()",
  "source_line": "192"
}
```

To enable JSON logging support, use the following snippet:

```xml
<logger>
    <formatting>
        <type>json</type>
        <names>
            <date_time>date_time</date_time>
            <thread_name>thread_name</thread_name>
            <thread_id>thread_id</thread_id>
            <level>level</level>
            <query_id>query_id</query_id>
            <logger_name>logger_name</logger_name>
            <message>message</message>
            <source_file>source_file</source_file>
            <source_line>source_line</source_line>
        </names>
    </formatting>
</logger>
```

**Renaming keys for JSON logs**

Key names can be modified by changing tag values inside the `<names>` tag. For example, to change `DATE_TIME` to `MY_DATE_TIME`, you can use `<date_time>MY_DATE_TIME</date_time>`.

**Omitting keys for JSON logs**

Log properties can be omitted by commenting out the property. For example, if you do not want your log to print `query_id`, you can comment out the `<query_id>` tag.

## send_crash_reports {#send_crash_reports}

Settings for opt-in sending of crash reports to the ClickHouse core developers team via [Sentry](https://sentry.io).

@ -1725,7 +1786,7 @@ Default value: `0.5`.

Asynchronous loading of databases and tables.

If `true`, all non-system databases with the `Ordinary`, `Atomic`, and `Replicated` engines will be loaded asynchronously after ClickHouse server startup. See the `system.asynchronous_loader` table and the `tables_loader_background_pool_size` and `tables_loader_foreground_pool_size` server settings. Any query that tries to access a table that is not yet loaded will wait for exactly that table to be started up. If a load job fails, the query will rethrow the error (instead of shutting down the whole server, as happens with `async_load_databases = false`). A table that is waited for by at least one query will be loaded with higher priority. DDL queries on a database will wait for exactly that database to be started up. Also consider setting a limit `max_waiting_queries` for the total number of waiting queries.

If `false`, all databases are loaded when the server starts.
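
A minimal sketch of enabling it in the server configuration:

```xml
<clickhouse>
    <async_load_databases>true</async_load_databases>
</clickhouse>
```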

@ -867,3 +867,31 @@ Default value: `Never`

Persists virtual column `_block_number` on merges.

Default value: false.

## exclude_deleted_rows_for_part_size_in_merge {#exclude_deleted_rows_for_part_size_in_merge}

If enabled, the estimated actual size of data parts (i.e., excluding rows that have been deleted through `DELETE FROM`) will be used when selecting parts to merge. Note that this behavior is only triggered for data parts affected by `DELETE FROM` executed after this setting is enabled.

Possible values:

- true, false

Default value: false

**See Also**

- [load_existing_rows_count_for_old_parts](#load_existing_rows_count_for_old_parts) setting

## load_existing_rows_count_for_old_parts {#load_existing_rows_count_for_old_parts}

If enabled along with [exclude_deleted_rows_for_part_size_in_merge](#exclude_deleted_rows_for_part_size_in_merge), the deleted rows count for existing data parts will be calculated during table startup. Note that it may slow down table loading on startup (a combined usage sketch is shown below).

Possible values:

- true, false

Default value: false

**See Also**

- [exclude_deleted_rows_for_part_size_in_merge](#exclude_deleted_rows_for_part_size_in_merge) setting
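
A minimal sketch of enabling both settings for an existing table (the table name is a placeholder):

```sql
ALTER TABLE my_table
    MODIFY SETTING
        exclude_deleted_rows_for_part_size_in_merge = 1,
        load_existing_rows_count_for_old_parts = 1;
```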

@ -651,6 +651,12 @@ This setting works only when setting `input_format_json_named_tuples_as_objects`

Enabled by default.

## input_format_json_throw_on_bad_escape_sequence {#input_format_json_throw_on_bad_escape_sequence}

Throw an exception if a JSON string contains a bad escape sequence in JSON input formats. If disabled, bad escape sequences will remain as is in the data.

Enabled by default.

## output_format_json_array_of_rows {#output_format_json_array_of_rows}

Enables the ability to output all rows as a JSON array in the [JSONEachRow](../../interfaces/formats.md/#jsoneachrow) format.

@ -1367,7 +1373,7 @@ Default value: `1'000'000`.

While importing data, when a column is not found in the schema, the default value will be used instead of an error.

Enabled by default.

### input_format_parquet_skip_columns_with_unsupported_types_in_schema_inference {#input_format_parquet_skip_columns_with_unsupported_types_in_schema_inference}

@ -2817,6 +2817,17 @@ Possible values:

Default value: 0.

## distributed_insert_skip_read_only_replicas {#distributed_insert_skip_read_only_replicas}

Enables skipping read-only replicas for INSERT queries into Distributed tables.

Possible values:

- 0 — INSERT works as usual; if it goes to a read-only replica, it will fail.
- 1 — The initiator will skip read-only replicas before sending data to shards.

Default value: `0`
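
For instance, a sketch (the table name is a placeholder):

```sql
SET distributed_insert_skip_read_only_replicas = 1;
INSERT INTO distributed_table VALUES (1, 'a');  -- read-only replicas are skipped by the initiator
```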

## distributed_foreground_insert {#distributed_foreground_insert}

Enables or disables synchronous data insertion into a [Distributed](../../engines/table-engines/special/distributed.md/#distributed) table.

@ -4337,6 +4348,18 @@ Possible values:

Default value: `0`.

## function_locate_has_mysql_compatible_argument_order {#function-locate-has-mysql-compatible-argument-order}

Controls the order of arguments in function [locate](../../sql-reference/functions/string-search-functions.md#locate).

Possible values:

- 0 — Function `locate` accepts arguments `(haystack, needle[, start_pos])`.
- 1 — Function `locate` accepts arguments `(needle, haystack[, start_pos])` (MySQL-compatible behavior).

Default value: `1`.
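
A quick sketch of the two argument orders:

```sql
SET function_locate_has_mysql_compatible_argument_order = 0;
SELECT locate('abc', 'b');  -- (haystack, needle): returns 2

SET function_locate_has_mysql_compatible_argument_order = 1;
SELECT locate('b', 'abc');  -- (needle, haystack), MySQL-compatible: returns 2
```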

## date_time_overflow_behavior {#date_time_overflow_behavior}

Defines the behavior when [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md), [DateTime64](../../sql-reference/data-types/datetime64.md) or integers are converted into Date, Date32, DateTime or DateTime64 but the value cannot be represented in the result type.

@ -5430,3 +5453,7 @@ Enabling this setting can lead to incorrect result as in case of evolved schema

:::

Default value: 'false'.

## allow_suspicious_primary_key {#allow_suspicious_primary_key}

Allow suspicious `PRIMARY KEY`/`ORDER BY` for MergeTree (e.g. `SimpleAggregateFunction`).
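
For instance, a sketch of a table that needs this setting (the names are placeholders):

```sql
SET allow_suspicious_primary_key = 1;

CREATE TABLE test_suspicious_pk
(
    key UInt64,
    value SimpleAggregateFunction(max, UInt64)
)
ENGINE = AggregatingMergeTree
ORDER BY (key, value);  -- ORDER BY on a SimpleAggregateFunction column is normally rejected
```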

@ -5,26 +5,416 @@ sidebar_label: "External Disks for Storing Data"

title: "External Disks for Storing Data"
---

Data processed in ClickHouse is usually stored in the local file system, on the same machine as the ClickHouse server. That requires large-capacity disks, which can be expensive. To avoid that, you can store the data remotely. Various storages are supported:
1. [Amazon S3](https://aws.amazon.com/s3/) object storage.
2. The Hadoop Distributed File System ([HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html)).
3. [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs).

:::note ClickHouse also has support for external table engines, which are different from the external storage option described on this page: they allow reading data stored in some general file format (like Parquet), while here we describe the storage configuration for ClickHouse `MergeTree` family or `Log` family tables.
1. To work with data stored on `Amazon S3` disks, use the [S3](/docs/en/engines/table-engines/integrations/s3.md) table engine.
2. To work with data in the Hadoop Distributed File System, use the [HDFS](/docs/en/engines/table-engines/integrations/hdfs.md) table engine.
3. To work with data stored in Azure Blob Storage, use the [AzureBlobStorage](/docs/en/engines/table-engines/integrations/azureBlobStorage.md) table engine.
:::

To load data from a web server with static files, use a disk with type [web](#storing-data-on-webserver).

## Configuring external storage {#configuring-external-storage}

[MergeTree](/docs/en/engines/table-engines/mergetree-family/mergetree.md) and [Log](/docs/en/engines/table-engines/log-family/log.md) family table engines can store data to `S3`, `AzureBlobStorage`, or `HDFS` using a disk with type `s3`, `azure_blob_storage`, or `hdfs`, respectively.

Disk configuration requires:
1. A `type` section, equal to one of `s3`, `azure_blob_storage`, `hdfs`, `local_blob_storage`, `web`.
2. Configuration of the specific external storage type.

Starting from ClickHouse version 24.1, it is possible to use a new configuration option.
It requires specifying:
1. `type` equal to `object_storage`
2. `object_storage_type`, equal to one of `s3`, `azure_blob_storage` (or just `azure` from `24.3`), `hdfs`, `local_blob_storage` (or just `local` from `24.3`), `web`.
Optionally, `metadata_type` can be specified (it is equal to `local` by default), but it can also be set to `plain` or `web`.
Usage of the `plain` metadata type is described in the [plain storage section](/docs/en/operations/storing-data.md/#storing-data-on-webserver); the `web` metadata type can be used only with the `web` object storage type; the `local` metadata type stores metadata files locally (each metadata file contains a mapping to files in object storage, plus some additional meta information about them).

E.g. the configuration option
``` xml
<s3>
    <type>s3</type>
    <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
    <use_environment_credentials>1</use_environment_credentials>
</s3>
```

is equal to the following configuration (from `24.1`):
``` xml
<s3>
    <type>object_storage</type>
    <object_storage_type>s3</object_storage_type>
    <metadata_type>local</metadata_type>
    <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
    <use_environment_credentials>1</use_environment_credentials>
</s3>
```

The configuration
``` xml
<s3_plain>
    <type>s3_plain</type>
    <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
    <use_environment_credentials>1</use_environment_credentials>
</s3_plain>
```

is equal to
``` xml
<s3_plain>
    <type>object_storage</type>
    <object_storage_type>s3</object_storage_type>
    <metadata_type>plain</metadata_type>
    <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
    <use_environment_credentials>1</use_environment_credentials>
</s3_plain>
```

A full storage configuration example looks like this:
``` xml
<clickhouse>
    <storage_configuration>
        <disks>
            <s3>
                <type>s3</type>
                <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
                <use_environment_credentials>1</use_environment_credentials>
            </s3>
        </disks>
        <policies>
            <s3>
                <volumes>
                    <main>
                        <disk>s3</disk>
                    </main>
                </volumes>
            </s3>
        </policies>
    </storage_configuration>
</clickhouse>
```

Starting with ClickHouse version 24.1, it can also look like:
``` xml
<clickhouse>
    <storage_configuration>
        <disks>
            <s3>
                <type>object_storage</type>
                <object_storage_type>s3</object_storage_type>
                <metadata_type>local</metadata_type>
                <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
                <use_environment_credentials>1</use_environment_credentials>
            </s3>
        </disks>
        <policies>
            <s3>
                <volumes>
                    <main>
                        <disk>s3</disk>
                    </main>
                </volumes>
            </s3>
        </policies>
    </storage_configuration>
</clickhouse>
```

To make a specific kind of storage the default option for all `MergeTree` tables, add the following section to the configuration file:
``` xml
<clickhouse>
    <merge_tree>
        <storage_policy>s3</storage_policy>
    </merge_tree>
</clickhouse>
```

If you want to apply a specific storage policy to only one table, you can define it in the settings when creating the table:

``` sql
CREATE TABLE test (a Int32, b String)
ENGINE = MergeTree() ORDER BY a
SETTINGS storage_policy = 's3';
```

You can also use `disk` instead of `storage_policy`. In this case the `storage_policy` section is not required in the configuration file; a `disk` section is enough.

``` sql
CREATE TABLE test (a Int32, b String)
ENGINE = MergeTree() ORDER BY a
SETTINGS disk = 's3';
```

## Dynamic Configuration {#dynamic-configuration}

It is also possible to specify a storage configuration without a predefined disk in the configuration file: the disk can instead be configured in the `CREATE`/`ATTACH` query settings.

The following example query builds on the above dynamic disk configuration and shows how to use a local disk to cache data from a table stored at a URL.

```sql
ATTACH TABLE uk_price_paid UUID 'cf712b4f-2ca8-435c-ac23-c4393efe52f7'
(
    price UInt32,
    date Date,
    postcode1 LowCardinality(String),
    postcode2 LowCardinality(String),
    type Enum8('other' = 0, 'terraced' = 1, 'semi-detached' = 2, 'detached' = 3, 'flat' = 4),
    is_new UInt8,
    duration Enum8('unknown' = 0, 'freehold' = 1, 'leasehold' = 2),
    addr1 String,
    addr2 String,
    street LowCardinality(String),
    locality LowCardinality(String),
    town LowCardinality(String),
    district LowCardinality(String),
    county LowCardinality(String)
)
ENGINE = MergeTree
ORDER BY (postcode1, postcode2, addr1, addr2)
# highlight-start
SETTINGS disk = disk(
    type=web,
    endpoint='https://raw.githubusercontent.com/ClickHouse/web-tables-demo/main/web/'
);
# highlight-end
```

The example below adds a cache to external storage.

```sql
ATTACH TABLE uk_price_paid UUID 'cf712b4f-2ca8-435c-ac23-c4393efe52f7'
(
    price UInt32,
    date Date,
    postcode1 LowCardinality(String),
    postcode2 LowCardinality(String),
    type Enum8('other' = 0, 'terraced' = 1, 'semi-detached' = 2, 'detached' = 3, 'flat' = 4),
    is_new UInt8,
    duration Enum8('unknown' = 0, 'freehold' = 1, 'leasehold' = 2),
    addr1 String,
    addr2 String,
    street LowCardinality(String),
    locality LowCardinality(String),
    town LowCardinality(String),
    district LowCardinality(String),
    county LowCardinality(String)
)
ENGINE = MergeTree
ORDER BY (postcode1, postcode2, addr1, addr2)
# highlight-start
SETTINGS disk = disk(
    type=cache,
    max_size='1Gi',
    path='/var/lib/clickhouse/custom_disk_cache/',
    disk=disk(
        type=web,
        endpoint='https://raw.githubusercontent.com/ClickHouse/web-tables-demo/main/web/'
    )
);
# highlight-end
```

In the settings highlighted above, notice that the disk of `type=web` is nested within the disk of `type=cache`.

:::note
The example uses `type=web`, but any disk type can be configured as dynamic, even a local disk. Local disks require a path argument to be inside the server config parameter `custom_local_disks_base_directory`, which has no default, so set that too when using a local disk.
:::

A combination of config-file-based configuration and SQL-defined configuration is also possible:

```sql
ATTACH TABLE uk_price_paid UUID 'cf712b4f-2ca8-435c-ac23-c4393efe52f7'
(
    price UInt32,
    date Date,
    postcode1 LowCardinality(String),
    postcode2 LowCardinality(String),
    type Enum8('other' = 0, 'terraced' = 1, 'semi-detached' = 2, 'detached' = 3, 'flat' = 4),
    is_new UInt8,
    duration Enum8('unknown' = 0, 'freehold' = 1, 'leasehold' = 2),
    addr1 String,
    addr2 String,
    street LowCardinality(String),
    locality LowCardinality(String),
    town LowCardinality(String),
    district LowCardinality(String),
    county LowCardinality(String)
)
ENGINE = MergeTree
ORDER BY (postcode1, postcode2, addr1, addr2)
# highlight-start
SETTINGS disk = disk(
    type=cache,
    max_size='1Gi',
    path='/var/lib/clickhouse/custom_disk_cache/',
    disk=disk(
        type=web,
        endpoint='https://raw.githubusercontent.com/ClickHouse/web-tables-demo/main/web/'
    )
);
# highlight-end
```

where `web` is a disk from the server configuration file:

``` xml
<storage_configuration>
    <disks>
        <web>
            <type>web</type>
            <endpoint>https://raw.githubusercontent.com/ClickHouse/web-tables-demo/main/web/</endpoint>
        </web>
    </disks>
</storage_configuration>
```

### Using S3 Storage {#s3-storage}

Required parameters:

- `endpoint` — S3 endpoint URL in `path` or `virtual hosted` [styles](https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html). The endpoint URL should contain a bucket and the root path to store data.
- `access_key_id` — S3 access key id.
- `secret_access_key` — S3 secret access key.

Optional parameters:

- `region` — S3 region name.
- `support_batch_delete` — Controls the check to see if batch deletes are supported. Set this to `false` when using Google Cloud Storage (GCS), as GCS does not support batch deletes; disabling the check will prevent error messages in the logs.
- `use_environment_credentials` — Reads AWS credentials from the environment variables `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, and `AWS_SESSION_TOKEN` if they exist. Default value is `false`.
- `use_insecure_imds_request` — If set to `true`, the S3 client will use an insecure IMDS request while obtaining credentials from Amazon EC2 metadata. Default value is `false`.
- `expiration_window_seconds` — Grace period for checking if expiration-based credentials have expired. Optional, default value is `120`.
- `proxy` — Proxy configuration for the S3 endpoint. Each `uri` element inside the `proxy` block should contain a proxy URL.
- `connect_timeout_ms` — Socket connect timeout in milliseconds. Default value is `10 seconds`.
- `request_timeout_ms` — Request timeout in milliseconds. Default value is `5 seconds`.
- `retry_attempts` — Number of retry attempts in case of a failed request. Default value is `10`.
- `single_read_retries` — Number of retry attempts in case of a connection drop during read. Default value is `4`.
- `min_bytes_for_seek` — Minimal number of bytes to use a seek operation instead of sequential read. Default value is `1 Mb`.
- `metadata_path` — Path on the local FS to store metadata files for S3. Default value is `/var/lib/clickhouse/disks/<disk_name>/`.
- `skip_access_check` — If true, disk access checks will not be performed on disk start-up. Default value is `false`.
- `header` — Adds the specified HTTP header to a request to the given endpoint. Optional, can be specified multiple times.
- `server_side_encryption_customer_key_base64` — If specified, required headers for accessing S3 objects with SSE-C encryption will be set.
- `server_side_encryption_kms_key_id` - If specified, required headers for accessing S3 objects with [SSE-KMS encryption](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) will be set. If an empty string is specified, the AWS managed S3 key will be used. Optional.
- `server_side_encryption_kms_encryption_context` - If specified alongside `server_side_encryption_kms_key_id`, the given encryption context header for SSE-KMS will be set. Optional.
- `server_side_encryption_kms_bucket_key_enabled` - If specified alongside `server_side_encryption_kms_key_id`, the header to enable S3 bucket keys for SSE-KMS will be set. Optional, can be `true` or `false`, defaults to nothing (matches the bucket-level setting).
- `s3_max_put_rps` — Maximum PUT requests per second rate before throttling. Default value is `0` (unlimited).
- `s3_max_put_burst` — Max number of requests that can be issued simultaneously before hitting the requests-per-second limit. By default (`0` value) equals `s3_max_put_rps`.
- `s3_max_get_rps` — Maximum GET requests per second rate before throttling. Default value is `0` (unlimited).
- `s3_max_get_burst` — Max number of requests that can be issued simultaneously before hitting the requests-per-second limit. By default (`0` value) equals `s3_max_get_rps`.
- `read_resource` — Resource name to be used for [scheduling](/docs/en/operations/workload-scheduling.md) of read requests to this disk. Default value is an empty string (IO scheduling is not enabled for this disk).
- `write_resource` — Resource name to be used for [scheduling](/docs/en/operations/workload-scheduling.md) of write requests to this disk. Default value is an empty string (IO scheduling is not enabled for this disk).
- `key_template` — Defines the format with which the object keys are generated. By default, ClickHouse takes the `root path` from the `endpoint` option and adds a randomly generated suffix. That suffix is a directory with 3 random symbols and a file name with 29 random symbols. With this option you have full control over how the object keys are generated. Some usage scenarios require having random symbols in the prefix or in the middle of the object key, for example: `[a-z]{3}-prefix-random/constant-part/random-middle-[a-z]{3}/random-suffix-[a-z]{29}`. The value is parsed with [`re2`](https://github.com/google/re2/wiki/Syntax). Only a subset of the syntax is supported; check whether your preferred format is supported before using this option. The disk isn't initialized if ClickHouse is unable to generate a key from the value of `key_template`. It requires the enabled feature flag [storage_metadata_write_full_object_key](/docs/en/operations/settings/settings#storage_metadata_write_full_object_key). It forbids declaring the `root path` in the `endpoint` option. It requires the definition of the option `key_compatibility_prefix`.
- `key_compatibility_prefix` — This option is required when the option `key_template` is in use. In order to be able to read the object keys which were stored in metadata files with a metadata version lower than `VERSION_FULL_OBJECT_KEY`, the previous `root path` from the `endpoint` option should be set here.

:::note
Google Cloud Storage (GCS) is also supported using the type `s3`. See [GCS backed MergeTree](/docs/en/integrations/gcs).
:::

### Using Plain Storage {#plain-storage}

In `22.10` a new disk type `s3_plain` was introduced, which provides write-once storage. Its configuration parameters are the same as for the `s3` disk type.
Unlike the `s3` disk type, it stores data as is: instead of randomly-generated blob names, it uses normal file names (the same way ClickHouse stores files on a local disk) and does not store any metadata locally; instead, the metadata is derived from the data on `s3`.

This disk type allows keeping a static version of the table: it does not allow executing merges on the existing data and does not allow inserting new data.
A use case for this disk type is to create backups on it, which can be done via `BACKUP TABLE data TO Disk('plain_disk_name', 'backup_name')`. Afterwards you can do `RESTORE TABLE data AS data_restored FROM Disk('plain_disk_name', 'backup_name')` or use `ATTACH TABLE data (...) ENGINE = MergeTree() SETTINGS disk = 'plain_disk_name'`.

Configuration:
``` xml
<s3_plain>
    <type>s3_plain</type>
    <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
    <use_environment_credentials>1</use_environment_credentials>
</s3_plain>
```

Starting from `24.1` it is possible to configure any object storage disk (`s3`, `azure`, `hdfs`, `local`) using the `plain` metadata type.

Configuration:
``` xml
<s3_plain>
    <type>object_storage</type>
    <object_storage_type>azure</object_storage_type>
    <metadata_type>plain</metadata_type>
    <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
    <use_environment_credentials>1</use_environment_credentials>
</s3_plain>
```

### Using Azure Blob Storage {#azure-blob-storage}

`MergeTree` family table engines can store data to [Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/blobs/) using a disk with type `azure_blob_storage`.

As of February 2022, this feature is still a fresh addition, so expect that some Azure Blob Storage functionalities might be unimplemented.

Configuration markup:
``` xml
<storage_configuration>
    ...
    <disks>
        <blob_storage_disk>
            <type>azure_blob_storage</type>
            <storage_account_url>http://account.blob.core.windows.net</storage_account_url>
            <container_name>container</container_name>
            <account_name>account</account_name>
            <account_key>pass123</account_key>
            <metadata_path>/var/lib/clickhouse/disks/blob_storage_disk/</metadata_path>
            <cache_path>/var/lib/clickhouse/disks/blob_storage_disk/cache/</cache_path>
            <skip_access_check>false</skip_access_check>
        </blob_storage_disk>
    </disks>
    ...
</storage_configuration>
```

Connection parameters:
* `storage_account_url` - **Required**, Azure Blob Storage account URL, like `http://account.blob.core.windows.net` or `http://azurite1:10000/devstoreaccount1`.
* `container_name` - Target container name, defaults to `default-container`.
* `container_already_exists` - If set to `false`, a new container `container_name` is created in the storage account. If set to `true`, the disk connects to the container directly. If left unset, the disk connects to the account, checks whether the container `container_name` exists, and creates it if it doesn't exist yet.

Authentication parameters (the disk will try all available methods **and** the Managed Identity Credential):
* `connection_string` - For authentication using a connection string.
* `account_name` and `account_key` - For authentication using a Shared Key.

Limit parameters (mainly for internal usage):
* `s3_max_single_part_upload_size` - Limits the size of a single block upload to Blob Storage.
* `min_bytes_for_seek` - Limits the size of a seekable region.
* `max_single_read_retries` - Limits the number of attempts to read a chunk of data from Blob Storage.
* `max_single_download_retries` - Limits the number of attempts to download a readable buffer from Blob Storage.
* `thread_pool_size` - Limits the number of threads with which `IDiskRemote` is instantiated.
* `s3_max_inflight_parts_for_one_file` - Limits the number of put requests that can be run concurrently for one object.

Other parameters:
* `metadata_path` - Path on the local FS to store metadata files for Blob Storage. Default value is `/var/lib/clickhouse/disks/<disk_name>/`.
* `skip_access_check` - If true, disk access checks will not be performed on disk start-up. Default value is `false`.
* `read_resource` — Resource name to be used for [scheduling](/docs/en/operations/workload-scheduling.md) of read requests to this disk. Default value is an empty string (IO scheduling is not enabled for this disk).
* `write_resource` — Resource name to be used for [scheduling](/docs/en/operations/workload-scheduling.md) of write requests to this disk. Default value is an empty string (IO scheduling is not enabled for this disk).

Examples of working configurations can be found in the integration tests directory (see e.g. [test_merge_tree_azure_blob_storage](https://github.com/ClickHouse/ClickHouse/blob/master/tests/integration/test_merge_tree_azure_blob_storage/configs/config.d/storage_conf.xml) or [test_azure_blob_storage_zero_copy_replication](https://github.com/ClickHouse/ClickHouse/blob/master/tests/integration/test_azure_blob_storage_zero_copy_replication/configs/config.d/storage_conf.xml)).

:::note Zero-copy replication is not ready for production
Zero-copy replication is disabled by default in ClickHouse version 22.8 and higher. This feature is not recommended for production use.
:::

## Using HDFS storage {#hdfs-storage}

In this sample configuration:
- the disk is of type `hdfs`
- the data is hosted at `hdfs://hdfs1:9000/clickhouse/`

```xml
<clickhouse>
    <storage_configuration>
        <disks>
            <hdfs>
                <type>hdfs</type>
                <endpoint>hdfs://hdfs1:9000/clickhouse/</endpoint>
                <skip_access_check>true</skip_access_check>
            </hdfs>
            <hdd>
                <type>local</type>
                <path>/</path>
            </hdd>
        </disks>
        <policies>
            <hdfs>
@ -32,26 +422,17 @@ Configuration markup:
                <main>
                    <disk>hdfs</disk>
                </main>
                <external>
                    <disk>hdd</disk>
                </external>
            </volumes>
        </hdfs>
    </policies>
    </storage_configuration>

    <merge_tree>
        <min_bytes_for_wide_part>0</min_bytes_for_wide_part>
    </merge_tree>
</clickhouse>
```

Required parameters:

- `endpoint` — HDFS endpoint URL in `path` format. The endpoint URL should contain a root path to store data.

Optional parameters:

- `min_bytes_for_seek` — The minimal number of bytes to use a seek operation instead of sequential read. Default value: `1 Mb`.

### Using Data Encryption {#encrypted-virtual-file-system}

You can encrypt the data stored on [S3](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-s3) or [HDFS](#configuring-hdfs) external disks, or on a local disk. To turn on the encryption mode, in the configuration file you must define a disk with the type `encrypted` and choose a disk on which the data will be saved. An `encrypted` disk ciphers all written files on the fly, and when you read files from an `encrypted` disk it deciphers them automatically. So you can work with an `encrypted` disk like with a normal one.
|
||||
|
||||
@ -112,7 +493,7 @@ Example of disk configuration:
|
||||
</clickhouse>
|
||||
```
|
||||
|
||||
## Using local cache {#using-local-cache}
### Using local cache {#using-local-cache}

It is possible to configure a local cache over disks in the storage configuration starting from version 22.3.
For versions 22.3 - 22.7, cache is supported only for the `s3` disk type. For versions >= 22.8, cache is supported for any disk type: S3, Azure, Local, Encrypted, etc.
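
Once a cache disk is in use, its state can be inspected from SQL. A couple of sketches, assuming the `system.filesystem_cache` table is available in these versions:

```sql
-- Number and total size of currently cached file segments.
SELECT count(), formatReadableSize(sum(size)) FROM system.filesystem_cache;

-- Drop all filesystem caches, e.g. before repeating a cold-read measurement.
SYSTEM DROP FILESYSTEM CACHE;
```
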
@ -139,13 +520,13 @@ Example of configuration for versions later or equal to 22.8:
        </cache>
    </disks>
    <policies>
        <s3-cache>
        <s3_cache>
            <volumes>
                <main>
                    <disk>cache</disk>
                </main>
            </volumes>
        </s3-cache>
        </s3_cache>
    </policies>
</storage_configuration>
```

@ -165,13 +546,13 @@ Example of configuration for versions earlier than 22.8:
        </s3>
    </disks>
    <policies>
        <s3-cache>
        <s3_cache>
            <volumes>
                <main>
                    <disk>s3</disk>
                </main>
            </volumes>
        </s3-cache>
        </s3_cache>
    </policies>
</storage_configuration>
```

@ -275,23 +656,92 @@ Cache profile events:

- `CachedWriteBufferCacheWriteBytes`, `CachedWriteBufferCacheWriteMicroseconds`

## Using in-memory cache (userspace page cache) {#userspace-page-cache}

The File Cache described above stores cached data in local files. Alternatively, object-store-based disks can be configured to use a "userspace page cache", which is RAM-only. The userspace page cache is recommended only if the file cache can't be used for some reason, e.g. if the machine doesn't have a local disk at all. Note that the file cache effectively uses RAM for caching too, since the OS caches the contents of local files.

To enable the userspace page cache for disks that don't use the file cache, use the setting `use_page_cache_for_disks_without_file_cache`.

By default, on Linux, the userspace page cache will use all available memory, similar to the OS page cache. In tools like `top` and `ps`, the clickhouse server process will typically show a resident set size near 100% of the machine's RAM - this is normal, and most of this memory is actually reclaimable by the OS on memory pressure (`MADV_FREE`). This behavior can be disabled with the server setting `page_cache_use_madv_free = 0`, making the userspace page cache use a fixed amount of memory `page_cache_size` with no special interaction with the OS. On macOS, `page_cache_use_madv_free` is always disabled as it doesn't have lazy `MADV_FREE`.

Unfortunately, `page_cache_use_madv_free` makes it difficult to tell if the server is close to running out of memory, since the RSS metric becomes useless. The async metric `UnreclaimableRSS` shows the amount of physical memory used by the server, excluding the memory reclaimable by the OS: `select value from system.asynchronous_metrics where metric = 'UnreclaimableRSS'`. Use it for monitoring instead of RSS. This metric is only available if `page_cache_use_madv_free` is enabled.
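
As a sketch of how these settings combine (the values are illustrative assumptions): enable the page cache per query or per profile, and optionally pin its size in the server configuration:

```sql
-- Query-level setting: allow disks without a file cache to use the
-- in-memory userspace page cache.
SET use_page_cache_for_disks_without_file_cache = 1;
```

```xml
<!-- Server configuration sketch: disable the MADV_FREE interaction and cap
     the page cache at a fixed 10 GiB (an illustrative value). -->
<clickhouse>
    <page_cache_use_madv_free>0</page_cache_use_madv_free>
    <page_cache_size>10737418240</page_cache_size>
</clickhouse>
```
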
## Storing Data on Web Server {#storing-data-on-webserver}

There is a tool `clickhouse-static-files-uploader`, which prepares a data directory for a given table (`SELECT data_paths FROM system.tables WHERE name = 'table_name'`). For each table you need, you get a directory of files. These files can be uploaded to, for example, a web server with static files. After this preparation, you can load this table into any ClickHouse server via `DiskWeb`.
### Using static Web storage (read-only) {#web-storage}

This is a read-only disk. Its data is only read and never modified. A new table is loaded to this disk via an `ATTACH TABLE` query (see the example below). The local disk is not actually used: each `SELECT` query results in an `http` request to fetch the required data. Any modification of the table data results in an exception, i.e. the following types of queries are not allowed: [CREATE TABLE](/docs/en/sql-reference/statements/create/table.md), [ALTER TABLE](/docs/en/sql-reference/statements/alter/index.md), [RENAME TABLE](/docs/en/sql-reference/statements/rename.md/#misc_operations-rename_table), [DETACH TABLE](/docs/en/sql-reference/statements/detach.md) and [TRUNCATE TABLE](/docs/en/sql-reference/statements/truncate.md).
Web storage can be used for read-only purposes. An example use is for hosting sample data, or for migrating data.
There is a tool `clickhouse-static-files-uploader`, which prepares a data directory for a given table (`SELECT data_paths FROM system.tables WHERE name = 'table_name'`). For each table you need, you get a directory of files. These files can be uploaded to, for example, a web server with static files. After this preparation, you can load this table into any ClickHouse server via `DiskWeb`.

Web server storage is supported only for the [MergeTree](/docs/en/engines/table-engines/mergetree-family/mergetree.md) and [Log](/docs/en/engines/table-engines/log-family/log.md) engine families. To access the data stored on a `web` disk, use the [storage_policy](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#terms) setting when executing the query. For example, `ATTACH TABLE table_web UUID '{}' (id Int32) ENGINE = MergeTree() ORDER BY id SETTINGS storage_policy = 'web'`.
In this sample configuration:
- the disk is of type `web`
- the data is hosted at `http://nginx:80/test1/`
- a cache on local storage is used

```xml
<clickhouse>
    <storage_configuration>
        <disks>
            <web>
                <type>web</type>
                <endpoint>http://nginx:80/test1/</endpoint>
            </web>
            <cached_web>
                <type>cache</type>
                <disk>web</disk>
                <path>cached_web_cache/</path>
                <max_size>100000000</max_size>
            </cached_web>
        </disks>
        <policies>
            <web>
                <volumes>
                    <main>
                        <disk>web</disk>
                    </main>
                </volumes>
            </web>
            <cached_web>
                <volumes>
                    <main>
                        <disk>cached_web</disk>
                    </main>
                </volumes>
            </cached_web>
        </policies>
    </storage_configuration>
</clickhouse>
```

:::tip
Storage can also be configured temporarily within a query. If a web dataset is not expected to be used routinely, see [dynamic configuration](#dynamic-configuration) and skip editing the configuration file.
:::

:::tip
A [demo dataset](https://github.com/ClickHouse/web-tables-demo) is hosted on GitHub. To prepare your own tables for web storage, see the tool [clickhouse-static-files-uploader](/docs/en/operations/storing-data.md/#storing-data-on-webserver).
:::

In this `ATTACH TABLE` query the `UUID` provided matches the directory name of the data, and the endpoint is the URL for the raw GitHub content.

```sql
# highlight-next-line
ATTACH TABLE uk_price_paid UUID 'cf712b4f-2ca8-435c-ac23-c4393efe52f7'
(
    price UInt32,
    date Date,
    postcode1 LowCardinality(String),
    postcode2 LowCardinality(String),
    type Enum8('other' = 0, 'terraced' = 1, 'semi-detached' = 2, 'detached' = 3, 'flat' = 4),
    is_new UInt8,
    duration Enum8('unknown' = 0, 'freehold' = 1, 'leasehold' = 2),
    addr1 String,
    addr2 String,
    street LowCardinality(String),
    locality LowCardinality(String),
    town LowCardinality(String),
    district LowCardinality(String),
    county LowCardinality(String)
)
ENGINE = MergeTree
ORDER BY (postcode1, postcode2, addr1, addr2)
# highlight-start
SETTINGS disk = disk(
    type=web,
    endpoint='https://raw.githubusercontent.com/ClickHouse/web-tables-demo/main/web/'
);
# highlight-end
```

A ready test case follows; you need to add this configuration to the server config:

@ -487,7 +937,7 @@ If URL is not reachable on disk load when the server is starting up tables, then
Use the [http_max_single_read_retries](/docs/en/operations/settings/settings.md/#http-max-single-read-retries) setting to limit the maximum number of retries during a single HTTP read.

## Zero-copy Replication (not ready for production) {#zero-copy}
### Zero-copy Replication (not ready for production) {#zero-copy}

Zero-copy replication is possible, but not recommended, with `S3` and `HDFS` disks. Zero-copy replication means that if the data is stored remotely on several machines and needs to be synchronized, then only the metadata is replicated (paths to the data parts), but not the data itself.

@ -47,7 +47,7 @@ An example:
<engine>ENGINE = MergeTree PARTITION BY toYYYYMM(event_date) ORDER BY (event_date, event_time) SETTINGS index_granularity = 1024</engine>
-->
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>

@ -26,7 +26,9 @@ priority: 0
is_active: 0
active_children: 0
dequeued_requests: 67
canceled_requests: 0
dequeued_cost: 4692272
canceled_cost: 0
busy_periods: 63
vruntime: 938454.1999999989
system_vruntime: ᴺᵁᴸᴸ

@ -54,7 +56,9 @@ Columns:
- `is_active` (`UInt8`) - Whether this node is currently active - has resource requests to be dequeued and constraints satisfied.
- `active_children` (`UInt64`) - The number of children in active state.
- `dequeued_requests` (`UInt64`) - The total number of resource requests dequeued from this node.
- `canceled_requests` (`UInt64`) - The total number of resource requests canceled from this node.
- `dequeued_cost` (`UInt64`) - The sum of costs (e.g. size in bytes) of all requests dequeued from this node.
- `canceled_cost` (`UInt64`) - The sum of costs (e.g. size in bytes) of all requests canceled from this node.
- `busy_periods` (`UInt64`) - The total number of deactivations of this node.
- `vruntime` (`Nullable(Float64)`) - For children of `fair` nodes only. Virtual runtime of a node used by the SFQ algorithm to select the next child to process in a max-min fair manner.
- `system_vruntime` (`Nullable(Float64)`) - For `fair` nodes only. Virtual runtime showing the `vruntime` of the last processed resource request. Used during child activation as the new value of `vruntime`.
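
A typical way to read this table is to select one scheduler node and print it vertically. A sketch (the `resource` and `path` values are assumptions matching the example output above):

```sql
-- Inspect one scheduler node; FORMAT Vertical produces output like the example above.
SELECT *
FROM system.scheduler
WHERE resource = 'network_read' AND path = '/prio/fair/prod'
FORMAT Vertical;
```
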
@ -483,7 +483,7 @@ Where:

- `r1` - the number of unique visitors who visited the site during 2020-01-01 (the `cond1` condition).
- `r2` - the number of unique visitors who visited the site during a specific time period between 2020-01-01 and 2020-01-02 (`cond1` and `cond2` conditions).
- `r3` - the number of unique visitors who visited the site during a specific time period between 2020-01-01 and 2020-01-03 (`cond1` and `cond3` conditions).
- `r3` - the number of unique visitors who visited the site during a specific time period on 2020-01-01 and 2020-01-03 (`cond1` and `cond3` conditions).
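
For context, values like `r1`..`r3` come from a `retention` aggregation along these lines. A sketch, where `retention_test` with columns `date` and `uid` is an assumed test table:

```sql
-- Count unique visitors retained across three consecutive days.
SELECT
    sum(r[1]) AS r1,
    sum(r[2]) AS r2,
    sum(r[3]) AS r3
FROM
(
    SELECT
        uid,
        retention(date = '2020-01-01', date = '2020-01-02', date = '2020-01-03') AS r
    FROM retention_test
    WHERE date IN ('2020-01-01', '2020-01-02', '2020-01-03')
    GROUP BY uid
);
```
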
## uniqUpTo(N)(x)