Merge branch 'master' into fetch-remove-covered
.github/workflows/backport_branches.yml (vendored, 2 changes)

@@ -466,6 +466,7 @@ jobs:
      - BuilderDebTsan
      - BuilderDebDebug
    runs-on: [self-hosted, style-checker]
+   if: ${{ success() || failure() }}
    steps:
      - name: Set envs
        run: |
@@ -504,6 +505,7 @@ jobs:
      - BuilderBinDarwin
      - BuilderBinDarwinAarch64
    runs-on: [self-hosted, style-checker]
+   if: ${{ success() || failure() }}
    steps:
      - name: Set envs
        run: |
.github/workflows/cancel.yml (vendored, 2 changes)

@@ -6,7 +6,7 @@ env:

on: # yamllint disable-line rule:truthy
  workflow_run:
-   workflows: ["PullRequestCI", "ReleaseCI", "DocsCheck", "BackportPR"]
+   workflows: ["PullRequestCI", "ReleaseBranchCI", "DocsCheck", "BackportPR"]
    types:
      - requested
jobs:
.github/workflows/debug.yml (vendored, 2 changes)

@@ -2,7 +2,7 @@
name: Debug

'on':
- [push, pull_request, release, workflow_dispatch]
+ [push, pull_request, release, workflow_dispatch, workflow_call]

jobs:
  DebugInfo:
.github/workflows/master.yml (vendored, 75 changes)

@@ -974,6 +974,7 @@ jobs:
      - BuilderDebTsan
      - BuilderDebUBsan
    runs-on: [self-hosted, style-checker]
+   if: ${{ success() || failure() }}
    steps:
      - name: Set envs
        run: |
@@ -1021,6 +1022,7 @@ jobs:
      - BuilderBinClangTidy
      - BuilderDebShared
    runs-on: [self-hosted, style-checker]
+   if: ${{ success() || failure() }}
    steps:
      - name: Set envs
        run: |
@@ -2992,6 +2994,77 @@ jobs:
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH"
+##############################################################################################
+###################################### SQLANCER FUZZERS ######################################
+##############################################################################################
+  SQLancerTestRelease:
+    needs: [BuilderDebRelease]
+    runs-on: [self-hosted, fuzzer-unit-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/sqlancer_release
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=SQLancer (release)
+          REPO_COPY=${{runner.temp}}/sqlancer_release/ClickHouse
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Clear repository
+        run: |
+          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: SQLancer
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 sqlancer_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  SQLancerTestDebug:
+    needs: [BuilderDebDebug]
+    runs-on: [self-hosted, fuzzer-unit-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/sqlancer_debug
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=SQLancer (debug)
+          REPO_COPY=${{runner.temp}}/sqlancer_debug/ClickHouse
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Clear repository
+        run: |
+          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: SQLancer
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 sqlancer_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
  FinishCheck:
    needs:
      - DockerHubPush
@@ -3051,6 +3124,8 @@ jobs:
      - UnitTestsUBsan
      - UnitTestsReleaseClang
      - SharedBuildSmokeTest
+     - SQLancerTestRelease
+     - SQLancerTestDebug
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Clear repository
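The SQLancer jobs above use the standard `$GITHUB_ENV` idiom (`cat >> "$GITHUB_ENV" << 'EOF' ...`) to export `TEMP_PATH`, `CHECK_NAME`, etc. to subsequent steps of the same job. A minimal Python sketch of that mechanism — the values below are illustrative stand-ins, not the CI's real paths:

```python
# GitHub Actions exports any KEY=VALUE lines appended to the file named by
# $GITHUB_ENV to every later step of the job. Sketch of the idiom, assuming
# simple single-line values (multi-line values need the heredoc delimiter form).
import os

def set_job_envs(envs: dict) -> None:
    # GITHUB_ENV is set by the Actions runner; fall back to a local file for dry runs.
    env_file = os.environ.get("GITHUB_ENV", "github_env.txt")
    with open(env_file, "a") as f:
        for key, value in envs.items():
            f.write(f"{key}={value}\n")

set_job_envs({
    "TEMP_PATH": "/tmp/sqlancer_release",   # stands in for ${{runner.temp}}/sqlancer_release
    "CHECK_NAME": "SQLancer (release)",
})
```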
.github/workflows/nightly.yml (vendored, 58 changes)

@@ -10,6 +10,9 @@ env:
  workflow_dispatch:

jobs:
+  Debug:
+    # The task for having a preserved ENV and event.json for later investigation
+    uses: ./.github/workflows/debug.yml
  DockerHubPushAarch64:
    runs-on: [self-hosted, style-checker-aarch64]
    steps:
@@ -122,3 +125,58 @@ jobs:
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
+  SonarCloud:
+    runs-on: [self-hosted, builder]
+    env:
+      SONAR_SCANNER_VERSION: 4.7.0.2747
+      SONAR_SERVER_URL: "https://sonarcloud.io"
+      BUILD_WRAPPER_OUT_DIR: build_wrapper_output_directory # Directory where build-wrapper output will be placed
+      CC: clang-15
+      CXX: clang++-15
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          fetch-depth: 0 # Shallow clones should be disabled for a better relevancy of analysis
+          submodules: true
+      - name: Set up JDK 11
+        uses: actions/setup-java@v1
+        with:
+          java-version: 11
+      - name: Download and set up sonar-scanner
+        env:
+          SONAR_SCANNER_DOWNLOAD_URL: https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-${{ env.SONAR_SCANNER_VERSION }}-linux.zip
+        run: |
+          mkdir -p "$HOME/.sonar"
+          curl -sSLo "$HOME/.sonar/sonar-scanner.zip" "${{ env.SONAR_SCANNER_DOWNLOAD_URL }}"
+          unzip -o "$HOME/.sonar/sonar-scanner.zip" -d "$HOME/.sonar/"
+          echo "$HOME/.sonar/sonar-scanner-${{ env.SONAR_SCANNER_VERSION }}-linux/bin" >> "$GITHUB_PATH"
+      - name: Download and set up build-wrapper
+        env:
+          BUILD_WRAPPER_DOWNLOAD_URL: ${{ env.SONAR_SERVER_URL }}/static/cpp/build-wrapper-linux-x86.zip
+        run: |
+          curl -sSLo "$HOME/.sonar/build-wrapper-linux-x86.zip" "${{ env.BUILD_WRAPPER_DOWNLOAD_URL }}"
+          unzip -o "$HOME/.sonar/build-wrapper-linux-x86.zip" -d "$HOME/.sonar/"
+          echo "$HOME/.sonar/build-wrapper-linux-x86" >> "$GITHUB_PATH"
+      - name: Set Up Build Tools
+        run: |
+          sudo apt-get update
+          sudo apt-get install -yq git cmake ccache python3 ninja-build
+          sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
+      - name: Run build-wrapper
+        run: |
+          mkdir build
+          cd build
+          cmake ..
+          cd ..
+          build-wrapper-linux-x86-64 --out-dir ${{ env.BUILD_WRAPPER_OUT_DIR }} cmake --build build/
+      - name: Run sonar-scanner
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
+        run: |
+          sonar-scanner \
+            --define sonar.host.url="${{ env.SONAR_SERVER_URL }}" \
+            --define sonar.cfamily.build-wrapper-output="${{ env.BUILD_WRAPPER_OUT_DIR }}" \
+            --define sonar.projectKey="ClickHouse_ClickHouse" \
+            --define sonar.organization="clickhouse-java" \
+            --define sonar.exclusions="**/*.java,**/*.ts,**/*.js,**/*.css,**/*.sql"
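The two "Download and set up" steps above are plain fetch-and-unzip operations. A rough Python equivalent, reusing the same version and URLs as the workflow (note `zipfile` does not restore the executable bits that `unzip` preserves, so this is a sketch, not a drop-in replacement):

```python
# Mirrors the sonar-scanner / build-wrapper download steps of the SonarCloud job.
import io
import zipfile
import urllib.request
from pathlib import Path

SONAR_SCANNER_VERSION = "4.7.0.2747"
SONAR_SERVER_URL = "https://sonarcloud.io"

def fetch_zip(url: str, dest: Path) -> None:
    # Download a zip archive and unpack it under dest (curl ... && unzip -o ... -d).
    with urllib.request.urlopen(url) as resp:
        zipfile.ZipFile(io.BytesIO(resp.read())).extractall(dest)

sonar_dir = Path.home() / ".sonar"
sonar_dir.mkdir(parents=True, exist_ok=True)
fetch_zip(
    f"https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/"
    f"sonar-scanner-cli-{SONAR_SCANNER_VERSION}-linux.zip",
    sonar_dir,
)
fetch_zip(f"{SONAR_SERVER_URL}/static/cpp/build-wrapper-linux-x86.zip", sonar_dir)
```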
.github/workflows/pull_request.yml (vendored, 77 changes)

@@ -112,7 +112,7 @@ jobs:
  StyleCheck:
    needs: DockerHubPush
    runs-on: [self-hosted, style-checker]
-   if: ${{ success() || failure() }}
+   if: ${{ success() || failure() || always() }}
    steps:
      - name: Set envs
        run: |
@@ -2023,6 +2023,7 @@ jobs:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH"
  TestsBugfixCheck:
+   needs: [CheckLabels, StyleCheck]
    runs-on: [self-hosted, stress-tester]
    steps:
      - name: Set envs
@@ -3490,6 +3491,77 @@ jobs:
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH"
+##############################################################################################
+###################################### SQLANCER FUZZERS ######################################
+##############################################################################################
+  SQLancerTestRelease:
+    needs: [BuilderDebRelease]
+    runs-on: [self-hosted, fuzzer-unit-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/sqlancer_release
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=SQLancer (release)
+          REPO_COPY=${{runner.temp}}/sqlancer_release/ClickHouse
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Clear repository
+        run: |
+          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: SQLancer
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 sqlancer_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
+  SQLancerTestDebug:
+    needs: [BuilderDebDebug]
+    runs-on: [self-hosted, fuzzer-unit-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/sqlancer_debug
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=SQLancer (debug)
+          REPO_COPY=${{runner.temp}}/sqlancer_debug/ClickHouse
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Clear repository
+        run: |
+          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: SQLancer
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 sqlancer_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"
#############################################################################################
###################################### JEPSEN TESTS #########################################
#############################################################################################
@@ -3500,7 +3572,6 @@ jobs:
    if: contains(github.event.pull_request.labels.*.name, 'jepsen-test')
    needs: [BuilderBinRelease]
    uses: ./.github/workflows/jepsen.yml
-
  FinishCheck:
    needs:
      - StyleCheck
@@ -3575,6 +3646,8 @@ jobs:
      - SharedBuildSmokeTest
      - CompatibilityCheck
      - IntegrationTestsFlakyCheck
+     - SQLancerTestRelease
+     - SQLancerTestDebug
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Clear repository
.github/workflows/release_branches.yml (vendored, 2 changes)

@@ -541,6 +541,7 @@ jobs:
      - BuilderDebMsan
      - BuilderDebDebug
    runs-on: [self-hosted, style-checker]
+   if: ${{ success() || failure() }}
    steps:
      - name: Set envs
        run: |
@@ -580,6 +581,7 @@ jobs:
      - BuilderBinDarwin
      - BuilderBinDarwinAarch64
    runs-on: [self-hosted, style-checker]
+   if: ${{ success() || failure() }}
    steps:
      - name: Set envs
        run: |
.gitignore (vendored, 3 changes)

@@ -154,3 +154,6 @@ website/package-lock.json
/programs/server/metadata
/programs/server/store

+# temporary test files
+tests/queries/0_stateless/test_*
+tests/queries/0_stateless/*.binary
.snyk (new file, 4 lines)

@@ -0,0 +1,4 @@
+# Snyk (https://snyk.io) policy file
+exclude:
+  global:
+    - tests/**
CHANGELOG.md

@@ -31,6 +31,8 @@
* Add OpenTelemetry support to ON CLUSTER DDL (require `distributed_ddl_entry_format_version` to be set to 4). [#41484](https://github.com/ClickHouse/ClickHouse/pull/41484) ([Frank Chen](https://github.com/FrankChen021)).
+* Added system table `asynchronous_insert_log`. It contains information about asynchronous inserts (including results of queries in fire-and-forget mode (with `wait_for_async_insert=0`)) for better introspection. [#42040](https://github.com/ClickHouse/ClickHouse/pull/42040) ([Anton Popov](https://github.com/CurtizJ)).
* Add support for methods `lz4`, `bz2`, `snappy` in HTTP's `Accept-Encoding` which is a non-standard extension to HTTP protocol. [#42071](https://github.com/ClickHouse/ClickHouse/pull/42071) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Adds Morton Coding (ZCurve) encode/decode functions. [#41753](https://github.com/ClickHouse/ClickHouse/pull/41753) ([Constantine Peresypkin](https://github.com/pkit)).
* Add support for `SET setting_name = DEFAULT`. [#42187](https://github.com/ClickHouse/ClickHouse/pull/42187) ([Filatenkov Artur](https://github.com/FArthur-cmd)).

#### Experimental Feature
* Added new infrastructure for query analysis and planning under the `allow_experimental_analyzer` setting. [#31796](https://github.com/ClickHouse/ClickHouse/pull/31796) ([Maksim Kita](https://github.com/kitaisreal)).
@@ -66,8 +68,7 @@
* Allow readable size values (like `1TB`) in cache config. [#41688](https://github.com/ClickHouse/ClickHouse/pull/41688) ([Kseniia Sumarokova](https://github.com/kssenii)).
* ClickHouse could cache stale DNS entries for some period of time (15 seconds by default) until the cache won't be updated asynchronously. During these periods ClickHouse can nevertheless try to establish a connection and produce errors. This behavior is fixed. [#41707](https://github.com/ClickHouse/ClickHouse/pull/41707) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Add interactive history search with fzf-like utility (fzf/sk) for `clickhouse-client`/`clickhouse-local` (note you can use `FZF_DEFAULT_OPTS`/`SKIM_DEFAULT_OPTIONS` to additionally configure the behavior). [#41730](https://github.com/ClickHouse/ClickHouse/pull/41730) ([Azat Khuzhin](https://github.com/azat)).
-*
-Only allow clients connecting to a secure server with an invalid certificate only to proceed with the '--accept-certificate' flag. [#41743](https://github.com/ClickHouse/ClickHouse/pull/41743) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
+* Only allow clients connecting to a secure server with an invalid certificate only to proceed with the '--accept-certificate' flag. [#41743](https://github.com/ClickHouse/ClickHouse/pull/41743) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Add function `tryBase58Decode`, similar to the existing function `tryBase64Decode`. [#41824](https://github.com/ClickHouse/ClickHouse/pull/41824) ([Robert Schulze](https://github.com/rschu1ze)).
* Improve feedback when replacing partition with different primary key. Fixes [#34798](https://github.com/ClickHouse/ClickHouse/issues/34798). [#41838](https://github.com/ClickHouse/ClickHouse/pull/41838) ([Salvatore](https://github.com/tbsal)).
* Fix parallel parsing: segmentator now checks `max_block_size`. This fixed memory overallocation in case of parallel parsing and small LIMIT. [#41852](https://github.com/ClickHouse/ClickHouse/pull/41852) ([Vitaly Baranov](https://github.com/vitlibar)).
@@ -86,6 +87,8 @@ Only allow clients connecting to a secure server with an invalid certificate onl
* Fix rarely invalid cast of aggregate state types with complex types such as Decimal. This fixes [#42408](https://github.com/ClickHouse/ClickHouse/issues/42408). [#42417](https://github.com/ClickHouse/ClickHouse/pull/42417) ([Amos Bird](https://github.com/amosbird)).
* Allow to use `Date32` arguments for `dateName` function. [#42554](https://github.com/ClickHouse/ClickHouse/pull/42554) ([Roman Vasin](https://github.com/rvasin)).
+* Now filters with NULL literals will be used during index analysis. [#34063](https://github.com/ClickHouse/ClickHouse/issues/34063). [#41842](https://github.com/ClickHouse/ClickHouse/pull/41842) ([Amos Bird](https://github.com/amosbird)).
+* Merge parts if every part in the range is older than a certain threshold. The threshold can be set by using `min_age_to_force_merge_seconds`. This closes [#35836](https://github.com/ClickHouse/ClickHouse/issues/35836). [#42423](https://github.com/ClickHouse/ClickHouse/pull/42423) ([Antonio Andelic](https://github.com/antonio2368)). This is continuation of [#39550](https://github.com/ClickHouse/ClickHouse/pull/39550) by [@fastio](https://github.com/fastio) who implemented most of the logic.
* Improve the time to recover lost keeper connections. [#42541](https://github.com/ClickHouse/ClickHouse/pull/42541) ([Raúl Marín](https://github.com/Algunenano)).

#### Build/Testing/Packaging Improvement
* Add fuzzer for table definitions [#40096](https://github.com/ClickHouse/ClickHouse/pull/40096) ([Anton Popov](https://github.com/CurtizJ)). This represents the biggest advancement for ClickHouse testing in this year so far.
README.md

@@ -5,7 +5,7 @@ ClickHouse® is an open-source column-oriented database management system that a
## Useful Links

* [Official website](https://clickhouse.com/) has a quick high-level overview of ClickHouse on the main page.
-* [ClickHouse Cloud](https://clickhouse.com/cloud) ClickHouse as a service, built by the creators and maintainers.
+* [ClickHouse Cloud](https://clickhouse.cloud) ClickHouse as a service, built by the creators and maintainers.
* [Tutorial](https://clickhouse.com/docs/en/getting_started/tutorial/) shows how to set up and query a small ClickHouse cluster.
* [Documentation](https://clickhouse.com/docs/en/) provides more in-depth information.
* [YouTube channel](https://www.youtube.com/c/ClickHouseDB) has a lot of content about ClickHouse in video format.
@@ -16,5 +16,6 @@ ClickHouse® is an open-source column-oriented database management system that a
* [Contacts](https://clickhouse.com/company/contact) can help to get your questions answered if there are any.

## Upcoming events
-* [**v22.10 Release Webinar**](https://clickhouse.com/company/events/v22-10-release-webinar) Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release, provide live demos, and share vision into what is coming in the roadmap.
-* [**Introducing ClickHouse Cloud**](https://clickhouse.com/company/events/cloud-beta) Introducing ClickHouse as a service, built by creators and maintainers of the fastest OLAP database on earth. Join Tanya Bragin for a detailed walkthrough of ClickHouse Cloud capabilities, as well as a peek behind the curtain to understand the unique architecture that makes our service tick.
+* [**v22.11 Release Webinar**](https://clickhouse.com/company/events/v22-11-release-webinar) Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release, provide live demos, and share vision into what is coming in the roadmap.
+* [**ClickHouse Meetup at the Deutsche Bank office in Berlin**](https://www.meetup.com/clickhouse-berlin-user-group/events/289311596/) Hear from Deutsche Bank on why they chose ClickHouse for big sensitive data in a regulated environment. The ClickHouse team will then present how ClickHouse is used for real time financial data analytics, including tick data, trade analytics and risk management.
+* [**AWS re:Invent**](https://clickhouse.com/company/events/aws-reinvent) Core members of the ClickHouse team -- including 2 of our founders -- will be at re:Invent from November 29 to December 3. We are available on the show floor, but are also determining interest in holding an event during the time there.
SECURITY.md

@@ -10,9 +10,11 @@ The following versions of ClickHouse server are currently being supported with s

| Version | Supported |
|:-|:-|
+| 22.10 | ✔️ |
| 22.9 | ✔️ |
| 22.8 | ✔️ |
-| 22.7 | ✔️ |
-| 22.6 | ✔️ |
+| 22.7 | ❌ |
+| 22.6 | ❌ |
| 22.5 | ❌ |
+| 22.4 | ❌ |
| 22.3 | ✔️ |
base/base/safeExit.cpp

@@ -1,8 +1,10 @@
#if defined(OS_LINUX)
#    include <sys/syscall.h>
#endif
+#include <cstdlib>
#include <unistd.h>
#include <base/safeExit.h>
+#include <base/defines.h> /// for THREAD_SANITIZER

[[noreturn]] void safeExit(int code)
{
base/glibc-compatibility/musl/getauxval.c

@@ -8,6 +8,8 @@
#include <link.h> // ElfW
#include <errno.h>

+#include "syscall.h"
+
#define ARRAY_SIZE(a) sizeof((a))/sizeof((a[0]))

/// Suppress TSan since it is possible for this code to be called from multiple threads,
@@ -39,7 +41,9 @@ ssize_t __retry_read(int fd, void * buf, size_t count)
{
    for (;;)
    {
-        ssize_t ret = read(fd, buf, count);
+        // We cannot use the read syscall as it will be intercepted by sanitizers, which aren't
+        // initialized yet. Emit syscall directly.
+        ssize_t ret = __syscall_ret(__syscall(SYS_read, fd, buf, count));
        if (ret == -1)
        {
            if (errno == EINTR)
@@ -90,6 +94,11 @@ static unsigned long NO_SANITIZE_THREAD __auxv_init_procfs(unsigned long type)
    _Static_assert(sizeof(aux) < 4096, "Unexpected sizeof(aux)");
    while (__retry_read(fd, &aux, sizeof(aux)) == sizeof(aux))
    {
+#if defined(__has_feature)
+#if __has_feature(memory_sanitizer)
+        __msan_unpoison(&aux, sizeof(aux));
+#endif
+#endif
        if (aux.a_type == AT_NULL)
        {
            break;
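For context on what `__auxv_init_procfs` walks through: `/proc/self/auxv` is a binary stream of `(a_type, a_val)` machine-word pairs terminated by `AT_NULL` (0). A Linux-only Python sketch of the same loop:

```python
# Read the auxiliary vector the way __auxv_init_procfs does: fixed-size
# (type, value) records until AT_NULL. Uses native word size, like ElfW(auxv_t).
import struct

def read_auxv(path: str = "/proc/self/auxv") -> dict:
    entries = {}
    pair = struct.Struct("2N")  # two native unsigned size_t-sized words
    with open(path, "rb") as f:
        while True:
            chunk = f.read(pair.size)
            if len(chunk) < pair.size:
                break
            a_type, a_val = pair.unpack(chunk)
            if a_type == 0:  # AT_NULL terminates the vector
                break
            entries[a_type] = a_val
    return entries

AT_PAGESZ = 6
print(read_auxv().get(AT_PAGESZ))  # typically 4096 on x86_64
```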
cmake/autogenerated_versions.txt

@@ -2,11 +2,11 @@

# NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION,
# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
-SET(VERSION_REVISION 54467)
+SET(VERSION_REVISION 54468)
SET(VERSION_MAJOR 22)
-SET(VERSION_MINOR 10)
+SET(VERSION_MINOR 11)
SET(VERSION_PATCH 1)
-SET(VERSION_GITHASH 3030d4c7ff09ec44ab07d0a8069ea923227288a1)
-SET(VERSION_DESCRIBE v22.10.1.1-testing)
-SET(VERSION_STRING 22.10.1.1)
+SET(VERSION_GITHASH 98ab5a3c189232ea2a3dddb9d2be7196ae8b3434)
+SET(VERSION_DESCRIBE v22.11.1.1-testing)
+SET(VERSION_STRING 22.11.1.1)
# end of autochange
cmake/clang_tidy.cmake

@@ -3,10 +3,20 @@ option (ENABLE_CLANG_TIDY "Use clang-tidy static analyzer" OFF)

if (ENABLE_CLANG_TIDY)

-    find_program (CLANG_TIDY_PATH NAMES "clang-tidy" "clang-tidy-15" "clang-tidy-14" "clang-tidy-13" "clang-tidy-12")
+    find_program (CLANG_TIDY_CACHE_PATH NAMES "clang-tidy-cache")
+    if (CLANG_TIDY_CACHE_PATH)
+        find_program (_CLANG_TIDY_PATH NAMES "clang-tidy" "clang-tidy-15" "clang-tidy-14" "clang-tidy-13" "clang-tidy-12")
+
+        # Why do we use ';' here?
+        # It's a cmake black magic: https://cmake.org/cmake/help/latest/prop_tgt/LANG_CLANG_TIDY.html#prop_tgt:%3CLANG%3E_CLANG_TIDY
+        # The CLANG_TIDY_PATH is passed to CMAKE_CXX_CLANG_TIDY, which follows CXX_CLANG_TIDY syntax.
+        set (CLANG_TIDY_PATH "${CLANG_TIDY_CACHE_PATH};${_CLANG_TIDY_PATH}" CACHE STRING "A combined command to run clang-tidy with caching wrapper")
+    else ()
+        find_program (CLANG_TIDY_PATH NAMES "clang-tidy" "clang-tidy-15" "clang-tidy-14" "clang-tidy-13" "clang-tidy-12")
+    endif ()

    if (CLANG_TIDY_PATH)
-        message(STATUS
+        message (STATUS
            "Using clang-tidy: ${CLANG_TIDY_PATH}.
            The checks will be run during build process.
            See the .clang-tidy file at the root directory to configure the checks.")
@@ -15,11 +25,15 @@ if (ENABLE_CLANG_TIDY)

        # clang-tidy requires assertions to guide the analysis
        # Note that NDEBUG is set implicitly by CMake for non-debug builds
-        set(COMPILER_FLAGS "${COMPILER_FLAGS} -UNDEBUG")
+        set (COMPILER_FLAGS "${COMPILER_FLAGS} -UNDEBUG")

-        # The variable CMAKE_CXX_CLANG_TIDY will be set inside src and base directories with non third-party code.
+        # The variable CMAKE_CXX_CLANG_TIDY will be set inside the following directories with non third-party code.
+        # - base
+        # - programs
+        # - src
+        # - utils
        # set (CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_PATH}")
    else ()
-        message(${RECONFIGURE_MESSAGE_LEVEL} "clang-tidy is not found")
+        message (${RECONFIGURE_MESSAGE_LEVEL} "clang-tidy is not found")
    endif ()
endif ()
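The semicolon trick referenced in the comment works because CMake treats `"a;b"` as the list `["a", "b"]` and uses the whole list as the command prefix for `<LANG>_CLANG_TIDY`, so the cache wrapper is invoked with the real clang-tidy as its first argument. A tiny Python illustration with hypothetical paths:

```python
# How CMake expands the combined CLANG_TIDY_PATH value (paths are made up):
combined = "/usr/bin/clang-tidy-cache;/usr/bin/clang-tidy-15"  # value CMake stores
argv = combined.split(";") + ["--version"]                     # list-expansion CMake performs
print(argv)  # ['/usr/bin/clang-tidy-cache', '/usr/bin/clang-tidy-15', '--version']
```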
contrib/NuRaft (vendored, 2 changes)

@@ -1 +1 @@
-Subproject commit 1be805e7cb2494aa8170015493474379b0362dfc
+Subproject commit e4e746a24eb56861a86f3672771e3308d8c40722
contrib/cctz (vendored, 2 changes)

@@ -1 +1 @@
-Subproject commit 7a454c25c7d16053bcd327cdd16329212a08fa4a
+Subproject commit 5c8528fb35e89ee0b3a7157490423fba0d4dd7b5
contrib/libcxx (vendored, 2 changes)

@@ -1 +1 @@
-Subproject commit 172b2ae074f6755145b91c53a95c8540c1468239
+Subproject commit 4db7f838afd3139eb3761694b04d31275df45d2d
contrib/libcxx-cmake/CMakeLists.txt

@@ -25,6 +25,7 @@ set(SRCS
"${LIBCXX_SOURCE_DIR}/src/ios.cpp"
"${LIBCXX_SOURCE_DIR}/src/ios.instantiations.cpp"
"${LIBCXX_SOURCE_DIR}/src/iostream.cpp"
+"${LIBCXX_SOURCE_DIR}/src/legacy_debug_handler.cpp"
"${LIBCXX_SOURCE_DIR}/src/legacy_pointer_safety.cpp"
"${LIBCXX_SOURCE_DIR}/src/locale.cpp"
"${LIBCXX_SOURCE_DIR}/src/memory.cpp"
@@ -49,6 +50,7 @@ set(SRCS
"${LIBCXX_SOURCE_DIR}/src/valarray.cpp"
"${LIBCXX_SOURCE_DIR}/src/variant.cpp"
"${LIBCXX_SOURCE_DIR}/src/vector.cpp"
+"${LIBCXX_SOURCE_DIR}/src/verbose_abort.cpp"
)

add_library(cxx ${SRCS})
contrib/libcxxabi (vendored, 2 changes)

@@ -1 +1 @@
-Subproject commit 6eb7cc7a7bdd779e6734d1b9fb451df2274462d7
+Subproject commit a736a6b3c6a7b8aae2ebad629ca21b2c55b4820e
contrib/libcxxabi-cmake/CMakeLists.txt

@@ -9,6 +9,7 @@ set(SRCS
"${LIBCXXABI_SOURCE_DIR}/src/cxa_exception_storage.cpp"
"${LIBCXXABI_SOURCE_DIR}/src/cxa_guard.cpp"
"${LIBCXXABI_SOURCE_DIR}/src/cxa_handlers.cpp"
+# "${LIBCXXABI_SOURCE_DIR}/src/cxa_noexception.cpp"
"${LIBCXXABI_SOURCE_DIR}/src/cxa_personality.cpp"
"${LIBCXXABI_SOURCE_DIR}/src/cxa_thread_atexit.cpp"
"${LIBCXXABI_SOURCE_DIR}/src/cxa_vector.cpp"
contrib/llvm-project-cmake/CMakeLists.txt

@@ -21,6 +21,9 @@ set (LLVM_INCLUDE_DIRS
    "${ClickHouse_BINARY_DIR}/contrib/llvm-project/llvm/include"
)
set (LLVM_LIBRARY_DIRS "${ClickHouse_BINARY_DIR}/contrib/llvm-project/llvm")
+# NOTE: You should not remove this line since otherwise it will use default 20,
+# and llvm cannot be compiled with bundled libcxx and 20 standard.
+set (CMAKE_CXX_STANDARD 14)

# This list was generated by listing all LLVM libraries, compiling the binary and removing all libraries while it still compiles.
set (REQUIRED_LLVM_LIBRARIES
contrib/rocksdb (vendored, 2 changes)

@@ -1 +1 @@
-Subproject commit e7c2b2f7bcf3b4b33892a1a6d25c32a93edfbdb9
+Subproject commit 2c8998e26c6d46b27c710d7829c3a15e34959f70
contrib/zlib-ng (vendored, 2 changes)

@@ -1 +1 @@
-Subproject commit bffad6f6fe74d6a2f92e2668390664a926c68733
+Subproject commit 50f0eae1a411764cd6d1e85b3ce471438acd3c1c
docker/packager/binary/Dockerfile

@@ -91,6 +91,9 @@ ENV PATH="$PATH:/usr/local/go/bin"
ENV GOPATH=/workdir/go
ENV GOCACHE=/workdir/

+RUN curl https://raw.githubusercontent.com/matus-chochlik/ctcache/7fd516e91c17779cbc6fc18bd119313d9532dd90/clang-tidy-cache -Lo /usr/bin/clang-tidy-cache \
+    && chmod +x /usr/bin/clang-tidy-cache
+
RUN mkdir /workdir && chmod 777 /workdir
WORKDIR /workdir
docker/packager/packager

@@ -258,6 +258,10 @@ def parse_env_variables(
    if clang_tidy:
        # 15G is not enough for tidy build
        cache_maxsize = "25G"
+
+        # `CTCACHE_DIR` has the same purpose as the `CCACHE_DIR` above.
+        # It's there to have the clang-tidy cache embedded into our standard `CCACHE_DIR`
+        result.append("CTCACHE_DIR=/ccache/clang-tidy-cache")
    result.append(f"CCACHE_MAXSIZE={cache_maxsize}")

    if distcc_hosts:
@@ -282,9 +286,7 @@ def parse_env_variables(
    cmake_flags.append("-DENABLE_TESTS=1")

    if shared_libraries:
-        cmake_flags.append(
-            "-DUSE_STATIC_LIBRARIES=0 -DSPLIT_SHARED_LIBRARIES=1"
-        )
+        cmake_flags.append("-DUSE_STATIC_LIBRARIES=0 -DSPLIT_SHARED_LIBRARIES=1")
        # We can't always build utils because it requires too much space, but
        # we have to build them at least in some way in CI. The shared library
        # build is probably the least heavy disk-wise.
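A condensed sketch of the `parse_env_variables()` pattern shown above — flags are accumulated as `KEY=VALUE` strings, and a clang-tidy build gets a bigger ccache plus a `CTCACHE_DIR` nested inside it. The 15G default below is an assumption inferred from the diff's comment:

```python
# Simplified, hypothetical distillation of the packager's env assembly.
def build_env(clang_tidy: bool) -> list:
    result = []
    cache_maxsize = "15G"  # assumed default; the diff only shows the tidy override
    if clang_tidy:
        # 15G is not enough for a tidy build; keep the clang-tidy cache inside CCACHE_DIR.
        cache_maxsize = "25G"
        result.append("CTCACHE_DIR=/ccache/clang-tidy-cache")
    result.append(f"CCACHE_MAXSIZE={cache_maxsize}")
    return result

print(build_env(clang_tidy=True))
```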
docker/server/Dockerfile.alpine

@@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="22.9.3.18"
+ARG VERSION="22.10.2.11"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

# user/group precreated explicitly with fixed uid/gid on purpose.
docker/server/Dockerfile.ubuntu

@@ -21,7 +21,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list

ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
-ARG VERSION="22.9.3.18"
+ARG VERSION="22.10.2.11"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

# set non-empty deb_location_url url to create a docker image
@@ -27,9 +27,14 @@ RUN apt-get update \
    tar \
    tzdata \
    unixodbc \
+    python3-pip \
+    libcurl4-openssl-dev \
+    libssl-dev \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

+RUN pip3 install pycurl
+
# Architecture of the image when BuildKit/buildx is used
ARG TARGETARCH

docker/test/sqlancer/Dockerfile

@@ -1,5 +1,5 @@
# docker build -t clickhouse/sqlancer-test .
-FROM ubuntu:20.04
+FROM ubuntu:22.04

# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
docker/test/sqlancer/process_sqlancer_result.py

@@ -11,13 +11,15 @@ def process_result(result_folder):
    summary = []
    paths = []
    tests = [
-        "TLPWhere",
+        "TLPAggregate",
+        "TLPDistinct",
        "TLPGroupBy",
        "TLPHaving",
+        "TLPWhere",
        "TLPWhereGroupBy",
-        "TLPDistinct",
-        "TLPAggregate",
+        "NoREC",
    ]
+    failed_tests = []

    for test in tests:
        err_path = "{}/{}.err".format(result_folder, test)
@@ -33,15 +35,11 @@ def process_result(result_folder):
        with open(err_path, "r") as f:
            if "AssertionError" in f.read():
                summary.append((test, "FAIL"))
+                failed_tests.append(test)
                status = "failure"
            else:
                summary.append((test, "OK"))

-    logs_path = "{}/logs.tar.gz".format(result_folder)
-    if not os.path.exists(logs_path):
-        logging.info("No logs tar on path %s", logs_path)
-    else:
-        paths.append(logs_path)
    stdout_path = "{}/stdout.log".format(result_folder)
    if not os.path.exists(stdout_path):
        logging.info("No stdout log on path %s", stdout_path)
@@ -53,18 +51,23 @@ def process_result(result_folder):
    else:
        paths.append(stderr_path)

-    description = "SQLancer test run. See report"
+    description = "SQLancer run successfully"
+    if status == "failure":
+        description = f"Failed oracles: {failed_tests}"

    return status, description, summary, paths


-def write_results(results_file, status_file, results, status):
+def write_results(
+    results_file, status_file, description_file, results, status, description
+):
    with open(results_file, "w") as f:
        out = csv.writer(f, delimiter="\t")
        out.writerows(results)
    with open(status_file, "w") as f:
-        out = csv.writer(f, delimiter="\t")
-        out.writerow(status)
+        f.write(status + "\n")
+    with open(description_file, "w") as f:
+        f.write(description + "\n")


if __name__ == "__main__":
@@ -72,13 +75,20 @@ if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="ClickHouse script for parsing results of sqlancer test"
    )
-    parser.add_argument("--in-results-dir", default="/test_output/")
-    parser.add_argument("--out-results-file", default="/test_output/test_results.tsv")
-    parser.add_argument("--out-status-file", default="/test_output/check_status.tsv")
+    parser.add_argument("--in-results-dir", default="/workspace/")
+    parser.add_argument("--out-results-file", default="/workspace/summary.tsv")
+    parser.add_argument("--out-description-file", default="/workspace/description.txt")
+    parser.add_argument("--out-status-file", default="/workspace/status.txt")
    args = parser.parse_args()

-    state, description, test_results, logs = process_result(args.in_results_dir)
+    status, description, summary, logs = process_result(args.in_results_dir)
    logging.info("Result parsed")
-    status = (state, description)
-    write_results(args.out_results_file, args.out_status_file, test_results, status)
+    write_results(
+        args.out_results_file,
+        args.out_status_file,
+        args.out_description_file,
+        summary,
+        status,
+        description,
+    )
    logging.info("Result written")
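For reference, this is how the reworked helpers above fit together: `process_result()` now returns a plain status string plus a human-readable description, and `write_results()` persists all three output files. A usage sketch, assuming the script is importable under a hypothetical module name:

```python
# Hypothetical import path; in CI the script runs as /process_sqlancer_result.py.
from process_sqlancer_result import process_result, write_results

status, description, summary, logs = process_result("/workspace/")
write_results(
    "/workspace/summary.tsv",      # per-oracle (name, OK/FAIL) rows
    "/workspace/status.txt",       # overall "success" / "failure"
    "/workspace/description.txt",  # e.g. "Failed oracles: ['NoREC']"
    summary,
    status,
    description,
)
```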
docker/test/sqlancer/run.sh

@@ -1,33 +1,62 @@
#!/bin/bash
+set -exu
+trap "exit" INT TERM

-set -e -x
+function wget_with_retry
+{
+    for _ in 1 2 3 4; do
+        if wget -nv -nd -c "$1";then
+            return 0
+        else
+            sleep 0.5
+        fi
+    done
+    return 1
+}

-dpkg -i package_folder/clickhouse-common-static_*.deb
-dpkg -i package_folder/clickhouse-common-static-dbg_*.deb
-dpkg -i package_folder/clickhouse-server_*.deb
-dpkg -i package_folder/clickhouse-client_*.deb
+if [ -z ${BINARY_URL_TO_DOWNLOAD+x} ]
+then
+    echo "No BINARY_URL_TO_DOWNLOAD provided."
+else
+    wget_with_retry "$BINARY_URL_TO_DOWNLOAD"
+    chmod +x /clickhouse
+fi

-service clickhouse-server start && sleep 5
+if [[ -f "/clickhouse" ]]; then
+    echo "/clickhouse exists"
+else
+    exit 1
+fi
+
+cd /workspace
+/clickhouse server -P /workspace/clickhouse-server.pid -L /workspace/clickhouse-server.log -E /workspace/clickhouse-server.log.err --daemon
+
+for _ in $(seq 1 60); do if [[ $(wget -q 'localhost:8123' -O-) == 'Ok.' ]]; then break ; else sleep 1; fi ; done

cd /sqlancer/sqlancer-master

-export TIMEOUT=300
-export NUM_QUERIES=1000
+TIMEOUT=300
+NUM_QUERIES=1000
+NUM_THREADS=10
+TESTS=( "TLPGroupBy" "TLPHaving" "TLPWhere" "TLPDistinct" "TLPAggregate" "NoREC" )
+echo "${TESTS[@]}"

-( java -jar target/sqlancer-*.jar --num-threads 10 --timeout-seconds $TIMEOUT --num-queries $NUM_QUERIES --username default --password "" clickhouse --oracle TLPWhere | tee /test_output/TLPWhere.out ) 3>&1 1>&2 2>&3 | tee /test_output/TLPWhere.err
-( java -jar target/sqlancer-*.jar --num-threads 10 --timeout-seconds $TIMEOUT --num-queries $NUM_QUERIES --username default --password "" clickhouse --oracle TLPGroupBy | tee /test_output/TLPGroupBy.out ) 3>&1 1>&2 2>&3 | tee /test_output/TLPGroupBy.err
-( java -jar target/sqlancer-*.jar --num-threads 10 --timeout-seconds $TIMEOUT --num-queries $NUM_QUERIES --username default --password "" clickhouse --oracle TLPHaving | tee /test_output/TLPHaving.out ) 3>&1 1>&2 2>&3 | tee /test_output/TLPHaving.err
-( java -jar target/sqlancer-*.jar --num-threads 10 --timeout-seconds $TIMEOUT --num-queries $NUM_QUERIES --username default --password "" clickhouse --oracle TLPWhere --oracle TLPGroupBy | tee /test_output/TLPWhereGroupBy.out ) 3>&1 1>&2 2>&3 | tee /test_output/TLPWhereGroupBy.err
-( java -jar target/sqlancer-*.jar --num-threads 10 --timeout-seconds $TIMEOUT --num-queries $NUM_QUERIES --username default --password "" clickhouse --oracle TLPDistinct | tee /test_output/TLPDistinct.out ) 3>&1 1>&2 2>&3 | tee /test_output/TLPDistinct.err
-( java -jar target/sqlancer-*.jar --num-threads 10 --timeout-seconds $TIMEOUT --num-queries $NUM_QUERIES --username default --password "" clickhouse --oracle TLPAggregate | tee /test_output/TLPAggregate.out ) 3>&1 1>&2 2>&3 | tee /test_output/TLPAggregate.err
+for TEST in "${TESTS[@]}"; do
+    echo "$TEST"
+    if [[ $(wget -q 'localhost:8123' -O-) == 'Ok.' ]]
+    then
+        echo "Server is OK"
+        ( java -jar target/sqlancer-*.jar --log-each-select true --print-failed false --num-threads "$NUM_THREADS" --timeout-seconds "$TIMEOUT" --num-queries "$NUM_QUERIES" --username default --password "" clickhouse --oracle "$TEST" | tee "/workspace/$TEST.out" ) 3>&1 1>&2 2>&3 | tee "/workspace/$TEST.err"
+    else
+        touch "/workspace/$TEST.err" "/workspace/$TEST.out"
+        echo "Server is not responding" | tee /workspace/server_crashed.log
+    fi
+done

-service clickhouse stop
+ls /workspace
+pkill -F /workspace/clickhouse-server.pid || true

-ls /var/log/clickhouse-server/
-tar czf /test_output/logs.tar.gz -C /var/log/clickhouse-server/ .
-tail -n 1000 /var/log/clickhouse-server/stderr.log > /test_output/stderr.log
-tail -n 1000 /var/log/clickhouse-server/stdout.log > /test_output/stdout.log
-tail -n 1000 /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log
+for _ in $(seq 1 60); do if [[ $(wget -q 'localhost:8123' -O-) == 'Ok.' ]]; then sleep 1 ; else break; fi ; done

-/process_sqlancer_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
-ls /test_output
+/process_sqlancer_result.py || echo -e "failure\tCannot parse results" > /workspace/check_status.tsv
+ls /workspace
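The script above gates each oracle run on the same readiness probe it uses at startup: an HTTP GET against port 8123 that must return `Ok.`. A Python rendering of that loop, under the assumption of the default HTTP interface:

```python
# Poll the ClickHouse HTTP interface until it answers 'Ok.', like the
# `for _ in $(seq 1 60) ... wget -q 'localhost:8123' -O-` loop in run.sh.
import time
import urllib.request

def wait_for_clickhouse(url: str = "http://localhost:8123", attempts: int = 60) -> bool:
    for _ in range(attempts):
        try:
            with urllib.request.urlopen(url, timeout=1) as resp:
                if resp.read().strip() == b"Ok.":
                    return True
        except OSError:
            pass  # server not up yet; mirrors the silent wget failure branch
        time.sleep(1)
    return False

if not wait_for_clickhouse():
    raise SystemExit("Server is not responding")
```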
docker/test/style/Dockerfile

@@ -1,7 +1,7 @@
# docker build -t clickhouse/style-test .
FROM ubuntu:20.04
-ARG ACT_VERSION=0.2.25
-ARG ACTIONLINT_VERSION=1.6.8
+ARG ACT_VERSION=0.2.33
+ARG ACTIONLINT_VERSION=1.6.22

# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
docs/README.md

@@ -212,4 +212,4 @@ Templates:

## How to Build Documentation

-You can build your documentation manually by following the instructions in [docs/tools/README.md](../docs/tools/README.md). Also, our CI runs the documentation build after the `documentation` label is added to PR. You can see the results of a build in the GitHub interface. If you have no permissions to add labels, a reviewer of your PR will add it.
+You can build your documentation manually by following the instructions in the docs repo [contrib-writing-guide](https://github.com/ClickHouse/clickhouse-docs/blob/main/contrib-writing-guide.md). Also, our CI runs the documentation build after the `documentation` label is added to PR. You can see the results of a build in the GitHub interface. If you have no permissions to add labels, a reviewer of your PR will add it.
docs/changelogs/v22.10.1.1877-stable.md (new file, 352 lines)

@@ -0,0 +1,352 @@
+---
+sidebar_position: 1
+sidebar_label: 2022
+---
+
+# 2022 Changelog
+
+### ClickHouse release v22.10.1.1877-stable (98ab5a3c189) FIXME as compared to v22.9.1.2603-stable (3030d4c7ff0)
+
+#### Backward Incompatible Change
+* Rename cache commands: `show caches` -> `show filesystem caches`, `describe cache` -> `describe filesystem cache`. [#41508](https://github.com/ClickHouse/ClickHouse/pull/41508) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Remove support for the `WITH TIMEOUT` section for `LIVE VIEW`. This closes [#40557](https://github.com/ClickHouse/ClickHouse/issues/40557). [#42173](https://github.com/ClickHouse/ClickHouse/pull/42173) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+
+#### New Feature
+* Add Rust code support into ClickHouse with BLAKE3 hash-function library as an example. [#33435](https://github.com/ClickHouse/ClickHouse/pull/33435) ([BoloniniD](https://github.com/BoloniniD)).
+* This is the initial implement of Kusto Query Language. (MVP). [#37961](https://github.com/ClickHouse/ClickHouse/pull/37961) ([Yong Wang](https://github.com/kashwy)).
+* * Support limiting of temporary data stored on disk using settings `max_temporary_data_on_disk_size_for_user`/`max_temporary_data_on_disk_size_for_query` . [#40893](https://github.com/ClickHouse/ClickHouse/pull/40893) ([Vladimir C](https://github.com/vdimir)).
+* Support Java integers hashing in `javaHash`. [#41131](https://github.com/ClickHouse/ClickHouse/pull/41131) ([JackyWoo](https://github.com/JackyWoo)).
+* This PR is to support the OpenSSL in-house build like the BoringSSL submodule. Build flag i.e. ENABLE_CH_BUNDLE_BORINGSSL is used to choose between BoringSSL and OpenSSL. By default, the BoringSSL in-house build will be used. [#41142](https://github.com/ClickHouse/ClickHouse/pull/41142) ([MeenaRenganathan22](https://github.com/MeenaRenganathan22)).
+* Composable protocol configuration is added. [#41198](https://github.com/ClickHouse/ClickHouse/pull/41198) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
+* Add OpenTelemetry support to ON CLUSTER DDL(require `distributed_ddl_entry_format_version` to be set to 4). [#41484](https://github.com/ClickHouse/ClickHouse/pull/41484) ([Frank Chen](https://github.com/FrankChen021)).
+* Add setting `format_json_object_each_row_column_for_object_name` to write/parse object name as column value in JSONObjectEachRow format. [#41703](https://github.com/ClickHouse/ClickHouse/pull/41703) ([Kruglov Pavel](https://github.com/Avogar)).
+* adds Morton Coding (ZCurve) encode/decode functions. [#41753](https://github.com/ClickHouse/ClickHouse/pull/41753) ([Constantine Peresypkin](https://github.com/pkit)).
+* Implement support for different UUID binary formats with support for the two most prevalent ones: the default big-endian and Microsoft's mixed-endian as specified in [RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122#section-4.1.1). [#42108](https://github.com/ClickHouse/ClickHouse/pull/42108) ([ltrk2](https://github.com/ltrk2)).
+* Added an aggregate function `analysisOfVariance` (`anova`) to perform a statistical test over several groups of normally distributed observations to find out whether all groups have the same mean or not. Original PR [#37872](https://github.com/ClickHouse/ClickHouse/issues/37872). [#42131](https://github.com/ClickHouse/ClickHouse/pull/42131) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Add support for `SET setting_name = DEFAULT`. [#42187](https://github.com/ClickHouse/ClickHouse/pull/42187) ([Filatenkov Artur](https://github.com/FArthur-cmd)).
+* * Add `URL` Functions which conform rfc. Functions include: `cutToFirstSignificantSubdomainCustomRFC`, `cutToFirstSignificantSubdomainCustomWithWWWRFC`, `cutToFirstSignificantSubdomainRFC`, `cutToFirstSignificantSubdomainWithWWWRFC`, `domainRFC`, `domainWithoutWWWRFC`, `firstSignificantSubdomainCustomRFC`, `firstSignificantSubdomainRFC`, `portRFC`, `topLevelDomainRFC`. [#42274](https://github.com/ClickHouse/ClickHouse/pull/42274) ([Quanfa Fu](https://github.com/dentiscalprum)).
+* Added functions (`randUniform`, `randNormal`, `randLogNormal`, `randExponential`, `randChiSquared`, `randStudentT`, `randFisherF`, `randBernoulli`, `randBinomial`, `randNegativeBinomial`, `randPoisson` ) to generate random values according to the specified distributions. This closes [#21834](https://github.com/ClickHouse/ClickHouse/issues/21834). [#42411](https://github.com/ClickHouse/ClickHouse/pull/42411) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+
+#### Performance Improvement
+* Implement operator precedence element parser to resolve stack overflow issues and make the required stack size smaller. [#34892](https://github.com/ClickHouse/ClickHouse/pull/34892) ([Nikolay Degterinsky](https://github.com/evillique)).
+* DISTINCT in order optimization leverage sorting properties of data streams. This improvement will enable reading in order for DISTINCT if applicable (before it was necessary to provide ORDER BY for columns in DISTINCT). [#41014](https://github.com/ClickHouse/ClickHouse/pull/41014) ([Igor Nikonov](https://github.com/devcrafter)).
+* ColumnVector: optimize UInt8 index with AVX512VBMI. [#41247](https://github.com/ClickHouse/ClickHouse/pull/41247) ([Guo Wangyang](https://github.com/guowangy)).
+* The performance experiments of **SSB** (Star Schema Benchmark) on the ICX device (Intel Xeon Platinum 8380 CPU, 80 cores, 160 threads) shows that this change could bring a **2.95x** improvement of the geomean of all subcases' QPS. [#41675](https://github.com/ClickHouse/ClickHouse/pull/41675) ([Zhiguo Zhou](https://github.com/ZhiguoZh)).
+* Fixed slowness in JSONExtract with LowCardinality(String) tuples. [#41726](https://github.com/ClickHouse/ClickHouse/pull/41726) ([AlfVII](https://github.com/AlfVII)).
+* Add ldapr capabilities to AArch64 builds. This is supported from Graviton 2+, Azure and GCP instances. Only appeared in clang-15 [not so long ago](https://github.com/llvm/llvm-project/commit/9609b5daffe9fd28d83d83da895abc5113f76c24). [#41778](https://github.com/ClickHouse/ClickHouse/pull/41778) ([Daniel Kutenin](https://github.com/danlark1)).
+* Improve performance when comparing strings and one argument is empty constant string. [#41870](https://github.com/ClickHouse/ClickHouse/pull/41870) ([Jiebin Sun](https://github.com/jiebinn)).
+* optimize insertFrom of ColumnAggregateFunction to share Aggregate State in some cases. [#41960](https://github.com/ClickHouse/ClickHouse/pull/41960) ([flynn](https://github.com/ucasfl)).
+* Relax the "Too many parts" threshold. This closes [#6551](https://github.com/ClickHouse/ClickHouse/issues/6551). Now ClickHouse will allow more parts in a partition if the average part size is large enough (at least 10 GiB). This allows to have up to petabytes of data in a single partition of a single table on a single server, which is possible using disk shelves or object storage. [#42002](https://github.com/ClickHouse/ClickHouse/pull/42002) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Make writing to AzureBlobStorage more efficient (respect `max_single_part_upload_size` instead of writing a block per each buffer size). Inefficiency mentioned in [#41754](https://github.com/ClickHouse/ClickHouse/issues/41754). [#42041](https://github.com/ClickHouse/ClickHouse/pull/42041) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Make thread ids in the process list and query_log unique to avoid waste. [#42180](https://github.com/ClickHouse/ClickHouse/pull/42180) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+
+#### Improvement
+* Added new infrastructure for query analysis and planning under `allow_experimental_analyzer` setting. [#31796](https://github.com/ClickHouse/ClickHouse/pull/31796) ([Maksim Kita](https://github.com/kitaisreal)).
+* * Support expression `(EXPLAIN SELECT ...)` in a subquery. Queries like `SELECT * FROM (EXPLAIN PIPELINE SELECT col FROM TABLE ORDER BY col)` became valid. [#40630](https://github.com/ClickHouse/ClickHouse/pull/40630) ([Vladimir C](https://github.com/vdimir)).
+* Currently changing `async_insert_max_data_size` or `async_insert_busy_timeout_ms` in scope of query makes no sense and this leads to bad user experience. E.g. user wants to insert data rarely and he doesn't have an access to server config to tune default settings. [#40668](https://github.com/ClickHouse/ClickHouse/pull/40668) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Embedded Keeper will always start in the background allowing ClickHouse to start without achieving quorum. [#40991](https://github.com/ClickHouse/ClickHouse/pull/40991) ([Antonio Andelic](https://github.com/antonio2368)).
+* Improvements for reading from remote filesystems, made threadpool size for reads/writes configurable. Closes [#41070](https://github.com/ClickHouse/ClickHouse/issues/41070). [#41011](https://github.com/ClickHouse/ClickHouse/pull/41011) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Made reestablishing a new connection more reactive in case of expiration of the previous one. Previously there was a task which spawns every minute by default and thus a table could be in readonly state for about this time. [#41092](https://github.com/ClickHouse/ClickHouse/pull/41092) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Support all combinators combination in WindowTransform/arratReduce*/initializeAggregation/aggregate functions versioning. Previously combinators like `ForEach/Resample/Map` didn't work in these places, using them led to exception like`State function ... inserts results into non-state column`. [#41107](https://github.com/ClickHouse/ClickHouse/pull/41107) ([Kruglov Pavel](https://github.com/Avogar)).
+* Now projections can be used with zero copy replication. [#41147](https://github.com/ClickHouse/ClickHouse/pull/41147) ([alesapin](https://github.com/alesapin)).
+* - Add function tryDecrypt that returns NULL when decrypt fail (e.g. decrypt with incorrect key) instead of throwing exception. [#41206](https://github.com/ClickHouse/ClickHouse/pull/41206) ([Duc Canh Le](https://github.com/canhld94)).
+* Add the `unreserved_space` column to the `system.disks` table to check how much space is not taken by reservations per disk. [#41254](https://github.com/ClickHouse/ClickHouse/pull/41254) ([filimonov](https://github.com/filimonov)).
+* Support s3 authorisation headers from ast arguments. [#41261](https://github.com/ClickHouse/ClickHouse/pull/41261) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Add setting 'allow_implicit_no_password' that forbids creating a user with no password unless 'IDENTIFIED WITH no_password' is explicitly specified. [#41341](https://github.com/ClickHouse/ClickHouse/pull/41341) ([Nikolay Degterinsky](https://github.com/evillique)).
+* keeper-improvement: add support for uploading snapshots to S3. S3 information can be defined inside `keeper_server.s3_snapshot`. [#41342](https://github.com/ClickHouse/ClickHouse/pull/41342) ([Antonio Andelic](https://github.com/antonio2368)).
+* Add support for MultiRead in Keeper and internal ZooKeeper client. [#41410](https://github.com/ClickHouse/ClickHouse/pull/41410) ([Antonio Andelic](https://github.com/antonio2368)).
+* add a support for decimal type comparing with floating point literal in IN operator. [#41544](https://github.com/ClickHouse/ClickHouse/pull/41544) ([liang.huang](https://github.com/lhuang09287750)).
+* Allow readable size values in cache config. [#41688](https://github.com/ClickHouse/ClickHouse/pull/41688) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Check file path for path traversal attacks in errors logger for input formats. [#41694](https://github.com/ClickHouse/ClickHouse/pull/41694) ([Kruglov Pavel](https://github.com/Avogar)).
+* ClickHouse could cache stale DNS entries for some period of time (15 seconds by default) until the cache won't be updated asynchronously. During these period ClickHouse can nevertheless try to establish a connection and produce errors. This behaviour is fixed. [#41707](https://github.com/ClickHouse/ClickHouse/pull/41707) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Add interactive history search with fzf-like utility (fzf/sk) for `clickhouse-client`/`clickhouse-local` (note you can use `FZF_DEFAULT_OPTS`/`SKIM_DEFAULT_OPTIONS` to additionally configure the behavior). [#41730](https://github.com/ClickHouse/ClickHouse/pull/41730) ([Azat Khuzhin](https://github.com/azat)).
+* For client when connecting to a secure server with invalid certificate only allow to proceed with '--accept-certificate' flag. [#41743](https://github.com/ClickHouse/ClickHouse/pull/41743) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
+* Add function "tryBase58Decode()", similar to the existing function "tryBase64Decode()". [#41824](https://github.com/ClickHouse/ClickHouse/pull/41824) ([Robert Schulze](https://github.com/rschu1ze)).
+* Improve feedback when replacing partition with different primary key. Fixes [#34798](https://github.com/ClickHouse/ClickHouse/issues/34798). [#41838](https://github.com/ClickHouse/ClickHouse/pull/41838) ([Salvatore](https://github.com/tbsal)).
+* Replace back `clickhouse su` command with `sudo -u` in start in order to respect limits in `/etc/security/limits.conf`. [#41847](https://github.com/ClickHouse/ClickHouse/pull/41847) ([Eugene Konkov](https://github.com/ekonkov)).
+* Fix parallel parsing: segmentator now checks max_block_size. [#41852](https://github.com/ClickHouse/ClickHouse/pull/41852) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Don't report TABLE_IS_DROPPED exception in order to skip table in case is was just dropped. [#41908](https://github.com/ClickHouse/ClickHouse/pull/41908) ([AlfVII](https://github.com/AlfVII)).
+* Improve option enable_extended_results_for_datetime_functions to return results of type DateTime64 for functions toStartOfDay, toStartOfHour, toStartOfFifteenMinutes, toStartOfTenMinutes, toStartOfFiveMinutes, toStartOfMinute and timeSlot. [#41910](https://github.com/ClickHouse/ClickHouse/pull/41910) ([Roman Vasin](https://github.com/rvasin)).
+* Improve DateTime type inference for text formats. Now it respect setting `date_time_input_format` and doesn't try to infer datetimes from numbers as timestamps. Closes [#41389](https://github.com/ClickHouse/ClickHouse/issues/41389) Closes [#42206](https://github.com/ClickHouse/ClickHouse/issues/42206). [#41912](https://github.com/ClickHouse/ClickHouse/pull/41912) ([Kruglov Pavel](https://github.com/Avogar)).
+* Remove confusing warning when inserting with `perform_ttl_move_on_insert`=false. [#41980](https://github.com/ClickHouse/ClickHouse/pull/41980) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Allow user to write `countState(*)` similar to `count(*)`. This closes [#9338](https://github.com/ClickHouse/ClickHouse/issues/9338). [#41983](https://github.com/ClickHouse/ClickHouse/pull/41983) ([Amos Bird](https://github.com/amosbird)).
+* - Fix rankCorr size overflow. [#42020](https://github.com/ClickHouse/ClickHouse/pull/42020) ([Duc Canh Le](https://github.com/canhld94)).
+* Added an option to specify an arbitrary string as an environment name in the Sentry's config for more handy reports. [#42037](https://github.com/ClickHouse/ClickHouse/pull/42037) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Added system table `asynchronous_insert_log `. It contains information about asynchronous inserts (including results of queries in fire-and-forget mode (with `wait_for_async_insert=0`)) for better introspection. [#42040](https://github.com/ClickHouse/ClickHouse/pull/42040) ([Anton Popov](https://github.com/CurtizJ)).
+* Fix parsing out-of-range Date from CSV:. [#42044](https://github.com/ClickHouse/ClickHouse/pull/42044) ([Andrey Zvonov](https://github.com/zvonand)).
+* parseDataTimeBestEffort support comma between date and time. Closes [#42038](https://github.com/ClickHouse/ClickHouse/issues/42038). [#42049](https://github.com/ClickHouse/ClickHouse/pull/42049) ([flynn](https://github.com/ucasfl)).
+* Add support for methods lz4, bz2, snappy in 'Accept-Encoding'. [#42071](https://github.com/ClickHouse/ClickHouse/pull/42071) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Various minor fixes for BLAKE3 function. [#42073](https://github.com/ClickHouse/ClickHouse/pull/42073) ([BoloniniD](https://github.com/BoloniniD)).
+* Improved stale replica recovery process for `ReplicatedMergeTree`. If lost replica have some parts which absent on a healthy replica, but these parts should appear in future according to replication queue of the healthy replica, then lost replica will keep such parts instead of detaching them. [#42134](https://github.com/ClickHouse/ClickHouse/pull/42134) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* Support BACKUP to S3 with as-is path/data structure. [#42232](https://github.com/ClickHouse/ClickHouse/pull/42232) ([Azat Khuzhin](https://github.com/azat)).
+* Add a possibility to use Date32 arguments for date_diff function. Fix issue in date_diff function when using DateTime64 arguments with start date before Unix epoch and end date after Unix epoch. [#42308](https://github.com/ClickHouse/ClickHouse/pull/42308) ([Roman Vasin](https://github.com/rvasin)).
+* When uploading big parts to minio, 'Complete Multipart Upload' can take a long time. Minio sends heartbeats every 10 seconds (see https://github.com/minio/minio/pull/7198). But clickhouse times out earlier, because the default send/receive timeout is [set](https://github.com/ClickHouse/ClickHouse/blob/cc24fcd6d5dfb67f5f66f5483e986bd1010ad9cf/src/IO/S3/PocoHTTPClient.cpp#L123) to 5 seconds. [#42321](https://github.com/ClickHouse/ClickHouse/pull/42321) ([filimonov](https://github.com/filimonov)).
+* Add `S3` as a new type of the destination of backups. [#42333](https://github.com/ClickHouse/ClickHouse/pull/42333) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Fix rarely invalid cast of aggregate state types with complex types such as Decimal. This fixes [#42408](https://github.com/ClickHouse/ClickHouse/issues/42408). [#42417](https://github.com/ClickHouse/ClickHouse/pull/42417) ([Amos Bird](https://github.com/amosbird)).
+* Support skipping cache completely (both download to cache and reading cached data) in case the requested read range exceeds the threshold defined by cache setting `bypass_cache_threashold`, requires to be enabled with `enable_bypass_cache_with_threshold`). [#42418](https://github.com/ClickHouse/ClickHouse/pull/42418) ([Han Shukai](https://github.com/KinderRiven)).
+* Merge parts if every part in the range is older than a certain threshold. The threshold can be set by using `min_age_to_force_merge_seconds`. This closes [#35836](https://github.com/ClickHouse/ClickHouse/issues/35836). [#42423](https://github.com/ClickHouse/ClickHouse/pull/42423) ([Antonio Andelic](https://github.com/antonio2368)).
+* Enabled CompiledExpressionCache in clickhouse-local. [#42477](https://github.com/ClickHouse/ClickHouse/pull/42477) ([AlfVII](https://github.com/AlfVII)).
+* Remove support for the `{database}` macro from the client's prompt. It was displayed incorrectly if the database was unspecified and it was not updated on `USE` statements. This closes [#25891](https://github.com/ClickHouse/ClickHouse/issues/25891). [#42508](https://github.com/ClickHouse/ClickHouse/pull/42508) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* - Improve the time to recover lost keeper connections. [#42541](https://github.com/ClickHouse/ClickHouse/pull/42541) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Allow to use Date32 arguments for dateName function. [#42554](https://github.com/ClickHouse/ClickHouse/pull/42554) ([Roman Vasin](https://github.com/rvasin)).
|
||||
|
||||
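
A minimal sketch of the `countState(*)` syntax from the entry above; the table `t` and its data are hypothetical, not from the changelog:

```sql
-- Hypothetical table for illustration.
CREATE TABLE t (x UInt64) ENGINE = MergeTree ORDER BY x;
INSERT INTO t SELECT number FROM numbers(100);

-- countState(*) now parses the same way count(*) does;
-- the partial aggregation state can be merged later with countMerge.
SELECT countMerge(s) FROM (SELECT countState(*) AS s FROM t);
```
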
#### Bug Fix

* Now filters with NULL literals will be used during index analysis. This closes https://github.com/ClickHouse/ClickHouse/pull/41814 and [#34063](https://github.com/ClickHouse/ClickHouse/issues/34063). [#41842](https://github.com/ClickHouse/ClickHouse/pull/41842) ([Amos Bird](https://github.com/amosbird)).
* Choose the correct aggregation method for LowCardinality with BigInt. [#42342](https://github.com/ClickHouse/ClickHouse/pull/42342) ([Duc Canh Le](https://github.com/canhld94)).
* Fix using subqueries in row policy filters (see the sketch after this list). This PR fixes [#32463](https://github.com/ClickHouse/ClickHouse/issues/32463). [#42562](https://github.com/ClickHouse/ClickHouse/pull/42562) ([Vitaly Baranov](https://github.com/vitlibar)).
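
A sketch of the shape fixed by the row-policy entry above; the database, table, column, and policy names here are hypothetical:

```sql
-- A row policy whose USING condition contains a subquery now filters as expected.
CREATE ROW POLICY tenant_filter ON db.events
FOR SELECT
USING user_id IN (SELECT user_id FROM db.allowed_users)
TO ALL;
```
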
#### Build/Testing/Packaging Improvement

* Added support for WHERE clause generation to the AST Fuzzer and the possibility to add or remove ORDER BY and WHERE clauses. [#38519](https://github.com/ClickHouse/ClickHouse/pull/38519) ([Ilya Yatsishin](https://github.com/qoega)).
* Aarch64 binaries now require at least ARMv8.2, released in 2016. Most notably, this enables use of ARM LSE, i.e. native atomic operations. Also, the CMake build option "NO_ARMV81_OR_HIGHER" has been added to allow compilation of binaries for older ARMv8.0 hardware, e.g. Raspberry Pi 4. [#41610](https://github.com/ClickHouse/ClickHouse/pull/41610) ([Robert Schulze](https://github.com/rschu1ze)).
* After updating runners to 22.04, cgroups stopped working in privileged mode; here's the issue: https://github.com/moby/moby/issues/42275#issuecomment-1115055846. [#41857](https://github.com/ClickHouse/ClickHouse/pull/41857) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Allow building ClickHouse with Musl (small changes after it was already supported but broken). [#41987](https://github.com/ClickHouse/ClickHouse/pull/41987) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a check for the `$CLICKHOUSE_CRONFILE` file before running the `sed` command, to avoid a "file not found" error. [#42081](https://github.com/ClickHouse/ClickHouse/pull/42081) ([Chun-Sheng, Li](https://github.com/peter279k)).
* Update cctz to the latest master, update tzdb to 2020e. [#42273](https://github.com/ClickHouse/ClickHouse/pull/42273) ([Dom Del Nano](https://github.com/ddelnano)).
* Update tzdata to 2022e to support the new timezone changes. Palestine transitions are now Saturdays at 02:00. Simplify three Ukraine zones into one. Jordan and Syria switch from +02/+03 with DST to year-round +03. (https://data.iana.org/time-zones/tzdb/NEWS). This closes [#42252](https://github.com/ClickHouse/ClickHouse/issues/42252). [#42327](https://github.com/ClickHouse/ClickHouse/pull/42327) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix POWER8 support. [#42462](https://github.com/ClickHouse/ClickHouse/pull/42462) ([Boris Kuschel](https://github.com/bkuschel)).

#### Bug Fix (user-visible misbehavior in official stable or prestable release)

* Several fixes for DiskWeb. [#41652](https://github.com/ClickHouse/ClickHouse/pull/41652) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fixes an issue where `docker run` would fail if "https_port" is not present in the config. [#41693](https://github.com/ClickHouse/ClickHouse/pull/41693) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Mutations were not cancelled properly on server shutdown or on a `SYSTEM STOP MERGES` query, and cancellation might take a long time; it's fixed. [#41699](https://github.com/ClickHouse/ClickHouse/pull/41699) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix wrong results of queries with `ORDER BY` or `GROUP BY` by columns from a prefix of the sorting key wrapped into monotonic functions, with the "read in order" optimization enabled (settings `optimize_read_in_order` and `optimize_aggregation_in_order`). [#41701](https://github.com/ClickHouse/ClickHouse/pull/41701) ([Anton Popov](https://github.com/CurtizJ)).
* Fix a possible crash in `SELECT` from a `Merge` table with the `optimize_monotonous_functions_in_order_by` setting enabled. Fixes [#41269](https://github.com/ClickHouse/ClickHouse/issues/41269). [#41740](https://github.com/ClickHouse/ClickHouse/pull/41740) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed a "Part ... intersects part ..." error that might happen in extremely rare cases if a replica was restarted just after detaching some part as broken. [#41741](https://github.com/ClickHouse/ClickHouse/pull/41741) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Don't allow creating or altering merge tree tables with the virtual column name `_row_exists`, which is reserved for lightweight delete. Fixed [#41716](https://github.com/ClickHouse/ClickHouse/issues/41716). [#41763](https://github.com/ClickHouse/ClickHouse/pull/41763) ([Jianmei Zhang](https://github.com/zhangjmruc)).
* Fix a bug where CORS headers were missing in some HTTP responses. [#41792](https://github.com/ClickHouse/ClickHouse/pull/41792) ([Frank Chen](https://github.com/FrankChen021)).
* 22.9 might fail to start up a `ReplicatedMergeTree` table if that table was created by version 20.3 or older and was never altered; it's fixed. Fixes [#41742](https://github.com/ClickHouse/ClickHouse/issues/41742). [#41796](https://github.com/ClickHouse/ClickHouse/pull/41796) ([Alexander Tokmakov](https://github.com/tavplubix)).
* When batch sending fails for some reason, it cannot be automatically recovered; if it is not processed in time, this leads to accumulation, the printed error message becomes longer and longer, and the HTTP thread can block. [#41813](https://github.com/ClickHouse/ClickHouse/pull/41813) ([zhongyuankai](https://github.com/zhongyuankai)).
* Fix compact parts with the compressed marks setting. Fixes [#41783](https://github.com/ClickHouse/ClickHouse/issues/41783) and [#41746](https://github.com/ClickHouse/ClickHouse/issues/41746). [#41823](https://github.com/ClickHouse/ClickHouse/pull/41823) ([alesapin](https://github.com/alesapin)).
* Old versions of the Replicated database don't have a special marker in [Zoo]Keeper, so we need to check whether the node contains some obscure data instead of a special marker. [#41875](https://github.com/ClickHouse/ClickHouse/pull/41875) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix a possible exception in the fs cache. [#41884](https://github.com/ClickHouse/ClickHouse/pull/41884) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix `use_environment_credentials` for the s3 table function. [#41970](https://github.com/ClickHouse/ClickHouse/pull/41970) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fixed a "Directory already exists and is not empty" error on detaching a broken part that might prevent a `ReplicatedMergeTree` table from starting replication. Fixes [#40957](https://github.com/ClickHouse/ClickHouse/issues/40957). [#41981](https://github.com/ClickHouse/ClickHouse/pull/41981) ([Alexander Tokmakov](https://github.com/tavplubix)).
* toDateTime64() now returns the same output for negative integer and float arguments (see the sketch after this list). [#42025](https://github.com/ClickHouse/ClickHouse/pull/42025) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix writes into AzureBlobStorage. Partially closes [#41754](https://github.com/ClickHouse/ClickHouse/issues/41754). [#42034](https://github.com/ClickHouse/ClickHouse/pull/42034) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix a bzip2 decoding issue for specific bzip2 files. [#42046](https://github.com/ClickHouse/ClickHouse/pull/42046) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix the SQL function "toLastDayOfMonth()" with the setting "enable_extended_results_for_datetime_functions = 1" at the beginning of the extended range (January 1900). Fix the SQL function "toRelativeWeekNum()" with the same setting at the end of the extended range (December 2299). Improve the performance of the SQL functions "toISOYear()", "toFirstDayNumOfISOYearIndex()" and "toYearWeekOfNewyearMode()" by avoiding unnecessary index arithmetic. [#42084](https://github.com/ClickHouse/ClickHouse/pull/42084) ([Roman Vasin](https://github.com/rvasin)).
* The maximum size of fetches for each table was accidentally set to 8 while the pool size could be bigger. Now the maximum size of fetches for a table is equal to the pool size. [#42090](https://github.com/ClickHouse/ClickHouse/pull/42090) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* A table might be shut down and a dictionary might be detached before checking whether it can be dropped without breaking dependencies between tables; it's fixed. Fixes [#41982](https://github.com/ClickHouse/ClickHouse/issues/41982). [#42106](https://github.com/ClickHouse/ClickHouse/pull/42106) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix bad inefficiency of `remote_filesystem_read_method=read` with the filesystem cache. Closes [#42125](https://github.com/ClickHouse/ClickHouse/issues/42125). [#42129](https://github.com/ClickHouse/ClickHouse/pull/42129) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix a possible timeout exception for distributed queries with `use_hedged_requests=0`. [#42130](https://github.com/ClickHouse/ClickHouse/pull/42130) ([Azat Khuzhin](https://github.com/azat)).
* Fixed a minor bug in the function `runningDifference` when used with the `Date32` type. Previously `Date` was used instead, which could cause logical errors like `Bad cast from type DB::ColumnVector<int> to DB::ColumnVector<unsigned short>`. [#42143](https://github.com/ClickHouse/ClickHouse/pull/42143) ([Alfred Xu](https://github.com/sperlingxx)).
* Fix reusing of files > 4GB from a base backup. [#42146](https://github.com/ClickHouse/ClickHouse/pull/42146) ([Azat Khuzhin](https://github.com/azat)).
* Fix `DISTINCT` in order failing with `LOGICAL_ERROR` if the first column in the sorting key contains a function. [#42186](https://github.com/ClickHouse/ClickHouse/pull/42186) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix a bug with projections and the `aggregate_functions_null_for_empty` setting. This bug is very rare and appears only if you enable the `aggregate_functions_null_for_empty` setting in the server's config. This closes [#41647](https://github.com/ClickHouse/ClickHouse/issues/41647). [#42198](https://github.com/ClickHouse/ClickHouse/pull/42198) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix reading from a buffer with read-in-order in descending order. [#42236](https://github.com/ClickHouse/ClickHouse/pull/42236) ([Duc Canh Le](https://github.com/canhld94)).
* Fix a bug which prevented ClickHouse from starting when the `background_pool_size` setting is set in the default profile but `background_merges_mutations_concurrency_ratio` is not. [#42315](https://github.com/ClickHouse/ClickHouse/pull/42315) ([nvartolomei](https://github.com/nvartolomei)).
* `ALTER UPDATE` of an attached part (with columns different from the table schema) could create invalid `columns.txt` metadata on disk. Reading from such a part could fail with errors or return invalid data. Fixes [#42161](https://github.com/ClickHouse/ClickHouse/issues/42161). [#42319](https://github.com/ClickHouse/ClickHouse/pull/42319) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* The setting `additional_table_filters` was not applied to `Distributed` storage. Fixes [#41692](https://github.com/ClickHouse/ClickHouse/issues/41692). [#42322](https://github.com/ClickHouse/ClickHouse/pull/42322) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix a data race in query finish/cancel. This closes [#42346](https://github.com/ClickHouse/ClickHouse/issues/42346). [#42362](https://github.com/ClickHouse/ClickHouse/pull/42362) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* This reverts [#40217](https://github.com/ClickHouse/ClickHouse/issues/40217), which introduced a regression in date/time functions. [#42367](https://github.com/ClickHouse/ClickHouse/pull/42367) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix an assert cast in JOIN on a falsy condition. Closes [#42380](https://github.com/ClickHouse/ClickHouse/issues/42380). [#42407](https://github.com/ClickHouse/ClickHouse/pull/42407) ([Vladimir C](https://github.com/vdimir)).
* Fix a buffer overflow in the processing of Decimal data types. This closes [#42451](https://github.com/ClickHouse/ClickHouse/issues/42451). [#42465](https://github.com/ClickHouse/ClickHouse/pull/42465) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* `AggregateFunctionQuantile` now works correctly with `UInt128` columns. Previously, the quantile state interpreted `UInt128` columns as `Int128`, which could have led to incorrect results. [#42473](https://github.com/ClickHouse/ClickHouse/pull/42473) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix a bad assert during INSERT into Annoy indexes over non-Float32 columns. [#42485](https://github.com/ClickHouse/ClickHouse/pull/42485) ([Robert Schulze](https://github.com/rschu1ze)).
* This closes [#42453](https://github.com/ClickHouse/ClickHouse/issues/42453). [#42573](https://github.com/ClickHouse/ClickHouse/pull/42573) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix the function `arrayElement` for type `Map` with `Nullable` values and a `Nullable` index. [#42623](https://github.com/ClickHouse/ClickHouse/pull/42623) ([Anton Popov](https://github.com/CurtizJ)).
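
A quick sketch of the `toDateTime64` fix noted above, with the timezone pinned to UTC so the result is deterministic; the expected output is an assumption based on the entry, not taken from the changelog:

```sql
-- Before the fix, a negative integer and the equivalent negative float
-- could produce different results; now both map to the same pre-epoch value.
SELECT toDateTime64(-1, 3, 'UTC') AS from_int,
       toDateTime64(-1.0, 3, 'UTC') AS from_float;
-- Expected (assumption): both return 1969-12-31 23:59:59.000
```
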
#### Bug Fix (user-visible misbehaviour in official stable or prestable release)

* Fix an unexpected table loading error when the partition key contains alias function names, during server upgrade. [#36379](https://github.com/ClickHouse/ClickHouse/pull/36379) ([Amos Bird](https://github.com/amosbird)).

#### Build Improvement

* Fixed a SipHash endianness issue for the s390x platform. [#41372](https://github.com/ClickHouse/ClickHouse/pull/41372) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Enable lib base64 for the ppc64le platform. [#41974](https://github.com/ClickHouse/ClickHouse/pull/41974) ([Suzy Wang](https://github.com/SuzyWangIBMer)).
* Fixed an endianness issue in the T64 compression codec on s390x. [#42314](https://github.com/ClickHouse/ClickHouse/pull/42314) ([Harry Lee](https://github.com/HarryLeeIBM)).

#### NO CL ENTRY

* NO CL ENTRY: 'Revert "Disable parallel s3 multipart upload for part moves."'. [#41681](https://github.com/ClickHouse/ClickHouse/pull/41681) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Revert "Attempt to fix abort from parallel parsing"'. [#42545](https://github.com/ClickHouse/ClickHouse/pull/42545) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* NO CL ENTRY: 'Revert "Low cardinality cases moved to the function for its corresponding type"'. [#42633](https://github.com/ClickHouse/ClickHouse/pull/42633) ([Anton Popov](https://github.com/CurtizJ)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Test for ignore function in PARTITION KEY [#39875](https://github.com/ClickHouse/ClickHouse/pull/39875) ([UnamedRus](https://github.com/UnamedRus)).
* Add fuzzer for table definitions [#40096](https://github.com/ClickHouse/ClickHouse/pull/40096) ([Anton Popov](https://github.com/CurtizJ)).
* Add missing tests for legacy geobase [#40684](https://github.com/ClickHouse/ClickHouse/pull/40684) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove obsolete comment from the config.xml [#41518](https://github.com/ClickHouse/ClickHouse/pull/41518) ([filimonov](https://github.com/filimonov)).
* Resurrect parallel distributed insert select with s3Cluster [#41535](https://github.com/ClickHouse/ClickHouse/pull/41535) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Update runners to a recent version to install on 22.04 [#41556](https://github.com/ClickHouse/ClickHouse/pull/41556) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Refactor wiping sensitive information from logs. [#41562](https://github.com/ClickHouse/ClickHouse/pull/41562) ([Vitaly Baranov](https://github.com/vitlibar)).
* Better S3 logs [#41587](https://github.com/ClickHouse/ClickHouse/pull/41587) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix typos in JSON formats after [#40910](https://github.com/ClickHouse/ClickHouse/issues/40910) [#41614](https://github.com/ClickHouse/ClickHouse/pull/41614) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix drop for KeeperMap [#41616](https://github.com/ClickHouse/ClickHouse/pull/41616) ([Antonio Andelic](https://github.com/antonio2368)).
* Increase default max_suspicious_broken_parts to 100 [#41619](https://github.com/ClickHouse/ClickHouse/pull/41619) ([Denny Crane](https://github.com/den-crane)). (A configuration sketch follows after this list.)
* Release AWS SDK log level + replace one exception [#41649](https://github.com/ClickHouse/ClickHouse/pull/41649) ([alesapin](https://github.com/alesapin)).
* Fix a destruction order for views ThreadStatus [#41650](https://github.com/ClickHouse/ClickHouse/pull/41650) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Add very explicit logging on disk choice for fetch [#41653](https://github.com/ClickHouse/ClickHouse/pull/41653) ([alesapin](https://github.com/alesapin)).
* Fix race between ~BackgroundSchedulePool and ~DNSCacheUpdater [#41654](https://github.com/ClickHouse/ClickHouse/pull/41654) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Add changelog for 22.9 [#41668](https://github.com/ClickHouse/ClickHouse/pull/41668) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Update version after release [#41670](https://github.com/ClickHouse/ClickHouse/pull/41670) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix error message [#41680](https://github.com/ClickHouse/ClickHouse/pull/41680) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add test for setting output_format_json_validate_utf8 [#41691](https://github.com/ClickHouse/ClickHouse/pull/41691) ([Kruglov Pavel](https://github.com/Avogar)).
* Resolve findings from clang-tidy [#41702](https://github.com/ClickHouse/ClickHouse/pull/41702) ([ltrk2](https://github.com/ltrk2)).
* Ignore Keeper errors from ReplicatedMergeTreeAttachThread in stress tests [#41717](https://github.com/ClickHouse/ClickHouse/pull/41717) ([Antonio Andelic](https://github.com/antonio2368)).
* Collect logs in Stress test using clickhouse-local [#41721](https://github.com/ClickHouse/ClickHouse/pull/41721) ([Antonio Andelic](https://github.com/antonio2368)).
* Disable flaky `test_merge_tree_azure_blob_storage` [#41722](https://github.com/ClickHouse/ClickHouse/pull/41722) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Update version_date.tsv and changelogs after v22.9.2.7-stable [#41724](https://github.com/ClickHouse/ClickHouse/pull/41724) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Fix part removal retries [#41728](https://github.com/ClickHouse/ClickHouse/pull/41728) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Try fix azure tests [#41731](https://github.com/ClickHouse/ClickHouse/pull/41731) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix test build [#41732](https://github.com/ClickHouse/ClickHouse/pull/41732) ([Robert Schulze](https://github.com/rschu1ze)).
* Change logging levels in cache [#41733](https://github.com/ClickHouse/ClickHouse/pull/41733) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Revert of "Revert the revert of "ColumnVector: optimize filter with AVX512 VBMI2 compress store" [#40033](https://github.com/ClickHouse/ClickHouse/issues/40033)" [#41752](https://github.com/ClickHouse/ClickHouse/pull/41752) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix SET query parameters formatting [#41755](https://github.com/ClickHouse/ClickHouse/pull/41755) ([Nikolay Degterinsky](https://github.com/evillique)).
* Support to run testcases on macOS [#41760](https://github.com/ClickHouse/ClickHouse/pull/41760) ([Frank Chen](https://github.com/FrankChen021)).
* Bump LLVM from 12 to 13 [#41762](https://github.com/ClickHouse/ClickHouse/pull/41762) ([Robert Schulze](https://github.com/rschu1ze)).
* ColumnVector: re-enable AVX512_VBMI/AVX512_VBMI2 optimized filter and index [#41765](https://github.com/ClickHouse/ClickHouse/pull/41765) ([Guo Wangyang](https://github.com/guowangy)).
* Update 02354_annoy.sql [#41767](https://github.com/ClickHouse/ClickHouse/pull/41767) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix the typo preventing building latest images [#41769](https://github.com/ClickHouse/ClickHouse/pull/41769) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Make automatic download script choose between ARMv8.0 or ARMv8.2 builds [#41775](https://github.com/ClickHouse/ClickHouse/pull/41775) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix tests for docker-ci [#41777](https://github.com/ClickHouse/ClickHouse/pull/41777) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Possible fix for KeeperMap drop [#41784](https://github.com/ClickHouse/ClickHouse/pull/41784) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix drop of completely dropped table [#41789](https://github.com/ClickHouse/ClickHouse/pull/41789) ([alesapin](https://github.com/alesapin)).
* Log git hash during startup [#41790](https://github.com/ClickHouse/ClickHouse/pull/41790) ([Robert Schulze](https://github.com/rschu1ze)).
* Revert "ColumnVector: optimize UInt8 index with AVX512VBMI ([#41247](https://github.com/ClickHouse/ClickHouse/issues/41247))" [#41797](https://github.com/ClickHouse/ClickHouse/pull/41797) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Small fix in dashboard [#41798](https://github.com/ClickHouse/ClickHouse/pull/41798) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Keep the most important log in stress tests [#41821](https://github.com/ClickHouse/ClickHouse/pull/41821) ([alesapin](https://github.com/alesapin)).
* Use copy for some operations instead of hardlinks [#41832](https://github.com/ClickHouse/ClickHouse/pull/41832) ([alesapin](https://github.com/alesapin)).
* Remove unused variable in registerStorageMergeTree.cpp [#41839](https://github.com/ClickHouse/ClickHouse/pull/41839) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix Jepsen [#41845](https://github.com/ClickHouse/ClickHouse/pull/41845) ([Antonio Andelic](https://github.com/antonio2368)).
* Increase `request_timeout_ms` for s3 tests in CI [#41853](https://github.com/ClickHouse/ClickHouse/pull/41853) ([Kseniia Sumarokova](https://github.com/kssenii)).
* tests: fix debug symbols (and possible crashes) for backward compatibility check [#41854](https://github.com/ClickHouse/ClickHouse/pull/41854) ([Azat Khuzhin](https://github.com/azat)).
* Remove two redundant lines [#41856](https://github.com/ClickHouse/ClickHouse/pull/41856) ([alesapin](https://github.com/alesapin)).
* Infer Object type only when allow_experimental_object_type is enabled [#41858](https://github.com/ClickHouse/ClickHouse/pull/41858) ([Kruglov Pavel](https://github.com/Avogar)).
* Add default UNION/EXCEPT/INTERSECT to the echo query text [#41862](https://github.com/ClickHouse/ClickHouse/pull/41862) ([Nikolay Degterinsky](https://github.com/evillique)).
* Consolidate CMake-generated config headers [#41873](https://github.com/ClickHouse/ClickHouse/pull/41873) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix 02267_file_globs_schema_inference.sql flakiness [#41877](https://github.com/ClickHouse/ClickHouse/pull/41877) ([Kruglov Pavel](https://github.com/Avogar)).
* Docs: Remove obsolete modelEvaluate() mention [#41878](https://github.com/ClickHouse/ClickHouse/pull/41878) ([Robert Schulze](https://github.com/rschu1ze)).
* Better exception message for duplicate column names in schema inference [#41885](https://github.com/ClickHouse/ClickHouse/pull/41885) ([Kruglov Pavel](https://github.com/Avogar)).
* Docs: Reference external papers as DOIs [#41886](https://github.com/ClickHouse/ClickHouse/pull/41886) ([Robert Schulze](https://github.com/rschu1ze)).
* Make LDAPR a prerequisite for downloading the ARMv8.2 build [#41897](https://github.com/ClickHouse/ClickHouse/pull/41897) ([Robert Schulze](https://github.com/rschu1ze)).
* Another sync replicas in test_recovery_replica [#41898](https://github.com/ClickHouse/ClickHouse/pull/41898) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* remove unused code [#41921](https://github.com/ClickHouse/ClickHouse/pull/41921) ([flynn](https://github.com/ucasfl)).
* Move all queries for MV creation to the end of queue during recovering [#41932](https://github.com/ClickHouse/ClickHouse/pull/41932) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix broken test_disks_app_func [#41933](https://github.com/ClickHouse/ClickHouse/pull/41933) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Temporarily disable ThreadFuzzer with TSan [#41943](https://github.com/ClickHouse/ClickHouse/pull/41943) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Enable some disabled S3 tests [#41945](https://github.com/ClickHouse/ClickHouse/pull/41945) ([alesapin](https://github.com/alesapin)).
* QOL log improvements [#41947](https://github.com/ClickHouse/ClickHouse/pull/41947) ([Raúl Marín](https://github.com/Algunenano)).
* Fix non-deterministic test results [#41948](https://github.com/ClickHouse/ClickHouse/pull/41948) ([Robert Schulze](https://github.com/rschu1ze)).
* Earlier throw exception in PullingAsyncPipelineExecutor. [#41949](https://github.com/ClickHouse/ClickHouse/pull/41949) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix linker error [#41950](https://github.com/ClickHouse/ClickHouse/pull/41950) ([ltrk2](https://github.com/ltrk2)).
* Bump LLVM from 13 to 14 [#41951](https://github.com/ClickHouse/ClickHouse/pull/41951) ([Robert Schulze](https://github.com/rschu1ze)).
* Update version_date.tsv and changelogs after v22.3.13.80-lts [#41953](https://github.com/ClickHouse/ClickHouse/pull/41953) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v22.7.6.74-stable [#41954](https://github.com/ClickHouse/ClickHouse/pull/41954) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v22.8.6.71-lts [#41955](https://github.com/ClickHouse/ClickHouse/pull/41955) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v22.9.3.18-stable [#41956](https://github.com/ClickHouse/ClickHouse/pull/41956) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Add a warning message to release.py script, require release type [#41975](https://github.com/ClickHouse/ClickHouse/pull/41975) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Rename max_temp_data_on_disk -> max_temporary_data_on_disk [#41984](https://github.com/ClickHouse/ClickHouse/pull/41984) ([Vladimir C](https://github.com/vdimir)).
* Add more checkStackSize calls [#41991](https://github.com/ClickHouse/ClickHouse/pull/41991) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix test 02403_big_http_chunk_size [#41996](https://github.com/ClickHouse/ClickHouse/pull/41996) ([Vitaly Baranov](https://github.com/vitlibar)).
* More sane behavior of part number thresholds override in query level settings [#42001](https://github.com/ClickHouse/ClickHouse/pull/42001) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove useless code [#42004](https://github.com/ClickHouse/ClickHouse/pull/42004) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Refactoring: Uninline some error handling methods [#42010](https://github.com/ClickHouse/ClickHouse/pull/42010) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix warning that ENABLE_REPLXX is unused [#42013](https://github.com/ClickHouse/ClickHouse/pull/42013) ([Robert Schulze](https://github.com/rschu1ze)).
* Drop leftovers of libexecinfo [#42014](https://github.com/ClickHouse/ClickHouse/pull/42014) ([Robert Schulze](https://github.com/rschu1ze)).
* More detailed exception message [#42022](https://github.com/ClickHouse/ClickHouse/pull/42022) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Build against an LLVM version which has clang[-extra-tools], lldb and lld removed [#42023](https://github.com/ClickHouse/ClickHouse/pull/42023) ([Robert Schulze](https://github.com/rschu1ze)).
* Add log message and lower the retry timeout in MergeTreeRestartingThread [#42026](https://github.com/ClickHouse/ClickHouse/pull/42026) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Update amqp-cpp [#42031](https://github.com/ClickHouse/ClickHouse/pull/42031) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix No such key during table drop [#42036](https://github.com/ClickHouse/ClickHouse/pull/42036) ([alesapin](https://github.com/alesapin)).
* Temporarily disable too aggressive tests [#42050](https://github.com/ClickHouse/ClickHouse/pull/42050) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix style check [#42055](https://github.com/ClickHouse/ClickHouse/pull/42055) ([Anton Popov](https://github.com/CurtizJ)).
* Function name normalization fix functions header [#42063](https://github.com/ClickHouse/ClickHouse/pull/42063) ([Maksim Kita](https://github.com/kitaisreal)).
* remove unused virtual keyword [#42065](https://github.com/ClickHouse/ClickHouse/pull/42065) ([flynn](https://github.com/ucasfl)).
* Fix crash in `SummingMergeTree` with `LowCardinality` [#42066](https://github.com/ClickHouse/ClickHouse/pull/42066) ([Anton Popov](https://github.com/CurtizJ)).
* Fix drop of completely dropped table [#42067](https://github.com/ClickHouse/ClickHouse/pull/42067) ([alesapin](https://github.com/alesapin)).
* Fix assertion in bloom filter index [#42072](https://github.com/ClickHouse/ClickHouse/pull/42072) ([Anton Popov](https://github.com/CurtizJ)).
* Ignore core.autocrlf for tests references [#42076](https://github.com/ClickHouse/ClickHouse/pull/42076) ([Azat Khuzhin](https://github.com/azat)).
* Fix progress for INSERT SELECT [#42078](https://github.com/ClickHouse/ClickHouse/pull/42078) ([Azat Khuzhin](https://github.com/azat)).
* Avoid adding extra new line after using fuzzy history search [#42080](https://github.com/ClickHouse/ClickHouse/pull/42080) ([Azat Khuzhin](https://github.com/azat)).
* Add `at` to runner AMI, bump gh runner version [#42082](https://github.com/ClickHouse/ClickHouse/pull/42082) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Use send_metadata instead of send_object_metadata [#42085](https://github.com/ClickHouse/ClickHouse/pull/42085) ([Elena Torró](https://github.com/elenatorro)).
* Docs: Preparations to remove misc statements page [#42086](https://github.com/ClickHouse/ClickHouse/pull/42086) ([Robert Schulze](https://github.com/rschu1ze)).
* Followup for TemporaryDataOnDisk [#42103](https://github.com/ClickHouse/ClickHouse/pull/42103) ([Vladimir C](https://github.com/vdimir)).
* Disable 02122_join_group_by_timeout for debug [#42104](https://github.com/ClickHouse/ClickHouse/pull/42104) ([Vladimir C](https://github.com/vdimir)).
* Update version_date.tsv and changelogs after v22.6.9.11-stable [#42114](https://github.com/ClickHouse/ClickHouse/pull/42114) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* JIT compilation migration to LLVM 15 [#42123](https://github.com/ClickHouse/ClickHouse/pull/42123) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix build without TSA [#42128](https://github.com/ClickHouse/ClickHouse/pull/42128) ([Raúl Marín](https://github.com/Algunenano)).
* Update codespell-ignore-words.list [#42132](https://github.com/ClickHouse/ClickHouse/pull/42132) ([Dan Roscigno](https://github.com/DanRoscigno)).
* Add null pointer checks [#42135](https://github.com/ClickHouse/ClickHouse/pull/42135) ([ltrk2](https://github.com/ltrk2)).
* Revert [#27787](https://github.com/ClickHouse/ClickHouse/issues/27787) [#42136](https://github.com/ClickHouse/ClickHouse/pull/42136) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Follow up for [#42129](https://github.com/ClickHouse/ClickHouse/issues/42129) [#42144](https://github.com/ClickHouse/ClickHouse/pull/42144) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix checking parent for old-format parts [#42147](https://github.com/ClickHouse/ClickHouse/pull/42147) ([alesapin](https://github.com/alesapin)).
* Revert "Resurrect parallel distributed insert select with s3Cluster" [#42150](https://github.com/ClickHouse/ClickHouse/pull/42150) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Docs: Add "TABLE" to CHECK/DESCRIBE statements in sidebar [#42152](https://github.com/ClickHouse/ClickHouse/pull/42152) ([Robert Schulze](https://github.com/rschu1ze)).
* Add logging during merge tree startup [#42163](https://github.com/ClickHouse/ClickHouse/pull/42163) ([alesapin](https://github.com/alesapin)).
* Abort instead of `__builtin_unreachable` in debug builds [#42168](https://github.com/ClickHouse/ClickHouse/pull/42168) ([Alexander Tokmakov](https://github.com/tavplubix)).
* [RFC] Enable -Wshorten-64-to-32 [#42190](https://github.com/ClickHouse/ClickHouse/pull/42190) ([Azat Khuzhin](https://github.com/azat)).
* Fix dialect setting description [#42196](https://github.com/ClickHouse/ClickHouse/pull/42196) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Add a test for #658 [#42197](https://github.com/ClickHouse/ClickHouse/pull/42197) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* use alias for MergeMutateSelectedEntry share ptr [#42211](https://github.com/ClickHouse/ClickHouse/pull/42211) ([Tian Xinhui](https://github.com/xinhuitian)).
* Fix LLVM build [#42216](https://github.com/ClickHouse/ClickHouse/pull/42216) ([Raúl Marín](https://github.com/Algunenano)).
* Exclude comments from style-check defined extern [#42217](https://github.com/ClickHouse/ClickHouse/pull/42217) ([Vladimir C](https://github.com/vdimir)).
* Update safeExit.cpp [#42220](https://github.com/ClickHouse/ClickHouse/pull/42220) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Disable concurrent parts removal [#42222](https://github.com/ClickHouse/ClickHouse/pull/42222) ([alesapin](https://github.com/alesapin)).
* Fail fast on empty URL in HDFS [#42223](https://github.com/ClickHouse/ClickHouse/pull/42223) ([Ilya Yatsishin](https://github.com/qoega)).
* Add a test for [#2389](https://github.com/ClickHouse/ClickHouse/issues/2389) [#42235](https://github.com/ClickHouse/ClickHouse/pull/42235) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Use MultiRead where possible [#42243](https://github.com/ClickHouse/ClickHouse/pull/42243) ([Antonio Andelic](https://github.com/antonio2368)).
* Minor cleanups of LLVM integration [#42249](https://github.com/ClickHouse/ClickHouse/pull/42249) ([Robert Schulze](https://github.com/rschu1ze)).
* remove useless code [#42253](https://github.com/ClickHouse/ClickHouse/pull/42253) ([flynn](https://github.com/ucasfl)).
* Early return of corner cases in selectPartsToMutate function [#42254](https://github.com/ClickHouse/ClickHouse/pull/42254) ([Tian Xinhui](https://github.com/xinhuitian)).
* Refactor the implementation of user-defined functions [#42263](https://github.com/ClickHouse/ClickHouse/pull/42263) ([Vitaly Baranov](https://github.com/vitlibar)).
* assert unused value in test_replicated_merge_tree_compatibility [#42266](https://github.com/ClickHouse/ClickHouse/pull/42266) ([nvartolomei](https://github.com/nvartolomei)).
* Fix Date Interval add/minus over DataTypeDate32 [#42279](https://github.com/ClickHouse/ClickHouse/pull/42279) ([Alfred Xu](https://github.com/sperlingxx)).
* Fix log-level in `clickhouse-disks` [#42302](https://github.com/ClickHouse/ClickHouse/pull/42302) ([Nikolay Degterinsky](https://github.com/evillique)).
* Remove forgotten debug logging [#42313](https://github.com/ClickHouse/ClickHouse/pull/42313) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix another trash in zero-copy replication [#42317](https://github.com/ClickHouse/ClickHouse/pull/42317) ([alesapin](https://github.com/alesapin)).
* go update for diagnostics tool [#42325](https://github.com/ClickHouse/ClickHouse/pull/42325) ([Dale McDiarmid](https://github.com/gingerwizard)).
* Better logging for asynchronous inserts [#42345](https://github.com/ClickHouse/ClickHouse/pull/42345) ([Anton Popov](https://github.com/CurtizJ)).
* Use nfpm packager for archlinux packages [#42349](https://github.com/ClickHouse/ClickHouse/pull/42349) ([Azat Khuzhin](https://github.com/azat)).
* Bump llvm/clang to 15.0.2 [#42351](https://github.com/ClickHouse/ClickHouse/pull/42351) ([Azat Khuzhin](https://github.com/azat)).
* Make getResource() independent from the order of the sections [#42353](https://github.com/ClickHouse/ClickHouse/pull/42353) ([Azat Khuzhin](https://github.com/azat)).
* Smaller threshold for multipart upload part size increase [#42392](https://github.com/ClickHouse/ClickHouse/pull/42392) ([alesapin](https://github.com/alesapin)).
* Better error message for unsupported delimiters in custom formats [#42406](https://github.com/ClickHouse/ClickHouse/pull/42406) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix formatting of `ALTER FREEZE` [#42409](https://github.com/ClickHouse/ClickHouse/pull/42409) ([Anton Popov](https://github.com/CurtizJ)).
* Replace table name in ast fuzzer more often [#42413](https://github.com/ClickHouse/ClickHouse/pull/42413) ([Anton Popov](https://github.com/CurtizJ)).
* Add *-15 tools to cmake.tools for GCC build [#42430](https://github.com/ClickHouse/ClickHouse/pull/42430) ([Ilya Yatsishin](https://github.com/qoega)).
* Deactivate tasks in ReplicatedMergeTree until startup [#42441](https://github.com/ClickHouse/ClickHouse/pull/42441) ([alesapin](https://github.com/alesapin)).
* Revert "Revert [#27787](https://github.com/ClickHouse/ClickHouse/issues/27787)" [#42442](https://github.com/ClickHouse/ClickHouse/pull/42442) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Update woboq_codebrowser location [#42448](https://github.com/ClickHouse/ClickHouse/pull/42448) ([Azat Khuzhin](https://github.com/azat)).
* add mdx and jsx to list of doc files [#42454](https://github.com/ClickHouse/ClickHouse/pull/42454) ([Dan Roscigno](https://github.com/DanRoscigno)).
* Remove code browser docs [#42455](https://github.com/ClickHouse/ClickHouse/pull/42455) ([Dan Roscigno](https://github.com/DanRoscigno)).
* Better workaround for emitting .debug_aranges section [#42457](https://github.com/ClickHouse/ClickHouse/pull/42457) ([Azat Khuzhin](https://github.com/azat)).
* Fix flaky test [#42459](https://github.com/ClickHouse/ClickHouse/pull/42459) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix UBSan report in Julian Day functions [#42464](https://github.com/ClickHouse/ClickHouse/pull/42464) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* rename filesystem_query_cache [#42472](https://github.com/ClickHouse/ClickHouse/pull/42472) ([Han Shukai](https://github.com/KinderRiven)).
* Add convenience typedefs for Date/Date32/DateTime/DateTime64 columns [#42476](https://github.com/ClickHouse/ClickHouse/pull/42476) ([Robert Schulze](https://github.com/rschu1ze)).
* Add error "Destination table is myself" to exception list in BC check [#42479](https://github.com/ClickHouse/ClickHouse/pull/42479) ([Kruglov Pavel](https://github.com/Avogar)).
* Get current clickhouse version without sending query in BC check [#42483](https://github.com/ClickHouse/ClickHouse/pull/42483) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix logical error from welchTTest [#42487](https://github.com/ClickHouse/ClickHouse/pull/42487) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Attempt to fix abort from parallel parsing [#42496](https://github.com/ClickHouse/ClickHouse/pull/42496) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Increase threshold for using physical cores for `max_threads` [#42503](https://github.com/ClickHouse/ClickHouse/pull/42503) ([Nikita Taranov](https://github.com/nickitat)).
* Add a test for [#16827](https://github.com/ClickHouse/ClickHouse/issues/16827) [#42511](https://github.com/ClickHouse/ClickHouse/pull/42511) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#13653](https://github.com/ClickHouse/ClickHouse/issues/13653) [#42512](https://github.com/ClickHouse/ClickHouse/pull/42512) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix aliases [#42514](https://github.com/ClickHouse/ClickHouse/pull/42514) ([Nikolay Degterinsky](https://github.com/evillique)).
* tests: fix 00705_drop_create_merge_tree flakiness [#42522](https://github.com/ClickHouse/ClickHouse/pull/42522) ([Azat Khuzhin](https://github.com/azat)).
* Fix sanitizer reports in integration tests [#42529](https://github.com/ClickHouse/ClickHouse/pull/42529) ([Azat Khuzhin](https://github.com/azat)).
* Fix `KeeperTCPHandler` data race [#42532](https://github.com/ClickHouse/ClickHouse/pull/42532) ([Antonio Andelic](https://github.com/antonio2368)).
* Disable `test_storage_nats`, because it's permanently broken [#42535](https://github.com/ClickHouse/ClickHouse/pull/42535) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Better logs in clickhouse-disks [#42549](https://github.com/ClickHouse/ClickHouse/pull/42549) ([Nikolay Degterinsky](https://github.com/evillique)).
* add lib_fuzzer and lib_fuzzer_no_main to llvm-project build [#42550](https://github.com/ClickHouse/ClickHouse/pull/42550) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Some polishing: replicated merge tree [#42560](https://github.com/ClickHouse/ClickHouse/pull/42560) ([Igor Nikonov](https://github.com/devcrafter)).
* Temporarily disable flaky `test_replicated_merge_tree_hdfs_zero_copy` [#42563](https://github.com/ClickHouse/ClickHouse/pull/42563) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Adapt internal data structures to 512-bit era [#42564](https://github.com/ClickHouse/ClickHouse/pull/42564) ([Nikita Taranov](https://github.com/nickitat)).
* Fix strange code in date monotonicity [#42574](https://github.com/ClickHouse/ClickHouse/pull/42574) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Clear thread::id when ThreadFromGlobalPool exits. [#42577](https://github.com/ClickHouse/ClickHouse/pull/42577) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* ci/stress: fix memory limits overrides for hung check [#42585](https://github.com/ClickHouse/ClickHouse/pull/42585) ([Azat Khuzhin](https://github.com/azat)).
* tests: avoid model overlap for obfuscator [#42586](https://github.com/ClickHouse/ClickHouse/pull/42586) ([Azat Khuzhin](https://github.com/azat)).
* Fix possible segfault in expression parser [#42598](https://github.com/ClickHouse/ClickHouse/pull/42598) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix incorrect trace log line on dict reload [#42609](https://github.com/ClickHouse/ClickHouse/pull/42609) ([filimonov](https://github.com/filimonov)).
* Fix flaky 02458_datediff_date32 test [#42611](https://github.com/ClickHouse/ClickHouse/pull/42611) ([Roman Vasin](https://github.com/rvasin)).
* Revert revert 41268 disable s3 parallel write for part moves to disk s3 [#42617](https://github.com/ClickHouse/ClickHouse/pull/42617) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Try to fix data race on zookeeper vs DDLWorker at server shutdown. [#42620](https://github.com/ClickHouse/ClickHouse/pull/42620) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Add a template for installation issues [#42626](https://github.com/ClickHouse/ClickHouse/pull/42626) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix typo in cmake code related to fuzzing [#42627](https://github.com/ClickHouse/ClickHouse/pull/42627) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix build [#42635](https://github.com/ClickHouse/ClickHouse/pull/42635) ([Anton Popov](https://github.com/CurtizJ)).
* Add .rgignore for test data [#42639](https://github.com/ClickHouse/ClickHouse/pull/42639) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix flaky 02457_datediff_via_unix_epoch test [#42655](https://github.com/ClickHouse/ClickHouse/pull/42655) ([Roman Vasin](https://github.com/rvasin)).
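
A small configuration sketch for the `max_suspicious_broken_parts` entry above; the table `db.events` is hypothetical:

```sql
-- Inspect the current default for this MergeTree setting.
SELECT name, value
FROM system.merge_tree_settings
WHERE name = 'max_suspicious_broken_parts';

-- Override it for a specific (hypothetical) table.
ALTER TABLE db.events MODIFY SETTING max_suspicious_broken_parts = 500;
```
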
18
docs/changelogs/v22.10.2.11-stable.md
Normal file
@ -0,0 +1,18 @@
---
sidebar_position: 1
sidebar_label: 2022
---

# 2022 Changelog

### ClickHouse release v22.10.2.11-stable (d2bfcaba002) FIXME as compared to v22.10.1.1877-stable (98ab5a3c189)

#### Bug Fix (user-visible misbehavior in official stable or prestable release)

* Backported in [#42750](https://github.com/ClickHouse/ClickHouse/issues/42750): A segmentation fault related to DNS & c-ares has been reported. The below error occurred in multiple threads: ``` 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008088 [ 356 ] {} <Fatal> BaseDaemon: ######################################## 2022-09-28 15:41:19.008,"2022.09.28 15:41:19.008147 [ 356 ] {} <Fatal> BaseDaemon: (version 22.8.5.29 (official build), build id: 92504ACA0B8E2267) (from thread 353) (no query) Received signal Segmentation fault (11)" 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008196 [ 356 ] {} <Fatal> BaseDaemon: Address: 0xf Access: write. Address not mapped to object. 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008216 [ 356 ] {} <Fatal> BaseDaemon: Stack trace: 0x188f8212 0x1626851b 0x1626a69e 0x16269b3f 0x16267eab 0x13cf8284 0x13d24afc 0x13c5217e 0x14ec2495 0x15ba440f 0x15b9d13b 0x15bb2699 0x1891ccb3 0x1891e00d 0x18ae0769 0x18ade022 0x7f76aa985609 0x7f76aa8aa133 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008274 [ 356 ] {} <Fatal> BaseDaemon: 2. Poco::Net::IPAddress::family() const @ 0x188f8212 in /usr/bin/clickhouse 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008297 [ 356 ] {} <Fatal> BaseDaemon: 3. ? @ 0x1626851b in /usr/bin/clickhouse 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008309 [ 356 ] {} <Fatal> BaseDaemon: 4. ? @ 0x1626a69e in /usr/bin/clickhouse ```. [#42234](https://github.com/ClickHouse/ClickHouse/pull/42234) ([Arthur Passos](https://github.com/arthurpassos)).
* Backported in [#42793](https://github.com/ClickHouse/ClickHouse/issues/42793): Fix a bug in ParserFunction that could have led to a segmentation fault. [#42724](https://github.com/ClickHouse/ClickHouse/pull/42724) ([Nikolay Degterinsky](https://github.com/evillique)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Always run `BuilderReport` and `BuilderSpecialReport` in all CI types [#42684](https://github.com/ClickHouse/ClickHouse/pull/42684) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

26
docs/changelogs/v22.3.14.18-lts.md
Normal file
@ -0,0 +1,26 @@
---
sidebar_position: 1
sidebar_label: 2022
---

# 2022 Changelog

### ClickHouse release v22.3.14.18-lts (642946f61b2) FIXME as compared to v22.3.13.80-lts (e2708b01fba)

#### Bug Fix

* Backported in [#42432](https://github.com/ClickHouse/ClickHouse/issues/42432): Choose correct aggregation method for LowCardinality with BigInt (a sketch follows below). [#42342](https://github.com/ClickHouse/ClickHouse/pull/42342) ([Duc Canh Le](https://github.com/canhld94)).
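
A sketch of the query shape covered by the aggregation fix above; the table and data are hypothetical, and numeric `LowCardinality` types are assumed to require `allow_suspicious_low_cardinality_types`:

```sql
SET allow_suspicious_low_cardinality_types = 1;

-- Hypothetical table: GROUP BY over LowCardinality wrapping a big integer type.
CREATE TABLE lc_big (x LowCardinality(Int128)) ENGINE = Memory;
INSERT INTO lc_big SELECT number % 3 FROM numbers(10);
SELECT x, count() FROM lc_big GROUP BY x ORDER BY x;
```
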
#### Build/Testing/Packaging Improvement

* Backported in [#42328](https://github.com/ClickHouse/ClickHouse/issues/42328): Update cctz to the latest master, update tzdb to 2020e. [#42273](https://github.com/ClickHouse/ClickHouse/pull/42273) ([Dom Del Nano](https://github.com/ddelnano)).
* Backported in [#42358](https://github.com/ClickHouse/ClickHouse/issues/42358): Update tzdata to 2022e to support the new timezone changes. Palestine transitions are now Saturdays at 02:00. Simplify three Ukraine zones into one. Jordan and Syria switch from +02/+03 with DST to year-round +03. (https://data.iana.org/time-zones/tzdb/NEWS). This closes [#42252](https://github.com/ClickHouse/ClickHouse/issues/42252). [#42327](https://github.com/ClickHouse/ClickHouse/pull/42327) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### Bug Fix (user-visible misbehavior in official stable or prestable release)

* Backported in [#42298](https://github.com/ClickHouse/ClickHouse/issues/42298): Fix a bug with projections and the `aggregate_functions_null_for_empty` setting. This bug is very rare and appears only if you enable the `aggregate_functions_null_for_empty` setting in the server's config. This closes [#41647](https://github.com/ClickHouse/ClickHouse/issues/41647). [#42198](https://github.com/ClickHouse/ClickHouse/pull/42198) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#42592](https://github.com/ClickHouse/ClickHouse/issues/42592): This closes [#42453](https://github.com/ClickHouse/ClickHouse/issues/42453). [#42573](https://github.com/ClickHouse/ClickHouse/pull/42573) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Add a warning message to release.py script, require release type [#41975](https://github.com/ClickHouse/ClickHouse/pull/41975) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Revert [#27787](https://github.com/ClickHouse/ClickHouse/issues/27787) [#42136](https://github.com/ClickHouse/ClickHouse/pull/42136) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).

29
docs/changelogs/v22.3.14.23-lts.md
Normal file
@ -0,0 +1,29 @@
---
sidebar_position: 1
sidebar_label: 2022
---

# 2022 Changelog

### ClickHouse release v22.3.14.23-lts (74956bfee4d) FIXME as compared to v22.3.13.80-lts (e2708b01fba)

#### Improvement

* Backported in [#42527](https://github.com/ClickHouse/ClickHouse/issues/42527): Fix an issue with passing MySQL timeouts for the MySQL database engine and MySQL table function. Closes [#34168](https://github.com/ClickHouse/ClickHouse/issues/34168). [#40751](https://github.com/ClickHouse/ClickHouse/pull/40751) ([Kseniia Sumarokova](https://github.com/kssenii)).

#### Bug Fix

* Backported in [#42432](https://github.com/ClickHouse/ClickHouse/issues/42432): Choose correct aggregation method for LowCardinality with BigInt. [#42342](https://github.com/ClickHouse/ClickHouse/pull/42342) ([Duc Canh Le](https://github.com/canhld94)).

#### Build/Testing/Packaging Improvement

* Backported in [#42328](https://github.com/ClickHouse/ClickHouse/issues/42328): Update cctz to the latest master, update tzdb to 2020e. [#42273](https://github.com/ClickHouse/ClickHouse/pull/42273) ([Dom Del Nano](https://github.com/ddelnano)).
* Backported in [#42358](https://github.com/ClickHouse/ClickHouse/issues/42358): Update tzdata to 2022e to support the new timezone changes. Palestine transitions are now Saturdays at 02:00. Simplify three Ukraine zones into one. Jordan and Syria switch from +02/+03 with DST to year-round +03. (https://data.iana.org/time-zones/tzdb/NEWS). This closes [#42252](https://github.com/ClickHouse/ClickHouse/issues/42252). [#42327](https://github.com/ClickHouse/ClickHouse/pull/42327) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### Bug Fix (user-visible misbehavior in official stable or prestable release)

* Backported in [#42298](https://github.com/ClickHouse/ClickHouse/issues/42298): Fix a bug with projections and the `aggregate_functions_null_for_empty` setting. This bug is very rare and appears only if you enable the `aggregate_functions_null_for_empty` setting in the server's config. This closes [#41647](https://github.com/ClickHouse/ClickHouse/issues/41647). [#42198](https://github.com/ClickHouse/ClickHouse/pull/42198) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#42592](https://github.com/ClickHouse/ClickHouse/issues/42592): This closes [#42453](https://github.com/ClickHouse/ClickHouse/issues/42453). [#42573](https://github.com/ClickHouse/ClickHouse/pull/42573) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Add a warning message to release.py script, require release type [#41975](https://github.com/ClickHouse/ClickHouse/pull/41975) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Revert [#27787](https://github.com/ClickHouse/ClickHouse/issues/27787) [#42136](https://github.com/ClickHouse/ClickHouse/pull/42136) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).

29
docs/changelogs/v22.7.7.24-stable.md
Normal file
@ -0,0 +1,29 @@
---
sidebar_position: 1
sidebar_label: 2022
---

# 2022 Changelog

### ClickHouse release v22.7.7.24-stable (02ad1f979a8) FIXME as compared to v22.7.6.74-stable (c00ffb3c11a)

#### Bug Fix

* Backported in [#42433](https://github.com/ClickHouse/ClickHouse/issues/42433): Choose correct aggregation method for LowCardinality with BigInt. [#42342](https://github.com/ClickHouse/ClickHouse/pull/42342) ([Duc Canh Le](https://github.com/canhld94)).

#### Build/Testing/Packaging Improvement

* Backported in [#42329](https://github.com/ClickHouse/ClickHouse/issues/42329): Update cctz to the latest master, update tzdb to 2020e. [#42273](https://github.com/ClickHouse/ClickHouse/pull/42273) ([Dom Del Nano](https://github.com/ddelnano)).
* Backported in [#42359](https://github.com/ClickHouse/ClickHouse/issues/42359): Update tzdata to 2022e to support the new timezone changes. Palestine transitions are now Saturdays at 02:00. Simplify three Ukraine zones into one. Jordan and Syria switch from +02/+03 with DST to year-round +03. (https://data.iana.org/time-zones/tzdb/NEWS). This closes [#42252](https://github.com/ClickHouse/ClickHouse/issues/42252). [#42327](https://github.com/ClickHouse/ClickHouse/pull/42327) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### Bug Fix (user-visible misbehavior in official stable or prestable release)

* Backported in [#42268](https://github.com/ClickHouse/ClickHouse/issues/42268): Fix reusing of files > 4GB from base backup. [#42146](https://github.com/ClickHouse/ClickHouse/pull/42146) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#42299](https://github.com/ClickHouse/ClickHouse/issues/42299): Fix a bug with projections and the `aggregate_functions_null_for_empty` setting. This bug is very rare and appears only if you enable the `aggregate_functions_null_for_empty` setting in the server's config. This closes [#41647](https://github.com/ClickHouse/ClickHouse/issues/41647). [#42198](https://github.com/ClickHouse/ClickHouse/pull/42198) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#42386](https://github.com/ClickHouse/ClickHouse/issues/42386): `ALTER UPDATE` of attached part (with columns different from table schema) could create an invalid `columns.txt` metadata on disk. Reading from such part could fail with errors or return invalid data. Fixes [#42161](https://github.com/ClickHouse/ClickHouse/issues/42161). [#42319](https://github.com/ClickHouse/ClickHouse/pull/42319) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#42498](https://github.com/ClickHouse/ClickHouse/issues/42498): The setting `additional_table_filters` was not applied to `Distributed` storage. Fixes [#41692](https://github.com/ClickHouse/ClickHouse/issues/41692). [#42322](https://github.com/ClickHouse/ClickHouse/pull/42322) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#42593](https://github.com/ClickHouse/ClickHouse/issues/42593): This closes [#42453](https://github.com/ClickHouse/ClickHouse/issues/42453). [#42573](https://github.com/ClickHouse/ClickHouse/pull/42573) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Add a warning message to release.py script, require release type [#41975](https://github.com/ClickHouse/ClickHouse/pull/41975) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Revert [#27787](https://github.com/ClickHouse/ClickHouse/issues/27787) [#42136](https://github.com/ClickHouse/ClickHouse/pull/42136) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).

37
docs/changelogs/v22.8.7.34-lts.md
Normal file
@ -0,0 +1,37 @@
---
sidebar_position: 1
sidebar_label: 2022
---

# 2022 Changelog

### ClickHouse release v22.8.7.34-lts (3c38e5e8ab9) FIXME as compared to v22.8.6.71-lts (7bf38a43e30)

#### Improvement

* Backported in [#42096](https://github.com/ClickHouse/ClickHouse/issues/42096): Replace back `clickhouse su` command with `sudo -u` in start in order to respect limits in `/etc/security/limits.conf`. [#41847](https://github.com/ClickHouse/ClickHouse/pull/41847) ([Eugene Konkov](https://github.com/ekonkov)).

#### Bug Fix

* Backported in [#42434](https://github.com/ClickHouse/ClickHouse/issues/42434): Choose correct aggregation method for LowCardinality with BigInt. [#42342](https://github.com/ClickHouse/ClickHouse/pull/42342) ([Duc Canh Le](https://github.com/canhld94)).

#### Build/Testing/Packaging Improvement

* Backported in [#42296](https://github.com/ClickHouse/ClickHouse/issues/42296): Update cctz to the latest master, update tzdb to 2020e. [#42273](https://github.com/ClickHouse/ClickHouse/pull/42273) ([Dom Del Nano](https://github.com/ddelnano)).
* Backported in [#42360](https://github.com/ClickHouse/ClickHouse/issues/42360): Update tzdata to 2022e to support the new timezone changes. Palestine transitions are now Saturdays at 02:00. Simplify three Ukraine zones into one. Jordan and Syria switch from +02/+03 with DST to year-round +03. (https://data.iana.org/time-zones/tzdb/NEWS). This closes [#42252](https://github.com/ClickHouse/ClickHouse/issues/42252). [#42327](https://github.com/ClickHouse/ClickHouse/pull/42327) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### Bug Fix (user-visible misbehavior in official stable or prestable release)

* Backported in [#42489](https://github.com/ClickHouse/ClickHouse/issues/42489): Removed skipping of mutations in unaffected partitions of `MergeTree` tables, because this feature never worked correctly and might cause resurrection of finished mutations. [#40589](https://github.com/ClickHouse/ClickHouse/pull/40589) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#42121](https://github.com/ClickHouse/ClickHouse/issues/42121): Fixed "Part ... intersects part ..." error that might happen in extremely rare cases if replica was restarted just after detaching some part as broken. [#41741](https://github.com/ClickHouse/ClickHouse/pull/41741) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Prevent crash when passing wrong aggregation states to groupBitmap*. [#41972](https://github.com/ClickHouse/ClickHouse/pull/41972) ([Raúl Marín](https://github.com/Algunenano)).
* Fix read bytes/rows in X-ClickHouse-Summary with materialized views. [#41973](https://github.com/ClickHouse/ClickHouse/pull/41973) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#42269](https://github.com/ClickHouse/ClickHouse/issues/42269): Fix reusing of files > 4GB from base backup. [#42146](https://github.com/ClickHouse/ClickHouse/pull/42146) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#42300](https://github.com/ClickHouse/ClickHouse/issues/42300): Fix a bug with projections and the `aggregate_functions_null_for_empty` setting. This bug is very rare and appears only if you enable the `aggregate_functions_null_for_empty` setting in the server's config. This closes [#41647](https://github.com/ClickHouse/ClickHouse/issues/41647). [#42198](https://github.com/ClickHouse/ClickHouse/pull/42198) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#42387](https://github.com/ClickHouse/ClickHouse/issues/42387): `ALTER UPDATE` of attached part (with columns different from table schema) could create an invalid `columns.txt` metadata on disk. Reading from such part could fail with errors or return invalid data. Fixes [#42161](https://github.com/ClickHouse/ClickHouse/issues/42161). [#42319](https://github.com/ClickHouse/ClickHouse/pull/42319) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#42499](https://github.com/ClickHouse/ClickHouse/issues/42499): The setting `additional_table_filters` was not applied to `Distributed` storage. Fixes [#41692](https://github.com/ClickHouse/ClickHouse/issues/41692). [#42322](https://github.com/ClickHouse/ClickHouse/pull/42322) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#42571](https://github.com/ClickHouse/ClickHouse/issues/42571): Fix buffer overflow in the processing of Decimal data types. This closes [#42451](https://github.com/ClickHouse/ClickHouse/issues/42451). [#42465](https://github.com/ClickHouse/ClickHouse/pull/42465) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#42594](https://github.com/ClickHouse/ClickHouse/issues/42594): This closes [#42453](https://github.com/ClickHouse/ClickHouse/issues/42453). [#42573](https://github.com/ClickHouse/ClickHouse/pull/42573) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Add a warning message to release.py script, require release type [#41975](https://github.com/ClickHouse/ClickHouse/pull/41975) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Revert [#27787](https://github.com/ClickHouse/ClickHouse/issues/27787) [#42136](https://github.com/ClickHouse/ClickHouse/pull/42136) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).

13
docs/changelogs/v22.8.8.3-lts.md
Normal file
@ -0,0 +1,13 @@
---
sidebar_position: 1
sidebar_label: 2022
---

# 2022 Changelog

### ClickHouse release v22.8.8.3-lts (ac5a6cababc) FIXME as compared to v22.8.7.34-lts (3c38e5e8ab9)

#### Bug Fix (user-visible misbehavior in official stable or prestable release)

* Backported in [#42677](https://github.com/ClickHouse/ClickHouse/issues/42677): keeper-fix: fix race in accessing logs while snapshot is being installed. [#40627](https://github.com/ClickHouse/ClickHouse/pull/40627) ([Antonio Andelic](https://github.com/antonio2368)).

33
docs/changelogs/v22.9.4.32-stable.md
Normal file
@ -0,0 +1,33 @@
---
sidebar_position: 1
sidebar_label: 2022
---

# 2022 Changelog

### ClickHouse release v22.9.4.32-stable (3db8bcf1a70) FIXME as compared to v22.9.3.18-stable (0cb4b15d2fa)

#### Bug Fix

* Backported in [#42435](https://github.com/ClickHouse/ClickHouse/issues/42435): Choose correct aggregation method for LowCardinality with BigInt. [#42342](https://github.com/ClickHouse/ClickHouse/pull/42342) ([Duc Canh Le](https://github.com/canhld94)).

#### Build/Testing/Packaging Improvement

* Backported in [#42297](https://github.com/ClickHouse/ClickHouse/issues/42297): Update cctz to the latest master, update tzdb to 2020e. [#42273](https://github.com/ClickHouse/ClickHouse/pull/42273) ([Dom Del Nano](https://github.com/ddelnano)).
* Backported in [#42361](https://github.com/ClickHouse/ClickHouse/issues/42361): Update tzdata to 2022e to support the new timezone changes. Palestine transitions are now Saturdays at 02:00. Simplify three Ukraine zones into one. Jordan and Syria switch from +02/+03 with DST to year-round +03. (https://data.iana.org/time-zones/tzdb/NEWS). This closes [#42252](https://github.com/ClickHouse/ClickHouse/issues/42252). [#42327](https://github.com/ClickHouse/ClickHouse/pull/42327) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### Bug Fix (user-visible misbehavior in official stable or prestable release)

* Backported in [#42122](https://github.com/ClickHouse/ClickHouse/issues/42122): Fixed "Part ... intersects part ..." error that might happen in extremely rare cases if replica was restarted just after detaching some part as broken. [#41741](https://github.com/ClickHouse/ClickHouse/pull/41741) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#41938](https://github.com/ClickHouse/ClickHouse/issues/41938): Don't allow creating or altering merge tree tables with virtual column name `_row_exists`, which is reserved for lightweight delete. Fixed [#41716](https://github.com/ClickHouse/ClickHouse/issues/41716). [#41763](https://github.com/ClickHouse/ClickHouse/pull/41763) ([Jianmei Zhang](https://github.com/zhangjmruc)).
* Backported in [#42179](https://github.com/ClickHouse/ClickHouse/issues/42179): Fix reusing of files > 4GB from base backup. [#42146](https://github.com/ClickHouse/ClickHouse/pull/42146) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#42301](https://github.com/ClickHouse/ClickHouse/issues/42301): Fix a bug with projections and the `aggregate_functions_null_for_empty` setting. This bug is very rare and appears only if you enable the `aggregate_functions_null_for_empty` setting in the server's config. This closes [#41647](https://github.com/ClickHouse/ClickHouse/issues/41647). [#42198](https://github.com/ClickHouse/ClickHouse/pull/42198) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#42388](https://github.com/ClickHouse/ClickHouse/issues/42388): `ALTER UPDATE` of attached part (with columns different from table schema) could create an invalid `columns.txt` metadata on disk. Reading from such part could fail with errors or return invalid data. Fixes [#42161](https://github.com/ClickHouse/ClickHouse/issues/42161). [#42319](https://github.com/ClickHouse/ClickHouse/pull/42319) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#42500](https://github.com/ClickHouse/ClickHouse/issues/42500): The setting `additional_table_filters` was not applied to `Distributed` storage. Fixes [#41692](https://github.com/ClickHouse/ClickHouse/issues/41692). [#42322](https://github.com/ClickHouse/ClickHouse/pull/42322) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#42581](https://github.com/ClickHouse/ClickHouse/issues/42581): This reverts [#40217](https://github.com/ClickHouse/ClickHouse/issues/40217) which introduced a regression in date/time functions. [#42367](https://github.com/ClickHouse/ClickHouse/pull/42367) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#42572](https://github.com/ClickHouse/ClickHouse/issues/42572): Fix buffer overflow in the processing of Decimal data types. This closes [#42451](https://github.com/ClickHouse/ClickHouse/issues/42451). [#42465](https://github.com/ClickHouse/ClickHouse/pull/42465) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#42595](https://github.com/ClickHouse/ClickHouse/issues/42595): This closes [#42453](https://github.com/ClickHouse/ClickHouse/issues/42453). [#42573](https://github.com/ClickHouse/ClickHouse/pull/42573) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Add a warning message to release.py script, require release type [#41975](https://github.com/ClickHouse/ClickHouse/pull/41975) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Revert [#27787](https://github.com/ClickHouse/ClickHouse/issues/27787) [#42136](https://github.com/ClickHouse/ClickHouse/pull/42136) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).

@ -105,7 +105,7 @@ ninja
Example for Fedora Rawhide:

``` bash
sudo yum update
yum --nogpg install git cmake make clang-c++ python3
sudo yum --nogpg install git cmake make clang python3 ccache
git clone --recursive https://github.com/ClickHouse/ClickHouse.git
mkdir build && cd build
cmake ../ClickHouse
@ -86,7 +86,7 @@ node1 :) SELECT materialize(hostName()) AS host, groupArray(n) FROM r.d GROUP BY

``` text
┌─hosts─┬─groupArray(n)─┐
│ node1 │ [1,3,5,7,9]   │
│ node3 │ [1,3,5,7,9]   │
│ node2 │ [0,2,4,6,8]   │
└───────┴───────────────┘
```

@ -139,7 +139,7 @@ The following settings can be specified in configuration file for given endpoint
- `use_environment_credentials` — If set to `true`, S3 client will try to obtain credentials from environment variables and [Amazon EC2](https://en.wikipedia.org/wiki/Amazon_Elastic_Compute_Cloud) metadata for given endpoint. Optional, default value is `false`.
- `region` — Specifies S3 region name. Optional.
- `use_insecure_imds_request` — If set to `true`, S3 client will use insecure IMDS request while obtaining credentials from Amazon EC2 metadata. Optional, default value is `false`.
- `header` — Adds specified HTTP header to a request to given endpoint. Optional, can be speficied multiple times.
- `header` — Adds specified HTTP header to a request to given endpoint. Optional, can be specified multiple times.
- `server_side_encryption_customer_key_base64` — If specified, required headers for accessing S3 objects with SSE-C encryption will be set. Optional.
- `max_single_read_retries` — The maximum number of attempts during single read. Default value is `4`. Optional.

@ -68,36 +68,57 @@ In the results of `SELECT` query, the values of `AggregateFunction` type have im

## Example of an Aggregated Materialized View {#example-of-an-aggregated-materialized-view}

`AggregatingMergeTree` materialized view that watches the `test.visits` table:
We will create the table `test.visits` that contains the raw data:

``` sql
CREATE MATERIALIZED VIEW test.basic
ENGINE = AggregatingMergeTree() PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate)
CREATE TABLE test.visits
(
    StartDate DateTime64 NOT NULL,
    CounterID UInt64,
    Sign Nullable(Int32),
    UserID Nullable(Int32)
) ENGINE = MergeTree ORDER BY (StartDate, CounterID);
```

`AggregatingMergeTree` materialized view that watches the `test.visits` table, and uses the `AggregateFunction` type:

``` sql
CREATE MATERIALIZED VIEW test.mv_visits
(
    StartDate DateTime64 NOT NULL,
    CounterID UInt64,
    Visits AggregateFunction(sum, Nullable(Int32)),
    Users AggregateFunction(uniq, Nullable(Int32))
)
ENGINE = AggregatingMergeTree() ORDER BY (StartDate, CounterID)
AS SELECT
    CounterID,
    StartDate,
    sumState(Sign) AS Visits,
    CounterID,
    sumState(Sign) AS Visits,
    uniqState(UserID) AS Users
FROM test.visits
GROUP BY CounterID, StartDate;
GROUP BY StartDate, CounterID;
```

Inserting data into the `test.visits` table.

``` sql
INSERT INTO test.visits ...
INSERT INTO test.visits (StartDate, CounterID, Sign, UserID)
VALUES (1667446031, 1, 3, 4)
INSERT INTO test.visits (StartDate, CounterID, Sign, UserID)
VALUES (1667446031, 1, 6, 3)
```

The data are inserted in both the table and view `test.basic` that will perform the aggregation.
The data are inserted in both the table and the materialized view `test.mv_visits`.

To get the aggregated data, we need to execute a query such as `SELECT ... GROUP BY ...` from the view `test.basic`:
To get the aggregated data, we need to execute a query such as `SELECT ... GROUP BY ...` from the materialized view `test.mv_visits`:

``` sql
SELECT
    StartDate,
    sumMerge(Visits) AS Visits,
    uniqMerge(Users) AS Users
FROM test.basic
FROM test.mv_visits
GROUP BY StartDate
ORDER BY StartDate;
```

@ -4,25 +4,39 @@ sidebar_label: Cell Towers
sidebar_position: 3
title: "Cell Towers"
---
import ConnectionDetails from '@site/docs/en/_snippets/_gather_your_details_http.mdx';

import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
import CodeBlock from '@theme/CodeBlock';
import ActionsMenu from '@site/docs/en/_snippets/_service_actions_menu.md';
import SQLConsoleDetail from '@site/docs/en/_snippets/_launch_sql_console.md';
import SupersetDocker from '@site/docs/en/_snippets/_add_superset_detail.md';

This dataset is from [OpenCellid](https://www.opencellid.org/) - The world's largest Open Database of Cell Towers.
## Goal

In this guide you will learn how to:
- Load the OpenCelliD data in ClickHouse
- Connect Apache Superset to ClickHouse
- Build a dashboard based on data available in the dataset

Here is a preview of the dashboard created in this guide:

![Dashboard of cell towers by radio type in mcc 204](@site/docs/en/getting-started/example-datasets/images/superset-cell-tower-dashboard.png)

## Get the Dataset {#get-the-dataset}

This dataset is from [OpenCelliD](https://www.opencellid.org/) - The world's largest Open Database of Cell Towers.

As of 2021, it contains more than 40 million records about cell towers (GSM, LTE, UMTS, etc.) around the world with their geographical coordinates and metadata (country code, network, etc).

OpenCelliD Project is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License, and we redistribute a snapshot of this dataset under the terms of the same license. The up-to-date version of the dataset is available to download after sign in.


## Get the Dataset {#get-the-dataset}

<Tabs groupId="deployMethod">
<TabItem value="serverless" label="ClickHouse Cloud" default>

### Load the sample data

ClickHouse Cloud provides an easy-button for uploading this dataset from S3. Log in to your ClickHouse Cloud organization, or create a free trial at [ClickHouse.cloud](https://clickhouse.cloud).
<ActionsMenu menu="Load Data" />

@ -30,13 +44,33 @@ Choose the **Cell Towers** dataset from the **Sample data** tab, and **Load data

![Load cell towers dataset](@site/docs/en/_snippets/images/cloud-load-data-sample.png)

Examine the schema of the cell_towers table:
### Examine the schema of the cell_towers table
```sql
DESCRIBE TABLE cell_towers
```

<SQLConsoleDetail />

This is the output of `DESCRIBE`. Further down in this guide the field type choices will be described.
```response
┌─name──────────┬─type──────────────────────────────────────────────────────────────────┬
│ radio         │ Enum8('' = 0, 'CDMA' = 1, 'GSM' = 2, 'LTE' = 3, 'NR' = 4, 'UMTS' = 5) │
│ mcc           │ UInt16                                                                 │
│ net           │ UInt16                                                                 │
│ area          │ UInt16                                                                 │
│ cell          │ UInt64                                                                 │
│ unit          │ Int16                                                                  │
│ lon           │ Float64                                                                │
│ lat           │ Float64                                                                │
│ range         │ UInt32                                                                 │
│ samples       │ UInt32                                                                 │
│ changeable    │ UInt8                                                                  │
│ created       │ DateTime                                                               │
│ updated       │ DateTime                                                               │
│ averageSignal │ UInt8                                                                  │
└───────────────┴───────────────────────────────────────────────────────────────────────┴
```

</TabItem>
<TabItem value="selfmanaged" label="Self-managed">

@ -86,7 +120,7 @@ clickhouse-client --query "INSERT INTO cell_towers FORMAT CSVWithNames" < cell_t
</TabItem>
</Tabs>

## Example queries {#examples}
## Run some example queries {#examples}

1. A number of cell towers by type:

@ -127,13 +161,13 @@ SELECT mcc, count() FROM cell_towers GROUP BY mcc ORDER BY count() DESC LIMIT 10
10 rows in set. Elapsed: 0.019 sec. Processed 43.28 million rows, 86.55 MB (2.33 billion rows/s., 4.65 GB/s.)
```

So, the top countries are: the USA, Germany, and Russia.
Based on the above query and the [MCC list](https://en.wikipedia.org/wiki/Mobile_country_code), the countries with the most cell towers are: the USA, Germany, and Russia.

You may want to create an [External Dictionary](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) in ClickHouse to decode these values.
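
As a rough sketch of that idea (the `mcc_names` dictionary and its `mcc_codes` source table are hypothetical — you would create and fill the source table yourself from the MCC list linked above):

```sql
-- Hypothetical source table with one row per mobile country code.
CREATE TABLE mcc_codes (mcc UInt64, country String)
ENGINE = MergeTree ORDER BY mcc;

CREATE DICTIONARY mcc_names
(
    mcc UInt64,
    country String
)
PRIMARY KEY mcc
SOURCE(CLICKHOUSE(TABLE 'mcc_codes'))
LAYOUT(FLAT())
LIFETIME(MIN 0 MAX 0);

-- Decode the numeric codes on the fly:
SELECT dictGet('mcc_names', 'country', toUInt64(mcc)) AS country, count()
FROM cell_towers
GROUP BY country
ORDER BY count() DESC
LIMIT 10;
```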

## Use case: Incorporate geo data {#use-case}

Using `pointInPolygon` function.
Using the [`pointInPolygon`](/docs/en/sql-reference/functions/geo/coordinates.md/#pointinpolygon) function.

1. Create a table where we will store polygons:

@ -224,6 +258,110 @@ WHERE pointInPolygon((lon, lat), (SELECT * FROM moscow))
1 rows in set. Elapsed: 0.067 sec. Processed 43.28 million rows, 692.42 MB (645.83 million rows/s., 10.33 GB/s.)
```

The data is also available for interactive queries in the [Playground](https://play.clickhouse.com/play?user=play), [example](https://play.clickhouse.com/play?user=play#U0VMRUNUIG1jYywgY291bnQoKSBGUk9NIGNlbGxfdG93ZXJzIEdST1VQIEJZIG1jYyBPUkRFUiBCWSBjb3VudCgpIERFU0M=).
## Review of the schema

Although you cannot create temporary tables there.
Before building visualizations in Superset, have a look at the columns that you will use. This dataset primarily provides the location (Longitude and Latitude) and radio types at mobile cellular towers worldwide. The column descriptions can be found in the [community forum](https://community.opencellid.org/t/documenting-the-columns-in-the-downloadable-cells-database-csv/186). The columns used in the visualizations that will be built are described below.

Here is a description of the columns taken from the OpenCelliD forum:

| Column       | Description                                            |
|--------------|--------------------------------------------------------|
| radio        | Technology generation: CDMA, GSM, UMTS, 5G NR          |
| mcc          | Mobile Country Code: `204` is The Netherlands          |
| lon          | Longitude: With Latitude, approximate tower location   |
| lat          | Latitude: With Longitude, approximate tower location   |

:::tip mcc
To find your MCC check [Mobile network codes](https://en.wikipedia.org/wiki/Mobile_country_code), and use the three digits in the **Mobile country code** column.
:::

The schema for this table was designed for compact storage on disk and query speed.
- The `radio` data is stored as an `Enum8` (`UInt8`) rather than a string.
- `mcc` or Mobile country code, is stored as a `UInt16` as we know the range is 1 - 999.
- `lon` and `lat` are `Float64`.

None of the other fields are used in the queries or visualizations in this guide, but they are described in the forum linked above if you are interested.
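
A quick, hedged illustration of the `Enum8` point above — the column filters like a string but is stored as a single byte (output depends on the snapshot you loaded):

```sql
-- Filter by the human-readable label:
SELECT count() FROM cell_towers WHERE radio = 'LTE';

-- Inspect the one-byte code behind each label:
SELECT radio, CAST(radio, 'Int8') AS code FROM cell_towers LIMIT 3;
```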

## Build visualizations with Apache Superset

Superset is easy to run from Docker. If you already have Superset running, all you need to do is add ClickHouse Connect with `pip install clickhouse-connect`. If you need to install Superset, open the **Launch Apache Superset in Docker** directly below.

<SupersetDocker />

To build a Superset dashboard using the OpenCelliD dataset you should:
- Add your ClickHouse service as a Superset **database**
- Add the table **cell_towers** as a Superset **dataset**
- Create some **charts**
- Add the charts to a **dashboard**

### Add your ClickHouse service as a Superset database

<ConnectionDetails />

In Superset a database can be added by choosing the database type, and then providing the connection details. Open Superset and look for the **+**; it has a menu with **Data** and then **Connect database** options.

![Add a database](@site/docs/en/getting-started/example-datasets/images/superset-add.png)

Choose **ClickHouse Connect** from the list:

![Choose clickhouse connect as database type](@site/docs/en/getting-started/example-datasets/images/superset-choose-a-database.png)

:::note
If **ClickHouse Connect** is not one of your options, then you will need to install it. The command is `pip install clickhouse-connect`, and more info is [available here](https://pypi.org/project/clickhouse-connect/).
:::

#### Add your connection details:

:::tip
Make sure that you set **SSL** on when connecting to ClickHouse Cloud or other ClickHouse systems that enforce the use of SSL.
:::

![Add ClickHouse as a Superset datasource](@site/docs/en/getting-started/example-datasets/images/superset-connect-a-database.png)

### Add the table **cell_towers** as a Superset **dataset**

In Superset a **dataset** maps to a table within a database. Click on add a dataset and choose your ClickHouse service, the database containing your table (`default`), and choose the `cell_towers` table:

![Add cell_towers table as a dataset](@site/docs/en/getting-started/example-datasets/images/superset-add-dataset.png)

### Create some **charts**

When you choose to add a chart in Superset you have to specify the dataset (`cell_towers`) and the chart type. Since the OpenCelliD dataset provides longitude and latitude coordinates for cell towers we will create a **Map** chart. The **deck.gl Scatterplot** type is suited to this dataset as it works well with dense data points on a map.

![Create a map in Superset](@site/docs/en/getting-started/example-datasets/images/superset-create-map.png)

#### Specify the query used for the map

A deck.gl Scatterplot requires a longitude and latitude, and one or more filters can also be applied to the query. In this example two filters are applied, one for cell towers with UMTS radios, and one for the Mobile country code assigned to The Netherlands.
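
In SQL terms, the chart configured here corresponds roughly to the following query (a sketch for orientation, not literally what Superset sends):

```sql
SELECT lon, lat
FROM cell_towers
WHERE radio = 'UMTS' AND mcc = 204;
```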

The fields `lon` and `lat` contain the longitude and latitude:

![Specify longitude and latitude fields](@site/docs/en/getting-started/example-datasets/images/superset-lon-lat.png)

Add a filter with `mcc` = `204` (or substitute any other `mcc` value):

![Filter on MCC 204](@site/docs/en/getting-started/example-datasets/images/superset-mcc-204.png)

Add a filter with `radio` = `'UMTS'` (or substitute any other `radio` value, you can see the choices in the output of `DESCRIBE TABLE cell_towers`):

![Filter on radio = UMTS](@site/docs/en/getting-started/example-datasets/images/superset-radio-umts.png)

This is the full configuration for the chart that filters on `radio = 'UMTS'` and `mcc = 204`:

![Chart for UMTS radios in MCC 204](@site/docs/en/getting-started/example-datasets/images/superset-umts-netherlands.png)

Click on **UPDATE CHART** to render the visualization.

### Add the charts to a **dashboard**

This screenshot shows cell tower locations with LTE, UMTS, and GSM radios. The charts are all created in the same way and they are added to a dashboard.

![Dashboard of cell towers by radio type in mcc 204](@site/docs/en/getting-started/example-datasets/images/superset-cell-tower-dashboard.png)

:::tip
The data is also available for interactive queries in the [Playground](https://play.clickhouse.com/play?user=play).

This [example](https://play.clickhouse.com/play?user=play#U0VMRUNUIG1jYywgY291bnQoKSBGUk9NIGNlbGxfdG93ZXJzIEdST1VQIEJZIG1jYyBPUkRFUiBCWSBjb3VudCgpIERFU0M=) will populate the username and even the query for you.

Although you cannot create tables in the Playground, you can run all of the queries and even use Superset (adjust the hostname and port number).
:::
BIN
docs/en/getting-started/example-datasets/images/superset-add.png
Normal file
After Width: | Height: | Size: 35 KiB
@ -33,7 +33,7 @@ CREATE TABLE trips (
    tip_amount Float32,
    tolls_amount Float32,
    total_amount Float32,
    payment_type Enum('CSH' = 1, 'CRE' = 2, 'NOC' = 3, 'DIS' = 4),
    payment_type Enum('CSH' = 1, 'CRE' = 2, 'NOC' = 3, 'DIS' = 4, 'UNK' = 5),
    pickup_ntaname LowCardinality(String),
    dropoff_ntaname LowCardinality(String)
)
@ -63,7 +63,7 @@ SELECT
    payment_type,
    pickup_ntaname,
    dropoff_ntaname
FROM url(
FROM s3(
    'https://datasets-documentation.s3.eu-west-3.amazonaws.com/nyc-taxi/trips_{0..2}.gz',
    'TabSeparatedWithNames'
)
@ -4,7 +4,7 @@ sidebar_label: Recipes Dataset
title: "Recipes Dataset"
---

RecipeNLG dataset is available for download [here](https://recipenlg.cs.put.poznan.pl/dataset). It contains 2.2 million recipes. The size is slightly less than 1 GB.
The RecipeNLG dataset is available for download [here](https://recipenlg.cs.put.poznan.pl/dataset). It contains 2.2 million recipes. The size is slightly less than 1 GB.

## Download and Unpack the Dataset

@ -128,6 +128,24 @@ clickhouse-client # or "clickhouse-client --password" if you set up a password.
</details>

<details>
<summary>Migration Method for installing the deb-packages</summary>

```bash
sudo apt-key del E0C56BD4
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 8919F6BD2B48D754
echo "deb https://packages.clickhouse.com/deb stable main" | sudo tee \
    /etc/apt/sources.list.d/clickhouse.list
sudo apt-get update

sudo apt-get install -y clickhouse-server clickhouse-client

sudo service clickhouse-server start
clickhouse-client # or "clickhouse-client --password" if you set up a password.
```

</details>

You can replace `stable` with `lts` to use different [release kinds](/docs/en/faq/operations/production.md) based on your needs.

You can also download and install packages manually from [here](https://packages.clickhouse.com/deb/pool/main/c/).

@ -1,9 +1,12 @@
---
slug: /en/operations/backup
sidebar_position: 49
sidebar_label: Data backup and restore
title: Data backup and restore
---

[//]: # (This file is included in Manage > Backups)

- [Backup to a local disk](#backup-to-a-local-disk)
- [Configuring backup/restore to use an S3 endpoint](#configuring-backuprestore-to-use-an-s3-endpoint)
- [Backup/restore using an S3 disk](#backuprestore-using-an-s3-disk)
- [Alternatives](#alternatives)

## Background

While [replication](../engines/table-engines/mergetree-family/replication.md) provides protection from hardware failures, it does not protect against human errors: accidental deletion of data, deletion of the wrong table or a table on the wrong cluster, and software bugs that result in incorrect data processing or data corruption. In many cases mistakes like these will affect all replicas. ClickHouse has built-in safeguards to prevent some types of mistakes — for example, by default [you can’t just drop tables with a MergeTree-like engine containing more than 50 Gb of data](server-configuration-parameters/settings.md#max-table-size-to-drop). However, these safeguards do not cover all possible cases and can be circumvented.
@ -15,7 +18,9 @@ Each company has different resources available and business requirements, so the
Keep in mind that if you backed something up and never tried to restore it, chances are that restore will not work properly when you actually need it (or at least it will take longer than business can tolerate). So whatever backup approach you choose, make sure to automate the restore process as well, and practice it on a spare ClickHouse cluster regularly.
:::

## Configure a backup destination
## Backup to a local disk

### Configure a backup destination

In the examples below you will see the backup destination specified like `Disk('backups', '1.zip')`. To prepare the destination, add a file to `/etc/clickhouse-server/config.d/backup_disk.xml` specifying the backup destination. For example, this file defines a disk named `backups` and then adds that disk to the **backups > allowed_disk** list:

@ -39,7 +44,7 @@ In the examples below you will see the backup destination specified like `Disk('
</clickhouse>
```

## Parameters
### Parameters

Backups can be either full or incremental, and can include tables (including materialized views, projections, and dictionaries) and databases. Backups can be synchronous (default) or asynchronous. They can be compressed. Backups can be password protected.

@ -52,7 +57,7 @@ The BACKUP and RESTORE statements take a list of DATABASE and TABLE names, a des
- `password` for the file on disk
- `base_backup`: the destination of the previous backup of this source. For example, `Disk('backups', '1.zip')`
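
For instance, a minimal sketch of the asynchronous form mentioned above (the file name `async_example.zip` is made up): the statement returns an `id` and `status` immediately instead of blocking until the backup finishes.

```sql
BACKUP TABLE test.table TO Disk('backups', 'async_example.zip') ASYNC
```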
## Usage examples
### Usage examples

Backup and then restore a table:
```
@ -81,7 +86,7 @@ RESTORE TABLE test.table AS test.table2 FROM Disk('backups', '1.zip')
BACKUP TABLE test.table3 AS test.table4 TO Disk('backups', '2.zip')
```

## Incremental backups
### Incremental backups

Incremental backups can be taken by specifying the `base_backup`.
:::note
@ -100,7 +105,7 @@ RESTORE TABLE test.table AS test.table2
FROM Disk('backups', 'incremental-a.zip');
```

## Assign a password to the backup
### Assign a password to the backup

Backups written to disk can have a password applied to the file:
```
@ -116,7 +121,7 @@ RESTORE TABLE test.table
SETTINGS password='qwerty'
```

## Compression settings
### Compression settings

If you would like to specify the compression method or level:
```
@ -125,14 +130,14 @@ BACKUP TABLE test.table
SETTINGS compression_method='lzma', compression_level=3
```

## Restore specific partitions
### Restore specific partitions
If specific partitions associated with a table need to be restored, these can be specified. To restore partitions 2 and 3 from backup:
```
RESTORE TABLE test.table PARTITIONS '2', '3'
FROM Disk('backups', 'filename.zip')
```

## Check the status of backups
### Check the status of backups

The backup command returns an `id` and `status`, and that `id` can be used to get the status of the backup. This is very useful to check the progress of long ASYNC backups. The example below shows a failure that happened when trying to overwrite an existing backup file:
```sql
@ -171,13 +176,118 @@ end_time: 2022-08-30 09:21:46
1 row in set. Elapsed: 0.002 sec.
```

## Backup to S3
## Configuring BACKUP/RESTORE to use an S3 Endpoint

It is possible to `BACKUP`/`RESTORE` to S3, but this disk should be configured
in a proper way, since by default you will need to backup metadata from local
disk to make backup full.
To write backups to an S3 bucket you need three pieces of information:
- S3 endpoint,
  for example `https://mars-doc-test.s3.amazonaws.com/backup-S3/`
- Access key ID,
  for example `ABC123`
- Secret access key,
  for example `Abc+123`

First of all, you need to configure S3 disk in a special way:
:::note
Creating an S3 bucket is covered in [Use S3 Object Storage as a ClickHouse disk](/docs/en/integrations/data-ingestion/s3/configuring-s3-for-clickhouse-use.md). Come back to this doc after saving the policy; there is no need to configure ClickHouse to use the S3 bucket.
:::

The destination for a backup will be specified like this:
```
S3('<S3 endpoint>/<directory>', '<Access key ID>', '<Secret access key>')
```

```sql
CREATE TABLE data
(
    `key` Int,
    `value` String,
    `array` Array(String)
)
ENGINE = MergeTree
ORDER BY tuple()
```

```sql
INSERT INTO data SELECT *
FROM generateRandom('key Int, value String, array Array(String)')
LIMIT 1000
```

### Create a base (initial) backup

Incremental backups require a _base_ backup to start from; this example will be used
later as the base backup. The first parameter of the S3 destination is the S3 endpoint followed by the directory within the bucket to use for this backup. In this example the directory is named `my_backup`.

```sql
BACKUP TABLE data TO S3('https://mars-doc-test.s3.amazonaws.com/backup-S3/my_backup', 'ABC123', 'Abc+123')
```

```response
┌─id───────────────────────────────────┬─status─────────┐
│ de442b75-a66c-4a3c-a193-f76f278c70f3 │ BACKUP_CREATED │
└──────────────────────────────────────┴────────────────┘
```

### Add more data

Incremental backups are populated with the difference between the base backup and the current content of the table being backed up. Add more data before taking the incremental backup:

```sql
INSERT INTO data SELECT *
FROM generateRandom('key Int, value String, array Array(String)')
LIMIT 100
```
### Take an incremental backup

This backup command is similar to the base backup, but adds `SETTINGS base_backup` and the location of the base backup. Note that the destination for the incremental backup is not the same directory as the base; it is the same endpoint with a different target directory within the bucket. The base backup is in `my_backup`, and the incremental will be written to `my_incremental`:
```sql
BACKUP TABLE data TO S3('https://mars-doc-test.s3.amazonaws.com/backup-S3/my_incremental', 'ABC123', 'Abc+123') SETTINGS base_backup = S3('https://mars-doc-test.s3.amazonaws.com/backup-S3/my_backup', 'ABC123', 'Abc+123')
```

```response
┌─id───────────────────────────────────┬─status─────────┐
│ f6cd3900-850f-41c9-94f1-0c4df33ea528 │ BACKUP_CREATED │
└──────────────────────────────────────┴────────────────┘
```
### Restore from the incremental backup

This command restores the incremental backup into a new table, `data3`. Note that when an incremental backup is restored, the base backup is also included. Specify only the incremental backup when restoring:
```sql
RESTORE TABLE data AS data3 FROM S3('https://mars-doc-test.s3.amazonaws.com/backup-S3/my_incremental', 'ABC123', 'Abc+123')
```

```response
┌─id───────────────────────────────────┬─status───┐
│ ff0c8c39-7dff-4324-a241-000796de11ca │ RESTORED │
└──────────────────────────────────────┴──────────┘
```

### Verify the count

There were two inserts into the original table `data`, one with 1,000 rows and one with 100 rows, for a total of 1,100. Verify that the restored table has 1,100 rows:
```sql
SELECT count()
FROM data3
```
```response
┌─count()─┐
│    1100 │
└─────────┘
```

### Verify the content
This compares the content of the original table, `data`, with the restored table, `data3`:
```sql
SELECT throwIf((
        SELECT groupArray(tuple(*))
        FROM data
    ) != (
        SELECT groupArray(tuple(*))
        FROM data3
    ), 'Data does not match after BACKUP/RESTORE')
```
## BACKUP/RESTORE Using an S3 Disk

It is also possible to `BACKUP`/`RESTORE` to S3 by configuring an S3 disk in the ClickHouse storage configuration. Configure the disk like this by adding a file to `/etc/clickhouse-server/config.d`:

```xml
<clickhouse>
@ -1,9 +1,5 @@
---
slug: /en/operations/troubleshooting
sidebar_position: 46
sidebar_label: Troubleshooting
title: Troubleshooting
---

[//]: # (This file is included in FAQ > Troubleshooting)

- [Installation](#troubleshooting-installation-errors)
- [Connecting to the server](#troubleshooting-accepts-no-connections)
@ -28,18 +24,34 @@ sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 8919F6BD2B48D7
sudo apt-get update
```

### You Get the Unsupported Architecture Warning with Apt-get {#you-get-the-unsupported-architecture-warning-with-apt-get}
### You Get Different Warnings with `apt-get update` {#you-get-different-warnings-with-apt-get-update}

- The completed warning message is as follows:
- The completed warning messages are one of the following:

```
N: Skipping acquire of configured file 'main/binary-i386/Packages' as repository 'https://packages.clickhouse.com/deb stable InRelease' doesn't support architecture 'i386'
```

```
E: Failed to fetch https://packages.clickhouse.com/deb/dists/stable/main/binary-amd64/Packages.gz File has unexpected size (30451 != 28154). Mirror sync in progress?
```

```
E: Repository 'https://packages.clickhouse.com/deb stable InRelease' changed its 'Origin' value from 'Artifactory' to 'ClickHouse'
E: Repository 'https://packages.clickhouse.com/deb stable InRelease' changed its 'Label' value from 'Artifactory' to 'ClickHouse'
N: Repository 'https://packages.clickhouse.com/deb stable InRelease' changed its 'Suite' value from 'stable' to ''
N: This must be accepted explicitly before updates for this repository can be applied. See apt-secure(8) manpage for details.
```

```
Err:11 https://packages.clickhouse.com/deb stable InRelease
  400  Bad Request [IP: 172.66.40.249 443]
```

To resolve the above issue, please use the following script:

```bash
sudo rm /var/lib/apt/lists/packages.clickhouse.com_* /var/lib/dpkg/arch
sudo rm /var/lib/apt/lists/packages.clickhouse.com_* /var/lib/dpkg/arch /var/lib/apt/lists/partial/packages.clickhouse.com_*
sudo apt-get clean
sudo apt-get autoclean
```
@ -1,10 +1,7 @@
---
slug: /en/operations/update
sidebar_position: 47
sidebar_label: ClickHouse Upgrade
---

# ClickHouse Upgrade
[//]: # (This file is included in Manage > Updates)

## Self-managed ClickHouse Upgrade

If ClickHouse was installed from `deb` packages, execute the following commands on the server:

@ -126,7 +126,7 @@ clickhouse keeper --config /etc/your_path_to_config/config.xml

ClickHouse Keeper also provides 4lw commands which are almost the same as in ZooKeeper. Each command is composed of four letters such as `mntr`, `stat` etc. There are some more interesting commands: `stat` gives some general information about the server and connected clients, while `srvr` and `cons` give extended details on server and connections respectively.

The 4lw commands have a white list configuration `four_letter_word_white_list` which has default value `conf,cons,crst,envi,ruok,srst,srvr,stat,wchc,wchs,dirs,mntr,isro`.
The 4lw commands have a white list configuration `four_letter_word_white_list` which has default value `conf,cons,crst,envi,ruok,srst,srvr,stat,wchs,dirs,mntr,isro,rcvr,apiv,csnp,lgif`.

You can issue the commands to ClickHouse Keeper via telnet or nc, at the client port.

@ -309,7 +309,26 @@ Sessions with Ephemerals (1):
/clickhouse/task_queue/ddl
```

## [experimental] Migration from ZooKeeper {#migration-from-zookeeper}
- `csnp`: Schedule a snapshot creation task. Returns the last committed log index of the scheduled snapshot on success, or `Failed to schedule snapshot creation task.` on failure. Note that the `lgif` command can help you determine whether the snapshot is done.

```
100
```

- `lgif`: Keeper log information. `first_log_idx`: my first log index in log store; `first_log_term`: my first log term; `last_log_idx`: my last log index in log store; `last_log_term`: my last log term; `last_committed_log_idx`: my last committed log index in state machine; `leader_committed_log_idx`: leader's committed log index from my perspective; `target_committed_log_idx`: target log index that should be committed to; `last_snapshot_idx`: the largest committed log index in the last snapshot.

```
first_log_idx 1
first_log_term 1
last_log_idx 101
last_log_term 1
last_committed_log_idx 100
leader_committed_log_idx 101
target_committed_log_idx 101
last_snapshot_idx 50
```

## Migration from ZooKeeper {#migration-from-zookeeper}

Seamless migration from ZooKeeper to ClickHouse Keeper is impossible: you have to stop your ZooKeeper cluster, convert the data, and start ClickHouse Keeper. The `clickhouse-keeper-converter` tool allows converting ZooKeeper logs and snapshots to a ClickHouse Keeper snapshot. It works only with ZooKeeper > 3.4. Steps for migration:

@ -2939,7 +2939,7 @@ Possible values:

- 0 — Projection optimization disabled.
- 1 — Projection optimization enabled.

Default value: `0`.
Default value: `1`.

## force_optimize_projection {#force-optimize-projection}

@ -178,7 +178,7 @@ Columns:

- `view_definition` ([String](../../sql-reference/data-types/string.md)) — `SELECT` query for view.
- `check_option` ([String](../../sql-reference/data-types/string.md)) — `NONE`, no checking.
- `is_updatable` ([Enum8](../../sql-reference/data-types/enum.md)) — `NO`, the view is not updated.
- `is_insertable_into` ([Enum8](../../sql-reference/data-types/enum.md)) — Shows whether the created view is [materialized](../../sql-reference/statements/create/view/#materialized). Possible values:
- `is_insertable_into` ([Enum8](../../sql-reference/data-types/enum.md)) — Shows whether the created view is [materialized](../../sql-reference/statements/create/view.md/#materialized-view). Possible values:
    - `NO` — The created view is not materialized.
    - `YES` — The created view is materialized.
- `is_trigger_updatable` ([Enum8](../../sql-reference/data-types/enum.md)) — `NO`, the trigger is not updated.
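
A hedged sketch of how the columns above might be inspected for the views in the current database:

```sql
SELECT table_name, is_updatable, is_insertable_into
FROM information_schema.views
WHERE table_schema = currentDatabase();
```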
@ -68,6 +68,5 @@ thread_id: 54

**See Also**

- [Managing ReplicatedMergeTree Tables](../../sql-reference/statements/system/#query-language-system-replicated)
- [Managing ReplicatedMergeTree Tables](../../sql-reference/statements/system.md/#managing-replicatedmergetree-tables)

[Original article](https://clickhouse.com/docs/en/operations/system_tables/replicated_fetches) <!--hide-->

@ -24,6 +24,7 @@ Columns:
    - `DOUBLE_SHA1_PASSWORD`
    - `LDAP`
    - `KERBEROS`
    - `SSL_CERTIFICATE`
- `profiles` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — The list of profiles set for all roles and/or users.
- `roles` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — The list of roles to which the profile is applied.
- `settings` ([Array](../../sql-reference/data-types/array.md)([Tuple](../../sql-reference/data-types/tuple.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md), [String](../../sql-reference/data-types/string.md)))) — Settings that were changed when the client logged in/out.

@ -12,7 +12,7 @@ Columns:

- `storage` ([String](../../sql-reference/data-types/string.md)) — Path to the storage of users. Configured in the `access_control_path` parameter.

- `auth_type` ([Enum8](../../sql-reference/data-types/enum.md)('no_password' = 0,'plaintext_password' = 1, 'sha256_password' = 2, 'double_sha1_password' = 3)) — Shows the authentication type. There are multiple ways of user identification: with no password, with plain text password, with [SHA256](https://ru.wikipedia.org/wiki/SHA-2)-encoded password or with [double SHA-1](https://ru.wikipedia.org/wiki/SHA-1)-encoded password.
- `auth_type` ([Enum8](../../sql-reference/data-types/enum.md)('no_password' = 0,'plaintext_password' = 1, 'sha256_password' = 2, 'double_sha1_password' = 3, 'ldap' = 4, 'kerberos' = 5, 'ssl_certificate' = 6)) — Shows the authentication type. There are multiple ways of user identification: with no password, with plain text password, with [SHA256](https://ru.wikipedia.org/wiki/SHA-2)-encoded password or with [double SHA-1](https://ru.wikipedia.org/wiki/SHA-1)-encoded password.

- `auth_params` ([String](../../sql-reference/data-types/string.md)) — Authentication parameters in the JSON format depending on the `auth_type`.
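
For example, a minimal sketch listing these columns for the configured users:

```sql
SELECT name, storage, auth_type, auth_params
FROM system.users;
```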
@ -109,56 +109,38 @@ In the report you can find:

`clickhouse-benchmark` can compare performance for two running ClickHouse servers.

To use the comparison mode, specify endpoints of both servers by two pairs of `--host`, `--port` keys. Keys matched together by position in arguments list, the first `--host` is matched with the first `--port` and so on. `clickhouse-benchmark` establishes connections to both servers, then sends queries. Each query addressed to a randomly selected server. The results are shown for each server separately.
To use the comparison mode, specify endpoints of both servers by two pairs of `--host`, `--port` keys. Keys are matched together by position in the argument list; the first `--host` is matched with the first `--port` and so on. `clickhouse-benchmark` establishes connections to both servers, then sends queries. Each query is addressed to a randomly selected server. The results are shown in a table.

## Example {#clickhouse-benchmark-example}

``` bash
$ echo "SELECT * FROM system.numbers LIMIT 10000000 OFFSET 10000000" | clickhouse-benchmark -i 10
$ echo "SELECT * FROM system.numbers LIMIT 10000000 OFFSET 10000000" | clickhouse-benchmark --host=localhost --port=9001 --host=localhost --port=9000 -i 10
```

``` text
Loaded 1 queries.

Queries executed: 6.
Queries executed: 5.

localhost:9000, queries 6, QPS: 6.153, RPS: 123398340.957, MiB/s: 941.455, result RPS: 61532982.200, result MiB/s: 469.459.
localhost:9001, queries 2, QPS: 3.764, RPS: 75446929.370, MiB/s: 575.614, result RPS: 37639659.982, result MiB/s: 287.168.
localhost:9000, queries 3, QPS: 3.815, RPS: 76466659.385, MiB/s: 583.394, result RPS: 38148392.297, result MiB/s: 291.049.

0.000% 0.159 sec.
10.000% 0.159 sec.
20.000% 0.159 sec.
30.000% 0.160 sec.
40.000% 0.160 sec.
50.000% 0.162 sec.
60.000% 0.164 sec.
70.000% 0.165 sec.
80.000% 0.166 sec.
90.000% 0.166 sec.
95.000% 0.167 sec.
99.000% 0.167 sec.
99.900% 0.167 sec.
99.990% 0.167 sec.
0.000% 0.258 sec. 0.250 sec.
10.000% 0.258 sec. 0.250 sec.
20.000% 0.258 sec. 0.250 sec.
30.000% 0.258 sec. 0.267 sec.
40.000% 0.258 sec. 0.267 sec.
50.000% 0.273 sec. 0.267 sec.
60.000% 0.273 sec. 0.267 sec.
70.000% 0.273 sec. 0.267 sec.
80.000% 0.273 sec. 0.269 sec.
90.000% 0.273 sec. 0.269 sec.
95.000% 0.273 sec. 0.269 sec.
99.000% 0.273 sec. 0.269 sec.
99.900% 0.273 sec. 0.269 sec.
99.990% 0.273 sec. 0.269 sec.


Queries executed: 10.

localhost:9000, queries 10, QPS: 6.082, RPS: 121959604.568, MiB/s: 930.478, result RPS: 60815551.642, result MiB/s: 463.986.

0.000% 0.159 sec.
10.000% 0.159 sec.
20.000% 0.160 sec.
30.000% 0.163 sec.
40.000% 0.164 sec.
50.000% 0.165 sec.
60.000% 0.166 sec.
70.000% 0.166 sec.
80.000% 0.167 sec.
90.000% 0.167 sec.
95.000% 0.170 sec.
99.000% 0.172 sec.
99.900% 0.172 sec.
99.990% 0.172 sec.
No difference proven at 99.5% confidence
```

[Original article](https://clickhouse.com/docs/en/operations/utilities/clickhouse-benchmark.md) <!--hide-->

@ -303,17 +303,25 @@ or

CREATE DICTIONARY somedict (
    id UInt64,
    first Date,
    last Date
    last Date,
    advertiser_id UInt64
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(TABLE 'date_table'))
LIFETIME(MIN 1 MAX 1000)
LAYOUT(RANGE_HASHED())
RANGE(MIN first MAX last)
```

To work with these dictionaries, you need to pass an additional argument to the `dictGetT` function, for which a range is selected:
To work with these dictionaries, you need to pass an additional argument to the `dictGet` function, for which a range is selected:

``` sql
dictGetT('dict_name', 'attr_name', id, date)
dictGet('dict_name', 'attr_name', id, date)
```

Query example:

``` sql
SELECT dictGet('somedict', 'advertiser_id', 1, '2022-10-20 23:20:10.000'::DateTime64::UInt64);
```

This function returns the value for the specified `id`s and the date range that includes the passed date.
@ -14,8 +14,10 @@ Example of a polygon dictionary configuration:

<dictionary>
    <structure>
        <key>
            <name>key</name>
            <type>Array(Array(Array(Array(Float64))))</type>
            <attribute>
                <name>key</name>
                <type>Array(Array(Array(Array(Float64))))</type>
            </attribute>
        </key>

        <attribute>
@ -1068,7 +1068,7 @@ Example:

SELECT timeSlots(toDateTime('2012-01-01 12:20:00'), toUInt32(600));
SELECT timeSlots(toDateTime('1980-12-12 21:01:02', 'UTC'), toUInt32(600), 299);
SELECT timeSlots(toDateTime64('1980-12-12 21:01:02.1234', 4, 'UTC'), toDecimal64(600.1, 1), toDecimal64(299, 0));
```

``` text
┌─timeSlots(toDateTime('2012-01-01 12:20:00'), toUInt32(600))─┐
│ ['2012-01-01 12:00:00','2012-01-01 12:30:00'] │
@ -1244,7 +1244,7 @@ Result:

└──────────────────────────┘
```

When there are two arguments: the first is an [Integer](../../sql-reference/data-types/int-uint.md) or [DateTime](../../sql-reference/data-types/datetime.md), the second is a constant format string — it acts in the same way as [formatDateTime](#formatdatetime) and returns the [String](../../sql-reference/data-types/string.md#string) type.
When there are two or three arguments: the first is an [Integer](../../sql-reference/data-types/int-uint.md), [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md), the second is a constant format string and the third is an optional constant time zone string — it acts in the same way as [formatDateTime](#formatdatetime) and returns the [String](../../sql-reference/data-types/string.md#string) type.

For example:
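
The diff is cut off before the example itself; a sketch of the kind of call the paragraph describes (output value assumes the `UTC` time zone):

``` sql
-- Two arguments: format the Unix timestamp with a fixed format string.
SELECT FROM_UNIXTIME(423543535, '%Y-%m-%d %R:%S') AS formatted;

-- Three arguments: additionally pin the time zone.
SELECT FROM_UNIXTIME(423543535, '%Y-%m-%d %R:%S', 'UTC') AS formatted_utc; -- 1983-06-04 02:58:55
```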
@ -8,70 +8,69 @@ title: "Geo Functions"

## Geographical Coordinates Functions

- [greatCircleDistance](./coordinates.md#greatCircleDistance)
- [geoDistance](./coordinates.md#geoDistance)
- [greatCircleAngle](./coordinates.md#greatCircleAngle)
- [pointInEllipses](./coordinates.md#pointInEllipses)
- [pointInPolygon](./coordinates.md#pointInPolygon)
- [greatCircleDistance](./coordinates.md#greatcircledistance)
- [geoDistance](./coordinates.md#geodistance)
- [greatCircleAngle](./coordinates.md#greatcircleangle)
- [pointInEllipses](./coordinates.md#pointinellipses)
- [pointInPolygon](./coordinates.md#pointinpolygon)

## Geohash Functions

- [geohashEncode](./geohash.md#geohashEncode)
- [geohashDecode](./geohash.md#geohashDecode)
- [geohashesInBox](./geohash.md#geohashesInBox)
- [geohashEncode](./geohash.md#geohashencode)
- [geohashDecode](./geohash.md#geohashdecode)
- [geohashesInBox](./geohash.md#geohashesinbox)

## H3 Indexes Functions

- [h3IsValid](./h3.md#h3IsValid)
- [h3GetResolution](./h3.md#h3GetResolution)
- [h3EdgeAngle](./h3.md#h3EdgeAngle)
- [h3EdgeLengthM](./h3.md#h3EdgeLengthM)
- [h3EdgeLengthKm](./h3.md#h3EdgeLengthKm)
- [geoToH3](./h3.md#geoToH3)
- [h3ToGeo](./h3.md#h3ToGeo)
- [h3ToGeoBoundary](./h3.md#h3ToGeoBoundary)
- [h3kRing](./h3.md#h3kRing)
- [h3GetBaseCell](./h3.md#h3GetBaseCell)
- [h3HexAreaM2](./h3.md#h3HexAreaM2)
- [h3HexAreaKm2](./h3.md#h3HexAreaKm2)
- [h3IndexesAreNeighbors](./h3.md#h3IndexesAreNeighbors)
- [h3ToChildren](./h3.md#h3ToChildren)
- [h3ToParent](./h3.md#h3ToParent)
- [h3ToString](./h3.md#h3ToString)
- [stringToH3](./h3.md#stringToH3)
- [h3GetResolution](./h3.md#h3GetResolution)
- [h3IsResClassIII](./h3.md#h3IsResClassIII)
- [h3IsPentagon](./h3.md#h3IsPentagon)
- [h3GetFaces](./h3.md#h3GetFaces)
- [h3CellAreaM2](./h3.md#h3CellAreaM2)
- [h3CellAreaRads2](./h3.md#h3CellAreaRads2)
- [h3ToCenterChild](./h3.md#h3ToCenterChild)
- [h3ExactEdgeLengthM](./h3.md#h3ExactEdgeLengthM)
- [h3ExactEdgeLengthKm](./h3.md#h3ExactEdgeLengthKm)
- [h3ExactEdgeLengthRads](./h3.md#h3ExactEdgeLengthRads)
- [h3NumHexagons](./h3.md#h3NumHexagons)
- [h3Line](./h3.md#h3Line)
- [h3Distance](./h3.md#h3Distance)
- [h3HexRing](./h3.md#h3HexRing)
- [h3GetUnidirectionalEdge](./h3.md#h3GetUnidirectionalEdge)
- [h3UnidirectionalEdgeIsValid](./h3.md#h3UnidirectionalEdgeIsValid)
- [h3GetOriginIndexFromUnidirectionalEdge](./h3.md#h3GetOriginIndexFromUnidirectionalEdge)
- [h3GetDestinationIndexFromUnidirectionalEdge](./h3.md#h3GetDestinationIndexFromUnidirectionalEdge)
- [h3GetIndexesFromUnidirectionalEdge](./h3.md#h3GetIndexesFromUnidirectionalEdge)
- [h3GetUnidirectionalEdgesFromHexagon](./h3.md#h3GetUnidirectionalEdgesFromHexagon)
- [h3GetUnidirectionalEdgeBoundary](./h3.md#h3GetUnidirectionalEdgeBoundary)
- [h3IsValid](./h3.md#h3isvalid)
- [h3GetResolution](./h3.md#h3getresolution)
- [h3EdgeAngle](./h3.md#h3edgeangle)
- [h3EdgeLengthM](./h3.md#h3edgelengthm)
- [h3EdgeLengthKm](./h3.md#h3edgelengthkm)
- [geoToH3](./h3.md#geotoh3)
- [h3ToGeo](./h3.md#h3togeo)
- [h3ToGeoBoundary](./h3.md#h3togeoboundary)
- [h3kRing](./h3.md#h3kring)
- [h3GetBaseCell](./h3.md#h3getbasecell)
- [h3HexAreaM2](./h3.md#h3hexaream2)
- [h3HexAreaKm2](./h3.md#h3hexareakm2)
- [h3IndexesAreNeighbors](./h3.md#h3indexesareneighbors)
- [h3ToChildren](./h3.md#h3tochildren)
- [h3ToParent](./h3.md#h3toparent)
- [h3ToString](./h3.md#h3tostring)
- [stringToH3](./h3.md#stringtoh3)
- [h3GetResolution](./h3.md#h3getresolution)
- [h3IsResClassIII](./h3.md#h3isresclassiii)
- [h3IsPentagon](./h3.md#h3ispentagon)
- [h3GetFaces](./h3.md#h3getfaces)
- [h3CellAreaM2](./h3.md#h3cellaream2)
- [h3CellAreaRads2](./h3.md#h3cellarearads2)
- [h3ToCenterChild](./h3.md#h3tocenterchild)
- [h3ExactEdgeLengthM](./h3.md#h3exactedgelengthm)
- [h3ExactEdgeLengthKm](./h3.md#h3exactedgelengthkm)
- [h3ExactEdgeLengthRads](./h3.md#h3exactedgelengthrads)
- [h3NumHexagons](./h3.md#h3numhexagons)
- [h3Line](./h3.md#h3line)
- [h3Distance](./h3.md#h3distance)
- [h3HexRing](./h3.md#h3hexring)
- [h3GetUnidirectionalEdge](./h3.md#h3getunidirectionaledge)
- [h3UnidirectionalEdgeIsValid](./h3.md#h3unidirectionaledgeisvalid)
- [h3GetOriginIndexFromUnidirectionalEdge](./h3.md#h3getoriginindexfromunidirectionaledge)
- [h3GetDestinationIndexFromUnidirectionalEdge](./h3.md#h3getdestinationindexfromunidirectionaledge)
- [h3GetIndexesFromUnidirectionalEdge](./h3.md#h3getindexesfromunidirectionaledge)
- [h3GetUnidirectionalEdgesFromHexagon](./h3.md#h3getunidirectionaledgesfromhexagon)
- [h3GetUnidirectionalEdgeBoundary](./h3.md#h3getunidirectionaledgeboundary)

## S2 Index Functions

- [geoToS2](./s2.md#geoToS2)
- [s2ToGeo](./s2.md#s2ToGeo)
- [s2GetNeighbors](./s2.md#s2GetNeighbors)
- [s2CellsIntersect](./s2.md#s2CellsIntersect)
- [s2CapContains](./s2.md#s2CapContains)
- [s2CapUnion](./s2.md#s2CapUnion)
- [s2RectAdd](./s2.md#s2RectAdd)
- [s2RectContains](./s2.md#s2RectContains)
- [s2RectUinion](./s2.md#s2RectUinion)
- [s2RectIntersection](./s2.md#s2RectIntersection)
- [geoToS2](./s2.md#geotos2)
- [s2ToGeo](./s2.md#s2togeo)
- [s2GetNeighbors](./s2.md#s2getneighbors)
- [s2CellsIntersect](./s2.md#s2cellsintersect)
- [s2CapContains](./s2.md#s2capcontains)
- [s2CapUnion](./s2.md#s2capunion)
- [s2RectAdd](./s2.md#s2rectadd)
- [s2RectContains](./s2.md#s2rectcontains)
- [s2RectUnion](./s2.md#s2rectunion)
- [s2RectIntersection](./s2.md#s2rectintersection)

[Original article](https://clickhouse.com/docs/en/sql-reference/functions/geo/) <!--hide-->
@ -593,6 +593,27 @@ LIMIT 10

└────────────────┴─────────┘
```

## formatReadableDecimalSize(x)

Accepts the size (number of bytes). Returns a rounded size with a suffix (KB, MB, etc.) as a string.

Example:

``` sql
SELECT
    arrayJoin([1, 1024, 1024*1024, 192851925]) AS filesize_bytes,
    formatReadableDecimalSize(filesize_bytes) AS filesize
```

``` text
┌─filesize_bytes─┬─filesize───┐
│              1 │ 1.00 B     │
│           1024 │ 1.02 KB    │
│        1048576 │ 1.05 MB    │
│      192851925 │ 192.85 MB  │
└────────────────┴────────────┘
```

## formatReadableSize(x)

Accepts the size (number of bytes). Returns a rounded size with a suffix (KiB, MiB, etc.) as a string.
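
A sketch of the analogous call for the binary-suffix variant (same input values as the example above; the comments show what 1024-based rounding yields):

``` sql
SELECT
    arrayJoin([1, 1024, 1024*1024, 192851925]) AS filesize_bytes,
    formatReadableSize(filesize_bytes) AS filesize; -- 1.00 B, 1.00 KiB, 1.00 MiB, 183.92 MiB
```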

@ -571,13 +571,13 @@ Similar to base58Decode, but returns an empty string in case of error.

## base64Encode(s)

Encodes the ‘s’ string into base64.
Encodes ‘s’ FixedString or String into base64.

Alias: `TO_BASE64`.

## base64Decode(s)

Decodes the base64-encoded string ‘s’ into the original string. Raises an exception in case of failure.
Decodes the base64-encoded FixedString or String ‘s’ into the original string. Raises an exception in case of failure.

Alias: `FROM_BASE64`.
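
A minimal round trip showing both functions (the encoded value is the standard base64 of the input):

``` sql
SELECT base64Encode('clickhouse') AS encoded;       -- Y2xpY2tob3VzZQ==
SELECT base64Decode('Y2xpY2tob3VzZQ==') AS decoded; -- clickhouse
```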

@ -1150,3 +1150,13 @@ A text with tags .

The content within <b>CDATA</b>
Do Nothing for 2 Minutes 2:00
```

## ascii(s) {#ascii}

Returns the ASCII code point of the first character of ‘s’. The result type is Int32.

If ‘s’ is empty, the result is 0. If the first character is not an ASCII character or not part of the Latin-1 Supplement range of UTF-16, the result is undefined.
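
For instance (both results follow directly from the definition above):

``` sql
SELECT ascii('ClickHouse') AS code; -- 67, the code point of 'C'
SELECT ascii('') AS empty_code;     -- 0 for an empty string
```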
@ -6,28 +6,29 @@ sidebar_label: For Replacing in Strings

# Functions for Searching and Replacing in Strings

:::note
Functions for [searching](../../sql-reference/functions/string-search-functions.md) and [other manipulations with strings](../../sql-reference/functions/string-functions.md) are described separately.
:::

## replaceOne(haystack, pattern, replacement)

Replaces the first occurrence, if it exists, of the ‘pattern’ substring in ‘haystack’ with the ‘replacement’ substring.
Hereafter, ‘pattern’ and ‘replacement’ must be constants.
Replaces the first occurrence of the substring ‘pattern’ (if it exists) in ‘haystack’ by the ‘replacement’ string.
‘pattern’ and ‘replacement’ must be constants.

## replaceAll(haystack, pattern, replacement), replace(haystack, pattern, replacement)

Replaces all occurrences of the ‘pattern’ substring in ‘haystack’ with the ‘replacement’ substring.
Replaces all occurrences of the substring ‘pattern’ in ‘haystack’ by the ‘replacement’ string.

## replaceRegexpOne(haystack, pattern, replacement)

Replacement using the ‘pattern’ regular expression. A re2 regular expression.
Replaces only the first occurrence, if it exists.
A pattern can be specified as ‘replacement’. This pattern can include substitutions `\0-\9`.
The substitution `\0` includes the entire regular expression. Substitutions `\1-\9` correspond to the subpattern numbers. To use the `\` character in a template, escape it using `\`.
Also keep in mind that a string literal requires an extra escape.
Replaces the first occurrence of the substring matching the regular expression ‘pattern’ in ‘haystack’ by the ‘replacement’ string.
‘pattern’ must be a constant [re2 regular expression](https://github.com/google/re2/wiki/Syntax).
‘replacement’ must be a plain constant string or a constant string containing substitutions `\0-\9`.
Substitutions `\1-\9` correspond to the 1st to 9th capturing group (submatch); substitution `\0` corresponds to the entire match.
To use a verbatim `\` character in the ‘pattern’ or ‘replacement’ string, escape it using `\`.
Also keep in mind that string literals require extra escaping.

Example 1. Converting the date to American format:
Example 1. Converting ISO dates to American format:

``` sql
SELECT DISTINCT

@ -62,7 +63,7 @@ SELECT replaceRegexpOne('Hello, World!', '.*', '\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0')

## replaceRegexpAll(haystack, pattern, replacement)

This does the same thing, but replaces all the occurrences. Example:
Like ‘replaceRegexpOne’, but replaces all occurrences of the pattern. Example:

``` sql
SELECT replaceRegexpAll('Hello, World!', '.', '\\0\\0') AS res
@ -107,7 +107,7 @@ ALTER TABLE visits RENAME COLUMN webBrowser TO browser

CLEAR COLUMN [IF EXISTS] name IN PARTITION partition_name
```

Resets all data in a column for a specified partition. Read more about setting the partition name in the section [How to specify the partition expression](#alter-how-to-specify-part-expr).
Resets all data in a column for a specified partition. Read more about setting the partition name in the section [How to set the partition expression](partition.md#how-to-set-partition-expression).

If the `IF EXISTS` clause is specified, the query won’t return an error if the column does not exist.

@ -204,8 +204,9 @@ It is used if it is necessary to add or update a column with a complicated expre

Syntax:

```sql
ALTER TABLE table MATERIALIZE COLUMN col;
ALTER TABLE [db.]table [ON CLUSTER cluster] MATERIALIZE COLUMN col [IN PARTITION partition | IN PARTITION ID 'partition_id'];
```
- If you specify a PARTITION, a column will be materialized only within the specified partition.

**Example**
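
The example itself is cut off by the hunk boundary; a minimal sketch of the typical flow, with hypothetical table and column names:

```sql
CREATE TABLE tmp (x Int64) ENGINE = MergeTree() ORDER BY x;
ALTER TABLE tmp ADD COLUMN s String MATERIALIZED toString(x);
-- Backfill the materialized column in already existing parts:
ALTER TABLE tmp MATERIALIZE COLUMN s;
```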
@ -39,7 +39,7 @@ ALTER TABLE mt DETACH PARTITION '2020-11-21';

ALTER TABLE mt DETACH PART 'all_2_2_0';
```

Read about setting the partition expression in a section [How to specify the partition expression](#alter-how-to-specify-part-expr).
Read about setting the partition expression in a section [How to set the partition expression](#how-to-set-partition-expression).

After the query is executed, you can do whatever you want with the data in the `detached` directory — delete it from the file system, or just leave it.

@ -53,7 +53,7 @@ ALTER TABLE table_name [ON CLUSTER cluster] DROP PARTITION|PART partition_expr

Deletes the specified partition from the table. This query tags the partition as inactive and deletes data completely, approximately in 10 minutes.

Read about setting the partition expression in a section [How to specify the partition expression](#alter-how-to-specify-part-expr).
Read about setting the partition expression in a section [How to set the partition expression](#how-to-set-partition-expression).

The query is replicated – it deletes data on all replicas.

@ -71,7 +71,7 @@ ALTER TABLE table_name [ON CLUSTER cluster] DROP DETACHED PARTITION|PART partiti
```

Removes the specified part or all parts of the specified partition from `detached`.
Read more about setting the partition expression in a section [How to specify the partition expression](#alter-how-to-specify-part-expr).
Read more about setting the partition expression in a section [How to set the partition expression](#how-to-set-partition-expression).

## ATTACH PARTITION\|PART

@ -86,7 +86,7 @@ ALTER TABLE visits ATTACH PARTITION 201901;

ALTER TABLE visits ATTACH PART 201901_2_2_0;
```

Read more about setting the partition expression in a section [How to specify the partition expression](#alter-how-to-specify-part-expr).
Read more about setting the partition expression in a section [How to set the partition expression](#how-to-set-partition-expression).

This query is replicated. The replica-initiator checks whether there is data in the `detached` directory.
If data exists, the query checks its integrity. If everything is correct, the query adds the data to the table.

@ -166,7 +166,7 @@ This query creates a local backup of a specified partition. If the `PARTITION` c

The entire backup process is performed without stopping the server.
:::

Note that for old-styled tables you can specify the prefix of the partition name (for example, `2019`) - then the query creates the backup for all the corresponding partitions. Read about setting the partition expression in a section [How to specify the partition expression](#alter-how-to-specify-part-expr).
Note that for old-styled tables you can specify the prefix of the partition name (for example, `2019`) - then the query creates the backup for all the corresponding partitions. Read about setting the partition expression in a section [How to set the partition expression](#how-to-set-partition-expression).

At the time of execution, for a data snapshot, the query creates hardlinks to the table data. Hardlinks are placed in the directory `/var/lib/clickhouse/shadow/N/...`, where:

@ -194,7 +194,7 @@ To restore data from a backup, do the following:

Restoring from a backup does not require stopping the server.

For more information about backups and restoring data, see the [Data Backup](../../../operations/backup.md) section.
For more information about backups and restoring data, see the [Data Backup](/docs/en/manage/backups.mdx) section.

## UNFREEZE PARTITION
@ -7,18 +7,26 @@ title: "Manipulating Projections"

The following operations with [projections](../../../engines/table-engines/mergetree-family/mergetree.md#projections) are available:

- `ALTER TABLE [db].name ADD PROJECTION name ( SELECT <COLUMN LIST EXPR> [GROUP BY] [ORDER BY] )` - Adds a projection description to the table metadata.
- `ALTER TABLE [db].name DROP PROJECTION name` - Removes the projection description from the table metadata and deletes the projection files from disk. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations).
- `ALTER TABLE [db.]table MATERIALIZE PROJECTION name IN PARTITION partition_name` - The query rebuilds the projection `name` in the partition `partition_name`. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations).
- `ALTER TABLE [db.]table CLEAR PROJECTION name IN PARTITION partition_name` - Deletes the projection files from disk without removing the description. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations).

## ADD PROJECTION

`ALTER TABLE [db].name ADD PROJECTION name ( SELECT <COLUMN LIST EXPR> [GROUP BY] [ORDER BY] )` - Adds a projection description to the table metadata.

## DROP PROJECTION

`ALTER TABLE [db].name DROP PROJECTION name` - Removes the projection description from the table metadata and deletes the projection files from disk. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations).

## MATERIALIZE PROJECTION

`ALTER TABLE [db.]table MATERIALIZE PROJECTION name IN PARTITION partition_name` - The query rebuilds the projection `name` in the partition `partition_name`. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations).

## CLEAR PROJECTION

`ALTER TABLE [db.]table CLEAR PROJECTION name IN PARTITION partition_name` - Deletes the projection files from disk without removing the description. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations).
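
A short sketch of the commands in use, with hypothetical table and column names (`visits`, `user_id`, `duration`):

``` sql
ALTER TABLE visits ADD PROJECTION user_total (SELECT user_id, sum(duration) GROUP BY user_id);
-- Rebuild the projection for one partition (runs as a mutation):
ALTER TABLE visits MATERIALIZE PROJECTION user_total IN PARTITION 201901;
```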

The commands `ADD`, `DROP` and `CLEAR` are lightweight in the sense that they only change metadata or remove files.

Also, they are replicated, syncing projections metadata via ZooKeeper.
Also, they are replicated, syncing projections metadata via ClickHouse Keeper or ZooKeeper.

:::note
Projection manipulation is supported only for tables with [`*MergeTree`](../../../engines/table-engines/mergetree-family/mergetree.md) engine (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) variants).

@ -12,7 +12,7 @@ Syntax:

``` sql
ALTER USER [IF EXISTS] name1 [ON CLUSTER cluster_name1] [RENAME TO new_name1]
    [, name2 [ON CLUSTER cluster_name2] [RENAME TO new_name2] ...]
    [NOT IDENTIFIED | IDENTIFIED {[WITH {no_password | plaintext_password | sha256_password | sha256_hash | double_sha1_password | double_sha1_hash}] BY {'password' | 'hash'}} | {WITH ldap SERVER 'server_name'} | {WITH kerberos [REALM 'realm']}]
    [NOT IDENTIFIED | IDENTIFIED {[WITH {no_password | plaintext_password | sha256_password | sha256_hash | double_sha1_password | double_sha1_hash}] BY {'password' | 'hash'}} | {WITH ldap SERVER 'server_name'} | {WITH kerberos [REALM 'realm']} | {WITH ssl_certificate CN 'common_name'}]
    [[ADD | DROP] HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE]
    [DEFAULT ROLE role [,...] | ALL | ALL EXCEPT role [,...] ]
    [GRANTEES {user | role | ANY | NONE} [,...] [EXCEPT {user | role} [,...]]]

@ -8,7 +8,7 @@ title: "CHECK TABLE Statement"

Checks if the data in the table is corrupted.

``` sql
CHECK TABLE [db.]name
CHECK TABLE [db.]name [PARTITION partition_expr]
```

The `CHECK TABLE` query compares actual file sizes with the expected values which are stored on the server. If the file sizes do not match the stored values, it means the data is corrupted. This can be caused, for example, by a system crash during query execution.
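
For instance (hypothetical table name; the query returns a `result` column with 1 when the data is intact):

``` sql
CHECK TABLE test_table;                  -- check the whole table
CHECK TABLE test_table PARTITION 201901; -- check a single partition, per the new syntax above
```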

@ -31,7 +31,7 @@ By default, ClickHouse uses its own [Atomic](../../../engines/database-engines/a

### COMMENT

You can add a comment to the database when you creating it.
You can add a comment to the database when you are creating it.

The comment is supported for all database engines.
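
A minimal sketch (hypothetical database name; the comment can be read back from `system.databases`):

``` sql
CREATE DATABASE db_comment ENGINE = Memory COMMENT 'The temporary database';
SELECT name, comment FROM system.databases WHERE name = 'db_comment';
```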

@ -4,7 +4,7 @@ sidebar_position: 38

sidebar_label: FUNCTION
---

# CREATE FUNCTION
# CREATE FUNCTION — user defined function (UDF)

Creates a user defined function from a lambda expression. The expression must consist of function parameters, constants, operators, or other function calls.
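
A small sketch of such a function and a call to it (hypothetical function name):

``` sql
CREATE FUNCTION linear_equation AS (x, k, b) -> k*x + b;
SELECT number, linear_equation(number, 2, 1) FROM numbers(3);
```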

@ -12,7 +12,7 @@ Syntax:

``` sql
CREATE USER [IF NOT EXISTS | OR REPLACE] name1 [ON CLUSTER cluster_name1]
    [, name2 [ON CLUSTER cluster_name2] ...]
    [NOT IDENTIFIED | IDENTIFIED {[WITH {no_password | plaintext_password | sha256_password | sha256_hash | double_sha1_password | double_sha1_hash}] BY {'password' | 'hash'}} | {WITH ldap SERVER 'server_name'} | {WITH kerberos [REALM 'realm']}]
    [NOT IDENTIFIED | IDENTIFIED {[WITH {no_password | plaintext_password | sha256_password | sha256_hash | double_sha1_password | double_sha1_hash}] BY {'password' | 'hash'}} | {WITH ldap SERVER 'server_name'} | {WITH kerberos [REALM 'realm']} | {WITH ssl_certificate CN 'common_name'}]
    [HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE]
    [DEFAULT ROLE role [,...]]
    [DEFAULT DATABASE database | NONE]

@ -34,6 +34,7 @@ There are multiple ways of user identification:

- `IDENTIFIED WITH double_sha1_hash BY 'hash'`
- `IDENTIFIED WITH ldap SERVER 'server_name'`
- `IDENTIFIED WITH kerberos` or `IDENTIFIED WITH kerberos REALM 'realm'`
- `IDENTIFIED WITH ssl_certificate CN 'mysite.com:user'`

For identification with sha256_hash using `SALT`, the hash must be calculated from the concatenation of 'password' and 'salt'.
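
For instance, one of the identification variants in use (hypothetical user name; the server stores only the SHA-256 hash of the password):

``` sql
CREATE USER IF NOT EXISTS mira HOST IP '127.0.0.1' IDENTIFIED WITH sha256_password BY 'qwerty';
```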

@ -54,7 +55,7 @@ Another way of specifying host is to use `@` syntax following the username. Exam

- `CREATE USER mira@'localhost'` — Equivalent to the `HOST LOCAL` syntax.
- `CREATE USER mira@'192.168.%.%'` — Equivalent to the `HOST LIKE` syntax.

:::warning
ClickHouse treats `user_name@'address'` as a username as a whole. Thus, technically you can create multiple users with the same `user_name` and different constructions after `@`. However, we do not recommend doing so.
:::

@ -22,7 +22,7 @@ The `OPTIMIZE` query is supported for [MergeTree](../../engines/table-engines/me

When `OPTIMIZE` is used with the [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md) family of table engines, ClickHouse creates a task for merging and waits for execution on all replicas (if the [replication_alter_partitions_sync](../../operations/settings/settings.md#replication-alter-partitions-sync) setting is set to `2`) or on the current replica (if the [replication_alter_partitions_sync](../../operations/settings/settings.md#replication-alter-partitions-sync) setting is set to `1`).

- If `OPTIMIZE` does not perform a merge for any reason, it does not notify the client. To enable notifications, use the [optimize_throw_if_noop](../../operations/settings/settings.md#setting-optimize_throw_if_noop) setting.
- If you specify a `PARTITION`, only the specified partition is optimized. [How to set partition expression](../../sql-reference/statements/alter/index.md#alter-how-to-specify-part-expr).
- If you specify a `PARTITION`, only the specified partition is optimized. [How to set partition expression](alter/partition.md#how-to-set-partition-expression).
- If you specify `FINAL`, optimization is performed even when all the data is already in one part. Also, a merge is forced even if concurrent merges are being performed.
- If you specify `DEDUPLICATE`, then completely identical rows (unless a by-clause is specified) will be deduplicated (all columns are compared); this makes sense only for the MergeTree engine.
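
Putting the two modifiers together (hypothetical MergeTree table name):

``` sql
-- Force a merge even if data is in one part, then drop fully identical rows:
OPTIMIZE TABLE example FINAL DEDUPLICATE;
```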
@ -7,7 +7,7 @@ sidebar_label: INTERSECT

The `INTERSECT` clause returns only those rows that result from both the first and the second queries. The queries must match the number of columns, order, and type. The result of `INTERSECT` can contain duplicate rows.

Multiple `INTERSECT` statements are executes left to right if parenthesis are not specified. The `INTERSECT` operator has a higher priority than the `UNION` and `EXCEPT` clause.
Multiple `INTERSECT` statements are executed left to right if parentheses are not specified. The `INTERSECT` operator has a higher priority than the `UNION` and `EXCEPT` clauses.

``` sql
@ -281,8 +281,8 @@ After running this statement the `[db.]replicated_merge_tree_family_table_name`

### RESTART REPLICA

Provides possibility to reinitialize Zookeeper sessions state for `ReplicatedMergeTree` table, will compare current state with Zookeeper as source of true and add tasks to Zookeeper queue if needed.
Initialization replication queue based on ZooKeeper date happens in the same way as `ATTACH TABLE` statement. For a short time the table will be unavailable for any operations.
Provides the possibility to reinitialize the ZooKeeper session state for a `ReplicatedMergeTree` table; it compares the current state with ZooKeeper as the source of truth and adds tasks to the ZooKeeper queue if needed.
Initialization of the replication queue based on ZooKeeper data happens in the same way as for the `ATTACH TABLE` statement. For a short time, the table will be unavailable for any operations.

``` sql
SYSTEM RESTART REPLICA [db.]replicated_merge_tree_family_table_name
@ -81,6 +81,7 @@ Multiple path components can have globs. For being processed file must exist and

- `?` — Substitutes any single character.
- `{some_string,another_string,yet_another_one}` — Substitutes any of the strings `'some_string', 'another_string', 'yet_another_one'`.
- `{N..M}` — Substitutes any number in the range from N to M, including both borders.
- `**` — Fetches all files inside the folder recursively.

Constructions with `{}` are similar to the [remote](remote.md) table function.

@ -119,6 +120,22 @@ Query the data from files named `file000`, `file001`, … , `file999`:

SELECT count(*) FROM file('big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, value UInt32');
```

**Example**

Query the data from all files inside the `big_dir` directory recursively:

``` sql
SELECT count(*) FROM file('big_dir/**', 'CSV', 'name String, value UInt32');
```

**Example**

Query the data from all `file002` files from any folder inside the `big_dir` directory recursively:

``` sql
SELECT count(*) FROM file('big_dir/**/file002', 'CSV', 'name String, value UInt32');
```

## Virtual Columns

- `_path` — Path to the file.
@ -127,6 +127,18 @@ INSERT INTO FUNCTION s3('https://clickhouse-public-datasets.s3.amazonaws.com/my-

SELECT name, value FROM existing_table;
```

Glob `**` can be used for recursive directory traversal. Consider the example below; it fetches all files from the `my-test-bucket-768` directory recursively:

``` sql
SELECT * FROM s3('https://clickhouse-public-datasets.s3.amazonaws.com/my-test-bucket-768/**', 'CSV', 'name String, value UInt32', 'gzip');
```

The query below gets data from all `test-data.csv.gz` files from any folder inside the `my-test-bucket` directory recursively:

``` sql
SELECT * FROM s3('https://clickhouse-public-datasets.s3.amazonaws.com/my-test-bucket-768/**/test-data.csv.gz', 'CSV', 'name String, value UInt32', 'gzip');
```

## Partitioned Write

If you specify a `PARTITION BY` expression when inserting data into an `S3` table, a separate file is created for each partition value. Splitting the data into separate files helps to improve the efficiency of read operations.
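
A sketch of such an insert (bucket name taken from the examples above; `{_partition_id}` in the path is replaced by the partition value, so this writes `file_1.csv` and `file_10.csv`):

``` sql
INSERT INTO FUNCTION
    s3('https://clickhouse-public-datasets.s3.amazonaws.com/my-test-bucket-768/file_{_partition_id}.csv', 'CSV', 'a UInt32, b UInt32, c UInt32')
    PARTITION BY a
VALUES (1, 2, 3), (1, 4, 5), (10, 11, 12);
```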

@ -87,14 +87,15 @@ SETTINGS

<summary>Deprecated method for creating a table</summary>

:::note "Attention"
Do not use this method in new projects. If possible, switch old projects to the method described above.
:::

``` sql
Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format
      [, kafka_row_delimiter, kafka_schema, kafka_num_consumers, kafka_skip_broken_messages])
```

</details>

## Description {#opisanie}
@ -39,9 +39,10 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

<summary>Deprecated method for creating a table</summary>

:::note "Attention"
Do not use this method in new projects and, if possible, switch old projects to the method described above.
:::

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(
@ -43,9 +43,10 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

<summary>Deprecated method for creating a table</summary>

:::note "Attention"
Do not use this method in new projects and, if possible, switch old projects to the method described above.
:::

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(

@ -59,7 +60,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

- `sign` — Name of the column with the type of row: `1` is a “state” row, `-1` is a “cancel” row.

    The column data type is `Int8`.

</details>
@ -55,9 +55,10 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

<summary>Deprecated method for creating a table</summary>

:::note "Attention"
Do not use this method in new projects and, if possible, switch old projects to the method described above.
:::

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(
@ -115,9 +115,10 @@ ENGINE MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDa

<summary>Deprecated method for creating a table</summary>

:::note "Attention"
Do not use this method in new projects and, if possible, switch old projects to the method described above.
:::

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(
@ -42,9 +42,10 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

<summary>Deprecated method for creating a table</summary>

:::note "Attention"
Do not use this method in new projects and, if possible, switch old projects to the method described above.
:::

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(
@ -316,9 +316,9 @@ SELECT toStartOfISOYear(toDate('2017-01-01')) AS ISOYear20170101;

A date is returned.

:::note "Attention"
The return value for incorrect dates is implementation-specific. ClickHouse may return a zero date, throw an exception, or perform “natural” date overflow between months.
:::

## toMonday {#tomonday}

Rounds a date or date-with-time down to the nearest Monday.
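
For instance (2023-04-21 is a Friday, so it rounds down to the preceding Monday):

``` sql
SELECT toMonday(toDate('2023-04-21')) AS monday; -- 2023-04-17
```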

@ -1126,8 +1126,7 @@ SELECT FROM_UNIXTIME(423543535);

└──────────────────────────┘
```

When there are two arguments: the first of type [Integer](../../sql-reference/data-types/int-uint.md) or [DateTime](../../sql-reference/data-types/datetime.md), and the second a constant format string — the function works the same way as [formatDateTime](#formatdatetime) and returns a value of type [String](../../sql-reference/data-types/string.md#string).

When there are two or three arguments: the first of type [Integer](../../sql-reference/data-types/int-uint.md), [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md), the second a constant format string, and the third a constant time zone string — the function works the same way as [formatDateTime](#formatdatetime) and returns a value of type [String](../../sql-reference/data-types/string.md#string).

Query:

@ -122,9 +122,9 @@ FROM t_null

There are two variants of IN operators with subqueries (and similarly for JOIN): the regular `IN` / `JOIN` and `GLOBAL IN` / `GLOBAL JOIN`. They differ in how they are executed during distributed query processing.

:::note "Attention"
Keep in mind that the algorithms described below may work differently depending on the `distributed_product_mode` [setting](../../operations/settings/settings.md).
:::

When the regular IN is used, the query is sent to remote servers, and each of them runs the subqueries in the `IN` / `JOIN` clauses.

When `GLOBAL IN` / `GLOBAL JOIN` is used, all the subqueries for `GLOBAL IN` / `GLOBAL JOIN` are run first, and the results are collected in temporary tables. The temporary tables are then sent to each remote server, where the queries are run using this transmitted temporary data.
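
A sketch of the difference in practice (hypothetical `distributed_table` / `local_table` names; with `GLOBAL IN` the subquery runs once and its result is shipped to every remote server as a temporary table):

``` sql
SELECT uniq(UserID)
FROM distributed_table
WHERE UserID GLOBAL IN (SELECT UserID FROM local_table WHERE CounterID = 34);
```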

@ -37,7 +37,7 @@ deb:

contents:
- src: root/etc/clickhouse-client/config.xml
  dst: /etc/clickhouse-client/config.xml
  type: config
  type: config|noreplace
- src: root/usr/bin/clickhouse-benchmark
  dst: /usr/bin/clickhouse-benchmark
- src: root/usr/bin/clickhouse-compressor