mirror of https://github.com/ClickHouse/ClickHouse.git
synced 2024-11-22 15:42:02 +00:00

commit 72acedfba6
Merge remote-tracking branch 'upstream/master' into better_asterisk_parser
.github/ISSUE_TEMPLATE/85_bug-report.md (2 changes)

@@ -13,6 +13,8 @@ assignees: ''
 > A clear and concise description of what works not as it is supposed to.
 
+> A link to reproducer in [https://fiddle.clickhouse.com/](https://fiddle.clickhouse.com/).
+
 **Does it reproduce on recent release?**
 
 [The list of releases](https://github.com/ClickHouse/ClickHouse/blob/master/utils/list-versions/version_date.tsv)
.github/ISSUE_TEMPLATE/ (installation issue template; 2 changes)

@@ -7,6 +7,8 @@ assignees: ''
 ---
 
+**I have tried the following solutions**: https://clickhouse.com/docs/en/faq/troubleshooting/#troubleshooting-installation-errors
+
 **Installation type**
 
 Packages, docker, single binary, curl?
.github/workflows/backport_branches.yml (28 changes)

@@ -145,8 +145,8 @@ jobs:
       fetch-depth: 0 # For a proper version and performance artifacts
   - name: Build
     run: |
-      git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-      git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+      git -C "$GITHUB_WORKSPACE" submodule sync
+      git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
       sudo rm -fr "$TEMP_PATH"
       mkdir -p "$TEMP_PATH"
       cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"

Six more hunks (@@ -190, @@ -233, @@ -276, @@ -319, @@ -364, @@ -409) apply the same two-line substitution to the remaining Build steps; their context differs only in the checkout step (`fetch-depth: 0` for jobs that need version or contributor info, plain `actions/checkout@v2` otherwise).
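The new invocation stops recursing into nested submodules and instead fetches each top-level submodule shallowly from a single branch, which trims checkout time and network traffic in CI. A standalone sketch of the same sequence (the workspace default is illustrative; the git commands are exactly those from the hunks):

```bash
#!/usr/bin/env bash
# Sketch: the submodule checkout sequence from the Build steps above.
#   sync            - repoint submodule remotes at the URLs in .gitmodules
#   --depth=1       - fetch only the tip commit of each submodule
#   --single-branch - fetch refs for one branch instead of all of them
#   --jobs=10       - update submodules in parallel
set -euo pipefail

GITHUB_WORKSPACE=${GITHUB_WORKSPACE:-"$PWD/ClickHouse"}  # illustrative default

git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
```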
.github/workflows/debug.yml (2 changes)

@@ -2,7 +2,7 @@
 name: Debug
 
 'on':
-  [push, pull_request, release, workflow_dispatch]
+  [push, pull_request, release, workflow_dispatch, workflow_call]
 
 jobs:
   DebugInfo:
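Adding `workflow_call` makes Debug usable as a reusable workflow, which is what the nightly.yml change below relies on (`uses: ./.github/workflows/debug.yml`). The existing `workflow_dispatch` trigger also allows manual runs; a sketch with the GitHub CLI (repository and ref are illustrative, and dispatching requires write access):

```bash
# Manually dispatch the Debug workflow via its workflow_dispatch trigger.
# Requires an authenticated GitHub CLI session (gh auth login).
gh workflow run debug.yml --repo ClickHouse/ClickHouse --ref master
```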
.github/workflows/jepsen.yml (33 changes)

@@ -32,10 +32,41 @@ jobs:
       mkdir -p "$TEMP_PATH"
       cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
       cd "$REPO_COPY/tests/ci"
-      python3 keeper_jepsen_check.py
+      python3 jepsen_check.py keeper
   - name: Cleanup
     if: always()
     run: |
       docker ps --quiet | xargs --no-run-if-empty docker kill ||:
       docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
       sudo rm -fr "$TEMP_PATH"
+  # ServerJepsenRelease:
+  #   runs-on: [self-hosted, style-checker]
+  #   if: ${{ always() }}
+  #   needs: [KeeperJepsenRelease]
+  #   steps:
+  #     - name: Set envs
+  #       run: |
+  #         cat >> "$GITHUB_ENV" << 'EOF'
+  #         TEMP_PATH=${{runner.temp}}/server_jepsen
+  #         REPO_COPY=${{runner.temp}}/server_jepsen/ClickHouse
+  #         EOF
+  #     - name: Clear repository
+  #       run: |
+  #         sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
+  #     - name: Check out repository code
+  #       uses: actions/checkout@v2
+  #       with:
+  #         fetch-depth: 0
+  #     - name: Jepsen Test
+  #       run: |
+  #         sudo rm -fr "$TEMP_PATH"
+  #         mkdir -p "$TEMP_PATH"
+  #         cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+  #         cd "$REPO_COPY/tests/ci"
+  #         python3 jepsen_check.py server
+  #     - name: Cleanup
+  #       if: always()
+  #       run: |
+  #         docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+  #         docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+  #         sudo rm -fr "$TEMP_PATH"
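The Cleanup steps above end their docker commands with `||:`, shell shorthand for `|| :` ("or run the null command"), so a failure to kill or remove containers cannot fail the job. A standalone sketch of the idiom:

```bash
#!/usr/bin/env bash
# `xargs --no-run-if-empty` skips the command when there are no container
# IDs on stdin; `||:` forces exit status 0 even if docker itself errors.
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
echo "cleanup finished with status $?"  # always 0
```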
.github/workflows/master.yml (231 changes)

Seventeen hunks (@@ -209, -251, -295, -338, -381, -424, -467, -510, -556, -599, -644, -689, -734, -779, -824, -869, -914) apply the same two-line submodule substitution to every Build step:

-      git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-      git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+      git -C "$GITHUB_WORKSPACE" submodule sync
+      git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10

@@ -1056,6 +1056,23 @@ jobs:
       docker ps --quiet | xargs --no-run-if-empty docker kill ||:
       docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
       sudo rm -fr "$TEMP_PATH"
+  MarkReleaseReady:
+    needs:
+      - BuilderBinDarwin
+      - BuilderBinDarwinAarch64
+      - BuilderDebRelease
+      - BuilderDebAarch64
+    runs-on: [self-hosted, style-checker]
+    steps:
+      - name: Clear repository
+        run: |
+          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Mark Commit Release Ready
+        run: |
+          cd "$GITHUB_WORKSPACE/tests/ci"
+          python3 mark_release_ready.py
 ##############################################################################################
 ########################### FUNCTIONAl STATELESS TESTS #######################################
 ##############################################################################################

@@ -2994,6 +3011,150 @@ jobs:
       docker ps --quiet | xargs --no-run-if-empty docker kill ||:
       docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
       sudo rm -fr "$TEMP_PATH"
+  PerformanceComparisonAarch-0:
+    needs: [BuilderDebAarch64]
+    runs-on: [self-hosted, func-tester-aarch64]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/performance_comparison
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Performance Comparison Aarch64
+          REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse
+          RUN_BY_HASH_NUM=0
+          RUN_BY_HASH_TOTAL=4
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Clear repository
+        run: |
+          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Performance Comparison
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 performance_comparison_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"

The same hunk adds PerformanceComparisonAarch-1, -2 and -3, identical except for RUN_BY_HASH_NUM=1, 2 and 3.

 ##############################################################################################
 ###################################### SQLANCER FUZZERS ######################################
 ##############################################################################################

@@ -3069,6 +3230,8 @@ jobs:
   needs:
     - DockerHubPush
     - BuilderReport
+    - BuilderSpecialReport
+    - MarkReleaseReady
     - FunctionalStatelessTestDebug0
     - FunctionalStatelessTestDebug1
     - FunctionalStatelessTestDebug2
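The four `PerformanceComparisonAarch-*` jobs shard one check by hash: each job receives `RUN_BY_HASH_NUM` (its shard index) and `RUN_BY_HASH_TOTAL` (the shard count), and the CI scripts run only the tests that fall into that shard. The actual selection lives in the Python code under `tests/ci`; the sketch below is an illustrative reimplementation of the idea, not ClickHouse's code:

```bash
#!/usr/bin/env bash
# Illustrative hash sharding: keep only the tests whose name hashes
# into this job's bucket. NOT the actual ClickHouse CI logic.
RUN_BY_HASH_NUM=${RUN_BY_HASH_NUM:-0}
RUN_BY_HASH_TOTAL=${RUN_BY_HASH_TOTAL:-4}

for test in tests/performance/*.xml; do
  crc=$(printf '%s' "$test" | cksum | cut -d' ' -f1)  # stable hash of the name
  if (( crc % RUN_BY_HASH_TOTAL == RUN_BY_HASH_NUM )); then
    echo "shard $RUN_BY_HASH_NUM runs $test"
  fi
done
```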
.github/workflows/nightly.yml (5 changes)

@@ -10,6 +10,9 @@ env:
   workflow_dispatch:
 
 jobs:
+  Debug:
+    # The task for having a preserved ENV and event.json for later investigation
+    uses: ./.github/workflows/debug.yml
   DockerHubPushAarch64:
     runs-on: [self-hosted, style-checker-aarch64]
     steps:

@@ -102,7 +105,7 @@ jobs:
   - name: Build
     run: |
       git -C "$GITHUB_WORKSPACE" submodule sync
-      git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --init --jobs=10
+      git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
       sudo rm -fr "$TEMP_PATH"
       mkdir -p "$TEMP_PATH"
       cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
.github/workflows/pull_request.yml (1078 changes)

File diff suppressed because it is too large.
.github/workflows/release_branches.yml (54 changes)

Nine hunks (@@ -136, -178, -220, -263, -306, -349, -392, -437, -482) apply the same two-line submodule substitution as in backport_branches.yml and master.yml above.

The hunk @@ -615,6 +615,23 adds, after a Cleanup step, the same MarkReleaseReady job that was added to master.yml above (needs: BuilderBinDarwin, BuilderBinDarwinAarch64, BuilderDebRelease, BuilderDebAarch64; runs `python3 mark_release_ready.py` from `tests/ci`).

@@ -1888,6 +1905,7 @@ jobs:
   - DockerServerImages
   - BuilderReport
   - BuilderSpecialReport
+  - MarkReleaseReady
   - FunctionalStatelessTestDebug0
   - FunctionalStatelessTestDebug1
   - FunctionalStatelessTestDebug2
.github/workflows/tags_stable.yml (3 changes)

@@ -38,7 +38,7 @@ jobs:
     with:
       ref: master
       fetch-depth: 0
-  - name: Generate versions
+  - name: Update versions, docker version, changelog, security
     env:
       GITHUB_TOKEN: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
     run: |

@@ -51,6 +51,7 @@ jobs:
       --gh-user-or-token="$GITHUB_TOKEN" --jobs=5 \
       --output="/ClickHouse/docs/changelogs/${GITHUB_TAG}.md" "${GITHUB_TAG}"
       git add "./docs/changelogs/${GITHUB_TAG}.md"
+      python ./utils/security-generator/generate_security.py > SECURITY.md
       git diff HEAD
   - name: Create Pull Request
     uses: peter-evans/create-pull-request@v3
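After this change the tag workflow regenerates SECURITY.md alongside the per-tag changelog. A sketch of the sequence as it would run from a checkout — the changelog script path and the relative output directory are assumptions; the flags and the security-generator call are taken from the hunk:

```bash
#!/usr/bin/env bash
# Sketch: regenerate the tag changelog and SECURITY.md, then stage them.
set -euo pipefail
GITHUB_TAG=v22.11.1.1360-stable  # illustrative tag name
# NOTE: the changelog script location is an assumption; the flags are
# the ones visible in the hunk above.
python3 ./utils/changelog/changelog.py \
  --gh-user-or-token="${GITHUB_TOKEN:?set a GitHub token}" --jobs=5 \
  --output="./docs/changelogs/${GITHUB_TAG}.md" "${GITHUB_TAG}"
git add "./docs/changelogs/${GITHUB_TAG}.md"
python ./utils/security-generator/generate_security.py > SECURITY.md
git diff HEAD
```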
.gitmodules (3 changes)

@@ -290,3 +290,6 @@
 [submodule "contrib/morton-nd"]
 	path = contrib/morton-nd
 	url = https://github.com/morton-nd/morton-nd
+[submodule "contrib/xxHash"]
+	path = contrib/xxHash
+	url = https://github.com/Cyan4973/xxHash.git
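Entries like the xxHash block are normally created with `git submodule add` rather than edited by hand; the command that would produce it:

```bash
# Registers the submodule, clones it into contrib/xxHash, and appends
# the [submodule "contrib/xxHash"] entry to .gitmodules.
git submodule add https://github.com/Cyan4973/xxHash.git contrib/xxHash
```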
CHANGELOG.md (110 changes)

@@ -1,4 +1,5 @@
 ### Table of Contents
+**[ClickHouse release v22.11, 2022-11-17](#2211)**<br/>
 **[ClickHouse release v22.10, 2022-10-25](#2210)**<br/>
 **[ClickHouse release v22.9, 2022-09-22](#229)**<br/>
 **[ClickHouse release v22.8-lts, 2022-08-18](#228)**<br/>

@@ -11,6 +12,109 @@
 **[ClickHouse release v22.1, 2022-01-18](#221)**<br/>
 **[Changelog for 2021](https://clickhouse.com/docs/en/whats-new/changelog/2021/)**<br/>

The second hunk appends the entire 22.11 section, reproduced below without the leading `+` markers:

### <a id="2211"></a> ClickHouse release 22.11, 2022-11-17

#### Backward Incompatible Change
* `JSONExtract` family of functions will now attempt to coerce to the requested type. [#41502](https://github.com/ClickHouse/ClickHouse/pull/41502) ([Márcio Martins](https://github.com/marcioapm)).
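The coercion change is easy to observe with `clickhouse local`; a minimal sketch (the result comment reflects the documented behavior, not captured output):

```bash
# After 22.11, the JSON string "42" is coerced to the requested UInt32
# instead of yielding the type's default value.
clickhouse local --query "SELECT JSONExtract('{\"a\": \"42\"}', 'a', 'UInt32')"  # 42
```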
#### New Feature
* Add support for retries during INSERTs into ReplicatedMergeTree when a session with ClickHouse Keeper is lost. Apart from fault tolerance, it aims at a better user experience: avoid returning an error to the user during an insert if Keeper is restarted (for example, due to an upgrade). This is controlled by the `insert_keeper_max_retries` setting, which is disabled by default. [#42607](https://github.com/ClickHouse/ClickHouse/pull/42607) ([Igor Nikonov](https://github.com/devcrafter)).
* Add `Hudi` and `DeltaLake` table engines, read-only, only for tables on S3. [#41054](https://github.com/ClickHouse/ClickHouse/pull/41054) ([Daniil Rubin](https://github.com/rubin-do), [Kseniia Sumarokova](https://github.com/kssenii)).
* Add table functions `hudi` and `deltaLake`. [#43080](https://github.com/ClickHouse/ClickHouse/pull/43080) ([flynn](https://github.com/ucasfl)).
* Support for composite time intervals. 1. Add, subtract and negate operations are now available on Intervals; if the Interval types differ, they are transformed into a Tuple of those types. 2. A tuple of intervals can be added to or subtracted from a Date/DateTime field. 3. Added parsing of Intervals with different types, for example: `INTERVAL '1 HOUR 1 MINUTE 1 SECOND'` (see the sketch after this list). [#42195](https://github.com/ClickHouse/ClickHouse/pull/42195) ([Nikolay Degterinsky](https://github.com/evillique)).
* Added `**` glob support for recursive directory traversal of the filesystem and S3. Resolves [#36316](https://github.com/ClickHouse/ClickHouse/issues/36316). [#42376](https://github.com/ClickHouse/ClickHouse/pull/42376) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Introduce the `s3_plain` disk type for write-once-read-many operations. Implement `ATTACH` of `MergeTree` tables for the `s3_plain` disk. [#42628](https://github.com/ClickHouse/ClickHouse/pull/42628) ([Azat Khuzhin](https://github.com/azat)).
* Added applied row-level policies to `system.query_log`. [#39819](https://github.com/ClickHouse/ClickHouse/pull/39819) ([Vladimir Chebotaryov](https://github.com/quickhouse)).
* Add the four-letter command `csnp` for manually creating snapshots in ClickHouse Keeper. Additionally, `lgif` was added to get Raft information for a specific node (e.g. the index of the last created snapshot, the last committed log index). [#41766](https://github.com/ClickHouse/ClickHouse/pull/41766) ([JackyWoo](https://github.com/JackyWoo)).
* Add function `ascii` like in Apache Spark: https://spark.apache.org/docs/latest/api/sql/#ascii. [#42670](https://github.com/ClickHouse/ClickHouse/pull/42670) ([李扬](https://github.com/taiyang-li)).
* Add function `positive_modulo` (`pmod`), which returns a non-negative result of the modulo operation (see the sketch after this list). [#42755](https://github.com/ClickHouse/ClickHouse/pull/42755) ([李扬](https://github.com/taiyang-li)).
* Add function `formatReadableDecimalSize`. [#42774](https://github.com/ClickHouse/ClickHouse/pull/42774) ([Alejandro](https://github.com/alexon1234)).
* Add function `randCanonical`, which is similar to the `rand` function in Apache Spark or Impala. The function generates pseudo-random results with independent, identically and uniformly distributed values in [0, 1). [#43124](https://github.com/ClickHouse/ClickHouse/pull/43124) ([李扬](https://github.com/taiyang-li)).
* Add function `displayName`, closes [#36770](https://github.com/ClickHouse/ClickHouse/issues/36770). [#37681](https://github.com/ClickHouse/ClickHouse/pull/37681) ([hongbin](https://github.com/xlwh)).
* Add the `min_age_to_force_merge_on_partition_only` setting to optimize old parts for the entire partition only. [#42659](https://github.com/ClickHouse/ClickHouse/pull/42659) ([Antonio Andelic](https://github.com/antonio2368)).
* Add a generic implementation for arbitrary structured named collections, an access type, and `system.named_collections`. [#43147](https://github.com/ClickHouse/ClickHouse/pull/43147) ([Kseniia Sumarokova](https://github.com/kssenii)).
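A few of the new SQL-visible features above can be tried directly with `clickhouse local`; a minimal sketch (the result comments follow the documented semantics, not captured output):

```bash
# Composite intervals: parse and add an interval with mixed units.
clickhouse local --query "SELECT now() + INTERVAL '1 HOUR 1 MINUTE 1 SECOND'"

# positive_modulo (pmod) never returns a negative result, unlike %.
clickhouse local --query "SELECT positive_modulo(-7, 3), -7 % 3"  # 2, -1

# Human-readable sizes in decimal (powers of 1000) units.
clickhouse local --query "SELECT formatReadableDecimalSize(1234567)"  # 1.23 MB
```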
#### Performance Improvement
* Parallelized merging of `uniqExact` states for aggregation without a key, i.e. queries like `SELECT uniqExact(number) FROM table`. The improvement becomes noticeable when the number of unique keys approaches 10^6. `uniq` performance is also slightly optimized (see the sketch below). [#43072](https://github.com/ClickHouse/ClickHouse/pull/43072) ([Nikita Taranov](https://github.com/nickitat)).
* The `match` function can use the index if it's a condition on a string prefix. This closes [#37333](https://github.com/ClickHouse/ClickHouse/issues/37333). [#42458](https://github.com/ClickHouse/ClickHouse/pull/42458) ([clarkcaoliu](https://github.com/Clark0)).
* Speed up AND and OR operators when they are sequenced. [#42214](https://github.com/ClickHouse/ClickHouse/pull/42214) ([Zhiguo Zhou](https://github.com/ZhiguoZh)).
* Support parallel parsing for the `LineAsString` input format. This improves performance just slightly. This closes [#42502](https://github.com/ClickHouse/ClickHouse/issues/42502). [#42780](https://github.com/ClickHouse/ClickHouse/pull/42780) ([Kruglov Pavel](https://github.com/Avogar)).
* ClickHouse Keeper performance improvement: improve commit performance for cases when many different nodes have uncommitted states. This should help with cases when a follower node can't sync fast enough. [#42926](https://github.com/ClickHouse/ClickHouse/pull/42926) ([Antonio Andelic](https://github.com/antonio2368)).
* A condition like `NOT LIKE 'prefix%'` can use the primary index. [#42209](https://github.com/ClickHouse/ClickHouse/pull/42209) ([Duc Canh Le](https://github.com/canhld94)).
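The `uniqExact` improvement above applies to exactly this query shape; a sketch to reproduce it locally (timings are machine-dependent):

```bash
# Aggregation without a key: merging of uniqExact states across threads
# is parallelized in 22.11, noticeable as the key count nears 10^6+.
clickhouse local --query "SELECT uniqExact(number) FROM numbers(10000000)"
```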
#### Experimental Feature
* Support type `Object` inside other types, e.g. `Array(JSON)`. [#36969](https://github.com/ClickHouse/ClickHouse/pull/36969) ([Anton Popov](https://github.com/CurtizJ)).
* Ignore the MySQL binlog SAVEPOINT event for MaterializedMySQL. [#42931](https://github.com/ClickHouse/ClickHouse/pull/42931) ([zzsmdfj](https://github.com/zzsmdfj)). Handle (ignore) SAVEPOINT queries in MaterializedMySQL. [#43086](https://github.com/ClickHouse/ClickHouse/pull/43086) ([Stig Bakken](https://github.com/stigsb)).

#### Improvement
* Trivial queries with a small LIMIT will properly determine the number of estimated rows to read, so that the threshold is checked properly. Closes [#7071](https://github.com/ClickHouse/ClickHouse/issues/7071). [#42580](https://github.com/ClickHouse/ClickHouse/pull/42580) ([Han Fei](https://github.com/hanfei1991)).
* Add support for interactive parameters in INSERT VALUES queries (see the sketch after this list). [#43077](https://github.com/ClickHouse/ClickHouse/pull/43077) ([Nikolay Degterinsky](https://github.com/evillique)).
* Added a new field `allow_readonly` to `system.table_functions`, used to allow table functions in readonly mode. Resolves [#42414](https://github.com/ClickHouse/ClickHouse/issues/42414). A test (tests/queries/0_stateless/02473_functions_in_readonly_mode.sh) and updated English documentation for table functions are included. [#42708](https://github.com/ClickHouse/ClickHouse/pull/42708) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* The `system.asynchronous_metrics` table now has embedded documentation, which is also exported to Prometheus. Fixed an error in the metrics for `cache` disks - they were calculated only for one arbitrary cache disk instead of all of them. This closes [#7644](https://github.com/ClickHouse/ClickHouse/issues/7644). [#43194](https://github.com/ClickHouse/ClickHouse/pull/43194) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The throttling algorithm was changed to token bucket. [#42665](https://github.com/ClickHouse/ClickHouse/pull/42665) ([Sergei Trifonov](https://github.com/serxa)).
* Mask passwords and secret keys in `system.query_log`, in `/var/log/clickhouse-server/*.log`, and in error messages. [#42484](https://github.com/ClickHouse/ClickHouse/pull/42484) ([Vitaly Baranov](https://github.com/vitlibar)).
* Remove covered parts for fetched parts (to avoid possible replication delay growth). [#39737](https://github.com/ClickHouse/ClickHouse/pull/39737) ([Azat Khuzhin](https://github.com/azat)).
* If `/dev/tty` is available, the progress in clickhouse-client and clickhouse-local will be rendered directly to the terminal, without writing to STDERR. This allows getting progress even if STDERR is redirected to a file, and the file will not be polluted by terminal escape sequences. The progress can be disabled by `--progress false`. This closes [#32238](https://github.com/ClickHouse/ClickHouse/issues/32238). [#42003](https://github.com/ClickHouse/ClickHouse/pull/42003) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add support for `FixedString` input to base64 coding functions. [#42285](https://github.com/ClickHouse/ClickHouse/pull/42285) ([ltrk2](https://github.com/ltrk2)).
* Add columns `bytes_on_disk` and `path` to `system.detached_parts`. Closes [#42264](https://github.com/ClickHouse/ClickHouse/issues/42264). [#42303](https://github.com/ClickHouse/ClickHouse/pull/42303) ([chen](https://github.com/xiedeyantu)).
* Improve using the structure from the insertion table in table functions: the setting `use_structure_from_insertion_table_in_table_functions` has a new possible value `2`, meaning ClickHouse will determine automatically whether the structure from the insertion table can be used. Closes [#40028](https://github.com/ClickHouse/ClickHouse/issues/40028). [#42320](https://github.com/ClickHouse/ClickHouse/pull/42320) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix missing progress indication on INSERT FROM INFILE. Closes [#42548](https://github.com/ClickHouse/ClickHouse/issues/42548). [#42634](https://github.com/ClickHouse/ClickHouse/pull/42634) ([chen](https://github.com/xiedeyantu)).
* Refactor the `tokens` function to enable a maximum number of returned tokens for related functions (disabled by default). [#42673](https://github.com/ClickHouse/ClickHouse/pull/42673) ([李扬](https://github.com/taiyang-li)).
* Allow using `Date32` arguments for the `formatDateTime` and `FROM_UNIXTIME` functions (see the sketch after this list). [#42737](https://github.com/ClickHouse/ClickHouse/pull/42737) ([Roman Vasin](https://github.com/rvasin)).
* Update tzdata to 2022f. Mexico will no longer observe DST except near the US border: https://www.timeanddate.com/news/time/mexico-abolishes-dst-2022.html. Chihuahua moves to year-round UTC-6 on 2022-10-30. Fiji no longer observes DST. See https://github.com/google/cctz/pull/235 and https://bugs.launchpad.net/ubuntu/+source/tzdata/+bug/1995209. [#42796](https://github.com/ClickHouse/ClickHouse/pull/42796) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add the `FailedAsyncInsertQuery` event metric for async inserts. [#42814](https://github.com/ClickHouse/ClickHouse/pull/42814) ([Krzysztof Góralski](https://github.com/kgoralski)).
* Implement the `read-in-order` optimization on top of the query plan. It is enabled by default. Set `query_plan_read_in_order = 0` to use the previous AST-based version. [#42829](https://github.com/ClickHouse/ClickHouse/pull/42829) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Increase the size of upload parts exponentially for backups to S3, to avoid errors about the 10,000-part limit of S3 multipart uploads. [#42833](https://github.com/ClickHouse/ClickHouse/pull/42833) ([Vitaly Baranov](https://github.com/vitlibar)).
* Previously, when the merge task was continuously busy and disk space was insufficient, completely expired parts could not be selected and dropped, worsening the shortage. Now, dropping an entirely expired part requires no additional disk space, so TTL can proceed normally. [#42869](https://github.com/ClickHouse/ClickHouse/pull/42869) ([zhongyuankai](https://github.com/zhongyuankai)).
* Add the `oss` function and the `OSS` table engine (convenient for users); OSS is fully compatible with S3. [#43155](https://github.com/ClickHouse/ClickHouse/pull/43155) ([zzsmdfj](https://github.com/zzsmdfj)).
* Improve error reporting in the collection of OS-related info for the `system.asynchronous_metrics` table. [#43192](https://github.com/ClickHouse/ClickHouse/pull/43192) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Modify the `INFORMATION_SCHEMA` tables in a way so that ClickHouse can connect to itself using the MySQL compatibility protocol. Add columns instead of aliases (related to [#9769](https://github.com/ClickHouse/ClickHouse/issues/9769)). This will improve compatibility with various MySQL clients. [#43198](https://github.com/ClickHouse/ClickHouse/pull/43198) ([Filatenkov Artur](https://github.com/FArthur-cmd)).
* Add some functions for compatibility with PowerBI when it connects using the MySQL protocol. [#42612](https://github.com/ClickHouse/ClickHouse/pull/42612) ([Filatenkov Artur](https://github.com/FArthur-cmd)).
* Better usability for the Dashboard on changes. [#42872](https://github.com/ClickHouse/ClickHouse/pull/42872) ([Vladimir C](https://github.com/vdimir)).
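Two of the improvements above are directly scriptable: `Date32` support extends `formatDateTime`/`FROM_UNIXTIME` to dates before 1970, and query parameters now also work in `INSERT ... VALUES`. A sketch — the table `t` in the second command is hypothetical, and that command assumes a running server:

```bash
# Date32 arguments are now accepted by formatDateTime.
clickhouse local --query "SELECT formatDateTime(toDate32('1900-01-01'), '%Y-%m-%d')"

# Parameters ({name:Type}) can now appear in INSERT VALUES queries.
# Hypothetical table: CREATE TABLE t (id UInt32) ENGINE = MergeTree ORDER BY id
clickhouse client --param_id=42 --query "INSERT INTO t VALUES ({id:UInt32})"
```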
#### Build/Testing/Packaging Improvement

* Run SQLancer for each pull request and commit to master. [SQLancer](https://github.com/sqlancer/sqlancer) is an open-source fuzzer that focuses on automatic detection of logical bugs. [#42397](https://github.com/ClickHouse/ClickHouse/pull/42397) ([Ilya Yatsishin](https://github.com/qoega)).
* Update to the latest zlib-ng. [#42463](https://github.com/ClickHouse/ClickHouse/pull/42463) ([Boris Kuschel](https://github.com/bkuschel)).
* Add support for testing the ClickHouse server with Jepsen. We already had support for testing ClickHouse Keeper with Jepsen; this pull request extends it to Replicated tables. [#42619](https://github.com/ClickHouse/ClickHouse/pull/42619) ([Antonio Andelic](https://github.com/antonio2368)).
* Use https://github.com/matus-chochlik/ctcache for caching clang-tidy results. [#42913](https://github.com/ClickHouse/ClickHouse/pull/42913) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Previously, the user-defined config was preserved by RPM in `$file.rpmsave`. The PR fixes this, so packages no longer replace the user's files. [#42936](https://github.com/ClickHouse/ClickHouse/pull/42936) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Remove some libraries from the Ubuntu Docker image. [#42622](https://github.com/ClickHouse/ClickHouse/pull/42622) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)

* Updated the normalizer to clone the alias AST when it is replaced; resolves [#42452](https://github.com/ClickHouse/ClickHouse/issues/42452). Previously, assigning the same AST led to an exception in `LogicalExpressionsOptimizer`, because the same parent would be inserted again. The bug is not seen with the new analyzer (`allow_experimental_analyzer`), so no changes were needed there; a test was added. [#42827](https://github.com/ClickHouse/ClickHouse/pull/42827) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix race for backup of tables in `Lazy` databases. [#43104](https://github.com/ClickHouse/ClickHouse/pull/43104) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix `skip_unavailable_shards`: it did not work with the `s3Cluster` table function. [#43131](https://github.com/ClickHouse/ClickHouse/pull/43131) ([chen](https://github.com/xiedeyantu)).
* Fix schema inference in `s3Cluster` and improve it in `hdfsCluster`. [#41979](https://github.com/ClickHouse/ClickHouse/pull/41979) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix retries while reading from URL table engines / table functions (retriable errors could be retried more times than needed, and non-retriable errors resulted in a failed assertion in the code). [#42224](https://github.com/ClickHouse/ClickHouse/pull/42224) ([Kseniia Sumarokova](https://github.com/kssenii)).
* A segmentation fault related to DNS & c-ares has been reported and fixed. [#42234](https://github.com/ClickHouse/ClickHouse/pull/42234) ([Arthur Passos](https://github.com/arthurpassos)).
* Fix `LOGICAL_ERROR` `Arguments of 'plus' have incorrect data types`, which may happen in PK analysis (monotonicity check). Fix invalid PK analysis for monotonic binary functions whose first argument is a constant. [#42410](https://github.com/ClickHouse/ClickHouse/pull/42410) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix incorrect key analysis when key types cannot be inside Nullable. This fixes [#42456](https://github.com/ClickHouse/ClickHouse/issues/42456). [#42469](https://github.com/ClickHouse/ClickHouse/pull/42469) ([Amos Bird](https://github.com/amosbird)).
* Fix a typo in a setting name that led to incorrect usage of the schema inference cache with the setting `input_format_csv_use_best_effort_in_schema_inference`. Closes [#41735](https://github.com/ClickHouse/ClickHouse/issues/41735). [#42536](https://github.com/ClickHouse/ClickHouse/pull/42536) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix creating a Set with a wrong header when the data type is LowCardinality. Closes [#42460](https://github.com/ClickHouse/ClickHouse/issues/42460). [#42579](https://github.com/ClickHouse/ClickHouse/pull/42579) ([flynn](https://github.com/ucasfl)).
* `(U)Int128` and `(U)Int256` values are now correctly checked in `PREWHERE`. [#42605](https://github.com/ClickHouse/ClickHouse/pull/42605) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix a bug in the functions parser that could lead to a segmentation fault. [#42724](https://github.com/ClickHouse/ClickHouse/pull/42724) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix the locking in `truncate table`. [#42728](https://github.com/ClickHouse/ClickHouse/pull/42728) ([flynn](https://github.com/ucasfl)).
* Fix a possible crash on `web` disks when a file does not exist (or with `OPTIMIZE TABLE FINAL`, which could eventually hit the same error). [#42767](https://github.com/ClickHouse/ClickHouse/pull/42767) ([Azat Khuzhin](https://github.com/azat)).
* Fix `auth_type` mapping in `system.session_log` by including `SSL_CERTIFICATE` in the enum values. [#42782](https://github.com/ClickHouse/ClickHouse/pull/42782) ([Miel Donkers](https://github.com/mdonkers)).
* Fix stack-use-after-return under the ASan build in the CREATE USER query parser. [#42804](https://github.com/ClickHouse/ClickHouse/pull/42804) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix `lowerUTF8`/`upperUTF8` when a symbol straddles a 16-byte boundary (a very frequent case when strings are longer than 16 bytes); see the sketch after this list. [#42812](https://github.com/ClickHouse/ClickHouse/pull/42812) ([Azat Khuzhin](https://github.com/azat)).
* An additional bounds check was added to the LZ4 decompression routine to fix misbehaviour on malformed input. [#42868](https://github.com/ClickHouse/ClickHouse/pull/42868) ([Nikita Taranov](https://github.com/nickitat)).
* Fix a rare possible hang on query cancellation. [#42874](https://github.com/ClickHouse/ClickHouse/pull/42874) ([Azat Khuzhin](https://github.com/azat)).
* Fix incorrect behavior with multiple disjuncts in hash join, close [#42832](https://github.com/ClickHouse/ClickHouse/issues/42832). [#42876](https://github.com/ClickHouse/ClickHouse/pull/42876) ([Vladimir C](https://github.com/vdimir)).
* Fix a null pointer dereference when using `SELECT ... IF ... AS` over a three-table join. [#42883](https://github.com/ClickHouse/ClickHouse/pull/42883) ([zzsmdfj](https://github.com/zzsmdfj)).
* Fix a memory sanitizer report in Cluster Discovery, close [#42763](https://github.com/ClickHouse/ClickHouse/issues/42763). [#42905](https://github.com/ClickHouse/ClickHouse/pull/42905) ([Vladimir C](https://github.com/vdimir)).
* Improve DateTime schema inference for empty strings. [#42911](https://github.com/ClickHouse/ClickHouse/pull/42911) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix a rare `NOT_FOUND_COLUMN_IN_BLOCK` error when a projection could be used but no projection was available. This fixes [#42771](https://github.com/ClickHouse/ClickHouse/issues/42771). The bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/25563. [#42938](https://github.com/ClickHouse/ClickHouse/pull/42938) ([Amos Bird](https://github.com/amosbird)).
* Fix `ATTACH TABLE` in the `PostgreSQL` database engine if the table contains the DATETIME data type. Closes [#42817](https://github.com/ClickHouse/ClickHouse/issues/42817). [#42960](https://github.com/ClickHouse/ClickHouse/pull/42960) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix lambda parsing. Closes [#41848](https://github.com/ClickHouse/ClickHouse/issues/41848). [#42979](https://github.com/ClickHouse/ClickHouse/pull/42979) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix incorrect key analysis when nullable keys appear in the middle of a hyperrectangle. This fixes [#43111](https://github.com/ClickHouse/ClickHouse/issues/43111). [#43133](https://github.com/ClickHouse/ClickHouse/pull/43133) ([Amos Bird](https://github.com/amosbird)).
* Fix several buffer over-reads in deserialization of carefully crafted aggregate function states. [#43159](https://github.com/ClickHouse/ClickHouse/pull/43159) ([Raúl Marín](https://github.com/Algunenano)).
* Fix function `if` for NULL and const Nullable arguments. Closes [#43069](https://github.com/ClickHouse/ClickHouse/issues/43069). [#43178](https://github.com/ClickHouse/ClickHouse/pull/43178) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix decimal math overflow when parsing DateTime with the 'best effort' algorithm. Closes [#43061](https://github.com/ClickHouse/ClickHouse/issues/43061). [#43180](https://github.com/ClickHouse/ClickHouse/pull/43180) ([Kruglov Pavel](https://github.com/Avogar)).
* The `indent` field produced by the `git-import` tool was miscalculated. See https://clickhouse.com/docs/en/getting-started/example-datasets/github/. [#43191](https://github.com/ClickHouse/ClickHouse/pull/43191) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fixed unexpected behaviour of `Interval` types with subqueries and casting. [#43193](https://github.com/ClickHouse/ClickHouse/pull/43193) ([jh0x](https://github.com/jh0x)).
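A hedged sketch of the `lowerUTF8`/`upperUTF8` case fixed above (editorial addition): the implementation processes strings in 16-byte blocks, so the interesting input is a multi-byte symbol that straddles a 16-byte boundary. The literal below is illustrative.

```bash
# 15 ASCII bytes followed by a two-byte UTF-8 symbol ('Ð', 0xC3 0x90),
# so the symbol occupies bytes 16-17 and crosses the 16-byte boundary.
clickhouse-client --query "SELECT lowerUTF8('ABCDEFGHIJKLMNOÐ plus a tail')"
# Expected after the fix: 'abcdefghijklmnoð plus a tail'
```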
### <a id="2210"></a> ClickHouse release 22.10, 2022-10-26

#### Backward Incompatible Change
@@ -570,7 +674,7 @@
 * Support SQL standard CREATE INDEX and DROP INDEX syntax. [#35166](https://github.com/ClickHouse/ClickHouse/pull/35166) ([Jianmei Zhang](https://github.com/zhangjmruc)).
 * Send profile events for INSERT queries (previously only SELECT was supported). [#37391](https://github.com/ClickHouse/ClickHouse/pull/37391) ([Azat Khuzhin](https://github.com/azat)).
 * Implement in order aggregation (`optimize_aggregation_in_order`) for fully materialized projections. [#37469](https://github.com/ClickHouse/ClickHouse/pull/37469) ([Azat Khuzhin](https://github.com/azat)).
-* Remove subprocess run for kerberos initialization. Added new integration test. Closes [#27651](https://github.com/ClickHouse/ClickHouse/issues/27651). [#38105](https://github.com/ClickHouse/ClickHouse/pull/38105) ([Roman Vasin](https://github.com/rvasin)).
+* Remove subprocess run for Kerberos initialization. Added new integration test. Closes [#27651](https://github.com/ClickHouse/ClickHouse/issues/27651). [#38105](https://github.com/ClickHouse/ClickHouse/pull/38105) ([Roman Vasin](https://github.com/rvasin)).
 * * Add setting `multiple_joins_try_to_keep_original_names` to not rewrite identifier name on multiple JOINs rewrite, close [#34697](https://github.com/ClickHouse/ClickHouse/issues/34697). [#38149](https://github.com/ClickHouse/ClickHouse/pull/38149) ([Vladimir C](https://github.com/vdimir)).
 * Improved trace-visualizer UX. [#38169](https://github.com/ClickHouse/ClickHouse/pull/38169) ([Sergei Trifonov](https://github.com/serxa)).
 * Enable stack trace collection and query profiler for AArch64. [#38181](https://github.com/ClickHouse/ClickHouse/pull/38181) ([Maksim Kita](https://github.com/kitaisreal)).
@@ -850,8 +954,8 @@
 #### Upgrade Notes
 
-* Now, background merges, mutations and `OPTIMIZE` will not increment `SelectedRows` and `SelectedBytes` metrics. They (still) will increment `MergedRows` and `MergedUncompressedBytes` as it was before. This only affects the metric values, and makes them better. This change does not introduce any incompatibility, but you may wonder about the changes of metrics, so we put in this category. [#37040](https://github.com/ClickHouse/ClickHouse/pull/37040) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Now, background merges, mutations, and `OPTIMIZE` will not increment `SelectedRows` and `SelectedBytes` metrics. They (still) will increment `MergedRows` and `MergedUncompressedBytes` as it was before. This only affects the metric values and makes them better. This change does not introduce any incompatibility, but you may wonder about the changes to the metrics, so we put in this category. [#37040](https://github.com/ClickHouse/ClickHouse/pull/37040) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
-* Updated the BoringSSL module to the official FIPS compliant version. This makes ClickHouse FIPS compliant. [#35914](https://github.com/ClickHouse/ClickHouse/pull/35914) ([Meena-Renganathan](https://github.com/Meena-Renganathan)). The ciphers `aes-192-cfb128` and `aes-256-cfb128` were removed, because they are not included in the FIPS certified version of BoringSSL.
+* Updated the BoringSSL module to the official FIPS compliant version. This makes ClickHouse FIPS compliant in this area. [#35914](https://github.com/ClickHouse/ClickHouse/pull/35914) ([Meena-Renganathan](https://github.com/Meena-Renganathan)). The ciphers `aes-192-cfb128` and `aes-256-cfb128` were removed, because they are not included in the FIPS certified version of BoringSSL.
 * `max_memory_usage` setting is removed from the default user profile in `users.xml`. This enables flexible memory limits for queries instead of the old rigid limit of 10 GB.
 * Disable `log_query_threads` setting by default. It controls the logging of statistics about every thread participating in query execution. After supporting asynchronous reads, the total number of distinct thread ids became too large, and logging into the `query_thread_log` has become too heavy. [#37077](https://github.com/ClickHouse/ClickHouse/pull/37077) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
 * Remove function `groupArraySorted` which has a bug. [#36822](https://github.com/ClickHouse/ClickHouse/pull/36822) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
@@ -202,7 +202,7 @@ option(ADD_GDB_INDEX_FOR_GOLD "Add .gdb-index to resulting binaries for gold lin
 if (NOT CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE")
     # Can be lld or ld-lld or lld-13 or /path/to/lld.
-    if (LINKER_NAME MATCHES "lld")
+    if (LINKER_NAME MATCHES "lld" AND OS_LINUX)
         set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--gdb-index")
         set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--gdb-index")
         message (STATUS "Adding .gdb-index via --gdb-index linker option.")
@@ -248,7 +248,7 @@ endif ()
 # Create BuildID when using lld. For other linkers it is created by default.
 # (NOTE: LINKER_NAME can be either path or name, and in different variants)
-if (LINKER_NAME MATCHES "lld")
+if (LINKER_NAME MATCHES "lld" AND OS_LINUX)
     # SHA1 is not cryptographically secure but it is the best what lld is offering.
     set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--build-id=sha1")
 endif ()
@@ -442,8 +442,9 @@ elseif (OS_DARWIN)
     include(cmake/darwin/default_libs.cmake)
 elseif (OS_FREEBSD)
     include(cmake/freebsd/default_libs.cmake)
+else()
+    link_libraries(global-group)
 endif ()
-link_libraries(global-group)
 
 if (NOT (OS_LINUX OR OS_DARWIN))
     # Using system libs can cause a lot of warnings in includes (on macro expansion).
@@ -592,7 +593,7 @@ add_subdirectory (programs)
 add_subdirectory (tests)
 add_subdirectory (utils)
 
-include (cmake/sanitize_target_link_libraries.cmake)
+include (cmake/sanitize_targets.cmake)
 
 # Build native targets if necessary
 get_property(NATIVE_BUILD_TARGETS GLOBAL PROPERTY NATIVE_BUILD_TARGETS)
@@ -17,5 +17,7 @@ ClickHouse® is an open-source column-oriented database management system that a
 ## Upcoming events
 * [**v22.11 Release Webinar**](https://clickhouse.com/company/events/v22-11-release-webinar) Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release, provide live demos, and share vision into what is coming in the roadmap.
-* [**ClickHouse Meetup at the Deutsche Bank office in Berlin**](https://www.meetup.com/clickhouse-berlin-user-group/events/289311596/) Hear from Deutsche Bank on why they chose ClickHouse for big sensitive data in a regulated environment. The ClickHouse team will then present how ClickHouse is used for real time financial data analytics, including tick data, trade analytics and risk management.
-* [**AWS re:Invent**](https://clickhouse.com/company/events/aws-reinvent) Core members of the ClickHouse team -- including 2 of our founders -- will be at re:Invent from November 29 to December 3. We are available on the show floor, but are also determining interest in holding an event during the time there.
+* [**ClickHouse Meetup at the RELEX Solutions office in Stockholm**](https://www.meetup.com/clickhouse-stockholm-user-group/events/289492084/) - Dec 1 - Formulate by RELEX is a Swedish promotion planning and analytics company. They will share why they chose ClickHouse for their real time analytics and forecasting solution. The ClickHouse team will then present how ClickHouse is used for real time financial data analytics, including tick data, trade analytics and risk management.
+* [**ClickHouse Meetup at the Deutsche Bank office in Berlin**](https://www.meetup.com/clickhouse-berlin-user-group/events/289311596/) - Dec 5 - Hear from Deutsche Bank on why they chose ClickHouse for big sensitive data in a regulated environment. The ClickHouse team will then present how ClickHouse is used for real time financial data analytics, including tick data, trade analytics and risk management.
+* [**ClickHouse Meetup at the Rokt offices in Manhattan**](https://www.meetup.com/clickhouse-new-york-user-group/events/289403909/) - Dec 6 - We are very excited to be holding our next in-person ClickHouse meetup at the Rokt offices in Manhattan. Featuring talks from Bloomberg, Disney Streaming, Prequel, Rokt, and ClickHouse
@@ -1,3 +1,6 @@
+<!--
+the file is autogenerated by utils/security-generator/generate_security.py
+-->
+
 # Security Policy
 
|
|||||||
|
|
||||||
| Version | Supported |
|
| Version | Supported |
|
||||||
|:-|:-|
|
|:-|:-|
|
||||||
|
| 22.11 | ✔️ |
|
||||||
| 22.10 | ✔️ |
|
| 22.10 | ✔️ |
|
||||||
| 22.9 | ✔️ |
|
| 22.9 | ✔️ |
|
||||||
| 22.8 | ✔️ |
|
| 22.8 | ✔️ |
|
||||||
@ -61,5 +65,5 @@ As the security issue moves from triage, to identified fix, to release planning
|
|||||||
|
|
||||||
## Public Disclosure Timing
|
## Public Disclosure Timing
|
||||||
|
|
||||||
A public disclosure date is negotiated by the ClickHouse maintainers and the bug submitter. We prefer to fully disclose the bug as soon as possible once a user mitigation is available. It is reasonable to delay disclosure when the bug or the fix is not yet fully understood, the solution is not well-tested, or for vendor coordination. The timeframe for disclosure is from immediate (especially if it's already publicly known) to 90 days. For a vulnerability with a straightforward mitigation, we expect the report date to disclosure date to be on the order of 7 days.
|
A public disclosure date is negotiated by the ClickHouse maintainers and the bug submitter. We prefer to fully disclose the bug as soon as possible once a user mitigation is available. It is reasonable to delay disclosure when the bug or the fix is not yet fully understood, the solution is not well-tested, or for vendor coordination. The timeframe for disclosure is from immediate (especially if it's already publicly known) to 90 days. For a vulnerability with a straightforward mitigation, we expect the report date to disclosure date to be on the order of 7 days.
|
||||||
|
|
||||||
|
@@ -12,7 +12,21 @@
 template <typename To, typename From>
 std::decay_t<To> bit_cast(const From & from)
 {
+    /**
+     * Assume the source value is 0xAABBCCDD (i.e. sizeof(from) == 4).
+     * Its BE representation is 0xAABBCCDD, the LE representation is 0xDDCCBBAA.
+     * Further assume, sizeof(res) == 8 and that res is initially zeroed out.
+     * With LE, the result after bit_cast will be 0xDDCCBBAA00000000 --> input value == output value.
+     * With BE, the result after bit_cast will be 0x00000000AABBCCDD --> input value == output value.
+     */
     To res {};
-    memcpy(static_cast<void*>(&res), &from, std::min(sizeof(res), sizeof(from)));
+    if constexpr (std::endian::native == std::endian::little)
+        memcpy(static_cast<void*>(&res), &from, std::min(sizeof(res), sizeof(from)));
+    else
+    {
+        uint32_t offset_to = (sizeof(res) > sizeof(from)) ? (sizeof(res) - sizeof(from)) : 0;
+        uint32_t offset_from = (sizeof(from) > sizeof(res)) ? (sizeof(from) - sizeof(res)) : 0;
+        memcpy(reinterpret_cast<char *>(&res) + offset_to, reinterpret_cast<const char *>(&from) + offset_from, std::min(sizeof(res), sizeof(from)));
+    }
     return res;
 }
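The comment in the hunk above reasons about in-memory byte layouts. A hedged way to observe the little-endian layout from the SQL side (editorial addition; assumes `clickhouse-local` is available and `reinterpretAsString` behaves as documented):

```bash
# On little-endian hardware, the in-memory bytes of UInt32 0xAABBCCDD
# are DD CC BB AA, which is exactly what reinterpretAsString exposes.
clickhouse-local --query "SELECT hex(reinterpretAsString(toUInt32(0xAABBCCDD)))"
# Expected output on LE hardware: DDCCBBAA
```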
@@ -1,8 +1,10 @@
 #if defined(OS_LINUX)
 #    include <sys/syscall.h>
 #endif
+#include <cstdlib>
 #include <unistd.h>
 #include <base/safeExit.h>
+#include <base/defines.h> /// for THREAD_SANITIZER
 
 [[noreturn]] void safeExit(int code)
 {
@@ -220,13 +220,13 @@ struct statx {
     uint32_t stx_dev_minor;
     uint64_t spare[14];
 };
-#endif
 
 int statx(int fd, const char *restrict path, int flag,
           unsigned int mask, struct statx *restrict statxbuf)
 {
     return syscall(SYS_statx, fd, path, flag, mask, statxbuf);
 }
+#endif
 
 #include <syscall.h>
@@ -8,6 +8,14 @@
 #include <link.h> // ElfW
 #include <errno.h>
 
+#include "syscall.h"
+
+#if defined(__has_feature)
+#if __has_feature(memory_sanitizer)
+#include <sanitizer/msan_interface.h>
+#endif
+#endif
+
 #define ARRAY_SIZE(a) sizeof((a))/sizeof((a[0]))
 
 /// Suppress TSan since it is possible for this code to be called from multiple threads,
@@ -39,7 +47,9 @@ ssize_t __retry_read(int fd, void * buf, size_t count)
 {
     for (;;)
     {
-        ssize_t ret = read(fd, buf, count);
+        // We cannot use the read syscall as it will be intercepted by sanitizers, which aren't
+        // initialized yet. Emit the syscall directly.
+        ssize_t ret = __syscall_ret(__syscall(SYS_read, fd, buf, count));
         if (ret == -1)
         {
             if (errno == EINTR)
@@ -90,6 +100,11 @@ static unsigned long NO_SANITIZE_THREAD __auxv_init_procfs(unsigned long type)
     _Static_assert(sizeof(aux) < 4096, "Unexpected sizeof(aux)");
     while (__retry_read(fd, &aux, sizeof(aux)) == sizeof(aux))
     {
+#if defined(__has_feature)
+#if __has_feature(memory_sanitizer)
+        __msan_unpoison(&aux, sizeof(aux));
+#endif
+#endif
         if (aux.a_type == AT_NULL)
         {
             break;
@@ -2,11 +2,11 @@
 
 # NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION,
 # only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
-SET(VERSION_REVISION 54468)
+SET(VERSION_REVISION 54469)
 SET(VERSION_MAJOR 22)
-SET(VERSION_MINOR 11)
+SET(VERSION_MINOR 12)
 SET(VERSION_PATCH 1)
-SET(VERSION_GITHASH 98ab5a3c189232ea2a3dddb9d2be7196ae8b3434)
+SET(VERSION_GITHASH 0d211ed19849fe44b0e43fdebe2c15d76d560a77)
-SET(VERSION_DESCRIBE v22.11.1.1-testing)
+SET(VERSION_DESCRIBE v22.12.1.1-testing)
-SET(VERSION_STRING 22.11.1.1)
+SET(VERSION_STRING 22.12.1.1)
 # end of autochange
@@ -23,6 +23,7 @@ set(THREADS_PREFER_PTHREAD_FLAG ON)
 find_package(Threads REQUIRED)
 
 include (cmake/cxx.cmake)
+link_libraries(global-group)
 
 target_link_libraries(global-group INTERFACE
     $<TARGET_PROPERTY:global-libs,INTERFACE_LINK_LIBRARIES>
@@ -24,6 +24,7 @@ find_package(Threads REQUIRED)
 
 include (cmake/unwind.cmake)
 include (cmake/cxx.cmake)
+link_libraries(global-group)
 
 target_link_libraries(global-group INTERFACE
     $<TARGET_PROPERTY:global-libs,INTERFACE_LINK_LIBRARIES>
@@ -34,6 +34,13 @@ set(CMAKE_C_STANDARD_LIBRARIES ${DEFAULT_LIBS})
 set(THREADS_PREFER_PTHREAD_FLAG ON)
 find_package(Threads REQUIRED)
 
+include (cmake/unwind.cmake)
+include (cmake/cxx.cmake)
+
+# Delay the call to link the global interface after the libc++ libraries are included to avoid circular dependencies
+# which are ok with static libraries but not with dynamic ones
+link_libraries(global-group)
+
 if (NOT OS_ANDROID)
     if (NOT USE_MUSL)
         # Our compatibility layer doesn't build under Android, many errors in musl.
@@ -42,9 +49,6 @@ if (NOT OS_ANDROID)
     add_subdirectory(base/harmful)
 endif ()
 
-include (cmake/unwind.cmake)
-include (cmake/cxx.cmake)
-
 target_link_libraries(global-group INTERFACE
     -Wl,--start-group
     $<TARGET_PROPERTY:global-libs,INTERFACE_LINK_LIBRARIES>
@@ -16,7 +16,9 @@ endmacro()
 
 if (SANITIZE)
     if (SANITIZE STREQUAL "address")
-        set (ASAN_FLAGS "-fsanitize=address -fsanitize-address-use-after-scope")
+        # LLVM-15 has a bug in Address Sanitizer, preventing the usage of 'sanitize-address-use-after-scope',
+        # see https://github.com/llvm/llvm-project/issues/58633
+        set (ASAN_FLAGS "-fsanitize=address -fno-sanitize-address-use-after-scope")
         set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} ${ASAN_FLAGS}")
         set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} ${ASAN_FLAGS}")
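A hedged sketch of what the flag change above means in practice, outside the build system (editorial addition; `test.cpp` is a hypothetical file and clang-15 is assumed to be installed):

```bash
# Build with ASan but with use-after-scope detection disabled,
# mirroring the workaround above for the LLVM-15 bug.
clang-15 -fsanitize=address -fno-sanitize-address-use-after-scope \
    -g -O1 test.cpp -o test_asan
./test_asan   # stack-use-after-scope will no longer be reported
```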
@@ -1,3 +1,13 @@
+# https://stackoverflow.com/a/62311397/328260
+macro (get_all_targets_recursive targets dir)
+    get_property (subdirectories DIRECTORY ${dir} PROPERTY SUBDIRECTORIES)
+    foreach (subdir ${subdirectories})
+        get_all_targets_recursive (${targets} ${subdir})
+    endforeach ()
+    get_property (current_targets DIRECTORY ${dir} PROPERTY BUILDSYSTEM_TARGETS)
+    list (APPEND ${targets} ${current_targets})
+endmacro ()
+
 # When you will try to link target with the directory (that exists), cmake will
 # skip this without an error, only the following warning will be reported:
 #
@@ -18,23 +28,12 @@
 # -- but cannot be used with link_libraries()
 # - use BUILDSYSTEM_TARGETS property to get list of all targets and sanitize
 # -- this will work.
-
-# https://stackoverflow.com/a/62311397/328260
 function (get_all_targets var)
     set (targets)
     get_all_targets_recursive (targets ${CMAKE_CURRENT_SOURCE_DIR})
     set (${var} ${targets} PARENT_SCOPE)
 endfunction()
-macro (get_all_targets_recursive targets dir)
-    get_property (subdirectories DIRECTORY ${dir} PROPERTY SUBDIRECTORIES)
-    foreach (subdir ${subdirectories})
-        get_all_targets_recursive (${targets} ${subdir})
-    endforeach ()
-    get_property (current_targets DIRECTORY ${dir} PROPERTY BUILDSYSTEM_TARGETS)
-    list (APPEND ${targets} ${current_targets})
-endmacro ()
-
-macro (sanitize_link_libraries target)
+function (sanitize_link_libraries target)
     get_target_property(target_type ${target} TYPE)
     if (${target_type} STREQUAL "INTERFACE_LIBRARY")
         get_property(linked_libraries TARGET ${target} PROPERTY INTERFACE_LINK_LIBRARIES)
@@ -48,9 +47,35 @@ macro (sanitize_link_libraries target)
             message(FATAL_ERROR "${target} requested to link with directory: ${linked_library}")
         endif()
     endforeach()
-endmacro()
+endfunction()
 
 get_all_targets (all_targets)
 foreach (target ${all_targets})
     sanitize_link_libraries(${target})
 endforeach()
+
+#
+# Do not allow to define -W* from contrib publicly (INTERFACE/PUBLIC).
+#
+function (get_contrib_targets var)
+    set (targets)
+    get_all_targets_recursive (targets ${CMAKE_CURRENT_SOURCE_DIR}/contrib)
+    set (${var} ${targets} PARENT_SCOPE)
+endfunction()
+function (sanitize_interface_flags target)
+    get_target_property(target_type ${target} TYPE)
+    get_property(compile_definitions TARGET ${target} PROPERTY INTERFACE_COMPILE_DEFINITIONS)
+    get_property(compile_options TARGET ${target} PROPERTY INTERFACE_COMPILE_OPTIONS)
+    if (NOT "${compile_options}" STREQUAL "")
+        message(FATAL_ERROR "${target} set INTERFACE_COMPILE_OPTIONS to ${compile_options}. This is forbidden.")
+    endif()
+    if ("${compile_definitions}" MATCHES "-Wl,")
+        # linker option - OK
+    elseif ("${compile_definitions}" MATCHES "-W")
+        message(FATAL_ERROR "${target} contains ${compile_definitions} flags in INTERFACE_COMPILE_DEFINITIONS. This is forbidden.")
+    endif()
+endfunction()
+get_contrib_targets (contrib_targets)
+foreach (contrib_target ${contrib_targets})
+    sanitize_interface_flags(${contrib_target})
+endforeach()
@@ -58,13 +58,19 @@ if (NOT LINKER_NAME)
         find_program (LLD_PATH NAMES "ld.lld")
         find_program (GOLD_PATH NAMES "ld.gold")
     elseif (COMPILER_CLANG)
-        find_program (LLD_PATH NAMES "ld.lld-${COMPILER_VERSION_MAJOR}" "lld-${COMPILER_VERSION_MAJOR}" "ld.lld" "lld")
+        # llvm lld is a generic driver.
+        # Invoke ld.lld (Unix), ld64.lld (macOS), lld-link (Windows), wasm-ld (WebAssembly) instead
+        if (OS_LINUX)
+            find_program (LLD_PATH NAMES "ld.lld-${COMPILER_VERSION_MAJOR}" "ld.lld")
+        elseif (OS_DARWIN)
+            find_program (LLD_PATH NAMES "ld64.lld-${COMPILER_VERSION_MAJOR}" "ld64.lld")
+        endif ()
         find_program (GOLD_PATH NAMES "ld.gold" "gold")
     endif ()
 endif()
 
-if (OS_LINUX AND NOT LINKER_NAME)
+if ((OS_LINUX OR OS_DARWIN) AND NOT LINKER_NAME)
-    # prefer lld linker over gold or ld on linux
+    # prefer lld linker over gold or ld on linux and macos
     if (LLD_PATH)
         if (COMPILER_GCC)
             # GCC driver requires one of supported linker names like "lld".
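A hedged shell equivalent of the `find_program` search order above (editorial addition; clang 15 is assumed, so the versioned names are illustrative):

```bash
# Probe for the platform-specific lld driver the same way the CMake code does:
# the versioned name first, then the unversioned fallback.
case "$(uname -s)" in
    Linux)  candidates="ld.lld-15 ld.lld" ;;
    Darwin) candidates="ld64.lld-15 ld64.lld" ;;
esac
for name in $candidates; do
    command -v "$name" && break
done
```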
contrib/CMakeLists.txt

@@ -167,7 +167,9 @@ add_contrib (c-ares-cmake c-ares)
 add_contrib (qpl-cmake qpl)
 add_contrib (morton-nd-cmake morton-nd)
 
-add_contrib(annoy-cmake annoy)
+add_contrib (annoy-cmake annoy)
+
+add_contrib (xxHash-cmake xxHash)
 
 # Put all targets defined here and in subdirectories under "contrib/<immediate-subdir>" folders in GUI-based IDEs.
 # Some of third-party projects may override CMAKE_FOLDER or FOLDER property of their targets, so they would not appear
contrib/NuRaft

@@ -1 +1 @@
-Subproject commit 1be805e7cb2494aa8170015493474379b0362dfc
+Subproject commit e4e746a24eb56861a86f3672771e3308d8c40722
@@ -57,7 +57,7 @@ add_library(cxx ${SRCS})
 set_target_properties(cxx PROPERTIES FOLDER "contrib/libcxx-cmake")
 
 target_include_directories(cxx SYSTEM BEFORE PRIVATE $<BUILD_INTERFACE:${LIBCXX_SOURCE_DIR}/src>)
-target_include_directories(cxx SYSTEM BEFORE PUBLIC $<BUILD_INTERFACE:${LIBCXX_SOURCE_DIR}/include>)
+target_include_directories(cxx SYSTEM BEFORE PUBLIC $<$<COMPILE_LANGUAGE:CXX>:$<BUILD_INTERFACE:${LIBCXX_SOURCE_DIR}/include>>)
 target_compile_definitions(cxx PRIVATE -D_LIBCPP_BUILDING_LIBRARY -DLIBCXX_BUILDING_LIBCXXABI)
 
 # Enable capturing stack traces for all exceptions.
contrib/xxHash (new submodule)

@@ -0,0 +1 @@
+Subproject commit 3078dc6039f8c0bffcb1904f81cfe6b2c3209435

contrib/xxHash-cmake/CMakeLists.txt (new file)

@@ -0,0 +1,13 @@
+set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/xxHash")
+set (SRCS
+    "${LIBRARY_DIR}/xxhash.c"
+)
+
+add_library(xxHash ${SRCS})
+target_include_directories(xxHash SYSTEM BEFORE INTERFACE "${LIBRARY_DIR}")
+
+# XXH_INLINE_ALL - Make all functions inline, with implementations being directly included within xxhash.h. Inlining functions is beneficial for speed on small keys.
+# https://github.com/Cyan4973/xxHash/tree/v0.8.1#build-modifiers
+target_compile_definitions(xxHash PUBLIC XXH_INLINE_ALL)
+
+add_library(ch_contrib::xxHash ALIAS xxHash)
@@ -25,6 +25,7 @@ done
 sed -i '/onBrokenMarkdownLinks:/ s/ignore/error/g' docusaurus.config.js
 
 if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
+    export CI=true
     exec yarn build "$@"
 fi
@@ -6,29 +6,24 @@ FROM clickhouse/test-util:$FROM_TAG
 # Rust toolchain and libraries
 ENV RUSTUP_HOME=/rust/rustup
 ENV CARGO_HOME=/rust/cargo
-RUN curl https://sh.rustup.rs -sSf | bash -s -- -y
-RUN chmod 777 -R /rust
 ENV PATH="/rust/cargo/env:${PATH}"
 ENV PATH="/rust/cargo/bin:${PATH}"
-RUN rustup target add aarch64-unknown-linux-gnu && \
-    rustup target add x86_64-apple-darwin && \
-    rustup target add x86_64-unknown-freebsd && \
-    rustup target add aarch64-apple-darwin && \
-    rustup target add powerpc64le-unknown-linux-gnu
-RUN apt-get install \
+RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \
+    chmod 777 -R /rust && \
+    rustup target add aarch64-unknown-linux-gnu && \
+    rustup target add x86_64-apple-darwin && \
+    rustup target add x86_64-unknown-freebsd && \
+    rustup target add aarch64-apple-darwin && \
+    rustup target add powerpc64le-unknown-linux-gnu
+
+RUN apt-get update && \
+    apt-get install --yes \
     gcc-aarch64-linux-gnu \
     build-essential \
     libc6 \
     libc6-dev \
-    libc6-dev-arm64-cross \
-    --yes
+    libc6-dev-arm64-cross && \
+    apt-get clean
 
-# Install CMake 3.20+ for Rust compilation
-# Used https://askubuntu.com/a/1157132 as reference
-RUN apt purge cmake --yes
-RUN wget -O - https://apt.kitware.com/keys/kitware-archive-latest.asc 2>/dev/null | gpg --dearmor - | tee /etc/apt/trusted.gpg.d/kitware.gpg >/dev/null
-RUN apt-add-repository 'deb https://apt.kitware.com/ubuntu/ focal main'
-RUN apt update && apt install cmake --yes
 
 ENV CC=clang-${LLVM_VERSION}
 ENV CXX=clang++-${LLVM_VERSION}
@@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="22.10.2.11"
+ARG VERSION="22.11.1.1360"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 
 # user/group precreated explicitly with fixed uid/gid on purpose.
@@ -21,7 +21,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
 
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="deb https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
-ARG VERSION="22.10.2.11"
+ARG VERSION="22.11.1.1360"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 
 # set non-empty deb_location_url url to create a docker image
@@ -80,6 +80,16 @@ RUN arch=${TARGETARCH:-amd64} \
     && mkdir -p /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client \
     && chmod ugo+Xrw -R /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client
 
+# Remove as much of Ubuntu as possible.
+# ClickHouse does not need Ubuntu. It can run on top of Linux kernel without any OS distribution.
+# ClickHouse does not need Docker at all. ClickHouse is above all that.
+# It does not care about Ubuntu, Docker, or other cruft and you should neither.
+# The fact that this Docker image is based on Ubuntu is just a misconception.
+# Some vulnerability scanners are arguing about Ubuntu, which is not relevant to ClickHouse at all.
+# ClickHouse does not care when you report false vulnerabilities by running some Docker scanners.
+
+RUN apt-get remove --purge -y libksba8 && apt-get autoremove -y
+
 # we need to allow "others" access to clickhouse folder, because docker container
 # can be started with arbitrary uid (openshift usecase)
@@ -137,6 +137,7 @@ function clone_submodules
     contrib/hashidsxx
     contrib/c-ares
     contrib/morton-nd
+    contrib/xxHash
 )
 
 git submodule sync
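A hedged sketch of the shallow-checkout pattern `clone_submodules` uses, applied to the newly added submodule (editorial addition; the subset list is illustrative and the real script may pass different flags):

```bash
# Check out only the submodules the fast test needs, at depth 1.
SUBMODULES_TO_UPDATE=(contrib/morton-nd contrib/xxHash)
git submodule sync
git submodule update --depth=1 --init "${SUBMODULES_TO_UPDATE[@]}"
```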
@@ -38,7 +38,7 @@ COPY * /
 SHELL ["/bin/bash", "-c"]
 CMD set -o pipefail \
     && cd /workspace \
-    && /run-fuzzer.sh 2>&1 | ts "$(printf '%%Y-%%m-%%d %%H:%%M:%%S\t')" | tee main.log
+    && timeout -s 9 1h /run-fuzzer.sh 2>&1 | ts "$(printf '%%Y-%%m-%%d %%H:%%M:%%S\t')" | tee main.log
 
 # docker run --network=host --volume <workspace>:/workspace -e PR_TO_TEST=<> -e SHA_TO_TEST=<> clickhouse/fuzzer
@@ -1,5 +1,5 @@
 #!/bin/bash
-# shellcheck disable=SC2086,SC2001,SC2046,SC2030,SC2031
+# shellcheck disable=SC2086,SC2001,SC2046,SC2030,SC2031,SC2010,SC2015
 
 set -x
 
@@ -10,11 +10,6 @@ set -e
 set -u
 set -o pipefail
 
-trap "exit" INT TERM
-# The watchdog is in the separate process group, so we have to kill it separately
-# if the script terminates earlier.
-trap 'kill $(jobs -pr) ${watchdog_pid:-} ||:' EXIT
-
 stage=${stage:-}
 script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
 echo "$script_dir"
@@ -110,26 +105,6 @@ function configure
 EOL
 }
 
-function watchdog
-{
-    sleep 1800
-
-    echo "Fuzzing run has timed out"
-    for _ in {1..10}
-    do
-        # Only kill by pid the particular client that runs the fuzzing, or else
-        # we can kill some clickhouse-client processes this script starts later,
-        # e.g. for checking server liveness.
-        if ! kill $fuzzer_pid
-        then
-            break
-        fi
-        sleep 1
-    done
-
-    kill -9 -- $fuzzer_pid ||:
-}
-
 function filter_exists_and_template
 {
     local path
@@ -175,10 +150,8 @@ function fuzz
 
     mkdir -p /var/run/clickhouse-server
 
-    # interferes with gdb
-    export CLICKHOUSE_WATCHDOG_ENABLE=0
     # NOTE: we use process substitution here to preserve keep $! as a pid of clickhouse-server
-    clickhouse-server --config-file db/config.xml --pid-file /var/run/clickhouse-server/clickhouse-server.pid -- --path db > >(tail -100000 > server.log) 2>&1 &
+    clickhouse-server --config-file db/config.xml --pid-file /var/run/clickhouse-server/clickhouse-server.pid -- --path db 2>&1 | pigz > server.log.gz &
     server_pid=$!
 
    kill -0 $server_pid
@@ -214,7 +187,7 @@ detach
 quit
 " > script.gdb
 
-    gdb -batch -command script.gdb -p $server_pid &
+    gdb -batch -command script.gdb -p "$(cat /var/run/clickhouse-server/clickhouse-server.pid)" &
     sleep 5
     # gdb will send SIGSTOP, spend some time loading debug info and then send SIGCONT, wait for it (up to send_timeout, 300s)
     time clickhouse-client --query "SELECT 'Connected to clickhouse-server after attaching gdb'" ||:
|
|||||||
# SC2012: Use find instead of ls to better handle non-alphanumeric filenames. They are all alphanumeric.
|
# SC2012: Use find instead of ls to better handle non-alphanumeric filenames. They are all alphanumeric.
|
||||||
# SC2046: Quote this to prevent word splitting. Actually I need word splitting.
|
# SC2046: Quote this to prevent word splitting. Actually I need word splitting.
|
||||||
# shellcheck disable=SC2012,SC2046
|
# shellcheck disable=SC2012,SC2046
|
||||||
clickhouse-client \
|
timeout -s TERM --preserve-status 30m clickhouse-client \
|
||||||
--receive_timeout=10 \
|
--receive_timeout=10 \
|
||||||
--receive_data_timeout_ms=10000 \
|
--receive_data_timeout_ms=10000 \
|
||||||
--stacktrace \
|
--stacktrace \
|
||||||
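A hedged sketch of the `timeout` invocation added above (editorial addition): `--preserve-status` makes `timeout` exit with the command's own status, or with 128 plus the signal number if it had to kill the command, so the fuzzer's exit-code handling keeps working.

```bash
# Exits 0: the command finishes before the limit.
timeout -s TERM --preserve-status 5 sleep 1; echo "status: $?"

# The command is killed by SIGTERM; with --preserve-status the exit code
# reflects the signal (143 = 128 + 15) instead of timeout's own 124.
timeout -s TERM --preserve-status 1 sleep 10; echo "status: $?"
```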
@@ -249,16 +222,6 @@ quit
     fuzzer_pid=$!
     echo "Fuzzer pid is $fuzzer_pid"
 
-    # Start a watchdog that should kill the fuzzer on timeout.
-    # The shell won't kill the child sleep when we kill it, so we have to put it
-    # into a separate process group so that we can kill them all.
-    set -m
-    watchdog &
-    watchdog_pid=$!
-    set +m
-    # Check that the watchdog has started.
-    kill -0 $watchdog_pid
-
     # Wait for the fuzzer to complete.
     # Note that the 'wait || ...' thing is required so that the script doesn't
     # exit because of 'set -e' when 'wait' returns nonzero code.
@@ -266,8 +229,6 @@ quit
     wait "$fuzzer_pid" || fuzzer_exit_code=$?
     echo "Fuzzer exit code is $fuzzer_exit_code"
 
-    kill -- -$watchdog_pid ||:
-
     # If the server dies, most often the fuzzer returns code 210: connection
     # refused, and sometimes also code 32: attempt to read after eof. For
     # simplicity, check again whether the server is accepting connections, using
@@ -297,7 +258,7 @@ quit
         # The server has died.
         task_exit_code=210
         echo "failure" > status.txt
-        if ! grep --text -ao "Received signal.*\|Logical error.*\|Assertion.*failed\|Failed assertion.*\|.*runtime error: .*\|.*is located.*\|SUMMARY: AddressSanitizer:.*\|SUMMARY: MemorySanitizer:.*\|SUMMARY: ThreadSanitizer:.*\|.*_LIBCPP_ASSERT.*" server.log > description.txt
+        if ! zgrep --text -ao "Received signal.*\|Logical error.*\|Assertion.*failed\|Failed assertion.*\|.*runtime error: .*\|.*is located.*\|SUMMARY: AddressSanitizer:.*\|SUMMARY: MemorySanitizer:.*\|SUMMARY: ThreadSanitizer:.*\|.*_LIBCPP_ASSERT.*" server.log.gz > description.txt
         then
             echo "Lost connection to server. See the logs." > description.txt
         fi
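A hedged sketch of the `grep` to `zgrep` switch above (editorial addition): since the server log is now gzipped on the fly, pattern extraction has to read through the compression, and `zgrep` accepts the same options as `grep`.

```bash
# zgrep is grep over gzip-compressed input; the pattern and file name
# mirror the script above in simplified form.
echo "Logical error: something broke" | gzip > server.log.gz
zgrep --text -ao "Logical error.*" server.log.gz > description.txt
cat description.txt   # -> Logical error: something broke
```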
@@ -333,6 +294,8 @@ quit
         pigz core.*
         mv core.*.gz core.gz
     fi
+
+    dmesg -T | grep -q -F -e 'Out of memory: Killed process' -e 'oom_reaper: reaped process' -e 'oom-kill:constraint=CONSTRAINT_NONE' && echo "OOM in dmesg" ||:
 }
 
 case "$stage" in
@@ -391,8 +354,9 @@ th { cursor: pointer; }
 
 <h1>AST Fuzzer for PR #${PR_TO_TEST} @ ${SHA_TO_TEST}</h1>
 <p class="links">
+<a href="runlog.log">runlog.log</a>
 <a href="fuzzer.log">fuzzer.log</a>
-<a href="server.log">server.log</a>
+<a href="server.log.gz">server.log.gz</a>
 <a href="main.log">main.log</a>
 ${CORE_LINK}
 </p>
@@ -15,8 +15,8 @@ if [ -z "$CLICKHOUSE_REPO_PATH" ]; then
     ls -lath ||:
 fi
 
-cd "$CLICKHOUSE_REPO_PATH/tests/jepsen.clickhouse-keeper"
+cd "$CLICKHOUSE_REPO_PATH/tests/jepsen.clickhouse"
 
-(lein run test-all --nodes-file "$NODES_FILE_PATH" --username "$NODES_USERNAME" --logging-json --password "$NODES_PASSWORD" --time-limit "$TIME_LIMIT" --concurrency 50 -r 50 --snapshot-distance 100 --stale-log-gap 100 --reserved-log-items 10 --lightweight-run --clickhouse-source "$CLICKHOUSE_PACKAGE" -q --test-count "$TESTS_TO_RUN" || true) | tee "$TEST_OUTPUT/jepsen_run_all_tests.log"
+(lein run keeper test-all --nodes-file "$NODES_FILE_PATH" --username "$NODES_USERNAME" --logging-json --password "$NODES_PASSWORD" --time-limit "$TIME_LIMIT" --concurrency 50 -r 50 --snapshot-distance 100 --stale-log-gap 100 --reserved-log-items 10 --lightweight-run --clickhouse-source "$CLICKHOUSE_PACKAGE" -q --test-count "$TESTS_TO_RUN" || true) | tee "$TEST_OUTPUT/jepsen_run_all_tests.log"
 
 mv store "$TEST_OUTPUT/"
docker/test/server-jepsen/Dockerfile (new file)

@@ -0,0 +1,43 @@
+# rebuild in #33610
+# docker build -t clickhouse/server-jepsen-test .
+ARG FROM_TAG=latest
+FROM clickhouse/test-base:$FROM_TAG
+
+ENV DEBIAN_FRONTEND=noninteractive
+ENV CLOJURE_VERSION=1.10.3.814
+
+# arguments
+ENV PR_TO_TEST=""
+ENV SHA_TO_TEST=""
+
+ENV NODES_USERNAME="root"
+ENV NODES_PASSWORD=""
+ENV TESTS_TO_RUN="8"
+ENV TIME_LIMIT="30"
+
+ENV KEEPER_NODE=""
+
+# volumes
+ENV NODES_FILE_PATH="/nodes.txt"
+ENV TEST_OUTPUT="/test_output"
+
+RUN mkdir "/root/.ssh"
+RUN touch "/root/.ssh/known_hosts"
+
+# install java
+RUN apt-get update && apt-get install default-jre default-jdk libjna-java libjna-jni ssh gnuplot graphviz --yes --no-install-recommends
+
+# install clojure
+RUN curl -O "https://download.clojure.org/install/linux-install-${CLOJURE_VERSION}.sh" && \
+    chmod +x "linux-install-${CLOJURE_VERSION}.sh" && \
+    bash "./linux-install-${CLOJURE_VERSION}.sh"
+
+# install leiningen
+RUN curl -O "https://raw.githubusercontent.com/technomancy/leiningen/stable/bin/lein" && \
+    chmod +x ./lein && \
+    mv ./lein /usr/bin
+
+COPY run.sh /
+
+CMD ["/bin/bash", "/run.sh"]
22
docker/test/server-jepsen/run.sh
Normal file
@ -0,0 +1,22 @@
#!/usr/bin/env bash
set -euo pipefail


CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-15_relwithdebuginfo_none_unsplitted_disable_False_binary/clickhouse"}
CLICKHOUSE_REPO_PATH=${CLICKHOUSE_REPO_PATH:=""}


if [ -z "$CLICKHOUSE_REPO_PATH" ]; then
    CLICKHOUSE_REPO_PATH=ch
    rm -rf ch ||:
    mkdir ch ||:
    wget -nv -nd -c "https://clickhouse-test-reports.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/repo/clickhouse_no_subs.tar.gz"
    tar -C ch --strip-components=1 -xf clickhouse_no_subs.tar.gz
    ls -lath ||:
fi

cd "$CLICKHOUSE_REPO_PATH/tests/jepsen.clickhouse"

(lein run server test-all --keeper "$KEEPER_NODE" --nodes-file "$NODES_FILE_PATH" --username "$NODES_USERNAME" --logging-json --password "$NODES_PASSWORD" --time-limit "$TIME_LIMIT" --concurrency 50 -r 50 --clickhouse-source "$CLICKHOUSE_PACKAGE" --test-count "$TESTS_TO_RUN" || true) | tee "$TEST_OUTPUT/jepsen_run_all_tests.log"

mv store "$TEST_OUTPUT/"
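The image is configured entirely through the environment variables declared in the Dockerfile, with the node list and output directory mounted at the paths set by `NODES_FILE_PATH` and `TEST_OUTPUT`. A hypothetical invocation (host paths, SHA, and credentials are placeholders, not taken from CI):

```bash
# sketch only: run the server Jepsen suite against pre-provisioned nodes
docker run --rm \
    -e PR_TO_TEST=0 -e SHA_TO_TEST="<commit-sha>" \
    -e KEEPER_NODE="<keeper-host>" -e NODES_PASSWORD="<ssh-password>" \
    -v "$PWD/nodes.txt:/nodes.txt" \
    -v "$PWD/test_output:/test_output" \
    clickhouse/server-jepsen-test
```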
@ -131,7 +131,14 @@ function stop()
# Preserve the pid, since the server can hang after the PID file is deleted.
pid="$(cat /var/run/clickhouse-server/clickhouse-server.pid)"

clickhouse stop --do-not-kill && return
# --max-tries is supported only since 22.12
if dpkg --compare-versions "$(clickhouse local -q 'select version()')" ge "22.12"; then
    # Increase the default waiting timeout for sanitizers and debug builds
    clickhouse stop --max-tries 180 --do-not-kill && return
else
    clickhouse stop --do-not-kill && return
fi

# We failed to stop the server with SIGTERM. Maybe it hung; let's collect stacktraces.
kill -TERM "$(pidof gdb)" ||:
sleep 5
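The hunk above gates the new `--max-tries` flag on the running binary's own reported version, because older binaries reject the flag. The same `dpkg --compare-versions` pattern works for any version-dependent flag; a standalone sketch of the gate (threshold and flags as in the diff):

```bash
# ask the binary for its own version, then compare with dpkg's version comparator
installed="$(clickhouse local -q 'select version()')"
if dpkg --compare-versions "$installed" ge "22.12"; then
    clickhouse stop --max-tries 180 --do-not-kill   # flag exists since 22.12
else
    clickhouse stop --do-not-kill                   # older binaries reject --max-tries
fi
```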
@ -254,7 +261,7 @@ sudo chgrp clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_defau

start

./stress --hung-check --drop-databases --output-folder test_output --skip-func-tests "$SKIP_TESTS_OPTION" \
./stress --hung-check --drop-databases --output-folder test_output --skip-func-tests "$SKIP_TESTS_OPTION" --global-time-limit 1200 \
    && echo -e 'Test script exit code\tOK' >> /test_output/test_results.tsv \
    || echo -e 'Test script failed\tFAIL' >> /test_output/test_results.tsv

@ -388,6 +395,11 @@ else
rm -f /etc/clickhouse-server/config.d/storage_conf.xml ||:
rm -f /etc/clickhouse-server/config.d/azure_storage_conf.xml ||:

# Turn on after 22.12
rm -f /etc/clickhouse-server/config.d/compressed_marks_and_index.xml ||:
# it uses recently introduced settings which previous versions may not have
rm -f /etc/clickhouse-server/users.d/insert_keeper_retries.xml ||:

start

clickhouse-client --query="SELECT 'Server version: ', version()"
@ -448,11 +460,12 @@ else
# FIXME https://github.com/ClickHouse/ClickHouse/issues/39197 ("Missing columns: 'v3' while processing query: 'v3, k, v1, v2, p'")
# NOTE Incompatibility was introduced in https://github.com/ClickHouse/ClickHouse/pull/39263, it's expected
# ("This engine is deprecated and is not supported in transactions", "[Queue = DB::MergeMutateRuntimeQueue]: Code: 235. DB::Exception: Part")
# FIXME https://github.com/ClickHouse/ClickHouse/issues/39174 - bad mutation does not indicate backward incompatibility
echo "Check for Error messages in server log:"
zgrep -Fav -e "Code: 236. DB::Exception: Cancelled merging parts" \
    -e "Code: 236. DB::Exception: Cancelled mutating parts" \
    -e "REPLICA_IS_ALREADY_ACTIVE" \
    -e "REPLICA_IS_ALREADY_EXIST" \
    -e "REPLICA_ALREADY_EXISTS" \
    -e "ALL_REPLICAS_LOST" \
    -e "DDLWorker: Cannot parse DDL task query" \
    -e "RaftInstance: failed to accept a rpc connection due to error 125" \
@ -481,6 +494,9 @@ else
    -e "The set of parts restored in place of" \
    -e "(ReplicatedMergeTreeAttachThread): Initialization failed. Error" \
    -e "Code: 269. DB::Exception: Destination table is myself" \
    -e "Coordination::Exception: Connection loss" \
    -e "MutateFromLogEntryTask" \
    -e "No connection to ZooKeeper, cannot get shared table ID" \
    /var/log/clickhouse-server/clickhouse-server.backward.clean.log | zgrep -Fa "<Error>" > /test_output/bc_check_error_messages.txt \
    && echo -e 'Backward compatibility check: Error message in clickhouse-server.log (see bc_check_error_messages.txt)\tFAIL' >> /test_output/test_results.tsv \
    || echo -e 'Backward compatibility check: No Error messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv
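The grep chains in the two hunks above form an allowlist: known-benign messages are stripped first, and only whatever survives is counted as a failure. A minimal sketch of the same pattern with placeholder message strings (the real list is the one in the diff):

```bash
# -F fixed strings, -a treat as text, -v invert (drop allowlisted lines),
# then keep only <Error> records; a non-empty result marks the check FAIL
zgrep -Fav -e "known benign message 1" -e "known benign message 2" server.log.gz \
    | zgrep -Fa "<Error>" > errors.txt \
    && echo -e "error check\tFAIL" >> test_results.tsv \
    || echo -e "error check\tOK" >> test_results.tsv
```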
@ -1,7 +1,7 @@
# docker build -t clickhouse/style-test .
FROM ubuntu:20.04
ARG ACT_VERSION=0.2.25
ARG ACT_VERSION=0.2.33
ARG ACTIONLINT_VERSION=1.6.8
ARG ACTIONLINT_VERSION=1.6.22

# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
@ -13,6 +13,7 @@ RUN apt-get update \
    apt-transport-https \
    apt-utils \
    ca-certificates \
    curl \
    dnsutils \
    gnupg \
    iputils-ping \
@ -24,10 +25,16 @@ RUN apt-get update \
    && echo "${LLVM_PUBKEY_HASH} /tmp/llvm-snapshot.gpg.key" | sha384sum -c \
    && apt-key add /tmp/llvm-snapshot.gpg.key \
    && export CODENAME="$(lsb_release --codename --short | tr 'A-Z' 'a-z')" \
    && echo "deb [trusted=yes] https://apt.llvm.org/${CODENAME}/ llvm-toolchain-${CODENAME}-${LLVM_VERSION} main" >> \
    && echo "deb https://apt.llvm.org/${CODENAME}/ llvm-toolchain-${CODENAME}-${LLVM_VERSION} main" >> \
        /etc/apt/sources.list \
    && apt-get clean

# Install cmake 3.20+ for rust support
# Used https://askubuntu.com/a/1157132 as reference
RUN curl -s https://apt.kitware.com/keys/kitware-archive-latest.asc | \
    gpg --dearmor - > /etc/apt/trusted.gpg.d/kitware.gpg && \
    echo "deb https://apt.kitware.com/ubuntu/ $(lsb_release -cs) main" >> /etc/apt/sources.list

# initial packages
RUN apt-get update \
    && apt-get install \
@ -37,7 +44,6 @@ RUN apt-get update \
    clang-${LLVM_VERSION} \
    clang-tidy-${LLVM_VERSION} \
    cmake \
    curl \
    fakeroot \
    gdb \
    git \
249
docs/changelogs/v22.11.1.1360-stable.md
Normal file
@ -0,0 +1,249 @@
---
sidebar_position: 1
sidebar_label: 2022
---

# 2022 Changelog

### ClickHouse release v22.11.1.1360-stable (0d211ed1984) FIXME as compared to v22.10.1.1877-stable (98ab5a3c189)

#### Backward Incompatible Change
* The JSONExtract family of functions will now attempt to coerce to the requested type. [#41502](https://github.com/ClickHouse/ClickHouse/pull/41502) ([Márcio Martins](https://github.com/marcioapm)).

#### New Feature
* Add function `displayName`, closes [#36770](https://github.com/ClickHouse/ClickHouse/issues/36770). [#37681](https://github.com/ClickHouse/ClickHouse/pull/37681) ([hongbin](https://github.com/xlwh)).
* Added applied row-level policies to `system.query_log`. [#39819](https://github.com/ClickHouse/ClickHouse/pull/39819) ([Vladimir Chebotaryov](https://github.com/quickhouse)).
* Add Hudi and DeltaLake table engines, read-only, only for tables on S3. [#41054](https://github.com/ClickHouse/ClickHouse/pull/41054) ([Daniil Rubin](https://github.com/rubin-do)).
* Add 4LW command `csnp` for manually creating snapshots. Additionally, `lgif` was added to get Raft information for a specific node (e.g. index of last created snapshot, last committed log index). [#41766](https://github.com/ClickHouse/ClickHouse/pull/41766) ([JackyWoo](https://github.com/JackyWoo)).
* Support for Keeper request retries during insert into replicated merge trees. Apart from fault tolerance, it aims to provide a better user experience: avoid returning an error to the user during an insert if Keeper is restarted (for example, due to an upgrade). [#42607](https://github.com/ClickHouse/ClickHouse/pull/42607) ([Igor Nikonov](https://github.com/devcrafter)).
* Add function `ascii`, like in Spark: https://spark.apache.org/docs/latest/api/sql/#ascii. [#42670](https://github.com/ClickHouse/ClickHouse/pull/42670) ([李扬](https://github.com/taiyang-li)).
* Add function `pmod`, which returns a non-negative result based on modulo. [#42755](https://github.com/ClickHouse/ClickHouse/pull/42755) ([李扬](https://github.com/taiyang-li)).
* Published function `formatReadableDecimalSize`. [#42774](https://github.com/ClickHouse/ClickHouse/pull/42774) ([Alejandro](https://github.com/alexon1234)).
* Added S3 PUT and GET request-per-second rate throttling. Settings `s3_max_get_rps`, `s3_max_get_burst`, `s3_max_put_rps`, `s3_max_put_burst` are used to configure the token bucket throttler. Can be used with both S3 ObjectStorage and the S3 table function. Different limits can be configured for different S3 disks or endpoints. [#43014](https://github.com/ClickHouse/ClickHouse/pull/43014) ([Sergei Trifonov](https://github.com/serxa)).
* Add table functions `hudi` and `deltaLake`. [#43080](https://github.com/ClickHouse/ClickHouse/pull/43080) ([flynn](https://github.com/ucasfl)).
* Add function `factorial`, as in Impala or Spark. [#43110](https://github.com/ClickHouse/ClickHouse/pull/43110) ([李扬](https://github.com/taiyang-li)).
* Add function `randCanonical`, which is similar to the `rand` function in Spark or Impala. The function generates pseudo-random results with independent and identically distributed uniformly distributed values in [0, 1). [#43124](https://github.com/ClickHouse/ClickHouse/pull/43124) ([李扬](https://github.com/taiyang-li)).

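Several of the new functions above can be probed straight from the shell. A quick smoke test, assuming a 22.11 `clickhouse` binary on PATH (expected outputs are inferred from the entries, not guaranteed here):

```bash
# ascii, pmod, factorial and formatReadableDecimalSize are all new in 22.11
clickhouse local -q "SELECT ascii('A'), pmod(-3, 5), factorial(5), formatReadableDecimalSize(1023)"
# randCanonical: uniformly distributed in [0, 1)
clickhouse local -q "SELECT randCanonical()"
```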
#### Performance Improvement
* Currently, the only saturable operators are And and Or, and their code paths are affected by this change. [#42214](https://github.com/ClickHouse/ClickHouse/pull/42214) ([Zhiguo Zhou](https://github.com/ZhiguoZh)).
* `match` function can use the index if it's a condition on string prefix. This closes [#37333](https://github.com/ClickHouse/ClickHouse/issues/37333). [#42458](https://github.com/ClickHouse/ClickHouse/pull/42458) ([clarkcaoliu](https://github.com/Clark0)).
* Fixed slowness in JSONExtract with LowCardinality(String) tuples. [#42761](https://github.com/ClickHouse/ClickHouse/pull/42761) ([AlfVII](https://github.com/AlfVII)).
* Support parallel parsing for LineAsString input format. This improves performance just slightly. This closes [#42502](https://github.com/ClickHouse/ClickHouse/issues/42502). [#42780](https://github.com/ClickHouse/ClickHouse/pull/42780) ([Kruglov Pavel](https://github.com/Avogar)).
* Keeper performance improvement: improve commit performance for cases when many different nodes have uncommitted states. This should help with cases when a follower node can't sync fast enough. [#42926](https://github.com/ClickHouse/ClickHouse/pull/42926) ([Antonio Andelic](https://github.com/antonio2368)).
* Parallelized merging of `uniqExact` states for aggregation without a key, i.e. queries like `SELECT uniqExact(number) FROM table`. The improvement becomes noticeable when the number of unique keys approaches 10^6. Also `uniq` performance is slightly optimized. This closes [#4510](https://github.com/ClickHouse/ClickHouse/issues/4510). [#43072](https://github.com/ClickHouse/ClickHouse/pull/43072) ([Nikita Taranov](https://github.com/nickitat)).

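Of these, the `uniqExact` change is the easiest to observe locally: the parallelized part is the merge of per-thread aggregation states, so it needs no key and roughly 10^6 or more distinct values to show up. A minimal probe (timings will vary; `numbers_mt` just generates input in parallel):

```bash
# aggregation without a key over ten million distinct values
clickhouse local -q "SELECT uniqExact(number) FROM numbers_mt(10000000)"
```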
#### Improvement
* Support type `Object` inside other types, e.g. `Array(JSON)`. [#36969](https://github.com/ClickHouse/ClickHouse/pull/36969) ([Anton Popov](https://github.com/CurtizJ)).
* Remove covered parts for fetched parts (to avoid possible replication delay growth). [#39737](https://github.com/ClickHouse/ClickHouse/pull/39737) ([Azat Khuzhin](https://github.com/azat)).
* ClickHouse Client and ClickHouse Local will show progress by default even in non-interactive mode. If `/dev/tty` is available, the progress will be rendered directly to the terminal, without writing to stderr. This makes it possible to get progress even if stderr is redirected to a file, and the file will not be polluted by terminal escape sequences. The progress can be disabled by `--progress false`. This closes [#32238](https://github.com/ClickHouse/ClickHouse/issues/32238). [#42003](https://github.com/ClickHouse/ClickHouse/pull/42003) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* 1. Add, subtract and negate operations are now available on Intervals. When the interval types differ, they are transformed into a Tuple of those types. 2. A tuple of intervals can be added to or subtracted from a Date/DateTime field. 3. Added parsing of Intervals with different types, for example: `INTERVAL '1 HOUR 1 MINUTE 1 SECOND'`. [#42195](https://github.com/ClickHouse/ClickHouse/pull/42195) ([Nikolay Degterinsky](https://github.com/evillique)).
* Add `notLike` to the key condition atom map, so a condition like `NOT LIKE 'prefix%'` can use the primary index. [#42209](https://github.com/ClickHouse/ClickHouse/pull/42209) ([Duc Canh Le](https://github.com/canhld94)).
* Add support for FixedString input to base64 coding functions. [#42285](https://github.com/ClickHouse/ClickHouse/pull/42285) ([ltrk2](https://github.com/ltrk2)).
* Add columns `bytes_on_disk` and `path` to `system.detached_parts`. Closes [#42264](https://github.com/ClickHouse/ClickHouse/issues/42264). [#42303](https://github.com/ClickHouse/ClickHouse/pull/42303) ([chen](https://github.com/xiedeyantu)).
* Improve using structure from the insertion table in table functions: the setting `use_structure_from_insertion_table_in_table_functions` has a new possible value, `2`, which means that ClickHouse will try to determine automatically whether the structure from the insertion table can be used. Closes [#40028](https://github.com/ClickHouse/ClickHouse/issues/40028). [#42320](https://github.com/ClickHouse/ClickHouse/pull/42320) ([Kruglov Pavel](https://github.com/Avogar)).
* Added `**` glob support for recursive directory traversal to filesystem and S3. Resolves [#36316](https://github.com/ClickHouse/ClickHouse/issues/36316). [#42376](https://github.com/ClickHouse/ClickHouse/pull/42376) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Mask passwords and secret keys both in `system.query_log` and `/var/log/clickhouse-server/*.log`, and also in error messages. [#42484](https://github.com/ClickHouse/ClickHouse/pull/42484) ([Vitaly Baranov](https://github.com/vitlibar)).
* Add a new variable called `limit` to query_info, indicating whether this query is a limit-trivial query. If so, we will adjust the approximate total rows for later estimation. Closes [#7071](https://github.com/ClickHouse/ClickHouse/issues/7071). [#42580](https://github.com/ClickHouse/ClickHouse/pull/42580) ([Han Fei](https://github.com/hanfei1991)).
* Implement `ATTACH` of `MergeTree` tables for the `s3_plain` disk (plus some fixes for `s3_plain`). [#42628](https://github.com/ClickHouse/ClickHouse/pull/42628) ([Azat Khuzhin](https://github.com/azat)).
* Fix missing progress indication on INSERT FROM INFILE. Closes [#42548](https://github.com/ClickHouse/ClickHouse/issues/42548). [#42634](https://github.com/ClickHouse/ClickHouse/pull/42634) ([chen](https://github.com/xiedeyantu)).
* Add the `min_age_to_force_merge_on_partition_only` setting to optimize old parts for the entire partition only. [#42659](https://github.com/ClickHouse/ClickHouse/pull/42659) ([Antonio Andelic](https://github.com/antonio2368)).
* Throttling algorithm changed to token bucket. [#42665](https://github.com/ClickHouse/ClickHouse/pull/42665) ([Sergei Trifonov](https://github.com/serxa)).
* Refactor FunctionTokens to enable a maximum number of tokens returned for related functions (disabled by default). [#42673](https://github.com/ClickHouse/ClickHouse/pull/42673) ([李扬](https://github.com/taiyang-li)).
* Added a new field `allow_readonly` to `system.table_functions` to allow using table functions in readonly mode. Resolves [#42414](https://github.com/ClickHouse/ClickHouse/issues/42414). Implementation: added the new field and updated the code to use it in readonly mode. Testing: added a test at tests/queries/0_stateless/02473_functions_in_readonly_mode.sh. Documentation: updated the English documentation for Table Functions. [#42708](https://github.com/ClickHouse/ClickHouse/pull/42708) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Allow using Date32 arguments for the formatDateTime and FROM_UNIXTIME functions. [#42737](https://github.com/ClickHouse/ClickHouse/pull/42737) ([Roman Vasin](https://github.com/rvasin)).
* Update tzdata to 2022f. Mexico will no longer observe DST except near the US border: https://www.timeanddate.com/news/time/mexico-abolishes-dst-2022.html. Chihuahua moves to year-round UTC-6 on 2022-10-30. Fiji no longer observes DST. See https://github.com/google/cctz/pull/235 and https://bugs.launchpad.net/ubuntu/+source/tzdata/+bug/1995209. [#42796](https://github.com/ClickHouse/ClickHouse/pull/42796) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add the `FailedAsyncInsertQuery` event metric for async inserts. [#42814](https://github.com/ClickHouse/ClickHouse/pull/42814) ([Krzysztof Góralski](https://github.com/kgoralski)).
* Implement the `read-in-order` optimization on top of the query plan. It is enabled by default. Set `query_plan_read_in_order = 0` to use the previous AST-based version. [#42829](https://github.com/ClickHouse/ClickHouse/pull/42829) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Increase the size of upload parts exponentially for backups to S3. [#42833](https://github.com/ClickHouse/ClickHouse/pull/42833) ([Vitaly Baranov](https://github.com/vitlibar)).
* When the merge task is continuously busy and disk space is insufficient, completely expired parts could not be selected and dropped, resulting in insufficient disk space. The idea is that when an entire part has expired, no additional disk space needs to be reserved, which ensures normal execution of TTL. [#42869](https://github.com/ClickHouse/ClickHouse/pull/42869) ([zhongyuankai](https://github.com/zhongyuankai)).
* Bugfix: ignore the MySQL binlog SAVEPOINT event ([#42856](https://github.com/ClickHouse/ClickHouse/issues/42856)). [#42931](https://github.com/ClickHouse/ClickHouse/pull/42931) ([zzsmdfj](https://github.com/zzsmdfj)).
* Add support for interactive parameters in INSERT VALUES queries. [#43077](https://github.com/ClickHouse/ClickHouse/pull/43077) ([Nikolay Degterinsky](https://github.com/evillique)).
* Add a generic implementation for arbitrary structured named collections, an access type, and `system.named_collections`. [#43147](https://github.com/ClickHouse/ClickHouse/pull/43147) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add the `oss` function and StorageOSS for user convenience; OSS is fully compatible with S3. [#43155](https://github.com/ClickHouse/ClickHouse/pull/43155) ([zzsmdfj](https://github.com/zzsmdfj)).
* Improve error reporting in the collection of OS-related info for the `system.asynchronous_metrics` table. [#43192](https://github.com/ClickHouse/ClickHouse/pull/43192) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The `system.asynchronous_metrics` table gets embedded documentation. This documentation is also exported to Prometheus. Fixed an error with the metrics about `cache` disks: they were calculated only for one arbitrary cache disk instead of all of them. This closes [#7644](https://github.com/ClickHouse/ClickHouse/issues/7644). [#43194](https://github.com/ClickHouse/ClickHouse/pull/43194) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Modify the `INFORMATION_SCHEMA` tables so that ClickHouse can now connect to itself using the MySQL compatibility protocol. Add columns instead of aliases (related to [#9769](https://github.com/ClickHouse/ClickHouse/issues/9769)). This will improve compatibility with various MySQL clients. [#43198](https://github.com/ClickHouse/ClickHouse/pull/43198) ([Filatenkov Artur](https://github.com/FArthur-cmd)).
* Disable the `deltaLake` and `hudi` table functions in readonly mode. [#43316](https://github.com/ClickHouse/ClickHouse/pull/43316) ([Antonio Andelic](https://github.com/antonio2368)).

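Two of the improvements above are directly checkable from a terminal. A sketch, assuming a 22.11 binary; the flag and the interval literal are quoted from the entries, the queries around them are illustrative:

```bash
# progress is now shown by default even non-interactively; opt out explicitly
clickhouse local --progress false -q "SELECT count() FROM numbers(100000000)"
# mixed-unit interval literals can now be parsed and added to DateTime values
clickhouse local -q "SELECT now() + INTERVAL '1 HOUR 1 MINUTE 1 SECOND'"
```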
#### Bug Fix
* Updated the normalizer to clone the alias AST. Resolves [#42452](https://github.com/ClickHouse/ClickHouse/issues/42452). Implementation: updated QueryNormalizer to clone the alias AST when it is replaced; previously, assigning the same AST led to an exception in LogicalExpressionsOptimizer, since it would be the same parent being inserted again. This bug is not seen with the new analyzer (allow_experimental_analyzer), so no changes were made for it. A test for this was added. [#42827](https://github.com/ClickHouse/ClickHouse/pull/42827) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix a race for backups of tables in Lazy databases. [#43104](https://github.com/ClickHouse/ClickHouse/pull/43104) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix `skip_unavailable_shards` not working with the s3Cluster table function. [#43131](https://github.com/ClickHouse/ClickHouse/pull/43131) ([chen](https://github.com/xiedeyantu)).

#### Build/Testing/Packaging Improvement
* Run SQLancer for each pull request and commit to master. [SQLancer](https://github.com/sqlancer/sqlancer) is an OpenSource fuzzer that focuses on automatic detection of logical bugs. [#42397](https://github.com/ClickHouse/ClickHouse/pull/42397) ([Ilya Yatsishin](https://github.com/qoega)).
* Update to the latest zlib-ng. [#42463](https://github.com/ClickHouse/ClickHouse/pull/42463) ([Boris Kuschel](https://github.com/bkuschel)).
* Use LLVM `l64.lld` on macOS to suppress ld warnings, close [#42282](https://github.com/ClickHouse/ClickHouse/issues/42282). [#42470](https://github.com/ClickHouse/ClickHouse/pull/42470) ([Lloyd-Pottiger](https://github.com/Lloyd-Pottiger)).
* Add support for testing ClickHouse server with Jepsen. By the way, we already have support for testing ClickHouse Keeper with Jepsen. This pull request extends it to Replicated tables. [#42619](https://github.com/ClickHouse/ClickHouse/pull/42619) ([Antonio Andelic](https://github.com/antonio2368)).
* Improve the bugfix validation check: fix a bug with skipping the check, report a separate status in CI, run after the check-labels and style checks. Close [#40349](https://github.com/ClickHouse/ClickHouse/issues/40349). [#42702](https://github.com/ClickHouse/ClickHouse/pull/42702) ([Vladimir C](https://github.com/vdimir)).
* Wait until all files are in sync before archiving them in integration tests. [#42891](https://github.com/ClickHouse/ClickHouse/pull/42891) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Use https://github.com/matus-chochlik/ctcache for clang-tidy results caching. [#42913](https://github.com/ClickHouse/ClickHouse/pull/42913) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Before the fix, the user-defined config was preserved by RPM in `$file.rpmsave`. The PR fixes it and won't replace the user's files from packages. [#42936](https://github.com/ClickHouse/ClickHouse/pull/42936) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Add a CI step to mark commits as ready for release; soft-forbid launching a release script from branches other than master. [#43017](https://github.com/ClickHouse/ClickHouse/pull/43017) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in official stable or prestable release)

* Fix schema inference in s3Cluster and improve it in hdfsCluster. [#41979](https://github.com/ClickHouse/ClickHouse/pull/41979) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix retries while reading from HTTP table engines / table functions (retriable errors could be retried more times than needed; non-retriable errors resulted in a failed assertion in the code). [#42224](https://github.com/ClickHouse/ClickHouse/pull/42224) ([Kseniia Sumarokova](https://github.com/kssenii)).
* A segmentation fault related to DNS & c-ares has been reported. The error below occurred in multiple threads: ``` 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008088 [ 356 ] {} <Fatal> BaseDaemon: ######################################## 2022-09-28 15:41:19.008,"2022.09.28 15:41:19.008147 [ 356 ] {} <Fatal> BaseDaemon: (version 22.8.5.29 (official build), build id: 92504ACA0B8E2267) (from thread 353) (no query) Received signal Segmentation fault (11)" 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008196 [ 356 ] {} <Fatal> BaseDaemon: Address: 0xf Access: write. Address not mapped to object. 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008216 [ 356 ] {} <Fatal> BaseDaemon: Stack trace: 0x188f8212 0x1626851b 0x1626a69e 0x16269b3f 0x16267eab 0x13cf8284 0x13d24afc 0x13c5217e 0x14ec2495 0x15ba440f 0x15b9d13b 0x15bb2699 0x1891ccb3 0x1891e00d 0x18ae0769 0x18ade022 0x7f76aa985609 0x7f76aa8aa133 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008274 [ 356 ] {} <Fatal> BaseDaemon: 2. Poco::Net::IPAddress::family() const @ 0x188f8212 in /usr/bin/clickhouse 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008297 [ 356 ] {} <Fatal> BaseDaemon: 3. ? @ 0x1626851b in /usr/bin/clickhouse 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008309 [ 356 ] {} <Fatal> BaseDaemon: 4. ? @ 0x1626a69e in /usr/bin/clickhouse ```. [#42234](https://github.com/ClickHouse/ClickHouse/pull/42234) ([Arthur Passos](https://github.com/arthurpassos)).
* Fix `LOGICAL_ERROR` `Arguments of 'plus' have incorrect data types` which may happen in PK analysis (monotonicity check). Fix invalid PK analysis for monotonic binary functions with first constant argument. [#42410](https://github.com/ClickHouse/ClickHouse/pull/42410) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix incorrect key analysis when key types cannot be inside Nullable. This fixes [#42456](https://github.com/ClickHouse/ClickHouse/issues/42456). [#42469](https://github.com/ClickHouse/ClickHouse/pull/42469) ([Amos Bird](https://github.com/amosbird)).
* Fix a typo in a setting name that led to bad usage of the schema inference cache while using the setting `input_format_csv_use_best_effort_in_schema_inference`. Closes [#41735](https://github.com/ClickHouse/ClickHouse/issues/41735). [#42536](https://github.com/ClickHouse/ClickHouse/pull/42536) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix creating a Set with a wrong header when the data type is LowCardinality. Closes [#42460](https://github.com/ClickHouse/ClickHouse/issues/42460). [#42579](https://github.com/ClickHouse/ClickHouse/pull/42579) ([flynn](https://github.com/ucasfl)).
* `(U)Int128` and `(U)Int256` values are correctly checked in `PREWHERE`. [#42605](https://github.com/ClickHouse/ClickHouse/pull/42605) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix a bug in ParserFunction that could have led to a segmentation fault. [#42724](https://github.com/ClickHouse/ClickHouse/pull/42724) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix TRUNCATE TABLE not holding the lock correctly. [#42728](https://github.com/ClickHouse/ClickHouse/pull/42728) ([flynn](https://github.com/ucasfl)).
* Fix possible SIGSEGV for web disks when a file does not exist (`OPTIMIZE TABLE FINAL` can also eventually hit the same error). [#42767](https://github.com/ClickHouse/ClickHouse/pull/42767) ([Azat Khuzhin](https://github.com/azat)).
* Fix `auth_type` mapping in `system.session_log`, by including `SSL_CERTIFICATE` for the enum values. [#42782](https://github.com/ClickHouse/ClickHouse/pull/42782) ([Miel Donkers](https://github.com/mdonkers)).
* Fix stack-use-after-return under ASAN build in ParserCreateUserQuery. [#42804](https://github.com/ClickHouse/ClickHouse/pull/42804) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix lowerUTF8()/upperUTF8() when a symbol straddled a 16-byte boundary (a very frequent case if you have strings longer than 16 bytes). [#42812](https://github.com/ClickHouse/ClickHouse/pull/42812) ([Azat Khuzhin](https://github.com/azat)).
* An additional bound check was added to the lz4 decompression routine to fix misbehaviour in case of malformed input. [#42868](https://github.com/ClickHouse/ClickHouse/pull/42868) ([Nikita Taranov](https://github.com/nickitat)).
* Fix a rare possible hang on query cancellation. [#42874](https://github.com/ClickHouse/ClickHouse/pull/42874) ([Azat Khuzhin](https://github.com/azat)).
* Fix incorrect saved_block_sample with multiple disjuncts in hash join, close [#42832](https://github.com/ClickHouse/ClickHouse/issues/42832). [#42876](https://github.com/ClickHouse/ClickHouse/pull/42876) ([Vladimir C](https://github.com/vdimir)).
* Fix a null pointer generated when selecting `if` with an alias from a three-table join. [#42883](https://github.com/ClickHouse/ClickHouse/pull/42883) ([zzsmdfj](https://github.com/zzsmdfj)).
* Fix memory sanitizer report in ClusterDiscovery, close [#42763](https://github.com/ClickHouse/ClickHouse/issues/42763). [#42905](https://github.com/ClickHouse/ClickHouse/pull/42905) ([Vladimir C](https://github.com/vdimir)).
* Fix datetime schema inference in the case of an empty string. [#42911](https://github.com/ClickHouse/ClickHouse/pull/42911) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix a rare NOT_FOUND_COLUMN_IN_BLOCK error when a projection could be used but none is available. This fixes [#42771](https://github.com/ClickHouse/ClickHouse/issues/42771). The bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/25563. [#42938](https://github.com/ClickHouse/ClickHouse/pull/42938) ([Amos Bird](https://github.com/amosbird)).
* Fixes for the s3_plain disk that allow attaching Wide parts. [#42950](https://github.com/ClickHouse/ClickHouse/pull/42950) ([Azat Khuzhin](https://github.com/azat)).
* Fix ATTACH TABLE in the PostgreSQL database engine if the table contains the DATETIME data type. Closes [#42817](https://github.com/ClickHouse/ClickHouse/issues/42817). [#42960](https://github.com/ClickHouse/ClickHouse/pull/42960) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix lambda parsing. Closes [#41848](https://github.com/ClickHouse/ClickHouse/issues/41848). [#42979](https://github.com/ClickHouse/ClickHouse/pull/42979) ([Nikolay Degterinsky](https://github.com/evillique)).
* Handle (ignore) SAVEPOINT queries in MaterializedMySQL. [#43086](https://github.com/ClickHouse/ClickHouse/pull/43086) ([Stig Bakken](https://github.com/stigsb)).
* Fix incorrect key analysis when nullable keys appear in the middle of a hyperrectangle. This fixes [#43111](https://github.com/ClickHouse/ClickHouse/issues/43111). [#43133](https://github.com/ClickHouse/ClickHouse/pull/43133) ([Amos Bird](https://github.com/amosbird)).
* Fix several buffer over-reads. [#43159](https://github.com/ClickHouse/ClickHouse/pull/43159) ([Raúl Marín](https://github.com/Algunenano)).
* Fix the `if` function in the case of NULL and const Nullable arguments. Closes [#43069](https://github.com/ClickHouse/ClickHouse/issues/43069). [#43178](https://github.com/ClickHouse/ClickHouse/pull/43178) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix decimal math overflow when parsing datetime with the 'best effort' algorithm. Closes [#43061](https://github.com/ClickHouse/ClickHouse/issues/43061). [#43180](https://github.com/ClickHouse/ClickHouse/pull/43180) ([Kruglov Pavel](https://github.com/Avogar)).
* The `indent` field produced by the `git-import` tool was miscalculated. See https://clickhouse.com/docs/en/getting-started/example-datasets/github/. [#43191](https://github.com/ClickHouse/ClickHouse/pull/43191) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fixed unexpected behaviour of Interval types with subquery and casting. [#43193](https://github.com/ClickHouse/ClickHouse/pull/43193) ([jh0x](https://github.com/jh0x)).
* Fix a logical error in the `sumMap/minMap/maxMap` functions executing `TOTALS/ROLLUP/CUBE` on `NULL` values. Close [#43022](https://github.com/ClickHouse/ClickHouse/issues/43022). [#43232](https://github.com/ClickHouse/ClickHouse/pull/43232) ([Vladimir C](https://github.com/vdimir)).
* Fix ubsan in AggregateFunctionMinMaxAny::read with high sizes. [#43249](https://github.com/ClickHouse/ClickHouse/pull/43249) ([Raúl Marín](https://github.com/Algunenano)).
* Fix IS (NOT) NULL operator priority with regard to other operators. [#43265](https://github.com/ClickHouse/ClickHouse/pull/43265) ([Nikolay Degterinsky](https://github.com/evillique)).

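Most of the fixes above need specific data to reproduce; the `lowerUTF8`/`upperUTF8` one is easy to probe, since any string longer than 16 bytes crosses the boundary that was mishandled. A quick check on a fixed binary (the string content is arbitrary, chosen to straddle the 16-byte boundary with multibyte characters):

```bash
clickhouse local -q "SELECT lowerUTF8('ABCDEFGHIJKLMNOPQRSTUVWXYZÄÖÜ')"
```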
#### Build Improvement

* Add support for formatting IPv6 on s390x. [#42412](https://github.com/ClickHouse/ClickHouse/pull/42412) ([Suzy Wang](https://github.com/SuzyWangIBMer)).

#### NO CL ENTRY

* NO CL ENTRY: 'Revert "Sonar Cloud Workflow"'. [#42725](https://github.com/ClickHouse/ClickHouse/pull/42725) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert " Keeper retries during insert (clean)"'. [#43116](https://github.com/ClickHouse/ClickHouse/pull/43116) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Revert "Revert " Keeper retries during insert (clean)""'. [#43122](https://github.com/ClickHouse/ClickHouse/pull/43122) ([Igor Nikonov](https://github.com/devcrafter)).
* NO CL ENTRY: 'Revert "Optimize TTL merge, completely expired parts can be removed in time"'. [#43134](https://github.com/ClickHouse/ClickHouse/pull/43134) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Revert "Randomize keeper fault injection settings in stress tests"'. [#43218](https://github.com/ClickHouse/ClickHouse/pull/43218) ([Alexander Gololobov](https://github.com/davenger)).
* NO CL ENTRY: 'Revert "S3 request per second rate throttling"'. [#43306](https://github.com/ClickHouse/ClickHouse/pull/43306) ([Alexander Tokmakov](https://github.com/tavplubix)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Better logging for docs builder [#41903](https://github.com/ClickHouse/ClickHouse/pull/41903) ([filimonov](https://github.com/filimonov)).
* Save full server log in AST Fuzzer checks [#42316](https://github.com/ClickHouse/ClickHouse/pull/42316) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Build with libcxx(abi) 15 [#42513](https://github.com/ClickHouse/ClickHouse/pull/42513) ([Robert Schulze](https://github.com/rschu1ze)).
* Sonar Cloud Workflow [#42534](https://github.com/ClickHouse/ClickHouse/pull/42534) ([Julio Jimenez](https://github.com/juliojimenez)).
* Invalid type in where for Merge table (logical error) [#42576](https://github.com/ClickHouse/ClickHouse/pull/42576) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix frequent memory drift message and clarify things in comments [#42582](https://github.com/ClickHouse/ClickHouse/pull/42582) ([Azat Khuzhin](https://github.com/azat)).
* Add functions for PowerBI connect [#42612](https://github.com/ClickHouse/ClickHouse/pull/42612) ([Filatenkov Artur](https://github.com/FArthur-cmd)).
* Try to save `IDataPartStorage` interface [#42618](https://github.com/ClickHouse/ClickHouse/pull/42618) ([Anton Popov](https://github.com/CurtizJ)).
* Remove Ubuntu cruft [#42622](https://github.com/ClickHouse/ClickHouse/pull/42622) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Analyzer change setting into allow_experimental_analyzer [#42649](https://github.com/ClickHouse/ClickHouse/pull/42649) ([Maksim Kita](https://github.com/kitaisreal)).
* Analyzer IQueryTreeNode remove getName method [#42651](https://github.com/ClickHouse/ClickHouse/pull/42651) ([Maksim Kita](https://github.com/kitaisreal)).
* Minor fix iotest_nonblock build [#42658](https://github.com/ClickHouse/ClickHouse/pull/42658) ([Jordi Villar](https://github.com/jrdi)).
* Add tests and doc for some url-related functions [#42664](https://github.com/ClickHouse/ClickHouse/pull/42664) ([Vladimir C](https://github.com/vdimir)).
* Update version_date.tsv and changelogs after v22.10.1.1875-stable [#42676](https://github.com/ClickHouse/ClickHouse/pull/42676) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Fix error handling in clickhouse_helper.py [#42678](https://github.com/ClickHouse/ClickHouse/pull/42678) ([Ilya Yatsishin](https://github.com/qoega)).
* Fix execution of version_helper.py to use git tweaks [#42679](https://github.com/ClickHouse/ClickHouse/pull/42679) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* MergeTree indexes use RPNBuilderTree [#42681](https://github.com/ClickHouse/ClickHouse/pull/42681) ([Maksim Kita](https://github.com/kitaisreal)).
* Always run `BuilderReport` and `BuilderSpecialReport` in all CI types [#42684](https://github.com/ClickHouse/ClickHouse/pull/42684) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Support optimize_syntax_fuse_functions for sum/count/avg via analyzer [#42689](https://github.com/ClickHouse/ClickHouse/pull/42689) ([Vladimir C](https://github.com/vdimir)).
* Update version after release [#42699](https://github.com/ClickHouse/ClickHouse/pull/42699) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Update version_date.tsv and changelogs after v22.10.1.1877-stable [#42700](https://github.com/ClickHouse/ClickHouse/pull/42700) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* OrderByLimitByDuplicateEliminationPass improve performance [#42704](https://github.com/ClickHouse/ClickHouse/pull/42704) ([Maksim Kita](https://github.com/kitaisreal)).
* Analyzer improve subqueries representation [#42705](https://github.com/ClickHouse/ClickHouse/pull/42705) ([Maksim Kita](https://github.com/kitaisreal)).
* Update version_date.tsv and changelogs after v22.9.4.32-stable [#42712](https://github.com/ClickHouse/ClickHouse/pull/42712) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v22.8.7.34-lts [#42713](https://github.com/ClickHouse/ClickHouse/pull/42713) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v22.7.7.24-stable [#42714](https://github.com/ClickHouse/ClickHouse/pull/42714) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Move SonarCloud Job to nightly [#42718](https://github.com/ClickHouse/ClickHouse/pull/42718) ([Julio Jimenez](https://github.com/juliojimenez)).
* Update version_date.tsv and changelogs after v22.8.8.3-lts [#42738](https://github.com/ClickHouse/ClickHouse/pull/42738) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Minor fix implicit cast CaresPTRResolver [#42747](https://github.com/ClickHouse/ClickHouse/pull/42747) ([Jordi Villar](https://github.com/jrdi)).
* Fix build on master [#42752](https://github.com/ClickHouse/ClickHouse/pull/42752) ([Igor Nikonov](https://github.com/devcrafter)).
* Update version_date.tsv and changelogs after v22.3.14.18-lts [#42759](https://github.com/ClickHouse/ClickHouse/pull/42759) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Fix anchor links [#42760](https://github.com/ClickHouse/ClickHouse/pull/42760) ([Sergei Trifonov](https://github.com/serxa)).
* Update version_date.tsv and changelogs after v22.3.14.23-lts [#42764](https://github.com/ClickHouse/ClickHouse/pull/42764) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update README.md [#42783](https://github.com/ClickHouse/ClickHouse/pull/42783) ([Yuko Takagi](https://github.com/yukotakagi)).
* Slightly better code with projections [#42794](https://github.com/ClickHouse/ClickHouse/pull/42794) ([Anton Popov](https://github.com/CurtizJ)).
* Fix some races in MergeTree [#42805](https://github.com/ClickHouse/ClickHouse/pull/42805) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix typo in comments [#42809](https://github.com/ClickHouse/ClickHouse/pull/42809) ([Gabriel](https://github.com/Gabriel39)).
* Fix compilation of LLVM with cmake cache [#42816](https://github.com/ClickHouse/ClickHouse/pull/42816) ([Azat Khuzhin](https://github.com/azat)).
* Fix link in docs [#42821](https://github.com/ClickHouse/ClickHouse/pull/42821) ([Sergei Trifonov](https://github.com/serxa)).
* Link to proper place in docs [#42822](https://github.com/ClickHouse/ClickHouse/pull/42822) ([Sergei Trifonov](https://github.com/serxa)).
* Fix argument type check in AggregateFunctionAnalysisOfVariance [#42823](https://github.com/ClickHouse/ClickHouse/pull/42823) ([Vladimir C](https://github.com/vdimir)).
* Tests/lambda analyzer [#42824](https://github.com/ClickHouse/ClickHouse/pull/42824) ([Denny Crane](https://github.com/den-crane)).
* Fix Missing Quotes - Sonar Nightly [#42831](https://github.com/ClickHouse/ClickHouse/pull/42831) ([Julio Jimenez](https://github.com/juliojimenez)).
* Add exclusions from the Snyk scan [#42834](https://github.com/ClickHouse/ClickHouse/pull/42834) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix Missing Env Vars - Sonar Nightly [#42843](https://github.com/ClickHouse/ClickHouse/pull/42843) ([Julio Jimenez](https://github.com/juliojimenez)).
* Fix typo [#42855](https://github.com/ClickHouse/ClickHouse/pull/42855) ([GoGoWen](https://github.com/GoGoWen)).
* Add timezone to 02458_datediff_date32 [#42857](https://github.com/ClickHouse/ClickHouse/pull/42857) ([Vladimir C](https://github.com/vdimir)).
* Adjust cancel and rerun workflow names to the actual [#42862](https://github.com/ClickHouse/ClickHouse/pull/42862) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Analyzer subquery in JOIN TREE with aggregation [#42865](https://github.com/ClickHouse/ClickHouse/pull/42865) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix getauxval for sanitizer builds [#42866](https://github.com/ClickHouse/ClickHouse/pull/42866) ([Amos Bird](https://github.com/amosbird)).
* Update version_date.tsv and changelogs after v22.10.2.11-stable [#42871](https://github.com/ClickHouse/ClickHouse/pull/42871) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Better usability for dashboard.html on changes [#42872](https://github.com/ClickHouse/ClickHouse/pull/42872) ([Vladimir C](https://github.com/vdimir)).
* Some fixes for ReplicatedMergeTree [#42878](https://github.com/ClickHouse/ClickHouse/pull/42878) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Validate Query Tree in debug [#42879](https://github.com/ClickHouse/ClickHouse/pull/42879) ([Dmitry Novik](https://github.com/novikd)).
* changed type name for s3 plain storage [#42890](https://github.com/ClickHouse/ClickHouse/pull/42890) ([Aleksandr](https://github.com/AVMusorin)).
* Cleanup implementation of regexpReplace(All|One) [#42907](https://github.com/ClickHouse/ClickHouse/pull/42907) ([Robert Schulze](https://github.com/rschu1ze)).
* Do not show status for Bugfix validate check in non bugfix PRs [#42932](https://github.com/ClickHouse/ClickHouse/pull/42932) ([Vladimir C](https://github.com/vdimir)).
* fix(typo): Passible -> Possible [#42933](https://github.com/ClickHouse/ClickHouse/pull/42933) ([Yakko Majuri](https://github.com/yakkomajuri)).
* Pin the cryptography version to not break lambdas [#42934](https://github.com/ClickHouse/ClickHouse/pull/42934) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix: bad cast from type DB::ColumnLowCardinality to DB::ColumnString [#42937](https://github.com/ClickHouse/ClickHouse/pull/42937) ([Igor Nikonov](https://github.com/devcrafter)).
* Attach thread pool for loading parts to the query [#42947](https://github.com/ClickHouse/ClickHouse/pull/42947) ([Azat Khuzhin](https://github.com/azat)).
* Fix macOS M1 builds due to sprintf deprecation [#42962](https://github.com/ClickHouse/ClickHouse/pull/42962) ([Jordi Villar](https://github.com/jrdi)).
* Less use of CH-specific bit_cast() [#42968](https://github.com/ClickHouse/ClickHouse/pull/42968) ([Robert Schulze](https://github.com/rschu1ze)).
* Remove some utils [#42972](https://github.com/ClickHouse/ClickHouse/pull/42972) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix a bug in CAST function parser [#42980](https://github.com/ClickHouse/ClickHouse/pull/42980) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix old bug to remove `refs/head` from ref name [#42981](https://github.com/ClickHouse/ClickHouse/pull/42981) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Add debug information to nightly builds [#42997](https://github.com/ClickHouse/ClickHouse/pull/42997) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Add some guard rails around aggregation memory management [#42999](https://github.com/ClickHouse/ClickHouse/pull/42999) ([Raúl Marín](https://github.com/Algunenano)).
* Add `on: workflow_call` to debug CI [#43000](https://github.com/ClickHouse/ClickHouse/pull/43000) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Analyzer added identifier typo corrections [#43002](https://github.com/ClickHouse/ClickHouse/pull/43002) ([Maksim Kita](https://github.com/kitaisreal)).
* Simple fixes for restart replica description [#43004](https://github.com/ClickHouse/ClickHouse/pull/43004) ([Igor Nikonov](https://github.com/devcrafter)).
* Cleanup match code [#43006](https://github.com/ClickHouse/ClickHouse/pull/43006) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix TSan errors (correctly ignore _exit interception) [#43009](https://github.com/ClickHouse/ClickHouse/pull/43009) ([Azat Khuzhin](https://github.com/azat)).
* fix bandwidth throttlers initialization order [#43015](https://github.com/ClickHouse/ClickHouse/pull/43015) ([Sergei Trifonov](https://github.com/serxa)).
* Add test for issue [#42520](https://github.com/ClickHouse/ClickHouse/issues/42520) [#43027](https://github.com/ClickHouse/ClickHouse/pull/43027) ([Robert Schulze](https://github.com/rschu1ze)).
* Analyzer improve ARRAY JOIN with JOIN [#43048](https://github.com/ClickHouse/ClickHouse/pull/43048) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix projection part removal with zero-copy replication [#43060](https://github.com/ClickHouse/ClickHouse/pull/43060) ([alesapin](https://github.com/alesapin)).
* Fix msan warning [#43065](https://github.com/ClickHouse/ClickHouse/pull/43065) ([Raúl Marín](https://github.com/Algunenano)).
* Analyzer AST key condition crash fix [#43070](https://github.com/ClickHouse/ClickHouse/pull/43070) ([Maksim Kita](https://github.com/kitaisreal)).
* Better logging for mark range filtering on projection parts [#43076](https://github.com/ClickHouse/ClickHouse/pull/43076) ([Duc Canh Le](https://github.com/canhld94)).
* Fix ub type punning [#43088](https://github.com/ClickHouse/ClickHouse/pull/43088) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Analyzer improve aliases support for table expressions [#43089](https://github.com/ClickHouse/ClickHouse/pull/43089) ([Maksim Kita](https://github.com/kitaisreal)).
* Throw not implemented for window frame type 'groups' in analyzer [#43090](https://github.com/ClickHouse/ClickHouse/pull/43090) ([Vladimir C](https://github.com/vdimir)).
* Disable clickhouse local and client non-interactive progress by default. [#43092](https://github.com/ClickHouse/ClickHouse/pull/43092) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Make error message after dropping current user more correct. [#43097](https://github.com/ClickHouse/ClickHouse/pull/43097) ([Vitaly Baranov](https://github.com/vitlibar)).
* More stable test [#43102](https://github.com/ClickHouse/ClickHouse/pull/43102) ([alesapin](https://github.com/alesapin)).
* Rewrite tests for memory overcommit [#43105](https://github.com/ClickHouse/ClickHouse/pull/43105) ([Dmitry Novik](https://github.com/novikd)).
* Fix trailing \n from SQLancer status [#43114](https://github.com/ClickHouse/ClickHouse/pull/43114) ([Ilya Yatsishin](https://github.com/qoega)).
* Fix `test_keeper_four_word_command::test_cmd_stat` [#43115](https://github.com/ClickHouse/ClickHouse/pull/43115) ([Antonio Andelic](https://github.com/antonio2368)).
* Enable keeper fault injection for inserts in functional tests [#43117](https://github.com/ClickHouse/ClickHouse/pull/43117) ([Igor Nikonov](https://github.com/devcrafter)).
* Analyzer aggregation crash fix [#43118](https://github.com/ClickHouse/ClickHouse/pull/43118) ([Maksim Kita](https://github.com/kitaisreal)).
* Analyzer aggregation totals crash fix [#43119](https://github.com/ClickHouse/ClickHouse/pull/43119) ([Maksim Kita](https://github.com/kitaisreal)).
* Improve commit_status_helper.py [#43121](https://github.com/ClickHouse/ClickHouse/pull/43121) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Skip hash logging on sanitizer builds [#43129](https://github.com/ClickHouse/ClickHouse/pull/43129) ([Raúl Marín](https://github.com/Algunenano)).
* Analyzer improve JOIN with constants [#43141](https://github.com/ClickHouse/ClickHouse/pull/43141) ([Maksim Kita](https://github.com/kitaisreal)).
* Remove POCO_CLICKHOUSE_PATCH [#43146](https://github.com/ClickHouse/ClickHouse/pull/43146) ([Azat Khuzhin](https://github.com/azat)).
* Update CompressionCodecDeflateQpl.cpp [#43150](https://github.com/ClickHouse/ClickHouse/pull/43150) ([Tiaonmmn](https://github.com/Tiaonmmn)).
* Randomize keeper fault injection settings in stress tests [#43187](https://github.com/ClickHouse/ClickHouse/pull/43187) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix for missing columns bug with projections and ALTER UPDATE [#43189](https://github.com/ClickHouse/ClickHouse/pull/43189) ([Alexander Gololobov](https://github.com/davenger)).
* A workaround for LLVM bug, https://github.com/llvm/llvm-project/issues/58633 [#43195](https://github.com/ClickHouse/ClickHouse/pull/43195) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Stop `ConfigReloader` first to avoid data race [#43201](https://github.com/ClickHouse/ClickHouse/pull/43201) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix typo [#43203](https://github.com/ClickHouse/ClickHouse/pull/43203) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Miscellaneous changes [#43206](https://github.com/ClickHouse/ClickHouse/pull/43206) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix flaky 02449_check_dependencies_and_table_shutdown [#43212](https://github.com/ClickHouse/ClickHouse/pull/43212) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Add test to check [#43167](https://github.com/ClickHouse/ClickHouse/issues/43167) for all builds [#43216](https://github.com/ClickHouse/ClickHouse/pull/43216) ([Ilya Yatsishin](https://github.com/qoega)).
* Don't throw if shared ID already created in `StorageReplicatedMergeTree` [#43244](https://github.com/ClickHouse/ClickHouse/pull/43244) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix nullptr dereference in collectScopeValidIdentifiersForTypoCorrection [#43245](https://github.com/ClickHouse/ClickHouse/pull/43245) ([Vladimir C](https://github.com/vdimir)).
* Better message in wait_zookeeper_to_start [#43256](https://github.com/ClickHouse/ClickHouse/pull/43256) ([Vladimir C](https://github.com/vdimir)).
* Make test_global_overcommit_tracker non-parallel [#43266](https://github.com/ClickHouse/ClickHouse/pull/43266) ([Dmitry Novik](https://github.com/novikd)).
* Rename canonicalRand to randCanonical [#43283](https://github.com/ClickHouse/ClickHouse/pull/43283) ([Nikita Taranov](https://github.com/nickitat)).
* check limits for an AST in select parser fuzzer [#43285](https://github.com/ClickHouse/ClickHouse/pull/43285) ([Sema Checherinda](https://github.com/CheSema)).
* Allow autoremoval of old parts if detach_not_byte_identical_parts enabled [#43287](https://github.com/ClickHouse/ClickHouse/pull/43287) ([filimonov](https://github.com/filimonov)).
* `pmod`: compatibility with Spark, better documentation [#43313](https://github.com/ClickHouse/ClickHouse/pull/43313) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

31
docs/changelogs/v22.8.9.24-lts.md
Normal file
@ -0,0 +1,31 @@
---
sidebar_position: 1
sidebar_label: 2022
---

# 2022 Changelog

### ClickHouse release v22.8.9.24-lts (a1b69551d40) FIXME as compared to v22.8.8.3-lts (ac5a6cababc)

#### Performance Improvement
* Backported in [#43012](https://github.com/ClickHouse/ClickHouse/issues/43012): Keeper performance improvement: improve commit performance for cases when many different nodes have uncommitted states. This should help with cases when a follower node can't sync fast enough. [#42926](https://github.com/ClickHouse/ClickHouse/pull/42926) ([Antonio Andelic](https://github.com/antonio2368)).

#### Improvement
* Backported in [#42840](https://github.com/ClickHouse/ClickHouse/issues/42840): Update tzdata to 2022f. Mexico will no longer observe DST except near the US border: https://www.timeanddate.com/news/time/mexico-abolishes-dst-2022.html. Chihuahua moves to year-round UTC-6 on 2022-10-30. Fiji no longer observes DST. See https://github.com/google/cctz/pull/235 and https://bugs.launchpad.net/ubuntu/+source/tzdata/+bug/1995209. [#42796](https://github.com/ClickHouse/ClickHouse/pull/42796) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### Build/Testing/Packaging Improvement
* Backported in [#42964](https://github.com/ClickHouse/ClickHouse/issues/42964): Before the fix, the user-defined config was preserved by RPM in `$file.rpmsave`. The PR fixes it and won't replace the user's files from packages. [#42936](https://github.com/ClickHouse/ClickHouse/pull/42936) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#43040](https://github.com/ClickHouse/ClickHouse/issues/43040): Add a CI step to mark commits as ready for release; soft-forbid launching a release script from branches but master. [#43017](https://github.com/ClickHouse/ClickHouse/pull/43017) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in official stable or prestable release)

* Backported in [#42720](https://github.com/ClickHouse/ClickHouse/issues/42720): Fixed `Unknown identifier (aggregate-function)` exception which appears when a user tries to calculate WINDOW ORDER BY/PARTITION BY expressions over aggregate functions: ``` CREATE TABLE default.tenk1 ( `unique1` Int32, `unique2` Int32, `ten` Int32 ) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192; SELECT ten, sum(unique1) + sum(unique2) AS res, rank() OVER (ORDER BY sum(unique1) + sum(unique2) ASC) AS rank FROM _complex GROUP BY ten ORDER BY ten ASC; ``` which gives: ``` Code: 47. DB::Exception: Received from localhost:9000. DB::Exception: Unknown identifier: sum(unique1); there are columns: unique1, unique2, ten: While processing sum(unique1) + sum(unique2) ASC. (UNKNOWN_IDENTIFIER) ```. [#39762](https://github.com/ClickHouse/ClickHouse/pull/39762) ([Vladimir Chebotaryov](https://github.com/quickhouse)).
* Backported in [#42748](https://github.com/ClickHouse/ClickHouse/issues/42748): A segmentation fault related to DNS & c-ares has been reported. The below error occurred in multiple threads: ``` 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008088 [ 356 ] {} <Fatal> BaseDaemon: ######################################## 2022-09-28 15:41:19.008,"2022.09.28 15:41:19.008147 [ 356 ] {} <Fatal> BaseDaemon: (version 22.8.5.29 (official build), build id: 92504ACA0B8E2267) (from thread 353) (no query) Received signal Segmentation fault (11)" 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008196 [ 356 ] {} <Fatal> BaseDaemon: Address: 0xf Access: write. Address not mapped to object. 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008216 [ 356 ] {} <Fatal> BaseDaemon: Stack trace: 0x188f8212 0x1626851b 0x1626a69e 0x16269b3f 0x16267eab 0x13cf8284 0x13d24afc 0x13c5217e 0x14ec2495 0x15ba440f 0x15b9d13b 0x15bb2699 0x1891ccb3 0x1891e00d 0x18ae0769 0x18ade022 0x7f76aa985609 0x7f76aa8aa133 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008274 [ 356 ] {} <Fatal> BaseDaemon: 2. Poco::Net::IPAddress::family() const @ 0x188f8212 in /usr/bin/clickhouse 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008297 [ 356 ] {} <Fatal> BaseDaemon: 3. ? @ 0x1626851b in /usr/bin/clickhouse 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008309 [ 356 ] {} <Fatal> BaseDaemon: 4. ? @ 0x1626a69e in /usr/bin/clickhouse ```. [#42234](https://github.com/ClickHouse/ClickHouse/pull/42234) ([Arthur Passos](https://github.com/arthurpassos)).
* Backported in [#43062](https://github.com/ClickHouse/ClickHouse/issues/43062): Fix rare NOT_FOUND_COLUMN_IN_BLOCK error when projection is possible to use but there is no projection available. This fixes [#42771](https://github.com/ClickHouse/ClickHouse/issues/42771) . The bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/25563. [#42938](https://github.com/ClickHouse/ClickHouse/pull/42938) ([Amos Bird](https://github.com/amosbird)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Do not warn about kvm-clock [#41217](https://github.com/ClickHouse/ClickHouse/pull/41217) ([Sergei Trifonov](https://github.com/serxa)).
* Revert revert 41268 disable s3 parallel write for part moves to disk s3 [#42617](https://github.com/ClickHouse/ClickHouse/pull/42617) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Always run `BuilderReport` and `BuilderSpecialReport` in all CI types [#42684](https://github.com/ClickHouse/ClickHouse/pull/42684) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
@ -77,15 +77,15 @@ While turning on `gtid_mode` you should also specify `enforce_gtid_consistency =

## Virtual Columns {#virtual-columns}

When working with the `MaterializedMySQL` database engine, [ReplacingMergeTree](../../engines/table-engines/mergetree-family/replacingmergetree.md) tables are used with virtual `_sign` and `_version` columns.
When working with the `MaterializedMySQL` database engine, [ReplacingMergeTree](/docs/en/engines/table-engines/mergetree-family/replacingmergetree.md) tables are used with virtual `_sign` and `_version` columns.

### \_version

`_version` — Transaction counter. Type [UInt64](../../sql-reference/data-types/int-uint.md).
`_version` — Transaction counter. Type [UInt64](/docs/en/sql-reference/data-types/int-uint.md).

### \_sign

`_sign` — Deletion mark. Type [Int8](../../sql-reference/data-types/int-uint.md). Possible values:
`_sign` — Deletion mark. Type [Int8](/docs/en/sql-reference/data-types/int-uint.md). Possible values:
- `1` — Row is not deleted,
- `-1` — Row is deleted.
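A minimal sketch of reading these columns, assuming a `MaterializedMySQL` database named `db` with a replicated table `t` (both names are illustrative); the virtual columns are not returned by `SELECT *`, so they have to be listed explicitly:

```sql
-- Sketch: read the virtual columns alongside the regular ones.
-- `_sign = 1` keeps only rows that are not marked as deleted.
SELECT
    *,
    _version,
    _sign
FROM db.t
WHERE _sign = 1;
```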
@ -93,29 +93,29 @@ When working with the `MaterializedMySQL` database engine, [ReplacingMergeTree](

| MySQL | ClickHouse |
|-------------------------|--------------------------------------------------------------|
| TINY | [Int8](../../sql-reference/data-types/int-uint.md) |
| TINY | [Int8](/docs/en/sql-reference/data-types/int-uint.md) |
| SHORT | [Int16](../../sql-reference/data-types/int-uint.md) |
| SHORT | [Int16](/docs/en/sql-reference/data-types/int-uint.md) |
| INT24 | [Int32](../../sql-reference/data-types/int-uint.md) |
| INT24 | [Int32](/docs/en/sql-reference/data-types/int-uint.md) |
| LONG | [UInt32](../../sql-reference/data-types/int-uint.md) |
| LONG | [UInt32](/docs/en/sql-reference/data-types/int-uint.md) |
| LONGLONG | [UInt64](../../sql-reference/data-types/int-uint.md) |
| LONGLONG | [UInt64](/docs/en/sql-reference/data-types/int-uint.md) |
| FLOAT | [Float32](../../sql-reference/data-types/float.md) |
| FLOAT | [Float32](/docs/en/sql-reference/data-types/float.md) |
| DOUBLE | [Float64](../../sql-reference/data-types/float.md) |
| DOUBLE | [Float64](/docs/en/sql-reference/data-types/float.md) |
| DECIMAL, NEWDECIMAL | [Decimal](../../sql-reference/data-types/decimal.md) |
| DECIMAL, NEWDECIMAL | [Decimal](/docs/en/sql-reference/data-types/decimal.md) |
| DATE, NEWDATE | [Date](../../sql-reference/data-types/date.md) |
| DATE, NEWDATE | [Date](/docs/en/sql-reference/data-types/date.md) |
| DATETIME, TIMESTAMP | [DateTime](../../sql-reference/data-types/datetime.md) |
| DATETIME, TIMESTAMP | [DateTime](/docs/en/sql-reference/data-types/datetime.md) |
| DATETIME2, TIMESTAMP2 | [DateTime64](../../sql-reference/data-types/datetime64.md) |
| DATETIME2, TIMESTAMP2 | [DateTime64](/docs/en/sql-reference/data-types/datetime64.md) |
| YEAR | [UInt16](../../sql-reference/data-types/int-uint.md) |
| YEAR | [UInt16](/docs/en/sql-reference/data-types/int-uint.md) |
| TIME | [Int64](../../sql-reference/data-types/int-uint.md) |
| TIME | [Int64](/docs/en/sql-reference/data-types/int-uint.md) |
| ENUM | [Enum](../../sql-reference/data-types/enum.md) |
| ENUM | [Enum](/docs/en/sql-reference/data-types/enum.md) |
| STRING | [String](../../sql-reference/data-types/string.md) |
| STRING | [String](/docs/en/sql-reference/data-types/string.md) |
| VARCHAR, VAR_STRING | [String](../../sql-reference/data-types/string.md) |
| VARCHAR, VAR_STRING | [String](/docs/en/sql-reference/data-types/string.md) |
| BLOB | [String](../../sql-reference/data-types/string.md) |
| BLOB | [String](/docs/en/sql-reference/data-types/string.md) |
| GEOMETRY | [String](../../sql-reference/data-types/string.md) |
| GEOMETRY | [String](/docs/en/sql-reference/data-types/string.md) |
| BINARY | [FixedString](../../sql-reference/data-types/fixedstring.md) |
| BINARY | [FixedString](/docs/en/sql-reference/data-types/fixedstring.md) |
| BIT | [UInt64](../../sql-reference/data-types/int-uint.md) |
| BIT | [UInt64](/docs/en/sql-reference/data-types/int-uint.md) |
| SET | [UInt64](../../sql-reference/data-types/int-uint.md) |
| SET | [UInt64](/docs/en/sql-reference/data-types/int-uint.md) |

[Nullable](../../sql-reference/data-types/nullable.md) is supported.
[Nullable](/docs/en/sql-reference/data-types/nullable.md) is supported.

The data of TIME type in MySQL is converted to microseconds in ClickHouse.
@ -133,7 +133,7 @@ Apart of the data types limitations there are few restrictions comparing to `MyS

### DDL Queries {#ddl-queries}

MySQL DDL queries are converted into the corresponding ClickHouse DDL queries ([ALTER](../../sql-reference/statements/alter/index.md), [CREATE](../../sql-reference/statements/create/index.md), [DROP](../../sql-reference/statements/drop), [RENAME](../../sql-reference/statements/rename.md)). If ClickHouse cannot parse some DDL query, the query is ignored.
MySQL DDL queries are converted into the corresponding ClickHouse DDL queries ([ALTER](/docs/en/sql-reference/statements/alter/index.md), [CREATE](/docs/en/sql-reference/statements/create/index.md), [DROP](/docs/en/sql-reference/statements/drop.md), [RENAME](/docs/en/sql-reference/statements/rename.md)). If ClickHouse cannot parse some DDL query, the query is ignored.

### Data Replication {#data-replication}
@ -151,7 +151,7 @@ MySQL DDL queries are converted into the corresponding ClickHouse DDL queries ([
`SELECT` query from `MaterializedMySQL` tables has some specifics:

- If `_version` is not specified in the `SELECT` query, the
[FINAL](../../sql-reference/statements/select/from.md#select-from-final) modifier is used, so only rows with
[FINAL](/docs/en/sql-reference/statements/select/from.md/#select-from-final) modifier is used, so only rows with
`MAX(_version)` are returned for each primary key value.

- If `_sign` is not specified in the `SELECT` query, `WHERE _sign=1` is used by default. So the deleted rows are not
@ -164,7 +164,7 @@ MySQL DDL queries are converted into the corresponding ClickHouse DDL queries ([
MySQL `PRIMARY KEY` and `INDEX` clauses are converted into `ORDER BY` tuples in ClickHouse tables.

ClickHouse has only one physical order, which is determined by the `ORDER BY` clause. To create a new physical order, use
[materialized views](../../sql-reference/statements/create/view.md#materialized).
[materialized views](/docs/en/sql-reference/statements/create/view.md/#materialized).
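As a rough sketch of that approach (table and column names are placeholders, not taken from this page), a materialized view keeps a second copy of the data in a different physical order:

```sql
-- Sketch: `hits` is assumed to be ordered by (CounterID, EventDate);
-- the view stores the same rows ordered by (EventDate, CounterID).
CREATE MATERIALIZED VIEW hits_by_date
ENGINE = MergeTree
ORDER BY (EventDate, CounterID)
POPULATE
AS SELECT * FROM hits;
```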
**Notes**

@ -173,7 +173,7 @@ ClickHouse has only one physical order, which is determined by `ORDER BY` clause
MySQL binlog.
- Replication can be easily broken.
- Manual operations on database and tables are forbidden.
- `MaterializedMySQL` is affected by the [optimize_on_insert](../../operations/settings/settings.md#optimize-on-insert)
- `MaterializedMySQL` is affected by the [optimize_on_insert](/docs/en/operations/settings/settings.md/#optimize-on-insert)
setting. Data is merged in the corresponding table in the `MaterializedMySQL` database when a table in the MySQL
server changes.

@ -187,19 +187,19 @@ These are the schema conversion manipulations you can do with table overrides fo

* Modify column type. Must be compatible with the original type, or replication will fail. For example,
you can modify a UInt32 column to UInt64, but you can not modify a String column to Array(String).
* Modify [column TTL](../table-engines/mergetree-family/mergetree/#mergetree-column-ttl).
* Modify [column TTL](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#mergetree-column-ttl).
* Modify [column compression codec](../../sql-reference/statements/create/table/#codecs).
* Modify [column compression codec](/docs/en/sql-reference/statements/create/table.md/#codecs).
* Add [ALIAS columns](../../sql-reference/statements/create/table/#alias).
* Add [ALIAS columns](/docs/en/sql-reference/statements/create/table.md/#alias).
* Add [skipping indexes](../table-engines/mergetree-family/mergetree/#table_engine-mergetree-data_skipping-indexes)
* Add [skipping indexes](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-data_skipping-indexes)
* Add [projections](../table-engines/mergetree-family/mergetree/#projections). Note that projection optimizations are
* Add [projections](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#projections). Note that projection optimizations are
disabled when using `SELECT ... FINAL` (which MaterializedMySQL does by default), so their utility is limited here.
`INDEX ... TYPE hypothesis` as [described in the v21.12 blog post](https://clickhouse.com/blog/en/2021/clickhouse-v21.12-released/)
may be more useful in this case.
* Modify [PARTITION BY](../table-engines/mergetree-family/custom-partitioning-key/)
* Modify [PARTITION BY](/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key/)
* Modify [ORDER BY](../table-engines/mergetree-family/mergetree/#mergetree-query-clauses)
* Modify [ORDER BY](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#mergetree-query-clauses)
* Modify [PRIMARY KEY](../table-engines/mergetree-family/mergetree/#mergetree-query-clauses)
* Modify [PRIMARY KEY](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#mergetree-query-clauses)
* Add [SAMPLE BY](../table-engines/mergetree-family/mergetree/#mergetree-query-clauses)
* Add [SAMPLE BY](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#mergetree-query-clauses)
* Add [table TTL](../table-engines/mergetree-family/mergetree/#mergetree-query-clauses)
* Add [table TTL](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#mergetree-query-clauses)

```sql
CREATE DATABASE db_name ENGINE = MaterializedMySQL(...)
```
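As a hedged sketch only of what such a table override can look like (the table name, column, codec, and partitioning expression are illustrative, not from this page):

```sql
-- Sketch of a TABLE OVERRIDE attached at database creation;
-- adjust the connection parameters and names for a real setup.
CREATE DATABASE db_name ENGINE = MaterializedMySQL('mysql-host:3306', 'db', 'user', 'password')
TABLE OVERRIDE visits
(
    COLUMNS (
        created DateTime CODEC(Delta, ZSTD) -- swap in a different codec
    )
    PARTITION BY toYYYYMM(created)          -- partition the replicated table by month
)
```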
@ -86,7 +86,7 @@ node1 :) SELECT materialize(hostName()) AS host, groupArray(n) FROM r.d GROUP BY

``` text
┌─hosts─┬─groupArray(n)─┐
│ node1 │ [1,3,5,7,9]   │
│ node3 │ [1,3,5,7,9]   │
│ node2 │ [0,2,4,6,8]   │
└───────┴───────────────┘
```
@ -6,7 +6,7 @@ sidebar_label: Integrations

# Table Engines for Integrations

ClickHouse provides various means for integrating with external systems, including table engines. Like with all other table engines, the configuration is done using `CREATE TABLE` or `ALTER TABLE` queries. Then from a user perspective, the configured integration looks like a normal table, but queries to it are proxied to the external system. This transparent querying is one of the key advantages of this approach over alternative integration methods, like external dictionaries or table functions, which require the use of custom query methods on each use.
ClickHouse provides various means for integrating with external systems, including table engines. Like with all other table engines, the configuration is done using `CREATE TABLE` or `ALTER TABLE` queries. Then from a user perspective, the configured integration looks like a normal table, but queries to it are proxied to the external system. This transparent querying is one of the key advantages of this approach over alternative integration methods, like dictionaries or table functions, which require the use of custom query methods on each use.
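A minimal sketch of one such engine (connection details and table names are placeholders; this assumes a reachable MySQL server with a `shop.orders` table):

```sql
-- Sketch: the table acts as a transparent proxy; queries against it
-- are forwarded to the MySQL server.
CREATE TABLE mysql_proxy
(
    id UInt32,
    name String
)
ENGINE = MySQL('mysql-host:3306', 'shop', 'orders', 'user', 'password');

SELECT count() FROM mysql_proxy; -- executed against MySQL
```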

List of supported integrations:

@ -180,6 +180,6 @@ Default value: `300`.
## See Also {#see-also}

- [The mysql table function](../../../sql-reference/table-functions/mysql.md)
- [Using MySQL as a source of external dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-mysql)
- [Using MySQL as a dictionary source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-mysql)

[Original article](https://clickhouse.com/docs/en/engines/table-engines/integrations/mysql/) <!--hide-->
@ -126,7 +126,7 @@ SELECT * FROM odbc_t

## See Also {#see-also}

- [ODBC external dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-odbc)
- [ODBC dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-odbc)
- [ODBC table function](../../../sql-reference/table-functions/odbc.md)

[Original article](https://clickhouse.com/docs/en/engines/table-engines/integrations/odbc/) <!--hide-->
|
@ -174,6 +174,6 @@ CREATE TABLE pg_table_schema_with_dots (a UInt32)
|
|||||||
**See Also**
|
**See Also**
|
||||||
|
|
||||||
- [The `postgresql` table function](../../../sql-reference/table-functions/postgresql.md)
|
- [The `postgresql` table function](../../../sql-reference/table-functions/postgresql.md)
|
||||||
- [Using PostgreSQL as a source of external dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql)
|
- [Using PostgreSQL as a dictionary source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql)
|
||||||
|
|
||||||
[Original article](https://clickhouse.com/docs/en/engines/table-engines/integrations/postgresql/) <!--hide-->
|
[Original article](https://clickhouse.com/docs/en/engines/table-engines/integrations/postgresql/) <!--hide-->
|
||||||
|
@ -127,6 +127,10 @@ The following settings can be set before query execution or placed into configur
|
|||||||
- `s3_min_upload_part_size` — The minimum size of part to upload during multipart upload to [S3 Multipart upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). Default value is `512Mb`.
|
- `s3_min_upload_part_size` — The minimum size of part to upload during multipart upload to [S3 Multipart upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). Default value is `512Mb`.
|
||||||
- `s3_max_redirects` — Max number of S3 redirects hops allowed. Default value is `10`.
|
- `s3_max_redirects` — Max number of S3 redirects hops allowed. Default value is `10`.
|
||||||
- `s3_single_read_retries` — The maximum number of attempts during single read. Default value is `4`.
|
- `s3_single_read_retries` — The maximum number of attempts during single read. Default value is `4`.
|
||||||
|
- `s3_max_put_rps` — Maximum PUT requests per second rate before throttling. Default value is `0` (unlimited).
|
||||||
|
- `s3_max_put_burst` — Max number of requests that can be issued simultaneously before hitting request per second limit. By default (`0` value) equals to `s3_max_put_rps`.
|
||||||
|
- `s3_max_get_rps` — Maximum GET requests per second rate before throttling. Default value is `0` (unlimited).
|
||||||
|
- `s3_max_get_burst` — Max number of requests that can be issued simultaneously before hitting request per second limit. By default (`0` value) equals to `s3_max_get_rps`.
|
||||||
|
|
||||||
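A hedged sketch of applying the throttling settings above per query (the bucket URL and limits are placeholders):

```sql
-- Sketch: cap the GET request rate for a single S3 read.
SELECT count()
FROM s3('https://my-bucket.s3.amazonaws.com/data/*.parquet', 'Parquet')
SETTINGS s3_max_get_rps = 100, s3_max_get_burst = 200;
```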

Security consideration: if a malicious user can specify arbitrary S3 URLs, `s3_max_redirects` must be set to zero to avoid [SSRF](https://en.wikipedia.org/wiki/Server-side_request_forgery) attacks; or alternatively, `remote_host_filter` must be specified in server configuration.
|
|||||||
- `header` — Adds specified HTTP header to a request to given endpoint. Optional, can be specified multiple times.
|
- `header` — Adds specified HTTP header to a request to given endpoint. Optional, can be specified multiple times.
|
||||||
- `server_side_encryption_customer_key_base64` — If specified, required headers for accessing S3 objects with SSE-C encryption will be set. Optional.
|
- `server_side_encryption_customer_key_base64` — If specified, required headers for accessing S3 objects with SSE-C encryption will be set. Optional.
|
||||||
- `max_single_read_retries` — The maximum number of attempts during single read. Default value is `4`. Optional.
|
- `max_single_read_retries` — The maximum number of attempts during single read. Default value is `4`. Optional.
|
||||||
|
- `max_put_rps`, `max_put_burst`, `max_get_rps` and `max_get_burst` - Throttling settings (see description above) to use for specific endpoint instead of per query. Optional.
|
||||||
|
|
||||||
**Example:**
|
**Example:**
|
||||||
|
|
||||||
|
@ -10,11 +10,11 @@ These engines were developed for scenarios when you need to quickly write many s
|
|||||||
|
|
||||||
Engines of the family:
|
Engines of the family:
|
||||||
|
|
||||||
- [StripeLog](../../../engines/table-engines/log-family/stripelog.md)
|
- [StripeLog](/docs/en/engines/table-engines/log-family/stripelog.md)
|
||||||
- [Log](../../../engines/table-engines/log-family/log.md)
|
- [Log](/docs/en/engines/table-engines/log-family/log.md)
|
||||||
- [TinyLog](../../../engines/table-engines/log-family/tinylog.md)
|
- [TinyLog](/docs/en/engines/table-engines/log-family/tinylog.md)
|
||||||
|
|
||||||
`Log` family table engines can store data to [HDFS](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-hdfs) or [S3](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-s3) distributed file systems.
|
`Log` family table engines can store data to [HDFS](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-hdfs) or [S3](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-s3) distributed file systems.
|
||||||
|
|
||||||
## Common Properties {#common-properties}
|
## Common Properties {#common-properties}
|
||||||
|
|
||||||
@ -28,7 +28,7 @@ Engines:

During `INSERT` queries, the table is locked, and other queries for reading and writing data both wait for the table to unlock. If there are no data writing queries, any number of data reading queries can be performed concurrently.

- Do not support [mutations](../../../sql-reference/statements/alter/index.md#alter-mutations).
- Do not support [mutations](/docs/en/sql-reference/statements/alter/index.md#alter-mutations).

- Do not support indexes.

@ -68,36 +68,57 @@ In the results of `SELECT` query, the values of `AggregateFunction` type have im

## Example of an Aggregated Materialized View {#example-of-an-aggregated-materialized-view}

`AggregatingMergeTree` materialized view that watches the `test.visits` table:
We will create the table `test.visits` that contains the raw data:

``` sql
CREATE MATERIALIZED VIEW test.basic
ENGINE = AggregatingMergeTree() PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate)
AS SELECT
    CounterID,
    StartDate,
    sumState(Sign) AS Visits,
    uniqState(UserID) AS Users
FROM test.visits
GROUP BY CounterID, StartDate;
```
``` sql
CREATE TABLE test.visits
(
    StartDate DateTime64 NOT NULL,
    CounterID UInt64,
    Sign Nullable(Int32),
    UserID Nullable(Int32)
) ENGINE = MergeTree ORDER BY (StartDate, CounterID);
```

`AggregatingMergeTree` materialized view that watches the `test.visits` table, and uses the `AggregateFunction` type:

``` sql
CREATE MATERIALIZED VIEW test.mv_visits
(
    StartDate DateTime64 NOT NULL,
    CounterID UInt64,
    Visits AggregateFunction(sum, Nullable(Int32)),
    Users AggregateFunction(uniq, Nullable(Int32))
)
ENGINE = AggregatingMergeTree() ORDER BY (StartDate, CounterID)
AS SELECT
    StartDate,
    CounterID,
    sumState(Sign) AS Visits,
    uniqState(UserID) AS Users
FROM test.visits
GROUP BY StartDate, CounterID;
```

Inserting data into the `test.visits` table.

``` sql
INSERT INTO test.visits ...
INSERT INTO test.visits (StartDate, CounterID, Sign, UserID)
 VALUES (1667446031, 1, 3, 4)
INSERT INTO test.visits (StartDate, CounterID, Sign, UserID)
 VALUES (1667446031, 1, 6, 3)
```

The data are inserted in both the table and view `test.basic` that will perform the aggregation.
The data are inserted in both the table and the materialized view `test.mv_visits`.

To get the aggregated data, we need to execute a query such as `SELECT ... GROUP BY ...` from the view `test.basic`:
To get the aggregated data, we need to execute a query such as `SELECT ... GROUP BY ...` from the materialized view `test.mv_visits`:

``` sql
SELECT
    StartDate,
    sumMerge(Visits) AS Visits,
    uniqMerge(Users) AS Users
FROM test.basic
FROM test.mv_visits
GROUP BY StartDate
ORDER BY StartDate;
```
@ -2,13 +2,20 @@

The main task that indexes achieve is to quickly find nearest neighbors for multidimensional data. An example of such a problem can be finding similar pictures (texts) for a given picture (text). That problem can be reduced to finding the nearest [embeddings](https://cloud.google.com/architecture/overview-extracting-and-serving-feature-embeddings-for-machine-learning). They can be created from data using [UDF](../../../sql-reference/functions/index.md#executable-user-defined-functions).

The next query finds the closest neighbors in N-dimensional space using the L2 (Euclidean) distance:
The next queries find the closest neighbors in N-dimensional space using the L2 (Euclidean) distance:
``` sql
SELECT *
FROM table_name
WHERE L2Distance(Column, Point) < MaxDistance
LIMIT N
```

``` sql
SELECT *
FROM table_name
ORDER BY L2Distance(Column, Point)
LIMIT N
```

But it will take some time for execution because of the long calculation of the distance between `TargetEmbedding` and all other vectors. This is where ANN indexes can help. They store a compact approximation of the search space (e.g. using clustering, search trees, etc.) and are able to compute approximate neighbors quickly.

## Indexes Structure
@ -34,26 +41,27 @@ Approximate Nearest Neighbor Search Indexes (`ANNIndexes`) are similar to skip i

In these queries, `DistanceFunction` is selected from [distance functions](../../../sql-reference/functions/distance-functions). `Point` is a known vector (something like `(0.1, 0.1, ... )`). To avoid writing large vectors, use [client parameters](../../../interfaces/cli.md#queries-with-parameters-cli-queries-with-parameters). `Value` - a float value that will bound the neighbourhood.

!!! note "Note"
:::note
ANN index can't speed up a query that satisfies both types (`where + order by`, only one of them). All queries must have the limit, as algorithms are used to find nearest neighbors and need a specific number of them.
ANN index can't speed up a query that satisfies both types (`where + order by`, only one of them). All queries must have the limit, as algorithms are used to find nearest neighbors and need a specific number of them.
:::

!!! note "Note"
:::note
Indexes are applied only to queries with a limit less than the `max_limit_for_ann_queries` setting. This helps to avoid memory overflows in queries with a large limit. `max_limit_for_ann_queries` setting can be changed if you know you can provide enough memory. The default value is `1000000`.
:::

Both types of queries are handled the same way. The indexes get `n` neighbors (where `n` is taken from the `LIMIT` clause) and work with them. In `ORDER BY` query they remember the numbers of all parts of the granule that have at least one of the neighbors. In `WHERE` query they remember only those parts that satisfy the requirements.

## Create table with ANNIndex

This feature is disabled by default. To enable it, set `allow_experimental_annoy_index` to 1. Also, this feature is disabled for arm, due to likely problems with the algorithm.
This feature is disabled by default. To enable it, set `allow_experimental_annoy_index` to 1. Also, this feature is disabled on ARM, due to likely problems with the algorithm.

```sql
CREATE TABLE t
(
    `id` Int64,
    `number` Tuple(Float32, Float32, Float32),
    `data` Tuple(Float32, Float32, Float32),
    INDEX x number TYPE annoy GRANULARITY N
    INDEX ann_index_name data TYPE ann_index_type(ann_index_parameters) GRANULARITY N
)
ENGINE = MergeTree
ORDER BY id;
```
|
|||||||
CREATE TABLE t
|
CREATE TABLE t
|
||||||
(
|
(
|
||||||
`id` Int64,
|
`id` Int64,
|
||||||
`number` Array(Float32),
|
`data` Array(Float32),
|
||||||
INDEX x number TYPE annoy GRANULARITY N
|
INDEX ann_index_name data TYPE ann_index_type(ann_index_parameters) GRANULARITY N
|
||||||
)
|
)
|
||||||
ENGINE = MergeTree
|
ENGINE = MergeTree
|
||||||
ORDER BY id;
|
ORDER BY id;
|
||||||
@ -73,7 +81,7 @@ ORDER BY id;
|
|||||||
With greater `GRANULARITY` indexes remember the data structure better. The `GRANULARITY` indicates how many granules will be used to construct the index. The more data is provided for the index, the more of it can be handled by one index and the more chances that with the right hyperparameters the index will remember the data structure better. But some indexes can't be built if they don't have enough data, so this granule will always participate in the query. For more information, see the description of indexes.
|
With greater `GRANULARITY` indexes remember the data structure better. The `GRANULARITY` indicates how many granules will be used to construct the index. The more data is provided for the index, the more of it can be handled by one index and the more chances that with the right hyperparameters the index will remember the data structure better. But some indexes can't be built if they don't have enough data, so this granule will always participate in the query. For more information, see the description of indexes.
|
||||||
|
|
||||||
As the indexes are built only during insertions into table, `INSERT` and `OPTIMIZE` queries are slower than for ordinary table. At this stage indexes remember all the information about the given data. ANNIndexes should be used if you have immutable or rarely changed data and many read requests.
|
As the indexes are built only during insertions into table, `INSERT` and `OPTIMIZE` queries are slower than for ordinary table. At this stage indexes remember all the information about the given data. ANNIndexes should be used if you have immutable or rarely changed data and many read requests.
|
||||||
|
|
||||||
You can create your table with index which uses certain algorithm. Now only indices based on the following algorithms are supported:
|
You can create your table with index which uses certain algorithm. Now only indices based on the following algorithms are supported:
|
||||||
|
|
||||||
# Index list
|
# Index list
|
||||||
@ -91,8 +99,8 @@ __Examples__:
|
|||||||
CREATE TABLE t
|
CREATE TABLE t
|
||||||
(
|
(
|
||||||
id Int64,
|
id Int64,
|
||||||
number Tuple(Float32, Float32, Float32),
|
data Tuple(Float32, Float32, Float32),
|
||||||
INDEX x number TYPE annoy(T) GRANULARITY N
|
INDEX ann_index_name data TYPE annoy(NumTrees, DistanceName) GRANULARITY N
|
||||||
)
|
)
|
||||||
ENGINE = MergeTree
|
ENGINE = MergeTree
|
||||||
ORDER BY id;
|
ORDER BY id;
|
||||||
@ -102,18 +110,30 @@ ORDER BY id;
CREATE TABLE t
(
    id Int64,
    number Array(Float32),
    data Array(Float32),
    INDEX x number TYPE annoy(T) GRANULARITY N
    INDEX ann_index_name data TYPE annoy(NumTrees, DistanceName) GRANULARITY N
)
ENGINE = MergeTree
ORDER BY id;
```
!!! note "Note"
    Table with array field will work faster, but all arrays **must** have same length. Use [CONSTRAINT](../../../sql-reference/statements/create/table.md#constraints) to avoid errors. For example, `CONSTRAINT constraint_name_1 CHECK length(number) = 256`.

Parameter `T` is the number of trees which algorithm will create. The bigger it is, the slower (approximately linear) it works (in both `CREATE` and `SELECT` requests), but the better accuracy you get (adjusted for randomness).

:::note
Table with array field will work faster, but all arrays **must** have same length. Use [CONSTRAINT](../../../sql-reference/statements/create/table.md#constraints) to avoid errors. For example, `CONSTRAINT constraint_name_1 CHECK length(data) = 256`.
:::
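Putting the note together with the index, a sketch might look like the following (the constraint name and array length are illustrative; `NumTrees`, `DistanceName`, and `N` are the placeholder parameters used throughout this page):

```sql
-- Sketch: fixed-length arrays enforced by a CONSTRAINT, as the note suggests.
CREATE TABLE t
(
    id Int64,
    data Array(Float32),
    CONSTRAINT data_len CHECK length(data) = 256,
    INDEX ann_index_name data TYPE annoy(NumTrees, DistanceName) GRANULARITY N
)
ENGINE = MergeTree
ORDER BY id;
```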

Annoy supports only `L2Distance`.
Parameter `NumTrees` is the number of trees which the algorithm will create. The bigger it is, the slower (approximately linear) it works (in both `CREATE` and `SELECT` requests), but the better accuracy you get (adjusted for randomness). By default it is set to `100`. Parameter `DistanceName` is the name of the distance function. By default it is set to `L2Distance`. It can be set without changing the first parameter, for example:

```sql
CREATE TABLE t
(
    id Int64,
    data Array(Float32),
    INDEX ann_index_name data TYPE annoy('cosineDistance') GRANULARITY N
)
ENGINE = MergeTree
ORDER BY id;
```

Annoy supports `L2Distance` and `cosineDistance`.

In the `SELECT` in the settings (`ann_index_select_query_params`) you can specify the size of the internal buffer (more details in the description above or in the [original repository](https://github.com/spotify/annoy)). During the query it will inspect up to `search_k` nodes which defaults to `n_trees * n` if not provided. `search_k` gives you a run-time tradeoff between better accuracy and speed.
@ -16,20 +16,20 @@ Main features:

This allows you to create a small sparse index that helps find data faster.

- Partitions can be used if the [partitioning key](../../../engines/table-engines/mergetree-family/custom-partitioning-key.md) is specified.
- Partitions can be used if the [partitioning key](/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key.md) is specified.

ClickHouse supports certain operations with partitions that are more efficient than general operations on the same data with the same result. ClickHouse also automatically cuts off the partition data where the partitioning key is specified in the query.

- Data replication support.

The family of `ReplicatedMergeTree` tables provides data replication. For more information, see [Data replication](../../../engines/table-engines/mergetree-family/replication.md).
The family of `ReplicatedMergeTree` tables provides data replication. For more information, see [Data replication](/docs/en/engines/table-engines/mergetree-family/replication.md).

- Data sampling support.

If necessary, you can set the data sampling method in the table.

:::info
The [Merge](../../../engines/table-engines/special/merge.md#merge) engine does not belong to the `*MergeTree` family.
The [Merge](/docs/en/engines/table-engines/special/merge.md/#merge) engine does not belong to the `*MergeTree` family.
:::

## Creating a Table {#table_engine-mergetree-creating-a-table}
@ -57,7 +57,7 @@ ORDER BY expr
[SETTINGS name=value, ...]
```

For a description of parameters, see the [CREATE query description](../../../sql-reference/statements/create/table.md).
For a description of parameters, see the [CREATE query description](/docs/en/sql-reference/statements/create/table.md).

### Query Clauses {#mergetree-query-clauses}

@ -77,9 +77,9 @@ Use the `ORDER BY tuple()` syntax, if you do not need sorting. See [Selecting th

#### PARTITION BY

`PARTITION BY` — The [partitioning key](../../../engines/table-engines/mergetree-family/custom-partitioning-key.md). Optional. In most cases you don't need partition key, and in most other cases you don't need partition key more granular than by months. Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular partitioning. Don't partition your data by client identifiers or names (instead make client identifier or name the first column in the ORDER BY expression).
`PARTITION BY` — The [partitioning key](/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key.md). Optional. In most cases you don't need partition key, and in most other cases you don't need partition key more granular than by months. Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular partitioning. Don't partition your data by client identifiers or names (instead make client identifier or name the first column in the ORDER BY expression).

For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](../../../sql-reference/data-types/date.md). The partition names here have the `"YYYYMM"` format.
For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](/docs/en/sql-reference/data-types/date.md). The partition names here have the `"YYYYMM"` format.

#### PRIMARY KEY

@ -127,7 +127,7 @@ Additional parameters that control the behavior of the `MergeTree` (optional):

#### use_minimalistic_part_header_in_zookeeper

`use_minimalistic_part_header_in_zookeeper` — Storage method of the data parts headers in ZooKeeper. If `use_minimalistic_part_header_in_zookeeper=1`, then ZooKeeper stores less data. For more information, see the [setting description](../../../operations/server-configuration-parameters/settings.md#server-settings-use_minimalistic_part_header_in_zookeeper) in “Server configuration parameters”.
`use_minimalistic_part_header_in_zookeeper` — Storage method of the data parts headers in ZooKeeper. If `use_minimalistic_part_header_in_zookeeper=1`, then ZooKeeper stores less data. For more information, see the [setting description](/docs/en/operations/server-configuration-parameters/settings.md/#server-settings-use_minimalistic_part_header_in_zookeeper) in “Server configuration parameters”.

#### min_merge_bytes_to_use_direct_io

@ -166,15 +166,15 @@ Additional parameters that control the behavior of the `MergeTree` (optional):

#### max_compress_block_size

`max_compress_block_size` — Maximum size of blocks of uncompressed data before compressing for writing to a table. You can also specify this setting in the global settings (see [max_compress_block_size](../../../operations/settings/settings.md#max-compress-block-size) setting). The value specified when table is created overrides the global value for this setting.
`max_compress_block_size` — Maximum size of blocks of uncompressed data before compressing for writing to a table. You can also specify this setting in the global settings (see [max_compress_block_size](/docs/en/operations/settings/settings.md/#max-compress-block-size) setting). The value specified when table is created overrides the global value for this setting.

#### min_compress_block_size

`min_compress_block_size` — Minimum size of blocks of uncompressed data required for compression when writing the next mark. You can also specify this setting in the global settings (see [min_compress_block_size](../../../operations/settings/settings.md#min-compress-block-size) setting). The value specified when table is created overrides the global value for this setting.
`min_compress_block_size` — Minimum size of blocks of uncompressed data required for compression when writing the next mark. You can also specify this setting in the global settings (see [min_compress_block_size](/docs/en/operations/settings/settings.md/#min-compress-block-size) setting). The value specified when table is created overrides the global value for this setting.

#### max_partitions_to_read

`max_partitions_to_read` — Limits the maximum number of partitions that can be accessed in one query. You can also specify setting [max_partitions_to_read](../../../operations/settings/merge-tree-settings.md#max-partitions-to-read) in the global setting.
`max_partitions_to_read` — Limits the maximum number of partitions that can be accessed in one query. You can also specify setting [max_partitions_to_read](/docs/en/operations/settings/merge-tree-settings.md/#max-partitions-to-read) in the global setting.
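The same limit can also be applied per query rather than in the table definition; a minimal sketch (the table name is a placeholder):

```sql
-- Sketch: fail the query if it would touch more than 10 partitions.
SELECT count()
FROM table_name
SETTINGS max_partitions_to_read = 10;
```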

**Example of Sections Setting**

@ -184,7 +184,7 @@ ENGINE MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDa

In the example, we set partitioning by month.

We also set an expression for sampling as a hash by the user ID. This allows you to pseudorandomize the data in the table for each `CounterID` and `EventDate`. If you define a [SAMPLE](../../../sql-reference/statements/select/sample.md#select-sample-clause) clause when selecting the data, ClickHouse will return an evenly pseudorandom data sample for a subset of users.
We also set an expression for sampling as a hash by the user ID. This allows you to pseudorandomize the data in the table for each `CounterID` and `EventDate`. If you define a [SAMPLE](/docs/en/sql-reference/statements/select/sample.md/#select-sample-clause) clause when selecting the data, ClickHouse will return an evenly pseudorandom data sample for a subset of users.

The `index_granularity` setting can be omitted because 8192 is the default value.

@ -207,9 +207,9 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

**MergeTree() Parameters**

- `date-column` — The name of a column of the [Date](../../../sql-reference/data-types/date.md) type. ClickHouse automatically creates partitions by month based on this column. The partition names are in the `"YYYYMM"` format.
- `date-column` — The name of a column of the [Date](/docs/en/sql-reference/data-types/date.md) type. ClickHouse automatically creates partitions by month based on this column. The partition names are in the `"YYYYMM"` format.
- `sampling_expression` — An expression for sampling.
- `(primary, key)` — Primary key. Type: [Tuple()](../../../sql-reference/data-types/tuple.md)
- `(primary, key)` — Primary key. Type: [Tuple()](/docs/en/sql-reference/data-types/tuple.md)
- `index_granularity` — The granularity of an index. The number of data rows between the “marks” of an index. The value 8192 is appropriate for most tasks.

**Example**
|
|||||||
|
|
||||||
ClickHouse does not require a unique primary key. You can insert multiple rows with the same primary key.
|
ClickHouse does not require a unique primary key. You can insert multiple rows with the same primary key.
|
||||||
|
|
||||||
You can use `Nullable`-typed expressions in the `PRIMARY KEY` and `ORDER BY` clauses but it is strongly discouraged. To allow this feature, turn on the [allow_nullable_key](../../../operations/settings/settings.md#allow-nullable-key) setting. The [NULLS_LAST](../../../sql-reference/statements/select/order-by.md#sorting-of-special-values) principle applies for `NULL` values in the `ORDER BY` clause.
|
You can use `Nullable`-typed expressions in the `PRIMARY KEY` and `ORDER BY` clauses but it is strongly discouraged. To allow this feature, turn on the [allow_nullable_key](/docs/en/operations/settings/settings.md/#allow-nullable-key) setting. The [NULLS_LAST](/docs/en/sql-reference/statements/select/order-by.md/#sorting-of-special-values) principle applies for `NULL` values in the `ORDER BY` clause.
|
||||||
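A short sketch of the setting in action (names are placeholders; as the text says, this is discouraged):

```sql
-- Sketch: a nullable sorting key must be enabled explicitly.
CREATE TABLE t_nullable_key
(
    k Nullable(Int32),
    v String
)
ENGINE = MergeTree
ORDER BY k
SETTINGS allow_nullable_key = 1;
```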

### Selecting the Primary Key {#selecting-the-primary-key}

@@ -279,26 +279,26 @@ The number of columns in the primary key is not explicitly limited. Depending on
 
 ClickHouse sorts data by primary key, so the higher the consistency, the better the compression.
 
-- Provide additional logic when merging data parts in the [CollapsingMergeTree](../../../engines/table-engines/mergetree-family/collapsingmergetree.md#table_engine-collapsingmergetree) and [SummingMergeTree](../../../engines/table-engines/mergetree-family/summingmergetree.md) engines.
+- Provide additional logic when merging data parts in the [CollapsingMergeTree](/docs/en/engines/table-engines/mergetree-family/collapsingmergetree.md/#table_engine-collapsingmergetree) and [SummingMergeTree](/docs/en/engines/table-engines/mergetree-family/summingmergetree.md) engines.
 
 In this case it makes sense to specify the *sorting key* that is different from the primary key.
 
 A long primary key will negatively affect the insert performance and memory consumption, but extra columns in the primary key do not affect ClickHouse performance during `SELECT` queries.
 
-You can create a table without a primary key using the `ORDER BY tuple()` syntax. In this case, ClickHouse stores data in the order of inserting. If you want to save data order when inserting data by `INSERT ... SELECT` queries, set [max_insert_threads = 1](../../../operations/settings/settings.md#settings-max-insert-threads).
+You can create a table without a primary key using the `ORDER BY tuple()` syntax. In this case, ClickHouse stores data in the order of inserting. If you want to save data order when inserting data by `INSERT ... SELECT` queries, set [max_insert_threads = 1](/docs/en/operations/settings/settings.md/#settings-max-insert-threads).
 
-To select data in the initial order, use [single-threaded](../../../operations/settings/settings.md#settings-max_threads) `SELECT` queries.
+To select data in the initial order, use [single-threaded](/docs/en/operations/settings/settings.md/#settings-max_threads) `SELECT` queries.
 
 ### Choosing a Primary Key that Differs from the Sorting Key {#choosing-a-primary-key-that-differs-from-the-sorting-key}
 
 It is possible to specify a primary key (an expression with values that are written in the index file for each mark) that is different from the sorting key (an expression for sorting the rows in data parts). In this case the primary key expression tuple must be a prefix of the sorting key expression tuple.
 
-This feature is helpful when using the [SummingMergeTree](../../../engines/table-engines/mergetree-family/summingmergetree.md) and
-[AggregatingMergeTree](../../../engines/table-engines/mergetree-family/aggregatingmergetree.md) table engines. In a common case when using these engines, the table has two types of columns: *dimensions* and *measures*. Typical queries aggregate values of measure columns with arbitrary `GROUP BY` and filtering by dimensions. Because SummingMergeTree and AggregatingMergeTree aggregate rows with the same value of the sorting key, it is natural to add all dimensions to it. As a result, the key expression consists of a long list of columns and this list must be frequently updated with newly added dimensions.
+This feature is helpful when using the [SummingMergeTree](/docs/en/engines/table-engines/mergetree-family/summingmergetree.md) and
+[AggregatingMergeTree](/docs/en/engines/table-engines/mergetree-family/aggregatingmergetree.md) table engines. In a common case when using these engines, the table has two types of columns: *dimensions* and *measures*. Typical queries aggregate values of measure columns with arbitrary `GROUP BY` and filtering by dimensions. Because SummingMergeTree and AggregatingMergeTree aggregate rows with the same value of the sorting key, it is natural to add all dimensions to it. As a result, the key expression consists of a long list of columns and this list must be frequently updated with newly added dimensions.
 
 In this case it makes sense to leave only a few columns in the primary key that will provide efficient range scans and add the remaining dimension columns to the sorting key tuple.
 
-[ALTER](../../../sql-reference/statements/alter/index.md) of the sorting key is a lightweight operation because when a new column is simultaneously added to the table and to the sorting key, existing data parts do not need to be changed. Since the old sorting key is a prefix of the new sorting key and there is no data in the newly added column, the data is sorted by both the old and new sorting keys at the moment of table modification.
+[ALTER](/docs/en/sql-reference/statements/alter/index.md) of the sorting key is a lightweight operation because when a new column is simultaneously added to the table and to the sorting key, existing data parts do not need to be changed. Since the old sorting key is a prefix of the new sorting key and there is no data in the newly added column, the data is sorted by both the old and new sorting keys at the moment of table modification.
 
 ### Use of Indexes and Partitions in Queries {#use-of-indexes-and-partitions-in-queries}
 
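To illustrate the two points from the hunk above, a short sketch with hypothetical table names: a primary key that is a prefix of a longer sorting key, and a table with no sort order at all.

``` sql
-- The primary key (CounterID, EventDate) is a prefix of the sorting key.
CREATE TABLE pk_prefix_example
(
    CounterID UInt32,
    EventDate Date,
    UserID UInt64
)
ENGINE = MergeTree
PRIMARY KEY (CounterID, EventDate)
ORDER BY (CounterID, EventDate, UserID);

-- A table without a primary key stores rows in insertion order.
CREATE TABLE unordered_example (x UInt64) ENGINE = MergeTree ORDER BY tuple();
```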
@@ -342,7 +342,7 @@ In the example below, the index can’t be used.
 SELECT count() FROM table WHERE CounterID = 34 OR URL LIKE '%upyachka%'
 ```
 
-To check whether ClickHouse can use the index when running a query, use the settings [force_index_by_date](../../../operations/settings/settings.md#settings-force_index_by_date) and [force_primary_key](../../../operations/settings/settings.md#force-primary-key).
+To check whether ClickHouse can use the index when running a query, use the settings [force_index_by_date](/docs/en/operations/settings/settings.md/#settings-force_index_by_date) and [force_primary_key](/docs/en/operations/settings/settings.md/#force-primary-key).
 
 The key for partitioning by month allows reading only those data blocks which contain dates from the proper range. In this case, the data block may contain data for many dates (up to an entire month). Within a block, data is sorted by primary key, which might not contain the date as the first column. Because of this, using a query with only a date condition that does not specify the primary key prefix will cause more data to be read than for a single date.
 
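A hedged sketch of how the check described above behaves; `table` is the docs' own placeholder name.

``` sql
-- With force_primary_key enabled, a query whose WHERE clause cannot use
-- the primary key is rejected instead of silently scanning the table.
SET force_primary_key = 1;
SELECT count() FROM table WHERE URL LIKE '%upyachka%'; -- expected to raise an error
```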
@@ -400,7 +400,7 @@ Stores unique values of the specified expression (no more than `max_rows` rows,
 
 #### `ngrambf_v1(n, size_of_bloom_filter_in_bytes, number_of_hash_functions, random_seed)`
 
-Stores a [Bloom filter](https://en.wikipedia.org/wiki/Bloom_filter) that contains all ngrams from a block of data. Works only with datatypes: [String](../../../sql-reference/data-types/string.md), [FixedString](../../../sql-reference/data-types/fixedstring.md) and [Map](../../../sql-reference/data-types/map.md). Can be used for optimization of `EQUALS`, `LIKE` and `IN` expressions.
+Stores a [Bloom filter](https://en.wikipedia.org/wiki/Bloom_filter) that contains all ngrams from a block of data. Works only with datatypes: [String](/docs/en/sql-reference/data-types/string.md), [FixedString](/docs/en/sql-reference/data-types/fixedstring.md) and [Map](/docs/en/sql-reference/data-types/map.md). Can be used for optimization of `EQUALS`, `LIKE` and `IN` expressions.
 
 - `n` — ngram size,
 - `size_of_bloom_filter_in_bytes` — Bloom filter size in bytes (you can use large values here, for example, 256 or 512, because it can be compressed well).
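For orientation, a minimal sketch of declaring such an index with the parameters listed above; the table and index names are hypothetical.

``` sql
-- 3-grams, 256-byte filter, 2 hash functions, random seed 0.
CREATE TABLE ngram_example
(
    s String,
    INDEX idx_s s TYPE ngrambf_v1(3, 256, 2, 0) GRANULARITY 1
)
ENGINE = MergeTree
ORDER BY s;
```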
@@ -417,11 +417,11 @@ The optional `false_positive` parameter is the probability of receiving a false
 
 Supported data types: `Int*`, `UInt*`, `Float*`, `Enum`, `Date`, `DateTime`, `String`, `FixedString`, `Array`, `LowCardinality`, `Nullable`, `UUID`, `Map`.
 
-For `Map` data type client can specify if index should be created for keys or values using [mapKeys](../../../sql-reference/functions/tuple-map-functions.md#mapkeys) or [mapValues](../../../sql-reference/functions/tuple-map-functions.md#mapvalues) function.
+For `Map` data type client can specify if index should be created for keys or values using [mapKeys](/docs/en/sql-reference/functions/tuple-map-functions.md/#mapkeys) or [mapValues](/docs/en/sql-reference/functions/tuple-map-functions.md/#mapvalues) function.
 
 There are also special-purpose and experimental indexes to support approximate nearest neighbor (ANN) queries. See [here](annindexes.md) for details.
 
-The following functions can use the filter: [equals](../../../sql-reference/functions/comparison-functions.md), [notEquals](../../../sql-reference/functions/comparison-functions.md), [in](../../../sql-reference/functions/in-functions), [notIn](../../../sql-reference/functions/in-functions), [has](../../../sql-reference/functions/array-functions#hasarr-elem), [hasAny](../../../sql-reference/functions/array-functions#hasany), [hasAll](../../../sql-reference/functions/array-functions#hasall).
+The following functions can use the filter: [equals](/docs/en/sql-reference/functions/comparison-functions.md), [notEquals](/docs/en/sql-reference/functions/comparison-functions.md), [in](/docs/en/sql-reference/functions/in-functions), [notIn](/docs/en/sql-reference/functions/in-functions), [has](/docs/en/sql-reference/functions/array-functions#hasarr-elem), [hasAny](/docs/en/sql-reference/functions/array-functions#hasany), [hasAll](/docs/en/sql-reference/functions/array-functions#hasall).
 
 Example of index creation for `Map` data type
 
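The hunk ends before the file's own example, so here is a sketch of what `mapKeys`/`mapValues` indexing looks like; all names are hypothetical.

``` sql
-- Hypothetical skip indexes over the keys and the values of a Map column.
CREATE TABLE map_index_example
(
    map_column Map(String, UInt64),
    INDEX map_key_index mapKeys(map_column) TYPE bloom_filter GRANULARITY 1,
    INDEX map_value_index mapValues(map_column) TYPE bloom_filter GRANULARITY 1
)
ENGINE = MergeTree
ORDER BY tuple();
```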
@@ -445,21 +445,21 @@ The `set` index can be used with all functions. Function subsets for other index
 
 | Function (operator) / Index | primary key | minmax | ngrambf_v1 | tokenbf_v1 | bloom_filter |
 |------------------------------------------------------------------------------------------------------------|-------------|--------|-------------|-------------|---------------|
-| [equals (=, ==)](../../../sql-reference/functions/comparison-functions.md#function-equals) | ✔ | ✔ | ✔ | ✔ | ✔ |
+| [equals (=, ==)](/docs/en/sql-reference/functions/comparison-functions.md/#function-equals) | ✔ | ✔ | ✔ | ✔ | ✔ |
-| [notEquals(!=, <>)](../../../sql-reference/functions/comparison-functions.md#function-notequals) | ✔ | ✔ | ✔ | ✔ | ✔ |
+| [notEquals(!=, <>)](/docs/en/sql-reference/functions/comparison-functions.md/#function-notequals) | ✔ | ✔ | ✔ | ✔ | ✔ |
-| [like](../../../sql-reference/functions/string-search-functions.md#function-like) | ✔ | ✔ | ✔ | ✔ | ✗ |
+| [like](/docs/en/sql-reference/functions/string-search-functions.md/#function-like) | ✔ | ✔ | ✔ | ✔ | ✗ |
-| [notLike](../../../sql-reference/functions/string-search-functions.md#function-notlike) | ✔ | ✔ | ✔ | ✔ | ✗ |
+| [notLike](/docs/en/sql-reference/functions/string-search-functions.md/#function-notlike) | ✔ | ✔ | ✔ | ✔ | ✗ |
-| [startsWith](../../../sql-reference/functions/string-functions.md#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ |
+| [startsWith](/docs/en/sql-reference/functions/string-functions.md/#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ |
-| [endsWith](../../../sql-reference/functions/string-functions.md#endswith) | ✗ | ✗ | ✔ | ✔ | ✗ |
+| [endsWith](/docs/en/sql-reference/functions/string-functions.md/#endswith) | ✗ | ✗ | ✔ | ✔ | ✗ |
-| [multiSearchAny](../../../sql-reference/functions/string-search-functions.md#function-multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ |
+| [multiSearchAny](/docs/en/sql-reference/functions/string-search-functions.md/#function-multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ |
-| [in](../../../sql-reference/functions/in-functions#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ |
+| [in](/docs/en/sql-reference/functions/in-functions#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ |
-| [notIn](../../../sql-reference/functions/in-functions#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ |
+| [notIn](/docs/en/sql-reference/functions/in-functions#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ |
-| [less (<)](../../../sql-reference/functions/comparison-functions.md#function-less) | ✔ | ✔ | ✗ | ✗ | ✗ |
+| [less (<)](/docs/en/sql-reference/functions/comparison-functions.md/#function-less) | ✔ | ✔ | ✗ | ✗ | ✗ |
-| [greater (>)](../../../sql-reference/functions/comparison-functions.md#function-greater) | ✔ | ✔ | ✗ | ✗ | ✗ |
+| [greater (>)](/docs/en/sql-reference/functions/comparison-functions.md/#function-greater) | ✔ | ✔ | ✗ | ✗ | ✗ |
-| [lessOrEquals (<=)](../../../sql-reference/functions/comparison-functions.md#function-lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ |
+| [lessOrEquals (<=)](/docs/en/sql-reference/functions/comparison-functions.md/#function-lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ |
-| [greaterOrEquals (>=)](../../../sql-reference/functions/comparison-functions.md#function-greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ |
+| [greaterOrEquals (>=)](/docs/en/sql-reference/functions/comparison-functions.md/#function-greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ |
-| [empty](../../../sql-reference/functions/array-functions#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ |
+| [empty](/docs/en/sql-reference/functions/array-functions#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ |
-| [notEmpty](../../../sql-reference/functions/array-functions#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ |
+| [notEmpty](/docs/en/sql-reference/functions/array-functions#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ |
 | hasToken | ✗ | ✗ | ✗ | ✔ | ✗ |
 
 Functions with a constant argument that is less than ngram size can’t be used by `ngrambf_v1` for query optimization.
@@ -485,16 +485,16 @@ For example:
 
 
 ## Approximate Nearest Neighbor Search Indexes [experimental] {#table_engines-ANNIndex}
-In addition to skip indices, there are also [Approximate Nearest Neighbor Search Indexes](../../../engines/table-engines/mergetree-family/annindexes.md).
+In addition to skip indices, there are also [Approximate Nearest Neighbor Search Indexes](/docs/en/engines/table-engines/mergetree-family/annindexes.md).
 
 ## Projections {#projections}
-Projections are like [materialized views](../../../sql-reference/statements/create/view.md#materialized) but defined in part-level. It provides consistency guarantees along with automatic usage in queries.
+Projections are like [materialized views](/docs/en/sql-reference/statements/create/view.md/#materialized) but defined in part-level. It provides consistency guarantees along with automatic usage in queries.
 
 :::note
-When you are implementing projections you should also consider the [force_optimize_projection](../../../operations/settings/settings.md#force-optimize-projection) setting.
+When you are implementing projections you should also consider the [force_optimize_projection](/docs/en/operations/settings/settings.md/#force-optimize-projection) setting.
 :::
 
-Projections are not supported in the `SELECT` statements with the [FINAL](../../../sql-reference/statements/select/from.md#select-from-final) modifier.
+Projections are not supported in the `SELECT` statements with the [FINAL](/docs/en/sql-reference/statements/select/from.md/#select-from-final) modifier.
 
 ### Projection Query {#projection-query}
 A projection query is what defines a projection. It implicitly selects data from the parent table.
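As a quick illustration of a projection definition, a sketch with hypothetical names:

``` sql
-- The projection pre-aggregates per date; queries grouping by event_date
-- can be answered from the projection part instead of the full table.
CREATE TABLE projection_example
(
    user_id UInt64,
    event_date Date,
    PROJECTION daily_users
    (
        SELECT event_date, count() GROUP BY event_date
    )
)
ENGINE = MergeTree
ORDER BY user_id;
```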
@@ -504,7 +504,7 @@ A projection query is what defines a projection. It implicitly selects data from
 SELECT <column list expr> [GROUP BY] <group keys expr> [ORDER BY] <expr>
 ```
 
-Projections can be modified or dropped with the [ALTER](../../../sql-reference/statements/alter/projection.md) statement.
+Projections can be modified or dropped with the [ALTER](/docs/en/sql-reference/statements/alter/projection.md) statement.
 
 ### Projection Storage {#projection-storage}
 Projections are stored inside the part directory. It's similar to an index but contains a subdirectory that stores an anonymous `MergeTree` table's part. The table is induced by the definition query of the projection. If there is a `GROUP BY` clause, the underlying storage engine becomes [AggregatingMergeTree](aggregatingmergetree.md), and all aggregate functions are converted to `AggregateFunction`. If there is an `ORDER BY` clause, the `MergeTree` table uses it as its primary key expression. During the merge process the projection part is merged via its storage's merge routine. The checksum of the parent table's part is combined with the projection's part. Other maintenance jobs are similar to skip indices.
@@ -526,7 +526,7 @@ Determines the lifetime of values.
 
 The `TTL` clause can be set for the whole table and for each individual column. Table-level `TTL` can also specify the logic of automatic moving data between disks and volumes, or recompressing parts where all the data has been expired.
 
-Expressions must evaluate to [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md) data type.
+Expressions must evaluate to [Date](/docs/en/sql-reference/data-types/date.md) or [DateTime](/docs/en/sql-reference/data-types/datetime.md) data type.
 
 **Syntax**
 
@@ -537,7 +537,7 @@ TTL time_column
 TTL time_column + interval
 ```
 
-To define `interval`, use [time interval](../../../sql-reference/operators/index.md#operators-datetime) operators, for example:
+To define `interval`, use [time interval](/docs/en/sql-reference/operators/index.md#operators-datetime) operators, for example:
 
 ``` sql
 TTL date_time + INTERVAL 1 MONTH
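Putting the syntax above together, a sketch of a table that combines a column-level and a table-level TTL; the names are hypothetical.

``` sql
-- value expires after a day, whole rows after a month.
CREATE TABLE ttl_example
(
    event_time DateTime,
    value UInt64 TTL event_time + INTERVAL 1 DAY
)
ENGINE = MergeTree
ORDER BY event_time
TTL event_time + INTERVAL 1 MONTH;
```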
@@ -684,11 +684,11 @@ Data with an expired `TTL` is removed when ClickHouse merges data parts.
 
 When ClickHouse detects that data is expired, it performs an off-schedule merge. To control the frequency of such merges, you can set `merge_with_ttl_timeout`. If the value is too low, it will perform many off-schedule merges that may consume a lot of resources.
 
-If you perform the `SELECT` query between merges, you may get expired data. To avoid it, use the [OPTIMIZE](../../../sql-reference/statements/optimize.md) query before `SELECT`.
+If you perform the `SELECT` query between merges, you may get expired data. To avoid it, use the [OPTIMIZE](/docs/en/sql-reference/statements/optimize.md) query before `SELECT`.
 
 **See Also**
 
-- [ttl_only_drop_parts](../../../operations/settings/settings.md#ttl_only_drop_parts) setting
+- [ttl_only_drop_parts](/docs/en/operations/settings/settings.md/#ttl_only_drop_parts) setting
 
 
 ## Using Multiple Block Devices for Data Storage {#table_engine-mergetree-multiple-volumes}
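A sketch of the `OPTIMIZE`-before-`SELECT` pattern mentioned above, against the hypothetical `ttl_example` table:

``` sql
-- Force an off-schedule merge so expired rows are dropped before reading.
OPTIMIZE TABLE ttl_example FINAL;
SELECT count() FROM ttl_example;
```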
@@ -697,16 +697,16 @@ If you perform the `SELECT` query between merges, you may get expired data. To a
 
 `MergeTree` family table engines can store data on multiple block devices. For example, it can be useful when the data of a certain table are implicitly split into “hot” and “cold”. The most recent data is regularly requested but requires only a small amount of space. On the contrary, the fat-tailed historical data is requested rarely. If several disks are available, the “hot” data may be located on fast disks (for example, NVMe SSDs or in memory), while the “cold” data - on relatively slow ones (for example, HDD).
 
-Data part is the minimum movable unit for `MergeTree`-engine tables. The data belonging to one part are stored on one disk. Data parts can be moved between disks in the background (according to user settings) as well as by means of the [ALTER](../../../sql-reference/statements/alter/partition.md#alter_move-partition) queries.
+Data part is the minimum movable unit for `MergeTree`-engine tables. The data belonging to one part are stored on one disk. Data parts can be moved between disks in the background (according to user settings) as well as by means of the [ALTER](/docs/en/sql-reference/statements/alter/partition.md/#alter_move-partition) queries.
 
 ### Terms {#terms}
 
 - Disk — Block device mounted to the filesystem.
-- Default disk — Disk that stores the path specified in the [path](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-path) server setting.
+- Default disk — Disk that stores the path specified in the [path](/docs/en/operations/server-configuration-parameters/settings.md/#server_configuration_parameters-path) server setting.
 - Volume — Ordered set of equal disks (similar to [JBOD](https://en.wikipedia.org/wiki/Non-RAID_drive_architectures)).
 - Storage policy — Set of volumes and the rules for moving data between them.
 
-The names given to the described entities can be found in the system tables, [system.storage_policies](../../../operations/system-tables/storage_policies.md#system_tables-storage_policies) and [system.disks](../../../operations/system-tables/disks.md#system_tables-disks). To apply one of the configured storage policies for a table, use the `storage_policy` setting of `MergeTree`-engine family tables.
+The names given to the described entities can be found in the system tables, [system.storage_policies](/docs/en/operations/system-tables/storage_policies.md/#system_tables-storage_policies) and [system.disks](/docs/en/operations/system-tables/disks.md/#system_tables-disks). To apply one of the configured storage policies for a table, use the `storage_policy` setting of `MergeTree`-engine family tables.
 
 ### Configuration {#table_engine-mergetree-multiple-volumes_configure}
 
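For example, the system tables named above can be queried directly; a minimal sketch:

``` sql
-- Inspect the configured policies and the disks each volume contains.
SELECT policy_name, volume_name, disks
FROM system.storage_policies;

-- Inspect the disks themselves.
SELECT name, path, free_space, total_space
FROM system.disks;
```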
@@ -853,16 +853,16 @@ SETTINGS storage_policy = 'moving_from_ssd_to_hdd'
 The `default` storage policy implies using only one volume, which consists of only one disk given in `<path>`.
 You could change storage policy after table creation with [ALTER TABLE ... MODIFY SETTING] query, new policy should include all old disks and volumes with same names.
 
-The number of threads performing background moves of data parts can be changed by [background_move_pool_size](../../../operations/settings/settings.md#background_move_pool_size) setting.
+The number of threads performing background moves of data parts can be changed by [background_move_pool_size](/docs/en/operations/settings/settings.md/#background_move_pool_size) setting.
 
 ### Details {#details}
 
 In the case of `MergeTree` tables, data is getting to disk in different ways:
 
 - As a result of an insert (`INSERT` query).
-- During background merges and [mutations](../../../sql-reference/statements/alter/index.md#alter-mutations).
+- During background merges and [mutations](/docs/en/sql-reference/statements/alter/index.md#alter-mutations).
 - When downloading from another replica.
-- As a result of partition freezing [ALTER TABLE … FREEZE PARTITION](../../../sql-reference/statements/alter/partition.md#alter_freeze-partition).
+- As a result of partition freezing [ALTER TABLE … FREEZE PARTITION](/docs/en/sql-reference/statements/alter/partition.md/#alter_freeze-partition).
 
 In all these cases except for mutations and partition freezing, a part is stored on a volume and a disk according to the given storage policy:
 
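A sketch of the policy change mentioned at the top of this hunk; `table_name` is hypothetical, the policy name is taken from the hunk's context line.

``` sql
-- The new policy must contain all of the table's current disks and
-- volumes under the same names.
ALTER TABLE table_name MODIFY SETTING storage_policy = 'moving_from_ssd_to_hdd';
```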
@@ -872,16 +872,16 @@ In all these cases except for mutations and partition freezing, a part is stored
 Under the hood, mutations and partition freezing make use of [hard links](https://en.wikipedia.org/wiki/Hard_link). Hard links between different disks are not supported, therefore in such cases the resulting parts are stored on the same disks as the initial ones.
 
 In the background, parts are moved between volumes on the basis of the amount of free space (`move_factor` parameter) according to the order the volumes are declared in the configuration file.
-Data is never transferred from the last one and into the first one. One may use system tables [system.part_log](../../../operations/system-tables/part_log.md#system_tables-part-log) (field `type = MOVE_PART`) and [system.parts](../../../operations/system-tables/parts.md#system_tables-parts) (fields `path` and `disk`) to monitor background moves. Also, the detailed information can be found in server logs.
+Data is never transferred from the last one and into the first one. One may use system tables [system.part_log](/docs/en/operations/system-tables/part_log.md/#system_tables-part-log) (field `type = MOVE_PART`) and [system.parts](/docs/en/operations/system-tables/parts.md/#system_tables-parts) (fields `path` and `disk`) to monitor background moves. Also, the detailed information can be found in server logs.
 
-User can force moving a part or a partition from one volume to another using the query [ALTER TABLE … MOVE PART\|PARTITION … TO VOLUME\|DISK …](../../../sql-reference/statements/alter/partition.md#alter_move-partition), all the restrictions for background operations are taken into account. The query initiates a move on its own and does not wait for background operations to be completed. User will get an error message if not enough free space is available or if any of the required conditions are not met.
+User can force moving a part or a partition from one volume to another using the query [ALTER TABLE … MOVE PART\|PARTITION … TO VOLUME\|DISK …](/docs/en/sql-reference/statements/alter/partition.md/#alter_move-partition), all the restrictions for background operations are taken into account. The query initiates a move on its own and does not wait for background operations to be completed. User will get an error message if not enough free space is available or if any of the required conditions are not met.
 
 Moving data does not interfere with data replication. Therefore, different storage policies can be specified for the same table on different replicas.
 
 After the completion of background merges and mutations, old parts are removed only after a certain amount of time (`old_parts_lifetime`).
 During this time, they are not moved to other volumes or disks. Therefore, until the parts are finally removed, they are still taken into account for evaluation of the occupied disk space.
 
-User can assign new big parts to different disks of a [JBOD](https://en.wikipedia.org/wiki/Non-RAID_drive_architectures) volume in a balanced way using the [min_bytes_to_rebalance_partition_over_jbod](../../../operations/settings/merge-tree-settings.md#min-bytes-to-rebalance-partition-over-jbod) setting.
+User can assign new big parts to different disks of a [JBOD](https://en.wikipedia.org/wiki/Non-RAID_drive_architectures) volume in a balanced way using the [min_bytes_to_rebalance_partition_over_jbod](/docs/en/operations/settings/merge-tree-settings.md/#min-bytes-to-rebalance-partition-over-jbod) setting.
 
 ## Using S3 for Data Storage {#table_engine-mergetree-s3}
 
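A sketch of monitoring and forcing the moves described above. The table, volume, and partition values are hypothetical, and the `system.part_log` event-type spelling may differ between versions (the docs write it as `MOVE_PART`).

``` sql
-- Watch recent background part moves (assumed event-type spelling).
SELECT event_time, part_name, path_on_disk
FROM system.part_log
WHERE type = 'MovePart'
ORDER BY event_time DESC
LIMIT 10;

-- Or force a move yourself (hypothetical names).
ALTER TABLE table_name MOVE PARTITION '2019-09-01' TO VOLUME 'slow';
```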
@@ -940,6 +940,10 @@ Optional parameters:
 - `cache_path` — Path on local FS where to store cached mark and index files. Default value is `/var/lib/clickhouse/disks/<disk_name>/cache/`.
 - `skip_access_check` — If true, disk access checks will not be performed on disk start-up. Default value is `false`.
 - `server_side_encryption_customer_key_base64` — If specified, required headers for accessing S3 objects with SSE-C encryption will be set.
+- `s3_max_put_rps` — Maximum PUT requests per second rate before throttling. Default value is `0` (unlimited).
+- `s3_max_put_burst` — Max number of requests that can be issued simultaneously before hitting request per second limit. By default (`0` value) equals to `s3_max_put_rps`.
+- `s3_max_get_rps` — Maximum GET requests per second rate before throttling. Default value is `0` (unlimited).
+- `s3_max_get_burst` — Max number of requests that can be issued simultaneously before hitting request per second limit. By default (`0` value) equals to `s3_max_get_rps`.
 
 S3 disk can be configured as `main` or `cold` storage:
 ``` xml
@@ -20,7 +20,7 @@ Replication works at the level of an individual table, not the entire server. A
 
 Replication does not depend on sharding. Each shard has its own independent replication.
 
-Compressed data for `INSERT` and `ALTER` queries is replicated (for more information, see the documentation for [ALTER](../../../sql-reference/statements/alter/index.md#query_language_queries_alter)).
+Compressed data for `INSERT` and `ALTER` queries is replicated (for more information, see the documentation for [ALTER](/docs/en/sql-reference/statements/alter/index.md#query_language_queries_alter)).
 
 `CREATE`, `DROP`, `ATTACH`, `DETACH` and `RENAME` queries are executed on a single server and are not replicated:
 
@@ -28,9 +28,9 @@ Compressed data for `INSERT` and `ALTER` queries is replicated (for more informa
 - The `DROP TABLE` query deletes the replica located on the server where the query is run.
 - The `RENAME` query renames the table on one of the replicas. In other words, replicated tables can have different names on different replicas.
 
-ClickHouse uses [ClickHouse Keeper](../../../guides/sre/keeper/clickhouse-keeper.md) for storing replicas meta information. It is possible to use ZooKeeper version 3.4.5 or newer, but ClickHouse Keeper is recommended.
+ClickHouse uses [ClickHouse Keeper](/docs/en/guides/sre/keeper/clickhouse-keeper.md) for storing replicas meta information. It is possible to use ZooKeeper version 3.4.5 or newer, but ClickHouse Keeper is recommended.
 
-To use replication, set parameters in the [zookeeper](../../../operations/server-configuration-parameters/settings.md#server-settings_zookeeper) server configuration section.
+To use replication, set parameters in the [zookeeper](/docs/en/operations/server-configuration-parameters/settings.md/#server-settings_zookeeper) server configuration section.
 
 :::warning
 Don’t neglect the security setting. ClickHouse supports the `digest` [ACL scheme](https://zookeeper.apache.org/doc/current/zookeeperProgrammers.html#sc_ZooKeeperAccessControl) of the ZooKeeper security subsystem.
@@ -85,7 +85,7 @@ Example of setting the addresses of the auxiliary ZooKeeper cluster:
 </auxiliary_zookeepers>
 ```
 
-To store table datameta in a auxiliary ZooKeeper cluster instead of default ZooKeeper cluster, we can use the SQL to create table with
+To store table metadata in an auxiliary ZooKeeper cluster instead of default ZooKeeper cluster, we can use the SQL to create table with
 ReplicatedMergeTree engine as follow:
 
 ```
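The hunk cuts off before the file's own CREATE statement; as a sketch of the syntax it refers to, with a hypothetical auxiliary cluster named `zookeeper2`:

``` sql
-- 'zookeeper2' is a name configured under <auxiliary_zookeepers>;
-- prefixing the ZooKeeper path with it stores this table's metadata there.
CREATE TABLE table_a
(
    id UInt64
)
ENGINE = ReplicatedMergeTree('zookeeper2:/clickhouse/tables/table_a', 'replica1')
ORDER BY id;
```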
@@ -95,21 +95,21 @@ You can specify any existing ZooKeeper cluster and the system will use a directo
 
 If ZooKeeper isn’t set in the config file, you can’t create replicated tables, and any existing replicated tables will be read-only.
 
-ZooKeeper is not used in `SELECT` queries because replication does not affect the performance of `SELECT` and queries run just as fast as they do for non-replicated tables. When querying distributed replicated tables, ClickHouse behavior is controlled by the settings [max_replica_delay_for_distributed_queries](../../../operations/settings/settings.md#settings-max_replica_delay_for_distributed_queries) and [fallback_to_stale_replicas_for_distributed_queries](../../../operations/settings/settings.md#settings-fallback_to_stale_replicas_for_distributed_queries).
+ZooKeeper is not used in `SELECT` queries because replication does not affect the performance of `SELECT` and queries run just as fast as they do for non-replicated tables. When querying distributed replicated tables, ClickHouse behavior is controlled by the settings [max_replica_delay_for_distributed_queries](/docs/en/operations/settings/settings.md/#settings-max_replica_delay_for_distributed_queries) and [fallback_to_stale_replicas_for_distributed_queries](/docs/en/operations/settings/settings.md/#settings-fallback_to_stale_replicas_for_distributed_queries).
 
 For each `INSERT` query, approximately ten entries are added to ZooKeeper through several transactions. (To be more precise, this is for each inserted block of data; an INSERT query contains one block or one block per `max_insert_block_size = 1048576` rows.) This leads to slightly longer latencies for `INSERT` compared to non-replicated tables. But if you follow the recommendations to insert data in batches of no more than one `INSERT` per second, it does not create any problems. The entire ClickHouse cluster used for coordinating one ZooKeeper cluster has a total of several hundred `INSERTs` per second. The throughput on data inserts (the number of rows per second) is just as high as for non-replicated data.
 
 For very large clusters, you can use different ZooKeeper clusters for different shards. However, from our experience this has not proven necessary based on production clusters with approximately 300 servers.
 
-Replication is asynchronous and multi-master. `INSERT` queries (as well as `ALTER`) can be sent to any available server. Data is inserted on the server where the query is run, and then it is copied to the other servers. Because it is asynchronous, recently inserted data appears on the other replicas with some latency. If part of the replicas are not available, the data is written when they become available. If a replica is available, the latency is the amount of time it takes to transfer the block of compressed data over the network. The number of threads performing background tasks for replicated tables can be set by [background_schedule_pool_size](../../../operations/settings/settings.md#background_schedule_pool_size) setting.
+Replication is asynchronous and multi-master. `INSERT` queries (as well as `ALTER`) can be sent to any available server. Data is inserted on the server where the query is run, and then it is copied to the other servers. Because it is asynchronous, recently inserted data appears on the other replicas with some latency. If part of the replicas are not available, the data is written when they become available. If a replica is available, the latency is the amount of time it takes to transfer the block of compressed data over the network. The number of threads performing background tasks for replicated tables can be set by [background_schedule_pool_size](/docs/en/operations/settings/settings.md/#background_schedule_pool_size) setting.
 
-`ReplicatedMergeTree` engine uses a separate thread pool for replicated fetches. Size of the pool is limited by the [background_fetches_pool_size](../../../operations/settings/settings.md#background_fetches_pool_size) setting which can be tuned with a server restart.
+`ReplicatedMergeTree` engine uses a separate thread pool for replicated fetches. Size of the pool is limited by the [background_fetches_pool_size](/docs/en/operations/settings/settings.md/#background_fetches_pool_size) setting which can be tuned with a server restart.
 
 By default, an INSERT query waits for confirmation of writing the data from only one replica. If the data was successfully written to only one replica and the server with this replica ceases to exist, the stored data will be lost. To enable getting confirmation of data writes from multiple replicas, use the `insert_quorum` option.
 
 Each block of data is written atomically. The INSERT query is divided into blocks up to `max_insert_block_size = 1048576` rows. In other words, if the `INSERT` query has less than 1048576 rows, it is made atomically.
 
-Data blocks are deduplicated. For multiple writes of the same data block (data blocks of the same size containing the same rows in the same order), the block is only written once. The reason for this is in case of network failures when the client application does not know if the data was written to the DB, so the `INSERT` query can simply be repeated. It does not matter which replica INSERTs were sent to with identical data. `INSERTs` are idempotent. Deduplication parameters are controlled by [merge_tree](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-merge_tree) server settings.
+Data blocks are deduplicated. For multiple writes of the same data block (data blocks of the same size containing the same rows in the same order), the block is only written once. The reason for this is in case of network failures when the client application does not know if the data was written to the DB, so the `INSERT` query can simply be repeated. It does not matter which replica INSERTs were sent to with identical data. `INSERTs` are idempotent. Deduplication parameters are controlled by [merge_tree](/docs/en/operations/server-configuration-parameters/settings.md/#server_configuration_parameters-merge_tree) server settings.
 
 During replication, only the source data to insert is transferred over the network. Further data transformation (merging) is coordinated and performed on all the replicas in the same way. This minimizes network usage, which means that replication works well when replicas reside in different datacenters. (Note that duplicating data in different datacenters is the main goal of replication.)
 
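A sketch of the `insert_quorum` option mentioned above; the table and values are hypothetical.

``` sql
-- Require acknowledgment from two replicas before the INSERT is
-- reported as successful.
SET insert_quorum = 2;
INSERT INTO replicated_table (id, value) VALUES (1, 'a');
```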
@@ -165,7 +165,7 @@ CREATE TABLE table_name
 
 </details>
 
-As the example shows, these parameters can contain substitutions in curly brackets. The substituted values are taken from the [macros](../../../operations/server-configuration-parameters/settings.md#macros) section of the configuration file.
+As the example shows, these parameters can contain substitutions in curly brackets. The substituted values are taken from the [macros](/docs/en/operations/server-configuration-parameters/settings.md/#macros) section of the configuration file.
 
 Example:
 
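The file's own example falls outside this hunk; for orientation, a typical CREATE with macro substitutions looks roughly like this (a sketch, not the file's example):

``` sql
-- {shard} and {replica} are expanded from the <macros> server config.
CREATE TABLE table_name
(
    EventDate Date,
    CounterID UInt32
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/table_name', '{replica}')
PARTITION BY toYYYYMM(EventDate)
ORDER BY CounterID;
```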
@@ -295,10 +295,10 @@ If the data in ClickHouse Keeper was lost or damaged, you can save data by movin
 
 **See Also**
 
-- [background_schedule_pool_size](../../../operations/settings/settings.md#background_schedule_pool_size)
-- [background_fetches_pool_size](../../../operations/settings/settings.md#background_fetches_pool_size)
-- [execute_merges_on_single_replica_time_threshold](../../../operations/settings/settings.md#execute-merges-on-single-replica-time-threshold)
-- [max_replicated_fetches_network_bandwidth](../../../operations/settings/merge-tree-settings.md#max_replicated_fetches_network_bandwidth)
-- [max_replicated_sends_network_bandwidth](../../../operations/settings/merge-tree-settings.md#max_replicated_sends_network_bandwidth)
+- [background_schedule_pool_size](/docs/en/operations/settings/settings.md/#background_schedule_pool_size)
+- [background_fetches_pool_size](/docs/en/operations/settings/settings.md/#background_fetches_pool_size)
+- [execute_merges_on_single_replica_time_threshold](/docs/en/operations/settings/settings.md/#execute-merges-on-single-replica-time-threshold)
+- [max_replicated_fetches_network_bandwidth](/docs/en/operations/settings/merge-tree-settings.md/#max_replicated_fetches_network_bandwidth)
+- [max_replicated_sends_network_bandwidth](/docs/en/operations/settings/merge-tree-settings.md/#max_replicated_sends_network_bandwidth)
 
 [Original article](https://clickhouse.com/docs/en/operations/table_engines/replication/) <!--hide-->
@@ -6,10 +6,10 @@ sidebar_label: Join
 
 # Join Table Engine
 
-Optional prepared data structure for usage in [JOIN](../../../sql-reference/statements/select/join.md#select-join) operations.
+Optional prepared data structure for usage in [JOIN](/docs/en/sql-reference/statements/select/join.md/#select-join) operations.
 
 :::note
-This is not an article about the [JOIN clause](../../../sql-reference/statements/select/join.md#select-join) itself.
+This is not an article about the [JOIN clause](/docs/en/sql-reference/statements/select/join.md/#select-join) itself.
 :::
 
 ## Creating a Table {#creating-a-table}
@@ -22,17 +22,17 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
 ) ENGINE = Join(join_strictness, join_type, k1[, k2, ...])
 ```
 
-See the detailed description of the [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query) query.
+See the detailed description of the [CREATE TABLE](/docs/en/sql-reference/statements/create/table.md/#create-table-query) query.
 
 ## Engine Parameters
 
 ### join_strictness
 
-`join_strictness` – [JOIN strictness](../../../sql-reference/statements/select/join.md#select-join-types).
+`join_strictness` – [JOIN strictness](/docs/en/sql-reference/statements/select/join.md/#select-join-types).
 
 ### join_type
 
-`join_type` – [JOIN type](../../../sql-reference/statements/select/join.md#select-join-types).
+`join_type` – [JOIN type](/docs/en/sql-reference/statements/select/join.md/#select-join-types).
 
 ### Key columns
 
@@ -55,11 +55,11 @@ You can use `INSERT` queries to add data to the `Join`-engine tables. If the tab
 Main use-cases for `Join`-engine tables are following:
 
 - Place the table to the right side in a `JOIN` clause.
-- Call the [joinGet](../../../sql-reference/functions/other-functions.md#joinget) function, which lets you extract data from the table the same way as from a dictionary.
+- Call the [joinGet](/docs/en/sql-reference/functions/other-functions.md/#joinget) function, which lets you extract data from the table the same way as from a dictionary.
 
 ### Deleting Data {#deleting-data}
 
-`ALTER DELETE` queries for `Join`-engine tables are implemented as [mutations](../../../sql-reference/statements/alter/index.md#mutations). `DELETE` mutation reads filtered data and overwrites data of memory and disk.
+`ALTER DELETE` queries for `Join`-engine tables are implemented as [mutations](/docs/en/sql-reference/statements/alter/index.md#mutations). `DELETE` mutation reads filtered data and overwrites data of memory and disk.
 
 ### Limitations and Settings {#join-limitations-and-settings}
 
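A minimal sketch of the `joinGet` use-case named above, with hypothetical names:

``` sql
CREATE TABLE id_val (id UInt32, val String) ENGINE = Join(ANY, LEFT, id);
INSERT INTO id_val VALUES (1, 'first'), (2, 'second');

-- Look up a value directly, the same way as from a dictionary.
SELECT joinGet('id_val', 'val', toUInt32(1)); -- returns 'first'
```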
@@ -67,30 +67,30 @@ When creating a table, the following settings are applied:
 
 #### join_use_nulls
 
-[join_use_nulls](../../../operations/settings/settings.md#join_use_nulls)
+[join_use_nulls](/docs/en/operations/settings/settings.md/#join_use_nulls)
 
 #### max_rows_in_join
 
-[max_rows_in_join](../../../operations/settings/query-complexity.md#settings-max_rows_in_join)
+[max_rows_in_join](/docs/en/operations/settings/query-complexity.md/#settings-max_rows_in_join)
 
 #### max_bytes_in_join
 
-[max_bytes_in_join](../../../operations/settings/query-complexity.md#settings-max_bytes_in_join)
+[max_bytes_in_join](/docs/en/operations/settings/query-complexity.md/#settings-max_bytes_in_join)
 
 #### join_overflow_mode
 
-[join_overflow_mode](../../../operations/settings/query-complexity.md#settings-join_overflow_mode)
+[join_overflow_mode](/docs/en/operations/settings/query-complexity.md/#settings-join_overflow_mode)
 
 #### join_any_take_last_row
 
-[join_any_take_last_row](../../../operations/settings/settings.md#settings-join_any_take_last_row)
+[join_any_take_last_row](/docs/en/operations/settings/settings.md/#settings-join_any_take_last_row)
 #### join_use_nulls
 
-[persistent](../../../operations/settings/settings.md#persistent)
+[persistent](/docs/en/operations/settings/settings.md/#persistent)
 
 The `Join`-engine tables can’t be used in `GLOBAL JOIN` operations.
 
-The `Join`-engine allows to specify [join_use_nulls](../../../operations/settings/settings.md#join_use_nulls) setting in the `CREATE TABLE` statement. [SELECT](../../../sql-reference/statements/select/index.md) query should have the same `join_use_nulls` value.
+The `Join`-engine allows to specify [join_use_nulls](/docs/en/operations/settings/settings.md/#join_use_nulls) setting in the `CREATE TABLE` statement. [SELECT](/docs/en/sql-reference/statements/select/index.md) query should have the same `join_use_nulls` value.
 
 ## Usage Examples {#example}
 
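A sketch of the `join_use_nulls` consistency rule from the hunk above, with hypothetical names:

``` sql
-- The table is created with join_use_nulls = 1, so SELECT queries that
-- join against it should run with the same setting.
CREATE TABLE join_with_nulls (k UInt32, v String)
ENGINE = Join(ANY, LEFT, k)
SETTINGS join_use_nulls = 1;

SET join_use_nulls = 1;
```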
@@ -163,7 +163,7 @@ SELECT mcc, count() FROM cell_towers GROUP BY mcc ORDER BY count() DESC LIMIT 10
 
 Based on the above query and the [MCC list](https://en.wikipedia.org/wiki/Mobile_country_code), the countries with the most cell towers are: the USA, Germany, and Russia.
 
-You may want to create an [External Dictionary](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) in ClickHouse to decode these values.
+You may want to create a [Dictionary](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) in ClickHouse to decode these values.
 
 ## Use case: Incorporate geo data {#use-case}
 
docs/en/getting-started/example-datasets/github.md: new file, 2499 lines (diff suppressed because it is too large).
Four new binary image files are not shown (277 KiB, 315 KiB, 246 KiB, 69 KiB).
@ -5,7 +5,7 @@ sidebar_label: Input and Output Formats

title: Formats for Input and Output Data
---

ClickHouse can accept and return data in various formats. A format supported for input can be used to parse the data provided to `INSERT`s, to perform `SELECT`s from a file-backed table such as File, URL or HDFS, or to read a dictionary. A format supported for output can be used to arrange the results of a `SELECT`, and to perform `INSERT`s into a file-backed table.

The supported formats are:
@ -13,7 +13,7 @@ The supported formats are:

| Format                                                          | Input | Output |
|-----------------------------------------------------------------|-------|--------|
| [TabSeparated](#tabseparated)                                   | ✔     | ✔      |
| [TabSeparatedRaw](#tabseparatedraw)                             | ✔     | ✔      |
| [TabSeparatedWithNames](#tabseparatedwithnames)                 | ✔     | ✔      |
| [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes) | ✔     | ✔      |
| [TabSeparatedRawWithNames](#tabseparatedrawwithnames)           | ✔     | ✔      |
@ -48,6 +48,7 @@ The supported formats are:

| [JSONCompactStringsEachRowWithNames](#jsoncompactstringseachrowwithnames)                 | ✔ | ✔ |
| [JSONCompactStringsEachRowWithNamesAndTypes](#jsoncompactstringseachrowwithnamesandtypes) | ✔ | ✔ |
| [JSONObjectEachRow](#jsonobjecteachrow)                                                   | ✔ | ✔ |
| [BSONEachRow](#bsoneachrow)                                                               | ✔ | ✔ |
| [TSKV](#tskv)                                                                             | ✔ | ✔ |
| [Pretty](#pretty)                                                                         | ✗ | ✔ |
| [PrettyNoEscapes](#prettynoescapes)                                                       | ✗ | ✔ |
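A quick sketch of requesting one of these formats on output (any listed output format can follow the `FORMAT` clause):

```sql
-- The same row rendered through two different output formats.
SELECT 1 AS x, 'a' AS s FORMAT TabSeparatedWithNames;
SELECT 1 AS x, 'a' AS s FORMAT JSONEachRow;
```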
@ -1210,6 +1211,69 @@ SELECT * FROM json_each_row_nested

- [output_format_json_array_of_rows](../operations/settings/settings.md#output_format_json_array_of_rows) - output a JSON array of all rows in JSONEachRow(Compact) format. Default value - `false`.
- [output_format_json_validate_utf8](../operations/settings/settings.md#output_format_json_validate_utf8) - enables validation of UTF-8 sequences in JSON output formats (note that it doesn't affect the JSON/JSONCompact/JSONColumnsWithMetadata formats, which always validate UTF-8). Default value - `false`.

## BSONEachRow {#bsoneachrow}

In this format, ClickHouse formats/parses data as a sequence of BSON documents without any separator between them. Each row is formatted as a single document and each column is formatted as a single BSON document field with the column name as a key.

For output it uses the following correspondence between ClickHouse types and BSON types:

| ClickHouse type | BSON Type |
|-----------------|-----------|
| [Bool](../sql-reference/data-types/boolean.md) | `\x08` boolean |
| [Int8/UInt8](../sql-reference/data-types/int-uint.md) | `\x10` int32 |
| [Int16/UInt16](../sql-reference/data-types/int-uint.md) | `\x10` int32 |
| [Int32](../sql-reference/data-types/int-uint.md) | `\x10` int32 |
| [UInt32](../sql-reference/data-types/int-uint.md) | `\x12` int64 |
| [Int64/UInt64](../sql-reference/data-types/int-uint.md) | `\x12` int64 |
| [Float32/Float64](../sql-reference/data-types/float.md) | `\x01` double |
| [Date](../sql-reference/data-types/date.md)/[Date32](../sql-reference/data-types/date32.md) | `\x10` int32 |
| [DateTime](../sql-reference/data-types/datetime.md) | `\x12` int64 |
| [DateTime64](../sql-reference/data-types/datetime64.md) | `\x09` datetime |
| [Decimal32](../sql-reference/data-types/decimal.md) | `\x10` int32 |
| [Decimal64](../sql-reference/data-types/decimal.md) | `\x12` int64 |
| [Decimal128](../sql-reference/data-types/decimal.md) | `\x05` binary, `\x00` binary subtype, size = 16 |
| [Decimal256](../sql-reference/data-types/decimal.md) | `\x05` binary, `\x00` binary subtype, size = 32 |
| [Int128/UInt128](../sql-reference/data-types/int-uint.md) | `\x05` binary, `\x00` binary subtype, size = 16 |
| [Int256/UInt256](../sql-reference/data-types/int-uint.md) | `\x05` binary, `\x00` binary subtype, size = 32 |
| [String](../sql-reference/data-types/string.md)/[FixedString](../sql-reference/data-types/fixedstring.md) | `\x05` binary, `\x00` binary subtype, or `\x02` string if the setting `output_format_bson_string_as_string` is enabled |
| [UUID](../sql-reference/data-types/uuid.md) | `\x05` binary, `\x04` uuid subtype, size = 16 |
| [Array](../sql-reference/data-types/array.md) | `\x04` array |
| [Tuple](../sql-reference/data-types/tuple.md) | `\x04` array |
| [Named Tuple](../sql-reference/data-types/tuple.md) | `\x03` document |
| [Map](../sql-reference/data-types/map.md) (with String keys) | `\x03` document |

For input it uses the following correspondence between BSON types and ClickHouse types:

| BSON Type | ClickHouse Type |
|-----------|-----------------|
| `\x01` double | [Float32/Float64](../sql-reference/data-types/float.md) |
| `\x02` string | [String](../sql-reference/data-types/string.md)/[FixedString](../sql-reference/data-types/fixedstring.md) |
| `\x03` document | [Map](../sql-reference/data-types/map.md)/[Named Tuple](../sql-reference/data-types/tuple.md) |
| `\x04` array | [Array](../sql-reference/data-types/array.md)/[Tuple](../sql-reference/data-types/tuple.md) |
| `\x05` binary, `\x00` binary subtype | [String](../sql-reference/data-types/string.md)/[FixedString](../sql-reference/data-types/fixedstring.md) |
| `\x05` binary, `\x02` old binary subtype | [String](../sql-reference/data-types/string.md)/[FixedString](../sql-reference/data-types/fixedstring.md) |
| `\x05` binary, `\x03` old uuid subtype | [UUID](../sql-reference/data-types/uuid.md) |
| `\x05` binary, `\x04` uuid subtype | [UUID](../sql-reference/data-types/uuid.md) |
| `\x07` ObjectId | [String](../sql-reference/data-types/string.md)/[FixedString](../sql-reference/data-types/fixedstring.md) |
| `\x08` boolean | [Bool](../sql-reference/data-types/boolean.md) |
| `\x09` datetime | [DateTime64](../sql-reference/data-types/datetime64.md) |
| `\x0A` null value | [NULL](../sql-reference/data-types/nullable.md) |
| `\x0D` JavaScript code | [String](../sql-reference/data-types/string.md)/[FixedString](../sql-reference/data-types/fixedstring.md) |
| `\x0E` symbol | [String](../sql-reference/data-types/string.md)/[FixedString](../sql-reference/data-types/fixedstring.md) |
| `\x10` int32 | [Int32/UInt32](../sql-reference/data-types/int-uint.md)/[Decimal32](../sql-reference/data-types/decimal.md) |
| `\x12` int64 | [Int64/UInt64](../sql-reference/data-types/int-uint.md)/[Decimal64](../sql-reference/data-types/decimal.md)/[DateTime64](../sql-reference/data-types/datetime64.md) |

Other BSON types are not supported. The format also performs conversion between different integer types (for example, you can insert a BSON int32 value into a ClickHouse UInt8 column).

Big integers and decimals (Int128/UInt128/Int256/UInt256/Decimal128/Decimal256) can be parsed from a BSON Binary value with the `\x00` binary subtype. In this case the format validates that the size of the binary data equals the size of the expected value.

Note: this format doesn't work properly on big-endian platforms.

### BSON format settings {#bson-format-settings}

- [output_format_bson_string_as_string](../operations/settings/settings.md#output_format_bson_string_as_string) - use BSON String type instead of Binary for String columns. Default value - `false`.
- [input_format_bson_skip_fields_with_unsupported_types_in_schema_inference](../operations/settings/settings.md#input_format_bson_skip_fields_with_unsupported_types_in_schema_inference) - allow skipping columns with unsupported types during schema inference for the BSONEachRow format. Default value - `false`.
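A rough sketch of a round trip through the format, assuming the server may write files under its `user_files_path` (the file name and columns are illustrative):

```sql
-- Write three rows as a sequence of BSON documents, storing String as BSON string.
INSERT INTO FUNCTION file('data.bson', 'BSONEachRow', 'id UInt64, name String')
SELECT number AS id, concat('name_', toString(number)) AS name
FROM numbers(3)
SETTINGS output_format_bson_string_as_string = 1;

-- Read the documents back; each BSON document becomes one row.
SELECT * FROM file('data.bson', 'BSONEachRow', 'id UInt64, name String');
```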
## Native {#native}

The most efficient format. Data is written and read by blocks in binary format. For each block, the number of rows, number of columns, column names and types, and parts of columns in this block are recorded one after another. In other words, this format is “columnar” – it does not convert columns to rows. This is the format used in the native interface for interaction between servers, for using the command-line client, and for C++ clients.
@ -1,9 +1,5 @@
|
|||||||
---
|
|
||||||
slug: /en/operations/troubleshooting
|
[//]: # (This file is included in FAQ > Troubleshooting)
|
||||||
sidebar_position: 46
|
|
||||||
sidebar_label: Troubleshooting
|
|
||||||
title: Troubleshooting
|
|
||||||
---
|
|
||||||
|
|
||||||
- [Installation](#troubleshooting-installation-errors)
|
- [Installation](#troubleshooting-installation-errors)
|
||||||
- [Connecting to the server](#troubleshooting-accepts-no-connections)
|
- [Connecting to the server](#troubleshooting-accepts-no-connections)
|
@ -57,7 +57,7 @@ Internal coordination settings are located in the `<keeper_server>.<coordination

- `auto_forwarding` — Allow to forward write requests from followers to the leader (default: true).
- `shutdown_timeout` — Wait to finish internal connections and shutdown (ms) (default: 5000).
- `startup_timeout` — If the server doesn't connect to other quorum participants in the specified timeout it will terminate (ms) (default: 30000).
- `four_letter_word_white_list` — White list of 4lw commands (default: `conf,cons,crst,envi,ruok,srst,srvr,stat,wchs,dirs,mntr,isro,rcvr,apiv,csnp,lgif,rqld`).

Quorum configuration is located in the `<keeper_server>.<raft_configuration>` section and contains server descriptions.
@ -126,7 +126,7 @@ clickhouse keeper --config /etc/your_path_to_config/config.xml

ClickHouse Keeper also provides 4lw commands that are almost the same as in ZooKeeper. Each command is composed of four letters such as `mntr`, `stat` etc. There are some more interesting commands: `stat` gives some general information about the server and connected clients, while `srvr` and `cons` give extended details on the server and connections respectively.

The 4lw commands have a white list configuration `four_letter_word_white_list` which has the default value `conf,cons,crst,envi,ruok,srst,srvr,stat,wchs,dirs,mntr,isro,rcvr,apiv,csnp,lgif,rqld`.

You can issue the commands to ClickHouse Keeper via telnet or nc, at the client port.
@ -309,6 +309,31 @@ Sessions with Ephemerals (1):

/clickhouse/task_queue/ddl
```

- `csnp`: Schedule a snapshot creation task. Returns the last committed log index of the scheduled snapshot on success, or `Failed to schedule snapshot creation task.` on failure. Note that the `lgif` command can help you determine whether the snapshot is done.

```
100
```

- `lgif`: Keeper log information. `first_log_idx`: my first log index in the log store; `first_log_term`: my first log term; `last_log_idx`: my last log index in the log store; `last_log_term`: my last log term; `last_committed_log_idx`: my last committed log index in the state machine; `leader_committed_log_idx`: leader's committed log index from my perspective; `target_committed_log_idx`: target log index that should be committed to; `last_snapshot_idx`: the largest committed log index in the last snapshot.

```
first_log_idx 1
first_log_term 1
last_log_idx 101
last_log_term 1
last_committed_log_idx 100
leader_committed_log_idx 101
target_committed_log_idx 101
last_snapshot_idx 50
```

- `rqld`: Request to become the new leader. Returns `Sent leadership request to leader.` if the request was sent, or `Failed to send leadership request to leader.` if it was not. Note that if the node is already the leader, the outcome is the same as if the request had been sent.

```
Sent leadership request to leader.
```
## Migration from ZooKeeper {#migration-from-zookeeper}

Seamless migration from ZooKeeper to ClickHouse Keeper is impossible: you have to stop your ZooKeeper cluster, convert the data, and start ClickHouse Keeper. The `clickhouse-keeper-converter` tool allows converting ZooKeeper logs and snapshots to a ClickHouse Keeper snapshot. It works only with ZooKeeper > 3.4. Steps for migration:
@ -130,7 +130,7 @@ SHOW TABLES FROM mydatabase;

└────────┘
```

### Example of using named collections with a dictionary with source MySQL

```sql
CREATE DICTIONARY dict (A Int64, B String)
...
```
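A fuller sketch of what such a statement might look like, assuming a named collection `mymysql` that points at a MySQL table (all names here are illustrative):

```sql
-- The named collection supplies host, credentials and database;
-- only the source table has to be named explicitly.
CREATE DICTIONARY dict (A Int64, B String)
PRIMARY KEY A
SOURCE(MYSQL(NAME mymysql TABLE 'source_table'))
LIFETIME(MIN 1 MAX 2)
LAYOUT(HASHED());

-- Look up the value of B for key 1.
SELECT dictGet('dict', 'B', toUInt64(1));
```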
@ -213,7 +213,7 @@ SHOW TABLES FROM mydatabase

└──────┘
```

### Example of using named collections with a dictionary with source POSTGRESQL

```sql
CREATE DICTIONARY dict (a Int64, b String)
...
```
@ -270,7 +270,7 @@ SELECT * FROM remote(remote1, database = default, table = test);

└───┴───┘
```

### Example of using named collections with a dictionary with source ClickHouse

```sql
CREATE DICTIONARY dict(a Int64, b String)
...
```
@ -268,14 +268,14 @@ The path to the table in ZooKeeper.

## dictionaries_config {#server_configuration_parameters-dictionaries_config}

The path to the config file for dictionaries.

Path:

- Specify the absolute path or the path relative to the server config file.
- The path can contain wildcards \* and ?.

See also “[Dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md)”.

**Example**
@ -26,7 +26,7 @@ Ways to configure settings, in order of priority:

- When starting the ClickHouse console client in non-interactive mode, set the startup parameter `--setting=value`.
- When using the HTTP API, pass CGI parameters (`URL?setting_1=value&setting_2=value...`).
- Make settings in the [SETTINGS](../../sql-reference/statements/select/index.md#settings-in-select-query) clause of the SELECT query (a sketch follows below). The setting value is applied only to that query and is reset to the default or previous value after the query is executed.

Settings that can only be made in the server config file are not covered in this section.
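A minimal sketch of the `SETTINGS` clause mentioned above:

```sql
-- max_threads applies only to this query and reverts afterwards.
SELECT count() FROM numbers(1000000) SETTINGS max_threads = 1;
```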
@ -16,44 +16,54 @@ Queries in ClickHouse can be divided into several types:

The following settings regulate user permissions by the type of query:

## readonly

Restricts permissions for read data, write data, and change settings queries.

When set to 1, allows:

- All types of read queries (like SELECT and equivalent queries).
- Queries that modify only session context (like USE).

When set to 2, allows the above plus:

- SET and CREATE TEMPORARY TABLE

:::tip
Queries like EXISTS, DESCRIBE, EXPLAIN, SHOW PROCESSLIST, etc. are equivalent to SELECT, because they just select from system tables.
:::

Possible values:

- 0 — Read, Write, and Change settings queries are allowed.
- 1 — Only Read data queries are allowed.
- 2 — Read data and Change settings queries are allowed.

Default value: 0

:::note
After setting `readonly = 1`, the user can’t change the `readonly` and `allow_ddl` settings in the current session.

When using the `GET` method in the [HTTP interface](../../interfaces/http.md), `readonly = 1` is set automatically. To modify data, use the `POST` method.

Setting `readonly = 1` prohibits the user from changing settings. There is a way to prohibit the user from changing only specific settings. There is also a way to allow changing only specific settings under `readonly = 1` restrictions. For details see [constraints on settings](../../operations/settings/constraints-on-settings.md).
:::

## allow_ddl {#settings_allow_ddl}

Allows or denies [DDL](https://en.wikipedia.org/wiki/Data_definition_language) queries.

Possible values:

- 0 — DDL queries are not allowed.
- 1 — DDL queries are allowed.

Default value: 1

:::note
You cannot run `SET allow_ddl = 1` if `allow_ddl = 0` for the current session.
:::

:::note KILL QUERY
`KILL QUERY` can be performed with any combination of readonly and allow_ddl settings.
:::
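Returning to `readonly`, a small sketch of the behaviour (the table name and the commented-out failure are illustrative):

```sql
SET readonly = 1;

SELECT count() FROM system.tables;  -- read queries are still allowed

-- Write queries and most SET statements are now rejected, e.g.:
-- INSERT INTO t VALUES (1);  -- fails: cannot execute the query in readonly mode
```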
@ -70,7 +70,7 @@ Another use case of `prefer_global_in_and_join` is accessing tables created by

**See also:**

- [Distributed subqueries](../../sql-reference/operators/in.md/#select-distributed-subqueries) for more information on how to use `GLOBAL IN`/`GLOBAL JOIN`

## enable_optimize_predicate_expression {#enable-optimize-predicate-expression}
@ -170,7 +170,7 @@ It makes sense to disable it if the server has millions of tiny tables that are

## function_range_max_elements_in_block {#settings-function_range_max_elements_in_block}

Sets the safety threshold for the data volume generated by the function [range](../../sql-reference/functions/array-functions.md/#range). Defines the maximum number of values generated by the function per block of data (the sum of array sizes for every row in a block).
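For example, the following query generates one array of 100 elements per row, so a block of 10 rows contributes 1000 generated values toward the threshold (a minimal sketch):

```sql
-- 10 rows x 100 elements = 1000 values generated in the block.
SELECT range(100) FROM numbers(10) FORMAT Null;
```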

Possible values:
@ -273,10 +273,10 @@ Default value: 0.

## insert_null_as_default {#insert_null_as_default}

Enables or disables the insertion of [default values](../../sql-reference/statements/create/table.md/#create-default-values) instead of [NULL](../../sql-reference/syntax.md/#null-literal) into columns with a non-[nullable](../../sql-reference/data-types/nullable.md/#data_type-nullable) data type.
If the column type is not nullable and this setting is disabled, then inserting `NULL` causes an exception. If the column type is nullable, then `NULL` values are inserted as is, regardless of this setting.

This setting is applicable to [INSERT ... SELECT](../../sql-reference/statements/insert-into.md/#inserting-the-results-of-select) queries. Note that `SELECT` subqueries may be concatenated with the `UNION ALL` clause.
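A minimal sketch, assuming a table with an explicit default (the names are illustrative):

```sql
CREATE TABLE t_defaults (x UInt8 DEFAULT 42) ENGINE = Memory;

-- With the setting enabled, NULL is replaced by the column default (42).
INSERT INTO t_defaults SELECT NULL SETTINGS insert_null_as_default = 1;

SELECT * FROM t_defaults;  -- returns 42
```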

Possible values:
@ -287,7 +287,7 @@ Default value: `1`.

## join_default_strictness {#settings-join_default_strictness}

Sets the default strictness for [JOIN clauses](../../sql-reference/statements/select/join.md/#select-join).

Possible values:
@ -322,10 +322,10 @@ When using `partial_merge` algorithm, ClickHouse sorts the data and dumps it to

- `direct` - can be applied when the right storage supports key-value requests.

The `direct` algorithm performs a lookup in the right table using rows from the left table as keys. It's supported only by special storages such as [Dictionary](../../engines/table-engines/special/dictionary.md/#dictionary) or [EmbeddedRocksDB](../../engines/table-engines/integrations/embedded-rocksdb.md), and only for the `LEFT` and `INNER` JOINs (a sketch follows below).

- `auto` — try `hash` join and switch on the fly to another algorithm if the memory limit is violated.
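A sketch of opting into the `direct` algorithm described above (the table names are illustrative; `kv_table` stands for an EmbeddedRocksDB or Dictionary table):

```sql
-- Look rows up in the right-hand key-value storage instead of building a hash table.
SET join_algorithm = 'direct';

SELECT t.id, kv.value
FROM t
INNER JOIN kv_table AS kv ON t.id = kv.key;
```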
@ -348,7 +348,7 @@ Default value: 0.

See also:

- [JOIN clause](../../sql-reference/statements/select/join.md/#select-join)
- [Join table engine](../../engines/table-engines/special/join.md)
- [join_default_strictness](#settings-join_default_strictness)
@ -359,7 +359,7 @@ Sets the type of [JOIN](../../sql-reference/statements/select/join.md) behaviour

Possible values:

- 0 — The empty cells are filled with the default value of the corresponding field type.
- 1 — `JOIN` behaves the same way as in standard SQL. The type of the corresponding field is converted to [Nullable](../../sql-reference/data-types/nullable.md/#data_type-nullable), and empty cells are filled with [NULL](../../sql-reference/syntax.md).

Default value: 0.
@ -431,7 +431,7 @@ Default value: 0.

See also:

- [JOIN strictness](../../sql-reference/statements/select/join.md/#join-settings)

## temporary_files_codec {#temporary_files_codec}
@ -532,7 +532,7 @@ Default value: 8.

If ClickHouse should read more than `merge_tree_max_rows_to_use_cache` rows in one query, it does not use the cache of uncompressed blocks.

The cache of uncompressed blocks stores data extracted for queries. ClickHouse uses this cache to speed up responses to repeated small queries. This setting protects the cache from thrashing by queries that read a large amount of data. The [uncompressed_cache_size](../../operations/server-configuration-parameters/settings.md/#server-settings-uncompressed_cache_size) server setting defines the size of the cache of uncompressed blocks.

Possible values:
@ -544,7 +544,7 @@ Default value: 128 ✕ 8192.

If ClickHouse should read more than `merge_tree_max_bytes_to_use_cache` bytes in one query, it does not use the cache of uncompressed blocks.

The cache of uncompressed blocks stores data extracted for queries. ClickHouse uses this cache to speed up responses to repeated small queries. This setting protects the cache from thrashing by queries that read a large amount of data. The [uncompressed_cache_size](../../operations/server-configuration-parameters/settings.md/#server-settings-uncompressed_cache_size) server setting defines the size of the cache of uncompressed blocks.

Possible values:
@ -594,7 +594,7 @@ Default value: `1`.

Setting up query logging.

Queries sent to ClickHouse with this setup are logged according to the rules in the [query_log](../../operations/server-configuration-parameters/settings.md/#server_configuration_parameters-query-log) server configuration parameter.

Example:
@ -639,7 +639,7 @@ log_queries_min_type='EXCEPTION_WHILE_PROCESSING'

Setting up query threads logging.

Query threads log into the [system.query_thread_log](../../operations/system-tables/query_thread_log.md) table. This setting has an effect only when [log_queries](#settings-log-queries) is true. Queries’ threads run by ClickHouse with this setup are logged according to the rules in the [query_thread_log](../../operations/server-configuration-parameters/settings.md/#server_configuration_parameters-query_thread_log) server configuration parameter.

Possible values:
@ -658,7 +658,7 @@ log_query_threads=1

Setting up query views logging.

When a query run by ClickHouse with this setting enabled has associated views (materialized or live views), they are logged in the [query_views_log](../../operations/server-configuration-parameters/settings.md/#server_configuration_parameters-query_views_log) server configuration parameter.

Example:
@ -884,7 +884,7 @@ Default value: `5`.

## max_replicated_fetches_network_bandwidth_for_server {#max_replicated_fetches_network_bandwidth_for_server}

Limits the maximum speed of data exchange over the network in bytes per second for [replicated](../../engines/table-engines/mergetree-family/replication.md) fetches for the server. Only has meaning at server startup. You can also limit the speed for a particular table with the [max_replicated_fetches_network_bandwidth](../../operations/settings/merge-tree-settings.md/#max_replicated_fetches_network_bandwidth) setting.

The setting isn't followed perfectly accurately.
@ -905,7 +905,7 @@ Could be used for throttling speed when replicating the data to add or replace n

## max_replicated_sends_network_bandwidth_for_server {#max_replicated_sends_network_bandwidth_for_server}

Limits the maximum speed of data exchange over the network in bytes per second for [replicated](../../engines/table-engines/mergetree-family/replication.md) sends for the server. Only has meaning at server startup. You can also limit the speed for a particular table with the [max_replicated_sends_network_bandwidth](../../operations/settings/merge-tree-settings.md/#max_replicated_sends_network_bandwidth) setting.

The setting isn't followed perfectly accurately.
@ -955,7 +955,7 @@ For more information, see the section “Extreme values”.

## kafka_max_wait_ms {#kafka-max-wait-ms}

The wait time in milliseconds for reading messages from [Kafka](../../engines/table-engines/integrations/kafka.md/#kafka) before retrying.

Possible values:
@ -977,7 +977,7 @@ Default value: false.

## use_uncompressed_cache {#setting-use_uncompressed_cache}

Whether to use a cache of uncompressed blocks. Accepts 0 or 1. By default, 0 (disabled).
Using the uncompressed cache (only for tables in the MergeTree family) can significantly reduce latency and increase throughput when working with a large number of short queries. Enable this setting for users who send frequent short requests. Also pay attention to the [uncompressed_cache_size](../../operations/server-configuration-parameters/settings.md/#server-settings-uncompressed_cache_size) configuration parameter (only set in the config file) – the size of uncompressed cache blocks. By default, it is 8 GiB. The uncompressed cache is filled in as needed and the least-used data is automatically deleted.

For queries that read at least a somewhat large volume of data (one million rows or more), the uncompressed cache is disabled automatically to save space for truly small queries. This means that you can keep the `use_uncompressed_cache` setting always set to 1.
@ -1124,7 +1124,7 @@ This setting is useful for replicated tables with a sampling key. A query may be

- The cluster latency distribution has a long tail, so that querying more servers increases the query overall latency.

:::warning
This setting will produce incorrect results when joins or subqueries are involved and not all tables meet certain requirements. See [Distributed Subqueries and max_parallel_replicas](../../sql-reference/operators/in.md/#max_parallel_replica-subqueries) for more details.
:::

## compile_expressions {#compile-expressions}
@ -1261,7 +1261,7 @@ Possible values:

Default value: 1.

By default, blocks inserted into replicated tables by the `INSERT` statement are deduplicated (see [Data Replication](../../engines/table-engines/mergetree-family/replication.md)).
For replicated tables, by default only the 100 most recent blocks for each partition are deduplicated (see [replicated_deduplication_window](merge-tree-settings.md/#replicated-deduplication-window), [replicated_deduplication_window_seconds](merge-tree-settings.md/#replicated-deduplication-window-seconds)).
For non-replicated tables see [non_replicated_deduplication_window](merge-tree-settings.md/#non-replicated-deduplication-window).

## deduplicate_blocks_in_dependent_materialized_views {#settings-deduplicate-blocks-in-dependent-materialized-views}
@ -1296,7 +1296,7 @@ Default value: empty string (disabled)

`insert_deduplication_token` is used for deduplication _only_ when not empty.

For replicated tables, by default only the 100 most recent inserts for each partition are deduplicated (see [replicated_deduplication_window](merge-tree-settings.md/#replicated-deduplication-window), [replicated_deduplication_window_seconds](merge-tree-settings.md/#replicated-deduplication-window-seconds)).
For non-replicated tables see [non_replicated_deduplication_window](merge-tree-settings.md/#non-replicated-deduplication-window).

Example:
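The original example is not included in this excerpt; the following is an illustrative sketch of how the token drives deduplication (the table name is hypothetical):

```sql
-- Two inserts with the same token: the second is treated as a duplicate and skipped.
INSERT INTO test_table SETTINGS insert_deduplication_token = 'token-1' VALUES (1);
INSERT INTO test_table SETTINGS insert_deduplication_token = 'token-1' VALUES (2);  -- deduplicated

-- A different token is inserted normally.
INSERT INTO test_table SETTINGS insert_deduplication_token = 'token-2' VALUES (3);
```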
@ -1373,15 +1373,15 @@ Default value: 0.

## count_distinct_implementation {#settings-count_distinct_implementation}

Specifies which of the `uniq*` functions should be used to perform the [COUNT(DISTINCT …)](../../sql-reference/aggregate-functions/reference/count.md/#agg_function-count) construction.

Possible values:

- [uniq](../../sql-reference/aggregate-functions/reference/uniq.md/#agg_function-uniq)
- [uniqCombined](../../sql-reference/aggregate-functions/reference/uniqcombined.md/#agg_function-uniqcombined)
- [uniqCombined64](../../sql-reference/aggregate-functions/reference/uniqcombined64.md/#agg_function-uniqcombined64)
- [uniqHLL12](../../sql-reference/aggregate-functions/reference/uniqhll12.md/#agg_function-uniqhll12)
- [uniqExact](../../sql-reference/aggregate-functions/reference/uniqexact.md/#agg_function-uniqexact)

Default value: `uniqExact`.
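A minimal sketch of swapping in an approximate implementation:

```sql
-- COUNT(DISTINCT ...) is executed as the configured uniq* function.
SELECT count(DISTINCT number % 10)
FROM numbers(1000)
SETTINGS count_distinct_implementation = 'uniqCombined';
```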
@ -1616,14 +1616,14 @@ Enables or disables optimization by transforming some functions to reading subcolumns

These functions can be transformed (a sketch follows the list):

- [length](../../sql-reference/functions/array-functions.md/#array_functions-length) to read the [size0](../../sql-reference/data-types/array.md/#array-size) subcolumn.
- [empty](../../sql-reference/functions/array-functions.md/#function-empty) to read the [size0](../../sql-reference/data-types/array.md/#array-size) subcolumn.
- [notEmpty](../../sql-reference/functions/array-functions.md/#function-notempty) to read the [size0](../../sql-reference/data-types/array.md/#array-size) subcolumn.
- [isNull](../../sql-reference/operators/index.md#operator-is-null) to read the [null](../../sql-reference/data-types/nullable.md/#finding-null) subcolumn.
- [isNotNull](../../sql-reference/operators/index.md#is-not-null) to read the [null](../../sql-reference/data-types/nullable.md/#finding-null) subcolumn.
- [count](../../sql-reference/aggregate-functions/reference/count.md) to read the [null](../../sql-reference/data-types/nullable.md/#finding-null) subcolumn.
- [mapKeys](../../sql-reference/functions/tuple-map-functions.md/#mapkeys) to read the [keys](../../sql-reference/data-types/map.md/#map-subcolumns) subcolumn.
- [mapValues](../../sql-reference/functions/tuple-map-functions.md/#mapvalues) to read the [values](../../sql-reference/data-types/map.md/#map-subcolumns) subcolumn.
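A sketch of the first transformation in the list (the table and column names are illustrative):

```sql
-- With the optimization enabled, length(arr) reads only the arr.size0 subcolumn
-- instead of the whole array column.
SELECT length(arr) FROM t_arrays
SETTINGS optimize_functions_to_subcolumns = 1;
```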

Possible values:
@ -1782,7 +1782,7 @@ Default value: 1000000000 nanoseconds (once a second).

See also:

- System table [trace_log](../../operations/system-tables/trace_log.md/#system_tables-trace_log)

## query_profiler_cpu_time_period_ns {#query_profiler_cpu_time_period_ns}
@ -1805,7 +1805,7 @@ Default value: 1000000000 nanoseconds.

See also:

- System table [trace_log](../../operations/system-tables/trace_log.md/#system_tables-trace_log)

## allow_introspection_functions {#settings-allow_introspection_functions}
@ -1821,11 +1821,11 @@ Default value: 0.

**See Also**

- [Sampling Query Profiler](../../operations/optimizing-performance/sampling-query-profiler.md)
- System table [trace_log](../../operations/system-tables/trace_log.md/#system_tables-trace_log)

## input_format_parallel_parsing {#input-format-parallel-parsing}

Enables or disables order-preserving parallel parsing of data formats. Supported only for the [TSV](../../interfaces/formats.md/#tabseparated), [TSKV](../../interfaces/formats.md/#tskv), [CSV](../../interfaces/formats.md/#csv) and [JSONEachRow](../../interfaces/formats.md/#jsoneachrow) formats.

Possible values:
@ -1836,7 +1836,7 @@ Default value: `1`.

## output_format_parallel_formatting {#output-format-parallel-formatting}

Enables or disables parallel formatting of data formats. Supported only for the [TSV](../../interfaces/formats.md/#tabseparated), [TSKV](../../interfaces/formats.md/#tskv), [CSV](../../interfaces/formats.md/#csv) and [JSONEachRow](../../interfaces/formats.md/#jsoneachrow) formats.

Possible values:
@ -1878,7 +1878,7 @@ Default value: 0.

## insert_distributed_sync {#insert_distributed_sync}

Enables or disables synchronous data insertion into a [Distributed](../../engines/table-engines/special/distributed.md/#distributed) table.

By default, when inserting data into a `Distributed` table, the ClickHouse server sends data to cluster nodes in asynchronous mode. When `insert_distributed_sync=1`, the data is processed synchronously, and the `INSERT` operation succeeds only after all the data is saved on all shards (at least one replica for each shard if `internal_replication` is true).
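A minimal sketch of a synchronous insert (the table name and values are illustrative):

```sql
-- The INSERT returns only after the data is saved on all shards.
INSERT INTO distributed_table SETTINGS insert_distributed_sync = 1
VALUES (1, 'a');
```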
@ -1891,12 +1891,12 @@ Default value: `0`.

**See Also**

- [Distributed Table Engine](../../engines/table-engines/special/distributed.md/#distributed)
- [Managing Distributed Tables](../../sql-reference/statements/system.md/#query-language-system-distributed)

## insert_shard_id {#insert_shard_id}

If not `0`, specifies the shard of a [Distributed](../../engines/table-engines/special/distributed.md/#distributed) table into which the data will be inserted synchronously.

If the `insert_shard_id` value is incorrect, the server will throw an exception.
@ -1909,7 +1909,7 @@ SELECT uniq(shard_num) FROM system.clusters WHERE cluster = 'requested_cluster';
|
|||||||
Possible values:
|
Possible values:
|
||||||
|
|
||||||
- 0 — Disabled.
|
- 0 — Disabled.
|
||||||
- Any number from `1` to `shards_num` of corresponding [Distributed](../../engines/table-engines/special/distributed.md#distributed) table.
|
- Any number from `1` to `shards_num` of corresponding [Distributed](../../engines/table-engines/special/distributed.md/#distributed) table.
|
||||||
|
|
||||||
Default value: `0`.
|
Default value: `0`.
|
||||||
|
|
||||||
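
A minimal sketch (the `Distributed` table name `dist` and the cluster name are illustrative):

```sql
-- Check how many shards the target cluster has.
SELECT uniq(shard_num) FROM system.clusters WHERE cluster = 'requested_cluster';

-- Insert the row synchronously into shard 2 only, bypassing the sharding key.
INSERT INTO dist SETTINGS insert_shard_id = 2 VALUES (42);
```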

## background_move_pool_size {#background_move_pool_size}

Sets the number of threads performing background moves of data parts for [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-multiple-volumes)-engine tables. This setting is applied at ClickHouse server start and can’t be changed in a user session.

Possible values:

- Any positive integer.

Default value: 8.

## background_schedule_pool_size {#background_schedule_pool_size}

Sets the number of threads performing background tasks for [replicated](../../engines/table-engines/mergetree-family/replication.md) tables, [Kafka](../../engines/table-engines/integrations/kafka.md) streaming and [DNS cache updates](../../operations/server-configuration-parameters/settings.md/#server-settings-dns-cache-update-period). This setting is applied at ClickHouse server start and can’t be changed in a user session.

Possible values:

- Any positive integer.

**See Also**

- [Kafka](../../engines/table-engines/integrations/kafka.md/#kafka) engine.
- [RabbitMQ](../../engines/table-engines/integrations/rabbitmq.md/#rabbitmq-engine) engine.

## validate_polygons {#validate_polygons}

## transform_null_in {#transform_null_in}

Enables equality of [NULL](../../sql-reference/syntax.md/#null-literal) values for the [IN](../../sql-reference/operators/in.md) operator.

By default, `NULL` values can’t be compared because `NULL` means an undefined value. Thus, the comparison `expr = NULL` must always return `false`. With this setting, `NULL = NULL` returns `true` for the `IN` operator.
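
A minimal sketch of the difference:

```sql
SELECT 1 IN (1, NULL), NULL IN (1, NULL) SETTINGS transform_null_in = 0;
-- returns 1, 0: NULL never matches

SELECT 1 IN (1, NULL), NULL IN (1, NULL) SETTINGS transform_null_in = 1;
-- returns 1, 1: NULL = NULL now holds
```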

**See Also**

- [NULL Processing in IN Operators](../../sql-reference/operators/in.md/#in-null-processing)

## low_cardinality_max_dictionary_size {#low_cardinality_max_dictionary_size}

## low_cardinality_allow_in_native_format {#low_cardinality_allow_in_native_format}

Allows or restricts using the [LowCardinality](../../sql-reference/data-types/lowcardinality.md) data type with the [Native](../../interfaces/formats.md/#native) format.

If usage of `LowCardinality` is restricted, the ClickHouse server converts `LowCardinality` columns to ordinary ones for `SELECT` queries, and converts ordinary columns to `LowCardinality` columns for `INSERT` queries.

## optimize_read_in_order {#optimize_read_in_order}

Enables [ORDER BY](../../sql-reference/statements/select/order-by.md/#optimize_read_in_order) optimization in [SELECT](../../sql-reference/statements/select/index.md) queries for reading data from [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) tables.

Possible values:

- 0 — `ORDER BY` optimization is disabled.
- 1 — `ORDER BY` optimization is enabled.

Default value: `1`.

**See Also**

- [ORDER BY Clause](../../sql-reference/statements/select/order-by.md/#optimize_read_in_order)

## optimize_aggregation_in_order {#optimize_aggregation_in_order}

Default value: `0`.

**See Also**

- [GROUP BY optimization](../../sql-reference/statements/select/group-by.md/#aggregation-in-order)

## mutations_sync {#mutations_sync}

**See Also**

- [CREATE TABLE query clauses and settings](../../engines/table-engines/mergetree-family/mergetree.md/#mergetree-query-clauses) (`merge_with_ttl_timeout` setting)
- [Table TTL](../../engines/table-engines/mergetree-family/mergetree.md/#mergetree-table-ttl)

## lock_acquire_timeout {#lock_acquire_timeout}

Default value: `120` seconds.

## cast_keep_nullable {#cast_keep_nullable}

Enables or disables keeping of the `Nullable` data type in [CAST](../../sql-reference/functions/type-conversion-functions.md/#type_conversion_function-cast) operations.

When the setting is enabled and the argument of the `CAST` function is `Nullable`, the result is also transformed to a `Nullable` type. When the setting is disabled, the result always has the destination type exactly.
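
A minimal sketch:

```sql
SET cast_keep_nullable = 0;
SELECT CAST(toNullable(toInt32(0)) AS Int32) AS x, toTypeName(x);
-- x has type Int32

SET cast_keep_nullable = 1;
SELECT CAST(toNullable(toInt32(0)) AS Int32) AS x, toTypeName(x);
-- x has type Nullable(Int32)
```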

**See Also**

- [CAST](../../sql-reference/functions/type-conversion-functions.md/#type_conversion_function-cast) function

## system_events_show_zero_values {#system_events_show_zero_values}

## persistent {#persistent}

Disables persistence for the [Set](../../engines/table-engines/special/set.md/#set) and [Join](../../engines/table-engines/special/join.md/#join) table engines.

Reduces the I/O overhead. Suitable for scenarios that prioritize performance and do not require persistence.

Possible values:

- 1 — Enabled.
- 0 — Disabled.

Default value: `1`.

## allow_nullable_key {#allow-nullable-key}

Allows using [Nullable](../../sql-reference/data-types/nullable.md/#data_type-nullable)-typed values in sorting keys and primary keys for [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md/#table_engines-mergetree) tables.

Possible values:

- 1 — `Nullable`-type expressions are allowed in keys.
- 0 — `Nullable`-type expressions are not allowed in keys.

Default value: `0`.
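
A minimal sketch (the table name is illustrative):

```sql
SET allow_nullable_key = 1;

-- Without the setting, a Nullable sorting key would be rejected.
CREATE TABLE t_nullable_key (k Nullable(Int64), v String)
ENGINE = MergeTree
ORDER BY k;
```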

## aggregate_functions_null_for_empty {#aggregate_functions_null_for_empty}

Enables or disables rewriting all aggregate functions in a query, adding the [-OrNull](../../sql-reference/aggregate-functions/combinators.md/#agg-functions-combinator-ornull) suffix to them. Enable it for SQL standard compatibility.
It is implemented via query rewrite (similar to the [count_distinct_implementation](#settings-count_distinct_implementation) setting) to get consistent results for distributed queries.

Possible values:

- 0 — Disabled.
- 1 — Enabled.

Default value: 0.
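
A minimal sketch of the rewrite's effect on an empty result set:

```sql
SELECT sum(-1), max(0) FROM system.one WHERE 0;
-- returns 0, 0

SELECT sum(-1), max(0) FROM system.one WHERE 0
SETTINGS aggregate_functions_null_for_empty = 1;
-- rewritten to sumOrNull(-1), maxOrNull(0): returns NULL, NULL
```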

## data_type_default_nullable {#data_type_default_nullable}

Allows data types without explicit [NULL or NOT NULL](../../sql-reference/statements/create/table.md/#null-modifiers) modifiers in a column definition to be [Nullable](../../sql-reference/data-types/nullable.md/#data_type-nullable).

Possible values:

- 1 — The data types in column definitions are set to `Nullable` by default.
- 0 — The data types in column definitions are set to not `Nullable` by default.

Default value: 0.
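
A minimal sketch (the table name is illustrative):

```sql
SET data_type_default_nullable = 1;

CREATE TABLE t_defaults_nullable (x Int32, s String NOT NULL) ENGINE = Memory;
-- x becomes Nullable(Int32); s stays String because of the explicit NOT NULL
```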

## max_final_threads {#max-final-threads}

Sets the maximum number of parallel threads for the `SELECT` query data read phase with the [FINAL](../../sql-reference/statements/select/from.md/#select-from-final) modifier.

Possible values:

- Positive integer.
- 0 or 1 — Disabled.

Default value: `16`.

Note that this setting influences [Materialized view](../../sql-reference/statements/create/view.md/#materialized) and [MaterializedMySQL](../../engines/database-engines/materialized-mysql.md) behaviour.

## engine_file_empty_if_not_exists {#engine-file-empty_if-not-exists}

## allow_experimental_live_view {#allow-experimental-live-view}

Allows creation of experimental [live views](../../sql-reference/statements/create/view.md/#live-view).

Possible values:

- 1 — Enabled.
- 0 — Disabled.

Default value: `0`.

## live_view_heartbeat_interval {#live-view-heartbeat-interval}

Sets the heartbeat interval in seconds to indicate that a [live view](../../sql-reference/statements/create/view.md/#live-view) is alive.

Default value: `15`.

## max_live_view_insert_blocks_before_refresh {#max-live-view-insert-blocks-before-refresh}

Sets the maximum number of inserted blocks after which mergeable blocks are dropped and the query for a [live view](../../sql-reference/statements/create/view.md/#live-view) is re-executed.

Default value: `64`.

## periodic_live_view_refresh {#periodic-live-view-refresh}

Sets the interval in seconds after which a periodically refreshed [live view](../../sql-reference/statements/create/view.md/#live-view) is forced to refresh.

Default value: `60`.

## check_query_single_value_result {#check_query_single_value_result}

Defines the level of detail of the [CHECK TABLE](../../sql-reference/statements/check-table.md/#checking-mergetree-tables) query result for `MergeTree` family engines.

Possible values:

- 0 — The query shows a check status for every individual data part of a table.
- 1 — The query shows the general table check status.

Default value: `0`.

## prefer_column_name_to_alias {#prefer-column-name-to-alias}

Enables or disables using the original column names instead of aliases in query expressions and clauses. It especially matters when an alias is the same as a column name, see [Expression Aliases](../../sql-reference/syntax.md/#notes-on-usage). Enable this setting to make alias syntax rules in ClickHouse more compatible with most other database engines.

Possible values:

- 0 — The column name is substituted with the alias.
- 1 — The column name is not substituted with the alias.

Default value: `0`.
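
A minimal sketch; without the setting the first query fails because the alias `number` shadows the column:

```sql
-- Throws an exception: avg(number) is resolved through the alias.
SELECT argMax(number, number), avg(number) AS number FROM numbers(10);

-- Succeeds: the original column name is preferred over the alias.
SELECT argMax(number, number), avg(number) AS number FROM numbers(10)
SETTINGS prefer_column_name_to_alias = 1;
```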

## limit {#limit}

Sets the maximum number of rows to get from the query result. It adjusts the value set by the [LIMIT](../../sql-reference/statements/select/limit.md/#limit-clause) clause, so that the limit specified in the query cannot exceed the limit set by this setting.

Possible values:

- 0 — The number of rows is not limited.
- Positive integer.

Default value: `0`.
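
A minimal sketch:

```sql
SET limit = 5;

SELECT number FROM numbers(100) LIMIT 20;
-- only 5 rows are returned: the query LIMIT cannot exceed the setting
```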

## offset {#offset}

Sets the number of rows to skip before starting to return rows from the query. It adjusts the offset set by the [OFFSET](../../sql-reference/statements/select/offset.md/#offset-fetch) clause, so that the two offsets are added together.

Possible values:

- 0 — No rows are skipped.
- Positive integer.

Default value: `0`.
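
A minimal sketch:

```sql
SET offset = 7;

SELECT number FROM numbers(15) LIMIT 3 OFFSET 2;
-- the offsets add up (7 + 2 = 9): rows 9, 10 and 11 are returned
```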

## optimize_syntax_fuse_functions {#optimize_syntax_fuse_functions}

Enables fusing aggregate functions with an identical argument. It rewrites a query that contains at least two aggregate functions from [sum](../../sql-reference/aggregate-functions/reference/sum.md/#agg_function-sum), [count](../../sql-reference/aggregate-functions/reference/count.md/#agg_function-count) or [avg](../../sql-reference/aggregate-functions/reference/avg.md/#agg_function-avg) with an identical argument to use [sumCount](../../sql-reference/aggregate-functions/reference/sumcount.md/#agg_function-sumCount).

Possible values:

- 0 — Functions with an identical argument are not fused.
- 1 — Functions with an identical argument are fused.

Default value: `0`.
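
A minimal sketch (the table name is illustrative):

```sql
CREATE TABLE fuse_tbl (a Int8, b Int8) ENGINE = Log;

SET optimize_syntax_fuse_functions = 1;

EXPLAIN SYNTAX SELECT sum(a), count(a), avg(a) FROM fuse_tbl;
-- the three aggregates are fused into a single sumCount(a) calculation
```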

## allow_experimental_projection_optimization {#allow-experimental-projection-optimization}

Enables or disables [projection](../../engines/table-engines/mergetree-family/mergetree.md/#projections) optimization when processing `SELECT` queries.

Possible values:

- 0 — Projection optimization is disabled.
- 1 — Projection optimization is enabled.

Default value: `1`.

## force_optimize_projection {#force-optimize-projection}

Enables or disables the obligatory use of [projections](../../engines/table-engines/mergetree-family/mergetree.md/#projections) in `SELECT` queries, when projection optimization is enabled (see the [allow_experimental_projection_optimization](#allow-experimental-projection-optimization) setting).

Possible values:

- 0 — Projection optimization is not obligatory.
- 1 — Projection optimization is obligatory.

Default value: `0`.

## regexp_max_matches_per_row {#regexp-max-matches-per-row}

Sets the maximum number of matches for a single regular expression per row. Use it to protect against memory overload when using a greedy regular expression in the [extractAllGroupsHorizontal](../../sql-reference/functions/string-search-functions.md/#extractallgroups-horizontal) function.

Possible values:

- Positive integer.

Default value: `1000`.

## short_circuit_function_evaluation {#short-circuit-function-evaluation}

Allows calculating the [if](../../sql-reference/functions/conditional-functions.md/#if), [multiIf](../../sql-reference/functions/conditional-functions.md/#multiif), [and](../../sql-reference/functions/logical-functions.md/#logical-and-function), and [or](../../sql-reference/functions/logical-functions.md/#logical-or-function) functions according to a [short scheme](https://en.wikipedia.org/wiki/Short-circuit_evaluation). This helps optimize the execution of complex expressions in these functions and prevent possible exceptions (such as division by zero when it is not expected).

Possible values:

- `enable` — Enables short-circuit function evaluation for functions that are suitable for it (can throw an exception or are computationally heavy).
- `force_enable` — Enables short-circuit function evaluation for all functions.
- `disable` — Disables short-circuit function evaluation.

Default value: `enable`.
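
A minimal sketch: with short-circuit evaluation, `intDiv` is not executed for the row where `number = 0`, so no division-by-zero exception is thrown:

```sql
SELECT if(number = 0, 0, intDiv(42, number)) FROM numbers(5)
SETTINGS short_circuit_function_evaluation = 'enable';
```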

## max_hyperscan_regexp_length {#max-hyperscan-regexp-length}

Defines the maximum length for each regular expression in the [hyperscan multi-match functions](../../sql-reference/functions/string-search-functions.md/#multimatchanyhaystack-pattern1-pattern2-patternn).

Possible values:

- Positive integer.
- 0 — The length is not limited.

Default value: `0`.

## max_hyperscan_regexp_total_length {#max-hyperscan-regexp-total-length}

Sets the maximum total length of all regular expressions in each [hyperscan multi-match function](../../sql-reference/functions/string-search-functions.md/#multimatchanyhaystack-pattern1-pattern2-patternn).

Possible values:

- Positive integer.
- 0 — The length is not limited.

Default value: `0`.

## enable_extended_results_for_datetime_functions {#enable-extended-results-for-datetime-functions}

Enables or disables returning results of type:

- `Date32` with extended range (compared to type `Date`) for the functions [toStartOfYear](../../sql-reference/functions/date-time-functions.md/#tostartofyear), [toStartOfISOYear](../../sql-reference/functions/date-time-functions.md/#tostartofisoyear), [toStartOfQuarter](../../sql-reference/functions/date-time-functions.md/#tostartofquarter), [toStartOfMonth](../../sql-reference/functions/date-time-functions.md/#tostartofmonth), [toStartOfWeek](../../sql-reference/functions/date-time-functions.md/#tostartofweek), [toMonday](../../sql-reference/functions/date-time-functions.md/#tomonday) and [toLastDayOfMonth](../../sql-reference/functions/date-time-functions.md/#tolastdayofmonth).
- `DateTime64` with extended range (compared to type `DateTime`) for the functions [toStartOfDay](../../sql-reference/functions/date-time-functions.md/#tostartofday), [toStartOfHour](../../sql-reference/functions/date-time-functions.md/#tostartofhour), [toStartOfMinute](../../sql-reference/functions/date-time-functions.md/#tostartofminute), [toStartOfFiveMinutes](../../sql-reference/functions/date-time-functions.md/#tostartoffiveminutes), [toStartOfTenMinutes](../../sql-reference/functions/date-time-functions.md/#tostartoftenminutes), [toStartOfFifteenMinutes](../../sql-reference/functions/date-time-functions.md/#tostartoffifteenminutes) and [timeSlot](../../sql-reference/functions/date-time-functions.md/#timeslot).

Possible values:

- 0 — Functions return `Date` or `DateTime` for all types of arguments.
- 1 — Functions return `Date32` or `DateTime64` for `Date32` or `DateTime64` arguments, and `Date` or `DateTime` otherwise.

## optimize_move_to_prewhere_if_final {#optimize_move_to_prewhere_if_final}

Enables or disables automatic [PREWHERE](../../sql-reference/statements/select/prewhere.md) optimization in [SELECT](../../sql-reference/statements/select/index.md) queries with the [FINAL](../../sql-reference/statements/select/from.md/#select-from-final) modifier.

Works only for [*MergeTree](../../engines/table-engines/mergetree-family/index.md) tables.

Possible values:

- 0 — Automatic `PREWHERE` optimization is disabled in `SELECT` queries with the `FINAL` modifier.
- 1 — Automatic `PREWHERE` optimization is enabled in `SELECT` queries with the `FINAL` modifier.

Default value: `0`.

## describe_include_subcolumns {#describe_include_subcolumns}

Enables describing subcolumns for a [DESCRIBE](../../sql-reference/statements/describe-table.md) query. For example, members of a [Tuple](../../sql-reference/data-types/tuple.md) or subcolumns of a [Map](../../sql-reference/data-types/map.md/#map-subcolumns), [Nullable](../../sql-reference/data-types/nullable.md/#finding-null) or [Array](../../sql-reference/data-types/array.md/#array-size) data type.

Possible values:

- 0 — Subcolumns are not included in `DESCRIBE` queries.
- 1 — Subcolumns are included in `DESCRIBE` queries.

Default value: `0`.
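
A minimal sketch (the table name is illustrative):

```sql
CREATE TABLE t_subcolumns (m Map(String, UInt64), n Nullable(String)) ENGINE = Memory;

DESCRIBE TABLE t_subcolumns SETTINGS describe_include_subcolumns = 1;
-- additionally lists subcolumns such as m.keys, m.values and n.null
```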

## alter_partition_verbose_result {#alter-partition-verbose-result}

Enables or disables the display of information about the parts to which the manipulation operations with partitions and parts have been successfully applied.
Applicable to [ATTACH PARTITION|PART](../../sql-reference/statements/alter/partition.md/#alter_attach-partition) and to [FREEZE PARTITION](../../sql-reference/statements/alter/partition.md/#alter_freeze-partition).

Possible values:

- 0 — Disable verbosity.
- 1 — Enable verbosity.

Default value: `0`.

Use schema from cache for URL with last modification time validation (for URLs with the `Last-Modified` header).

Default value: `true`.

## use_structure_from_insertion_table_in_table_functions {#use_structure_from_insertion_table_in_table_functions}

Use the structure of the insertion table instead of inferring a schema from the data.

Possible values:

- 0 — Disabled.
- 1 — Enabled.
- 2 — Auto.

Default value: 2.
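
A minimal sketch (the table and file names are hypothetical):

```sql
-- With the setting enabled, the column types of `dest` are used directly
-- instead of inferring a schema from the file contents.
INSERT INTO dest SELECT * FROM file('data.jsonl', 'JSONEachRow')
SETTINGS use_structure_from_insertion_table_in_table_functions = 1;
```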

## compatibility {#compatibility}

This setting changes other settings according to the provided ClickHouse version.

When writing data, ClickHouse throws an exception if the input data contain columns that do not exist in the target table.

Supported formats:

- [JSONEachRow](../../interfaces/formats.md/#jsoneachrow)
- [TSKV](../../interfaces/formats.md/#tskv)
- All formats with the suffixes WithNames/WithNamesAndTypes
- [JSONColumns](../../interfaces/formats.md/#jsoncolumns)
- [MySQLDump](../../interfaces/formats.md/#mysqldump)

Possible values:

- 0 — Disabled.
- 1 — Enabled.

To improve insert performance, we recommend disabling this check if you are sure that the column order of the input data is the same as in the target table.

Supported formats:

- [CSVWithNames](../../interfaces/formats.md/#csvwithnames)
- [CSVWithNamesAndTypes](../../interfaces/formats.md/#csvwithnamesandtypes)
- [TabSeparatedWithNames](../../interfaces/formats.md/#tabseparatedwithnames)
- [TabSeparatedWithNamesAndTypes](../../interfaces/formats.md/#tabseparatedwithnamesandtypes)
- [JSONCompactEachRowWithNames](../../interfaces/formats.md/#jsoncompacteachrowwithnames)
- [JSONCompactEachRowWithNamesAndTypes](../../interfaces/formats.md/#jsoncompacteachrowwithnamesandtypes)
- [JSONCompactStringsEachRowWithNames](../../interfaces/formats.md/#jsoncompactstringseachrowwithnames)
- [JSONCompactStringsEachRowWithNamesAndTypes](../../interfaces/formats.md/#jsoncompactstringseachrowwithnamesandtypes)
- [RowBinaryWithNames](../../interfaces/formats.md/#rowbinarywithnames)
- [RowBinaryWithNamesAndTypes](../../interfaces/formats.md/#rowbinarywithnamesandtypes)
- [CustomSeparatedWithNames](../../interfaces/formats.md/#customseparatedwithnames)
- [CustomSeparatedWithNamesAndTypes](../../interfaces/formats.md/#customseparatedwithnamesandtypes)

Possible values:

- 0 — Disabled.
- 1 — Enabled.

Controls whether the format parser should check if data types from the input data match data types from the target table.

Supported formats:

- [CSVWithNamesAndTypes](../../interfaces/formats.md/#csvwithnamesandtypes)
- [TabSeparatedWithNamesAndTypes](../../interfaces/formats.md/#tabseparatedwithnamesandtypes)
- [JSONCompactEachRowWithNamesAndTypes](../../interfaces/formats.md/#jsoncompacteachrowwithnamesandtypes)
- [JSONCompactStringsEachRowWithNamesAndTypes](../../interfaces/formats.md/#jsoncompactstringseachrowwithnamesandtypes)
- [RowBinaryWithNamesAndTypes](../../interfaces/formats.md/#rowbinarywithnamesandtypes-rowbinarywithnamesandtypes)
- [CustomSeparatedWithNamesAndTypes](../../interfaces/formats.md/#customseparatedwithnamesandtypes)

Possible values:

- 0 — Disabled.
- 1 — Enabled.

Default value: 1.

## input_format_defaults_for_omitted_fields {#input_format_defaults_for_omitted_fields}

When performing `INSERT` queries, replace omitted input column values with the default values of the respective columns. This option applies only to the [JSONEachRow](../../interfaces/formats.md/#jsoneachrow), [CSV](../../interfaces/formats.md/#csv) and [TabSeparated](../../interfaces/formats.md/#tabseparated) formats and to formats with the `WithNames`/`WithNamesAndTypes` suffixes.

:::note
When this option is enabled, extended table metadata is sent from server to client. It consumes additional computing resources on the server and can reduce performance.
:::

Possible values:

- 0 — Disabled.
- 1 — Enabled.

Default value: 1.

## input_format_null_as_default {#input_format_null_as_default}

Enables or disables the initialization of [NULL](../../sql-reference/syntax.md/#null-literal) fields with [default values](../../sql-reference/statements/create/table.md/#create-default-values), if the data type of these fields is not [nullable](../../sql-reference/data-types/nullable.md/#data_type-nullable).
If the column type is not nullable and this setting is disabled, then inserting `NULL` causes an exception. If the column type is nullable, then `NULL` values are inserted as is, regardless of this setting.

This setting is applicable to [INSERT ... VALUES](../../sql-reference/statements/insert-into.md) queries for text input formats.
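
A minimal sketch (the table name is illustrative):

```sql
CREATE TABLE t_null_as_default (x UInt32 DEFAULT 42) ENGINE = Memory;

SET input_format_null_as_default = 1;

INSERT INTO t_null_as_default VALUES (NULL);
-- stores 42 (the column default) instead of throwing an exception
```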

## insert_distributed_one_random_shard {#insert_distributed_one_random_shard}

Enables or disables random shard insertion into a [Distributed](../../engines/table-engines/special/distributed.md/#distributed) table when there is no distributed key.

By default, when inserting data into a `Distributed` table with more than one shard, the ClickHouse server rejects any insertion request if there is no distributed key. When `insert_distributed_one_random_shard = 1`, insertions are allowed and data is forwarded randomly among all shards.

Enables or disables the insertion of JSON data with nested objects.

Supported formats:

- [JSONEachRow](../../interfaces/formats.md/#jsoneachrow)

Possible values:

- 0 — Disabled.
- 1 — Enabled.

Default value: 0.

See also:

- [Usage of Nested Structures](../../interfaces/formats.md/#jsoneachrow-nested) with the `JSONEachRow` format.

### input_format_json_read_bools_as_numbers {#input_format_json_read_bools_as_numbers}

Allows parsing booleans as numbers in JSON input formats.

Enabled by default.

### output_format_json_quote_64bit_integers {#output_format_json_quote_64bit_integers}

Controls the quoting of 64-bit or bigger [integers](../../sql-reference/data-types/int-uint.md) (like `UInt64` or `Int128`) when they are output in a [JSON](../../interfaces/formats.md/#json) format.
Such integers are enclosed in quotes by default. This behavior is compatible with most JavaScript implementations.

Possible values:

- 0 — Integers are output without quotes.
- 1 — Integers are enclosed in quotes.

Default value: 1.
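
A minimal sketch:

```sql
SELECT toUInt64(10) AS value SETTINGS output_format_json_quote_64bit_integers = 1 FORMAT JSON;
-- "value": "10"  (quoted)

SELECT toUInt64(10) AS value SETTINGS output_format_json_quote_64bit_integers = 0 FORMAT JSON;
-- "value": 10    (a bare number)
```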

### output_format_json_quote_denormals {#output_format_json_quote_denormals}

Enables `+nan`, `-nan`, `+inf`, `-inf` outputs in the [JSON](../../interfaces/formats.md/#json) output format.

Possible values:

- 0 — Disabled.
- 1 — Enabled.

Default value: 0.

### output_format_json_array_of_rows {#output_format_json_array_of_rows}

Enables the ability to output all rows as a JSON array in the [JSONEachRow](../../interfaces/formats.md/#jsoneachrow) format.

Possible values:

- 1 — ClickHouse outputs all rows as an array, each row in the `JSONEachRow` format.
- 0 — ClickHouse outputs each row separately in the `JSONEachRow` format.

Default value: `0`.
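
A minimal sketch:

```sql
SELECT number FROM numbers(3)
SETTINGS output_format_json_array_of_rows = 1
FORMAT JSONEachRow;
-- produces one JSON array: [{"number":"0"}, {"number":"1"}, {"number":"2"}]
```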

### format_json_object_each_row_column_for_object_name {#format_json_object_each_row_column_for_object_name}

The name of the column that will be used for storing/writing object names in the [JSONObjectEachRow](../../interfaces/formats.md/#jsonobjecteachrow) format.
The column type should be `String`. If the value is empty, default names `row_{i}` will be used for object names.

Default value: ''.

### format_tsv_null_representation {#format_tsv_null_representation}

Defines the representation of `NULL` for the [TSV](../../interfaces/formats.md/#tabseparated) output and input formats. The user can set any string as a value, for example, `My NULL`.

Default value: `\N`.
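
A minimal sketch:

```sql
SELECT NULL AS x
SETTINGS format_tsv_null_representation = 'My NULL'
FORMAT TSV;
-- prints: My NULL
```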

### format_csv_null_representation {#format_csv_null_representation}

Defines the representation of `NULL` for the [CSV](../../interfaces/formats.md/#csv) output and input formats. The user can set any string as a value, for example, `My NULL`.

Default value: `\N`.

### input_format_values_interpret_expressions {#input_format_values_interpret_expressions}

Enables or disables the full SQL parser if the fast stream parser can’t parse the data. This setting is used only for the [Values](../../interfaces/formats.md/#data-format-values) format at data insertion. For more information about syntax parsing, see the [Syntax](../../sql-reference/syntax.md) section.

Possible values:

- 0 — Disabled.
- 1 — Enabled.

Default value: 1.

### input_format_values_deduce_templates_of_expressions {#input_format_values_deduce_templates_of_expressions}

Enables or disables template deduction for SQL expressions in the [Values](../../interfaces/formats.md/#data-format-values) format. It allows parsing and interpreting expressions in `Values` much faster if expressions in consecutive rows have the same structure. ClickHouse tries to deduce the template of an expression, parse the following rows using this template and evaluate the expression on a batch of successfully parsed rows.

Possible values:

- 0 — Disabled.
- 1 — Enabled.

Default value: 1.

### input_format_arrow_import_nested {#input_format_arrow_import_nested}

Enables or disables the ability to insert data into [Nested](../../sql-reference/data-types/nested-data-structures/nested.md) columns as an array of structs in the [Arrow](../../interfaces/formats.md/#data_types-matching-arrow) input format.

Possible values:

- 0 — Disabled.
- 1 — Enabled.

Default value: `0`.

### output_format_arrow_low_cardinality_as_dictionary {#output_format_arrow_low_cardinality_as_dictionary}

Allows converting the [LowCardinality](../../sql-reference/data-types/lowcardinality.md) type to the `DICTIONARY` type of the [Arrow](../../interfaces/formats.md/#data-format-arrow) format for `SELECT` queries.

Possible values:

- 0 — The `LowCardinality` type is not converted to the `DICTIONARY` type.
- 1 — The `LowCardinality` type is converted to the `DICTIONARY` type.

Default value: `0`.

### input_format_orc_import_nested {#input_format_orc_import_nested}

Enables or disables the ability to insert data into [Nested](../../sql-reference/data-types/nested-data-structures/nested.md) columns as an array of structs in the [ORC](../../interfaces/formats.md/#data-format-orc) input format.

Possible values:

- 0 — Disabled.
- 1 — Enabled.

Default value: `0`.

### input_format_parquet_import_nested {#input_format_parquet_import_nested}

Enables or disables the ability to insert data into [Nested](../../sql-reference/data-types/nested-data-structures/nested.md) columns as an array of structs in the [Parquet](../../interfaces/formats.md/#data-format-parquet) input format.

Possible values:

- 0 — Disabled.
- 1 — Enabled.

Default value: `0`.

### input_format_avro_allow_missing_fields {#input_format_avro_allow_missing_fields}

Enables using fields that are not specified in the [Avro](../../interfaces/formats.md/#data-format-avro) or [AvroConfluent](../../interfaces/formats.md/#data-format-avro-confluent) format schema. When a field is not found in the schema, ClickHouse uses the default value instead of throwing an exception.

Possible values:

- 0 — Disabled.
- 1 — Enabled.

Default value: 0.

### format_avro_schema_registry_url {#format_avro_schema_registry_url}

Sets the [Confluent Schema Registry](https://docs.confluent.io/current/schema-registry/index.html) URL to use with the [AvroConfluent](../../interfaces/formats.md/#data-format-avro-confluent) format.

Default value: `Empty`.

### output_format_pretty_max_value_width {#output_format_pretty_max_value_width}

Limits the width of values displayed in [Pretty](../../interfaces/formats.md/#pretty) formats. If a value's width exceeds the limit, the value is cut.

Possible values:

- Positive integer.
- 0 — The value is cut completely.

### output_format_pretty_row_numbers {#output_format_pretty_row_numbers}

Adds row numbers to the output in the [Pretty](../../interfaces/formats.md/#pretty) format.

Possible values:

- 0 — Output without row numbers.
- 1 — Output with row numbers.

Default value: `0`.
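
A minimal sketch:

```sql
SELECT number FROM numbers(3)
SETTINGS output_format_pretty_row_numbers = 1
FORMAT PrettyCompact;
-- each output row is prefixed with its number: 1., 2., 3.
```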

### format_custom_escaping_rule {#format_custom_escaping_rule}

Sets the field escaping rule for the [CustomSeparated](../../interfaces/formats.md/#format-customseparated) data format.

Possible values:

- `'Escaped'` — Similarly to [TSV](../../interfaces/formats.md/#tabseparated).
- `'Quoted'` — Similarly to [Values](../../interfaces/formats.md/#data-format-values).
- `'CSV'` — Similarly to [CSV](../../interfaces/formats.md/#csv).
- `'JSON'` — Similarly to [JSONEachRow](../../interfaces/formats.md/#jsoneachrow).
- `'XML'` — Similarly to [XML](../../interfaces/formats.md/#xml).
- `'Raw'` — Extracts subpatterns as a whole, no escaping rules, similarly to [TSVRaw](../../interfaces/formats.md/#tabseparatedraw).

Default value: `'Escaped'`.

### format_custom_field_delimiter {#format_custom_field_delimiter}

Sets the character that is interpreted as a delimiter between the fields for the [CustomSeparated](../../interfaces/formats.md/#format-customseparated) data format.

Default value: `'\t'`.
### format_custom_row_before_delimiter {#format_custom_row_before_delimiter}
|
### format_custom_row_before_delimiter {#format_custom_row_before_delimiter}
|
||||||
|
|
||||||
Sets the character that is interpreted as a delimiter before the field of the first column for [CustomSeparated](../../interfaces/formats.md#format-customseparated) data format.
|
Sets the character that is interpreted as a delimiter before the field of the first column for [CustomSeparated](../../interfaces/formats.md/#format-customseparated) data format.
|
||||||
|
|
||||||
Default value: `''`.
|
Default value: `''`.
|
||||||
|
|
||||||
### format_custom_row_after_delimiter {#format_custom_row_after_delimiter}
|
### format_custom_row_after_delimiter {#format_custom_row_after_delimiter}
|
||||||
|
|
||||||
Sets the character that is interpreted as a delimiter after the field of the last column for [CustomSeparated](../../interfaces/formats.md#format-customseparated) data format.
|
Sets the character that is interpreted as a delimiter after the field of the last column for [CustomSeparated](../../interfaces/formats.md/#format-customseparated) data format.
|
||||||
|
|
||||||
Default value: `'\n'`.
|
Default value: `'\n'`.
|
||||||
|
|
||||||
### format_custom_row_between_delimiter {#format_custom_row_between_delimiter}
|
### format_custom_row_between_delimiter {#format_custom_row_between_delimiter}
|
||||||
|
|
||||||
Sets the character that is interpreted as a delimiter between the rows for [CustomSeparated](../../interfaces/formats.md#format-customseparated) data format.
|
Sets the character that is interpreted as a delimiter between the rows for [CustomSeparated](../../interfaces/formats.md/#format-customseparated) data format.
|
||||||
|
|
||||||
Default value: `''`.
|
Default value: `''`.
|
||||||
|
|
||||||
### format_custom_result_before_delimiter {#format_custom_result_before_delimiter}
|
### format_custom_result_before_delimiter {#format_custom_result_before_delimiter}
|
||||||
|
|
||||||
Sets the character that is interpreted as a prefix before the result set for [CustomSeparated](../../interfaces/formats.md#format-customseparated) data format.
|
Sets the character that is interpreted as a prefix before the result set for [CustomSeparated](../../interfaces/formats.md/#format-customseparated) data format.
|
||||||
|
|
||||||
Default value: `''`.
|
Default value: `''`.
|
||||||
|
|
||||||
### format_custom_result_after_delimiter {#format_custom_result_after_delimiter}
|
### format_custom_result_after_delimiter {#format_custom_result_after_delimiter}
|
||||||
|
|
||||||
Sets the character that is interpreted as a suffix after the result set for [CustomSeparated](../../interfaces/formats.md#format-customseparated) data format.
|
Sets the character that is interpreted as a suffix after the result set for [CustomSeparated](../../interfaces/formats.md/#format-customseparated) data format.
|
||||||
|
|
||||||
Default value: `''`.
|
Default value: `''`.
|
||||||
|
|
||||||
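Taken together, the settings above define the complete `CustomSeparated` layout. A minimal sketch, where the delimiter characters are illustrative choices only:

``` sql
-- Illustrative delimiters; any strings may be substituted.
SELECT number AS id, concat('name_', toString(number)) AS name
FROM system.numbers
LIMIT 3
SETTINGS
    format_custom_escaping_rule = 'CSV',
    format_custom_field_delimiter = ';',
    format_custom_row_before_delimiter = '<',
    format_custom_row_after_delimiter = '>',
    format_custom_row_between_delimiter = '\n'
FORMAT CustomSeparated
```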
@ -4727,12 +4738,12 @@ Field escaping rule.
|
|||||||
|
|
||||||
Possible values:
|
Possible values:
|
||||||
|
|
||||||
- `'Escaped'` — Similarly to [TSV](../../interfaces/formats.md#tabseparated).
|
- `'Escaped'` — Similarly to [TSV](../../interfaces/formats.md/#tabseparated).
|
||||||
- `'Quoted'` — Similarly to [Values](../../interfaces/formats.md#data-format-values).
|
- `'Quoted'` — Similarly to [Values](../../interfaces/formats.md/#data-format-values).
|
||||||
- `'CSV'` — Similarly to [CSV](../../interfaces/formats.md#csv).
|
- `'CSV'` — Similarly to [CSV](../../interfaces/formats.md/#csv).
|
||||||
- `'JSON'` — Similarly to [JSONEachRow](../../interfaces/formats.md#jsoneachrow).
|
- `'JSON'` — Similarly to [JSONEachRow](../../interfaces/formats.md/#jsoneachrow).
|
||||||
- `'XML'` — Similarly to [XML](../../interfaces/formats.md#xml).
|
- `'XML'` — Similarly to [XML](../../interfaces/formats.md/#xml).
|
||||||
- `'Raw'` — Extracts subpatterns as a whole, no escaping rules, similarly to [TSVRaw](../../interfaces/formats.md#tabseparatedraw).
|
- `'Raw'` — Extracts subpatterns as a whole, no escaping rules, similarly to [TSVRaw](../../interfaces/formats.md/#tabseparatedraw).
|
||||||
|
|
||||||
Default value: `Raw`.
|
Default value: `Raw`.
|
||||||
|
|
||||||
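A sketch only: the setting's own heading is elided from this hunk, so the name `format_regexp_escaping_rule` and the sample file are assumptions.

``` sql
-- Assumed setting name (heading elided above); parses lines like "id=1 name=foo".
SET format_regexp = 'id=(\\d+) name=(\\w+)';
SET format_regexp_escaping_rule = 'Raw';

-- data.txt is a hypothetical file whose lines match the pattern.
SELECT * FROM file('data.txt', 'Regexp', 'id UInt32, name String');
```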
@ -4746,7 +4757,7 @@ Disabled by default.
|
|||||||
|
|
||||||
### format_capn_proto_enum_comparising_mode {#format_capn_proto_enum_comparising_mode}
|
### format_capn_proto_enum_comparising_mode {#format_capn_proto_enum_comparising_mode}
|
||||||
|
|
||||||
Determines how to map ClickHouse `Enum` data type and [CapnProto](../../interfaces/formats.md#capnproto) `Enum` data type from schema.
|
Determines how to map ClickHouse `Enum` data type and [CapnProto](../../interfaces/formats.md/#capnproto) `Enum` data type from schema.
|
||||||
|
|
||||||
Possible values:
|
Possible values:
|
||||||
|
|
||||||
@ -4773,7 +4784,7 @@ Possible values:
|
|||||||
|
|
||||||
Default value: 1.
|
Default value: 1.
|
||||||
|
|
||||||
## SQLInsert format settings {$sqlinsert-format-settings}
|
## SQLInsert format settings {#sqlinsert-format-settings}
|
||||||
|
|
||||||
### output_format_sql_insert_max_batch_size {#output_format_sql_insert_max_batch_size}
|
### output_format_sql_insert_max_batch_size {#output_format_sql_insert_max_batch_size}
|
||||||
|
|
||||||
@ -4804,3 +4815,17 @@ Default value: `false`.
|
|||||||
Quote column names with "`" characters
|
Quote column names with "`" characters
|
||||||
|
|
||||||
Default value: `true`.
|
Default value: `true`.
|
||||||
|
|
||||||
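A hedged sketch of the batch-size setting above; the limit of two rows per `INSERT` statement is an arbitrary illustration:

``` sql
-- Emit INSERT statements with at most 2 rows each (value chosen for illustration).
SELECT number AS x
FROM system.numbers
LIMIT 4
SETTINGS output_format_sql_insert_max_batch_size = 2
FORMAT SQLInsert
```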
|
## BSONEachRow format settings {#bson-each-row-format-settings}
|
||||||
|
|
||||||
|
### output_format_bson_string_as_string {#output_format_bson_string_as_string}
|
||||||
|
|
||||||
|
Use BSON String type instead of Binary for String columns.
|
||||||
|
|
||||||
|
Disabled by default.
|
||||||
|
|
||||||
|
### input_format_bson_skip_fields_with_unsupported_types_in_schema_inference {#input_format_bson_skip_fields_with_unsupported_types_in_schema_inference}
|
||||||
|
|
||||||
|
Allow skipping columns with unsupported types during schema inference for the BSONEachRow format.
|
||||||
|
|
||||||
|
Disabled by default.
|
||||||
|
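A minimal sketch of the output setting described above:

``` sql
-- Write String columns as BSON String instead of Binary (output is binary BSON).
SELECT 'hello' AS greeting
SETTINGS output_format_bson_string_as_string = 1
FORMAT BSONEachRow
```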
@ -7,13 +7,13 @@ title: "External Disks for Storing Data"
|
|||||||
|
|
||||||
Data processed in ClickHouse is usually stored in the local file system — on the same machine as the ClickHouse server. That requires large-capacity disks, which can be quite expensive. To avoid that, you can store the data remotely — on [Amazon S3](https://aws.amazon.com/s3/) disks or in the Hadoop Distributed File System ([HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html)).
|
Data processed in ClickHouse is usually stored in the local file system — on the same machine as the ClickHouse server. That requires large-capacity disks, which can be quite expensive. To avoid that, you can store the data remotely — on [Amazon S3](https://aws.amazon.com/s3/) disks or in the Hadoop Distributed File System ([HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html)).
|
||||||
|
|
||||||
To work with data stored on `Amazon S3` disks use [S3](../engines/table-engines/integrations/s3.md) table engine, and to work with data in the Hadoop Distributed File System — [HDFS](../engines/table-engines/integrations/hdfs.md) table engine.
|
To work with data stored on `Amazon S3` disks, use the [S3](/docs/en/engines/table-engines/integrations/s3.md) table engine, and to work with data in the Hadoop Distributed File System, use the [HDFS](/docs/en/engines/table-engines/integrations/hdfs.md) table engine.
|
||||||
|
|
||||||
To load data from a web server with static files use a disk with type [web](#storing-data-on-webserver).
|
To load data from a web server with static files use a disk with type [web](#storing-data-on-webserver).
|
||||||
|
|
||||||
## Configuring HDFS {#configuring-hdfs}
|
## Configuring HDFS {#configuring-hdfs}
|
||||||
|
|
||||||
[MergeTree](../engines/table-engines/mergetree-family/mergetree.md) and [Log](../engines/table-engines/log-family/log.md) family table engines can store data to HDFS using a disk with type `HDFS`.
|
[MergeTree](/docs/en/engines/table-engines/mergetree-family/mergetree.md) and [Log](/docs/en/engines/table-engines/log-family/log.md) family table engines can store data to HDFS using a disk with type `HDFS`.
|
||||||
|
|
||||||
Configuration markup:
|
Configuration markup:
|
||||||
|
|
||||||
@ -53,7 +53,7 @@ Optional parameters:
|
|||||||
|
|
||||||
## Using Virtual File System for Data Encryption {#encrypted-virtual-file-system}
|
## Using Virtual File System for Data Encryption {#encrypted-virtual-file-system}
|
||||||
|
|
||||||
You can encrypt the data stored on [S3](../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-s3), or [HDFS](#configuring-hdfs) external disks, or on a local disk. To turn on the encryption mode, in the configuration file you must define a disk with the type `encrypted` and choose a disk on which the data will be saved. An `encrypted` disk ciphers all written files on the fly, and when you read files from an `encrypted` disk it deciphers them automatically. So you can work with an `encrypted` disk like with a normal one.
|
You can encrypt the data stored on [S3](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-s3), or [HDFS](#configuring-hdfs) external disks, or on a local disk. To turn on the encryption mode, in the configuration file you must define a disk with the type `encrypted` and choose a disk on which the data will be saved. An `encrypted` disk ciphers all written files on the fly, and when you read files from an `encrypted` disk it deciphers them automatically. So you can work with an `encrypted` disk like with a normal one.
|
||||||
|
|
||||||
Example of disk configuration:
|
Example of disk configuration:
|
||||||
|
|
||||||
@ -80,14 +80,14 @@ Required parameters:
|
|||||||
|
|
||||||
- `type` — `encrypted`. Otherwise the encrypted disk is not created.
|
- `type` — `encrypted`. Otherwise the encrypted disk is not created.
|
||||||
- `disk` — Type of disk for data storage.
|
- `disk` — Type of disk for data storage.
|
||||||
- `key` — The key for encryption and decryption. Type: [Uint64](../sql-reference/data-types/int-uint.md). You can use `key_hex` parameter to encrypt in hexadecimal form.
|
- `key` — The key for encryption and decryption. Type: [Uint64](/docs/en/sql-reference/data-types/int-uint.md). You can use `key_hex` parameter to encrypt in hexadecimal form.
|
||||||
You can specify multiple keys using the `id` attribute (see example above).
|
You can specify multiple keys using the `id` attribute (see example above).
|
||||||
|
|
||||||
Optional parameters:
|
Optional parameters:
|
||||||
|
|
||||||
- `path` — Path to the location on the disk where the data will be saved. If not specified, the data will be saved in the root directory.
|
- `path` — Path to the location on the disk where the data will be saved. If not specified, the data will be saved in the root directory.
|
||||||
- `current_key_id` — The key used for encryption. All the specified keys can be used for decryption, and you can always switch to another key while maintaining access to previously encrypted data.
|
- `current_key_id` — The key used for encryption. All the specified keys can be used for decryption, and you can always switch to another key while maintaining access to previously encrypted data.
|
||||||
- `algorithm` — [Algorithm](../sql-reference/statements/create/table.md#create-query-encryption-codecs) for encryption. Possible values: `AES_128_CTR`, `AES_192_CTR` or `AES_256_CTR`. Default value: `AES_128_CTR`. The key length depends on the algorithm: `AES_128_CTR` — 16 bytes, `AES_192_CTR` — 24 bytes, `AES_256_CTR` — 32 bytes.
|
- `algorithm` — [Algorithm](/docs/en/sql-reference/statements/create/table.md/#create-query-encryption-codecs) for encryption. Possible values: `AES_128_CTR`, `AES_192_CTR` or `AES_256_CTR`. Default value: `AES_128_CTR`. The key length depends on the algorithm: `AES_128_CTR` — 16 bytes, `AES_192_CTR` — 24 bytes, `AES_256_CTR` — 32 bytes.
|
||||||
|
|
||||||
Example of disk configuration:
|
Example of disk configuration:
|
||||||
|
|
||||||
@ -265,9 +265,9 @@ Cache profile events:
|
|||||||
|
|
||||||
There is a tool `clickhouse-static-files-uploader`, which prepares a data directory for a given table (`SELECT data_paths FROM system.tables WHERE name = 'table_name'`). For each table you need, you get a directory of files. These files can be uploaded to, for example, a web server with static files. After this preparation, you can load this table into any ClickHouse server via `DiskWeb`.
|
There is a tool `clickhouse-static-files-uploader`, which prepares a data directory for a given table (`SELECT data_paths FROM system.tables WHERE name = 'table_name'`). For each table you need, you get a directory of files. These files can be uploaded to, for example, a web server with static files. After this preparation, you can load this table into any ClickHouse server via `DiskWeb`.
|
||||||
|
|
||||||
This is a read-only disk. Its data is only read and never modified. A new table is loaded to this disk via the `ATTACH TABLE` query (see example below). The local disk is not actually used: each `SELECT` query results in an `http` request to fetch the required data. Any modification of the table data results in an exception, i.e. the following types of queries are not allowed: [CREATE TABLE](../sql-reference/statements/create/table.md), [ALTER TABLE](../sql-reference/statements/alter/index.md), [RENAME TABLE](../sql-reference/statements/rename.md#misc_operations-rename_table), [DETACH TABLE](../sql-reference/statements/detach.md) and [TRUNCATE TABLE](../sql-reference/statements/truncate.md).
|
This is a read-only disk. Its data is only read and never modified. A new table is loaded to this disk via the `ATTACH TABLE` query (see example below). The local disk is not actually used: each `SELECT` query results in an `http` request to fetch the required data. Any modification of the table data results in an exception, i.e. the following types of queries are not allowed: [CREATE TABLE](/docs/en/sql-reference/statements/create/table.md), [ALTER TABLE](/docs/en/sql-reference/statements/alter/index.md), [RENAME TABLE](/docs/en/sql-reference/statements/rename.md/#misc_operations-rename_table), [DETACH TABLE](/docs/en/sql-reference/statements/detach.md) and [TRUNCATE TABLE](/docs/en/sql-reference/statements/truncate.md).
|
||||||
|
|
||||||
Web server storage is supported only for the [MergeTree](../engines/table-engines/mergetree-family/mergetree.md) and [Log](../engines/table-engines/log-family/log.md) engine families. To access the data stored on a `web` disk, use the [storage_policy](../engines/table-engines/mergetree-family/mergetree.md#terms) setting when executing the query. For example, `ATTACH TABLE table_web UUID '{}' (id Int32) ENGINE = MergeTree() ORDER BY id SETTINGS storage_policy = 'web'`.
|
Web server storage is supported only for the [MergeTree](/docs/en/engines/table-engines/mergetree-family/mergetree.md) and [Log](/docs/en/engines/table-engines/log-family/log.md) engine families. To access the data stored on a `web` disk, use the [storage_policy](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#terms) setting when executing the query. For example, `ATTACH TABLE table_web UUID '{}' (id Int32) ENGINE = MergeTree() ORDER BY id SETTINGS storage_policy = 'web'`.
|
||||||
|
|
||||||
A ready test case. You need to add this configuration to config:
|
A ready test case. You need to add this configuration to config:
|
||||||
|
|
||||||
@ -451,7 +451,7 @@ Optional parameters:
|
|||||||
- `remote_fs_read_backoff_threashold` — The maximum wait time when trying to read data from a remote disk. Default value: `10000` seconds.
|
- `remote_fs_read_backoff_threashold` — The maximum wait time when trying to read data from a remote disk. Default value: `10000` seconds.
|
||||||
- `remote_fs_read_backoff_max_tries` — The maximum number of attempts to read with backoff. Default value: `5`.
|
- `remote_fs_read_backoff_max_tries` — The maximum number of attempts to read with backoff. Default value: `5`.
|
||||||
|
|
||||||
If a query fails with an exception `DB:Exception Unreachable URL`, then you can try to adjust the settings: [http_connection_timeout](../operations/settings/settings.md#http_connection_timeout), [http_receive_timeout](../operations/settings/settings.md#http_receive_timeout), [keep_alive_timeout](../operations/server-configuration-parameters/settings.md#keep-alive-timeout).
|
If a query fails with an exception `DB:Exception Unreachable URL`, then you can try to adjust the settings: [http_connection_timeout](/docs/en/operations/settings/settings.md/#http_connection_timeout), [http_receive_timeout](/docs/en/operations/settings/settings.md/#http_receive_timeout), [keep_alive_timeout](/docs/en/operations/server-configuration-parameters/settings.md/#keep-alive-timeout).
|
||||||
|
|
||||||
To get files for upload run:
|
To get files for upload run:
|
||||||
`clickhouse static-files-disk-uploader --metadata-path <path> --output-dir <dir>` (`--metadata-path` can be found in query `SELECT data_paths FROM system.tables WHERE name = 'table_name'`).
|
`clickhouse static-files-disk-uploader --metadata-path <path> --output-dir <dir>` (`--metadata-path` can be found in query `SELECT data_paths FROM system.tables WHERE name = 'table_name'`).
|
||||||
@ -460,7 +460,7 @@ When loading files by `endpoint`, they must be loaded into `<endpoint>/store/` p
|
|||||||
|
|
||||||
If the URL is not reachable while the server is starting up and loading tables, all errors are caught. If there were errors in this case, tables can be reloaded (become visible) via `DETACH TABLE table_name` -> `ATTACH TABLE table_name`. If the metadata was successfully loaded at server startup, tables are available straight away.
|
If the URL is not reachable while the server is starting up and loading tables, all errors are caught. If there were errors in this case, tables can be reloaded (become visible) via `DETACH TABLE table_name` -> `ATTACH TABLE table_name`. If the metadata was successfully loaded at server startup, tables are available straight away.
|
||||||
|
|
||||||
Use [http_max_single_read_retries](../operations/settings/settings.md#http-max-single-read-retries) setting to limit the maximum number of retries during a single HTTP read.
|
Use [http_max_single_read_retries](/docs/en/operations/settings/settings.md/#http-max-single-read-retries) setting to limit the maximum number of retries during a single HTTP read.
|
||||||
|
|
||||||
|
|
||||||
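For example, a session-level sketch; the values are illustrative, not recommendations:

``` sql
-- Illustrative values; tune for your network conditions.
SET http_connection_timeout = 10;
SET http_receive_timeout = 1800;
SET http_max_single_read_retries = 8;
```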
## Zero-copy Replication (not ready for production) {#zero-copy}
|
## Zero-copy Replication (not ready for production) {#zero-copy}
|
||||||
|
@ -7,8 +7,8 @@ Contains information about stack traces for fatal errors. The table does not exi
|
|||||||
|
|
||||||
Columns:
|
Columns:
|
||||||
|
|
||||||
- `event_date` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date of the event.
|
- `event_date` ([DateTime](../../sql-reference/data-types/datetime.md)) — Date of the event.
|
||||||
- `event_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Time of the event.
|
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Time of the event.
|
||||||
- `timestamp_ns` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Timestamp of the event with nanoseconds.
|
- `timestamp_ns` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Timestamp of the event with nanoseconds.
|
||||||
- `signal` ([Int32](../../sql-reference/data-types/int-uint.md)) — Signal number.
|
- `signal` ([Int32](../../sql-reference/data-types/int-uint.md)) — Signal number.
|
||||||
- `thread_id` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Thread ID.
|
- `thread_id` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Thread ID.
|
||||||
|
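A query sketch against this table, assuming (from the description above) that this page covers `system.crash_log`:

``` sql
-- Inspect the most recent fatal errors, if any were recorded.
SELECT event_time, signal, thread_id
FROM system.crash_log
ORDER BY event_time DESC
LIMIT 5;
```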
@ -3,7 +3,7 @@ slug: /en/operations/system-tables/dictionaries
|
|||||||
---
|
---
|
||||||
# dictionaries
|
# dictionaries
|
||||||
|
|
||||||
Contains information about [external dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md).
|
Contains information about [dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md).
|
||||||
|
|
||||||
Columns:
|
Columns:
|
||||||
|
|
||||||
@ -33,7 +33,7 @@ Columns:
|
|||||||
- `lifetime_min` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Minimum [lifetime](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md) of the dictionary in memory, after which ClickHouse tries to reload the dictionary (if `invalidate_query` is set, then only if it has changed). Set in seconds.
|
- `lifetime_min` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Minimum [lifetime](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md) of the dictionary in memory, after which ClickHouse tries to reload the dictionary (if `invalidate_query` is set, then only if it has changed). Set in seconds.
|
||||||
- `lifetime_max` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Maximum [lifetime](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md) of the dictionary in memory, after which ClickHouse tries to reload the dictionary (if `invalidate_query` is set, then only if it has changed). Set in seconds.
|
- `lifetime_max` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Maximum [lifetime](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md) of the dictionary in memory, after which ClickHouse tries to reload the dictionary (if `invalidate_query` is set, then only if it has changed). Set in seconds.
|
||||||
- `loading_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Start time for loading the dictionary.
|
- `loading_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Start time for loading the dictionary.
|
||||||
- `last_successful_update_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — End time for loading or updating the dictionary. Helps to monitor some troubles with external sources and investigate causes.
|
- `last_successful_update_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — End time for loading or updating the dictionary. Helps to monitor some troubles with dictionary sources and investigate the causes.
|
||||||
- `loading_duration` ([Float32](../../sql-reference/data-types/float.md)) — Duration of a dictionary loading.
|
- `loading_duration` ([Float32](../../sql-reference/data-types/float.md)) — Duration of a dictionary loading.
|
||||||
- `last_exception` ([String](../../sql-reference/data-types/string.md)) — Text of the error that occurs when creating or reloading the dictionary if the dictionary couldn’t be created.
|
- `last_exception` ([String](../../sql-reference/data-types/string.md)) — Text of the error that occurs when creating or reloading the dictionary if the dictionary couldn’t be created.
|
||||||
- `comment` ([String](../../sql-reference/data-types/string.md)) — Text of the comment to dictionary.
|
- `comment` ([String](../../sql-reference/data-types/string.md)) — Text of the comment to dictionary.
|
||||||
|
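A minimal query sketch over this table; `name` and `status` are standard columns of `system.dictionaries` that this hunk does not show:

``` sql
-- Check dictionary health: load status, last error, and load duration.
SELECT name, status, last_exception, loading_duration
FROM system.dictionaries;
```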
@ -1,7 +1,8 @@
|
|||||||
---
|
---
|
||||||
slug: /en/operations/system-tables/
|
slug: /en/operations/system-tables/
|
||||||
sidebar_position: 52
|
sidebar_position: 52
|
||||||
sidebar_label: System Tables
|
sidebar_label: Overview
|
||||||
|
pagination_next: 'en/operations/system-tables/asynchronous_metric_log'
|
||||||
---
|
---
|
||||||
|
|
||||||
# System Tables
|
# System Tables
|
||||||
@ -72,4 +73,3 @@ If procfs is supported and enabled on the system, ClickHouse server collects the
|
|||||||
- `OSReadBytes`
|
- `OSReadBytes`
|
||||||
- `OSWriteBytes`
|
- `OSWriteBytes`
|
||||||
|
|
||||||
[Original article](https://clickhouse.com/docs/en/operations/system-tables/) <!--hide-->
|
|
||||||
|
@ -3,31 +3,31 @@ slug: /en/operations/system-tables/mutations
|
|||||||
---
|
---
|
||||||
# mutations
|
# mutations
|
||||||
|
|
||||||
The table contains information about [mutations](../../sql-reference/statements/alter/index.md#mutations) of [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) tables and their progress. Each mutation command is represented by a single row.
|
The table contains information about [mutations](/docs/en/sql-reference/statements/alter/index.md#mutations) of [MergeTree](/docs/en/engines/table-engines/mergetree-family/mergetree.md) tables and their progress. Each mutation command is represented by a single row.
|
||||||
|
|
||||||
Columns:
|
Columns:
|
||||||
|
|
||||||
- `database` ([String](../../sql-reference/data-types/string.md)) — The name of the database to which the mutation was applied.
|
- `database` ([String](/docs/en/sql-reference/data-types/string.md)) — The name of the database to which the mutation was applied.
|
||||||
|
|
||||||
- `table` ([String](../../sql-reference/data-types/string.md)) — The name of the table to which the mutation was applied.
|
- `table` ([String](/docs/en/sql-reference/data-types/string.md)) — The name of the table to which the mutation was applied.
|
||||||
|
|
||||||
- `mutation_id` ([String](../../sql-reference/data-types/string.md)) — The ID of the mutation. For replicated tables these IDs correspond to znode names in the `<table_path_in_clickhouse_keeper>/mutations/` directory in ClickHouse Keeper. For non-replicated tables the IDs correspond to file names in the data directory of the table.
|
- `mutation_id` ([String](/docs/en/sql-reference/data-types/string.md)) — The ID of the mutation. For replicated tables these IDs correspond to znode names in the `<table_path_in_clickhouse_keeper>/mutations/` directory in ClickHouse Keeper. For non-replicated tables the IDs correspond to file names in the data directory of the table.
|
||||||
|
|
||||||
- `command` ([String](../../sql-reference/data-types/string.md)) — The mutation command string (the part of the query after `ALTER TABLE [db.]table`).
|
- `command` ([String](/docs/en/sql-reference/data-types/string.md)) — The mutation command string (the part of the query after `ALTER TABLE [db.]table`).
|
||||||
|
|
||||||
- `create_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the mutation command was submitted for execution.
|
- `create_time` ([DateTime](/docs/en/sql-reference/data-types/datetime.md)) — Date and time when the mutation command was submitted for execution.
|
||||||
|
|
||||||
- `block_numbers.partition_id` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — For mutations of replicated tables, the array contains the partitions' IDs (one record for each partition). For mutations of non-replicated tables the array is empty.
|
- `block_numbers.partition_id` ([Array](/docs/en/sql-reference/data-types/array.md)([String](/docs/en/sql-reference/data-types/string.md))) — For mutations of replicated tables, the array contains the partitions' IDs (one record for each partition). For mutations of non-replicated tables the array is empty.
|
||||||
|
|
||||||
- `block_numbers.number` ([Array](../../sql-reference/data-types/array.md)([Int64](../../sql-reference/data-types/int-uint.md))) — For mutations of replicated tables, the array contains one record for each partition, with the block number that was acquired by the mutation. Only parts that contain blocks with numbers less than this number will be mutated in the partition.
|
- `block_numbers.number` ([Array](/docs/en/sql-reference/data-types/array.md)([Int64](/docs/en/sql-reference/data-types/int-uint.md))) — For mutations of replicated tables, the array contains one record for each partition, with the block number that was acquired by the mutation. Only parts that contain blocks with numbers less than this number will be mutated in the partition.
|
||||||
|
|
||||||
In non-replicated tables, block numbers in all partitions form a single sequence. This means that for mutations of non-replicated tables, the column will contain one record with a single block number acquired by the mutation.
|
In non-replicated tables, block numbers in all partitions form a single sequence. This means that for mutations of non-replicated tables, the column will contain one record with a single block number acquired by the mutation.
|
||||||
|
|
||||||
- `parts_to_do_names` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — An array of names of data parts that need to be mutated for the mutation to complete.
|
- `parts_to_do_names` ([Array](/docs/en/sql-reference/data-types/array.md)([String](/docs/en/sql-reference/data-types/string.md))) — An array of names of data parts that need to be mutated for the mutation to complete.
|
||||||
|
|
||||||
- `parts_to_do` ([Int64](../../sql-reference/data-types/int-uint.md)) — The number of data parts that need to be mutated for the mutation to complete.
|
- `parts_to_do` ([Int64](/docs/en/sql-reference/data-types/int-uint.md)) — The number of data parts that need to be mutated for the mutation to complete.
|
||||||
|
|
||||||
- `is_done` ([UInt8](../../sql-reference/data-types/int-uint.md)) — The flag whether the mutation is done or not. Possible values:
|
- `is_done` ([UInt8](/docs/en/sql-reference/data-types/int-uint.md)) — The flag whether the mutation is done or not. Possible values:
|
||||||
- `1` if the mutation is completed,
|
- `1` if the mutation is completed,
|
||||||
- `0` if the mutation is still in process.
|
- `0` if the mutation is still in process.
|
||||||
|
|
||||||
@ -37,16 +37,16 @@ Even if `parts_to_do = 0` it is possible that a mutation of a replicated table i
|
|||||||
|
|
||||||
If there were problems with mutating some data parts, the following columns contain additional information:
|
If there were problems with mutating some data parts, the following columns contain additional information:
|
||||||
|
|
||||||
- `latest_failed_part` ([String](../../sql-reference/data-types/string.md)) — The name of the most recent part that could not be mutated.
|
- `latest_failed_part` ([String](/docs/en/sql-reference/data-types/string.md)) — The name of the most recent part that could not be mutated.
|
||||||
|
|
||||||
- `latest_fail_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — The date and time of the most recent part mutation failure.
|
- `latest_fail_time` ([DateTime](/docs/en/sql-reference/data-types/datetime.md)) — The date and time of the most recent part mutation failure.
|
||||||
|
|
||||||
- `latest_fail_reason` ([String](../../sql-reference/data-types/string.md)) — The exception message that caused the most recent part mutation failure.
|
- `latest_fail_reason` ([String](/docs/en/sql-reference/data-types/string.md)) — The exception message that caused the most recent part mutation failure.
|
||||||
|
|
||||||
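A query sketch built from the columns above, to spot unfinished or failing mutations:

``` sql
-- List mutations that are still running, with any recorded failure reason.
SELECT database, table, mutation_id, command, parts_to_do, latest_fail_reason
FROM system.mutations
WHERE is_done = 0;
```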
**See Also**
|
**See Also**
|
||||||
|
|
||||||
- [Mutations](../../sql-reference/statements/alter/index.md#mutations)
|
- [Mutations](/docs/en/sql-reference/statements/alter/index.md#mutations)
|
||||||
- [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table engine
|
- [MergeTree](/docs/en/engines/table-engines/mergetree-family/mergetree.md) table engine
|
||||||
- [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md) family
|
- [ReplicatedMergeTree](/docs/en/engines/table-engines/mergetree-family/replication.md) family
|
||||||
|
|
||||||
[Original article](https://clickhouse.com/docs/en/operations/system-tables/mutations) <!--hide-->
|
[Original article](https://clickhouse.com/docs/en/operations/system-tables/mutations) <!--hide-->
|
||||||
|
@ -75,7 +75,7 @@ Columns:
|
|||||||
|
|
||||||
- `primary_key_bytes_in_memory_allocated` ([UInt64](../../sql-reference/data-types/int-uint.md)) – The amount of memory (in bytes) reserved for primary key values.
|
- `primary_key_bytes_in_memory_allocated` ([UInt64](../../sql-reference/data-types/int-uint.md)) – The amount of memory (in bytes) reserved for primary key values.
|
||||||
|
|
||||||
- `is_frozen` ([UInt8](../../sql-reference/data-types/int-uint.md)) – Flag that shows that a partition data backup exists. 1, the backup exists. 0, the backup does not exist. For more details, see [FREEZE PARTITION](../../sql-reference/statements/alter/partition.md#alter_freeze-partition)
|
- `is_frozen` ([UInt8](../../sql-reference/data-types/int-uint.md)) – Flag that shows that a partition data backup exists. 1, the backup exists. 0, the backup does not exist. For more details, see [FREEZE PARTITION](../../sql-reference/statements/alter/partition.md/#alter_freeze-partition)
|
||||||
|
|
||||||
- `database` ([String](../../sql-reference/data-types/string.md)) – Name of the database.
|
- `database` ([String](../../sql-reference/data-types/string.md)) – Name of the database.
|
||||||
|
|
||||||
@ -87,25 +87,25 @@ Columns:
|
|||||||
|
|
||||||
- `disk_name` ([String](../../sql-reference/data-types/string.md)) – Name of a disk that stores the data part.
|
- `disk_name` ([String](../../sql-reference/data-types/string.md)) – Name of a disk that stores the data part.
|
||||||
|
|
||||||
- `hash_of_all_files` ([String](../../sql-reference/data-types/string.md)) – [sipHash128](../../sql-reference/functions/hash-functions.md#hash_functions-siphash128) of compressed files.
|
- `hash_of_all_files` ([String](../../sql-reference/data-types/string.md)) – [sipHash128](../../sql-reference/functions/hash-functions.md/#hash_functions-siphash128) of compressed files.
|
||||||
|
|
||||||
- `hash_of_uncompressed_files` ([String](../../sql-reference/data-types/string.md)) – [sipHash128](../../sql-reference/functions/hash-functions.md#hash_functions-siphash128) of uncompressed files (files with marks, index file etc.).
|
- `hash_of_uncompressed_files` ([String](../../sql-reference/data-types/string.md)) – [sipHash128](../../sql-reference/functions/hash-functions.md/#hash_functions-siphash128) of uncompressed files (files with marks, index file etc.).
|
||||||
|
|
||||||
- `uncompressed_hash_of_compressed_files` ([String](../../sql-reference/data-types/string.md)) – [sipHash128](../../sql-reference/functions/hash-functions.md#hash_functions-siphash128) of data in the compressed files as if they were uncompressed.
|
- `uncompressed_hash_of_compressed_files` ([String](../../sql-reference/data-types/string.md)) – [sipHash128](../../sql-reference/functions/hash-functions.md/#hash_functions-siphash128) of data in the compressed files as if they were uncompressed.
|
||||||
|
|
||||||
- `delete_ttl_info_min` ([DateTime](../../sql-reference/data-types/datetime.md)) — The minimum value of the date and time key for [TTL DELETE rule](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl).
|
- `delete_ttl_info_min` ([DateTime](../../sql-reference/data-types/datetime.md)) — The minimum value of the date and time key for [TTL DELETE rule](../../engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-ttl).
|
||||||
|
|
||||||
- `delete_ttl_info_max` ([DateTime](../../sql-reference/data-types/datetime.md)) — The maximum value of the date and time key for [TTL DELETE rule](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl).
|
- `delete_ttl_info_max` ([DateTime](../../sql-reference/data-types/datetime.md)) — The maximum value of the date and time key for [TTL DELETE rule](../../engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-ttl).
|
||||||
|
|
||||||
- `move_ttl_info.expression` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Array of expressions. Each expression defines a [TTL MOVE rule](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl).
|
- `move_ttl_info.expression` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Array of expressions. Each expression defines a [TTL MOVE rule](../../engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-ttl).
|
||||||
|
|
||||||
:::warning
|
:::warning
|
||||||
The `move_ttl_info.expression` array is kept mostly for backward compatibility; now the simplest way to check a `TTL MOVE` rule is to use the `move_ttl_info.min` and `move_ttl_info.max` fields.
|
The `move_ttl_info.expression` array is kept mostly for backward compatibility; now the simplest way to check a `TTL MOVE` rule is to use the `move_ttl_info.min` and `move_ttl_info.max` fields.
|
||||||
:::
|
:::
|
||||||
|
|
||||||
- `move_ttl_info.min` ([Array](../../sql-reference/data-types/array.md)([DateTime](../../sql-reference/data-types/datetime.md))) — Array of date and time values. Each element describes the minimum key value for a [TTL MOVE rule](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl).
|
- `move_ttl_info.min` ([Array](../../sql-reference/data-types/array.md)([DateTime](../../sql-reference/data-types/datetime.md))) — Array of date and time values. Each element describes the minimum key value for a [TTL MOVE rule](../../engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-ttl).
|
||||||
|
|
||||||
- `move_ttl_info.max` ([Array](../../sql-reference/data-types/array.md)([DateTime](../../sql-reference/data-types/datetime.md))) — Array of date and time values. Each element describes the maximum key value for a [TTL MOVE rule](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl).
|
- `move_ttl_info.max` ([Array](../../sql-reference/data-types/array.md)([DateTime](../../sql-reference/data-types/datetime.md))) — Array of date and time values. Each element describes the maximum key value for a [TTL MOVE rule](../../engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-ttl).
|
||||||
|
|
||||||
- `bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) – Alias for `bytes_on_disk`.
|
- `bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) – Alias for `bytes_on_disk`.
|
||||||
|
|
||||||
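A query sketch over these columns; `visits` is an illustrative table name, and `name` and `active` are standard `system.parts` columns not shown in this hunk:

``` sql
-- Show active parts of one table, the disk they live on, and whether a backup exists.
SELECT name, disk_name, is_frozen
FROM system.parts
WHERE table = 'visits' AND active;
```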
@ -166,6 +166,6 @@ move_ttl_info.max: []
|
|||||||
**See Also**
|
**See Also**
|
||||||
|
|
||||||
- [MergeTree family](../../engines/table-engines/mergetree-family/mergetree.md)
|
- [MergeTree family](../../engines/table-engines/mergetree-family/mergetree.md)
|
||||||
- [TTL for Columns and Tables](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl)
|
- [TTL for Columns and Tables](../../engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-ttl)
|
||||||
|
|
||||||
[Original article](https://clickhouse.com/docs/en/operations/system-tables/parts) <!--hide-->
|
[Original article](https://clickhouse.com/docs/en/operations/system-tables/parts) <!--hide-->
|
||||||
|
@ -29,7 +29,7 @@ Columns:
|
|||||||
- `MUTATE_PART` — Apply one or several mutations to the part.
|
- `MUTATE_PART` — Apply one or several mutations to the part.
|
||||||
- `ALTER_METADATA` — Apply an ALTER modification according to the global /metadata and /columns paths.
|
- `ALTER_METADATA` — Apply an ALTER modification according to the global /metadata and /columns paths.
|
||||||
|
|
||||||
- `create_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was submitted for execution.
|
- `create_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was submitted for execution.
|
||||||
|
|
||||||
- `required_quorum` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The number of replicas waiting for the task to complete with confirmation of completion. This column is only relevant for the `GET_PARTS` task.
|
- `required_quorum` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The number of replicas waiting for the task to complete with confirmation of completion. This column is only relevant for the `GET_PARTS` task.
|
||||||
|
|
||||||
@ -47,13 +47,13 @@ Columns:
|
|||||||
|
|
||||||
- `last_exception` ([String](../../sql-reference/data-types/string.md)) — Text message about the last error that occurred (if any).
|
- `last_exception` ([String](../../sql-reference/data-types/string.md)) — Text message about the last error that occurred (if any).
|
||||||
|
|
||||||
- `last_attempt_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was last attempted.
|
- `last_attempt_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was last attempted.
|
||||||
|
|
||||||
- `num_postponed` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The number of postponed tasks.
|
- `num_postponed` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The number of postponed tasks.
|
||||||
|
|
||||||
- `postpone_reason` ([String](../../sql-reference/data-types/string.md)) — The reason why the task was postponed.
|
- `postpone_reason` ([String](../../sql-reference/data-types/string.md)) — The reason why the task was postponed.
|
||||||
|
|
||||||
- `last_postpone_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was last postponed.
|
- `last_postpone_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was last postponed.
|
||||||
|
|
||||||
- `merge_type` ([String](../../sql-reference/data-types/string.md)) — Type of the current merge. Empty if it's a mutation.
|
- `merge_type` ([String](../../sql-reference/data-types/string.md)) — Type of the current merge. Empty if it's a mutation.
|
||||||
|
|
||||||
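A query sketch for surfacing stuck tasks; `database` and `table` are standard columns of this table that the hunk does not show:

``` sql
-- Find postponed replication tasks and the reason they were postponed.
SELECT database, table, postpone_reason, last_postpone_time
FROM system.replication_queue
WHERE num_postponed > 0;
```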
|
@ -24,6 +24,7 @@ Columns:
|
|||||||
- `DOUBLE_SHA1_PASSWORD`
|
- `DOUBLE_SHA1_PASSWORD`
|
||||||
- `LDAP`
|
- `LDAP`
|
||||||
- `KERBEROS`
|
- `KERBEROS`
|
||||||
|
- `SSL_CERTIFICATE`
|
||||||
- `profiles` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — The list of profiles set for all roles and/or users.
|
- `profiles` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — The list of profiles set for all roles and/or users.
|
||||||
- `roles` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — The list of roles to which the profile is applied.
|
- `roles` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — The list of roles to which the profile is applied.
|
||||||
- `settings` ([Array](../../sql-reference/data-types/array.md)([Tuple](../../sql-reference/data-types/tuple.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md), [String](../../sql-reference/data-types/string.md)))) — Settings that were changed when the client logged in/out.
|
- `settings` ([Array](../../sql-reference/data-types/array.md)([Tuple](../../sql-reference/data-types/tuple.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md), [String](../../sql-reference/data-types/string.md)))) — Settings that were changed when the client logged in/out.
|
||||||
|
@ -12,7 +12,7 @@ Columns:
|
|||||||
|
|
||||||
- `storage` ([String](../../sql-reference/data-types/string.md)) — Path to the storage of users. Configured in the `access_control_path` parameter.
|
- `storage` ([String](../../sql-reference/data-types/string.md)) — Path to the storage of users. Configured in the `access_control_path` parameter.
|
||||||
|
|
||||||
- `auth_type` ([Enum8](../../sql-reference/data-types/enum.md)('no_password' = 0,'plaintext_password' = 1, 'sha256_password' = 2, 'double_sha1_password' = 3)) — Shows the authentication type. There are multiple ways of user identification: with no password, with plain text password, with [SHA256](https://ru.wikipedia.org/wiki/SHA-2)-encoded password or with [double SHA-1](https://ru.wikipedia.org/wiki/SHA-1)-encoded password.
|
- `auth_type` ([Enum8](../../sql-reference/data-types/enum.md)('no_password' = 0, 'plaintext_password' = 1, 'sha256_password' = 2, 'double_sha1_password' = 3, 'ldap' = 4, 'kerberos' = 5, 'ssl_certificate' = 6)) — Shows the authentication type. There are multiple ways of user identification: with no password, with a plain text password, with a [SHA256](https://ru.wikipedia.org/wiki/SHA-2)-encoded password, with a [double SHA-1](https://ru.wikipedia.org/wiki/SHA-1)-encoded password, or via LDAP, Kerberos, or an SSL certificate.
|
||||||
|
|
||||||
- `auth_params` ([String](../../sql-reference/data-types/string.md)) — Authentication parameters in the JSON format depending on the `auth_type`.
|
- `auth_params` ([String](../../sql-reference/data-types/string.md)) — Authentication parameters in the JSON format depending on the `auth_type`.
|
||||||
|
|
||||||
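A minimal query sketch; `name` is a standard column of `system.users` not shown in this hunk:

``` sql
-- List users, where they are stored, and how they authenticate.
SELECT name, storage, auth_type
FROM system.users;
```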
|
@ -189,10 +189,12 @@ preAllocSize=131072
|
|||||||
# especially if there are a lot of clients. To prevent ZooKeeper from running
|
# especially if there are a lot of clients. To prevent ZooKeeper from running
|
||||||
# out of memory due to queued requests, ZooKeeper will throttle clients so that
|
# out of memory due to queued requests, ZooKeeper will throttle clients so that
|
||||||
# there is no more than globalOutstandingLimit outstanding requests in the
|
# there is no more than globalOutstandingLimit outstanding requests in the
|
||||||
# system. The default limit is 1,000.ZooKeeper logs transactions to a
|
# system. The default limit is 1000.
|
||||||
# transaction log. After snapCount transactions are written to a log file a
|
# globalOutstandingLimit=1000
|
||||||
# snapshot is started and a new transaction log file is started. The default
|
|
||||||
# snapCount is 10,000.
|
# ZooKeeper logs transactions to a transaction log. After snapCount transactions
|
||||||
|
# are written to a log file a snapshot is started and a new transaction log file
|
||||||
|
# is started. The default snapCount is 100000.
|
||||||
snapCount=3000000
|
snapCount=3000000
|
||||||
|
|
||||||
# If this option is defined, requests will be logged to a trace file named
|
# If this option is defined, requests will be logged to a trace file named
|
||||||
|
@ -109,56 +109,38 @@ In the report you can find:
|
|||||||
|
|
||||||
`clickhouse-benchmark` can compare the performance of two running ClickHouse servers.
|
`clickhouse-benchmark` can compare the performance of two running ClickHouse servers.
|
||||||
|
|
||||||
To use the comparison mode, specify the endpoints of both servers by two pairs of `--host`, `--port` keys. Keys are matched together by position in the arguments list: the first `--host` is matched with the first `--port`, and so on. `clickhouse-benchmark` establishes connections to both servers, then sends queries. Each query is addressed to a randomly selected server. The results are shown for each server separately.
|
To use the comparison mode, specify the endpoints of both servers by two pairs of `--host`, `--port` keys. Keys are matched together by position in the arguments list: the first `--host` is matched with the first `--port`, and so on. `clickhouse-benchmark` establishes connections to both servers, then sends queries. Each query is addressed to a randomly selected server. The results are shown in a table.
|
||||||
|
|
||||||
## Example {#clickhouse-benchmark-example}
|
## Example {#clickhouse-benchmark-example}
|
||||||
|
|
||||||
``` bash
|
``` bash
|
||||||
$ echo "SELECT * FROM system.numbers LIMIT 10000000 OFFSET 10000000" | clickhouse-benchmark -i 10
|
$ echo "SELECT * FROM system.numbers LIMIT 10000000 OFFSET 10000000" | clickhouse-benchmark --host=localhost --port=9001 --host=localhost --port=9000 -i 10
|
||||||
```
|
```
|
||||||
|
|
||||||
``` text
|
``` text
|
||||||
Loaded 1 queries.
|
Loaded 1 queries.
|
||||||
|
|
||||||
Queries executed: 6.
|
Queries executed: 5.
|
||||||
|
|
||||||
localhost:9000, queries 6, QPS: 6.153, RPS: 123398340.957, MiB/s: 941.455, result RPS: 61532982.200, result MiB/s: 469.459.
|
localhost:9001, queries 2, QPS: 3.764, RPS: 75446929.370, MiB/s: 575.614, result RPS: 37639659.982, result MiB/s: 287.168.
|
||||||
|
localhost:9000, queries 3, QPS: 3.815, RPS: 76466659.385, MiB/s: 583.394, result RPS: 38148392.297, result MiB/s: 291.049.
|
||||||
|
|
||||||
0.000% 0.159 sec.
|
0.000% 0.258 sec. 0.250 sec.
|
||||||
10.000% 0.159 sec.
|
10.000% 0.258 sec. 0.250 sec.
|
||||||
20.000% 0.159 sec.
|
20.000% 0.258 sec. 0.250 sec.
|
||||||
30.000% 0.160 sec.
|
30.000% 0.258 sec. 0.267 sec.
|
||||||
40.000% 0.160 sec.
|
40.000% 0.258 sec. 0.267 sec.
|
||||||
50.000% 0.162 sec.
|
50.000% 0.273 sec. 0.267 sec.
|
||||||
60.000% 0.164 sec.
|
60.000% 0.273 sec. 0.267 sec.
|
||||||
70.000% 0.165 sec.
|
70.000% 0.273 sec. 0.267 sec.
|
||||||
80.000% 0.166 sec.
|
80.000% 0.273 sec. 0.269 sec.
|
||||||
90.000% 0.166 sec.
|
90.000% 0.273 sec. 0.269 sec.
|
||||||
95.000% 0.167 sec.
|
95.000% 0.273 sec. 0.269 sec.
|
||||||
99.000% 0.167 sec.
|
99.000% 0.273 sec. 0.269 sec.
|
||||||
99.900% 0.167 sec.
|
99.900% 0.273 sec. 0.269 sec.
|
||||||
99.990% 0.167 sec.
|
99.990% 0.273 sec. 0.269 sec.
|
||||||
|
|
||||||
|
No difference proven at 99.5% confidence
|
||||||
|
|
||||||
Queries executed: 10.
|
|
||||||
|
|
||||||
localhost:9000, queries 10, QPS: 6.082, RPS: 121959604.568, MiB/s: 930.478, result RPS: 60815551.642, result MiB/s: 463.986.
|
|
||||||
|
|
||||||
0.000% 0.159 sec.
|
|
||||||
10.000% 0.159 sec.
|
|
||||||
20.000% 0.160 sec.
|
|
||||||
30.000% 0.163 sec.
|
|
||||||
40.000% 0.164 sec.
|
|
||||||
50.000% 0.165 sec.
|
|
||||||
60.000% 0.166 sec.
|
|
||||||
70.000% 0.166 sec.
|
|
||||||
80.000% 0.167 sec.
|
|
||||||
90.000% 0.167 sec.
|
|
||||||
95.000% 0.170 sec.
|
|
||||||
99.000% 0.172 sec.
|
|
||||||
99.900% 0.172 sec.
|
|
||||||
99.990% 0.172 sec.
|
|
||||||
```
|
```
|
||||||
|
|
||||||
[Original article](https://clickhouse.com/docs/en/operations/utilities/clickhouse-benchmark.md) <!--hide-->
|
[Original article](https://clickhouse.com/docs/en/operations/utilities/clickhouse-benchmark.md) <!--hide-->
|
||||||
|
@ -1,10 +1,11 @@
|
|||||||
---
|
---
|
||||||
slug: /en/operations/utilities/
|
slug: /en/operations/utilities/
|
||||||
sidebar_position: 56
|
sidebar_position: 56
|
||||||
sidebar_label: Utilities
|
sidebar_label: Overview
|
||||||
|
pagination_next: 'en/operations/utilities/clickhouse-copier'
|
||||||
---
|
---
|
||||||
|
|
||||||
# ClickHouse Utility
|
# ClickHouse Utilities
|
||||||
|
|
||||||
- [clickhouse-local](../../operations/utilities/clickhouse-local.md) — Allows running SQL queries on data without starting the ClickHouse server, similar to how `awk` does this.
|
- [clickhouse-local](../../operations/utilities/clickhouse-local.md) — Allows running SQL queries on data without starting the ClickHouse server, similar to how `awk` does this.
|
||||||
- [clickhouse-copier](../../operations/utilities/clickhouse-copier.md) — Copies (and reshards) data from one cluster to another cluster.
|
- [clickhouse-copier](../../operations/utilities/clickhouse-copier.md) — Copies (and reshards) data from one cluster to another cluster.
|
||||||
|
@ -6,7 +6,7 @@ sidebar_label: Date32
|
|||||||
|
|
||||||
# Date32
|
# Date32
|
||||||
|
|
||||||
A date. Supports the date range same with [Datetime64](../../sql-reference/data-types/datetime64.md). Stored in four bytes as the number of days since 1900-01-01. Allows storing values till 2299-12-31.
|
A date. Supports the same date range as [DateTime64](../../sql-reference/data-types/datetime64.md). Stored as a signed 32-bit integer in native byte order, with the value representing the days since 1970-01-01 (0 represents 1970-01-01 and negative values represent the days before 1970).
|
||||||
|
|
||||||
**Examples**
|
**Examples**
|
||||||
|
|
||||||
|
@ -4,7 +4,7 @@ sidebar_position: 48
|
|||||||
sidebar_label: DateTime
|
sidebar_label: DateTime
|
||||||
---
|
---
|
||||||
|
|
||||||
# Datetime
|
# DateTime
|
||||||
|
|
||||||
Allows storing an instant in time that can be expressed as a calendar date and a time of day.
|
Allows storing an instant in time that can be expressed as a calendar date and a time of day.
|
||||||
|
|
||||||
|
@ -4,7 +4,7 @@ sidebar_position: 49
|
|||||||
sidebar_label: DateTime64
|
sidebar_label: DateTime64
|
||||||
---
|
---
|
||||||
|
|
||||||
# Datetime64
|
# DateTime64
|
||||||
|
|
||||||
Allows storing an instant in time that can be expressed as a calendar date and a time of day, with defined sub-second precision.
|
Allows storing an instant in time that can be expressed as a calendar date and a time of day, with defined sub-second precision.
|
||||||
|
|
||||||
|
@ -1,8 +1,8 @@
|
|||||||
position: 37
|
position: 37
|
||||||
label: 'External Dictionaries'
|
label: 'Dictionaries'
|
||||||
collapsible: true
|
collapsible: true
|
||||||
collapsed: true
|
collapsed: true
|
||||||
link:
|
link:
|
||||||
type: generated-index
|
type: generated-index
|
||||||
title: External Dictionaries
|
title: Dictionaries
|
||||||
slug: /en/sql-reference/dictionaries/external-dictionaries
|
slug: /en/sql-reference/dictionaries/external-dictionaries
|
||||||
|
@ -0,0 +1,4 @@
|
|||||||
|
:::tip
|
||||||
|
If you are using a dictionary with ClickHouse Cloud, please use the DDL query option to create your dictionaries, and create your dictionary as user `default`.
|
||||||
|
Also, verify the list of supported dictionary sources in the [Cloud Compatibility guide](/docs/en/whats-new/cloud-capabilities.md).
|
||||||
|
:::
|
@ -3,6 +3,7 @@ slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-l
|
|||||||
sidebar_position: 41
|
sidebar_position: 41
|
||||||
sidebar_label: Storing Dictionaries in Memory
|
sidebar_label: Storing Dictionaries in Memory
|
||||||
---
|
---
|
||||||
|
import CloudDetails from '@site/docs/en/sql-reference/dictionaries/external-dictionaries/_snippet_dictionary_in_cloud.md';
|
||||||
|
|
||||||
# Storing Dictionaries in Memory
|
# Storing Dictionaries in Memory
|
||||||
|
|
||||||
@ -22,7 +23,9 @@ ClickHouse generates an exception for errors with dictionaries. Examples of erro
|
|||||||
- The dictionary being accessed could not be loaded.
|
- The dictionary being accessed could not be loaded.
|
||||||
- Error querying a `cached` dictionary.
|
- Error querying a `cached` dictionary.
|
||||||
|
|
||||||
You can view the list of external dictionaries and their statuses in the [system.dictionaries](../../../operations/system-tables/dictionaries.md) table.
|
You can view the list of dictionaries and their statuses in the [system.dictionaries](../../../operations/system-tables/dictionaries.md) table.
|
||||||
|
|
||||||
|
<CloudDetails />
|
||||||
|
|
||||||
The configuration looks like this:
|
The configuration looks like this:
|
||||||
|
|
||||||
@@ -3,6 +3,7 @@ slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-l
 sidebar_position: 42
 sidebar_label: Dictionary Updates
 ---
+import CloudDetails from '@site/docs/en/sql-reference/dictionaries/external-dictionaries/_snippet_dictionary_in_cloud.md';
 
 # Dictionary Updates
 
@@ -12,6 +13,8 @@ Dictionary updates (other than loading for first use) do not block queries. Duri
 
 Example of settings:
 
+<CloudDetails />
+
 ``` xml
 <dictionary>
     ...
@ -4,12 +4,15 @@ sidebar_position: 46
|
|||||||
sidebar_label: Polygon Dictionaries With Grids
|
sidebar_label: Polygon Dictionaries With Grids
|
||||||
title: "Polygon dictionaries"
|
title: "Polygon dictionaries"
|
||||||
---
|
---
|
||||||
|
import CloudDetails from '@site/docs/en/sql-reference/dictionaries/external-dictionaries/_snippet_dictionary_in_cloud.md';
|
||||||
|
|
||||||
Polygon dictionaries allow you to efficiently search for the polygon containing specified points.
|
Polygon dictionaries allow you to efficiently search for the polygon containing specified points.
|
||||||
For example: defining a city area by geographical coordinates.
|
For example: defining a city area by geographical coordinates.
|
||||||
|
|
||||||
Example of a polygon dictionary configuration:
|
Example of a polygon dictionary configuration:
|
||||||
|
|
||||||
|
<CloudDetails />
|
||||||
|
|
||||||
``` xml
|
``` xml
|
||||||
<dictionary>
|
<dictionary>
|
||||||
<structure>
|
<structure>
|
||||||
@ -78,7 +81,7 @@ To respond to the query, there is a corresponding cell, and the index for the po
|
|||||||
|
|
||||||
- `POLYGON`. Synonym to `POLYGON_INDEX_CELL`.
|
- `POLYGON`. Synonym to `POLYGON_INDEX_CELL`.
|
||||||
|
|
||||||
Dictionary queries are carried out using standard [functions](../../../sql-reference/functions/ext-dict-functions.md) for working with external dictionaries.
|
Dictionary queries are carried out using standard [functions](../../../sql-reference/functions/ext-dict-functions.md) for working with dictionaries.
|
||||||
An important difference is that here the keys will be the points for which you want to find the polygon containing them.
|
An important difference is that here the keys will be the points for which you want to find the polygon containing them.
|
||||||
|
|
||||||
**Example**
|
**Example**
|
||||||
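To make "the keys are points" concrete, a hedged query sketch (the dictionary and attribute names are hypothetical):

```sql
-- Look up which polygon (e.g. a city area) contains a (longitude, latitude) point.
SELECT dictGet('city_polygons', 'name', (37.62, 55.75)) AS city;
```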
@@ -1,12 +1,15 @@
 ---
 slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources
 sidebar_position: 43
-sidebar_label: Sources of External Dictionaries
+sidebar_label: Dictionary Sources
 ---
+import CloudDetails from '@site/docs/en/sql-reference/dictionaries/external-dictionaries/_snippet_dictionary_in_cloud.md';
 
-# Sources of External Dictionaries
+# Dictionary Sources
 
-An external dictionary can be connected to ClickHouse from many different sources.
+<CloudDetails />
+
+A dictionary can be connected to ClickHouse from many different sources.
 
 If the dictionary is configured using an xml-file, the configuration looks like this:
 
@@ -65,13 +68,13 @@ Types of sources (`source_type`):
 - [Executable Pool](#dicts-external_dicts_dict_sources-executable_pool)
 - [HTTP(s)](#dicts-external_dicts_dict_sources-http)
 - DBMS
-    - [ODBC](#dicts-external_dicts_dict_sources-odbc)
-    - [MySQL](#dicts-external_dicts_dict_sources-mysql)
-    - [ClickHouse](#dicts-external_dicts_dict_sources-clickhouse)
-    - [MongoDB](#dicts-external_dicts_dict_sources-mongodb)
-    - [Redis](#dicts-external_dicts_dict_sources-redis)
-    - [Cassandra](#dicts-external_dicts_dict_sources-cassandra)
-    - [PostgreSQL](#dicts-external_dicts_dict_sources-postgresql)
+    - [ODBC](#odbc)
+    - [MySQL](#mysql)
+    - [ClickHouse](#clickhouse)
+    - [MongoDB](#mongodb)
+    - [Redis](#redis)
+    - [Cassandra](#cassandra)
+    - [PostgreSQL](#postgresql)
 
 ## Local File
 
@@ -3,9 +3,12 @@ slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-s
 sidebar_position: 44
 sidebar_label: Dictionary Key and Fields
 ---
+import CloudDetails from '@site/docs/en/sql-reference/dictionaries/external-dictionaries/_snippet_dictionary_in_cloud.md';
 
 # Dictionary Key and Fields
 
+<CloudDetails />
+
 The `structure` clause describes the dictionary key and fields available for queries.
 
 XML description:
@@ -171,5 +174,5 @@ Configuration fields:
 
 **See Also**
 
-- [Functions for working with external dictionaries](../../../sql-reference/functions/ext-dict-functions.md).
+- [Functions for working with dictionaries](../../../sql-reference/functions/ext-dict-functions.md).
 
@@ -1,10 +1,13 @@
 ---
 slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict
 sidebar_position: 40
-sidebar_label: Configuring an External Dictionary
+sidebar_label: Configuring a Dictionary
 ---
+import CloudDetails from '@site/docs/en/sql-reference/dictionaries/external-dictionaries/_snippet_dictionary_in_cloud.md';
 
-# Configuring an External Dictionary
+# Configuring a Dictionary
 
+<CloudDetails />
+
 If dictionary is configured using xml file, than dictionary configuration has the following structure:
 
@@ -3,18 +3,23 @@ slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts
 sidebar_position: 39
 sidebar_label: General Description
 ---
+import CloudDetails from '@site/docs/en/sql-reference/dictionaries/external-dictionaries/_snippet_dictionary_in_cloud.md';
 
-# External Dictionaries
+# Dictionaries
 
-You can add your own dictionaries from various data sources. The data source for a dictionary can be a local text or executable file, an HTTP(s) resource, or another DBMS. For more information, see “[Sources for external dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md)”.
+:::tip Tutorial
+If you are getting started with Dictionaries in ClickHouse we have a tutorial that covers that topic. Take a look [here](/docs/en/tutorial.md).
+:::
+
+You can add your own dictionaries from various data sources. The source for a dictionary can be a ClickHouse table, a local text or executable file, an HTTP(s) resource, or another DBMS. For more information, see “[Dictionary Sources](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md)”.
 
 ClickHouse:
 
 - Fully or partially stores dictionaries in RAM.
 - Periodically updates dictionaries and dynamically loads missing values. In other words, dictionaries can be loaded dynamically.
-- Allows to create external dictionaries with xml files or [DDL queries](../../../sql-reference/statements/create/dictionary.md).
+- Allows creating dictionaries with xml files or [DDL queries](../../../sql-reference/statements/create/dictionary.md).
 
-The configuration of external dictionaries can be located in one or more xml-files. The path to the configuration is specified in the [dictionaries_config](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-dictionaries_config) parameter.
+The configuration of dictionaries can be located in one or more xml-files. The path to the configuration is specified in the [dictionaries_config](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-dictionaries_config) parameter.
 
 Dictionaries can be loaded at server startup or at first use, depending on the [dictionaries_lazy_load](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-dictionaries_lazy_load) setting.
 
@@ -24,6 +29,22 @@ The [dictionaries](../../../operations/system-tables/dictionaries.md#system_tabl
 - Configuration parameters.
 - Metrics like amount of RAM allocated for the dictionary or a number of queries since the dictionary was successfully loaded.
 
+<CloudDetails />
+
+## Creating a dictionary with a DDL query
+
+Dictionaries can be created with [DDL queries](../../../sql-reference/statements/create/dictionary.md), and this is the recommended method because with DDL created dictionaries:
+- No additional records are added to server configuration files
+- The dictionaries can be worked with as first-class entities, like tables or views
+- Data can be read directly, using familiar SELECT rather than dictionary table functions
+- The dictionaries can be easily renamed
+
+## Creating a dictionary with a configuration file
+
+:::note
+Creating a dictionary with a configuration file is not applicable to ClickHouse Cloud. Please use DDL (see above), and create your dictionary as user `default`.
+:::
+
 The dictionary configuration file has the following format:
 
 ``` xml
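A hedged sketch of the DDL route recommended in the hunk above (all names are hypothetical):

```sql
-- A DDL-created dictionary is a first-class entity: no server config edits,
-- visible in system.dictionaries, readable with a plain SELECT.
CREATE DICTIONARY user_names
(
    id   UInt64,
    name String
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(TABLE 'users'))  -- assumes a local source table 'users'
LAYOUT(HASHED())
LIFETIME(MIN 0 MAX 300);

-- Familiar SELECT instead of a dictionary table function:
SELECT * FROM user_names LIMIT 5;

-- Renaming is straightforward (assuming RENAME DICTIONARY is available
-- in your server version):
RENAME DICTIONARY user_names TO customer_names;
```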
@ -44,18 +65,17 @@ The dictionary configuration file has the following format:
|
|||||||
|
|
||||||
You can [configure](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md) any number of dictionaries in the same file.
|
You can [configure](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md) any number of dictionaries in the same file.
|
||||||
|
|
||||||
[DDL queries for dictionaries](../../../sql-reference/statements/create/dictionary.md) does not require any additional records in server configuration. They allow to work with dictionaries as first-class entities, like tables or views.
|
|
||||||
|
|
||||||
:::note
|
:::note
|
||||||
You can convert values for a small dictionary by describing it in a `SELECT` query (see the [transform](../../../sql-reference/functions/other-functions.md) function). This functionality is not related to external dictionaries.
|
You can convert values for a small dictionary by describing it in a `SELECT` query (see the [transform](../../../sql-reference/functions/other-functions.md) function). This functionality is not related to dictionaries.
|
||||||
:::
|
:::
|
||||||
|
|
||||||
## See Also
|
## See Also
|
||||||
|
|
||||||
- [Configuring an External Dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md)
|
- [Configuring a Dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md)
|
||||||
- [Storing Dictionaries in Memory](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md)
|
- [Storing Dictionaries in Memory](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md)
|
||||||
- [Dictionary Updates](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md)
|
- [Dictionary Updates](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md)
|
||||||
- [Sources of External Dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md)
|
- [Dictionary Sources](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md)
|
||||||
- [Dictionary Key and Fields](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md)
|
- [Dictionary Key and Fields](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md)
|
||||||
- [Functions for Working with External Dictionaries](../../../sql-reference/functions/ext-dict-functions.md)
|
- [Functions for Working with Dictionaries](../../../sql-reference/functions/ext-dict-functions.md)
|
||||||
|
|
||||||
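For the `transform` note above, a small hedged example of converting values inline, with no dictionary involved:

```sql
-- Map a handful of codes to labels directly in the query;
-- unmatched values fall back to the default ('unknown').
SELECT
    code,
    transform(code, [1, 2, 3], ['new', 'active', 'banned'], 'unknown') AS status
FROM (SELECT arrayJoin([1, 2, 4]) AS code);
```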
@@ -12,6 +12,6 @@ ClickHouse supports special functions for working with dictionaries that can be
 
 ClickHouse supports:
 
-- [Built-in dictionaries](../../sql-reference/dictionaries/internal-dicts.md#internal_dicts) with a specific [set of functions](../../sql-reference/functions/ym-dict-functions.md).
-- [Plug-in (external) dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md#dicts-external-dicts) with a [set of functions](../../sql-reference/functions/ext-dict-functions.md).
+- [Dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md#dicts-external-dicts) with a [set of functions](../../sql-reference/functions/ext-dict-functions.md).
+- [Embedded dictionaries](../../sql-reference/dictionaries/internal-dicts.md#internal_dicts) with a specific [set of functions](../../sql-reference/functions/ym-dict-functions.md).
 
@@ -1,10 +1,13 @@
 ---
 slug: /en/sql-reference/dictionaries/internal-dicts
 sidebar_position: 39
-sidebar_label: Internal Dictionaries
+sidebar_label: Embedded Dictionaries
 ---
+import SelfManaged from '@site/docs/en/_snippets/_self_managed_only_no_roadmap.md';
 
-# Internal Dictionaries
+# Embedded Dictionaries
 
+<SelfManaged />
+
 ClickHouse contains a built-in feature for working with a geobase.
 
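For context, the embedded dictionaries are the geobase helpers. A hedged sketch (these functions only return useful results once geobase files are configured on the server; the region id is arbitrary):

```sql
-- Resolve a geobase region id to its name.
SELECT regionToName(toUInt32(213)) AS region;
```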
@@ -65,6 +65,11 @@ An exception is thrown when dividing by zero or when dividing a minimal negative
 
 Differs from [modulo](#modulo) in that it returns zero when the divisor is zero.
 
+## positive_modulo(a, b)
+Calculates the remainder when dividing `a` by `b`. Similar to function `modulo` except that `positive_modulo` always return non-negative number.
+
+Notice that `positive_modulo` is 4-5 times slower than `modulo`. You should not use `positive_modulo` unless you want to get positive result and don't care about performance too much.
+
 ## negate(a), -a operator
 
 Calculates a number with the reverse sign. The result is always signed.
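A quick hedged illustration of the behavioral difference documented above:

```sql
-- modulo follows the dividend's sign; positive_modulo never goes negative.
SELECT
    modulo(-7, 3)          AS mod,   -- -1
    positive_modulo(-7, 3) AS pmod;  --  2
```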
@@ -156,3 +161,140 @@ Result:
 │          -1 │
 └─────────────┘
 ```
+
+## multiplyDecimal(a, b[, result_scale])
+
+Performs multiplication on two decimals. Result value will be of type [Decimal256](../../sql-reference/data-types/decimal.md).
+Result scale can be explicitly specified by `result_scale` argument (const Integer in range `[0, 76]`). If not specified, the result scale is the max scale of given arguments.
+
+:::note
+These functions work significantly slower than usual `multiply`.
+In case you don't really need controlled precision and/or need fast computation, consider using [multiply](#multiply)
+:::
+
+**Syntax**
+
+```sql
+multiplyDecimal(a, b[, result_scale])
+```
+
+**Arguments**
+
+- `a` — First value: [Decimal](../../sql-reference/data-types/decimal.md).
+- `b` — Second value: [Decimal](../../sql-reference/data-types/decimal.md).
+- `result_scale` — Scale of result: [Int/UInt](../../sql-reference/data-types/int-uint.md).
+
+**Returned value**
+
+- The result of multiplication with given scale.
+
+Type: [Decimal256](../../sql-reference/data-types/decimal.md).
+
+**Example**
+
+```text
+┌─multiplyDecimal(toDecimal256(-12, 0), toDecimal32(-2.1, 1), 1)─┐
+│                                                           25.2 │
+└────────────────────────────────────────────────────────────────┘
+```
+
+**Difference from regular multiplication:**
+```sql
+SELECT toDecimal64(-12.647, 3) * toDecimal32(2.1239, 4);
+SELECT toDecimal64(-12.647, 3) as a, toDecimal32(2.1239, 4) as b, multiplyDecimal(a, b);
+```
+
+```text
+┌─multiply(toDecimal64(-12.647, 3), toDecimal32(2.1239, 4))─┐
+│                                               -26.8609633 │
+└───────────────────────────────────────────────────────────┘
+┌─multiplyDecimal(toDecimal64(-12.647, 3), toDecimal32(2.1239, 4))─┐
+│                                                          -26.8609 │
+└──────────────────────────────────────────────────────────────────┘
+```
+
+```sql
+SELECT
+    toDecimal64(-12.647987876, 9) AS a,
+    toDecimal64(123.967645643, 9) AS b,
+    multiplyDecimal(a, b);
+
+SELECT
+    toDecimal64(-12.647987876, 9) AS a,
+    toDecimal64(123.967645643, 9) AS b,
+    a * b;
+```
+
+```text
+┌─────────────a─┬─────────────b─┬─multiplyDecimal(toDecimal64(-12.647987876, 9), toDecimal64(123.967645643, 9))─┐
+│ -12.647987876 │ 123.967645643 │                                                                -1567.941279108 │
+└───────────────┴───────────────┴───────────────────────────────────────────────────────────────────────────────┘
+
+Received exception from server (version 22.11.1):
+Code: 407. DB::Exception: Received from localhost:9000. DB::Exception: Decimal math overflow: While processing toDecimal64(-12.647987876, 9) AS a, toDecimal64(123.967645643, 9) AS b, a * b. (DECIMAL_OVERFLOW)
+```
+
+## divideDecimal(a, b[, result_scale])
+
+Performs division on two decimals. Result value will be of type [Decimal256](../../sql-reference/data-types/decimal.md).
+Result scale can be explicitly specified by `result_scale` argument (const Integer in range `[0, 76]`). If not specified, the result scale is the max scale of given arguments.
+
+:::note
+These function work significantly slower than usual `divide`.
+In case you don't really need controlled precision and/or need fast computation, consider using [divide](#divide).
+:::
+
+**Syntax**
+
+```sql
+divideDecimal(a, b[, result_scale])
+```
+
+**Arguments**
+
+- `a` — First value: [Decimal](../../sql-reference/data-types/decimal.md).
+- `b` — Second value: [Decimal](../../sql-reference/data-types/decimal.md).
+- `result_scale` — Scale of result: [Int/UInt](../../sql-reference/data-types/int-uint.md).
+
+**Returned value**
+
+- The result of division with given scale.
+
+Type: [Decimal256](../../sql-reference/data-types/decimal.md).
+
+**Example**
+
+```text
+┌─divideDecimal(toDecimal256(-12, 0), toDecimal32(2.1, 1), 10)─┐
+│                                                -5.7142857142 │
+└──────────────────────────────────────────────────────────────┘
+```
+
+**Difference from regular division:**
+```sql
+SELECT toDecimal64(-12, 1) / toDecimal32(2.1, 1);
+SELECT toDecimal64(-12, 1) as a, toDecimal32(2.1, 1) as b, divideDecimal(a, b, 1), divideDecimal(a, b, 5);
+```
+
+```text
+┌─divide(toDecimal64(-12, 1), toDecimal32(2.1, 1))─┐
+│                                             -5.7 │
+└──────────────────────────────────────────────────┘
+
+┌───a─┬───b─┬─divideDecimal(toDecimal64(-12, 1), toDecimal32(2.1, 1), 1)─┬─divideDecimal(toDecimal64(-12, 1), toDecimal32(2.1, 1), 5)─┐
+│ -12 │ 2.1 │                                                       -5.7 │                                                   -5.71428 │
+└─────┴─────┴────────────────────────────────────────────────────────────┴────────────────────────────────────────────────────────────┘
+```
+
+```sql
+SELECT toDecimal64(-12, 0) / toDecimal32(2.1, 1);
+SELECT toDecimal64(-12, 0) as a, toDecimal32(2.1, 1) as b, divideDecimal(a, b, 1), divideDecimal(a, b, 5);
+```
+
+```text
+DB::Exception: Decimal result's scale is less than argument's one: While processing toDecimal64(-12, 0) / toDecimal32(2.1, 1). (ARGUMENT_OUT_OF_BOUND)
+
+┌───a─┬───b─┬─divideDecimal(toDecimal64(-12, 0), toDecimal32(2.1, 1), 1)─┬─divideDecimal(toDecimal64(-12, 0), toDecimal32(2.1, 1), 5)─┐
+│ -12 │ 2.1 │                                                       -5.7 │                                                   -5.71428 │
+└─────┴─────┴────────────────────────────────────────────────────────────┴────────────────────────────────────────────────────────────┘
+```