Merge branch 'master' into fix_fuzzer_run
This commit is contained in:
commit 91b4b8e1d5
28  .github/workflows/backport_branches.yml (vendored)

@@ -145,8 +145,8 @@ jobs:
fetch-depth: 0 # For a proper version and performance artifacts
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -190,8 +190,8 @@ jobs:
fetch-depth: 0 # For a proper version and performance artifacts
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -233,8 +233,8 @@ jobs:
uses: actions/checkout@v2
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -276,8 +276,8 @@ jobs:
uses: actions/checkout@v2
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -319,8 +319,8 @@ jobs:
uses: actions/checkout@v2
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -364,8 +364,8 @@ jobs:
fetch-depth: 0 # otherwise we will have no info about contributors
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -409,8 +409,8 @@ jobs:
fetch-depth: 0 # otherwise we will have no info about contributors
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
68  .github/workflows/master.yml (vendored)

@@ -209,8 +209,8 @@ jobs:
fetch-depth: 0 # For a proper version and performance artifacts
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -251,8 +251,8 @@ jobs:
fetch-depth: 0 # For a proper version and performance artifacts
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -295,8 +295,8 @@ jobs:
fetch-depth: 0 # otherwise we will have no info about contributors
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -338,8 +338,8 @@ jobs:
uses: actions/checkout@v2
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -381,8 +381,8 @@ jobs:
uses: actions/checkout@v2
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -424,8 +424,8 @@ jobs:
uses: actions/checkout@v2
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -467,8 +467,8 @@ jobs:
uses: actions/checkout@v2
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -510,8 +510,8 @@ jobs:
uses: actions/checkout@v2
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -556,8 +556,8 @@ jobs:
uses: actions/checkout@v2
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -599,8 +599,8 @@ jobs:
uses: actions/checkout@v2
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -644,8 +644,8 @@ jobs:
fetch-depth: 0 # otherwise we will have no info about contributors
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -689,8 +689,8 @@ jobs:
fetch-depth: 0 # otherwise we will have no info about contributors
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -734,8 +734,8 @@ jobs:
fetch-depth: 0 # otherwise we will have no info about contributors
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -779,8 +779,8 @@ jobs:
fetch-depth: 0 # otherwise we will have no info about contributors
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -824,8 +824,8 @@ jobs:
fetch-depth: 0 # otherwise we will have no info about contributors
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -869,8 +869,8 @@ jobs:
fetch-depth: 0 # otherwise we will have no info about contributors
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -914,8 +914,8 @@ jobs:
fetch-depth: 0 # otherwise we will have no info about contributors
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
2  .github/workflows/nightly.yml (vendored)

@@ -105,7 +105,7 @@ jobs:
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
68  .github/workflows/pull_request.yml (vendored)

@@ -272,8 +272,8 @@ jobs:
fetch-depth: 0 # for performance artifact
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -315,8 +315,8 @@ jobs:
uses: actions/checkout@v2
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -360,8 +360,8 @@ jobs:
fetch-depth: 0 # for performance artifact
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -403,8 +403,8 @@ jobs:
uses: actions/checkout@v2
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -446,8 +446,8 @@ jobs:
uses: actions/checkout@v2
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -489,8 +489,8 @@ jobs:
uses: actions/checkout@v2
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -532,8 +532,8 @@ jobs:
uses: actions/checkout@v2
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -575,8 +575,8 @@ jobs:
uses: actions/checkout@v2
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -621,8 +621,8 @@ jobs:
uses: actions/checkout@v2
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -664,8 +664,8 @@ jobs:
uses: actions/checkout@v2
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -707,8 +707,8 @@ jobs:
uses: actions/checkout@v2
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -750,8 +750,8 @@ jobs:
uses: actions/checkout@v2
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -793,8 +793,8 @@ jobs:
uses: actions/checkout@v2
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -836,8 +836,8 @@ jobs:
uses: actions/checkout@v2
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -879,8 +879,8 @@ jobs:
uses: actions/checkout@v2
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -922,8 +922,8 @@ jobs:
uses: actions/checkout@v2
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -965,8 +965,8 @@ jobs:
uses: actions/checkout@v2
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
36  .github/workflows/release_branches.yml (vendored)

@@ -136,8 +136,8 @@ jobs:
fetch-depth: 0 # otherwise we will have no info about contributors
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -178,8 +178,8 @@ jobs:
fetch-depth: 0 # otherwise we will have no info about contributors
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -220,8 +220,8 @@ jobs:
uses: actions/checkout@v2
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -263,8 +263,8 @@ jobs:
uses: actions/checkout@v2
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -306,8 +306,8 @@ jobs:
uses: actions/checkout@v2
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -349,8 +349,8 @@ jobs:
uses: actions/checkout@v2
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -392,8 +392,8 @@ jobs:
uses: actions/checkout@v2
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -437,8 +437,8 @@ jobs:
fetch-depth: 0 # otherwise we will have no info about contributors
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -482,8 +482,8 @@ jobs:
fetch-depth: 0 # otherwise we will have no info about contributors
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -442,8 +442,9 @@ elseif (OS_DARWIN)
include(cmake/darwin/default_libs.cmake)
elseif (OS_FREEBSD)
include(cmake/freebsd/default_libs.cmake)
else()
link_libraries(global-group)
endif ()
link_libraries(global-group)

if (NOT (OS_LINUX OR OS_DARWIN))
# Using system libs can cause a lot of warnings in includes (on macro expansion).
@@ -592,7 +593,7 @@ add_subdirectory (programs)
add_subdirectory (tests)
add_subdirectory (utils)

include (cmake/sanitize_target_link_libraries.cmake)
include (cmake/sanitize_targets.cmake)

# Build native targets if necessary
get_property(NATIVE_BUILD_TARGETS GLOBAL PROPERTY NATIVE_BUILD_TARGETS)
@@ -220,13 +220,13 @@ struct statx {
uint32_t stx_dev_minor;
uint64_t spare[14];
};
#endif

int statx(int fd, const char *restrict path, int flag,
unsigned int mask, struct statx *restrict statxbuf)
{
return syscall(SYS_statx, fd, path, flag, mask, statxbuf);
}
#endif

#include <syscall.h>
@@ -23,6 +23,7 @@ set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)

include (cmake/cxx.cmake)
link_libraries(global-group)

target_link_libraries(global-group INTERFACE
$<TARGET_PROPERTY:global-libs,INTERFACE_LINK_LIBRARIES>
@@ -24,6 +24,7 @@ find_package(Threads REQUIRED)

include (cmake/unwind.cmake)
include (cmake/cxx.cmake)
link_libraries(global-group)

target_link_libraries(global-group INTERFACE
$<TARGET_PROPERTY:global-libs,INTERFACE_LINK_LIBRARIES>
@@ -34,6 +34,13 @@ set(CMAKE_C_STANDARD_LIBRARIES ${DEFAULT_LIBS})
set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)

include (cmake/unwind.cmake)
include (cmake/cxx.cmake)

# Delay the call to link the global interface after the libc++ libraries are included to avoid circular dependencies
# which are ok with static libraries but not with dynamic ones
link_libraries(global-group)

if (NOT OS_ANDROID)
if (NOT USE_MUSL)
# Our compatibility layer doesn't build under Android, many errors in musl.
@@ -42,9 +49,6 @@ if (NOT OS_ANDROID)
add_subdirectory(base/harmful)
endif ()

include (cmake/unwind.cmake)
include (cmake/cxx.cmake)

target_link_libraries(global-group INTERFACE
-Wl,--start-group
$<TARGET_PROPERTY:global-libs,INTERFACE_LINK_LIBRARIES>
@@ -1,3 +1,13 @@
# https://stackoverflow.com/a/62311397/328260
macro (get_all_targets_recursive targets dir)
get_property (subdirectories DIRECTORY ${dir} PROPERTY SUBDIRECTORIES)
foreach (subdir ${subdirectories})
get_all_targets_recursive (${targets} ${subdir})
endforeach ()
get_property (current_targets DIRECTORY ${dir} PROPERTY BUILDSYSTEM_TARGETS)
list (APPEND ${targets} ${current_targets})
endmacro ()

# When you will try to link target with the directory (that exists), cmake will
# skip this without an error, only the following warning will be reported:
#
@@ -18,23 +28,12 @@
# -- but cannot be used with link_libraries()
# - use BUILDSYSTEM_TARGETS property to get list of all targets and sanitize
# -- this will work.

# https://stackoverflow.com/a/62311397/328260
function (get_all_targets var)
set (targets)
get_all_targets_recursive (targets ${CMAKE_CURRENT_SOURCE_DIR})
set (${var} ${targets} PARENT_SCOPE)
endfunction()
macro (get_all_targets_recursive targets dir)
get_property (subdirectories DIRECTORY ${dir} PROPERTY SUBDIRECTORIES)
foreach (subdir ${subdirectories})
get_all_targets_recursive (${targets} ${subdir})
endforeach ()
get_property (current_targets DIRECTORY ${dir} PROPERTY BUILDSYSTEM_TARGETS)
list (APPEND ${targets} ${current_targets})
endmacro ()

macro (sanitize_link_libraries target)
function (sanitize_link_libraries target)
get_target_property(target_type ${target} TYPE)
if (${target_type} STREQUAL "INTERFACE_LIBRARY")
get_property(linked_libraries TARGET ${target} PROPERTY INTERFACE_LINK_LIBRARIES)
@@ -48,9 +47,35 @@ macro (sanitize_link_libraries target)
message(FATAL_ERROR "${target} requested to link with directory: ${linked_library}")
endif()
endforeach()
endmacro()

endfunction()
get_all_targets (all_targets)
foreach (target ${all_targets})
sanitize_link_libraries(${target})
endforeach()

#
# Do not allow to define -W* from contrib publically (INTERFACE/PUBLIC).
#
function (get_contrib_targets var)
set (targets)
get_all_targets_recursive (targets ${CMAKE_CURRENT_SOURCE_DIR}/contrib)
set (${var} ${targets} PARENT_SCOPE)
endfunction()
function (sanitize_interface_flags target)
get_target_property(target_type ${target} TYPE)
get_property(compile_definitions TARGET ${target} PROPERTY INTERFACE_COMPILE_DEFINITIONS)
get_property(compile_options TARGET ${target} PROPERTY INTERFACE_COMPILE_OPTIONS)
if (NOT "${compile_options}" STREQUAL "")
message(FATAL_ERROR "${target} set INTERFACE_COMPILE_OPTIONS to ${compile_options}. This is forbidden.")
endif()
if ("${compile_definitions}" MATCHES "-Wl,")
# linker option - OK
elseif ("${compile_definitions}" MATCHES "-W")
message(FATAL_ERROR "${target} contains ${compile_definitions} flags in INTERFACE_COMPILE_DEFINITIONS. This is forbidden.")
endif()
endfunction()
get_contrib_targets (contrib_targets)
foreach (contrib_target ${contrib_targets})
sanitize_interface_flags(${contrib_target})
endforeach()
@@ -57,7 +57,7 @@ add_library(cxx ${SRCS})
set_target_properties(cxx PROPERTIES FOLDER "contrib/libcxx-cmake")

target_include_directories(cxx SYSTEM BEFORE PRIVATE $<BUILD_INTERFACE:${LIBCXX_SOURCE_DIR}/src>)
target_include_directories(cxx SYSTEM BEFORE PUBLIC $<BUILD_INTERFACE:${LIBCXX_SOURCE_DIR}/include>)
target_include_directories(cxx SYSTEM BEFORE PUBLIC $<$<COMPILE_LANGUAGE:CXX>:$<BUILD_INTERFACE:${LIBCXX_SOURCE_DIR}/include>>)
target_compile_definitions(cxx PRIVATE -D_LIBCPP_BUILDING_LIBRARY -DLIBCXX_BUILDING_LIBCXXABI)

# Enable capturing stack traces for all exceptions.
@@ -388,6 +388,8 @@ else
rm -f /etc/clickhouse-server/config.d/storage_conf.xml ||:
rm -f /etc/clickhouse-server/config.d/azure_storage_conf.xml ||:

# Turn on after 22.12
rm -f /etc/clickhouse-server/config.d/compressed_marks_and_index.xml ||:
# it uses recently introduced settings which previous versions may not have
rm -f /etc/clickhouse-server/users.d/insert_keeper_retries.xml ||:

@@ -451,6 +453,7 @@ else
# FIXME https://github.com/ClickHouse/ClickHouse/issues/39197 ("Missing columns: 'v3' while processing query: 'v3, k, v1, v2, p'")
# NOTE Incompatibility was introduced in https://github.com/ClickHouse/ClickHouse/pull/39263, it's expected
# ("This engine is deprecated and is not supported in transactions", "[Queue = DB::MergeMutateRuntimeQueue]: Code: 235. DB::Exception: Part")
# FIXME https://github.com/ClickHouse/ClickHouse/issues/39174 - bad mutation does not indicate backward incompatibility
echo "Check for Error messages in server log:"
zgrep -Fav -e "Code: 236. DB::Exception: Cancelled merging parts" \
-e "Code: 236. DB::Exception: Cancelled mutating parts" \
@@ -485,6 +488,7 @@ else
-e "(ReplicatedMergeTreeAttachThread): Initialization failed. Error" \
-e "Code: 269. DB::Exception: Destination table is myself" \
-e "Coordination::Exception: Connection loss" \
-e "MutateFromLogEntryTask" \
/var/log/clickhouse-server/clickhouse-server.backward.clean.log | zgrep -Fa "<Error>" > /test_output/bc_check_error_messages.txt \
&& echo -e 'Backward compatibility check: Error message in clickhouse-server.log (see bc_check_error_messages.txt)\tFAIL' >> /test_output/test_results.tsv \
|| echo -e 'Backward compatibility check: No Error messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv
@@ -85,7 +85,7 @@ Example of setting the addresses of the auxiliary ZooKeeper cluster:
</auxiliary_zookeepers>
```

To store table datameta in a auxiliary ZooKeeper cluster instead of default ZooKeeper cluster, we can use the SQL to create table with
To store table metadata in an auxiliary ZooKeeper cluster instead of default ZooKeeper cluster, we can use the SQL to create table with
ReplicatedMergeTree engine as follow:

```
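-- The original snippet is truncated in this diff; the statement below is a hedged
-- illustration only. The table name, ZooKeeper path, and the auxiliary cluster name
-- `zookeeper2` are hypothetical; the auxiliary cluster is referenced by prefixing
-- its configured name and a colon to the ZooKeeper path.
CREATE TABLE t_aux
(
    d Date,
    x UInt32
)
ENGINE = ReplicatedMergeTree('zookeeper2:/clickhouse/tables/01/t_aux', '{replica}')
PARTITION BY toYYYYMM(d)
ORDER BY x;
```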
@@ -13,7 +13,7 @@ The supported formats are:
| Format | Input | Output |
|--------|-------|--------|
| [TabSeparated](#tabseparated) | ✔ | ✔ |
| [TabSeparatedRaw](#tabseparatedraw) | ✔ | ✔ |
| [TabSeparatedRaw](#tabseparatedraw) | ✔ | ✔ |
| [TabSeparatedWithNames](#tabseparatedwithnames) | ✔ | ✔ |
| [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes) | ✔ | ✔ |
| [TabSeparatedRawWithNames](#tabseparatedrawwithnames) | ✔ | ✔ |
@@ -48,6 +48,7 @@ The supported formats are:
| [JSONCompactStringsEachRowWithNames](#jsoncompactstringseachrowwithnames) | ✔ | ✔ |
| [JSONCompactStringsEachRowWithNamesAndTypes](#jsoncompactstringseachrowwithnamesandtypes) | ✔ | ✔ |
| [JSONObjectEachRow](#jsonobjecteachrow) | ✔ | ✔ |
| [BSONEachRow](#bsoneachrow) | ✔ | ✔ |
| [TSKV](#tskv) | ✔ | ✔ |
| [Pretty](#pretty) | ✗ | ✔ |
| [PrettyNoEscapes](#prettynoescapes) | ✗ | ✔ |
@@ -1210,6 +1211,69 @@ SELECT * FROM json_each_row_nested
- [output_format_json_array_of_rows](../operations/settings/settings.md#output_format_json_array_of_rows) - output a JSON array of all rows in JSONEachRow(Compact) format. Default value - `false`.
- [output_format_json_validate_utf8](../operations/settings/settings.md#output_format_json_validate_utf8) - enables validation of UTF-8 sequences in JSON output formats (note that it doesn't impact formats JSON/JSONCompact/JSONColumnsWithMetadata, they always validate utf8). Default value - `false`.

## BSONEachRow {#bsoneachrow}

In this format, ClickHouse formats/parses data as a sequence of BSON documents without any separator between them.
Each row is formatted as a single document and each column is formatted as a single BSON document field with column name as a key.

For output it uses the following correspondence between ClickHouse types and BSON types:

| ClickHouse type | BSON Type |
|-----------------|-----------|
| [Bool](../sql-reference/data-types/boolean.md) | `\x08` boolean |
| [Int8/UInt8](../sql-reference/data-types/int-uint.md) | `\x10` int32 |
| [Int16UInt16](../sql-reference/data-types/int-uint.md) | `\x10` int32 |
| [Int32](../sql-reference/data-types/int-uint.md) | `\x10` int32 |
| [UInt32](../sql-reference/data-types/int-uint.md) | `\x12` int64 |
| [Int64/UInt64](../sql-reference/data-types/int-uint.md) | `\x12` int64 |
| [Float32/Float64](../sql-reference/data-types/float.md) | `\x01` double |
| [Date](../sql-reference/data-types/date.md)/[Date32](../sql-reference/data-types/date32.md) | `\x10` int32 |
| [DateTime](../sql-reference/data-types/datetime.md) | `\x12` int64 |
| [DateTime64](../sql-reference/data-types/datetime64.md) | `\x09` datetime |
| [Decimal32](../sql-reference/data-types/decimal.md) | `\x10` int32 |
| [Decimal64](../sql-reference/data-types/decimal.md) | `\x12` int64 |
| [Decimal128](../sql-reference/data-types/decimal.md) | `\x05` binary, `\x00` binary subtype, size = 16 |
| [Decimal256](../sql-reference/data-types/decimal.md) | `\x05` binary, `\x00` binary subtype, size = 32 |
| [Int128/UInt128](../sql-reference/data-types/int-uint.md) | `\x05` binary, `\x00` binary subtype, size = 16 |
| [Int256/UInt256](../sql-reference/data-types/int-uint.md) | `\x05` binary, `\x00` binary subtype, size = 32 |
| [String](../sql-reference/data-types/string.md)/[FixedString](../sql-reference/data-types/fixedstring.md) | `\x05` binary, `\x00` binary subtype or \x02 string if setting output_format_bson_string_as_string is enabled |
| [UUID](../sql-reference/data-types/uuid.md) | `\x05` binary, `\x04` uuid subtype, size = 16 |
| [Array](../sql-reference/data-types/array.md) | `\x04` array |
| [Tuple](../sql-reference/data-types/tuple.md) | `\x04` array |
| [Named Tuple](../sql-reference/data-types/tuple.md) | `\x03` document |
| [Map](../sql-reference/data-types/map.md) (with String keys) | `\x03` document |

For input it uses the following correspondence between BSON types and ClickHouse types:

| BSON Type | ClickHouse Type |
|-----------|-----------------|
| `\x01` double | [Float32/Float64](../sql-reference/data-types/float.md) |
| `\x02` string | [String](../sql-reference/data-types/string.md)/[FixedString](../sql-reference/data-types/fixedstring.md) |
| `\x03` document | [Map](../sql-reference/data-types/map.md)/[Named Tuple](../sql-reference/data-types/tuple.md) |
| `\x04` array | [Array](../sql-reference/data-types/array.md)/[Tuple](../sql-reference/data-types/tuple.md) |
| `\x05` binary, `\x00` binary subtype | [String](../sql-reference/data-types/string.md)/[FixedString](../sql-reference/data-types/fixedstring.md) |
| `\x05` binary, `\x02` old binary subtype | [String](../sql-reference/data-types/string.md)/[FixedString](../sql-reference/data-types/fixedstring.md) |
| `\x05` binary, `\x03` old uuid subtype | [UUID](../sql-reference/data-types/uuid.md) |
| `\x05` binary, `\x04` uuid subtype | [UUID](../sql-reference/data-types/uuid.md) |
| `\x07` ObjectId | [String](../sql-reference/data-types/string.md)/[FixedString](../sql-reference/data-types/fixedstring.md) |
| `\x08` boolean | [Bool](../sql-reference/data-types/boolean.md) |
| `\x09` datetime | [DateTime64](../sql-reference/data-types/datetime64.md) |
| `\x0A` null value | [NULL](../sql-reference/data-types/nullable.md) |
| `\x0D` JavaScript code | [String](../sql-reference/data-types/string.md)/[FixedString](../sql-reference/data-types/fixedstring.md) |
| `\x0E` symbol | [String](../sql-reference/data-types/string.md)/[FixedString](../sql-reference/data-types/fixedstring.md) |
| `\x10` int32 | [Int32/UInt32](../sql-reference/data-types/int-uint.md)/[Decimal32](../sql-reference/data-types/decimal.md) |
| `\x12` int64 | [Int64/UInt64](../sql-reference/data-types/int-uint.md)/[Decimal64](../sql-reference/data-types/decimal.md)/[DateTime64](../sql-reference/data-types/datetime64.md) |

Other BSON types are not supported. Also, it performs conversion between different integer types (for example, you can insert BSON int32 value into ClickHouse UInt8).
Big integers and decimals (Int128/UInt128/Int256/UInt256/Decimal128/Decimal256) can be parsed from BSON Binary value with `\x00` binary subtype. In this case this format will validate that the size of binary data equals the size of expected value.

Note: this format don't work properly on Big-Endian platforms.

### BSON format settings {#bson-format-settings}

- [output_format_bson_string_as_string](../operations/settings/settings.md#output_format_bson_string_as_string) - use BSON String type instead of Binary for String columns. Default value - `false`.
- [input_format_bson_skip_fields_with_unsupported_types_in_schema_inference](../operations/settings/settings.md#input_format_bson_skip_fields_with_unsupported_types_in_schema_inference) - allow skipping columns with unsupported types while schema inference for format BSONEachRow. Default value - `false`.
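A minimal sketch of producing BSON output with the setting above (the query is purely illustrative and is not part of the original page):

```sql
-- Emit three rows as a sequence of BSON documents; with the setting enabled,
-- String columns are written as BSON strings (\x02) instead of binary (\x05).
SET output_format_bson_string_as_string = 1;

SELECT number AS id, toString(number) AS value
FROM system.numbers
LIMIT 3
FORMAT BSONEachRow;
```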
## Native {#native}

The most efficient format. Data is written and read by blocks in binary format. For each block, the number of rows, number of columns, column names and types, and parts of columns in this block are recorded one after another. In other words, this format is “columnar” – it does not convert columns to rows. This is the format used in the native interface for interaction between servers, for using the command-line client, and for C++ clients.
@@ -4784,7 +4784,7 @@ Possible values:

Default value: 1.

## SQLInsert format settings {$sqlinsert-format-settings}
## SQLInsert format settings {#sqlinsert-format-settings}

### output_format_sql_insert_max_batch_size {#output_format_sql_insert_max_batch_size}

@@ -4815,3 +4815,17 @@ Default value: `false`.
Quote column names with "`" characters

Default value: `true`.

## BSONEachRow format settings {#bson-each-row-format-settings}

### output_format_bson_string_as_string {#output_format_bson_string_as_string}

Use BSON String type instead of Binary for String columns.

Disabled by default.

### input_format_bson_skip_fields_with_unsupported_types_in_schema_inference {#input_format_bson_skip_fields_with_unsupported_types_in_schema_inference}

Allow skipping columns with unsupported types while schema inference for format BSONEachRow.

Disabled by default.
@@ -11,6 +11,14 @@ Projections store data in a format that optimizes query execution, this feature

You can define one or more projections for a table, and during the query analysis the projection with the least data to scan will be selected by ClickHouse without modifying the query provided by the user.

:::note Disk usage

Projections will create internally a new hidden table, this means that more IO and space on disk will be required.
Example, If the projection has defined a different primary key, all the data from the original table will be duplicated.
:::

You can see more technical details about how projections work internally on this [page](/docs/en/guides/improving-query-performance/sparse-primary-indexes/sparse-primary-indexes-multiple.md/#option-3-projections).
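As a quick, hedged sketch of what defining and building a projection looks like (the table and column names here are hypothetical and unrelated to the example that follows):

```sql
-- Add a projection that pre-aggregates by user_id, then build it for existing parts.
ALTER TABLE visits_table ADD PROJECTION user_agg
(
    SELECT user_id, count()
    GROUP BY user_id
);

ALTER TABLE visits_table MATERIALIZE PROJECTION user_agg;
```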
## Example filtering without using primary keys

Creating the table:
@@ -60,7 +60,7 @@ If you specify `POPULATE`, the existing table data is inserted into the view when creating it

A `SELECT` query can contain `DISTINCT`, `GROUP BY`, `ORDER BY`, `LIMIT`. Note that the corresponding conversions are performed independently on each block of inserted data. For example, if `GROUP BY` is set, data is aggregated during insertion, but only within a single packet of inserted data. The data won’t be further aggregated. The exception is when using an `ENGINE` that independently performs data aggregation, such as `SummingMergeTree`.

The execution of [ALTER](../../../sql-reference/statements/alter/view.md) queries on materialized views has limitations, so they might be inconvenient. If the materialized view uses the construction `TO [db.]name`, you can `DETACH` the view, run `ALTER` for the target table, and then `ATTACH` the previously detached (`DETACH`) view.
The execution of [ALTER](/docs/en/sql-reference/statements/alter/view.md) queries on materialized views has limitations, for example, you can not update the `SELECT` query, so this might be inconvenient. If the materialized view uses the construction `TO [db.]name`, you can `DETACH` the view, run `ALTER` for the target table, and then `ATTACH` the previously detached (`DETACH`) view.
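A hedged sketch of that workaround (all object names are hypothetical):

```sql
-- Assumes a materialized view mydb.mv created with TO mydb.dest.
DETACH TABLE mydb.mv;                            -- detach the view first
ALTER TABLE mydb.dest ADD COLUMN extra UInt64;   -- alter the target table it writes into
ATTACH TABLE mydb.mv;                            -- re-attach the previously detached view
```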

Note that materialized view is influenced by [optimize_on_insert](../../../operations/settings/settings.md#optimize-on-insert) setting. The data is merged before the insertion into a view.
@@ -47,6 +47,7 @@ Union

- `AST` — Abstract syntax tree.
- `SYNTAX` — Query text after AST-level optimizations.
- `QUERY TREE` — Query tree after Query Tree level optimizations.
- `PLAN` — Query execution plan.
- `PIPELINE` — Query execution pipeline.

@@ -110,6 +111,32 @@ FROM
CROSS JOIN system.numbers AS c
```

### EXPLAIN QUERY TREE

Settings:

- `run_passes` — Run all query tree passes before dumping the query tree. Defaul: `1`.
- `dump_passes` — Dump information about used passes before dumping the query tree. Default: `0`.
- `passes` — Specifies how many passes to run. If set to `-1`, runs all the passes. Default: `-1`.

Example:
```sql
EXPLAIN QUERY TREE SELECT id, value FROM test_table;
```

```
QUERY id: 0
PROJECTION COLUMNS
id UInt64
value String
PROJECTION
LIST id: 1, nodes: 2
COLUMN id: 2, column_name: id, result_type: UInt64, source_id: 3
COLUMN id: 4, column_name: value, result_type: String, source_id: 3
JOIN TREE
TABLE id: 3, table_name: default.test_table
```

### EXPLAIN PLAN

Dump query plan steps.
@@ -243,6 +243,54 @@ If `max_rows_to_group_by` and `group_by_overflow_mode = 'any'` are not used, all

You can use `WITH TOTALS` in subqueries, including subqueries in the [JOIN](../../../sql-reference/statements/select/join.md) clause (in this case, the respective total values are combined).

## GROUP BY ALL

`GROUP BY ALL` is equivalent to listing all the SELECT-ed expressions that are not aggregate functions.

For example:

``` sql
SELECT
a * 2,
b,
count(c),
FROM t
GROUP BY ALL
```

is the same as

``` sql
SELECT
a * 2,
b,
count(c),
FROM t
GROUP BY a * 2, b
```

For a special case that if there is a function having both aggregate functions and other fields as its arguments, the `GROUP BY` keys will contain the maximum non-aggregate fields we can extract from it.

For example:

``` sql
SELECT
substring(a, 4, 2),
substring(substring(a, 1, 2), 1, count(b))
FROM t
GROUP BY ALL
```

is the same as

``` sql
SELECT
substring(a, 4, 2),
substring(substring(a, 1, 2), 1, count(b))
FROM t
GROUP BY substring(a, 4, 2), substring(a, 1, 2)
```

## Examples

Example:
@@ -98,7 +98,7 @@ ClickHouse предоставляет возможность аутентифи

:::danger "Важно"
Если пользователь настроен для Kerberos-аутентификации, другие виды уатентификации будут для него недоступны. Если наряду с `kerberos` в определении пользователя будет указан какой-либо другой способ аутентификации, ClickHouse завершит работу.
Если пользователь настроен для Kerberos-аутентификации, другие виды аутентификации будут для него недоступны. Если наряду с `kerberos` в определении пользователя будет указан какой-либо другой способ аутентификации, ClickHouse завершит работу.

:::info ""
Ещё раз отметим, что кроме `users.xml`, необходимо также включить Kerberos в `config.xml`.
@@ -74,7 +74,7 @@ Kafka 特性:

消费的消息会被自动追踪,因此每个消息在不同的消费组里只会记录一次。如果希望获得两次数据,则使用另一个组名创建副本。

消费组可以灵活配置并且在集群之间同步。例如,如果群集中有10个主题和5个表副本,则每个副本将获得2个主题。 如果副本数量发生变化,主题将自动在副本中重新分配。了解更多信息请访问 http://kafka.apache.org/intro。
消费组可以灵活配置并且在集群之间同步。例如,如果群集中有10个主题和5个表副本,则每个副本将获得2个主题。 如果副本数量发生变化,主题将自动在副本中重新分配。了解更多信息请访问 [http://kafka.apache.org/intro](http://kafka.apache.org/intro)。

`SELECT` 查询对于读取消息并不是很有用(调试除外),因为每条消息只能被读取一次。使用物化视图创建实时线程更实用。您可以这样做:
@@ -164,7 +164,7 @@ SETTINGS index_granularity = 8192, index_granularity_bytes = 0;
<li><font face = "monospace">index_granularity</font>: 显式设置为其默认值8192。这意味着对于每一组8192行,主索引将有一个索引条目,例如,如果表包含16384行,那么索引将有两个索引条目。
</li>
<br/>
<li><font face = "monospace">index_granularity_bytes</font>: 设置为0表示禁止<a href="https://clickhouse.com/docs/en/whats-new/changelog/2019/#experimental-features-1" target="_blank"><font color="blue">字适应索引粒度</font></a>。自适应索引粒度意味着ClickHouse自动为一组n行创建一个索引条目
<li><font face = "monospace">index_granularity_bytes</font>: 设置为0表示禁止<a href="https://clickhouse.com/docs/en/whats-new/changelog/2019/#experimental-features-1" target="_blank"><font color="blue">自适应索引粒度</font></a>。自适应索引粒度意味着ClickHouse自动为一组n行创建一个索引条目
<ul>
<li>如果n小于8192,但n行的合并行数据大小大于或等于10MB (index_granularity_bytes的默认值)或</li>
<li>n达到8192</li>
@@ -777,7 +777,7 @@ ClickHouse现在创建了一个额外的索引来存储—每组4个连续的颗
如果我们想显著加快我们的两个示例查询——一个过滤具有特定UserID的行,一个过滤具有特定URL的行——那么我们需要使用多个主索引,通过使用这三个方法中的一个:

- 新建一个不同主键的新表。
- 创建一个雾化视图。
- 创建一个物化视图。
- 增加projection。

这三个方法都会有效地将示例数据复制到另一个表中,以便重新组织表的主索引和行排序顺序。
@@ -992,7 +992,7 @@ Ok.

:::note
- 我们在视图的主键中切换键列的顺序(与原始表相比)
- 雾化视图由一个隐藏表支持,该表的行顺序和主索引基于给定的主键定义
- 物化视图由一个隐藏表支持,该表的行顺序和主索引基于给定的主键定义
- 我们使用POPULATE关键字,以便用源表hits_UserID_URL中的所有887万行立即导入新的物化视图
- 如果在源表hits_UserID_URL中插入了新行,那么这些行也会自动插入到隐藏表中
- 实际上,隐式创建的隐藏表的行顺序和主索引与我们上面显式创建的辅助表相同:
@@ -1082,7 +1082,7 @@ ALTER TABLE hits_UserID_URL
);
```

雾化projection:
物化projection:
```sql
ALTER TABLE hits_UserID_URL
MATERIALIZE PROJECTION prj_url_userid;
@@ -77,6 +77,54 @@ sidebar_label: GROUP BY

您可以使用 `WITH TOTALS` 在子查询中,包括在子查询 [JOIN](../../../sql-reference/statements/select/join.md) 子句(在这种情况下,将各自的总值合并)。

## GROUP BY ALL {#group-by-all}

`GROUP BY ALL` 相当于对所有被查询的并且不被聚合函数使用的字段进行`GROUP BY`。

例如

``` sql
SELECT
a * 2,
b,
count(c),
FROM t
GROUP BY ALL
```

效果等同于

``` sql
SELECT
a * 2,
b,
count(c),
FROM t
GROUP BY a * 2, b
```

对于一种特殊情况,如果一个 function 的参数中同时有聚合函数和其他字段,会对参数中能提取的最大非聚合字段进行`GROUP BY`。

例如:

``` sql
SELECT
substring(a, 4, 2),
substring(substring(a, 1, 2), 1, count(b))
FROM t
GROUP BY ALL
```

效果等同于

``` sql
SELECT
substring(a, 4, 2),
substring(substring(a, 1, 2), 1, count(b))
FROM t
GROUP BY substring(a, 4, 2), substring(a, 1, 2)
```

## 例子 {#examples}

示例:
@@ -9,7 +9,10 @@ After=time-sync.target network-online.target
Wants=time-sync.target

[Service]
Type=simple
Type=notify

# Switching off watchdog is very important for sd_notify to work correctly.
Environment=CLICKHOUSE_WATCHDOG_ENABLE=0
User=clickhouse
Group=clickhouse
Restart=always
@@ -80,8 +80,8 @@ require (
go.opentelemetry.io/otel v1.4.1 // indirect
go.opentelemetry.io/otel/trace v1.4.1 // indirect
golang.org/x/net v0.0.0-20211108170745-6635138e15ea // indirect
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect
golang.org/x/text v0.3.8 // indirect
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect
google.golang.org/grpc v1.43.0 // indirect
google.golang.org/protobuf v1.27.1 // indirect
@@ -1078,8 +1078,8 @@ golang.org/x/sys v0.0.0-20211109184856-51b60fd695b3/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20211110154304-99a53858aa08/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1089,8 +1089,9 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8 h1:nAL+RVCQ9uMn3vJZbV+MRnydTJFPf8qqY42YiA6MrqY=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -262,6 +262,7 @@ void Keeper::defineOptions(Poco::Util::OptionSet & options)
|
||||
}
|
||||
|
||||
int Keeper::main(const std::vector<std::string> & /*args*/)
|
||||
try
|
||||
{
|
||||
Poco::Logger * log = &logger();
|
||||
|
||||
@ -473,6 +474,12 @@ int Keeper::main(const std::vector<std::string> & /*args*/)
|
||||
|
||||
return Application::EXIT_OK;
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
/// Poco does not provide stacktrace.
|
||||
tryLogCurrentException("Application");
|
||||
throw;
|
||||
}
|
||||
|
||||
|
||||
void Keeper::logRevision() const
|
||||
|
@ -99,6 +99,10 @@
|
||||
#include "config_version.h"
|
||||
|
||||
#if defined(OS_LINUX)
|
||||
# include <cstddef>
|
||||
# include <cstdlib>
|
||||
# include <sys/socket.h>
|
||||
# include <sys/un.h>
|
||||
# include <sys/mman.h>
|
||||
# include <sys/ptrace.h>
|
||||
# include <Common/hasLinuxCapability.h>
|
||||
@ -273,6 +277,7 @@ namespace ErrorCodes
|
||||
extern const int MISMATCHING_USERS_FOR_PROCESS_AND_DATA;
|
||||
extern const int NETWORK_ERROR;
|
||||
extern const int CORRUPTED_DATA;
|
||||
extern const int SYSTEM_ERROR;
|
||||
}
|
||||
|
||||
|
||||
@ -646,7 +651,53 @@ static void sanityChecks(Server & server)
|
||||
}
|
||||
}
|
||||
|
||||
#if defined(OS_LINUX)
|
||||
/// Sends notification to systemd, analogous to sd_notify from libsystemd
|
||||
static void systemdNotify(const std::string_view & command)
|
||||
{
|
||||
const char * path = getenv("NOTIFY_SOCKET"); // NOLINT(concurrency-mt-unsafe)
|
||||
|
||||
if (path == nullptr)
|
||||
return; /// not using systemd
|
||||
|
||||
int s = socket(AF_UNIX, SOCK_DGRAM | SOCK_CLOEXEC, 0);
|
||||
|
||||
if (s == -1)
|
||||
throwFromErrno("Can't create UNIX socket for systemd notify.", ErrorCodes::SYSTEM_ERROR);
|
||||
|
||||
SCOPE_EXIT({ close(s); });
|
||||
|
||||
const size_t len = strlen(path);
|
||||
|
||||
struct sockaddr_un addr;
|
||||
|
||||
addr.sun_family = AF_UNIX;
|
||||
|
||||
if (len < 2 || len > sizeof(addr.sun_path) - 1)
|
||||
throw Exception(ErrorCodes::SYSTEM_ERROR, "NOTIFY_SOCKET env var value \"{}\" is wrong.", path);
|
||||
|
||||
memcpy(addr.sun_path, path, len + 1); /// write last zero as well.
|
||||
|
||||
size_t addrlen = offsetof(struct sockaddr_un, sun_path) + len;
|
||||
|
||||
/// '@' means this is a Linux abstract socket; per the documentation, sun_path[0] must be set to '\0' for it.
|
||||
if (path[0] == '@')
|
||||
addr.sun_path[0] = 0;
|
||||
else if (path[0] == '/')
|
||||
addrlen += 1; /// non-abstract-addresses should be zero terminated.
|
||||
else
|
||||
throw Exception(ErrorCodes::SYSTEM_ERROR, "Wrong UNIX path \"{}\" in NOTIFY_SOCKET env var", path);
|
||||
|
||||
const struct sockaddr *sock_addr = reinterpret_cast <const struct sockaddr *>(&addr);
|
||||
|
||||
if (sendto(s, command.data(), command.size(), 0, sock_addr, static_cast <socklen_t>(addrlen)) != static_cast <ssize_t>(command.size()))
|
||||
throw Exception("Failed to notify systemd.", ErrorCodes::SYSTEM_ERROR);
|
||||
|
||||
}
|
||||
#endif
|
||||
|
||||
int Server::main(const std::vector<std::string> & /*args*/)
|
||||
try
|
||||
{
|
||||
Poco::Logger * log = &logger();
|
||||
|
||||
@ -1148,6 +1199,9 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
total_memory_tracker.setDescription("(total)");
|
||||
total_memory_tracker.setMetric(CurrentMetrics::MemoryTracking);
|
||||
|
||||
bool allow_use_jemalloc_memory = config->getBool("allow_use_jemalloc_memory", true);
|
||||
total_memory_tracker.setAllowUseJemallocMemory(allow_use_jemalloc_memory);
|
||||
|
||||
auto * global_overcommit_tracker = global_context->getGlobalOvercommitTracker();
|
||||
total_memory_tracker.setOvercommitTracker(global_overcommit_tracker);
|
||||
|
||||
@ -1776,6 +1830,10 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
tryLogCurrentException(log, "Caught exception while starting cluster discovery");
|
||||
}
|
||||
|
||||
#if defined(OS_LINUX)
|
||||
systemdNotify("READY=1\n");
|
||||
#endif
|
||||
|
||||
SCOPE_EXIT_SAFE({
|
||||
LOG_DEBUG(log, "Received termination signal.");
|
||||
|
||||
@ -1845,6 +1903,12 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
|
||||
return Application::EXIT_OK;
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
/// Poco does not provide stacktrace.
|
||||
tryLogCurrentException("Application");
|
||||
throw;
|
||||
}
|
||||
|
||||
std::unique_ptr<TCPProtocolStackFactory> Server::buildProtocolStackFromConfig(
|
||||
const Poco::Util::AbstractConfiguration & config,
|
||||
|
@ -130,8 +130,8 @@ enum class AccessType
|
||||
M(SHOW_ROW_POLICIES, "SHOW POLICIES, SHOW CREATE ROW POLICY, SHOW CREATE POLICY", TABLE, SHOW_ACCESS) \
|
||||
M(SHOW_QUOTAS, "SHOW CREATE QUOTA", GLOBAL, SHOW_ACCESS) \
|
||||
M(SHOW_SETTINGS_PROFILES, "SHOW PROFILES, SHOW CREATE SETTINGS PROFILE, SHOW CREATE PROFILE", GLOBAL, SHOW_ACCESS) \
|
||||
M(SHOW_NAMED_COLLECTIONS, "SHOW NAMED COLLECTIONS", GLOBAL, SHOW_ACCESS) \
|
||||
M(SHOW_ACCESS, "", GROUP, ACCESS_MANAGEMENT) \
|
||||
M(SHOW_NAMED_COLLECTIONS, "SHOW NAMED COLLECTIONS", GROUP, ACCESS_MANAGEMENT) \
|
||||
M(ACCESS_MANAGEMENT, "", GROUP, ALL) \
|
||||
\
|
||||
M(SYSTEM_SHUTDOWN, "SYSTEM KILL, SHUTDOWN", GLOBAL, SYSTEM) \
|
||||
|
@ -2,6 +2,8 @@
|
||||
#include <Access/LDAPClient.h>
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/quoteString.h>
|
||||
#include <Common/SipHash.h>
|
||||
|
||||
#include <Poco/Util/AbstractConfiguration.h>
|
||||
#include <boost/algorithm/string/case_conv.hpp>
|
||||
|
||||
@ -73,6 +75,7 @@ void parseLDAPServer(LDAPClient::Params & params, const Poco::Util::AbstractConf
|
||||
const bool has_tls_ca_cert_file = config.has(ldap_server_config + ".tls_ca_cert_file");
|
||||
const bool has_tls_ca_cert_dir = config.has(ldap_server_config + ".tls_ca_cert_dir");
|
||||
const bool has_tls_cipher_suite = config.has(ldap_server_config + ".tls_cipher_suite");
|
||||
const bool has_search_limit = config.has(ldap_server_config + ".search_limit");
|
||||
|
||||
if (!has_host)
|
||||
throw Exception("Missing 'host' entry", ErrorCodes::BAD_ARGUMENTS);
|
||||
@ -91,8 +94,8 @@ void parseLDAPServer(LDAPClient::Params & params, const Poco::Util::AbstractConf
|
||||
}
|
||||
else if (has_auth_dn_prefix || has_auth_dn_suffix)
|
||||
{
|
||||
const auto auth_dn_prefix = config.getString(ldap_server_config + ".auth_dn_prefix");
|
||||
const auto auth_dn_suffix = config.getString(ldap_server_config + ".auth_dn_suffix");
|
||||
std::string auth_dn_prefix = config.getString(ldap_server_config + ".auth_dn_prefix");
|
||||
std::string auth_dn_suffix = config.getString(ldap_server_config + ".auth_dn_suffix");
|
||||
params.bind_dn = auth_dn_prefix + "{user_name}" + auth_dn_suffix;
|
||||
}
|
||||
|
||||
@ -176,14 +179,17 @@ void parseLDAPServer(LDAPClient::Params & params, const Poco::Util::AbstractConf
|
||||
|
||||
if (has_port)
|
||||
{
|
||||
const auto port = config.getInt64(ldap_server_config + ".port");
|
||||
if (port < 0 || port > 65535)
|
||||
UInt32 port = config.getUInt(ldap_server_config + ".port");
|
||||
if (port > 65535)
|
||||
throw Exception("Bad value for 'port' entry", ErrorCodes::BAD_ARGUMENTS);
|
||||
|
||||
params.port = port;
|
||||
}
|
||||
else
|
||||
params.port = (params.enable_tls == LDAPClient::Params::TLSEnable::YES ? 636 : 389);
|
||||
|
||||
if (has_search_limit)
|
||||
params.search_limit = static_cast<UInt32>(config.getUInt64(ldap_server_config + ".search_limit"));
|
||||
}
|
||||
|
||||
void parseKerberosParams(GSSAcceptorContext::Params & params, const Poco::Util::AbstractConfiguration & config)
|
||||
@ -313,11 +319,26 @@ void ExternalAuthenticators::setConfiguration(const Poco::Util::AbstractConfigur
|
||||
}
|
||||
}
|
||||
|
||||
UInt128 computeParamsHash(const LDAPClient::Params & params, const LDAPClient::RoleSearchParamsList * role_search_params)
|
||||
{
|
||||
SipHash hash;
|
||||
params.updateHash(hash);
|
||||
if (role_search_params)
|
||||
{
|
||||
for (const auto & params_instance : *role_search_params)
|
||||
{
|
||||
params_instance.updateHash(hash);
|
||||
}
|
||||
}
|
||||
|
||||
return hash.get128();
|
||||
}
|
||||
|
||||
bool ExternalAuthenticators::checkLDAPCredentials(const String & server, const BasicCredentials & credentials,
|
||||
const LDAPClient::RoleSearchParamsList * role_search_params, LDAPClient::SearchResultsList * role_search_results) const
|
||||
{
|
||||
std::optional<LDAPClient::Params> params;
|
||||
std::size_t params_hash = 0;
|
||||
UInt128 params_hash = 0;
|
||||
|
||||
{
|
||||
std::scoped_lock lock(mutex);
|
||||
@ -331,14 +352,7 @@ bool ExternalAuthenticators::checkLDAPCredentials(const String & server, const B
|
||||
params->user = credentials.getUserName();
|
||||
params->password = credentials.getPassword();
|
||||
|
||||
params->combineCoreHash(params_hash);
|
||||
if (role_search_params)
|
||||
{
|
||||
for (const auto & params_instance : *role_search_params)
|
||||
{
|
||||
params_instance.combineHash(params_hash);
|
||||
}
|
||||
}
|
||||
params_hash = computeParamsHash(*params, role_search_params);
|
||||
|
||||
// Check the cache, but only if the caching is enabled at all.
|
||||
if (params->verification_cooldown > std::chrono::seconds{0})
|
||||
@ -408,15 +422,7 @@ bool ExternalAuthenticators::checkLDAPCredentials(const String & server, const B
|
||||
new_params.user = credentials.getUserName();
|
||||
new_params.password = credentials.getPassword();
|
||||
|
||||
std::size_t new_params_hash = 0;
|
||||
new_params.combineCoreHash(new_params_hash);
|
||||
if (role_search_params)
|
||||
{
|
||||
for (const auto & params_instance : *role_search_params)
|
||||
{
|
||||
params_instance.combineHash(new_params_hash);
|
||||
}
|
||||
}
|
||||
const UInt128 new_params_hash = computeParamsHash(new_params, role_search_params);
|
||||
|
||||
// If the critical server params have changed while we were checking the password, we discard the current result.
|
||||
if (params_hash != new_params_hash)
|
||||
|
@ -2,10 +2,10 @@
|
||||
#include <Common/Exception.h>
|
||||
#include <base/scope_guard.h>
|
||||
#include <Common/logger_useful.h>
|
||||
#include <Common/SipHash.h>
|
||||
|
||||
#include <Poco/Logger.h>
|
||||
#include <boost/algorithm/string/predicate.hpp>
|
||||
#include <boost/container_hash/hash.hpp>
|
||||
|
||||
#include <mutex>
|
||||
#include <utility>
|
||||
@ -15,6 +15,22 @@
|
||||
|
||||
#include <sys/time.h>
|
||||
|
||||
namespace
|
||||
{
|
||||
|
||||
template <typename T, typename = std::enable_if_t<std::is_fundamental_v<std::decay_t<T>>>>
|
||||
void updateHash(SipHash & hash, const T & value)
|
||||
{
|
||||
hash.update(value);
|
||||
}
|
||||
|
||||
void updateHash(SipHash & hash, const std::string & value)
|
||||
{
|
||||
hash.update(value.size());
|
||||
hash.update(value);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -26,30 +42,30 @@ namespace ErrorCodes
|
||||
extern const int LDAP_ERROR;
|
||||
}
|
||||
|
||||
void LDAPClient::SearchParams::combineHash(std::size_t & seed) const
|
||||
void LDAPClient::SearchParams::updateHash(SipHash & hash) const
|
||||
{
|
||||
boost::hash_combine(seed, base_dn);
|
||||
boost::hash_combine(seed, static_cast<int>(scope));
|
||||
boost::hash_combine(seed, search_filter);
|
||||
boost::hash_combine(seed, attribute);
|
||||
::updateHash(hash, base_dn);
|
||||
::updateHash(hash, static_cast<int>(scope));
|
||||
::updateHash(hash, search_filter);
|
||||
::updateHash(hash, attribute);
|
||||
}
|
||||
|
||||
void LDAPClient::RoleSearchParams::combineHash(std::size_t & seed) const
|
||||
void LDAPClient::RoleSearchParams::updateHash(SipHash & hash) const
|
||||
{
|
||||
SearchParams::combineHash(seed);
|
||||
boost::hash_combine(seed, prefix);
|
||||
SearchParams::updateHash(hash);
|
||||
::updateHash(hash, prefix);
|
||||
}
|
||||
|
||||
void LDAPClient::Params::combineCoreHash(std::size_t & seed) const
|
||||
void LDAPClient::Params::updateHash(SipHash & hash) const
|
||||
{
|
||||
boost::hash_combine(seed, host);
|
||||
boost::hash_combine(seed, port);
|
||||
boost::hash_combine(seed, bind_dn);
|
||||
boost::hash_combine(seed, user);
|
||||
boost::hash_combine(seed, password);
|
||||
::updateHash(hash, host);
|
||||
::updateHash(hash, port);
|
||||
::updateHash(hash, bind_dn);
|
||||
::updateHash(hash, user);
|
||||
::updateHash(hash, password);
|
||||
|
||||
if (user_dn_detection)
|
||||
user_dn_detection->combineHash(seed);
|
||||
user_dn_detection->updateHash(hash);
|
||||
}
|
||||
|
||||
LDAPClient::LDAPClient(const Params & params_)
|
||||
@ -153,13 +169,13 @@ namespace
|
||||
|
||||
}
|
||||
|
||||
void LDAPClient::diag(int rc, String text)
|
||||
void LDAPClient::handleError(int result_code, String text)
|
||||
{
|
||||
std::scoped_lock lock(ldap_global_mutex);
|
||||
|
||||
if (rc != LDAP_SUCCESS)
|
||||
if (result_code != LDAP_SUCCESS)
|
||||
{
|
||||
const char * raw_err_str = ldap_err2string(rc);
|
||||
const char * raw_err_str = ldap_err2string(result_code);
|
||||
if (raw_err_str && *raw_err_str != '\0')
|
||||
{
|
||||
if (!text.empty())
|
||||
@ -214,7 +230,7 @@ bool LDAPClient::openConnection()
|
||||
|
||||
SCOPE_EXIT({ ldap_memfree(uri); });
|
||||
|
||||
diag(ldap_initialize(&handle, uri));
|
||||
handleError(ldap_initialize(&handle, uri));
|
||||
if (!handle)
|
||||
throw Exception("ldap_initialize() failed", ErrorCodes::LDAP_ERROR);
|
||||
}
|
||||
@ -226,13 +242,13 @@ bool LDAPClient::openConnection()
|
||||
case LDAPClient::Params::ProtocolVersion::V2: value = LDAP_VERSION2; break;
|
||||
case LDAPClient::Params::ProtocolVersion::V3: value = LDAP_VERSION3; break;
|
||||
}
|
||||
diag(ldap_set_option(handle, LDAP_OPT_PROTOCOL_VERSION, &value));
|
||||
handleError(ldap_set_option(handle, LDAP_OPT_PROTOCOL_VERSION, &value));
|
||||
}
|
||||
|
||||
diag(ldap_set_option(handle, LDAP_OPT_RESTART, LDAP_OPT_ON));
|
||||
handleError(ldap_set_option(handle, LDAP_OPT_RESTART, LDAP_OPT_ON));
|
||||
|
||||
#ifdef LDAP_OPT_KEEPCONN
|
||||
diag(ldap_set_option(handle, LDAP_OPT_KEEPCONN, LDAP_OPT_ON));
|
||||
handleError(ldap_set_option(handle, LDAP_OPT_KEEPCONN, LDAP_OPT_ON));
|
||||
#endif
|
||||
|
||||
#ifdef LDAP_OPT_TIMEOUT
|
||||
@ -240,7 +256,7 @@ bool LDAPClient::openConnection()
|
||||
::timeval operation_timeout;
|
||||
operation_timeout.tv_sec = params.operation_timeout.count();
|
||||
operation_timeout.tv_usec = 0;
|
||||
diag(ldap_set_option(handle, LDAP_OPT_TIMEOUT, &operation_timeout));
|
||||
handleError(ldap_set_option(handle, LDAP_OPT_TIMEOUT, &operation_timeout));
|
||||
}
|
||||
#endif
|
||||
|
||||
@ -249,18 +265,18 @@ bool LDAPClient::openConnection()
|
||||
::timeval network_timeout;
|
||||
network_timeout.tv_sec = params.network_timeout.count();
|
||||
network_timeout.tv_usec = 0;
|
||||
diag(ldap_set_option(handle, LDAP_OPT_NETWORK_TIMEOUT, &network_timeout));
|
||||
handleError(ldap_set_option(handle, LDAP_OPT_NETWORK_TIMEOUT, &network_timeout));
|
||||
}
|
||||
#endif
|
||||
|
||||
{
|
||||
const int search_timeout = static_cast<int>(params.search_timeout.count());
|
||||
diag(ldap_set_option(handle, LDAP_OPT_TIMELIMIT, &search_timeout));
|
||||
handleError(ldap_set_option(handle, LDAP_OPT_TIMELIMIT, &search_timeout));
|
||||
}
|
||||
|
||||
{
|
||||
const int size_limit = params.search_limit;
|
||||
diag(ldap_set_option(handle, LDAP_OPT_SIZELIMIT, &size_limit));
|
||||
const int size_limit = static_cast<int>(params.search_limit);
|
||||
handleError(ldap_set_option(handle, LDAP_OPT_SIZELIMIT, &size_limit));
|
||||
}
|
||||
|
||||
#ifdef LDAP_OPT_X_TLS_PROTOCOL_MIN
|
||||
@ -274,7 +290,7 @@ bool LDAPClient::openConnection()
|
||||
case LDAPClient::Params::TLSProtocolVersion::TLS1_1: value = LDAP_OPT_X_TLS_PROTOCOL_TLS1_1; break;
|
||||
case LDAPClient::Params::TLSProtocolVersion::TLS1_2: value = LDAP_OPT_X_TLS_PROTOCOL_TLS1_2; break;
|
||||
}
|
||||
diag(ldap_set_option(handle, LDAP_OPT_X_TLS_PROTOCOL_MIN, &value));
|
||||
handleError(ldap_set_option(handle, LDAP_OPT_X_TLS_PROTOCOL_MIN, &value));
|
||||
}
|
||||
#endif
|
||||
|
||||
@ -288,44 +304,44 @@ bool LDAPClient::openConnection()
|
||||
case LDAPClient::Params::TLSRequireCert::TRY: value = LDAP_OPT_X_TLS_TRY; break;
|
||||
case LDAPClient::Params::TLSRequireCert::DEMAND: value = LDAP_OPT_X_TLS_DEMAND; break;
|
||||
}
|
||||
diag(ldap_set_option(handle, LDAP_OPT_X_TLS_REQUIRE_CERT, &value));
|
||||
handleError(ldap_set_option(handle, LDAP_OPT_X_TLS_REQUIRE_CERT, &value));
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef LDAP_OPT_X_TLS_CERTFILE
|
||||
if (!params.tls_cert_file.empty())
|
||||
diag(ldap_set_option(handle, LDAP_OPT_X_TLS_CERTFILE, params.tls_cert_file.c_str()));
|
||||
handleError(ldap_set_option(handle, LDAP_OPT_X_TLS_CERTFILE, params.tls_cert_file.c_str()));
|
||||
#endif
|
||||
|
||||
#ifdef LDAP_OPT_X_TLS_KEYFILE
|
||||
if (!params.tls_key_file.empty())
|
||||
diag(ldap_set_option(handle, LDAP_OPT_X_TLS_KEYFILE, params.tls_key_file.c_str()));
|
||||
handleError(ldap_set_option(handle, LDAP_OPT_X_TLS_KEYFILE, params.tls_key_file.c_str()));
|
||||
#endif
|
||||
|
||||
#ifdef LDAP_OPT_X_TLS_CACERTFILE
|
||||
if (!params.tls_ca_cert_file.empty())
|
||||
diag(ldap_set_option(handle, LDAP_OPT_X_TLS_CACERTFILE, params.tls_ca_cert_file.c_str()));
|
||||
handleError(ldap_set_option(handle, LDAP_OPT_X_TLS_CACERTFILE, params.tls_ca_cert_file.c_str()));
|
||||
#endif
|
||||
|
||||
#ifdef LDAP_OPT_X_TLS_CACERTDIR
|
||||
if (!params.tls_ca_cert_dir.empty())
|
||||
diag(ldap_set_option(handle, LDAP_OPT_X_TLS_CACERTDIR, params.tls_ca_cert_dir.c_str()));
|
||||
handleError(ldap_set_option(handle, LDAP_OPT_X_TLS_CACERTDIR, params.tls_ca_cert_dir.c_str()));
|
||||
#endif
|
||||
|
||||
#ifdef LDAP_OPT_X_TLS_CIPHER_SUITE
|
||||
if (!params.tls_cipher_suite.empty())
|
||||
diag(ldap_set_option(handle, LDAP_OPT_X_TLS_CIPHER_SUITE, params.tls_cipher_suite.c_str()));
|
||||
handleError(ldap_set_option(handle, LDAP_OPT_X_TLS_CIPHER_SUITE, params.tls_cipher_suite.c_str()));
|
||||
#endif
|
||||
|
||||
#ifdef LDAP_OPT_X_TLS_NEWCTX
|
||||
{
|
||||
const int i_am_a_server = 0;
|
||||
diag(ldap_set_option(handle, LDAP_OPT_X_TLS_NEWCTX, &i_am_a_server));
|
||||
handleError(ldap_set_option(handle, LDAP_OPT_X_TLS_NEWCTX, &i_am_a_server));
|
||||
}
|
||||
#endif
|
||||
|
||||
if (params.enable_tls == LDAPClient::Params::TLSEnable::YES_STARTTLS)
|
||||
diag(ldap_start_tls_s(handle, nullptr, nullptr));
|
||||
handleError(ldap_start_tls_s(handle, nullptr, nullptr));
|
||||
|
||||
final_user_name = escapeForDN(params.user);
|
||||
final_bind_dn = replacePlaceholders(params.bind_dn, { {"{user_name}", final_user_name} });
|
||||
@ -346,7 +362,7 @@ bool LDAPClient::openConnection()
|
||||
if (rc == LDAP_INVALID_CREDENTIALS)
|
||||
return false;
|
||||
|
||||
diag(rc);
|
||||
handleError(rc);
|
||||
}
|
||||
|
||||
// Once bound, run the user DN search query and update the default value, if asked.
|
||||
@ -425,7 +441,7 @@ LDAPClient::SearchResults LDAPClient::search(const SearchParams & search_params)
|
||||
}
|
||||
});
|
||||
|
||||
diag(ldap_search_ext_s(handle, final_base_dn.c_str(), scope, final_search_filter.c_str(), attrs, 0, nullptr, nullptr, &timeout, params.search_limit, &msgs));
|
||||
handleError(ldap_search_ext_s(handle, final_base_dn.c_str(), scope, final_search_filter.c_str(), attrs, 0, nullptr, nullptr, &timeout, params.search_limit, &msgs));
|
||||
|
||||
for (
|
||||
auto * msg = ldap_first_message(handle, msgs);
|
||||
@ -452,7 +468,7 @@ LDAPClient::SearchResults LDAPClient::search(const SearchParams & search_params)
|
||||
|
||||
::berval bv;
|
||||
|
||||
diag(ldap_get_dn_ber(handle, msg, &ber, &bv));
|
||||
handleError(ldap_get_dn_ber(handle, msg, &ber, &bv));
|
||||
|
||||
if (bv.bv_val && bv.bv_len > 0)
|
||||
result.emplace(bv.bv_val, bv.bv_len);
|
||||
@ -504,7 +520,7 @@ LDAPClient::SearchResults LDAPClient::search(const SearchParams & search_params)
|
||||
case LDAP_RES_SEARCH_REFERENCE:
|
||||
{
|
||||
char ** referrals = nullptr;
|
||||
diag(ldap_parse_reference(handle, msg, &referrals, nullptr, 0));
|
||||
handleError(ldap_parse_reference(handle, msg, &referrals, nullptr, 0));
|
||||
|
||||
if (referrals)
|
||||
{
|
||||
@ -528,7 +544,7 @@ LDAPClient::SearchResults LDAPClient::search(const SearchParams & search_params)
|
||||
char * matched_msg = nullptr;
|
||||
char * error_msg = nullptr;
|
||||
|
||||
diag(ldap_parse_result(handle, msg, &rc, &matched_msg, &error_msg, nullptr, nullptr, 0));
|
||||
handleError(ldap_parse_result(handle, msg, &rc, &matched_msg, &error_msg, nullptr, nullptr, 0));
|
||||
|
||||
if (rc != LDAP_SUCCESS)
|
||||
{
|
||||
@ -610,7 +626,7 @@ bool LDAPSimpleAuthClient::authenticate(const RoleSearchParamsList * role_search
|
||||
|
||||
#else // USE_LDAP
|
||||
|
||||
void LDAPClient::diag(const int, String)
|
||||
void LDAPClient::handleError(const int, String)
|
||||
{
|
||||
throw Exception("ClickHouse was built without LDAP support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME);
|
||||
}
|
||||
|
@ -16,6 +16,7 @@
|
||||
#include <set>
|
||||
#include <vector>
|
||||
|
||||
class SipHash;
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -38,7 +39,7 @@ public:
|
||||
String search_filter;
|
||||
String attribute = "cn";
|
||||
|
||||
void combineHash(std::size_t & seed) const;
|
||||
void updateHash(SipHash & hash) const;
|
||||
};
|
||||
|
||||
struct RoleSearchParams
|
||||
@ -46,7 +47,7 @@ public:
|
||||
{
|
||||
String prefix;
|
||||
|
||||
void combineHash(std::size_t & seed) const;
|
||||
void updateHash(SipHash & hash) const;
|
||||
};
|
||||
|
||||
using RoleSearchParamsList = std::vector<RoleSearchParams>;
|
||||
@ -95,7 +96,7 @@ public:
|
||||
ProtocolVersion protocol_version = ProtocolVersion::V3;
|
||||
|
||||
String host;
|
||||
std::uint16_t port = 636;
|
||||
UInt16 port = 636;
|
||||
|
||||
TLSEnable enable_tls = TLSEnable::YES;
|
||||
TLSProtocolVersion tls_minimum_protocol_version = TLSProtocolVersion::TLS1_2;
|
||||
@ -119,9 +120,9 @@ public:
|
||||
std::chrono::seconds operation_timeout{40};
|
||||
std::chrono::seconds network_timeout{30};
|
||||
std::chrono::seconds search_timeout{20};
|
||||
std::uint32_t search_limit = 100;
|
||||
UInt32 search_limit = 256; /// An arbitrary number, no particular motivation for this value.
|
||||
|
||||
void combineCoreHash(std::size_t & seed) const;
|
||||
void updateHash(SipHash & hash) const;
|
||||
};
|
||||
|
||||
explicit LDAPClient(const Params & params_);
|
||||
@ -133,7 +134,7 @@ public:
|
||||
LDAPClient & operator= (LDAPClient &&) = delete;
|
||||
|
||||
protected:
|
||||
MAYBE_NORETURN void diag(int rc, String text = "");
|
||||
MAYBE_NORETURN void handleError(int result_code, String text = "");
|
||||
MAYBE_NORETURN bool openConnection();
|
||||
void closeConnection() noexcept;
|
||||
SearchResults search(const SearchParams & search_params);
|
||||
|
@ -228,6 +228,12 @@ namespace
|
||||
user->access.revokeGrantOption(AccessType::ALL);
|
||||
}
|
||||
|
||||
bool show_named_collections = config.getBool(user_config + ".show_named_collections", false);
|
||||
if (!show_named_collections)
|
||||
{
|
||||
user->access.revoke(AccessType::SHOW_NAMED_COLLECTIONS);
|
||||
}
|
||||
|
||||
String default_database = config.getString(user_config + ".default_database", "");
|
||||
user->default_database = default_database;
|
||||
|
||||
|
@ -30,6 +30,7 @@ namespace ErrorCodes
|
||||
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
|
||||
extern const int NOT_IMPLEMENTED;
|
||||
extern const int TOO_LARGE_STRING_SIZE;
|
||||
extern const int LOGICAL_ERROR;
|
||||
}
|
||||
|
||||
/** Aggregate functions that store one of passed values.
|
||||
@ -485,13 +486,15 @@ struct SingleValueDataString //-V730
|
||||
private:
|
||||
using Self = SingleValueDataString;
|
||||
|
||||
Int32 size = -1; /// -1 indicates that there is no value.
|
||||
Int32 capacity = 0; /// power of two or zero
|
||||
/// Zero size indicates that there is no value. An empty string must have a terminating '\0', therefore the size of an empty string is 1.
|
||||
UInt32 size = 0;
|
||||
UInt32 capacity = 0; /// power of two or zero
|
||||
char * large_data;
|
||||
|
||||
public:
|
||||
static constexpr Int32 AUTOMATIC_STORAGE_SIZE = 64;
|
||||
static constexpr Int32 MAX_SMALL_STRING_SIZE = AUTOMATIC_STORAGE_SIZE - sizeof(size) - sizeof(capacity) - sizeof(large_data);
|
||||
static constexpr UInt32 AUTOMATIC_STORAGE_SIZE = 64;
|
||||
static constexpr UInt32 MAX_SMALL_STRING_SIZE = AUTOMATIC_STORAGE_SIZE - sizeof(size) - sizeof(capacity) - sizeof(large_data);
|
||||
static constexpr UInt32 MAX_STRING_SIZE = std::numeric_limits<Int32>::max();
|
||||
|
||||
private:
|
||||
char small_data[MAX_SMALL_STRING_SIZE]; /// Including the terminating zero.
|
||||
@ -502,7 +505,7 @@ public:
|
||||
|
||||
bool has() const
|
||||
{
|
||||
return size >= 0;
|
||||
return size;
|
||||
}
|
||||
|
||||
private:
|
||||
@ -536,20 +539,27 @@ public:
|
||||
|
||||
void write(WriteBuffer & buf, const ISerialization & /*serialization*/) const
|
||||
{
|
||||
writeBinary(size, buf);
|
||||
if (unlikely(MAX_STRING_SIZE < size))
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "String size is too big ({}), it's a bug", size);
|
||||
|
||||
/// For serialization we use signed Int32 (for historical reasons), -1 means "no value"
|
||||
Int32 size_to_write = size ? size : -1;
|
||||
writeBinary(size_to_write, buf);
|
||||
if (has())
|
||||
buf.write(getData(), size);
|
||||
}
|
||||
|
||||
void allocateLargeDataIfNeeded(Int64 size_to_reserve, Arena * arena)
|
||||
void allocateLargeDataIfNeeded(UInt32 size_to_reserve, Arena * arena)
|
||||
{
|
||||
if (capacity < size_to_reserve)
|
||||
{
|
||||
capacity = static_cast<Int32>(roundUpToPowerOfTwoOrZero(size_to_reserve));
|
||||
/// It might happen if the size was too big and the rounded value does not fit a size_t
|
||||
if (unlikely(capacity < size_to_reserve))
|
||||
if (unlikely(MAX_STRING_SIZE < size_to_reserve))
|
||||
throw Exception(ErrorCodes::TOO_LARGE_STRING_SIZE, "String size is too big ({})", size_to_reserve);
|
||||
|
||||
size_t rounded_capacity = roundUpToPowerOfTwoOrZero(size_to_reserve);
|
||||
chassert(rounded_capacity <= MAX_STRING_SIZE + 1); /// rounded_capacity <= 2^31
|
||||
capacity = static_cast<UInt32>(rounded_capacity);
|
||||
|
||||
/// Don't free large_data here.
|
||||
large_data = arena->alloc(capacity);
|
||||
}
|
||||
@ -557,31 +567,28 @@ public:
|
||||
|
||||
void read(ReadBuffer & buf, const ISerialization & /*serialization*/, Arena * arena)
|
||||
{
|
||||
Int32 rhs_size;
|
||||
readBinary(rhs_size, buf);
|
||||
/// For serialization we use signed Int32 (for historical reasons), -1 means "no value"
|
||||
Int32 rhs_size_signed;
|
||||
readBinary(rhs_size_signed, buf);
|
||||
|
||||
if (rhs_size < 0)
|
||||
if (rhs_size_signed < 0)
|
||||
{
|
||||
/// Don't free large_data here.
|
||||
size = rhs_size;
|
||||
size = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
UInt32 rhs_size = rhs_size_signed;
|
||||
if (rhs_size <= MAX_SMALL_STRING_SIZE)
|
||||
{
|
||||
/// Don't free large_data here.
|
||||
|
||||
size = rhs_size;
|
||||
|
||||
if (size > 0)
|
||||
buf.readStrict(small_data, size);
|
||||
buf.readStrict(small_data, size);
|
||||
}
|
||||
else
|
||||
{
|
||||
/// Reserve one byte more for null-character
|
||||
Int64 rhs_size_to_reserve = rhs_size;
|
||||
rhs_size_to_reserve += 1; /// Avoid overflow
|
||||
allocateLargeDataIfNeeded(rhs_size_to_reserve, arena);
|
||||
allocateLargeDataIfNeeded(rhs_size + 1, arena);
|
||||
size = rhs_size;
|
||||
buf.readStrict(large_data, size);
|
||||
}
|
||||
@ -616,7 +623,10 @@ public:
|
||||
/// Assuming to.has()
|
||||
void changeImpl(StringRef value, Arena * arena)
|
||||
{
|
||||
Int32 value_size = static_cast<Int32>(value.size);
|
||||
if (unlikely(MAX_STRING_SIZE < value.size))
|
||||
throw Exception(ErrorCodes::TOO_LARGE_STRING_SIZE, "String size is too big ({})", value.size);
|
||||
|
||||
UInt32 value_size = static_cast<UInt32>(value.size);
|
||||
|
||||
if (value_size <= MAX_SMALL_STRING_SIZE)
|
||||
{
|
||||
|
@ -155,7 +155,7 @@ public:
|
||||
"Values for {} are expected to be Numeric, Float or Decimal, passed type {}",
|
||||
getName(), value_type->getName()};
|
||||
|
||||
WhichDataType value_type_to_check(value_type);
|
||||
WhichDataType value_type_to_check(value_type_without_nullable);
|
||||
|
||||
/// Do not promote decimal because of implementation issues of this function design
|
||||
/// Currently we cannot get result column type in case of decimal we cannot get decimal scale
|
||||
|
60
src/Analyzer/HashUtils.h
Normal file
@ -0,0 +1,60 @@
|
||||
#pragma once
|
||||
|
||||
#include <Analyzer/IQueryTreeNode.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
/** This structure holds a query tree node pointer and its hash. It can be used as a hash map key to avoid unnecessary hash
|
||||
* recalculations.
|
||||
*
|
||||
* Example of usage:
|
||||
* std::unordered_map<QueryTreeNodeConstRawPtrWithHash, std::string> map;
|
||||
*/
|
||||
template <typename QueryTreeNodePtrType>
|
||||
struct QueryTreeNodeWithHash
|
||||
{
|
||||
QueryTreeNodeWithHash(QueryTreeNodePtrType node_) /// NOLINT
|
||||
: node(std::move(node_))
|
||||
, hash(node->getTreeHash().first)
|
||||
{}
|
||||
|
||||
QueryTreeNodePtrType node = nullptr;
|
||||
size_t hash = 0;
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
inline bool operator==(const QueryTreeNodeWithHash<T> & lhs, const QueryTreeNodeWithHash<T> & rhs)
|
||||
{
|
||||
return lhs.hash == rhs.hash && lhs.node->isEqual(*rhs.node);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
inline bool operator!=(const QueryTreeNodeWithHash<T> & lhs, const QueryTreeNodeWithHash<T> & rhs)
|
||||
{
|
||||
return !(lhs == rhs);
|
||||
}
|
||||
|
||||
using QueryTreeNodePtrWithHash = QueryTreeNodeWithHash<QueryTreeNodePtr>;
|
||||
using QueryTreeNodeRawPtrWithHash = QueryTreeNodeWithHash<IQueryTreeNode *>;
|
||||
using QueryTreeNodeConstRawPtrWithHash = QueryTreeNodeWithHash<const IQueryTreeNode *>;
|
||||
|
||||
using QueryTreeNodePtrWithHashSet = std::unordered_set<QueryTreeNodePtrWithHash>;
|
||||
using QueryTreeNodeConstRawPtrWithHashSet = std::unordered_set<QueryTreeNodeConstRawPtrWithHash>;
|
||||
|
||||
template <typename Value>
|
||||
using QueryTreeNodePtrWithHashMap = std::unordered_map<QueryTreeNodePtrWithHash, Value>;
|
||||
|
||||
template <typename Value>
|
||||
using QueryTreeNodeConstRawPtrWithHashMap = std::unordered_map<QueryTreeNodeConstRawPtrWithHash, Value>;
|
||||
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
struct std::hash<DB::QueryTreeNodeWithHash<T>>
|
||||
{
|
||||
size_t operator()(const DB::QueryTreeNodeWithHash<T> & node_with_hash) const
|
||||
{
|
||||
return node_with_hash.hash;
|
||||
}
|
||||
};
|
@ -8,6 +8,7 @@
|
||||
#include <Analyzer/InDepthQueryTreeVisitor.h>
|
||||
#include <Analyzer/FunctionNode.h>
|
||||
#include <Analyzer/ConstantNode.h>
|
||||
#include <Analyzer/HashUtils.h>
|
||||
|
||||
#include <DataTypes/DataTypesNumber.h>
|
||||
#include <DataTypes/DataTypeArray.h>
|
||||
@ -48,43 +49,24 @@ public:
|
||||
/// Do not apply to `count()` without arguments or `count(*)`; only `count(x)` is supported.
|
||||
return;
|
||||
|
||||
mapping[QueryTreeNodeWithHash(argument_nodes[0])].push_back(&node);
|
||||
argument_to_functions_mapping[argument_nodes[0]].push_back(&node);
|
||||
}
|
||||
|
||||
struct QueryTreeNodeWithHash
|
||||
{
|
||||
const QueryTreeNodePtr & node;
|
||||
IQueryTreeNode::Hash hash;
|
||||
|
||||
explicit QueryTreeNodeWithHash(const QueryTreeNodePtr & node_)
|
||||
: node(node_)
|
||||
, hash(node->getTreeHash())
|
||||
{}
|
||||
|
||||
bool operator==(const QueryTreeNodeWithHash & rhs) const
|
||||
{
|
||||
return hash == rhs.hash && node->isEqual(*rhs.node);
|
||||
}
|
||||
|
||||
struct Hash
|
||||
{
|
||||
size_t operator() (const QueryTreeNodeWithHash & key) const { return key.hash.first ^ key.hash.second; }
|
||||
};
|
||||
};
|
||||
|
||||
/// argument -> list of sum/count/avg functions with this argument
|
||||
std::unordered_map<QueryTreeNodeWithHash, std::vector<QueryTreeNodePtr *>, QueryTreeNodeWithHash::Hash> mapping;
|
||||
QueryTreeNodePtrWithHashMap<std::vector<QueryTreeNodePtr *>> argument_to_functions_mapping;
|
||||
|
||||
private:
|
||||
std::unordered_set<String> names_to_collect;
|
||||
};
|
||||
|
||||
QueryTreeNodePtr createResolvedFunction(ContextPtr context, const String & name, DataTypePtr result_type, QueryTreeNodes arguments)
|
||||
QueryTreeNodePtr createResolvedFunction(const ContextPtr & context, const String & name, const DataTypePtr & result_type, QueryTreeNodes arguments)
|
||||
{
|
||||
auto function_node = std::make_shared<FunctionNode>(name);
|
||||
|
||||
auto function = FunctionFactory::instance().get(name, context);
|
||||
function_node->resolveAsFunction(std::move(function), result_type);
|
||||
function_node->getArguments().getNodes() = std::move(arguments);
|
||||
|
||||
return function_node;
|
||||
}
|
||||
|
||||
@ -94,21 +76,20 @@ FunctionNodePtr createResolvedAggregateFunction(const String & name, const Query
|
||||
|
||||
AggregateFunctionProperties properties;
|
||||
auto aggregate_function = AggregateFunctionFactory::instance().get(name, {argument->getResultType()}, parameters, properties);
|
||||
|
||||
function_node->resolveAsAggregateFunction(aggregate_function, aggregate_function->getReturnType());
|
||||
function_node->getArguments().getNodes() = { argument };
|
||||
|
||||
function_node->getArgumentsNode() = std::make_shared<ListNode>(QueryTreeNodes{argument});
|
||||
return function_node;
|
||||
}
|
||||
|
||||
QueryTreeNodePtr createTupleElementFunction(ContextPtr context, DataTypePtr result_type, QueryTreeNodePtr argument, UInt64 index)
|
||||
QueryTreeNodePtr createTupleElementFunction(const ContextPtr & context, const DataTypePtr & result_type, QueryTreeNodePtr argument, UInt64 index)
|
||||
{
|
||||
return createResolvedFunction(context, "tupleElement", result_type, {argument, std::make_shared<ConstantNode>(index)});
|
||||
return createResolvedFunction(context, "tupleElement", result_type, {std::move(argument), std::make_shared<ConstantNode>(index)});
|
||||
}
|
||||
|
||||
QueryTreeNodePtr createArrayElementFunction(ContextPtr context, DataTypePtr result_type, QueryTreeNodePtr argument, UInt64 index)
|
||||
QueryTreeNodePtr createArrayElementFunction(const ContextPtr & context, const DataTypePtr & result_type, QueryTreeNodePtr argument, UInt64 index)
|
||||
{
|
||||
return createResolvedFunction(context, "arrayElement", result_type, {argument, std::make_shared<ConstantNode>(index)});
|
||||
return createResolvedFunction(context, "arrayElement", result_type, {std::move(argument), std::make_shared<ConstantNode>(index)});
|
||||
}
|
||||
|
||||
void replaceWithSumCount(QueryTreeNodePtr & node, const FunctionNodePtr & sum_count_node, ContextPtr context)
|
||||
@ -151,6 +132,7 @@ FunctionNodePtr createFusedQuantilesNode(const std::vector<QueryTreeNodePtr *> n
|
||||
{
|
||||
Array parameters;
|
||||
parameters.reserve(nodes.size());
|
||||
|
||||
for (const auto * node : nodes)
|
||||
{
|
||||
const FunctionNode & function_node = (*node)->as<const FunctionNode &>();
|
||||
@ -172,6 +154,7 @@ FunctionNodePtr createFusedQuantilesNode(const std::vector<QueryTreeNodePtr *> n
|
||||
|
||||
parameters.push_back(constant_value->getValue());
|
||||
}
|
||||
|
||||
return createResolvedAggregateFunction("quantiles", argument, parameters);
|
||||
}
|
||||
|
||||
@ -181,7 +164,7 @@ void tryFuseSumCountAvg(QueryTreeNodePtr query_tree_node, ContextPtr context)
|
||||
FuseFunctionsVisitor visitor({"sum", "count", "avg"});
|
||||
visitor.visit(query_tree_node);
|
||||
|
||||
for (auto & [argument, nodes] : visitor.mapping)
|
||||
for (auto & [argument, nodes] : visitor.argument_to_functions_mapping)
|
||||
{
|
||||
if (nodes.size() < 2)
|
||||
continue;
|
||||
@ -199,24 +182,22 @@ void tryFuseQuantiles(QueryTreeNodePtr query_tree_node, ContextPtr context)
|
||||
{
|
||||
FuseFunctionsVisitor visitor_quantile({"quantile"});
|
||||
visitor_quantile.visit(query_tree_node);
|
||||
for (auto & [argument, nodes] : visitor_quantile.mapping)
|
||||
|
||||
for (auto & [argument, nodes] : visitor_quantile.argument_to_functions_mapping)
|
||||
{
|
||||
if (nodes.size() < 2)
|
||||
size_t nodes_size = nodes.size();
|
||||
if (nodes_size < 2)
|
||||
continue;
|
||||
|
||||
auto quantiles_node = createFusedQuantilesNode(nodes, argument.node);
|
||||
auto result_array_type = std::dynamic_pointer_cast<const DataTypeArray>(quantiles_node->getResultType());
|
||||
if (!result_array_type)
|
||||
{
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
||||
"Unexpected return type '{}' of function '{}', should be array",
|
||||
quantiles_node->getResultType(), quantiles_node->getFunctionName());
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < nodes.size(); ++i)
|
||||
{
|
||||
for (size_t i = 0; i < nodes_size; ++i)
|
||||
*nodes[i] = createArrayElementFunction(context, result_array_type->getNestedType(), quantiles_node, i + 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -3,6 +3,7 @@
|
||||
#include <Analyzer/InDepthQueryTreeVisitor.h>
|
||||
#include <Analyzer/QueryNode.h>
|
||||
#include <Analyzer/SortNode.h>
|
||||
#include <Analyzer/HashUtils.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -10,35 +11,6 @@ namespace DB
|
||||
namespace
|
||||
{
|
||||
|
||||
struct QueryTreeNodeWithHash
|
||||
{
|
||||
explicit QueryTreeNodeWithHash(const IQueryTreeNode * node_)
|
||||
: node(node_)
|
||||
, hash(node->getTreeHash().first)
|
||||
{}
|
||||
|
||||
const IQueryTreeNode * node = nullptr;
|
||||
size_t hash = 0;
|
||||
};
|
||||
|
||||
struct QueryTreeNodeWithHashHash
|
||||
{
|
||||
size_t operator()(const QueryTreeNodeWithHash & node_with_hash) const
|
||||
{
|
||||
return node_with_hash.hash;
|
||||
}
|
||||
};
|
||||
|
||||
struct QueryTreeNodeWithHashEqualTo
|
||||
{
|
||||
bool operator()(const QueryTreeNodeWithHash & lhs_node, const QueryTreeNodeWithHash & rhs_node) const
|
||||
{
|
||||
return lhs_node.hash == rhs_node.hash && lhs_node.node->isEqual(*rhs_node.node);
|
||||
}
|
||||
};
|
||||
|
||||
using QueryTreeNodeWithHashSet = std::unordered_set<QueryTreeNodeWithHash, QueryTreeNodeWithHashHash, QueryTreeNodeWithHashEqualTo>;
|
||||
|
||||
class OrderByLimitByDuplicateEliminationVisitor : public InDepthQueryTreeVisitor<OrderByLimitByDuplicateEliminationVisitor>
|
||||
{
|
||||
public:
|
||||
@ -93,7 +65,7 @@ public:
|
||||
}
|
||||
|
||||
private:
|
||||
QueryTreeNodeWithHashSet unique_expressions_nodes_set;
|
||||
QueryTreeNodeConstRawPtrWithHashSet unique_expressions_nodes_set;
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -67,6 +67,8 @@
|
||||
#include <Analyzer/InDepthQueryTreeVisitor.h>
|
||||
#include <Analyzer/QueryTreeBuilder.h>
|
||||
|
||||
#include <Common/checkStackSize.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
@ -517,7 +519,7 @@ public:
|
||||
|
||||
private:
|
||||
QueryTreeNodes expressions;
|
||||
std::unordered_map<std::string, std::vector<QueryTreeNodePtr>> alias_name_to_expressions;
|
||||
std::unordered_map<std::string, QueryTreeNodes> alias_name_to_expressions;
|
||||
};
|
||||
|
||||
/** Projection names is name of query tree node that is used in projection part of query node.
|
||||
@ -1100,6 +1102,10 @@ private:
|
||||
|
||||
static void validateJoinTableExpressionWithoutAlias(const QueryTreeNodePtr & join_node, const QueryTreeNodePtr & table_expression_node, IdentifierResolveScope & scope);
|
||||
|
||||
static void expandGroupByAll(QueryNode & query_tree_node_typed);
|
||||
|
||||
static std::pair<bool, UInt64> recursivelyCollectMaxOrdinaryExpressions(QueryTreeNodePtr & node, QueryTreeNodes & into);
|
||||
|
||||
/// Resolve identifier functions
|
||||
|
||||
static QueryTreeNodePtr tryResolveTableIdentifierFromDatabaseCatalog(const Identifier & table_identifier, ContextPtr context);
|
||||
@ -1929,6 +1935,68 @@ void QueryAnalyzer::validateJoinTableExpressionWithoutAlias(const QueryTreeNodeP
|
||||
scope.scope_node->formatASTForErrorMessage());
|
||||
}
|
||||
|
||||
std::pair<bool, UInt64> QueryAnalyzer::recursivelyCollectMaxOrdinaryExpressions(QueryTreeNodePtr & node, QueryTreeNodes & into)
|
||||
{
|
||||
checkStackSize();
|
||||
|
||||
if (node->as<ColumnNode>())
|
||||
{
|
||||
into.push_back(node);
|
||||
return {false, 1};
|
||||
}
|
||||
|
||||
auto * function = node->as<FunctionNode>();
|
||||
|
||||
if (!function)
|
||||
return {false, 0};
|
||||
|
||||
if (function->isAggregateFunction())
|
||||
return {true, 0};
|
||||
|
||||
UInt64 pushed_children = 0;
|
||||
bool has_aggregate = false;
|
||||
|
||||
for (auto & child : function->getArguments().getNodes())
|
||||
{
|
||||
auto [child_has_aggregate, child_pushed_children] = recursivelyCollectMaxOrdinaryExpressions(child, into);
|
||||
has_aggregate |= child_has_aggregate;
|
||||
pushed_children += child_pushed_children;
|
||||
}
|
||||
|
||||
/// The current function is not an aggregate function and there is no aggregate function among its arguments,
|
||||
/// so replace its already-collected arguments with the current function itself
|
||||
if (!has_aggregate)
|
||||
{
|
||||
for (UInt64 i = 0; i < pushed_children; i++)
|
||||
into.pop_back();
|
||||
|
||||
into.push_back(node);
|
||||
pushed_children = 1;
|
||||
}
|
||||
|
||||
return {has_aggregate, pushed_children};
|
||||
}
|
||||
|
||||
/** Expand GROUP BY ALL by extracting all the SELECT-ed expressions that are not aggregate functions.
|
||||
*
|
||||
* As a special case, if a function has both aggregate functions and other columns among its arguments,
|
||||
* the `GROUP BY` keys will contain the maximal non-aggregate expressions that can be extracted from it.
|
||||
*
|
||||
* Example:
|
||||
* SELECT substring(a, 4, 2), substring(substring(a, 1, 2), 1, count(b)) FROM t GROUP BY ALL
|
||||
* will expand as
|
||||
* SELECT substring(a, 4, 2), substring(substring(a, 1, 2), 1, count(b)) FROM t GROUP BY substring(a, 4, 2), substring(a, 1, 2)
|
||||
*/
|
||||
void QueryAnalyzer::expandGroupByAll(QueryNode & query_tree_node_typed)
|
||||
{
|
||||
auto & group_by_nodes = query_tree_node_typed.getGroupBy().getNodes();
|
||||
auto & projection_list = query_tree_node_typed.getProjection();
|
||||
|
||||
for (auto & node : projection_list.getNodes())
|
||||
recursivelyCollectMaxOrdinaryExpressions(node, group_by_nodes);
|
||||
|
||||
}
|
||||
|
||||
|
||||
/// Resolve identifier functions implementation
|
||||
|
||||
@ -2171,18 +2239,19 @@ QueryTreeNodePtr QueryAnalyzer::tryResolveIdentifierFromAliases(const Identifier
|
||||
auto & alias_identifier_node = it->second->as<IdentifierNode &>();
|
||||
auto identifier = alias_identifier_node.getIdentifier();
|
||||
auto lookup_result = tryResolveIdentifier(IdentifierLookup{identifier, identifier_lookup.lookup_context}, scope, identifier_resolve_settings);
|
||||
if (!lookup_result.isResolved())
|
||||
if (!lookup_result.resolved_identifier)
|
||||
{
|
||||
std::unordered_set<Identifier> valid_identifiers;
|
||||
collectScopeWithParentScopesValidIdentifiersForTypoCorrection(identifier, scope, true, false, false, valid_identifiers);
|
||||
|
||||
auto hints = collectIdentifierTypoHints(identifier, valid_identifiers);
|
||||
throw Exception(ErrorCodes::UNKNOWN_IDENTIFIER, "Unknown {} identifier '{}' in scope {}{}",
|
||||
toStringLowercase(IdentifierLookupContext::EXPRESSION),
|
||||
|
||||
throw Exception(ErrorCodes::UNKNOWN_IDENTIFIER, "Unknown {} identifier '{}'. In scope {}{}",
|
||||
toStringLowercase(identifier_lookup.lookup_context),
|
||||
identifier.getFullName(),
|
||||
scope.scope_node->formatASTForErrorMessage(),
|
||||
getHintsErrorMessageSuffix(hints));
|
||||
}
|
||||
|
||||
it->second = lookup_result.resolved_identifier;
|
||||
|
||||
/** During collection of aliases if node is identifier and has alias, we cannot say if it is
|
||||
@ -2193,9 +2262,9 @@ QueryTreeNodePtr QueryAnalyzer::tryResolveIdentifierFromAliases(const Identifier
|
||||
* If we resolved identifier node as function, we must remove identifier node alias from
|
||||
* expression alias map.
|
||||
*/
|
||||
if (identifier_lookup.isExpressionLookup() && it->second)
|
||||
if (identifier_lookup.isExpressionLookup())
|
||||
scope.alias_name_to_lambda_node.erase(identifier_bind_part);
|
||||
else if (identifier_lookup.isFunctionLookup() && it->second)
|
||||
else if (identifier_lookup.isFunctionLookup())
|
||||
scope.alias_name_to_expression_node.erase(identifier_bind_part);
|
||||
|
||||
scope.expressions_in_resolve_process_stack.popNode();
|
||||
@ -3203,11 +3272,9 @@ QueryAnalyzer::QueryTreeNodesWithNames QueryAnalyzer::resolveUnqualifiedMatcher(
|
||||
|
||||
if (auto * array_join_node = table_expression->as<ArrayJoinNode>())
|
||||
{
|
||||
size_t table_expressions_column_nodes_with_names_stack_size = table_expressions_column_nodes_with_names_stack.size();
|
||||
if (table_expressions_column_nodes_with_names_stack_size < 1)
|
||||
if (table_expressions_column_nodes_with_names_stack.empty())
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
||||
"Expected at least 1 table expressions on stack before ARRAY JOIN processing. Actual {}",
|
||||
table_expressions_column_nodes_with_names_stack_size);
|
||||
"Expected at least 1 table expressions on stack before ARRAY JOIN processing");
|
||||
|
||||
auto & table_expression_column_nodes_with_names = table_expressions_column_nodes_with_names_stack.back();
|
||||
|
||||
@ -5388,25 +5455,7 @@ void QueryAnalyzer::resolveQueryJoinTreeNode(QueryTreeNodePtr & join_tree_node,
|
||||
}
|
||||
}
|
||||
|
||||
/// TODO: Special functions that can take query
|
||||
/// TODO: Support qualified matchers for table function
|
||||
|
||||
for (auto & argument_node : table_function_node.getArguments().getNodes())
|
||||
{
|
||||
if (argument_node->getNodeType() == QueryTreeNodeType::MATCHER)
|
||||
{
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS,
|
||||
"Matcher as table function argument is not supported {}. In scope {}",
|
||||
join_tree_node->formatASTForErrorMessage(),
|
||||
scope.scope_node->formatASTForErrorMessage());
|
||||
}
|
||||
|
||||
auto * function_node = argument_node->as<FunctionNode>();
|
||||
if (function_node && table_function_factory.hasNameOrAlias(function_node->getFunctionName()))
|
||||
continue;
|
||||
|
||||
resolveExpressionNode(argument_node, scope, false /*allow_lambda_expression*/, true /*allow_table_expression*/);
|
||||
}
|
||||
resolveExpressionNodeList(table_function_node.getArgumentsNode(), scope, false /*allow_lambda_expression*/, true /*allow_table_expression*/);
|
||||
|
||||
auto table_function_ast = table_function_node.toAST();
|
||||
table_function_ptr->parseArguments(table_function_ast, scope_context);
|
||||
@ -6006,6 +6055,9 @@ void QueryAnalyzer::resolveQuery(const QueryTreeNodePtr & query_node, Identifier
|
||||
node->removeAlias();
|
||||
}
|
||||
|
||||
if (query_node_typed.isGroupByAll())
|
||||
expandGroupByAll(query_node_typed);
|
||||
|
||||
/** Validate aggregates
|
||||
*
|
||||
* 1. Check that there are no aggregate functions and GROUPING function in JOIN TREE, WHERE, PREWHERE, in another aggregate functions.
|
||||
|
@ -2,9 +2,13 @@
|
||||
|
||||
#include <Functions/IFunction.h>
|
||||
|
||||
#include <AggregateFunctions/AggregateFunctionFactory.h>
|
||||
#include <AggregateFunctions/IAggregateFunction.h>
|
||||
|
||||
#include <Analyzer/InDepthQueryTreeVisitor.h>
|
||||
#include <Analyzer/FunctionNode.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
@ -30,7 +34,9 @@ public:
|
||||
if (!function_node || !function_node->isAggregateFunction() || !isUniqFunction(function_node->getFunctionName()))
|
||||
return;
|
||||
|
||||
bool replaced_argument = false;
|
||||
auto & uniq_function_arguments_nodes = function_node->getArguments().getNodes();
|
||||
|
||||
for (auto & uniq_function_argument_node : uniq_function_arguments_nodes)
|
||||
{
|
||||
auto * uniq_function_argument_node_typed = uniq_function_argument_node->as<FunctionNode>();
|
||||
@ -49,7 +55,28 @@ public:
|
||||
|
||||
/// Replace injective function with its single argument
|
||||
uniq_function_argument_node = uniq_function_argument_node_argument_nodes[0];
|
||||
replaced_argument = true;
|
||||
}
|
||||
|
||||
if (!replaced_argument)
|
||||
return;
|
||||
|
||||
const auto & function_node_argument_nodes = function_node->getArguments().getNodes();
|
||||
|
||||
DataTypes argument_types;
|
||||
argument_types.reserve(function_node_argument_nodes.size());
|
||||
|
||||
for (const auto & function_node_argument : function_node_argument_nodes)
|
||||
argument_types.emplace_back(function_node_argument->getResultType());
|
||||
|
||||
AggregateFunctionProperties properties;
|
||||
auto aggregate_function = AggregateFunctionFactory::instance().get(function_node->getFunctionName(),
|
||||
argument_types,
|
||||
function_node->getAggregateFunction()->getParameters(),
|
||||
properties);
|
||||
|
||||
auto function_result_type = function_node->getResultType();
|
||||
function_node->resolveAsAggregateFunction(std::move(aggregate_function), std::move(function_result_type));
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -54,6 +54,9 @@ void QueryNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, s
|
||||
if (is_group_by_with_totals)
|
||||
buffer << ", is_group_by_with_totals: " << is_group_by_with_totals;
|
||||
|
||||
if (is_group_by_all)
|
||||
buffer << ", is_group_by_all: " << is_group_by_all;
|
||||
|
||||
std::string group_by_type;
|
||||
if (is_group_by_with_rollup)
|
||||
group_by_type = "rollup";
|
||||
@ -117,7 +120,7 @@ void QueryNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, s
|
||||
getWhere()->dumpTreeImpl(buffer, format_state, indent + 4);
|
||||
}
|
||||
|
||||
if (hasGroupBy())
|
||||
if (!is_group_by_all && hasGroupBy())
|
||||
{
|
||||
buffer << '\n' << std::string(indent + 2, ' ') << "GROUP BY\n";
|
||||
getGroupBy().dumpTreeImpl(buffer, format_state, indent + 4);
|
||||
@ -198,7 +201,8 @@ bool QueryNode::isEqualImpl(const IQueryTreeNode & rhs) const
|
||||
is_group_by_with_totals == rhs_typed.is_group_by_with_totals &&
|
||||
is_group_by_with_rollup == rhs_typed.is_group_by_with_rollup &&
|
||||
is_group_by_with_cube == rhs_typed.is_group_by_with_cube &&
|
||||
is_group_by_with_grouping_sets == rhs_typed.is_group_by_with_grouping_sets;
|
||||
is_group_by_with_grouping_sets == rhs_typed.is_group_by_with_grouping_sets &&
|
||||
is_group_by_all == rhs_typed.is_group_by_all;
|
||||
}
|
||||
|
||||
void QueryNode::updateTreeHashImpl(HashState & state) const
|
||||
@ -226,6 +230,7 @@ void QueryNode::updateTreeHashImpl(HashState & state) const
|
||||
state.update(is_group_by_with_rollup);
|
||||
state.update(is_group_by_with_cube);
|
||||
state.update(is_group_by_with_grouping_sets);
|
||||
state.update(is_group_by_all);
|
||||
|
||||
if (constant_value)
|
||||
{
|
||||
@ -251,6 +256,7 @@ QueryTreeNodePtr QueryNode::cloneImpl() const
|
||||
result_query_node->is_group_by_with_rollup = is_group_by_with_rollup;
|
||||
result_query_node->is_group_by_with_cube = is_group_by_with_cube;
|
||||
result_query_node->is_group_by_with_grouping_sets = is_group_by_with_grouping_sets;
|
||||
result_query_node->is_group_by_all = is_group_by_all;
|
||||
result_query_node->cte_name = cte_name;
|
||||
result_query_node->projection_columns = projection_columns;
|
||||
result_query_node->constant_value = constant_value;
|
||||
@ -267,6 +273,7 @@ ASTPtr QueryNode::toASTImpl() const
|
||||
select_query->group_by_with_rollup = is_group_by_with_rollup;
|
||||
select_query->group_by_with_cube = is_group_by_with_cube;
|
||||
select_query->group_by_with_grouping_sets = is_group_by_with_grouping_sets;
|
||||
select_query->group_by_all = is_group_by_all;
|
||||
|
||||
if (hasWith())
|
||||
select_query->setExpression(ASTSelectQuery::Expression::WITH, getWith().toAST());
|
||||
@ -283,7 +290,7 @@ ASTPtr QueryNode::toASTImpl() const
|
||||
if (getWhere())
|
||||
select_query->setExpression(ASTSelectQuery::Expression::WHERE, getWhere()->toAST());
|
||||
|
||||
if (hasGroupBy())
|
||||
if (!is_group_by_all && hasGroupBy())
|
||||
select_query->setExpression(ASTSelectQuery::Expression::GROUP_BY, getGroupBy().toAST());
|
||||
|
||||
if (hasHaving())
|
||||
|
@ -176,6 +176,18 @@ public:
|
||||
is_group_by_with_grouping_sets = is_group_by_with_grouping_sets_value;
|
||||
}
|
||||
|
||||
/// Returns true, if query node has GROUP BY ALL modifier, false otherwise
|
||||
bool isGroupByAll() const
|
||||
{
|
||||
return is_group_by_all;
|
||||
}
|
||||
|
||||
/// Set query node GROUP BY ALL modifier value
|
||||
void setIsGroupByAll(bool is_group_by_all_value)
|
||||
{
|
||||
is_group_by_all = is_group_by_all_value;
|
||||
}
|
||||
|
||||
/// Returns true if query node WITH section is not empty, false otherwise
|
||||
bool hasWith() const
|
||||
{
|
||||
@ -580,6 +592,7 @@ private:
|
||||
bool is_group_by_with_rollup = false;
|
||||
bool is_group_by_with_cube = false;
|
||||
bool is_group_by_with_grouping_sets = false;
|
||||
bool is_group_by_all = false;
|
||||
|
||||
std::string cte_name;
|
||||
NamesAndTypes projection_columns;
|
||||
|
@ -215,6 +215,7 @@ QueryTreeNodePtr QueryTreeBuilder::buildSelectExpression(const ASTPtr & select_q
|
||||
current_query_tree->setIsGroupByWithCube(select_query_typed.group_by_with_cube);
|
||||
current_query_tree->setIsGroupByWithRollup(select_query_typed.group_by_with_rollup);
|
||||
current_query_tree->setIsGroupByWithGroupingSets(select_query_typed.group_by_with_grouping_sets);
|
||||
current_query_tree->setIsGroupByAll(select_query_typed.group_by_all);
|
||||
current_query_tree->setOriginalAST(select_query);
|
||||
|
||||
auto select_settings = select_query_typed.settings();
|
||||
|
@ -110,12 +110,12 @@ void registerBackupEngineS3(BackupFactory & factory)
|
||||
|
||||
if (params.open_mode == IBackup::OpenMode::READ)
|
||||
{
|
||||
auto reader = std::make_shared<BackupReaderS3>(S3::URI{Poco::URI{s3_uri}}, access_key_id, secret_access_key, params.context);
|
||||
auto reader = std::make_shared<BackupReaderS3>(S3::URI{s3_uri}, access_key_id, secret_access_key, params.context);
|
||||
return std::make_unique<BackupImpl>(backup_name_for_logging, archive_params, params.base_backup_info, reader, params.context);
|
||||
}
|
||||
else
|
||||
{
|
||||
auto writer = std::make_shared<BackupWriterS3>(S3::URI{Poco::URI{s3_uri}}, access_key_id, secret_access_key, params.context);
|
||||
auto writer = std::make_shared<BackupWriterS3>(S3::URI{s3_uri}, access_key_id, secret_access_key, params.context);
|
||||
return std::make_unique<BackupImpl>(backup_name_for_logging, archive_params, params.base_backup_info, writer, params.context, params.is_internal_backup, params.backup_coordination, params.backup_uuid);
|
||||
}
|
||||
#else
|
||||
|
@ -1401,6 +1401,11 @@ try
|
||||
QueryPipeline pipeline(std::move(pipe));
|
||||
PullingAsyncPipelineExecutor executor(pipeline);
|
||||
|
||||
if (need_render_progress)
|
||||
{
|
||||
pipeline.setProgressCallback([this](const Progress & progress){ onProgress(progress); });
|
||||
}
|
||||
|
||||
Block block;
|
||||
while (executor.pull(block))
|
||||
{
|
||||
@ -1445,12 +1450,6 @@ catch (...)
|
||||
|
||||
void ClientBase::sendDataFromStdin(Block & sample, const ColumnsDescription & columns_description, ASTPtr parsed_query)
|
||||
{
|
||||
if (need_render_progress)
|
||||
{
|
||||
/// Add callback to track reading from fd.
|
||||
std_in.setProgressCallback(global_context);
|
||||
}
|
||||
|
||||
/// Send data read from stdin.
|
||||
try
|
||||
{
|
||||
|
@ -171,6 +171,11 @@ protected:
|
||||
|
||||
void initTtyBuffer(ProgressOption progress);
|
||||
|
||||
/// Should be declared among the first members so that they are destroyed last,
|
||||
/// since other members can use them.
|
||||
SharedContextHolder shared_context;
|
||||
ContextMutablePtr global_context;
|
||||
|
||||
bool is_interactive = false; /// Use either interactive line editing interface or batch mode.
|
||||
bool is_multiquery = false;
|
||||
bool delayed_interactive = false;
|
||||
@ -208,9 +213,6 @@ protected:
|
||||
/// Settings specified via command line args
|
||||
Settings cmd_settings;
|
||||
|
||||
SharedContextHolder shared_context;
|
||||
ContextMutablePtr global_context;
|
||||
|
||||
/// thread status should be destructed before shared context because it relies on process list.
|
||||
std::optional<ThreadStatus> thread_status;
|
||||
|
||||
|
@ -524,11 +524,13 @@ void ColumnArray::insertRangeFrom(const IColumn & src, size_t start, size_t leng
|
||||
size_t nested_offset = src_concrete.offsetAt(start);
|
||||
size_t nested_length = src_concrete.getOffsets()[start + length - 1] - nested_offset;
|
||||
|
||||
Offsets & cur_offsets = getOffsets();
|
||||
/// Reserve offsets beforehand to make this more exception-safe (in case of MEMORY_LIMIT_EXCEEDED)
|
||||
cur_offsets.reserve(cur_offsets.size() + length);
|
||||
|
||||
getData().insertRangeFrom(src_concrete.getData(), nested_offset, nested_length);
|
||||
|
||||
Offsets & cur_offsets = getOffsets();
|
||||
const Offsets & src_offsets = src_concrete.getOffsets();
|
||||
|
||||
if (start == 0 && cur_offsets.empty())
|
||||
{
|
||||
cur_offsets.assign(src_offsets.begin(), src_offsets.begin() + length);
|
||||
|
@ -124,6 +124,9 @@ void ColumnString::insertRangeFrom(const IColumn & src, size_t start, size_t len
|
||||
size_t nested_offset = src_concrete.offsetAt(start);
|
||||
size_t nested_length = src_concrete.offsets[start + length - 1] - nested_offset;
|
||||
|
||||
/// Reserve offsets beforehand to make this more exception-safe (in case of MEMORY_LIMIT_EXCEEDED)
|
||||
offsets.reserve(offsets.size() + length);
|
||||
|
||||
size_t old_chars_size = chars.size();
|
||||
chars.resize(old_chars_size + nested_length);
|
||||
memcpy(&chars[old_chars_size], &src_concrete.chars[nested_offset], nested_length);
|
||||
|
@ -5,6 +5,7 @@
|
||||
#define APPLY_FOR_METRICS(M) \
|
||||
M(Query, "Number of executing queries") \
|
||||
M(Merge, "Number of executing background merges") \
|
||||
M(Move, "Number of currently executing moves") \
|
||||
M(PartMutation, "Number of mutations (ALTER DELETE/UPDATE)") \
|
||||
M(ReplicatedFetch, "Number of data parts being fetched from replica") \
|
||||
M(ReplicatedSend, "Number of data parts being sent to replicas") \
|
||||
|
@ -27,6 +27,14 @@ public:
|
||||
/// NOTE: Adding events into distant past (further than `period`) must be avoided.
|
||||
void add(double now, double count)
|
||||
{
|
||||
// Remove data for the initial heating stage that can be present at the beginning of a query.
|
||||
// Otherwise it leads to a wrong gradual increase of the average value, making the algorithm not very reactive.
|
||||
if (count != 0.0 && ++data_points < 5)
|
||||
{
|
||||
start = events.time;
|
||||
events = ExponentiallySmoothedAverage();
|
||||
}
|
||||
|
||||
if (now - period <= start) // precise counting mode
|
||||
events = ExponentiallySmoothedAverage(events.value + count, now);
|
||||
else // exponential smoothing mode
|
||||
@ -51,6 +59,7 @@ public:
|
||||
{
|
||||
start = now;
|
||||
events = ExponentiallySmoothedAverage();
|
||||
data_points = 0;
|
||||
}
|
||||
|
||||
private:
|
||||
@ -58,6 +67,7 @@ private:
|
||||
const double half_decay_time;
|
||||
double start; // Instant in past without events before it; when measurement started or reset
|
||||
ExponentiallySmoothedAverage events; // Estimated number of events in the last `period`
|
||||
size_t data_points = 0;
|
||||
};
|
||||
|
||||
}
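A toy standalone sketch of the warm-up handling added to EventRateMeter::add() above; the 5-sample threshold mirrors the diff, while ToyRateMeter and its members are purely illustrative stand-ins for the real class:

#include <cstddef>

// Model of the warm-up reset: the first few non-zero samples restart the
// measurement so a query's initial ramp-up does not drag the average down.
struct ToyRateMeter
{
    double start = 0.0;           // instant before which there were no events
    double value = 0.0;           // accumulated (later: smoothed) event count
    std::size_t data_points = 0;  // non-zero samples seen so far

    void add(double now, double count)
    {
        if (count != 0.0 && ++data_points < 5)
        {
            start = now;   // restart the measurement window
            value = 0.0;   // and drop whatever was accumulated so far
        }
        value += count;    // stand-in for the precise / smoothed accumulation
    }
};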
|
||||
|
@ -220,7 +220,7 @@ void MemoryTracker::allocImpl(Int64 size, bool throw_if_memory_exceeded, MemoryT
|
||||
Int64 limit_to_check = current_hard_limit;
|
||||
|
||||
#if USE_JEMALLOC
|
||||
if (level == VariableContext::Global)
|
||||
if (level == VariableContext::Global && allow_use_jemalloc_memory.load(std::memory_order_relaxed))
|
||||
{
|
||||
/// Jemalloc arenas may keep some extra memory.
|
||||
/// This memory was subtracted from RSS to decrease memory drift.
|
||||
|
@ -55,6 +55,7 @@ private:
|
||||
std::atomic<Int64> soft_limit {0};
|
||||
std::atomic<Int64> hard_limit {0};
|
||||
std::atomic<Int64> profiler_limit {0};
|
||||
std::atomic_bool allow_use_jemalloc_memory {true};
|
||||
|
||||
static std::atomic<Int64> free_memory_in_allocator_arenas;
|
||||
|
||||
@ -125,6 +126,10 @@ public:
|
||||
{
|
||||
return soft_limit.load(std::memory_order_relaxed);
|
||||
}
|
||||
void setAllowUseJemallocMemory(bool value)
|
||||
{
|
||||
allow_use_jemalloc_memory.store(value, std::memory_order_relaxed);
|
||||
}
|
||||
|
||||
/** Set limit if it was not set.
|
||||
* Otherwise, set limit to new value, if new value is greater than previous limit.
|
||||
|
@ -123,13 +123,16 @@ void ProgressIndication::writeFinalProgress()
|
||||
if (progress.read_rows < 1000)
|
||||
return;
|
||||
|
||||
std::cout << "Processed " << formatReadableQuantity(progress.read_rows) << " rows, "
|
||||
<< formatReadableSizeWithDecimalSuffix(progress.read_bytes);
|
||||
UInt64 processed_rows = progress.read_rows + progress.written_rows;
|
||||
UInt64 processed_bytes = progress.read_bytes + progress.written_bytes;
|
||||
|
||||
std::cout << "Processed " << formatReadableQuantity(processed_rows) << " rows, "
|
||||
<< formatReadableSizeWithDecimalSuffix(processed_bytes);
|
||||
|
||||
UInt64 elapsed_ns = getElapsedNanoseconds();
|
||||
if (elapsed_ns)
|
||||
std::cout << " (" << formatReadableQuantity(progress.read_rows * 1000000000.0 / elapsed_ns) << " rows/s., "
|
||||
<< formatReadableSizeWithDecimalSuffix(progress.read_bytes * 1000000000.0 / elapsed_ns) << "/s.)";
|
||||
std::cout << " (" << formatReadableQuantity(processed_rows * 1000000000.0 / elapsed_ns) << " rows/s., "
|
||||
<< formatReadableSizeWithDecimalSuffix(processed_bytes * 1000000000.0 / elapsed_ns) << "/s.)";
|
||||
else
|
||||
std::cout << ". ";
|
||||
}
|
||||
@ -164,16 +167,18 @@ void ProgressIndication::writeProgress(WriteBufferFromFileDescriptor & message)
|
||||
|
||||
size_t prefix_size = message.count();
|
||||
|
||||
UInt64 processed_rows = progress.read_rows + progress.written_rows;
|
||||
UInt64 processed_bytes = progress.read_bytes + progress.written_bytes;
|
||||
message << indicator << " Progress: ";
|
||||
message
|
||||
<< formatReadableQuantity(progress.read_rows) << " rows, "
|
||||
<< formatReadableSizeWithDecimalSuffix(progress.read_bytes);
|
||||
<< formatReadableQuantity(processed_rows) << " rows, "
|
||||
<< formatReadableSizeWithDecimalSuffix(processed_bytes);
|
||||
|
||||
UInt64 elapsed_ns = getElapsedNanoseconds();
|
||||
if (elapsed_ns)
|
||||
message << " ("
|
||||
<< formatReadableQuantity(progress.read_rows * 1000000000.0 / elapsed_ns) << " rows/s., "
|
||||
<< formatReadableSizeWithDecimalSuffix(progress.read_bytes * 1000000000.0 / elapsed_ns) << "/s.) ";
|
||||
<< formatReadableQuantity(processed_rows * 1000000000.0 / elapsed_ns) << " rows/s., "
|
||||
<< formatReadableSizeWithDecimalSuffix(processed_bytes * 1000000000.0 / elapsed_ns) << "/s.) ";
|
||||
else
|
||||
message << ". ";
|
||||
|
||||
|
@ -90,7 +90,7 @@ private:
|
||||
|
||||
bool write_progress_on_update = false;
|
||||
|
||||
EventRateMeter cpu_usage_meter{static_cast<double>(clock_gettime_ns()), 3'000'000'000 /*ns*/}; // average cpu utilization last 3 second
|
||||
EventRateMeter cpu_usage_meter{static_cast<double>(clock_gettime_ns()), 2'000'000'000 /*ns*/}; // average cpu utilization last 2 second
|
||||
HostToThreadTimesMap thread_data;
|
||||
/// In case of all of the above:
|
||||
/// - clickhouse-local
|
||||
|
@ -189,6 +189,13 @@ public:
|
||||
finalize();
|
||||
return v0 ^ v1 ^ v2 ^ v3;
|
||||
}
|
||||
|
||||
UInt128 get128()
|
||||
{
|
||||
UInt128 res;
|
||||
get128(res);
|
||||
return res;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@ -208,9 +215,7 @@ inline UInt128 sipHash128(const char * data, const size_t size)
|
||||
{
|
||||
SipHash hash;
|
||||
hash.update(data, size);
|
||||
UInt128 res;
|
||||
hash.get128(res);
|
||||
return res;
|
||||
return hash.get128();
|
||||
}
|
||||
|
||||
inline UInt64 sipHash64(const char * data, const size_t size)
|
||||
|
@ -8,6 +8,7 @@
|
||||
|
||||
#include "hasLinuxCapability.h"
|
||||
#include <base/unaligned.h>
|
||||
#include <Common/logger_useful.h>
|
||||
|
||||
#include <cerrno>
|
||||
#include <cstdio>
|
||||
@ -205,6 +206,20 @@ bool checkPermissionsImpl()
|
||||
{
|
||||
TaskStatsInfoGetter();
|
||||
}
|
||||
catch (const Exception & e)
|
||||
{
|
||||
if (e.code() == ErrorCodes::NETLINK_ERROR)
|
||||
{
|
||||
/// This error happens all the time when running inside Docker - consider it ok,
|
||||
/// don't create noise with this error.
|
||||
LOG_DEBUG(&Poco::Logger::get(__PRETTY_FUNCTION__), "{}", getCurrentExceptionMessage(false));
|
||||
}
|
||||
else
|
||||
{
|
||||
tryLogCurrentException(__PRETTY_FUNCTION__);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
tryLogCurrentException(__PRETTY_FUNCTION__);
|
||||
|
@ -1,6 +1,7 @@
|
||||
#pragma once
|
||||
|
||||
#include <base/types.h>
|
||||
#include <base/getThreadId.h>
|
||||
#include <Common/ProfileEvents.h>
|
||||
#include <sys/time.h>
|
||||
#include <sys/resource.h>
|
||||
@ -47,6 +48,8 @@ struct RUsageCounters
|
||||
UInt64 soft_page_faults = 0;
|
||||
UInt64 hard_page_faults = 0;
|
||||
|
||||
UInt64 thread_id = 0;
|
||||
|
||||
RUsageCounters() = default;
|
||||
RUsageCounters(const ::rusage & rusage_, UInt64 real_time_)
|
||||
{
|
||||
@ -61,6 +64,8 @@ struct RUsageCounters
|
||||
|
||||
soft_page_faults = static_cast<UInt64>(rusage.ru_minflt);
|
||||
hard_page_faults = static_cast<UInt64>(rusage.ru_majflt);
|
||||
|
||||
thread_id = getThreadId();
|
||||
}
|
||||
|
||||
static RUsageCounters current()
|
||||
@ -78,6 +83,12 @@ struct RUsageCounters
|
||||
|
||||
static void incrementProfileEvents(const RUsageCounters & prev, const RUsageCounters & curr, ProfileEvents::Counters & profile_events)
|
||||
{
|
||||
chassert(prev.thread_id == curr.thread_id);
|
||||
/// LONG_MAX is ~106751 days
|
||||
chassert(curr.real_time - prev.real_time < LONG_MAX);
|
||||
chassert(curr.user_time - prev.user_time < LONG_MAX);
|
||||
chassert(curr.sys_time - prev.sys_time < LONG_MAX);
|
||||
|
||||
profile_events.increment(ProfileEvents::RealTimeMicroseconds, (curr.real_time - prev.real_time) / 1000U);
|
||||
profile_events.increment(ProfileEvents::UserTimeMicroseconds, (curr.user_time - prev.user_time) / 1000U);
|
||||
profile_events.increment(ProfileEvents::SystemTimeMicroseconds, (curr.sys_time - prev.sys_time) / 1000U);
|
||||
|
@ -179,8 +179,8 @@ protected:
|
||||
/// Is used to send logs from logs_queue to client in case of fatal errors.
|
||||
std::function<void()> fatal_error_callback;
|
||||
|
||||
/// It is used to avoid enabling the query profiler when you have multiple ThreadStatus in the same thread
|
||||
bool query_profiler_enabled = true;
|
||||
/// See setInternalThread()
|
||||
bool internal_thread = false;
|
||||
|
||||
/// Requires access to query_id.
|
||||
friend class MemoryTrackerThreadSwitcher;
|
||||
@ -225,11 +225,21 @@ public:
|
||||
return global_context.lock();
|
||||
}
|
||||
|
||||
void disableProfiling()
|
||||
{
|
||||
assert(!query_profiler_real && !query_profiler_cpu);
|
||||
query_profiler_enabled = false;
|
||||
}
|
||||
/// "Internal" ThreadStatus is used for materialized views for separate
|
||||
/// tracking into system.query_views_log
|
||||
///
|
||||
/// You can have multiple internal threads, but only one non-internal with
|
||||
/// the same thread_id.
|
||||
///
|
||||
/// "Internal" thread:
|
||||
/// - cannot have query profiler
|
||||
/// since the running (main query) thread should already have one
|
||||
/// - should not try to obtain latest counter on detach
|
||||
/// because detaching of such threads will be done from a different
|
||||
/// thread_id, and some counters are not available (i.e. getrusage()),
|
||||
/// but anyway they are accounted correctly in the main ThreadStatus of a
|
||||
/// query.
|
||||
void setInternalThread();
|
||||
|
||||
/// Starts new query and create new thread group for it, current thread becomes master thread of the query
|
||||
void initializeQuery();
|
||||
|
@ -47,8 +47,8 @@ bool CachedCompressedReadBuffer::nextImpl()
|
||||
|
||||
auto cell = std::make_shared<UncompressedCacheCell>();
|
||||
|
||||
size_t size_decompressed;
|
||||
size_t size_compressed_without_checksum;
|
||||
size_t size_decompressed = 0;
|
||||
size_t size_compressed_without_checksum = 0;
|
||||
cell->compressed_size = readCompressedData(size_decompressed, size_compressed_without_checksum, false);
|
||||
|
||||
if (cell->compressed_size)
|
||||
|
@ -65,7 +65,7 @@ void KeeperSnapshotManagerS3::updateS3Configuration(const Poco::Util::AbstractCo
|
||||
auto auth_settings = S3::AuthSettings::loadFromConfig(config_prefix, config);
|
||||
|
||||
auto endpoint = config.getString(config_prefix + ".endpoint");
|
||||
auto new_uri = S3::URI{Poco::URI(endpoint)};
|
||||
auto new_uri = S3::URI{endpoint};
|
||||
|
||||
{
|
||||
std::lock_guard client_lock{snapshot_s3_client_mutex};
|
||||
|
@ -667,9 +667,15 @@ Names Block::getDataTypeNames() const
|
||||
}
|
||||
|
||||
|
||||
std::unordered_map<String, size_t> Block::getNamesToIndexesMap() const
|
||||
Block::NameMap Block::getNamesToIndexesMap() const
|
||||
{
|
||||
return index_by_name;
|
||||
NameMap res;
|
||||
res.reserve(index_by_name.size());
|
||||
|
||||
for (const auto & [name, index] : index_by_name)
|
||||
res[name] = index;
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
|
@ -5,6 +5,8 @@
|
||||
#include <Core/ColumnsWithTypeAndName.h>
|
||||
#include <Core/NamesAndTypes.h>
|
||||
|
||||
#include <Common/HashTable/HashMap.h>
|
||||
|
||||
#include <initializer_list>
|
||||
#include <list>
|
||||
#include <map>
|
||||
@ -93,7 +95,10 @@ public:
|
||||
Names getNames() const;
|
||||
DataTypes getDataTypes() const;
|
||||
Names getDataTypeNames() const;
|
||||
std::unordered_map<String, size_t> getNamesToIndexesMap() const;
|
||||
|
||||
/// Hash table that maps `column name -> position in the block`.
|
||||
using NameMap = HashMap<StringRef, size_t, StringRefHash>;
|
||||
NameMap getNamesToIndexesMap() const;
|
||||
|
||||
Serializations getSerializations() const;
|
||||
|
||||
|
@ -851,6 +851,9 @@ static constexpr UInt64 operator""_GiB(unsigned long long value)
|
||||
M(Bool, output_format_sql_insert_include_column_names, true, "Include column names in INSERT query", 0) \
|
||||
M(Bool, output_format_sql_insert_use_replace, false, "Use REPLACE statement instead of INSERT", 0) \
|
||||
M(Bool, output_format_sql_insert_quote_names, true, "Quote column names with '`' characters", 0) \
|
||||
\
|
||||
M(Bool, output_format_bson_string_as_string, false, "Use BSON String type instead of Binary for String columns.", 0) \
|
||||
M(Bool, input_format_bson_skip_fields_with_unsupported_types_in_schema_inference, false, "Skip fields with unsupported types while schema inference for format BSON.", 0) \
|
||||
|
||||
// End of FORMAT_FACTORY_SETTINGS
|
||||
// Please add settings non-related to formats into the COMMON_SETTINGS above.
|
||||
|
@ -981,4 +981,11 @@ Field FieldVisitorFoldDimension::operator()(const Array & x) const
|
||||
return res;
|
||||
}
|
||||
|
||||
void setAllObjectsToDummyTupleType(NamesAndTypesList & columns)
|
||||
{
|
||||
for (auto & column : columns)
|
||||
if (column.type->hasDynamicSubcolumns())
|
||||
column.type = createConcreteEmptyDynamicColumn(column.type);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -162,6 +162,8 @@ private:
|
||||
size_t num_dimensions_to_fold;
|
||||
};
|
||||
|
||||
void setAllObjectsToDummyTupleType(NamesAndTypesList & columns);
|
||||
|
||||
/// Receives range of objects, which contains collections
|
||||
/// of columns-like objects (e.g. ColumnsDescription or NamesAndTypesList)
|
||||
/// and deduces the common types of object columns for all entries.
|
||||
|
@ -284,7 +284,7 @@ std::vector<DictionaryAttribute> DictionaryStructure::getAttributes(
|
||||
std::unordered_set<String> attribute_names;
|
||||
std::vector<DictionaryAttribute> res_attributes;
|
||||
|
||||
const FormatSettings format_settings;
|
||||
const FormatSettings format_settings = {};
|
||||
|
||||
for (const auto & config_elem : config_elems)
|
||||
{
|
||||
|
@ -62,7 +62,7 @@ struct ExternalQueryBuilder
|
||||
|
||||
|
||||
private:
|
||||
const FormatSettings format_settings;
|
||||
const FormatSettings format_settings = {};
|
||||
|
||||
void composeLoadAllQuery(WriteBuffer & out) const;
|
||||
|
||||
|
@ -74,7 +74,6 @@ void registerDictionarySourceMongoDB(DictionarySourceFactory & factory)
|
||||
// Poco/MongoDB/BSONWriter.h:54: void writeCString(const std::string & value);
|
||||
// src/IO/WriteHelpers.h:146 #define writeCString(s, buf)
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <Processors/Transforms/MongoDBSource.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
|
@ -1,5 +1,6 @@
|
||||
#pragma once
|
||||
|
||||
#include <Processors/Transforms/MongoDBSource.h>
|
||||
#include <Core/Block.h>
|
||||
|
||||
#include "DictionaryStructure.h"
|
||||
|
@ -42,7 +42,7 @@ std::unique_ptr<ReadBufferFromFileBase> createReadBufferFromFileBase(
|
||||
if (read_hint.has_value())
|
||||
estimated_size = *read_hint;
|
||||
else if (file_size.has_value())
|
||||
estimated_size = file_size.has_value() ? *file_size : 0;
|
||||
estimated_size = *file_size;
|
||||
|
||||
if (!existing_memory
|
||||
&& settings.local_fs_method == LocalFSReadMethod::mmap
|
||||
@ -158,7 +158,15 @@ std::unique_ptr<ReadBufferFromFileBase> createReadBufferFromFileBase(
|
||||
#endif
|
||||
|
||||
ProfileEvents::increment(ProfileEvents::CreatedReadBufferOrdinary);
|
||||
return create(settings.local_fs_buffer_size, flags);
|
||||
|
||||
size_t buffer_size = settings.local_fs_buffer_size;
|
||||
/// Check if the buffer can be smaller than default
|
||||
if (read_hint.has_value() && *read_hint > 0 && *read_hint < buffer_size)
|
||||
buffer_size = *read_hint;
|
||||
if (file_size.has_value() && *file_size < buffer_size)
|
||||
buffer_size = *file_size;
|
||||
|
||||
return create(buffer_size, flags);
|
||||
}
|
||||
|
||||
}
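The tail of the hunk above no longer always uses the default local read buffer size; it shrinks the buffer when a read hint or the known file size says less data will be read. A minimal sketch of that selection, with chooseBufferSize, default_buffer_size, read_hint and file_size as hypothetical stand-ins for the real settings and parameters:

#include <cstddef>
#include <optional>

// Start from the configured default and clamp it to the expected read size.
std::size_t chooseBufferSize(std::size_t default_buffer_size,
                             std::optional<std::size_t> read_hint,
                             std::optional<std::size_t> file_size)
{
    std::size_t buffer_size = default_buffer_size;
    if (read_hint && *read_hint > 0 && *read_hint < buffer_size)
        buffer_size = *read_hint;
    if (file_size && *file_size < buffer_size)
        buffer_size = *file_size;
    return buffer_size;
}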
|
||||
|
@ -4,6 +4,7 @@
|
||||
#include <Common/getRandomASCIIString.h>
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <IO/ReadHelpers.h>
|
||||
#include <optional>
|
||||
#include <ranges>
|
||||
#include <filesystem>
|
||||
|
||||
@ -62,7 +63,7 @@ UnlinkFileOperation::UnlinkFileOperation(const std::string & path_, IDisk & disk
|
||||
|
||||
void UnlinkFileOperation::execute(std::unique_lock<std::shared_mutex> &)
|
||||
{
|
||||
auto buf = disk.readFile(path);
|
||||
auto buf = disk.readFile(path, ReadSettings{}, std::nullopt, disk.getFileSize(path));
|
||||
readStringUntilEOF(prev_data, *buf);
|
||||
disk.removeFile(path);
|
||||
}
|
||||
|
@ -658,7 +658,7 @@ std::unique_ptr<IObjectStorage> S3ObjectStorage::cloneObjectStorage(
|
||||
return std::make_unique<S3ObjectStorage>(
|
||||
std::move(new_client), std::move(new_s3_settings),
|
||||
version_id, s3_capabilities, new_namespace,
|
||||
S3::URI(Poco::URI(config.getString(config_prefix + ".endpoint"))).endpoint);
|
||||
config.getString(config_prefix + ".endpoint"));
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -137,7 +137,7 @@ std::unique_ptr<Aws::S3::S3Client> getClient(
|
||||
settings.request_settings.get_request_throttler,
|
||||
settings.request_settings.put_request_throttler);
|
||||
|
||||
S3::URI uri(Poco::URI(config.getString(config_prefix + ".endpoint")));
|
||||
S3::URI uri(config.getString(config_prefix + ".endpoint"));
|
||||
if (uri.key.back() != '/')
|
||||
throw Exception("S3 path must ends with '/', but '" + uri.key + "' doesn't.", ErrorCodes::BAD_ARGUMENTS);
|
||||
|
||||
|
@ -104,7 +104,7 @@ void registerDiskS3(DiskFactory & factory, bool global_skip_access_check)
|
||||
ContextPtr context,
|
||||
const DisksMap & /*map*/) -> DiskPtr
|
||||
{
|
||||
S3::URI uri(Poco::URI(config.getString(config_prefix + ".endpoint")));
|
||||
S3::URI uri(config.getString(config_prefix + ".endpoint"));
|
||||
|
||||
if (uri.key.empty())
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "No key in S3 uri: {}", uri.uri.toString());
|
||||
|
106
src/Formats/BSONTypes.cpp
Normal file
@ -0,0 +1,106 @@
|
||||
#include <Formats/BSONTypes.h>
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/hex.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int UNKNOWN_TYPE;
|
||||
}
|
||||
|
||||
static std::string byteToHexString(uint8_t byte)
|
||||
{
|
||||
return "0x" + getHexUIntUppercase(byte);
|
||||
}
|
||||
|
||||
BSONType getBSONType(uint8_t value)
|
||||
{
|
||||
if ((value >= 0x01 && value <= 0x13) || value == 0xFF || value == 0x7f)
|
||||
return BSONType(value);
|
||||
|
||||
throw Exception(ErrorCodes::UNKNOWN_TYPE, "Unknown BSON type: {}", byteToHexString(value));
|
||||
}
|
||||
|
||||
BSONBinarySubtype getBSONBinarySubtype(uint8_t value)
|
||||
{
|
||||
if (value <= 0x07)
|
||||
return BSONBinarySubtype(value);
|
||||
|
||||
throw Exception(ErrorCodes::UNKNOWN_TYPE, "Unknown BSON binary subtype: {}", byteToHexString(value));
|
||||
}
|
||||
|
||||
std::string getBSONTypeName(BSONType type)
|
||||
{
|
||||
switch (type)
|
||||
{
|
||||
case BSONType::BINARY:
|
||||
return "Binary";
|
||||
case BSONType::SYMBOL:
|
||||
return "Symbol";
|
||||
case BSONType::ARRAY:
|
||||
return "Array";
|
||||
case BSONType::DOCUMENT:
|
||||
return "Document";
|
||||
case BSONType::TIMESTAMP:
|
||||
return "Timestamp";
|
||||
case BSONType::INT64:
|
||||
return "Int64";
|
||||
case BSONType::INT32:
|
||||
return "Int32";
|
||||
case BSONType::BOOL:
|
||||
return "Bool";
|
||||
case BSONType::DOUBLE:
|
||||
return "Double";
|
||||
case BSONType::STRING:
|
||||
return "String";
|
||||
case BSONType::DECIMAL128:
|
||||
return "Decimal128";
|
||||
case BSONType::JAVA_SCRIPT_CODE_W_SCOPE:
|
||||
return "JavaScript code w/ scope";
|
||||
case BSONType::JAVA_SCRIPT_CODE:
|
||||
return "JavaScript code";
|
||||
case BSONType::DB_POINTER:
|
||||
return "DBPointer";
|
||||
case BSONType::REGEXP:
|
||||
return "Regexp";
|
||||
case BSONType::DATETIME:
|
||||
return "Datetime";
|
||||
case BSONType::OBJECT_ID:
|
||||
return "ObjectId";
|
||||
case BSONType::UNDEFINED:
|
||||
return "Undefined";
|
||||
case BSONType::NULL_VALUE:
|
||||
return "Null";
|
||||
case BSONType::MAX_KEY:
|
||||
return "Max key";
|
||||
case BSONType::MIN_KEY:
|
||||
return "Min key";
|
||||
}
|
||||
}
|
||||
|
||||
std::string getBSONBinarySubtypeName(BSONBinarySubtype subtype)
|
||||
{
|
||||
switch (subtype)
|
||||
{
|
||||
case BSONBinarySubtype::BINARY:
|
||||
return "Binary";
|
||||
case BSONBinarySubtype::FUNCTION:
|
||||
return "Function";
|
||||
case BSONBinarySubtype::BINARY_OLD:
|
||||
return "Binary (Old)";
|
||||
case BSONBinarySubtype::UUID_OLD:
|
||||
return "UUID (Old)";
|
||||
case BSONBinarySubtype::UUID:
|
||||
return "UUID";
|
||||
case BSONBinarySubtype::MD5:
|
||||
return "MD5";
|
||||
case BSONBinarySubtype::ENCRYPTED_BSON_VALUE:
|
||||
return "Encrypted BSON value";
|
||||
case BSONBinarySubtype::COMPRESSED_BSON_COLUMN:
|
||||
return "Compressed BSON column";
|
||||
}
|
||||
}
|
||||
|
||||
}
|
57
src/Formats/BSONTypes.h
Normal file
@ -0,0 +1,57 @@
|
||||
#pragma once
|
||||
|
||||
#include <cstdint>
|
||||
#include <string>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
static const uint8_t BSON_DOCUMENT_END = 0x00;
|
||||
using BSONSizeT = uint32_t;
|
||||
static const BSONSizeT MAX_BSON_SIZE = std::numeric_limits<BSONSizeT>::max();
|
||||
|
||||
/// See details on https://bsonspec.org/spec.html
|
||||
enum class BSONType
|
||||
{
|
||||
DOUBLE = 0x01,
|
||||
STRING = 0x02,
|
||||
DOCUMENT = 0x03,
|
||||
ARRAY = 0x04,
|
||||
BINARY = 0x05,
|
||||
UNDEFINED = 0x06,
|
||||
OBJECT_ID = 0x07,
|
||||
BOOL = 0x08,
|
||||
DATETIME = 0x09,
|
||||
NULL_VALUE = 0x0A,
|
||||
REGEXP = 0x0B,
|
||||
DB_POINTER = 0x0C,
|
||||
JAVA_SCRIPT_CODE = 0x0D,
|
||||
SYMBOL = 0x0E,
|
||||
JAVA_SCRIPT_CODE_W_SCOPE = 0x0F,
|
||||
INT32 = 0x10,
|
||||
TIMESTAMP = 0x11,
|
||||
INT64 = 0x12,
|
||||
DECIMAL128 = 0x13,
|
||||
MIN_KEY = 0xFF,
|
||||
MAX_KEY = 0x7F,
|
||||
};
|
||||
|
||||
enum class BSONBinarySubtype
|
||||
{
|
||||
BINARY = 0x00,
|
||||
FUNCTION = 0x01,
|
||||
BINARY_OLD = 0x02,
|
||||
UUID_OLD = 0x03,
|
||||
UUID = 0x04,
|
||||
MD5 = 0x05,
|
||||
ENCRYPTED_BSON_VALUE = 0x06,
|
||||
COMPRESSED_BSON_COLUMN = 0x07,
|
||||
};
|
||||
|
||||
BSONType getBSONType(uint8_t value);
|
||||
std::string getBSONTypeName(BSONType type);
|
||||
|
||||
BSONBinarySubtype getBSONBinarySubtype(uint8_t value);
|
||||
std::string getBSONBinarySubtypeName(BSONBinarySubtype subtype);
|
||||
|
||||
}
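A short usage sketch for the helpers declared above, assuming nothing beyond this header and that it is built inside the ClickHouse tree:

#include <Formats/BSONTypes.h>
#include <iostream>

int main()
{
    // 0x10 is the BSON type byte for a 32-bit integer.
    DB::BSONType type = DB::getBSONType(0x10);
    std::cout << DB::getBSONTypeName(type) << '\n';   // prints "Int32"

    // A byte outside the known ranges (e.g. 0x20) makes getBSONType() throw
    // an UNKNOWN_TYPE exception, as does an out-of-range binary subtype.
    return 0;
}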
|
@ -18,7 +18,7 @@ void ColumnMapping::setupByHeader(const Block & header)
|
||||
}
|
||||
|
||||
void ColumnMapping::addColumns(
|
||||
const Names & column_names, const std::unordered_map<String, size_t> & column_indexes_by_names, const FormatSettings & settings)
|
||||
const Names & column_names, const Block::NameMap & column_indexes_by_names, const FormatSettings & settings)
|
||||
{
|
||||
std::vector<bool> read_columns(column_indexes_by_names.size(), false);
|
||||
|
||||
@ -26,8 +26,8 @@ void ColumnMapping::addColumns(
|
||||
{
|
||||
names_of_columns.push_back(name);
|
||||
|
||||
const auto column_it = column_indexes_by_names.find(name);
|
||||
if (column_it == column_indexes_by_names.end())
|
||||
const auto * column_it = column_indexes_by_names.find(name);
|
||||
if (!column_it)
|
||||
{
|
||||
if (settings.skip_unknown_fields)
|
||||
{
|
||||
@ -41,7 +41,7 @@ void ColumnMapping::addColumns(
|
||||
name, column_indexes_for_input_fields.size());
|
||||
}
|
||||
|
||||
const auto column_index = column_it->second;
|
||||
const auto column_index = column_it->getMapped();
|
||||
|
||||
if (read_columns[column_index])
|
||||
throw Exception("Duplicate field found while parsing format header: " + name, ErrorCodes::INCORRECT_DATA);
|
||||
|
@ -28,7 +28,7 @@ struct ColumnMapping
|
||||
void setupByHeader(const Block & header);
|
||||
|
||||
void addColumns(
|
||||
const Names & column_names, const std::unordered_map<String, size_t> & column_indexes_by_names, const FormatSettings & settings);
|
||||
const Names & column_names, const Block::NameMap & column_indexes_by_names, const FormatSettings & settings);
|
||||
|
||||
void insertDefaultsForNotSeenColumns(MutableColumns & columns, std::vector<UInt8> & read_columns);
|
||||
};
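Because getNamesToIndexesMap() now returns Block::NameMap (a HashMap) rather than std::unordered_map, callers switch from the iterator idiom to the pointer/getMapped() idiom shown in the ColumnMapping hunk above. A minimal sketch of such a lookup; findColumnPosition is a hypothetical helper, the rest follows the declarations in the diff:

#include <Core/Block.h>
#include <optional>
#include <string>

// Returns the position of `name` in `header`, or std::nullopt if it is absent.
std::optional<size_t> findColumnPosition(const DB::Block & header, const std::string & name)
{
    const auto name_map = header.getNamesToIndexesMap();
    if (const auto * cell = name_map.find(name))   // pointer to the cell, or nullptr
        return cell->getMapped();
    return std::nullopt;
}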
|
||||
|
@ -834,17 +834,23 @@ DataTypes getDefaultDataTypeForEscapingRules(const std::vector<FormatSettings::E
|
||||
return data_types;
|
||||
}
|
||||
|
||||
String getAdditionalFormatInfoForAllRowBasedFormats(const FormatSettings & settings)
|
||||
{
|
||||
return fmt::format(
|
||||
"schema_inference_hints={}, max_rows_to_read_for_schema_inference={}",
|
||||
settings.schema_inference_hints,
|
||||
settings.max_rows_to_read_for_schema_inference);
|
||||
}
|
||||
|
||||
String getAdditionalFormatInfoByEscapingRule(const FormatSettings & settings, FormatSettings::EscapingRule escaping_rule)
|
||||
{
|
||||
String result;
|
||||
String result = getAdditionalFormatInfoForAllRowBasedFormats(settings);
|
||||
/// First, settings that are common for all text formats:
|
||||
result = fmt::format(
|
||||
"schema_inference_hints={}, try_infer_integers={}, try_infer_dates={}, try_infer_datetimes={}, max_rows_to_read_for_schema_inference={}",
|
||||
settings.schema_inference_hints,
|
||||
result += fmt::format(
|
||||
", try_infer_integers={}, try_infer_dates={}, try_infer_datetimes={}",
|
||||
settings.try_infer_integers,
|
||||
settings.try_infer_dates,
|
||||
settings.try_infer_datetimes,
|
||||
settings.max_rows_to_read_for_schema_inference);
|
||||
settings.try_infer_datetimes);
|
||||
|
||||
/// Second, format-specific settings:
|
||||
switch (escaping_rule)
|
||||
|
@ -77,6 +77,7 @@ void transformInferredTypesIfNeeded(DataTypePtr & first, DataTypePtr & second, c
|
||||
void transformInferredJSONTypesIfNeeded(DataTypes & types, const FormatSettings & settings, const std::unordered_set<const IDataType *> * numbers_parsed_from_json_strings = nullptr);
|
||||
void transformInferredJSONTypesIfNeeded(DataTypePtr & first, DataTypePtr & second, const FormatSettings & settings);
|
||||
|
||||
String getAdditionalFormatInfoForAllRowBasedFormats(const FormatSettings & settings);
|
||||
String getAdditionalFormatInfoByEscapingRule(const FormatSettings & settings, FormatSettings::EscapingRule escaping_rule);
|
||||
|
||||
void checkSupportedDelimiterAfterField(FormatSettings::EscapingRule escaping_rule, const String & delimiter, const DataTypePtr & type);
|
||||
|
@ -178,6 +178,8 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings)
|
||||
format_settings.try_infer_integers = settings.input_format_try_infer_integers;
|
||||
format_settings.try_infer_dates = settings.input_format_try_infer_dates;
|
||||
format_settings.try_infer_datetimes = settings.input_format_try_infer_datetimes;
|
||||
format_settings.bson.output_string_as_string = settings.output_format_bson_string_as_string;
|
||||
format_settings.bson.skip_fields_with_unsupported_types_in_schema_inference = settings.input_format_bson_skip_fields_with_unsupported_types_in_schema_inference;
|
||||
|
||||
/// Validate avro_schema_registry_url with RemoteHostFilter when non-empty and in Server context
|
||||
if (format_settings.schema.is_server)
|
||||
|
@ -303,6 +303,12 @@ struct FormatSettings
|
||||
bool use_replace = false;
|
||||
bool quote_names = true;
|
||||
} sql_insert;
|
||||
|
||||
struct
|
||||
{
|
||||
bool output_string_as_string;
|
||||
bool skip_fields_with_unsupported_types_in_schema_inference;
|
||||
} bson;
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -231,7 +231,14 @@ namespace JSONUtils
|
||||
{
|
||||
auto type = getDataTypeFromFieldImpl(key_value_pair.second, settings, numbers_parsed_from_json_strings);
|
||||
if (!type)
|
||||
{
|
||||
/// If we couldn't infer nested type and Object type is not enabled,
|
||||
/// we can't determine the type of this JSON field.
|
||||
if (!settings.json.try_infer_objects)
|
||||
return nullptr;
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
if (settings.json.try_infer_objects && isObject(type))
|
||||
return std::make_shared<DataTypeObject>("json", true);
|
||||
|
@ -19,6 +19,7 @@ void registerFileSegmentationEngineJSONCompactEachRow(FormatFactory & factory);
|
||||
void registerFileSegmentationEngineHiveText(FormatFactory & factory);
|
||||
#endif
|
||||
void registerFileSegmentationEngineLineAsString(FormatFactory & factory);
|
||||
void registerFileSegmentationEngineBSONEachRow(FormatFactory & factory);
|
||||
|
||||
/// Formats for both input/output.
|
||||
|
||||
@ -49,6 +50,8 @@ void registerInputFormatJSONColumns(FormatFactory & factory);
|
||||
void registerOutputFormatJSONColumns(FormatFactory & factory);
|
||||
void registerInputFormatJSONCompactColumns(FormatFactory & factory);
|
||||
void registerOutputFormatJSONCompactColumns(FormatFactory & factory);
|
||||
void registerInputFormatBSONEachRow(FormatFactory & factory);
|
||||
void registerOutputFormatBSONEachRow(FormatFactory & factory);
|
||||
void registerInputFormatJSONColumnsWithMetadata(FormatFactory & factory);
|
||||
void registerOutputFormatJSONColumnsWithMetadata(FormatFactory & factory);
|
||||
void registerInputFormatProtobuf(FormatFactory & factory);
|
||||
@ -136,7 +139,7 @@ void registerTSKVSchemaReader(FormatFactory & factory);
|
||||
void registerValuesSchemaReader(FormatFactory & factory);
|
||||
void registerTemplateSchemaReader(FormatFactory & factory);
|
||||
void registerMySQLSchemaReader(FormatFactory & factory);
|
||||
|
||||
void registerBSONEachRowSchemaReader(FormatFactory & factory);
|
||||
|
||||
void registerFileExtensions(FormatFactory & factory);
|
||||
|
||||
@ -155,6 +158,7 @@ void registerFormats()
|
||||
registerFileSegmentationEngineHiveText(factory);
|
||||
#endif
|
||||
registerFileSegmentationEngineLineAsString(factory);
|
||||
registerFileSegmentationEngineBSONEachRow(factory);
|
||||
|
||||
|
||||
registerInputFormatNative(factory);
|
||||
@ -184,6 +188,8 @@ void registerFormats()
|
||||
registerOutputFormatJSONColumns(factory);
|
||||
registerInputFormatJSONCompactColumns(factory);
|
||||
registerOutputFormatJSONCompactColumns(factory);
|
||||
registerInputFormatBSONEachRow(factory);
|
||||
registerOutputFormatBSONEachRow(factory);
|
||||
registerInputFormatJSONColumnsWithMetadata(factory);
|
||||
registerOutputFormatJSONColumnsWithMetadata(factory);
|
||||
registerInputFormatProtobuf(factory);
|
||||
@ -267,6 +273,7 @@ void registerFormats()
|
||||
registerValuesSchemaReader(factory);
|
||||
registerTemplateSchemaReader(factory);
|
||||
registerMySQLSchemaReader(factory);
|
||||
registerBSONEachRowSchemaReader(factory);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -1,31 +1,40 @@
|
||||
#include <Functions/IFunction.h>
|
||||
#include <Functions/FunctionFactory.h>
|
||||
#include <Columns/ColumnString.h>
|
||||
#include <Columns/ColumnVector.h>
|
||||
#include <DataTypes/DataTypesNumber.h>
|
||||
#include <Disks/IDisk.h>
|
||||
#include <Functions/FunctionFactory.h>
|
||||
#include <Functions/IFunction.h>
|
||||
#include <Interpreters/Context.h>
|
||||
#include <filesystem>
|
||||
#include <Poco/Util/AbstractConfiguration.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int ILLEGAL_COLUMN;
|
||||
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
|
||||
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
|
||||
extern const int UNKNOWN_DISK;
|
||||
}
|
||||
namespace
|
||||
{
|
||||
|
||||
struct FilesystemAvailable
|
||||
{
|
||||
static constexpr auto name = "filesystemAvailable";
|
||||
static std::uintmax_t get(const std::filesystem::space_info & spaceinfo) { return spaceinfo.available; }
|
||||
static std::uintmax_t get(const DiskPtr & disk) { return disk->getAvailableSpace(); }
|
||||
};
|
||||
|
||||
struct FilesystemFree
|
||||
struct FilesystemUnreserved
|
||||
{
|
||||
static constexpr auto name = "filesystemFree";
|
||||
static std::uintmax_t get(const std::filesystem::space_info & spaceinfo) { return spaceinfo.free; }
|
||||
static constexpr auto name = "filesystemUnreserved";
|
||||
static std::uintmax_t get(const DiskPtr & disk) { return disk->getUnreservedSpace(); }
|
||||
};
|
||||
|
||||
struct FilesystemCapacity
|
||||
{
|
||||
static constexpr auto name = "filesystemCapacity";
|
||||
static std::uintmax_t get(const std::filesystem::space_info & spaceinfo) { return spaceinfo.capacity; }
|
||||
static std::uintmax_t get(const DiskPtr & disk) { return disk->getTotalSpace(); }
|
||||
};
|
||||
|
||||
template <typename Impl>
|
||||
@ -34,34 +43,72 @@ class FilesystemImpl : public IFunction
|
||||
public:
|
||||
static constexpr auto name = Impl::name;
|
||||
|
||||
static FunctionPtr create(ContextPtr context)
|
||||
{
|
||||
return std::make_shared<FilesystemImpl<Impl>>(std::filesystem::space(context->getPath()));
|
||||
}
|
||||
static FunctionPtr create(ContextPtr context_) { return std::make_shared<FilesystemImpl<Impl>>(context_); }
|
||||
|
||||
explicit FilesystemImpl(ContextPtr context_) : context(context_) { }
|
||||
|
||||
bool useDefaultImplementationForConstants() const override { return true; }
|
||||
|
||||
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
explicit FilesystemImpl(std::filesystem::space_info spaceinfo_) : spaceinfo(spaceinfo_) { }
|
||||
|
||||
String getName() const override { return name; }
|
||||
|
||||
bool isVariadic() const override { return true; }
|
||||
|
||||
size_t getNumberOfArguments() const override { return 0; }
|
||||
bool isDeterministic() const override { return false; }
|
||||
|
||||
DataTypePtr getReturnTypeImpl(const DataTypes & /*arguments*/) const override
|
||||
DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
|
||||
{
|
||||
if (arguments.size() > 1)
|
||||
{
|
||||
throw Exception("Arguments size of function " + getName() + " should be 0 or 1", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
|
||||
}
|
||||
if (arguments.size() == 1 && !isStringOrFixedString(arguments[0]))
|
||||
{
|
||||
throw Exception(
|
||||
"Arguments of function " + getName() + " should be String or FixedString", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
|
||||
}
|
||||
return std::make_shared<DataTypeUInt64>();
|
||||
}
|
||||
|
||||
ColumnPtr executeImpl(const ColumnsWithTypeAndName &, const DataTypePtr &, size_t input_rows_count) const override
|
||||
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
|
||||
{
|
||||
return DataTypeUInt64().createColumnConst(input_rows_count, static_cast<UInt64>(Impl::get(spaceinfo)));
|
||||
if (arguments.empty())
|
||||
{
|
||||
auto disk = context->getDisk("default");
|
||||
return DataTypeUInt64().createColumnConst(input_rows_count, Impl::get(disk));
|
||||
}
|
||||
else
|
||||
{
|
||||
auto col = arguments[0].column;
|
||||
if (const ColumnString * col_str = checkAndGetColumn<ColumnString>(col.get()))
|
||||
{
|
||||
auto disk_map = context->getDisksMap();
|
||||
|
||||
auto col_res = ColumnVector<UInt64>::create(col_str->size());
|
||||
auto & data = col_res->getData();
|
||||
for (size_t i = 0; i < col_str->size(); ++i)
|
||||
{
|
||||
auto disk_name = col_str->getDataAt(i).toString();
|
||||
if (auto it = disk_map.find(disk_name); it != disk_map.end())
|
||||
data[i] = Impl::get(it->second);
|
||||
else
|
||||
throw Exception(
|
||||
"Unknown disk name " + disk_name + " while execute function " + getName(), ErrorCodes::UNKNOWN_DISK);
|
||||
}
|
||||
return col_res;
|
||||
}
|
||||
throw Exception(
|
||||
"Illegal column " + arguments[0].column->getName() + " of argument of function " + getName(), ErrorCodes::ILLEGAL_COLUMN);
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
std::filesystem::space_info spaceinfo;
|
||||
ContextPtr context;
|
||||
};
|
||||
|
||||
}
|
||||
@ -70,7 +117,7 @@ REGISTER_FUNCTION(Filesystem)
|
||||
{
|
||||
factory.registerFunction<FilesystemImpl<FilesystemAvailable>>();
|
||||
factory.registerFunction<FilesystemImpl<FilesystemCapacity>>();
|
||||
factory.registerFunction<FilesystemImpl<FilesystemFree>>();
|
||||
factory.registerFunction<FilesystemImpl<FilesystemUnreserved>>();
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -1016,6 +1016,7 @@ public:
|
||||
size_t getNumberOfArguments() const override { return 3; }
|
||||
|
||||
bool useDefaultImplementationForNulls() const override { return false; }
|
||||
bool useDefaultImplementationForNothing() const override { return false; }
|
||||
bool isShortCircuit(ShortCircuitSettings & settings, size_t /*number_of_arguments*/) const override
|
||||
{
|
||||
settings.enable_lazy_execution_for_first_argument = false;
|
||||
|
@ -50,6 +50,7 @@ public:
|
||||
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }
|
||||
size_t getNumberOfArguments() const override { return 0; }
|
||||
bool useDefaultImplementationForNulls() const override { return false; }
|
||||
bool useDefaultImplementationForNothing() const override { return false; }
|
||||
|
||||
ColumnNumbers getArgumentsThatDontImplyNullableReturnType(size_t number_of_arguments) const override
|
||||
{
|
||||
|
@ -57,7 +57,7 @@ namespace DB
|
||||
|
||||
if (unlikely(begin > end))
|
||||
{
|
||||
const FormatSettings default_format;
|
||||
const FormatSettings default_format{};
|
||||
WriteBufferFromOwnString buf_begin, buf_end;
|
||||
begin_serializaion->serializeTextQuoted(*(arguments[0].column), i, buf_begin, default_format);
|
||||
end_serialization->serializeTextQuoted(*(arguments[1].column), i, buf_end, default_format);
|
||||
|
@ -1278,6 +1278,25 @@ void skipToUnescapedNextLineOrEOF(ReadBuffer & buf)
|
||||
}
|
||||
}
|
||||
|
||||
void skipNullTerminated(ReadBuffer & buf)
|
||||
{
|
||||
while (!buf.eof())
|
||||
{
|
||||
char * next_pos = find_first_symbols<'\0'>(buf.position(), buf.buffer().end());
|
||||
buf.position() = next_pos;
|
||||
|
||||
if (!buf.hasPendingData())
|
||||
continue;
|
||||
|
||||
if (*buf.position() == '\0')
|
||||
{
|
||||
++buf.position();
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
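A small usage sketch for the new skipNullTerminated() helper; it assumes DB::ReadBufferFromString behaves as the usual in-memory ReadBuffer and is only meant to illustrate where the position ends up:

#include <IO/ReadBufferFromString.h>
#include <IO/ReadHelpers.h>
#include <cassert>
#include <string>

int main()
{
    // "abc\0def": after the call the buffer is positioned just past the '\0'.
    std::string data("abc\0def", 7);
    DB::ReadBufferFromString buf(data);

    DB::skipNullTerminated(buf);
    assert(*buf.position() == 'd');
    return 0;
}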
|
||||
|
||||
|
||||
void saveUpToPosition(ReadBuffer & in, Memory<> & memory, char * current)
|
||||
{
|
||||
assert(current >= in.position());
|
||||
|
@ -1448,6 +1448,8 @@ void skipToCarriageReturnOrEOF(ReadBuffer & buf);
|
||||
/// Skip to next character after next unescaped \n. If no \n in stream, skip to end. Does not throw on invalid escape sequences.
|
||||
void skipToUnescapedNextLineOrEOF(ReadBuffer & buf);
|
||||
|
||||
/// Skip to next character after next \0. If no \0 in stream, skip to end.
|
||||
void skipNullTerminated(ReadBuffer & buf);
|
||||
|
||||
/** This function just copies the data from buffer's internal position (in.position())
|
||||
* to current position (from arguments) into memory.
|
||||
|
@ -76,7 +76,7 @@ TEST(IOTestAwsS3Client, AppendExtraSSECHeaders)
|
||||
|
||||
DB::RemoteHostFilter remote_host_filter;
|
||||
unsigned int s3_max_redirects = 100;
|
||||
DB::S3::URI uri(Poco::URI(http.getUrl() + "/IOTestAwsS3ClientAppendExtraHeaders/test.txt"));
|
||||
DB::S3::URI uri(http.getUrl() + "/IOTestAwsS3ClientAppendExtraHeaders/test.txt");
|
||||
String access_key_id = "ACCESS_KEY_ID";
|
||||
String secret_access_key = "SECRET_ACCESS_KEY";
|
||||
String region = "us-east-1";
|
||||
|
@ -759,7 +759,7 @@ namespace S3
|
||||
put_request_throttler);
|
||||
}
|
||||
|
||||
URI::URI(const Poco::URI & uri_)
|
||||
URI::URI(const std::string & uri_)
|
||||
{
|
||||
/// Case when the bucket name is represented in the domain name of the S3 URL.
|
||||
/// E.g. (https://bucket-name.s3.Region.amazonaws.com/key)
|
||||
@ -777,16 +777,32 @@ namespace S3
|
||||
static constexpr auto OBS = "OBS";
|
||||
static constexpr auto OSS = "OSS";
|
||||
|
||||
uri = uri_;
|
||||
uri = Poco::URI(uri_);
|
||||
|
||||
storage_name = S3;
|
||||
|
||||
if (uri.getHost().empty())
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Host is empty in S3 URI.");
|
||||
|
||||
/// Extract object version ID from query string.
|
||||
bool has_version_id = false;
|
||||
for (const auto & [query_key, query_value] : uri.getQueryParameters())
|
||||
if (query_key == "versionId")
|
||||
{
|
||||
version_id = query_value;
|
||||
has_version_id = true;
|
||||
}
|
||||
|
||||
/// Poco::URI will ignore '?' when parsing the path, but if there is a versionId in the HTTP parameters,
|
||||
/// '?' can not be used as a wildcard, otherwise it will be ambiguous.
|
||||
/// If there is no "versionId" in the HTTP parameters, '?' can be used as a wildcard.
|
||||
/// It is necessary to encode '?' so that it is not dropped while parsing the path.
|
||||
if (!has_version_id && uri_.find('?') != String::npos)
|
||||
{
|
||||
String uri_with_question_mark_encode;
|
||||
Poco::URI::encode(uri_, "?", uri_with_question_mark_encode);
|
||||
uri = Poco::URI(uri_with_question_mark_encode);
|
||||
}
|
||||
|
||||
String name;
|
||||
String endpoint_authority_from_uri;
|
||||
|
@ -119,8 +119,7 @@ struct URI
|
||||
|
||||
bool is_virtual_hosted_style;
|
||||
|
||||
explicit URI(const Poco::URI & uri_);
|
||||
explicit URI(const std::string & uri_) : URI(Poco::URI(uri_)) {}
|
||||
explicit URI(const std::string & uri_);
|
||||
|
||||
static void validateBucket(const String & bucket, const Poco::URI & uri);
|
||||
};
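With the change above, S3::URI is constructed directly from the endpoint string and extracts the object version from the versionId query parameter itself. A short sketch based on the test cases further down; the include path is assumed to match this tree, and only fields the tests exercise are touched:

#include <IO/S3Common.h>
#include <cassert>

int main()
{
    // No Poco::URI wrapping is needed any more; versionId is parsed out of the query string.
    DB::S3::URI uri("https://bucketname.s3.us-east-2.amazonaws.com/data?versionId=testVersionId");

    assert(uri.endpoint == "https://s3.us-east-2.amazonaws.com");
    assert(uri.bucket == "bucketname");
    assert(uri.key == "data");
    assert(uri.version_id == "testVersionId");
    assert(uri.is_virtual_hosted_style);
    return 0;
}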
|
||||
|
@ -20,55 +20,55 @@ struct TestCase
|
||||
};
|
||||
|
||||
const TestCase TestCases[] = {
|
||||
{S3::URI(Poco::URI("https://bucketname.s3.us-east-2.amazonaws.com/data")),
|
||||
{S3::URI("https://bucketname.s3.us-east-2.amazonaws.com/data"),
|
||||
"https://s3.us-east-2.amazonaws.com",
|
||||
"bucketname",
|
||||
"data",
|
||||
"",
|
||||
true},
|
||||
{S3::URI(Poco::URI("https://bucketname.s3.us-east-2.amazonaws.com/data?firstKey=someKey&secondKey=anotherKey")),
|
||||
{S3::URI("https://bucketname.s3.us-east-2.amazonaws.com/data?firstKey=someKey&secondKey=anotherKey"),
|
||||
"https://s3.us-east-2.amazonaws.com",
|
||||
"bucketname",
|
||||
"data?firstKey=someKey&secondKey=anotherKey",
|
||||
"",
|
||||
true},
|
||||
{S3::URI("https://bucketname.s3.us-east-2.amazonaws.com/data?versionId=testVersionId&anotherKey=someOtherKey"),
|
||||
"https://s3.us-east-2.amazonaws.com",
|
||||
"bucketname",
|
||||
"data",
|
||||
"testVersionId",
|
||||
true},
|
||||
{S3::URI("https://bucketname.s3.us-east-2.amazonaws.com/data?firstKey=someKey&versionId=testVersionId&anotherKey=someOtherKey"),
|
||||
"https://s3.us-east-2.amazonaws.com",
|
||||
"bucketname",
|
||||
"data",
|
||||
"testVersionId",
|
||||
true},
|
||||
{S3::URI("https://bucketname.s3.us-east-2.amazonaws.com/data?anotherKey=someOtherKey&versionId=testVersionId"),
|
||||
"https://s3.us-east-2.amazonaws.com",
|
||||
"bucketname",
|
||||
"data",
|
||||
"testVersionId",
|
||||
true},
|
||||
{S3::URI("https://bucketname.s3.us-east-2.amazonaws.com/data?versionId=testVersionId"),
|
||||
"https://s3.us-east-2.amazonaws.com",
|
||||
"bucketname",
|
||||
"data",
|
||||
"testVersionId",
|
||||
true},
|
||||
{S3::URI("https://bucketname.s3.us-east-2.amazonaws.com/data?versionId="),
|
||||
"https://s3.us-east-2.amazonaws.com",
|
||||
"bucketname",
|
||||
"data",
|
||||
"",
|
||||
true},
|
||||
{S3::URI(Poco::URI("https://bucketname.s3.us-east-2.amazonaws.com/data?versionId=testVersionId&anotherKey=someOtherKey")),
|
||||
"https://s3.us-east-2.amazonaws.com",
|
||||
"bucketname",
|
||||
"data",
|
||||
"testVersionId",
|
||||
true},
|
||||
{S3::URI(Poco::URI("https://bucketname.s3.us-east-2.amazonaws.com/data?firstKey=someKey&versionId=testVersionId&anotherKey=someOtherKey")),
|
||||
"https://s3.us-east-2.amazonaws.com",
|
||||
"bucketname",
|
||||
"data",
|
||||
"testVersionId",
|
||||
true},
|
||||
{S3::URI(Poco::URI("https://bucketname.s3.us-east-2.amazonaws.com/data?anotherKey=someOtherKey&versionId=testVersionId")),
|
||||
"https://s3.us-east-2.amazonaws.com",
|
||||
"bucketname",
|
||||
"data",
|
||||
"testVersionId",
|
||||
true},
|
||||
{S3::URI(Poco::URI("https://bucketname.s3.us-east-2.amazonaws.com/data?versionId=testVersionId")),
|
||||
"https://s3.us-east-2.amazonaws.com",
|
||||
"bucketname",
|
||||
"data",
|
||||
"testVersionId",
|
||||
true},
|
||||
{S3::URI(Poco::URI("https://bucketname.s3.us-east-2.amazonaws.com/data?versionId=")),
|
||||
{S3::URI("https://bucketname.s3.us-east-2.amazonaws.com/data?versionId&"),
|
||||
"https://s3.us-east-2.amazonaws.com",
|
||||
"bucketname",
|
||||
"data",
|
||||
"",
|
||||
true},
|
||||
{S3::URI(Poco::URI("https://bucketname.s3.us-east-2.amazonaws.com/data?versionId&")),
|
||||
"https://s3.us-east-2.amazonaws.com",
|
||||
"bucketname",
|
||||
"data",
|
||||
"",
|
||||
true},
|
||||
{S3::URI(Poco::URI("https://bucketname.s3.us-east-2.amazonaws.com/data?versionId")),
|
||||
{S3::URI("https://bucketname.s3.us-east-2.amazonaws.com/data?versionId"),
|
||||
"https://s3.us-east-2.amazonaws.com",
|
||||
"bucketname",
|
||||
"data",
|
||||
@ -83,7 +83,7 @@ class S3UriTest : public testing::TestWithParam<std::string>
|
||||
TEST(S3UriTest, validPatterns)
|
||||
{
|
||||
{
|
||||
S3::URI uri(Poco::URI("https://jokserfn.s3.amazonaws.com/"));
|
||||
S3::URI uri("https://jokserfn.s3.amazonaws.com/");
|
||||
ASSERT_EQ("https://s3.amazonaws.com", uri.endpoint);
|
||||
ASSERT_EQ("jokserfn", uri.bucket);
|
||||
ASSERT_EQ("", uri.key);
|
||||
@ -91,7 +91,7 @@ TEST(S3UriTest, validPatterns)
|
||||
ASSERT_EQ(true, uri.is_virtual_hosted_style);
|
||||
}
|
||||
{
|
||||
S3::URI uri(Poco::URI("https://s3.amazonaws.com/jokserfn/"));
|
||||
S3::URI uri("https://s3.amazonaws.com/jokserfn/");
|
||||
ASSERT_EQ("https://s3.amazonaws.com", uri.endpoint);
|
||||
ASSERT_EQ("jokserfn", uri.bucket);
|
||||
ASSERT_EQ("", uri.key);
|
||||
@ -99,7 +99,7 @@ TEST(S3UriTest, validPatterns)
|
||||
ASSERT_EQ(false, uri.is_virtual_hosted_style);
|
||||
}
|
||||
{
|
||||
S3::URI uri(Poco::URI("https://amazonaws.com/bucket/"));
|
||||
S3::URI uri("https://amazonaws.com/bucket/");
|
||||
ASSERT_EQ("https://amazonaws.com", uri.endpoint);
|
||||
ASSERT_EQ("bucket", uri.bucket);
|
||||
ASSERT_EQ("", uri.key);
|
||||
@ -107,7 +107,7 @@ TEST(S3UriTest, validPatterns)
|
||||
ASSERT_EQ(false, uri.is_virtual_hosted_style);
|
||||
}
|
||||
{
|
||||
S3::URI uri(Poco::URI("https://jokserfn.s3.amazonaws.com/data"));
|
||||
S3::URI uri("https://jokserfn.s3.amazonaws.com/data");
|
||||
ASSERT_EQ("https://s3.amazonaws.com", uri.endpoint);
|
||||
ASSERT_EQ("jokserfn", uri.bucket);
|
||||
ASSERT_EQ("data", uri.key);
|
||||
@ -115,7 +115,7 @@ TEST(S3UriTest, validPatterns)
|
||||
ASSERT_EQ(true, uri.is_virtual_hosted_style);
|
||||
}
|
||||
{
|
||||
S3::URI uri(Poco::URI("https://storage.amazonaws.com/jokserfn/data"));
|
||||
S3::URI uri("https://storage.amazonaws.com/jokserfn/data");
|
||||
ASSERT_EQ("https://storage.amazonaws.com", uri.endpoint);
|
||||
ASSERT_EQ("jokserfn", uri.bucket);
|
||||
ASSERT_EQ("data", uri.key);
|
||||
@ -123,7 +123,7 @@ TEST(S3UriTest, validPatterns)
|
||||
ASSERT_EQ(false, uri.is_virtual_hosted_style);
|
||||
}
|
||||
{
|
||||
S3::URI uri(Poco::URI("https://bucketname.cos.ap-beijing.myqcloud.com/data"));
|
||||
S3::URI uri("https://bucketname.cos.ap-beijing.myqcloud.com/data");
|
||||
ASSERT_EQ("https://cos.ap-beijing.myqcloud.com", uri.endpoint);
|
||||
ASSERT_EQ("bucketname", uri.bucket);
|
||||
ASSERT_EQ("data", uri.key);
|
||||
@ -131,7 +131,7 @@ TEST(S3UriTest, validPatterns)
|
||||
ASSERT_EQ(true, uri.is_virtual_hosted_style);
|
||||
}
|
||||
{
|
||||
S3::URI uri(Poco::URI("https://bucketname.s3.us-east-2.amazonaws.com/data"));
|
||||
S3::URI uri("https://bucketname.s3.us-east-2.amazonaws.com/data");
|
||||
ASSERT_EQ("https://s3.us-east-2.amazonaws.com", uri.endpoint);
|
||||
ASSERT_EQ("bucketname", uri.bucket);
|
||||
ASSERT_EQ("data", uri.key);
|
||||
@ -139,7 +139,7 @@ TEST(S3UriTest, validPatterns)
|
||||
ASSERT_EQ(true, uri.is_virtual_hosted_style);
|
||||
}
|
||||
{
|
||||
S3::URI uri(Poco::URI("https://s3.us-east-2.amazonaws.com/bucketname/data"));
|
||||
S3::URI uri("https://s3.us-east-2.amazonaws.com/bucketname/data");
|
||||
ASSERT_EQ("https://s3.us-east-2.amazonaws.com", uri.endpoint);
|
||||
ASSERT_EQ("bucketname", uri.bucket);
|
||||
ASSERT_EQ("data", uri.key);
|
||||
@ -147,7 +147,7 @@ TEST(S3UriTest, validPatterns)
|
||||
ASSERT_EQ(false, uri.is_virtual_hosted_style);
|
||||
}
|
||||
{
|
||||
S3::URI uri(Poco::URI("https://bucketname.s3-us-east-2.amazonaws.com/data"));
|
||||
S3::URI uri("https://bucketname.s3-us-east-2.amazonaws.com/data");
|
||||
ASSERT_EQ("https://s3-us-east-2.amazonaws.com", uri.endpoint);
|
||||
ASSERT_EQ("bucketname", uri.bucket);
|
||||
ASSERT_EQ("data", uri.key);
|
||||
@ -155,7 +155,7 @@ TEST(S3UriTest, validPatterns)
|
||||
ASSERT_EQ(true, uri.is_virtual_hosted_style);
|
||||
}
|
||||
{
|
||||
S3::URI uri(Poco::URI("https://s3-us-east-2.amazonaws.com/bucketname/data"));
|
||||
S3::URI uri("https://s3-us-east-2.amazonaws.com/bucketname/data");
|
||||
ASSERT_EQ("https://s3-us-east-2.amazonaws.com", uri.endpoint);
|
||||
ASSERT_EQ("bucketname", uri.bucket);
|
||||
ASSERT_EQ("data", uri.key);
|
||||
@ -166,7 +166,7 @@ TEST(S3UriTest, validPatterns)
|
||||
|
||||
TEST_P(S3UriTest, invalidPatterns)
|
||||
{
|
||||
ASSERT_ANY_THROW(S3::URI(Poco::URI(GetParam())));
|
||||
ASSERT_ANY_THROW(S3::URI new_uri(GetParam()));
|
||||
}
|
||||
|
||||
TEST(S3UriTest, versionIdChecks)
|
||||
|
@ -24,6 +24,7 @@
|
||||
#include <Storages/IStorage.h>
|
||||
#include <Storages/MarkCache.h>
|
||||
#include <Storages/MergeTree/MergeList.h>
|
||||
#include <Storages/MergeTree/MovesList.h>
|
||||
#include <Storages/MergeTree/ReplicatedFetchList.h>
|
||||
#include <Storages/MergeTree/MergeTreeData.h>
|
||||
#include <Storages/MergeTree/MergeTreeSettings.h>
|
||||
@ -229,6 +230,7 @@ struct ContextSharedPart : boost::noncopyable
|
||||
ProcessList process_list; /// Executing queries at the moment.
|
||||
GlobalOvercommitTracker global_overcommit_tracker;
|
||||
MergeList merge_list; /// The list of executable merge (for (Replicated)?MergeTree)
|
||||
MovesList moves_list; /// The list of executing moves (for (Replicated)?MergeTree)
|
||||
ReplicatedFetchList replicated_fetch_list;
|
||||
ConfigurationPtr users_config; /// Config with the users, profiles and quotas sections.
|
||||
InterserverIOHandler interserver_io_handler; /// Handler for interserver communication.
|
||||
@ -637,6 +639,8 @@ const ProcessList & Context::getProcessList() const { return shared->process_lis
|
||||
OvercommitTracker * Context::getGlobalOvercommitTracker() const { return &shared->global_overcommit_tracker; }
|
||||
MergeList & Context::getMergeList() { return shared->merge_list; }
|
||||
const MergeList & Context::getMergeList() const { return shared->merge_list; }
|
||||
MovesList & Context::getMovesList() { return shared->moves_list; }
|
||||
const MovesList & Context::getMovesList() const { return shared->moves_list; }
|
||||
ReplicatedFetchList & Context::getReplicatedFetchList() { return shared->replicated_fetch_list; }
|
||||
const ReplicatedFetchList & Context::getReplicatedFetchList() const { return shared->replicated_fetch_list; }
|
||||
|
||||
|
@ -63,6 +63,7 @@ using InterserverCredentialsPtr = std::shared_ptr<const InterserverCredentials>;
|
||||
class InterserverIOHandler;
|
||||
class BackgroundSchedulePool;
|
||||
class MergeList;
|
||||
class MovesList;
|
||||
class ReplicatedFetchList;
|
||||
class Cluster;
|
||||
class Compiler;
|
||||
@ -775,6 +776,9 @@ public:
|
||||
MergeList & getMergeList();
|
||||
const MergeList & getMergeList() const;
|
||||
|
||||
MovesList & getMovesList();
|
||||
const MovesList & getMovesList() const;
|
||||
|
||||
ReplicatedFetchList & getReplicatedFetchList();
|
||||
const ReplicatedFetchList & getReplicatedFetchList() const;
|
||||
|
||||
|
@ -1015,7 +1015,7 @@ void DatabaseCatalog::dropTableFinally(const TableMarkedAsDropped & table)
|
||||
for (const auto & [disk_name, disk] : getContext()->getDisksMap())
|
||||
{
|
||||
String data_path = "store/" + getPathForUUID(table.table_id.uuid);
|
||||
if (!disk->exists(data_path) || disk->isReadOnly())
|
||||
if (disk->isReadOnly() || !disk->exists(data_path))
|
||||
continue;
|
||||
|
||||
LOG_INFO(log, "Removing data directory {} of dropped table {} from disk {}", data_path, table.table_id.getNameForLogs(), disk_name);
|
||||
|
@ -7,6 +7,7 @@
|
||||
#include <Access/Common/AccessRightsElement.h>
|
||||
#include <Parsers/ASTDropQuery.h>
|
||||
#include <Storages/IStorage.h>
|
||||
#include <Storages/MergeTree/MergeTreeData.h>
|
||||
#include <Common/escapeForFileName.h>
|
||||
#include <Common/quoteString.h>
|
||||
#include <Common/typeid_cast.h>
|
||||
@ -120,6 +121,8 @@ BlockIO InterpreterDropQuery::executeToTableImpl(ContextPtr context_, ASTDropQue
|
||||
auto [database, table] = query.if_exists ? DatabaseCatalog::instance().tryGetDatabaseAndTable(table_id, context_)
|
||||
: DatabaseCatalog::instance().getDatabaseAndTable(table_id, context_);
|
||||
|
||||
checkStorageSupportsTransactionsIfNeeded(table, context_);
|
||||
|
||||
if (database && table)
|
||||
{
|
||||
auto & ast_drop_query = query.as<ASTDropQuery &>();
|
||||
@ -207,18 +210,15 @@ BlockIO InterpreterDropQuery::executeToTableImpl(ContextPtr context_, ASTDropQue
|
||||
|
||||
table->checkTableCanBeDropped();
|
||||
|
||||
TableExclusiveLockHolder table_lock;
|
||||
/// We don't need this lock for ReplicatedMergeTree
|
||||
if (!table->supportsReplication())
|
||||
{
|
||||
/// And for simple MergeTree we can stop merges before acquiring the lock
|
||||
auto merges_blocker = table->getActionLock(ActionLocks::PartsMerge);
|
||||
table_lock = table->lockExclusively(context_->getCurrentQueryId(), context_->getSettingsRef().lock_acquire_timeout);
|
||||
}
|
||||
TableExclusiveLockHolder table_excl_lock;
|
||||
/// We don't need any lock for ReplicatedMergeTree and for simple MergeTree
|
||||
/// For the rest of the table types an exclusive lock is needed
|
||||
if (!std::dynamic_pointer_cast<MergeTreeData>(table))
|
||||
table_excl_lock = table->lockExclusively(context_->getCurrentQueryId(), context_->getSettingsRef().lock_acquire_timeout);
|
||||
|
||||
auto metadata_snapshot = table->getInMemoryMetadataPtr();
|
||||
/// Drop table data, don't touch metadata
|
||||
table->truncate(query_ptr, metadata_snapshot, context_, table_lock);
|
||||
table->truncate(query_ptr, metadata_snapshot, context_, table_excl_lock);
|
||||
}
|
||||
else if (query.kind == ASTDropQuery::Kind::Drop)
|
||||
{
|
||||
@ -464,4 +464,16 @@ void InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind kind, ContextPtr
|
||||
}
|
||||
}
|
||||
|
||||
bool InterpreterDropQuery::supportsTransactions() const
|
||||
{
|
||||
/// Enable only for truncate table with MergeTreeData engine
|
||||
|
||||
auto & drop = query_ptr->as<ASTDropQuery &>();
|
||||
|
||||
return drop.cluster.empty()
|
||||
&& !drop.temporary
|
||||
&& drop.kind == ASTDropQuery::Kind::Truncate
|
||||
&& drop.table;
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -28,6 +28,8 @@ public:
|
||||
|
||||
static void executeDropQuery(ASTDropQuery::Kind kind, ContextPtr global_context, ContextPtr current_context, const StorageID & target_table_id, bool sync);
|
||||
|
||||
bool supportsTransactions() const override;
|
||||
|
||||
private:
|
||||
AccessRightsElements getRequiredAccessForDDLOnCluster() const;
|
||||
ASTPtr query_ptr;
|
Some files were not shown because too many files have changed in this diff.