Merge branch 'master' into grant_bug_fixed
commit 8d70b67e2a

.github/workflows/backport_branches.yml (93 changes, vendored)
@@ -9,7 +9,24 @@ on: # yamllint disable-line rule:truthy
  branches:
    - 'backport/**'
jobs:
-  DockerHubPush:
+  DockerHubPushAarch64:
    runs-on: [self-hosted, func-tester-aarch64]
    steps:
      - name: Clear repository
        run: |
          sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Images check
        run: |
          cd $GITHUB_WORKSPACE/tests/ci
          python3 docker_images_check.py --suffix aarch64
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: changed_images_aarch64
          path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
  DockerHubPushAmd64:
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Clear repository

@@ -20,12 +37,40 @@ jobs:
      - name: Images check
        run: |
          cd $GITHUB_WORKSPACE/tests/ci
-          python3 docker_images_check.py
+          python3 docker_images_check.py --suffix amd64
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: changed_images_amd64
          path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
  DockerHubPush:
    needs: [DockerHubPushAmd64, DockerHubPushAarch64]
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Clear repository
        run: |
          sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Download changed aarch64 images
        uses: actions/download-artifact@v2
        with:
          name: changed_images_aarch64
          path: ${{ runner.temp }}
      - name: Download changed amd64 images
        uses: actions/download-artifact@v2
        with:
          name: changed_images_amd64
          path: ${{ runner.temp }}
      - name: Images check
        run: |
          cd $GITHUB_WORKSPACE/tests/ci
          python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: changed_images
-          path: ${{ runner.temp }}/docker_images_check/changed_images.json
+          path: ${{ runner.temp }}/changed_images.json
  CompatibilityCheck:
    needs: [BuilderDebRelease]
    runs-on: [self-hosted, style-checker]

@@ -106,6 +151,47 @@ jobs:
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH $CACHES_PATH
  BuilderDebAarch64:
    needs: [DockerHubPush]
    runs-on: [self-hosted, builder]
    steps:
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/build_check
          IMAGES_PATH=${{runner.temp}}/images_path
          REPO_COPY=${{runner.temp}}/build_check/ClickHouse
          CACHES_PATH=${{runner.temp}}/../ccaches
          CHECK_NAME=ClickHouse build check (actions)
          BUILD_NAME=package_aarch64
          EOF
      - name: Download changed images
        uses: actions/download-artifact@v2
        with:
          name: changed_images
          path: ${{ runner.temp }}/images_path
      - name: Check out repository code
        uses: actions/checkout@v2
        with:
          submodules: 'true'
          fetch-depth: 0 # otherwise we will have no info about contributors
      - name: Build
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NAME
      - name: Upload build URLs to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: ${{ env.BUILD_NAME }}
          path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH $CACHES_PATH
  BuilderDebAsan:
    needs: [DockerHubPush]
    runs-on: [self-hosted, builder]

@@ -247,6 +333,7 @@ jobs:
  BuilderReport:
    needs:
      - BuilderDebRelease
+      - BuilderDebAarch64
      - BuilderDebAsan
      - BuilderDebTsan
      - BuilderDebDebug
.github/workflows/cancel.yml (2 changes, vendored)
@@ -6,7 +6,7 @@ env:

on: # yamllint disable-line rule:truthy
  workflow_run:
-    workflows: ["CIGithubActions", "ReleaseCI", "DocsCheck", "BackportPR"]
+    workflows: ["PullRequestCI", "ReleaseCI", "DocsCheck", "BackportPR"]
    types:
      - requested
jobs:
.github/workflows/docs_check.yml (56 changes, vendored)
@@ -28,24 +28,70 @@ jobs:
        run: |
          cd $GITHUB_WORKSPACE/tests/ci
          python3 run_check.py
-  DockerHubPush:
+  DockerHubPushAarch64:
    needs: CheckLabels
-    runs-on: [self-hosted, style-checker]
+    runs-on: [self-hosted, func-tester-aarch64]
    steps:
      - name: Clear repository
        run: |
-          sudo rm -rf $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
+          sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Images check
        run: |
          cd $GITHUB_WORKSPACE/tests/ci
-          python3 docker_images_check.py
+          python3 docker_images_check.py --suffix aarch64
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: changed_images_aarch64
          path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
  DockerHubPushAmd64:
    needs: CheckLabels
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Clear repository
        run: |
          sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Images check
        run: |
          cd $GITHUB_WORKSPACE/tests/ci
          python3 docker_images_check.py --suffix amd64
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: changed_images_amd64
          path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
  DockerHubPush:
    needs: [DockerHubPushAmd64, DockerHubPushAarch64]
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Clear repository
        run: |
          sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Download changed aarch64 images
        uses: actions/download-artifact@v2
        with:
          name: changed_images_aarch64
          path: ${{ runner.temp }}
      - name: Download changed amd64 images
        uses: actions/download-artifact@v2
        with:
          name: changed_images_amd64
          path: ${{ runner.temp }}
      - name: Images check
        run: |
          cd $GITHUB_WORKSPACE/tests/ci
          python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: changed_images
-          path: ${{ runner.temp }}/docker_images_check/changed_images.json
+          path: ${{ runner.temp }}/changed_images.json
  DocsCheck:
    needs: DockerHubPush
    runs-on: [self-hosted, func-tester]
.github/workflows/jepsen.yml (2 changes, vendored)
@@ -8,7 +8,7 @@ on: # yamllint disable-line rule:truthy
  schedule:
    - cron: '0 */6 * * *'
  workflow_run:
-    workflows: ["CIGithubActions"]
+    workflows: ["PullRequestCI"]
    types:
      - completed
  workflow_dispatch:
.github/workflows/master.yml (165 changes, vendored)
@@ -9,7 +9,24 @@ on: # yamllint disable-line rule:truthy
  branches:
    - 'master'
jobs:
-  DockerHubPush:
+  DockerHubPushAarch64:
    runs-on: [self-hosted, func-tester-aarch64]
    steps:
      - name: Clear repository
        run: |
          sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Images check
        run: |
          cd $GITHUB_WORKSPACE/tests/ci
          python3 docker_images_check.py --suffix aarch64
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: changed_images_aarch64
          path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
  DockerHubPushAmd64:
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Clear repository

@@ -20,12 +37,40 @@ jobs:
      - name: Images check
        run: |
          cd $GITHUB_WORKSPACE/tests/ci
-          python3 docker_images_check.py
+          python3 docker_images_check.py --suffix amd64
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: changed_images_amd64
          path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
  DockerHubPush:
    needs: [DockerHubPushAmd64, DockerHubPushAarch64]
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Clear repository
        run: |
          sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Download changed aarch64 images
        uses: actions/download-artifact@v2
        with:
          name: changed_images_aarch64
          path: ${{ runner.temp }}
      - name: Download changed amd64 images
        uses: actions/download-artifact@v2
        with:
          name: changed_images_amd64
          path: ${{ runner.temp }}
      - name: Images check
        run: |
          cd $GITHUB_WORKSPACE/tests/ci
          python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: changed_images
-          path: ${{ runner.temp }}/docker_images_check/changed_images.json
+          path: ${{ runner.temp }}/changed_images.json
  StyleCheck:
    needs: DockerHubPush
    runs-on: [self-hosted, style-checker]

@@ -168,6 +213,47 @@ jobs:
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  BuilderDebAarch64:
    needs: [DockerHubPush]
    runs-on: [self-hosted, builder]
    steps:
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/build_check
          IMAGES_PATH=${{runner.temp}}/images_path
          REPO_COPY=${{runner.temp}}/build_check/ClickHouse
          CACHES_PATH=${{runner.temp}}/../ccaches
          CHECK_NAME=ClickHouse build check (actions)
          BUILD_NAME=package_aarch64
          EOF
      - name: Download changed images
        uses: actions/download-artifact@v2
        with:
          name: changed_images
          path: ${{ runner.temp }}/images_path
      - name: Check out repository code
        uses: actions/checkout@v2
        with:
          submodules: 'true'
          fetch-depth: 0 # otherwise we will have no info about contributors
      - name: Build
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NAME
      - name: Upload build URLs to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: ${{ env.BUILD_NAME }}
          path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH $CACHES_PATH
  BuilderPerformance:
    needs: DockerHubPush
    runs-on: [self-hosted, builder]

@@ -815,6 +901,7 @@ jobs:
  BuilderReport:
    needs:
      - BuilderDebRelease
+      - BuilderDebAarch64
      - BuilderBinRelease
      - BuilderDebAsan
      - BuilderDebTsan

@@ -963,6 +1050,41 @@ jobs:
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatelessTestAarch64:
    needs: [BuilderDebAarch64]
    runs-on: [self-hosted, func-tester-aarch64]
    steps:
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/stateless_release
          REPORTS_PATH=${{runner.temp}}/reports_dir
          CHECK_NAME=Stateless tests (aarch64, actions)
          REPO_COPY=${{runner.temp}}/stateless_release/ClickHouse
          KILL_TIMEOUT=10800
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{ env.REPORTS_PATH }}
      - name: Clear repository
        run: |
          sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatelessTestAsan0:
    needs: [BuilderDebAsan]
    runs-on: [self-hosted, func-tester]

@@ -1478,6 +1600,41 @@ jobs:
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatefulTestAarch64:
    needs: [BuilderDebAarch64]
    runs-on: [self-hosted, func-tester-aarch64]
    steps:
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/stateful_release
          REPORTS_PATH=${{runner.temp}}/reports_dir
          CHECK_NAME=Stateful tests (aarch64, actions)
          REPO_COPY=${{runner.temp}}/stateful_release/ClickHouse
          KILL_TIMEOUT=3600
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{ env.REPORTS_PATH }}
      - name: Clear repository
        run: |
          sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatefulTestAsan:
    needs: [BuilderDebAsan]
    runs-on: [self-hosted, func-tester]

@@ -2659,6 +2816,7 @@ jobs:
      - FunctionalStatelessTestDebug2
      - FunctionalStatelessTestRelease
      - FunctionalStatelessTestReleaseDatabaseOrdinary
+      - FunctionalStatelessTestAarch64
      - FunctionalStatelessTestAsan0
      - FunctionalStatelessTestAsan1
      - FunctionalStatelessTestTsan0

@@ -2671,6 +2829,7 @@ jobs:
      - FunctionalStatefulTestDebug
      - FunctionalStatefulTestRelease
      - FunctionalStatefulTestReleaseDatabaseOrdinary
+      - FunctionalStatefulTestAarch64
      - FunctionalStatefulTestAsan
      - FunctionalStatefulTestTsan
      - FunctionalStatefulTestMsan
@@ -1,4 +1,4 @@
-name: CIGithubActions
+name: PullRequestCI

env:
  # Force the stdout and stderr streams to be unbuffered

@@ -31,7 +31,25 @@ jobs:
        run: |
          cd $GITHUB_WORKSPACE/tests/ci
          python3 run_check.py
-  DockerHubPush:
+  DockerHubPushAarch64:
    needs: CheckLabels
    runs-on: [self-hosted, func-tester-aarch64]
    steps:
      - name: Clear repository
        run: |
          sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Images check
        run: |
          cd $GITHUB_WORKSPACE/tests/ci
          python3 docker_images_check.py --suffix aarch64
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: changed_images_aarch64
          path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
  DockerHubPushAmd64:
    needs: CheckLabels
    runs-on: [self-hosted, style-checker]
    steps:

@@ -43,12 +61,40 @@ jobs:
      - name: Images check
        run: |
          cd $GITHUB_WORKSPACE/tests/ci
-          python3 docker_images_check.py
+          python3 docker_images_check.py --suffix amd64
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: changed_images_amd64
          path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
  DockerHubPush:
    needs: [DockerHubPushAmd64, DockerHubPushAarch64]
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Clear repository
        run: |
          sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Download changed aarch64 images
        uses: actions/download-artifact@v2
        with:
          name: changed_images_aarch64
          path: ${{ runner.temp }}
      - name: Download changed amd64 images
        uses: actions/download-artifact@v2
        with:
          name: changed_images_amd64
          path: ${{ runner.temp }}
      - name: Images check
        run: |
          cd $GITHUB_WORKSPACE/tests/ci
          python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: changed_images
-          path: ${{ runner.temp }}/docker_images_check/changed_images.json
+          path: ${{ runner.temp }}/changed_images.json
  StyleCheck:
    needs: DockerHubPush
    runs-on: [self-hosted, style-checker]

@@ -928,8 +974,8 @@ jobs:
  BuilderReport:
    needs:
      - BuilderDebRelease
-      - BuilderBinRelease
+      - BuilderDebAarch64
+      - BuilderBinRelease
      - BuilderDebAsan
      - BuilderDebTsan
      - BuilderDebUBsan

@@ -1153,6 +1199,41 @@ jobs:
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatelessTestAarch64:
    needs: [BuilderDebAarch64]
    runs-on: [self-hosted, func-tester-aarch64]
    steps:
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/stateless_release
          REPORTS_PATH=${{runner.temp}}/reports_dir
          CHECK_NAME=Stateless tests (aarch64, actions)
          REPO_COPY=${{runner.temp}}/stateless_release/ClickHouse
          KILL_TIMEOUT=10800
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{ env.REPORTS_PATH }}
      - name: Clear repository
        run: |
          sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatelessTestAsan0:
    needs: [BuilderDebAsan]
    runs-on: [self-hosted, func-tester]

@@ -1668,6 +1749,41 @@ jobs:
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatefulTestAarch64:
    needs: [BuilderDebAarch64]
    runs-on: [self-hosted, func-tester-aarch64]
    steps:
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/stateful_release
          REPORTS_PATH=${{runner.temp}}/reports_dir
          CHECK_NAME=Stateful tests (aarch64, actions)
          REPO_COPY=${{runner.temp}}/stateful_release/ClickHouse
          KILL_TIMEOUT=3600
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{ env.REPORTS_PATH }}
      - name: Clear repository
        run: |
          sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatefulTestAsan:
    needs: [BuilderDebAsan]
    runs-on: [self-hosted, func-tester]

@@ -2888,6 +3004,7 @@ jobs:
      - FunctionalStatelessTestReleaseDatabaseReplicated0
      - FunctionalStatelessTestReleaseDatabaseReplicated1
      - FunctionalStatelessTestReleaseWideParts
+      - FunctionalStatelessTestAarch64
      - FunctionalStatelessTestAsan0
      - FunctionalStatelessTestAsan1
      - FunctionalStatelessTestTsan0

@@ -2899,6 +3016,7 @@ jobs:
      - FunctionalStatelessTestUBsan
      - FunctionalStatefulTestDebug
      - FunctionalStatefulTestRelease
+      - FunctionalStatefulTestAarch64
      - FunctionalStatefulTestAsan
      - FunctionalStatefulTestTsan
      - FunctionalStatefulTestMsan
.github/workflows/release.yml (51 changes, vendored)
@@ -19,7 +19,24 @@ on: # yamllint disable-line rule:truthy
    - '.github/**'
  workflow_dispatch:
jobs:
-  DockerHubPush:
+  DockerHubPushAarch64:
    runs-on: [self-hosted, func-tester-aarch64]
    steps:
      - name: Clear repository
        run: |
          sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Images check
        run: |
          cd $GITHUB_WORKSPACE/tests/ci
          python3 docker_images_check.py --suffix aarch64
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: changed_images_aarch64
          path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
  DockerHubPushAmd64:
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Clear repository

@@ -30,12 +47,40 @@ jobs:
      - name: Images check
        run: |
          cd $GITHUB_WORKSPACE/tests/ci
-          python3 docker_images_check.py
+          python3 docker_images_check.py --suffix amd64
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: changed_images_amd64
          path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
  DockerHubPush:
    needs: [DockerHubPushAmd64, DockerHubPushAarch64]
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Clear repository
        run: |
          sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Download changed aarch64 images
        uses: actions/download-artifact@v2
        with:
          name: changed_images_aarch64
          path: ${{ runner.temp }}
      - name: Download changed amd64 images
        uses: actions/download-artifact@v2
        with:
          name: changed_images_amd64
          path: ${{ runner.temp }}
      - name: Images check
        run: |
          cd $GITHUB_WORKSPACE/tests/ci
          python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: changed_images
-          path: ${{ runner.temp }}/docker_images_check/changed_images.json
+          path: ${{ runner.temp }}/changed_images.json
  DocsRelease:
    needs: DockerHubPush
    runs-on: [self-hosted, func-tester]
.github/workflows/release_branches.yml (165 changes, vendored)
@@ -12,7 +12,24 @@ on: # yamllint disable-line rule:truthy
    - '23.[1-9][1-9]'
    - '24.[1-9][1-9]'
jobs:
-  DockerHubPush:
+  DockerHubPushAarch64:
    runs-on: [self-hosted, func-tester-aarch64]
    steps:
      - name: Clear repository
        run: |
          sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Images check
        run: |
          cd $GITHUB_WORKSPACE/tests/ci
          python3 docker_images_check.py --suffix aarch64
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: changed_images_aarch64
          path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
  DockerHubPushAmd64:
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Clear repository

@@ -23,12 +40,40 @@ jobs:
      - name: Images check
        run: |
          cd $GITHUB_WORKSPACE/tests/ci
-          python3 docker_images_check.py
+          python3 docker_images_check.py --suffix amd64
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: changed_images_amd64
          path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
  DockerHubPush:
    needs: [DockerHubPushAmd64, DockerHubPushAarch64]
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Clear repository
        run: |
          sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Download changed aarch64 images
        uses: actions/download-artifact@v2
        with:
          name: changed_images_aarch64
          path: ${{ runner.temp }}
      - name: Download changed amd64 images
        uses: actions/download-artifact@v2
        with:
          name: changed_images_amd64
          path: ${{ runner.temp }}
      - name: Images check
        run: |
          cd $GITHUB_WORKSPACE/tests/ci
          python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: changed_images
-          path: ${{ runner.temp }}/docker_images_check/changed_images.json
+          path: ${{ runner.temp }}/changed_images.json
  CompatibilityCheck:
    needs: [BuilderDebRelease]
    runs-on: [self-hosted, style-checker]

@@ -109,6 +154,47 @@ jobs:
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH $CACHES_PATH
  BuilderDebAarch64:
    needs: [DockerHubPush]
    runs-on: [self-hosted, builder]
    steps:
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/build_check
          IMAGES_PATH=${{runner.temp}}/images_path
          REPO_COPY=${{runner.temp}}/build_check/ClickHouse
          CACHES_PATH=${{runner.temp}}/../ccaches
          CHECK_NAME=ClickHouse build check (actions)
          BUILD_NAME=package_aarch64
          EOF
      - name: Download changed images
        uses: actions/download-artifact@v2
        with:
          name: changed_images
          path: ${{ runner.temp }}/images_path
      - name: Check out repository code
        uses: actions/checkout@v2
        with:
          submodules: 'true'
          fetch-depth: 0 # otherwise we will have no info about contributors
      - name: Build
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NAME
      - name: Upload build URLs to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: ${{ env.BUILD_NAME }}
          path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH $CACHES_PATH
  BuilderDebAsan:
    needs: [DockerHubPush]
    runs-on: [self-hosted, builder]

@@ -340,6 +426,7 @@ jobs:
  BuilderReport:
    needs:
      - BuilderDebRelease
+      - BuilderDebAarch64
      - BuilderDebAsan
      - BuilderDebTsan
      - BuilderDebUBsan

@@ -413,6 +500,41 @@ jobs:
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatelessTestAarch64:
    needs: [BuilderDebAarch64]
    runs-on: [self-hosted, func-tester-aarch64]
    steps:
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/stateless_release
          REPORTS_PATH=${{runner.temp}}/reports_dir
          CHECK_NAME=Stateless tests (aarch64, actions)
          REPO_COPY=${{runner.temp}}/stateless_release/ClickHouse
          KILL_TIMEOUT=10800
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{ env.REPORTS_PATH }}
      - name: Clear repository
        run: |
          sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatelessTestAsan0:
    needs: [BuilderDebAsan]
    runs-on: [self-hosted, func-tester]

@@ -893,6 +1015,41 @@ jobs:
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatefulTestAarch64:
    needs: [BuilderDebAarch64]
    runs-on: [self-hosted, func-tester-aarch64]
    steps:
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/stateful_release
          REPORTS_PATH=${{runner.temp}}/reports_dir
          CHECK_NAME=Stateful tests (aarch64, actions)
          REPO_COPY=${{runner.temp}}/stateful_release/ClickHouse
          KILL_TIMEOUT=3600
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{ env.REPORTS_PATH }}
      - name: Clear repository
        run: |
          sudo rm -fr $GITHUB_WORKSPACE && mkdir $GITHUB_WORKSPACE
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatefulTestAsan:
    needs: [BuilderDebAsan]
    runs-on: [self-hosted, func-tester]

@@ -1580,6 +1737,7 @@ jobs:
      - FunctionalStatelessTestDebug1
      - FunctionalStatelessTestDebug2
      - FunctionalStatelessTestRelease
+      - FunctionalStatelessTestAarch64
      - FunctionalStatelessTestAsan0
      - FunctionalStatelessTestAsan1
      - FunctionalStatelessTestTsan0

@@ -1591,6 +1749,7 @@ jobs:
      - FunctionalStatelessTestUBsan
      - FunctionalStatefulTestDebug
      - FunctionalStatefulTestRelease
+      - FunctionalStatefulTestAarch64
      - FunctionalStatefulTestAsan
      - FunctionalStatefulTestTsan
      - FunctionalStatefulTestMsan
@@ -48,7 +48,9 @@ struct StringRef
    std::string toString() const { return std::string(data, size); }

    explicit operator std::string() const { return toString(); }
-    constexpr explicit operator std::string_view() const { return {data, size}; }
+    std::string_view toView() const { return std::string_view(data, size); }
+
+    constexpr explicit operator std::string_view() const { return std::string_view(data, size); }
};

/// Here constexpr doesn't implicate inline, see https://www.viva64.com/en/w/v1043/
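The hunk above adds a `toView()` helper next to the existing conversions: a view borrows the bytes instead of copying them. A minimal sketch of the difference, using a stripped-down stand-in for `StringRef` (not the full ClickHouse definition):

```cpp
#include <cstring>
#include <iostream>
#include <string>
#include <string_view>

// Simplified stand-in for the StringRef struct shown in the hunk above.
struct StringRef
{
    const char * data = nullptr;
    size_t size = 0;

    std::string toString() const { return std::string(data, size); }      // allocates and copies
    std::string_view toView() const { return std::string_view(data, size); } // zero-copy view
    constexpr explicit operator std::string_view() const { return std::string_view(data, size); }
};

int main()
{
    const char * buf = "hello";
    StringRef ref{buf, std::strlen(buf)};

    std::string copy = ref.toString(); // owns its own buffer
    std::string_view view = ref.toView(); // still points into buf

    std::cout << copy << ' ' << view.size() << '\n';
}
```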
@@ -1,3 +1,4 @@
+# rebuild in #33610
# docker build -t clickhouse/docs-build .
FROM ubuntu:20.04

@@ -1,5 +1,7 @@
+# rebuild in #33610
# docker build -t clickhouse/docs-check .
-FROM clickhouse/docs-builder
+ARG FROM_TAG=latest
+FROM clickhouse/docs-builder:$FROM_TAG

COPY run.sh /

@@ -1,5 +1,7 @@
+# rebuild in #33610
# docker build -t clickhouse/docs-release .
-FROM clickhouse/docs-builder
+ARG FROM_TAG=latest
+FROM clickhouse/docs-builder:$FROM_TAG

COPY run.sh /

@@ -1,4 +1,5 @@
-# docker build -t clickhouse/binary-builder .
+# rebuild in #33610
+# docker build -t clickhouse/binary-builder .
FROM ubuntu:20.04

# ARG for quick switch to a given ubuntu mirror

@@ -1,3 +1,4 @@
+# rebuild in #33610
# docker build -t clickhouse/deb-builder .
FROM ubuntu:20.04

@@ -28,12 +29,14 @@ RUN apt-get update \
    software-properties-common \
    --yes --no-install-recommends

+# Architecture of the image when BuildKit/buildx is used
+ARG TARGETARCH
+
# Special dpkg-deb (https://github.com/ClickHouse-Extras/dpkg) version which is able
# to compress files using pigz (https://zlib.net/pigz/) instead of gzip.
# Significantly increase deb packaging speed and compatible with old systems
-RUN curl -O https://clickhouse-datasets.s3.yandex.net/utils/1/dpkg-deb \
-    && chmod +x dpkg-deb \
-    && cp dpkg-deb /usr/bin
+RUN arch=${TARGETARCH:-amd64} \
+    && curl -Lo /usr/bin/dpkg-deb https://github.com/ClickHouse-Extras/dpkg/releases/download/1.21.1-clickhouse/dpkg-deb-${arch}

RUN apt-get update \
    && apt-get install \

@@ -1,5 +1,7 @@
+# rebuild in #33610
# docker build -t clickhouse/test-base .
-FROM clickhouse/test-util
+ARG FROM_TAG=latest
+FROM clickhouse/test-util:$FROM_TAG

# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"

@@ -28,12 +30,14 @@ RUN apt-get update \
    software-properties-common \
    --yes --no-install-recommends

+# Architecture of the image when BuildKit/buildx is used
+ARG TARGETARCH
+
# Special dpkg-deb (https://github.com/ClickHouse-Extras/dpkg) version which is able
# to compress files using pigz (https://zlib.net/pigz/) instead of gzip.
# Significantly increase deb packaging speed and compatible with old systems
-RUN curl -O https://clickhouse-datasets.s3.yandex.net/utils/1/dpkg-deb \
-    && chmod +x dpkg-deb \
-    && cp dpkg-deb /usr/bin
+RUN arch=${TARGETARCH:-amd64} \
+    && curl -Lo /usr/bin/dpkg-deb https://github.com/ClickHouse-Extras/dpkg/releases/download/1.21.1-clickhouse/dpkg-deb-${arch}

RUN apt-get update \
    && apt-get install \

@@ -1,12 +1,14 @@
+# rebuild in #33610
# docker build --network=host -t clickhouse/codebrowser .
# docker run --volume=path_to_repo:/repo_folder --volume=path_to_result:/test_output clickhouse/codebrowser
-FROM clickhouse/binary-builder
+ARG FROM_TAG=latest
+FROM clickhouse/binary-builder:$FROM_TAG

# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list

-RUN apt-get update && apt-get --yes --allow-unauthenticated install clang-13 libllvm13 libclang-13-dev
+RUN apt-get update && apt-get --yes --allow-unauthenticated install clang-13 libllvm13 libclang-13-dev libmlir-13-dev

# repo versions doesn't work correctly with C++17
# also we push reports to s3, so we add index.html to subfolder urls

@@ -1,5 +1,7 @@
+# rebuild in #33610
# docker build -t clickhouse/fasttest .
-FROM clickhouse/test-util
+ARG FROM_TAG=latest
+FROM clickhouse/test-util:$FROM_TAG

# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"

@@ -28,12 +30,14 @@ RUN apt-get update \
    software-properties-common \
    --yes --no-install-recommends

+# Architecture of the image when BuildKit/buildx is used
+ARG TARGETARCH
+
# Special dpkg-deb (https://github.com/ClickHouse-Extras/dpkg) version which is able
# to compress files using pigz (https://zlib.net/pigz/) instead of gzip.
# Significantly increase deb packaging speed and compatible with old systems
-RUN curl -O https://clickhouse-datasets.s3.yandex.net/utils/1/dpkg-deb \
-    && chmod +x dpkg-deb \
-    && cp dpkg-deb /usr/bin
+RUN arch=${TARGETARCH:-amd64} \
+    && curl -Lo /usr/bin/dpkg-deb https://github.com/ClickHouse-Extras/dpkg/releases/download/1.21.1-clickhouse/dpkg-deb-${arch}

RUN apt-get update \
    && apt-get install \

@@ -1,5 +1,7 @@
+# rebuild in #33610
# docker build -t clickhouse/fuzzer .
-FROM clickhouse/test-base
+ARG FROM_TAG=latest
+FROM clickhouse/test-base:$FROM_TAG

# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
@@ -1,44 +1,57 @@
+# rebuild in #33610
# docker build -t clickhouse/integration-test .
-FROM clickhouse/test-base
+ARG FROM_TAG=latest
+FROM clickhouse/test-base:$FROM_TAG

SHELL ["/bin/bash", "-c"]

RUN apt-get update \
    && env DEBIAN_FRONTEND=noninteractive apt-get -y install \
-    tzdata \
-    python3 \
-    libicu-dev \
    bsdutils \
+    curl \
+    default-jre \
+    g++ \
    gdb \
-    unixodbc \
-    odbcinst \
+    iproute2 \
+    krb5-user \
+    libicu-dev \
    libsqlite3-dev \
    libsqliteodbc \
-    odbc-postgresql \
-    sqlite3 \
-    curl \
-    tar \
-    lz4 \
-    krb5-user \
-    iproute2 \
    lsof \
-    g++ \
-    default-jre
+    lz4 \
+    odbc-postgresql \
+    odbcinst \
+    python3 \
+    rpm2cpio \
+    sqlite3 \
+    tar \
+    tzdata \
+    unixodbc \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

-RUN rm -rf \
-    /var/lib/apt/lists/* \
-    /var/cache/debconf \
-    /tmp/*
-RUN apt-get clean
+# Architecture of the image when BuildKit/buildx is used
+ARG TARGETARCH

-# Install MySQL ODBC driver
-RUN curl 'https://downloads.mysql.com/archives/get/p/10/file/mysql-connector-odbc-8.0.21-linux-glibc2.12-x86-64bit.tar.gz' --location --output 'mysql-connector.tar.gz' && tar -xzf mysql-connector.tar.gz && cd mysql-connector-odbc-8.0.21-linux-glibc2.12-x86-64bit/lib && mv * /usr/local/lib && ln -s /usr/local/lib/libmyodbc8a.so /usr/lib/x86_64-linux-gnu/odbc/libmyodbc.so
+# Install MySQL ODBC driver from RHEL rpm
+RUN arch=${TARGETARCH:-amd64} \
+    && case $arch in \
+        amd64) rarch=x86_64 ;; \
+        arm64) rarch=aarch64 ;; \
+    esac \
+    && cd /tmp \
+    && curl -o mysql-odbc.rpm "https://cdn.mysql.com/Downloads/Connector-ODBC/8.0/mysql-connector-odbc-8.0.27-1.el8.${rarch}.rpm" \
+    && rpm2archive mysql-odbc.rpm \
+    && tar xf mysql-odbc.rpm.tgz -C / ./usr/lib64/ \
+    && LINK_DIR=$(dpkg -L libodbc1 | grep '^/usr/lib/.*-linux-gnu/odbc$') \
+    && ln -s /usr/lib64/libmyodbc8a.so "$LINK_DIR" \
+    && ln -s /usr/lib64/libmyodbc8a.so "$LINK_DIR"/libmyodbc.so

# Unfortunately this is required for a single test for conversion data from zookeeper to clickhouse-keeper.
# ZooKeeper is not started by default, but consumes some space in containers.
# 777 perms used to allow anybody to start/stop ZooKeeper
ENV ZOOKEEPER_VERSION='3.6.3'
-RUN curl -O "https://mirrors.estointernet.in/apache/zookeeper/zookeeper-${ZOOKEEPER_VERSION}/apache-zookeeper-${ZOOKEEPER_VERSION}-bin.tar.gz"
+RUN curl -O "https://dlcdn.apache.org/zookeeper/zookeeper-${ZOOKEEPER_VERSION}/apache-zookeeper-${ZOOKEEPER_VERSION}-bin.tar.gz"
RUN tar -zxvf apache-zookeeper-${ZOOKEEPER_VERSION}-bin.tar.gz && mv apache-zookeeper-${ZOOKEEPER_VERSION}-bin /opt/zookeeper && chmod -R 777 /opt/zookeeper && rm apache-zookeeper-${ZOOKEEPER_VERSION}-bin.tar.gz
RUN echo $'tickTime=2500 \n\
tickTime=2500 \n\
@@ -1,5 +1,7 @@
+# rebuild in #33610
# docker build -t clickhouse/keeper-jepsen-test .
-FROM clickhouse/test-base
+ARG FROM_TAG=latest
+FROM clickhouse/test-base:$FROM_TAG

ENV DEBIAN_FRONTEND=noninteractive
ENV CLOJURE_VERSION=1.10.3.814
@@ -1,8 +1,14 @@
+# rebuild in #33610
# docker build -t clickhouse/pvs-test .

-FROM clickhouse/binary-builder
+ARG FROM_TAG=latest
+FROM clickhouse/binary-builder:$FROM_TAG

-RUN apt-get update --yes \
+# PVS studio doesn't support aarch64/arm64, so there is a check for it everywhere
+# We'll produce an empty image for arm64
+ARG TARGETARCH
+
+RUN test x$TARGETARCH = xarm64 || ( apt-get update --yes \
    && apt-get install \
    bash \
    wget \

@@ -15,7 +21,7 @@ RUN apt-get update --yes \
    libprotoc-dev \
    libgrpc++-dev \
    libc-ares-dev \
-    --yes --no-install-recommends
+    --yes --no-install-recommends )

#RUN wget -nv -O - http://files.viva64.com/etc/pubkey.txt | sudo apt-key add -
#RUN sudo wget -nv -O /etc/apt/sources.list.d/viva64.list http://files.viva64.com/etc/viva64.list

@@ -27,7 +33,7 @@ RUN apt-get update --yes \

ENV PKG_VERSION="pvs-studio-latest"

-RUN set -x \
+RUN test x$TARGETARCH = xarm64 || ( set -x \
    && export PUBKEY_HASHSUM="ad369a2e9d8b8c30f5a9f2eb131121739b79c78e03fef0f016ea51871a5f78cd4e6257b270dca0ac3be3d1f19d885516" \
    && wget -nv https://files.viva64.com/etc/pubkey.txt -O /tmp/pubkey.txt \
    && echo "${PUBKEY_HASHSUM} /tmp/pubkey.txt" | sha384sum -c \

@@ -35,7 +41,7 @@ RUN set -x \
    && wget -nv "https://files.viva64.com/${PKG_VERSION}.deb" \
    && { debsig-verify ${PKG_VERSION}.deb \
    || echo "WARNING: Some file was just downloaded from the internet without any validation and we are installing it into the system"; } \
-    && dpkg -i "${PKG_VERSION}.deb"
+    && dpkg -i "${PKG_VERSION}.deb" )

ENV CCACHE_DIR=/test_output/ccache
@@ -1,5 +1,7 @@
+# rebuild in #33610
# docker build -t clickhouse/split-build-smoke-test .
-FROM clickhouse/binary-builder
+ARG FROM_TAG=latest
+FROM clickhouse/binary-builder:$FROM_TAG

COPY run.sh /run.sh
COPY process_split_build_smoke_test_result.py /

@@ -1,5 +1,7 @@
+# rebuild in #33610
# docker build -t clickhouse/stateful-test .
-FROM clickhouse/stateless-test
+ARG FROM_TAG=latest
+FROM clickhouse/stateless-test:$FROM_TAG

RUN apt-get update -y \
    && env DEBIAN_FRONTEND=noninteractive \

@@ -1,11 +1,10 @@
+# rebuild in #33610
# docker build -t clickhouse/stateless-test .
-FROM clickhouse/test-base
+ARG FROM_TAG=latest
+FROM clickhouse/test-base:$FROM_TAG

ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.4.20200302/clickhouse-odbc-1.1.4-Linux.tar.gz"

RUN echo "deb [trusted=yes] http://repo.mysql.com/apt/ubuntu/ bionic mysql-5.7" >> /etc/apt/sources.list \
    && apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 8C718D3B5072E1F5

RUN apt-get update -y \
    && env DEBIAN_FRONTEND=noninteractive \
    apt-get install --yes --no-install-recommends \

@@ -30,7 +29,7 @@ RUN apt-get update -y \
    tree \
    unixodbc \
    wget \
-    mysql-client=5.7* \
+    mysql-client=8.0* \
    postgresql-client \
    sqlite3

@@ -49,10 +48,13 @@ RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
ENV NUM_TRIES=1
ENV MAX_RUN_TIME=0

+ARG TARGETARCH
+
# Download Minio-related binaries
-RUN wget 'https://dl.min.io/server/minio/release/linux-amd64/minio' \
+RUN arch=${TARGETARCH:-amd64} \
+    && wget "https://dl.min.io/server/minio/release/linux-${arch}/minio" \
    && chmod +x ./minio \
-    && wget 'https://dl.min.io/client/mc/release/linux-amd64/mc' \
+    && wget "https://dl.min.io/client/mc/release/linux-${arch}/mc" \
    && chmod +x ./mc

ENV MINIO_ROOT_USER="clickhouse"

@@ -1,5 +1,7 @@
+# rebuild in #33610
# docker build -t clickhouse/stateless-pytest .
-FROM clickhouse/test-base
+ARG FROM_TAG=latest
+FROM clickhouse/test-base:$FROM_TAG

RUN apt-get update -y && \
    apt-get install -y --no-install-recommends \

@@ -1,5 +1,7 @@
+# rebuild in #33610
# docker build -t clickhouse/stress-test .
-FROM clickhouse/stateful-test
+ARG FROM_TAG=latest
+FROM clickhouse/stateful-test:$FROM_TAG

RUN apt-get update -y \
    && env DEBIAN_FRONTEND=noninteractive \

@@ -146,6 +146,7 @@ handle SIGUSR2 nostop noprint pass
handle SIG$RTMIN nostop noprint pass
info signals
continue
+gcore
backtrace full
info locals
info registers

@@ -263,3 +264,10 @@ done
# Write check result into check_status.tsv
clickhouse-local --structure "test String, res String" -q "SELECT 'failure', test FROM table WHERE res != 'OK' order by (lower(test) like '%hung%') LIMIT 1" < /test_output/test_results.tsv > /test_output/check_status.tsv
[ -s /test_output/check_status.tsv ] || echo -e "success\tNo errors found" > /test_output/check_status.tsv

+# Core dumps (see gcore)
+# Default filename is 'core.PROCESS_ID'
+for core in core.*; do
+    pigz $core
+    mv $core.gz /output/
+done
@@ -1,5 +1,7 @@
+# rebuild in #33610
# docker build -t clickhouse/unit-test .
-FROM clickhouse/stateless-test
+ARG FROM_TAG=latest
+FROM clickhouse/stateless-test:$FROM_TAG

RUN apt-get install gdb

@@ -1,3 +1,4 @@
+# rebuild in #33610
# docker build -t clickhouse/test-util .

FROM ubuntu:20.04
@@ -175,7 +175,7 @@ When we are going to read something from a part in `MergeTree`, we look at `prim

When you `INSERT` a bunch of data into `MergeTree`, that bunch is sorted by primary key order and forms a new part. There are background threads that periodically select some parts and merge them into a single sorted part to keep the number of parts relatively low. That’s why it is called `MergeTree`. Of course, merging leads to “write amplification”. All parts are immutable: they are only created and deleted, but not modified. When SELECT is executed, it holds a snapshot of the table (a set of parts). After merging, we also keep old parts for some time to make a recovery after failure easier, so if we see that some merged part is probably broken, we can replace it with its source parts.

-`MergeTree` is not an LSM tree because it does not contain “memtable” and “log”: inserted data is written directly to the filesystem. This makes it suitable only to INSERT data in batches, not by individual row and not very frequently – about once per second is ok, but a thousand times a second is not. We did it this way for simplicity’s sake, and because we are already inserting data in batches in our applications.
+`MergeTree` is not an LSM tree because it does not contain MEMTABLE and LOG: inserted data is written directly to the filesystem. This behavior makes MergeTree much more suitable to insert data in batches. Therefore frequently inserting small amounts of rows is not ideal for MergeTree. For example, a couple of rows per second is OK, but doing it a thousand times a second is not optimal for MergeTree. However, there is an async insert mode for small inserts to overcome this limitation. We did it this way for simplicity’s sake, and because we are already inserting data in batches in our applications.

There are MergeTree engines that are doing additional work during background merges. Examples are `CollapsingMergeTree` and `AggregatingMergeTree`. This could be treated as special support for updates. Keep in mind that these are not real updates because users usually have no control over the time when background merges are executed, and data in a `MergeTree` table is almost always stored in more than one part, not in completely merged form.
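The batching guidance in the rewritten paragraph is the part applications most often get wrong. As an illustrative sketch (client-side logic under assumed interfaces, not ClickHouse code), a writer would typically buffer rows and flush one large INSERT per second or per N rows instead of inserting row by row:

```cpp
#include <chrono>
#include <functional>
#include <string>
#include <utility>
#include <vector>

// Illustrative client-side batching buffer. The flush callback stands in for
// whatever actually sends an INSERT to the server (hypothetical interface).
class InsertBatcher
{
public:
    InsertBatcher(size_t max_rows, std::chrono::milliseconds max_age,
                  std::function<void(const std::vector<std::string> &)> flush_fn)
        : max_rows(max_rows), max_age(max_age), flush(std::move(flush_fn)) {}

    void add(std::string row)
    {
        if (rows.empty())
            first_row_at = std::chrono::steady_clock::now();
        rows.push_back(std::move(row));

        // Flush on size or age, so each INSERT carries a large batch
        // (one new part) instead of a single row (one tiny part each).
        bool too_big = rows.size() >= max_rows;
        bool too_old = std::chrono::steady_clock::now() - first_row_at >= max_age;
        if (too_big || too_old)
        {
            flush(rows);
            rows.clear();
        }
    }

private:
    size_t max_rows;
    std::chrono::milliseconds max_age;
    std::function<void(const std::vector<std::string> &)> flush;
    std::vector<std::string> rows;
    std::chrono::steady_clock::time_point first_row_at;
};
```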
@@ -158,6 +158,8 @@ While inside the `build` directory, configure your build by running CMake. Before
    export CC=clang CXX=clang++
    cmake ..

+If you installed clang using the automatic installation script above, also specify the version of clang installed in the first command, e.g. `export CC=clang-13 CXX=clang++-13`. The clang version will be in the script output.
+
The `CC` variable specifies the compiler for C (short for C Compiler), and `CXX` variable instructs which C++ compiler is to be used for building.

For a faster build, you can resort to the `debug` build type - a build with no optimizations. For that supply the following parameter `-D CMAKE_BUILD_TYPE=Debug`:
@@ -66,4 +66,14 @@ SELECT COUNT() FROM mongo_table;
└─────────┘
```

You can also adjust connection timeout:

``` sql
CREATE TABLE mongo_table
(
    key UInt64,
    data String
) ENGINE = MongoDB('mongo2:27017', 'test', 'simple_table', 'testuser', 'clickhouse', 'connectTimeoutMS=100000');
```

[Original article](https://clickhouse.com/docs/en/engines/table-engines/integrations/mongodb/) <!--hide-->
@@ -380,6 +380,42 @@ Result:
└──────┘
```

## h3HexAreaKm2 {#h3hexareakm2}

Returns average hexagon area in square kilometers at the given resolution.

**Syntax**

``` sql
h3HexAreaKm2(resolution)
```

**Parameter**

- `resolution` — Index resolution. Range: `[0, 15]`. Type: [UInt8](../../../sql-reference/data-types/int-uint.md).

**Returned value**

- Area in square kilometers.

Type: [Float64](../../../sql-reference/data-types/float.md).

**Example**

Query:

``` sql
SELECT h3HexAreaKm2(13) AS area;
```

Result:

``` text
┌──────area─┐
│ 0.0000439 │
└───────────┘
```

## h3IndexesAreNeighbors {#h3indexesareneighbors}

Returns whether or not the provided [H3](#h3index) indexes are neighbors.

@@ -704,4 +740,144 @@ Result:
└───────┘
```

## h3DegsToRads {#h3degstorads}

Converts degrees to radians.

**Syntax**

``` sql
h3DegsToRads(degrees)
```

**Parameter**

- `degrees` — Input in degrees. Type: [Float64](../../../sql-reference/data-types/float.md).

**Returned values**

- Radians. Type: [Float64](../../../sql-reference/data-types/float.md).

**Example**

Query:

``` sql
SELECT h3DegsToRads(180.0) AS radians;
```

Result:

``` text
┌───────────radians─┐
│ 3.141592653589793 │
└───────────────────┘
```

## h3RadsToDegs {#h3radstodegs}

Converts radians to degrees.

**Syntax**

``` sql
h3RadsToDegs(radians)
```

**Parameter**

- `radians` — Input in radians. Type: [Float64](../../../sql-reference/data-types/float.md).

**Returned values**

- Degrees. Type: [Float64](../../../sql-reference/data-types/float.md).

**Example**

Query:

``` sql
SELECT h3RadsToDegs(3.141592653589793) AS degrees;
```

Result:

``` text
┌─degrees─┐
│     180 │
└─────────┘
```

## h3CellAreaM2 {#h3cellaream2}

Returns the exact area of a specific cell in square meters corresponding to the given input H3 index.

**Syntax**

``` sql
h3CellAreaM2(index)
```

**Parameter**

- `index` — Hexagon index number. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).

**Returned value**

- Cell area in square meters.

Type: [Float64](../../../sql-reference/data-types/float.md).

**Example**

Query:

``` sql
SELECT h3CellAreaM2(579205133326352383) AS area;
```

Result:

``` text
┌───────────────area─┐
│ 4106166334463.9233 │
└────────────────────┘
```

## h3CellAreaRads2 {#h3cellarearads2}

Returns the exact area of a specific cell in square radians corresponding to the given input H3 index.

**Syntax**

``` sql
h3CellAreaRads2(index)
```

**Parameter**

- `index` — Hexagon index number. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).

**Returned value**

- Cell area in square radians.

Type: [Float64](../../../sql-reference/data-types/float.md).

**Example**

Query:

``` sql
SELECT h3CellAreaRads2(579205133326352383) AS area;
```

Result:

``` text
┌────────────────area─┐
│ 0.10116268528089567 │
└─────────────────────┘
```

[Original article](https://clickhouse.com/docs/en/sql-reference/functions/geo/h3) <!--hide-->
@ -16,7 +16,7 @@ This query tries to initialize an unscheduled merge of data parts for tables.

OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition | PARTITION ID 'partition_id'] [FINAL] [DEDUPLICATE [BY expression]]
```

The `OPTMIZE` query is supported for [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) family, the [MaterializedView](../../engines/table-engines/special/materializedview.md) and the [Buffer](../../engines/table-engines/special/buffer.md) engines. Other table engines aren’t supported.
The `OPTIMIZE` query is supported for [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) family, the [MaterializedView](../../engines/table-engines/special/materializedview.md) and the [Buffer](../../engines/table-engines/special/buffer.md) engines. Other table engines aren’t supported.

When `OPTIMIZE` is used with the [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md) family of table engines, ClickHouse creates a task for merging and waits for execution on all replicas (if the [replication_alter_partitions_sync](../../operations/settings/settings.md#replication-alter-partitions-sync) setting is set to `2`) or on current replica (if the [replication_alter_partitions_sync](../../operations/settings/settings.md#replication-alter-partitions-sync) setting is set to `1`).
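
As a minimal usage sketch of the syntax above (the table name `hits` is hypothetical and not part of this diff):

``` sql
-- Force an unscheduled merge of parts and drop fully identical rows while merging.
OPTIMIZE TABLE hits FINAL DEDUPLICATE;

-- Deduplicate only by a subset of columns.
OPTIMIZE TABLE hits DEDUPLICATE BY id, event_date;
```
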
@ -132,4 +132,33 @@ Kafka features:

For a detailed list of configuration options, see the [librdkafka configuration reference](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md). Use underscores (`_`) instead of dots (`.`) in the ClickHouse configuration. For example, `check.crcs=true` becomes `<check_crcs>true</check_crcs>`.

### Kerberos support {#kafka-kerberos-zhi-chi}

For Kafka that uses Kerberos, it is enough to set `security_protocol` to `sasl_plaintext` if the Kerberos ticket is obtained and cached by the operating system.
ClickHouse can also maintain Kerberos credentials itself using a keytab file. To do so, configure the three child elements `sasl_kerberos_service_name`, `sasl_kerberos_keytab`, and `sasl_kerberos_principal`.

Example:

``` xml
  <!-- Kerberos-aware Kafka -->
  <kafka>
    <security_protocol>SASL_PLAINTEXT</security_protocol>
    <sasl_kerberos_keytab>/home/kafkauser/kafkauser.keytab</sasl_kerberos_keytab>
    <sasl_kerberos_principal>kafkauser/kafkahost@EXAMPLE.COM</sasl_kerberos_principal>
  </kafka>
```

## Virtual columns

The engine provides the following virtual columns (a usage sketch follows this list):

- `_topic` – Kafka topic.
- `_key` – Message key.
- `_offset` – Message offset.
- `_timestamp` – Message timestamp.
- `_partition` – Partition of the Kafka topic.
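
A minimal sketch (the table name `kafka_queue` and the payload column `value` are hypothetical, not part of this diff): virtual columns are selected like ordinary columns but are never listed in the table definition.

``` sql
-- Read message metadata next to the payload of a Kafka engine table.
SELECT
    _topic,
    _partition,
    _offset,
    _timestamp,
    value
FROM kafka_queue
LIMIT 10;
```
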
**See Also**

- [Virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns)
- [background_message_broker_schedule_pool_size](../../../operations/settings/settings.md#background_message_broker_schedule_pool_size)

[Original article](https://clickhouse.com/docs/zh/operations/table_engines/kafka/) <!--hide-->

@ -1,23 +1,17 @@
#include <stdlib.h>
#include <fcntl.h>
#include <signal.h>
#include <map>
#include <iostream>
#include <fstream>
#include <iomanip>
#include <unordered_set>
#include <algorithm>
#include <optional>
#include <base/scope_guard_safe.h>
#include <boost/program_options.hpp>
#include <boost/algorithm/string/replace.hpp>
#include <Poco/String.h>
#include <filesystem>
#include <string>
#include "Client.h"
#include "Core/Protocol.h"

#include <base/argsToConfig.h>
#include <base/find_symbols.h>

#include <Common/config_version.h>

@ -566,6 +566,7 @@ if (ENABLE_TESTS AND USE_GTEST)
    clickhouse_parsers
    clickhouse_storages_system
    dbms
    clickhouse_common_config
    clickhouse_common_zookeeper
    clickhouse_common_config
    string_utils)

@ -463,12 +463,13 @@ void ClientBase::initBlockOutputStream(const Block & block, ASTPtr parsed_query)
    /// The query can specify output format or output file.
    if (const auto * query_with_output = dynamic_cast<const ASTQueryWithOutput *>(parsed_query.get()))
    {
        String out_file;
        if (query_with_output->out_file)
        {
            select_into_file = true;

            const auto & out_file_node = query_with_output->out_file->as<ASTLiteral &>();
            const auto & out_file = out_file_node.value.safeGet<std::string>();
            out_file = out_file_node.value.safeGet<std::string>();

            std::string compression_method;
            if (query_with_output->compression)
@ -494,6 +495,12 @@ void ClientBase::initBlockOutputStream(const Block & block, ASTPtr parsed_query)
            const auto & id = query_with_output->format->as<ASTIdentifier &>();
            current_format = id.name();
        }
        else if (query_with_output->out_file)
        {
            const auto & format_name = FormatFactory::instance().getFormatFromFileName(out_file);
            if (!format_name.empty())
                current_format = format_name;
        }
    }

    if (has_vertical_output_suffix)
@ -1008,11 +1015,15 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des
        compression_method = compression_method_node.value.safeGet<std::string>();
    }

    String current_format = parsed_insert_query->format;
    if (current_format.empty())
        current_format = FormatFactory::instance().getFormatFromFileName(in_file);

    /// Create temporary storage file, to support globs and parallel reading
    StorageFile::CommonArguments args{
        WithContext(global_context),
        parsed_insert_query->table_id,
        parsed_insert_query->format,
        current_format,
        getFormatSettings(global_context),
        compression_method,
        columns_description_for_query,
@ -214,6 +214,12 @@ bool LocalConnection::poll(size_t)
    if (next_packet_type)
        return true;

    if (state->exception)
    {
        next_packet_type = Protocol::Server::Exception;
        return true;
    }

    if (!state->is_finished)
    {
        if (send_progress && (state->after_send_progress.elapsedMicroseconds() >= query_context->getSettingsRef().interactive_delay))

@ -323,7 +323,7 @@ private:
        UInt64 address = 0;
    };
    static const UInt64 mask = 0xFFFFFFFFFFFFFFFC;
    static const UInt32 medium_set_size_max = 1UL << medium_set_power2_max;
    static const UInt32 medium_set_size_max = 1ULL << medium_set_power2_max;
};

}

@ -280,7 +280,7 @@ public:
        if ((reinterpret_cast<uintptr_t>(p) & 2048) == 0)
        {
            memcpy(&n[0], p, 8);
            n[0] &= -1ul >> s;
            n[0] &= -1ULL >> s;
        }
        else
        {

@ -114,7 +114,7 @@ public:
        if ((reinterpret_cast<uintptr_t>(p) & 2048) == 0)
        {
            memcpy(&n[0], p, 8);
            n[0] &= -1ul >> s;
            n[0] &= -1ULL >> s;
        }
        else
        {

@ -23,7 +23,7 @@ static constexpr auto NS = 1000000000UL;

/// Tracking window. Actually the size is not really important. We just want to avoid
/// throttles when there are no actions for a long period time.
static const double window_ns = 1UL * NS;
static const double window_ns = 1ULL * NS;

void Throttler::add(size_t amount)
{
7
src/Common/tests/gtest_global_context.cpp
Normal file
@ -0,0 +1,7 @@
#include "gtest_global_context.h"

const ContextHolder & getContext()
{
    static ContextHolder holder;
    return holder;
}

@ -18,8 +18,4 @@ struct ContextHolder
    ContextHolder(ContextHolder &&) = default;
};

inline const ContextHolder & getContext()
{
    static ContextHolder holder;
    return holder;
}
const ContextHolder & getContext();

@ -168,7 +168,7 @@ void deserializeKeeperStorageFromSnapshot(KeeperStorage & storage, const std::st
    auto max_session_id = deserializeSessionAndTimeout(storage, reader);
    LOG_INFO(log, "Sessions and timeouts deserialized");

    storage.session_id_counter = max_session_id;
    storage.session_id_counter = max_session_id + 1; /// session_id_counter pointer to next slot
    deserializeACLMap(storage, reader);
    LOG_INFO(log, "ACLs deserialized");

@ -156,13 +156,15 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String

        StorageMySQLConfiguration configuration;
        ASTs & arguments = engine->arguments->children;
        MySQLSettings mysql_settings;

        if (auto named_collection = getExternalDataSourceConfiguration(arguments, context, true))
        if (auto named_collection = getExternalDataSourceConfiguration(arguments, context, true, true, mysql_settings))
        {
            auto [common_configuration, storage_specific_args] = named_collection.value();
            auto [common_configuration, storage_specific_args, settings_changes] = named_collection.value();

            configuration.set(common_configuration);
            configuration.addresses = {std::make_pair(configuration.host, configuration.port)};
            mysql_settings.applyChanges(settings_changes);

            if (!storage_specific_args.empty())
                throw Exception(ErrorCodes::BAD_ARGUMENTS,
@ -200,7 +202,6 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String
        if (engine_name == "MySQL")
        {
            auto mysql_database_settings = std::make_unique<ConnectionMySQLSettings>();
            MySQLSettings mysql_settings;
            auto mysql_pool = createMySQLPoolWithFailover(configuration, mysql_settings);

            mysql_database_settings->loadFromQueryContext(context);
@ -299,7 +300,7 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String

        if (auto named_collection = getExternalDataSourceConfiguration(engine_args, context, true))
        {
            auto [common_configuration, storage_specific_args] = named_collection.value();
            auto [common_configuration, storage_specific_args, _] = named_collection.value();

            configuration.set(common_configuration);
            configuration.addresses = {std::make_pair(configuration.host, configuration.port)};
@ -358,7 +359,7 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String

        if (auto named_collection = getExternalDataSourceConfiguration(engine_args, context, true))
        {
            auto [common_configuration, storage_specific_args] = named_collection.value();
            auto [common_configuration, storage_specific_args, _] = named_collection.value();
            configuration.set(common_configuration);

            if (!storage_specific_args.empty())
@ -50,12 +50,17 @@ DatabaseMaterializedPostgreSQL::DatabaseMaterializedPostgreSQL(
    , remote_database_name(postgres_database_name)
    , connection_info(connection_info_)
    , settings(std::move(settings_))
    , startup_task(getContext()->getSchedulePool().createTask("MaterializedPostgreSQLDatabaseStartup", [this]{ startSynchronization(); }))
{
}


void DatabaseMaterializedPostgreSQL::startSynchronization()
{
    std::lock_guard lock(handler_mutex);
    if (shutdown_called)
        return;

    replication_handler = std::make_unique<PostgreSQLReplicationHandler>(
        /* replication_identifier */database_name,
        remote_database_name,
@ -104,24 +109,14 @@ void DatabaseMaterializedPostgreSQL::startSynchronization()
    }

    LOG_TRACE(log, "Loaded {} tables. Starting synchronization", materialized_tables.size());
    replication_handler->startup();
    replication_handler->startup(/* delayed */false);
}


void DatabaseMaterializedPostgreSQL::startupTables(ThreadPool & thread_pool, bool force_restore, bool force_attach)
{
    DatabaseAtomic::startupTables(thread_pool, force_restore, force_attach);
    try
    {
        startSynchronization();
    }
    catch (...)
    {
        tryLogCurrentException(log, "Cannot load nested database objects for PostgreSQL database engine.");

        if (!force_attach)
            throw;
    }
    startup_task->activateAndSchedule();
}


@ -376,6 +371,7 @@ StoragePtr DatabaseMaterializedPostgreSQL::detachTable(ContextPtr context_, cons

void DatabaseMaterializedPostgreSQL::shutdown()
{
    startup_task->deactivate();
    stopReplication();
    DatabaseAtomic::shutdown();
}

@ -387,6 +383,7 @@ void DatabaseMaterializedPostgreSQL::stopReplication()
    if (replication_handler)
        replication_handler->shutdown();

    shutdown_called = true;
    /// Clear wrappers over nested, all access is not done to nested tables directly.
    materialized_tables.clear();
}

@ -86,6 +86,9 @@ private:
    std::map<std::string, StoragePtr> materialized_tables;
    mutable std::mutex tables_mutex;
    mutable std::mutex handler_mutex;

    BackgroundSchedulePool::TaskHolder startup_task;
    bool shutdown_called = false;
};

}

@ -247,12 +247,13 @@ void registerDictionarySourceClickHouse(DictionarySourceFactory & factory)

        if (named_collection)
        {
            host = named_collection->host;
            user = named_collection->username;
            password = named_collection->password;
            db = named_collection->database;
            table = named_collection->table;
            port = named_collection->port;
            const auto & configuration = named_collection->configuration;
            host = configuration.host;
            user = configuration.username;
            password = configuration.password;
            db = configuration.database;
            table = configuration.table;
            port = configuration.port;
        }

        ClickHouseDictionarySource::Configuration configuration{

@ -28,7 +28,7 @@ void registerDictionarySourceMongoDB(DictionarySourceFactory & factory)
        auto named_collection = getExternalDataSourceConfiguration(config, config_prefix, context, has_config_key);
        if (named_collection)
        {
            configuration = *named_collection;
            configuration = named_collection->configuration;
        }
        else
        {

@ -60,19 +60,25 @@ void registerDictionarySourceMysql(DictionarySourceFactory & factory)

        auto settings_config_prefix = config_prefix + ".mysql";
        std::shared_ptr<mysqlxx::PoolWithFailover> pool;
        auto has_config_key = [](const String & key) { return dictionary_allowed_keys.contains(key) || key.starts_with("replica"); };
        MySQLSettings mysql_settings;
        auto has_config_key = [&](const String & key)
        {
            return dictionary_allowed_keys.contains(key) || key.starts_with("replica") || mysql_settings.has(key);
        };
        StorageMySQLConfiguration configuration;
        auto named_collection = created_from_ddl
            ? getExternalDataSourceConfiguration(config, settings_config_prefix, global_context, has_config_key)
            ? getExternalDataSourceConfiguration(config, settings_config_prefix, global_context, has_config_key, mysql_settings)
            : std::nullopt;
        if (named_collection)
        {
            configuration.set(*named_collection);
            mysql_settings.applyChanges(named_collection->settings_changes);
            configuration.set(named_collection->configuration);
            configuration.addresses = {std::make_pair(configuration.host, configuration.port)};
            MySQLSettings mysql_settings;
            const auto & settings = global_context->getSettingsRef();
            mysql_settings.connect_timeout = settings.external_storage_connect_timeout_sec;
            mysql_settings.read_write_timeout = settings.external_storage_rw_timeout_sec;
            if (!mysql_settings.isChanged("connect_timeout"))
                mysql_settings.connect_timeout = settings.external_storage_connect_timeout_sec;
            if (!mysql_settings.isChanged("read_write_timeout"))
                mysql_settings.read_write_timeout = settings.external_storage_rw_timeout_sec;
            pool = std::make_shared<mysqlxx::PoolWithFailover>(createMySQLPoolWithFailover(configuration, mysql_settings));
        }
        else
@ -14,6 +14,8 @@
#include <Poco/URI.h>
#include <Common/Exception.h>

#include <boost/algorithm/string/case_conv.hpp>

namespace DB
{

@ -191,7 +193,8 @@ InputFormatPtr FormatFactory::getInput(


        ParallelParsingInputFormat::Params params{
            buf, sample, parser_creator, file_segmentation_engine, name, settings.max_threads, settings.min_chunk_bytes_for_parallel_parsing};
            buf, sample, parser_creator, file_segmentation_engine, name, settings.max_threads, settings.min_chunk_bytes_for_parallel_parsing,
            context->getApplicationType() == Context::ApplicationType::SERVER};
        return std::make_shared<ParallelParsingInputFormat>(params);
    }

@ -391,6 +394,30 @@ void FormatFactory::registerOutputFormat(const String & name, OutputCreator outp
    target = std::move(output_creator);
}

void FormatFactory::registerFileExtension(const String & extension, const String & format_name)
{
    file_extension_formats[extension] = format_name;
}

String FormatFactory::getFormatFromFileName(String file_name)
{
    CompressionMethod compression_method = chooseCompressionMethod(file_name, "");
    if (CompressionMethod::None != compression_method)
    {
        auto pos = file_name.find_last_of('.');
        if (pos != String::npos)
            file_name = file_name.substr(0, pos);
    }

    auto pos = file_name.find_last_of('.');
    if (pos == String::npos)
        return "";

    String file_extension = file_name.substr(pos + 1, String::npos);
    boost::algorithm::to_lower(file_extension);
    return file_extension_formats[file_extension];
}

void FormatFactory::registerFileSegmentationEngine(const String & name, FileSegmentationEngine file_segmentation_engine)
{
    auto & target = dict[name].file_segmentation_engine;

@ -5,6 +5,7 @@
#include <Formats/FormatSettings.h>
#include <Interpreters/Context_fwd.h>
#include <IO/BufferWithOwnMemory.h>
#include <IO/CompressionMethod.h>
#include <base/types.h>
#include <Core/NamesAndTypes.h>

@ -108,6 +109,7 @@ private:
    };

    using FormatsDictionary = std::unordered_map<String, Creators>;
    using FileExtensionFormats = std::unordered_map<String, String>;

public:
    static FormatFactory & instance();
@ -169,6 +171,10 @@ public:
    void registerInputFormat(const String & name, InputCreator input_creator);
    void registerOutputFormat(const String & name, OutputCreator output_creator);

    /// Register file extension for format
    void registerFileExtension(const String & extension, const String & format_name);
    String getFormatFromFileName(String file_name);

    /// Register schema readers for format its name.
    void registerSchemaReader(const String & name, SchemaReaderCreator schema_reader_creator);
    void registerExternalSchemaReader(const String & name, ExternalSchemaReaderCreator external_schema_reader_creator);
@ -192,6 +198,7 @@ public:

private:
    FormatsDictionary dict;
    FileExtensionFormats file_extension_formats;

    const Creators & getCreators(const String & name) const;

@ -196,6 +196,16 @@ void registerFormats()
    registerTSKVSchemaReader(factory);
    registerValuesSchemaReader(factory);
    registerTemplateSchemaReader(factory);

    factory.registerFileExtension("csv", "CSV");
    factory.registerFileExtension("tsv", "TSV");
    factory.registerFileExtension("parquet", "Parquet");
    factory.registerFileExtension("orc", "ORC");
    factory.registerFileExtension("native", "Native");
    factory.registerFileExtension("json", "JSON");
    factory.registerFileExtension("ndjson", "JSONEachRow");
    factory.registerFileExtension("xml", "XML");
    factory.registerFileExtension("avro", "Avro");
}

}
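
A hedged sketch of what these registrations enable on the client side (the table name `t` and file names are hypothetical): once an extension is registered, the `FORMAT` clause can be omitted and inferred from the file name, even through a compression suffix, matching the `ClientBase` and parser changes above.

``` sql
-- CSV is inferred from the .csv extension; .gz only selects the compression codec.
INSERT INTO t FROM INFILE 'data.csv.gz';

-- Parquet is inferred from the .parquet extension of the output file.
SELECT * FROM t INTO OUTFILE 'result.parquet';
```
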
90
src/Functions/h3CellAreaM2.cpp
Normal file
@ -0,0 +1,90 @@
#include "config_functions.h"

#if USE_H3

#include <Columns/ColumnsNumber.h>
#include <DataTypes/DataTypesNumber.h>
#include <Functions/FunctionFactory.h>
#include <Functions/IFunction.h>
#include <IO/WriteHelpers.h>
#include <Common/typeid_cast.h>
#include <base/range.h>

#include <constants.h>
#include <h3api.h>


namespace DB
{
namespace ErrorCodes
{
    extern const int ILLEGAL_TYPE_OF_ARGUMENT;
    extern const int ILLEGAL_COLUMN;
}

namespace
{

class FunctionH3CellAreaM2 final : public IFunction
{
public:
    static constexpr auto name = "h3CellAreaM2";

    static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionH3CellAreaM2>(); }

    std::string getName() const override { return name; }

    size_t getNumberOfArguments() const override { return 1; }
    bool useDefaultImplementationForConstants() const override { return true; }
    bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }

    DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
    {
        const auto * arg = arguments[0].get();
        if (!WhichDataType(arg).isUInt64())
            throw Exception(
                ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
                "Illegal type {} of argument {} of function {}. Must be UInt64",
                arg->getName(), 1, getName());

        return std::make_shared<DataTypeFloat64>();
    }

    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
    {
        const auto * column = checkAndGetColumn<ColumnUInt64>(arguments[0].column.get());
        if (!column)
            throw Exception(
                ErrorCodes::ILLEGAL_COLUMN,
                "Illegal type {} of argument {} of function {}. Must be UInt64.",
                arguments[0].type->getName(),
                1,
                getName());

        const auto & data = column->getData();

        auto dst = ColumnVector<Float64>::create();
        auto & dst_data = dst->getData();
        dst_data.resize(input_rows_count);

        for (size_t row = 0; row < input_rows_count; ++row)
        {
            const UInt64 index = data[row];
            Float64 res = cellAreaM2(index);
            dst_data[row] = res;
        }

        return dst;
    }
};

}

void registerFunctionH3CellAreaM2(FunctionFactory & factory)
{
    factory.registerFunction<FunctionH3CellAreaM2>();
}

}

#endif
90
src/Functions/h3CellAreaRads2.cpp
Normal file
@ -0,0 +1,90 @@
#include "config_functions.h"

#if USE_H3

#include <Columns/ColumnsNumber.h>
#include <DataTypes/DataTypesNumber.h>
#include <Functions/FunctionFactory.h>
#include <Functions/IFunction.h>
#include <IO/WriteHelpers.h>
#include <Common/typeid_cast.h>
#include <base/range.h>

#include <constants.h>
#include <h3api.h>


namespace DB
{
namespace ErrorCodes
{
    extern const int ILLEGAL_TYPE_OF_ARGUMENT;
    extern const int ILLEGAL_COLUMN;
}

namespace
{

class FunctionH3CellAreaRads2 final : public IFunction
{
public:
    static constexpr auto name = "h3CellAreaRads2";

    static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionH3CellAreaRads2>(); }

    std::string getName() const override { return name; }

    size_t getNumberOfArguments() const override { return 1; }
    bool useDefaultImplementationForConstants() const override { return true; }
    bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }

    DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
    {
        const auto * arg = arguments[0].get();
        if (!WhichDataType(arg).isUInt64())
            throw Exception(
                ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
                "Illegal type {} of argument {} of function {}. Must be UInt64",
                arg->getName(), 1, getName());

        return std::make_shared<DataTypeFloat64>();
    }

    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
    {
        const auto * column = checkAndGetColumn<ColumnUInt64>(arguments[0].column.get());
        if (!column)
            throw Exception(
                ErrorCodes::ILLEGAL_COLUMN,
                "Illegal type {} of argument {} of function {}. Must be UInt64",
                arguments[0].type->getName(),
                1,
                getName());

        const auto & data = column->getData();

        auto dst = ColumnVector<Float64>::create();
        auto & dst_data = dst->getData();
        dst_data.resize(input_rows_count);

        for (size_t row = 0; row < input_rows_count; ++row)
        {
            const UInt64 index = data[row];
            Float64 res = cellAreaRads2(index);
            dst_data[row] = res;
        }

        return dst;
    }
};

}

void registerFunctionH3CellAreaRads2(FunctionFactory & factory)
{
    factory.registerFunction<FunctionH3CellAreaRads2>();
}

}

#endif
90
src/Functions/h3DegsToRads.cpp
Normal file
@ -0,0 +1,90 @@
#include "config_functions.h"

#if USE_H3

#include <Columns/ColumnsNumber.h>
#include <Columns/ColumnArray.h>
#include <DataTypes/DataTypesNumber.h>
#include <Functions/FunctionFactory.h>
#include <Functions/IFunction.h>
#include <Common/typeid_cast.h>

#include <h3api.h>

namespace DB
{
namespace ErrorCodes
{
    extern const int ILLEGAL_TYPE_OF_ARGUMENT;
    extern const int ILLEGAL_COLUMN;
}

namespace
{

class FunctionH3DegsToRads final : public IFunction
{
public:
    static constexpr auto name = "h3DegsToRads";

    static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionH3DegsToRads>(); }

    std::string getName() const override { return name; }

    size_t getNumberOfArguments() const override { return 1; }

    bool useDefaultImplementationForConstants() const override { return true; }

    bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }

    DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
    {
        const auto * arg = arguments[0].get();
        if (!WhichDataType(arg).isFloat64())
            throw Exception(
                ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
                "Illegal type {} of argument {} of function {}. Must be Float64",
                arg->getName(), 1, getName());

        return std::make_shared<DataTypeFloat64>();
    }

    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
    {
        const auto * column = checkAndGetColumn<ColumnFloat64>(arguments[0].column.get());

        if (!column)
            throw Exception(
                ErrorCodes::ILLEGAL_COLUMN,
                "Illegal type {} of argument {} of function {}. Must be Float64",
                arguments[0].type->getName(),
                1,
                getName());

        const auto & data = column->getData();

        auto dst = ColumnVector<Float64>::create();
        auto & dst_data = dst->getData();
        dst_data.resize(input_rows_count);

        for (size_t row = 0; row < input_rows_count; ++row)
        {
            const Float64 degrees = data[row];
            auto res = degsToRads(degrees);
            dst_data[row] = res;
        }

        return dst;
    }
};

}

void registerFunctionH3DegsToRads(FunctionFactory & factory)
{
    factory.registerFunction<FunctionH3DegsToRads>();
}

}

#endif
99
src/Functions/h3HexAreaKm2.cpp
Normal file
@ -0,0 +1,99 @@
#include "config_functions.h"

#if USE_H3

#include <Columns/ColumnsNumber.h>
#include <DataTypes/DataTypesNumber.h>
#include <Functions/FunctionFactory.h>
#include <Functions/IFunction.h>
#include <IO/WriteHelpers.h>
#include <Common/typeid_cast.h>
#include <base/range.h>

#include <constants.h>
#include <h3api.h>


namespace DB
{
namespace ErrorCodes
{
    extern const int ILLEGAL_TYPE_OF_ARGUMENT;
    extern const int ARGUMENT_OUT_OF_BOUND;
    extern const int ILLEGAL_COLUMN;
}

namespace
{

class FunctionH3HexAreaKm2 final : public IFunction
{
public:
    static constexpr auto name = "h3HexAreaKm2";

    static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionH3HexAreaKm2>(); }

    std::string getName() const override { return name; }

    size_t getNumberOfArguments() const override { return 1; }
    bool useDefaultImplementationForConstants() const override { return true; }
    bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }

    DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
    {
        const auto * arg = arguments[0].get();
        if (!WhichDataType(arg).isUInt8())
            throw Exception(
                ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
                "Illegal type {} of argument {} of function {}. Must be UInt8",
                arg->getName(), 1, getName());

        return std::make_shared<DataTypeFloat64>();
    }

    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
    {
        const auto * column = checkAndGetColumn<ColumnUInt8>(arguments[0].column.get());
        if (!column)
            throw Exception(
                ErrorCodes::ILLEGAL_COLUMN,
                "Illegal type {} of argument {} of function {}. Must be UInt8",
                arguments[0].column->getName(),
                1,
                getName());

        const auto & data = column->getData();

        auto dst = ColumnVector<Float64>::create();
        auto & dst_data = dst->getData();
        dst_data.resize(input_rows_count);

        for (size_t row = 0; row < input_rows_count; ++row)
        {
            const UInt64 resolution = data[row];
            if (resolution > MAX_H3_RES)
                throw Exception(
                    ErrorCodes::ARGUMENT_OUT_OF_BOUND,
                    "The argument 'resolution' ({}) of function {} is out of bounds because the maximum resolution in H3 library is {}",
                    resolution,
                    getName(),
                    MAX_H3_RES);

            Float64 res = getHexagonAreaAvgKm2(resolution);
            dst_data[row] = res;
        }

        return dst;
    }
};

}

void registerFunctionH3HexAreaKm2(FunctionFactory & factory)
{
    factory.registerFunction<FunctionH3HexAreaKm2>();
}

}

#endif
88
src/Functions/h3RadsToDegs.cpp
Normal file
@ -0,0 +1,88 @@
#include "config_functions.h"

#if USE_H3

#include <Columns/ColumnsNumber.h>
#include <Columns/ColumnArray.h>
#include <DataTypes/DataTypesNumber.h>
#include <Functions/FunctionFactory.h>
#include <Functions/IFunction.h>
#include <Common/typeid_cast.h>

#include <h3api.h>

namespace DB
{
namespace ErrorCodes
{
    extern const int ILLEGAL_TYPE_OF_ARGUMENT;
    extern const int ILLEGAL_COLUMN;
}

namespace
{

class FunctionH3RadsToDegs final : public IFunction
{
public:
    static constexpr auto name = "h3RadsToDegs";

    static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionH3RadsToDegs>(); }

    std::string getName() const override { return name; }

    size_t getNumberOfArguments() const override { return 1; }

    bool useDefaultImplementationForConstants() const override { return true; }

    bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }

    DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
    {
        const auto * arg = arguments[0].get();
        if (!WhichDataType(arg).isFloat64())
            throw Exception(
                ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
                "Illegal type {} of argument {} of function {}. Must be Float64",
                arg->getName(), 1, getName());

        return std::make_shared<DataTypeFloat64>();
    }

    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
    {
        const auto * column = checkAndGetColumn<ColumnFloat64>(arguments[0].column.get());
        if (!column)
            throw Exception(
                ErrorCodes::ILLEGAL_COLUMN,
                "Illegal type {} of argument {} of function {}. Must be Float64",
                arguments[0].type->getName(),
                1,
                getName());

        const auto & col_rads = column->getData();

        auto dst = ColumnVector<Float64>::create();
        auto & dst_data = dst->getData();
        dst_data.resize(input_rows_count);

        for (size_t row = 0; row < input_rows_count; ++row)
        {
            const Float64 rads = col_rads[row];
            auto res = radsToDegs(rads);
            dst_data[row] = res;
        }
        return dst;
    }
};

}

void registerFunctionH3RadsToDegs(FunctionFactory & factory)
{
    factory.registerFunction<FunctionH3RadsToDegs>();
}

}

#endif
@ -43,6 +43,12 @@ void registerFunctionH3HexAreaM2(FunctionFactory &);
|
||||
void registerFunctionH3IsResClassIII(FunctionFactory &);
|
||||
void registerFunctionH3IsPentagon(FunctionFactory &);
|
||||
void registerFunctionH3GetFaces(FunctionFactory &);
|
||||
void registerFunctionH3DegsToRads(FunctionFactory &);
|
||||
void registerFunctionH3RadsToDegs(FunctionFactory &);
|
||||
void registerFunctionH3HexAreaKm2(FunctionFactory &);
|
||||
void registerFunctionH3CellAreaM2(FunctionFactory &);
|
||||
void registerFunctionH3CellAreaRads2(FunctionFactory &);
|
||||
|
||||
#endif
|
||||
|
||||
#if USE_S2_GEOMETRY
|
||||
@ -99,6 +105,11 @@ void registerFunctionsGeo(FunctionFactory & factory)
|
||||
registerFunctionH3IsResClassIII(factory);
|
||||
registerFunctionH3IsPentagon(factory);
|
||||
registerFunctionH3GetFaces(factory);
|
||||
registerFunctionH3DegsToRads(factory);
|
||||
registerFunctionH3RadsToDegs(factory);
|
||||
registerFunctionH3HexAreaKm2(factory);
|
||||
registerFunctionH3CellAreaM2(factory);
|
||||
registerFunctionH3CellAreaRads2(factory);
|
||||
#endif
|
||||
|
||||
#if USE_S2_GEOMETRY
|
||||
|
@ -34,6 +34,8 @@ public:
|
||||
|
||||
bool useDefaultImplementationForNulls() const override { return false; }
|
||||
|
||||
bool useDefaultImplementationForLowCardinalityColumns() const override { return false; }
|
||||
|
||||
DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override;
|
||||
|
||||
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t) const override;
|
||||
|
@ -1,6 +1,7 @@
|
||||
#include <string>
|
||||
|
||||
#include <iostream>
|
||||
#include <sstream>
|
||||
|
||||
#include <base/types.h>
|
||||
#include <IO/WriteHelpers.h>
|
||||
|
@ -1977,6 +1977,7 @@ void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum proc
|
||||
if (!options.ignore_quota && (options.to_stage == QueryProcessingStage::Complete))
|
||||
quota = context->getQuota();
|
||||
|
||||
query_info.settings_limit_offset_done = options.settings_limit_offset_done;
|
||||
storage->read(query_plan, required_columns, metadata_snapshot, query_info, context, processing_stage, max_block_size, max_streams);
|
||||
|
||||
if (context->hasQueryContext() && !options.is_internal)
|
||||
|
@ -83,7 +83,7 @@ InterpreterSelectWithUnionQuery::InterpreterSelectWithUnionQuery(
|
||||
}
|
||||
}
|
||||
|
||||
if (num_children == 1 && settings_limit_offset_needed)
|
||||
if (num_children == 1 && settings_limit_offset_needed && !options.settings_limit_offset_done)
|
||||
{
|
||||
const ASTPtr first_select_ast = ast->list_of_selects->children.at(0);
|
||||
ASTSelectQuery * select_query = dynamic_cast<ASTSelectQuery *>(first_select_ast.get());
|
||||
@ -127,7 +127,7 @@ InterpreterSelectWithUnionQuery::InterpreterSelectWithUnionQuery(
|
||||
select_query->setExpression(ASTSelectQuery::Expression::LIMIT_LENGTH, std::move(new_limit_length_ast));
|
||||
}
|
||||
|
||||
settings_limit_offset_done = true;
|
||||
options.settings_limit_offset_done = true;
|
||||
}
|
||||
}
|
||||
|
||||
@ -305,7 +305,7 @@ void InterpreterSelectWithUnionQuery::buildQueryPlan(QueryPlan & query_plan)
|
||||
}
|
||||
}
|
||||
|
||||
if (settings_limit_offset_needed && !settings_limit_offset_done)
|
||||
if (settings_limit_offset_needed && !options.settings_limit_offset_done)
|
||||
{
|
||||
if (settings.limit > 0)
|
||||
{
|
||||
|
@ -13,7 +13,7 @@
|
||||
#include <Common/tests/gtest_global_register.h>
|
||||
#include <Poco/String.h>
|
||||
|
||||
|
||||
#if USE_MYSQL
|
||||
using namespace DB;
|
||||
|
||||
static inline ASTPtr tryRewrittenCreateQuery(const String & query, ContextPtr context)
|
||||
@ -255,3 +255,4 @@ TEST(MySQLCreateRewritten, QueryWithEnum)
|
||||
std::string(MATERIALIZEDMYSQL_TABLE_COLUMNS) +
|
||||
") ENGINE = ReplacingMergeTree(_version) PARTITION BY intDiv(key, 4294967) ORDER BY tuple(key)");
|
||||
}
|
||||
#endif
|
||||
|
@ -6,6 +6,7 @@
|
||||
#include <Parsers/ASTSelectIntersectExceptQuery.h>
|
||||
#include <Parsers/ASTSelectQuery.h>
|
||||
#include <Parsers/ASTKillQueryQuery.h>
|
||||
#include <Parsers/IAST.h>
|
||||
#include <Parsers/queryNormalization.h>
|
||||
#include <Processors/Executors/PipelineExecutor.h>
|
||||
#include <Common/typeid_cast.h>
|
||||
@ -76,6 +77,7 @@ ProcessList::EntryPtr ProcessList::insert(const String & query_, const IAST * as
|
||||
|
||||
{
|
||||
std::unique_lock lock(mutex);
|
||||
IAST::QueryKind query_kind = ast->getQueryKind();
|
||||
|
||||
const auto queue_max_wait_ms = settings.queue_max_wait_ms.totalMilliseconds();
|
||||
if (!is_unlimited_query && max_size && processes.size() >= max_size)
|
||||
@ -86,15 +88,14 @@ ProcessList::EntryPtr ProcessList::insert(const String & query_, const IAST * as
|
||||
throw Exception("Too many simultaneous queries. Maximum: " + toString(max_size), ErrorCodes::TOO_MANY_SIMULTANEOUS_QUERIES);
|
||||
}
|
||||
|
||||
String query_kind{ast->getQueryKindString()};
|
||||
if (!is_unlimited_query)
|
||||
{
|
||||
auto amount = getQueryKindAmount(query_kind);
|
||||
if (max_insert_queries_amount && query_kind == "Insert" && amount >= max_insert_queries_amount)
|
||||
QueryAmount amount = getQueryKindAmount(query_kind);
|
||||
if (max_insert_queries_amount && query_kind == IAST::QueryKind::Insert && amount >= max_insert_queries_amount)
|
||||
throw Exception(ErrorCodes::TOO_MANY_SIMULTANEOUS_QUERIES,
|
||||
"Too many simultaneous insert queries. Maximum: {}, current: {}",
|
||||
max_insert_queries_amount, amount);
|
||||
if (max_select_queries_amount && query_kind == "Select" && amount >= max_select_queries_amount)
|
||||
if (max_select_queries_amount && query_kind == IAST::QueryKind::Select && amount >= max_select_queries_amount)
|
||||
throw Exception(ErrorCodes::TOO_MANY_SIMULTANEOUS_QUERIES,
|
||||
"Too many simultaneous select queries. Maximum: {}, current: {}",
|
||||
max_select_queries_amount, amount);
|
||||
@ -258,7 +259,7 @@ ProcessListEntry::~ProcessListEntry()
|
||||
|
||||
String user = it->getClientInfo().current_user;
|
||||
String query_id = it->getClientInfo().current_query_id;
|
||||
String query_kind = it->query_kind;
|
||||
IAST::QueryKind query_kind = it->query_kind;
|
||||
|
||||
const QueryStatus * process_list_element_ptr = &*it;
|
||||
|
||||
@ -306,7 +307,7 @@ ProcessListEntry::~ProcessListEntry()
|
||||
|
||||
|
||||
QueryStatus::QueryStatus(
|
||||
ContextPtr context_, const String & query_, const ClientInfo & client_info_, QueryPriorities::Handle && priority_handle_, const String & query_kind_)
|
||||
ContextPtr context_, const String & query_, const ClientInfo & client_info_, QueryPriorities::Handle && priority_handle_, IAST::QueryKind query_kind_)
|
||||
: WithContext(context_)
|
||||
, query(query_)
|
||||
, client_info(client_info_)
|
||||
@ -505,7 +506,7 @@ ProcessList::UserInfo ProcessList::getUserInfo(bool get_profile_events) const
|
||||
return per_user_infos;
|
||||
}
|
||||
|
||||
void ProcessList::increaseQueryKindAmount(const String & query_kind)
|
||||
void ProcessList::increaseQueryKindAmount(const IAST::QueryKind & query_kind)
|
||||
{
|
||||
auto found = query_kind_amounts.find(query_kind);
|
||||
if (found == query_kind_amounts.end())
|
||||
@ -514,7 +515,7 @@ void ProcessList::increaseQueryKindAmount(const String & query_kind)
|
||||
found->second += 1;
|
||||
}
|
||||
|
||||
void ProcessList::decreaseQueryKindAmount(const String & query_kind)
|
||||
void ProcessList::decreaseQueryKindAmount(const IAST::QueryKind & query_kind)
|
||||
{
|
||||
auto found = query_kind_amounts.find(query_kind);
|
||||
/// TODO: we could just rebuild the map, as we have saved all query_kind.
|
||||
@ -524,9 +525,9 @@ void ProcessList::decreaseQueryKindAmount(const String & query_kind)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Wrong query kind amount: decrease to negative on '{}'", query_kind, found->second);
|
||||
else
|
||||
found->second -= 1;
|
||||
|
||||
}
|
||||
ProcessList::QueryAmount ProcessList::getQueryKindAmount(const String & query_kind)
|
||||
|
||||
ProcessList::QueryAmount ProcessList::getQueryKindAmount(const IAST::QueryKind & query_kind) const
|
||||
{
|
||||
auto found = query_kind_amounts.find(query_kind);
|
||||
if (found == query_kind_amounts.end())
|
||||
|
@ -9,6 +9,7 @@
|
||||
#include <QueryPipeline/ExecutionSpeedLimits.h>
|
||||
#include <Storages/IStorage_fwd.h>
|
||||
#include <Poco/Condition.h>
|
||||
#include <Parsers/IAST.h>
|
||||
#include <Common/CurrentMetrics.h>
|
||||
#include <Common/CurrentThread.h>
|
||||
#include <Common/MemoryTracker.h>
|
||||
@ -118,7 +119,7 @@ protected:
|
||||
|
||||
ProcessListForUser * user_process_list = nullptr;
|
||||
|
||||
String query_kind;
|
||||
IAST::QueryKind query_kind;
|
||||
|
||||
public:
|
||||
|
||||
@ -127,7 +128,7 @@ public:
|
||||
const String & query_,
|
||||
const ClientInfo & client_info_,
|
||||
QueryPriorities::Handle && priority_handle_,
|
||||
const String & query_kind_
|
||||
IAST::QueryKind query_kind_
|
||||
);
|
||||
|
||||
~QueryStatus();
|
||||
@ -270,7 +271,7 @@ public:
|
||||
/// User -> queries
|
||||
using UserToQueries = std::unordered_map<String, ProcessListForUser>;
|
||||
|
||||
using QueryKindToAmount = std::unordered_map<String, QueryAmount>;
|
||||
using QueryKindAmounts = std::unordered_map<IAST::QueryKind, QueryAmount>;
|
||||
|
||||
protected:
|
||||
friend class ProcessListEntry;
|
||||
@ -301,11 +302,11 @@ protected:
|
||||
size_t max_select_queries_amount = 0;
|
||||
|
||||
/// amount of queries by query kind.
|
||||
QueryKindToAmount query_kind_amounts;
|
||||
QueryKindAmounts query_kind_amounts;
|
||||
|
||||
void increaseQueryKindAmount(const String & query_kind);
|
||||
void decreaseQueryKindAmount(const String & query_kind);
|
||||
QueryAmount getQueryKindAmount(const String & query_kind);
|
||||
void increaseQueryKindAmount(const IAST::QueryKind & query_kind);
|
||||
void decreaseQueryKindAmount(const IAST::QueryKind & query_kind);
|
||||
QueryAmount getQueryKindAmount(const IAST::QueryKind & query_kind) const;
|
||||
|
||||
public:
|
||||
using EntryPtr = std::shared_ptr<ProcessListEntry>;
|
||||
|
@ -48,6 +48,7 @@ struct SelectQueryOptions
|
||||
bool is_internal = false;
|
||||
bool is_subquery = false; // non-subquery can also have subquery_depth > 0, e.g. insert select
|
||||
bool with_all_cols = false; /// asterisk include materialized and aliased columns
|
||||
bool settings_limit_offset_done = false;
|
||||
|
||||
/// These two fields are used to evaluate shardNum() and shardCount() function when
|
||||
/// prefer_localhost_replica == 1 and local instance is selected. They are needed because local
|
||||
@ -58,8 +59,10 @@ struct SelectQueryOptions
|
||||
SelectQueryOptions(
|
||||
QueryProcessingStage::Enum stage = QueryProcessingStage::Complete,
|
||||
size_t depth = 0,
|
||||
bool is_subquery_ = false)
|
||||
: to_stage(stage), subquery_depth(depth), is_subquery(is_subquery_)
|
||||
bool is_subquery_ = false,
|
||||
bool settings_limit_offset_done_ = false)
|
||||
: to_stage(stage), subquery_depth(depth), is_subquery(is_subquery_),
|
||||
settings_limit_offset_done(settings_limit_offset_done_)
|
||||
{}
|
||||
|
||||
SelectQueryOptions copy() const { return *this; }
|
||||
|
@ -168,6 +168,8 @@ public:
|
||||
void shutdown() override
|
||||
{
|
||||
stopFlushThread();
|
||||
|
||||
auto table = DatabaseCatalog::instance().tryGetTable(table_id, getContext());
|
||||
if (table)
|
||||
table->flushAndShutdown();
|
||||
}
|
||||
@ -186,7 +188,6 @@ private:
|
||||
/* Saving thread data */
|
||||
const StorageID table_id;
|
||||
const String storage_def;
|
||||
StoragePtr table;
|
||||
String create_query;
|
||||
String old_create_query;
|
||||
bool is_prepared = false;
|
||||
@ -525,7 +526,7 @@ void SystemLog<LogElement>::prepareTable()
|
||||
{
|
||||
String description = table_id.getNameForLogs();
|
||||
|
||||
table = DatabaseCatalog::instance().tryGetTable(table_id, getContext());
|
||||
auto table = DatabaseCatalog::instance().tryGetTable(table_id, getContext());
|
||||
|
||||
if (table)
|
||||
{
|
||||
|
@ -57,6 +57,8 @@
|
||||
#include <Processors/Executors/CompletedPipelineExecutor.h>
|
||||
#include <Processors/Sources/WaitForAsyncInsertSource.h>
|
||||
|
||||
#include <base/EnumReflection.h>
|
||||
|
||||
#include <random>
|
||||
|
||||
|
||||
@ -271,7 +273,7 @@ static void onExceptionBeforeStart(const String & query_for_logging, ContextPtr
|
||||
// Try log query_kind if ast is valid
|
||||
if (ast)
|
||||
{
|
||||
elem.query_kind = ast->getQueryKindString();
|
||||
elem.query_kind = magic_enum::enum_name(ast->getQueryKind());
|
||||
if (settings.log_formatted_queries)
|
||||
elem.formatted_query = queryToString(ast);
|
||||
}
|
||||
|
@ -11,6 +11,11 @@ namespace ErrorCodes
|
||||
extern const int UNEXPECTED_AST_STRUCTURE;
|
||||
}
|
||||
|
||||
String ASTAlterCommand::getID(char delim) const
|
||||
{
|
||||
return String("AlterCommand") + delim + typeToString(type);
|
||||
}
|
||||
|
||||
ASTPtr ASTAlterCommand::clone() const
|
||||
{
|
||||
auto res = std::make_shared<ASTAlterCommand>(*this);
|
||||
@ -75,6 +80,53 @@ ASTPtr ASTAlterCommand::clone() const
|
||||
return res;
|
||||
}
|
||||
|
||||
const char * ASTAlterCommand::typeToString(ASTAlterCommand::Type type)
|
||||
{
|
||||
switch (type)
|
||||
{
|
||||
case ADD_COLUMN: return "ADD_COLUMN";
|
||||
case DROP_COLUMN: return "DROP_COLUMN";
|
||||
case MODIFY_COLUMN: return "MODIFY_COLUMN";
|
||||
case COMMENT_COLUMN: return "COMMENT_COLUMN";
|
||||
case RENAME_COLUMN: return "RENAME_COLUMN";
|
||||
case MATERIALIZE_COLUMN: return "MATERIALIZE_COLUMN";
|
||||
case MODIFY_ORDER_BY: return "MODIFY_ORDER_BY";
|
||||
case MODIFY_SAMPLE_BY: return "MODIFY_SAMPLE_BY";
|
||||
case MODIFY_TTL: return "MODIFY_TTL";
|
||||
case MATERIALIZE_TTL: return "MATERIALIZE_TTL";
|
||||
case MODIFY_SETTING: return "MODIFY_SETTING";
|
||||
case RESET_SETTING: return "RESET_SETTING";
|
||||
case MODIFY_QUERY: return "MODIFY_QUERY";
|
||||
case REMOVE_TTL: return "REMOVE_TTL";
|
||||
case REMOVE_SAMPLE_BY: return "REMOVE_SAMPLE_BY";
|
||||
case ADD_INDEX: return "ADD_INDEX";
|
||||
case DROP_INDEX: return "DROP_INDEX";
|
||||
case MATERIALIZE_INDEX: return "MATERIALIZE_INDEX";
|
||||
case ADD_CONSTRAINT: return "ADD_CONSTRAINT";
|
||||
case DROP_CONSTRAINT: return "DROP_CONSTRAINT";
|
||||
case ADD_PROJECTION: return "ADD_PROJECTION";
|
||||
case DROP_PROJECTION: return "DROP_PROJECTION";
|
||||
case MATERIALIZE_PROJECTION: return "MATERIALIZE_PROJECTION";
|
||||
case DROP_PARTITION: return "DROP_PARTITION";
|
||||
case DROP_DETACHED_PARTITION: return "DROP_DETACHED_PARTITION";
|
||||
case ATTACH_PARTITION: return "ATTACH_PARTITION";
|
||||
case MOVE_PARTITION: return "MOVE_PARTITION";
|
||||
case REPLACE_PARTITION: return "REPLACE_PARTITION";
|
||||
case FETCH_PARTITION: return "FETCH_PARTITION";
|
||||
case FREEZE_PARTITION: return "FREEZE_PARTITION";
|
||||
case FREEZE_ALL: return "FREEZE_ALL";
|
||||
case UNFREEZE_PARTITION: return "UNFREEZE_PARTITION";
|
||||
case UNFREEZE_ALL: return "UNFREEZE_ALL";
|
||||
case DELETE: return "DELETE";
|
||||
case UPDATE: return "UPDATE";
|
||||
case NO_TYPE: return "NO_TYPE";
|
||||
case LIVE_VIEW_REFRESH: return "LIVE_VIEW_REFRESH";
|
||||
case MODIFY_DATABASE_SETTING: return "MODIFY_DATABASE_SETTING";
|
||||
case MODIFY_COMMENT: return "MODIFY_COMMENT";
|
||||
}
|
||||
__builtin_unreachable();
|
||||
}
|
||||
|
||||
void ASTAlterCommand::formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const
|
||||
{
|
||||
if (type == ASTAlterCommand::ADD_COLUMN)
|
||||
|
@ -204,10 +204,12 @@ public:
|
||||
/// Which property user want to remove
|
||||
String remove_property;
|
||||
|
||||
String getID(char delim) const override { return "AlterCommand" + (delim + std::to_string(static_cast<int>(type))); }
|
||||
String getID(char delim) const override;
|
||||
|
||||
ASTPtr clone() const override;
|
||||
|
||||
static const char * typeToString(Type type);
|
||||
|
||||
protected:
|
||||
void formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override;
|
||||
};
|
||||
@ -246,7 +248,7 @@ public:
|
||||
return removeOnCluster<ASTAlterQuery>(clone(), new_database);
|
||||
}
|
||||
|
||||
const char * getQueryKindString() const override { return "Alter"; }
|
||||
virtual QueryKind getQueryKind() const override { return QueryKind::Alter; }
|
||||
|
||||
protected:
|
||||
void formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override;
|
||||
|
@ -119,7 +119,7 @@ public:
|
||||
|
||||
bool isView() const { return is_ordinary_view || is_materialized_view || is_live_view || is_window_view; }
|
||||
|
||||
const char * getQueryKindString() const override { return "Create"; }
|
||||
virtual QueryKind getQueryKind() const override { return QueryKind::Create; }
|
||||
|
||||
protected:
|
||||
void formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override;
|
||||
|
@ -45,7 +45,7 @@ public:
|
||||
return removeOnCluster<ASTDropQuery>(clone(), new_database);
|
||||
}
|
||||
|
||||
const char * getQueryKindString() const override { return "Drop"; }
|
||||
virtual QueryKind getQueryKind() const override { return QueryKind::Drop; }
|
||||
|
||||
protected:
|
||||
void formatQueryImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override;
|
||||
|
@ -79,6 +79,13 @@ void ASTInsertQuery::formatImpl(const FormatSettings & settings, FormatState & s
|
||||
settings.ostr << ")";
|
||||
}
|
||||
|
||||
if (infile)
|
||||
{
|
||||
settings.ostr << (settings.hilite ? hilite_keyword : "") << " FROM INFILE " << (settings.hilite ? hilite_none : "") << infile->as<ASTLiteral &>().value.safeGet<std::string>();
|
||||
if (compression)
|
||||
settings.ostr << (settings.hilite ? hilite_keyword : "") << " COMPRESSION " << (settings.hilite ? hilite_none : "") << compression->as<ASTLiteral &>().value.safeGet<std::string>();
|
||||
}
|
||||
|
||||
if (select)
|
||||
{
|
||||
settings.ostr << " ";
|
||||
@ -91,12 +98,6 @@ void ASTInsertQuery::formatImpl(const FormatSettings & settings, FormatState & s
|
||||
}
|
||||
else
|
||||
{
|
||||
if (infile)
|
||||
{
|
||||
settings.ostr << (settings.hilite ? hilite_keyword : "") << " FROM INFILE " << (settings.hilite ? hilite_none : "") << infile->as<ASTLiteral &>().value.safeGet<std::string>();
|
||||
if (compression)
|
||||
settings.ostr << (settings.hilite ? hilite_keyword : "") << " COMPRESSION " << (settings.hilite ? hilite_none : "") << compression->as<ASTLiteral &>().value.safeGet<std::string>();
|
||||
}
|
||||
if (!format.empty())
|
||||
{
|
||||
settings.ostr << (settings.hilite ? hilite_keyword : "") << " FORMAT " << (settings.hilite ? hilite_none : "") << format;
|
||||
|
@ -66,7 +66,7 @@ public:
|
||||
return res;
|
||||
}
|
||||
|
||||
const char * getQueryKindString() const override { return "Insert"; }
|
||||
virtual QueryKind getQueryKind() const override { return QueryKind::Insert; }
|
||||
|
||||
protected:
|
||||
void formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override;
|
||||
|
@ -65,7 +65,7 @@ public:
|
||||
return query_ptr;
|
||||
}
|
||||
|
||||
const char * getQueryKindString() const override { return "Rename"; }
|
||||
virtual QueryKind getQueryKind() const override { return QueryKind::Rename; }
|
||||
|
||||
protected:
|
||||
void formatQueryImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override
|
||||
|
@ -22,7 +22,7 @@ public:
|
||||
|
||||
void formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override;
|
||||
|
||||
const char * getQueryKindString() const override { return "SelectIntersectExcept"; }
|
||||
virtual QueryKind getQueryKind() const override { return QueryKind::SelectIntersectExcept; }
|
||||
|
||||
ASTs getListOfSelects() const;
|
||||
|
||||
|
@ -135,7 +135,7 @@ public:
|
||||
|
||||
void setFinal();
|
||||
|
||||
const char * getQueryKindString() const override { return "Select"; }
|
||||
virtual QueryKind getQueryKind() const override { return QueryKind::Select; }
|
||||
|
||||
protected:
|
||||
void formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override;
|
||||
|
@ -17,7 +17,7 @@ public:
|
||||
|
||||
void formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override;
|
||||
|
||||
const char * getQueryKindString() const override { return "Select"; }
|
||||
virtual QueryKind getQueryKind() const override { return QueryKind::Select; }
|
||||
|
||||
SelectUnionMode union_mode;
|
||||
|
||||
|
@ -107,7 +107,7 @@ public:
|
||||
return removeOnCluster<ASTSystemQuery>(clone(), new_database);
|
||||
}
|
||||
|
||||
const char * getQueryKindString() const override { return "System"; }
|
||||
virtual QueryKind getQueryKind() const override { return QueryKind::System; }
|
||||
|
||||
protected:
|
||||
|
||||
|
@ -34,6 +34,6 @@ public:
|
||||
void replaceEmptyDatabase(const String & current_database);
|
||||
void replaceCurrentUserTag(const String & current_user_name) const;
|
||||
ASTPtr getRewrittenASTWithoutOnCluster(const std::string &) const override { return removeOnCluster<ASTGrantQuery>(clone()); }
|
||||
const char * getQueryKindString() const override { return is_revoke ? "Revoke" : "Grant"; }
|
||||
virtual QueryKind getQueryKind() const override { return is_revoke ? QueryKind::Revoke : QueryKind::Grant; }
|
||||
};
|
||||
}
|
||||
|
@ -245,10 +245,23 @@ public:
|
||||
|
||||
void cloneChildren();
|
||||
|
||||
// Return query_kind string representation of this AST query.
|
||||
virtual const char * getQueryKindString() const { return ""; }
|
||||
enum class QueryKind : uint8_t
|
||||
{
|
||||
None = 0,
|
||||
Alter,
|
||||
Create,
|
||||
Drop,
|
||||
Grant,
|
||||
Insert,
|
||||
Rename,
|
||||
Revoke,
|
||||
SelectIntersectExcept,
|
||||
Select,
|
||||
System,
|
||||
};
|
||||
/// Return QueryKind of this AST query.
|
||||
virtual QueryKind getQueryKind() const { return QueryKind::None; }
|
||||
|
||||
public:
|
||||
/// For syntax highlighting.
|
||||
static const char * hilite_keyword;
|
||||
static const char * hilite_identifier;
|
||||
|
@ -116,7 +116,7 @@ bool ParserInsertQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
/// Check if file is a source of data.
if (s_from_infile.ignore(pos, expected))
{
/// Read its name to process it later
/// Read file name to process it later
if (!infile_name_p.parse(pos, infile, expected))
return false;

@ -133,7 +133,8 @@ bool ParserInsertQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
Pos before_values = pos;
String format_str;

/// VALUES or FROM INFILE or FORMAT or SELECT
/// VALUES or FORMAT or SELECT or WITH or WATCH.
/// After FROM INFILE we expect FORMAT, SELECT, WITH or nothing.
if (!infile && s_values.ignore(pos, expected))
{
/// If VALUES is defined in query, everything except setting will be parsed as data
@ -162,21 +163,17 @@ bool ParserInsertQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)

tryGetIdentifierNameInto(format, format_str);
}
else if (s_watch.ignore(pos, expected))
else if (!infile && s_watch.ignore(pos, expected))
{
/// If WATCH is defined, return to position before WATCH and parse
/// rest of query as WATCH query.
pos = before_values;
ParserWatchQuery watch_p;
watch_p.parse(pos, watch, expected);

/// FORMAT section is expected if we have input() in SELECT part
if (s_format.ignore(pos, expected) && !name_p.parse(pos, format, expected))
return false;
}
else
else if (!infile)
{
/// If all previous conditions were false, query is incorrect
/// If all previous conditions were false and it's not FROM INFILE, query is incorrect
return false;
}
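The reworked branches mean FROM INFILE now combines with the remaining clauses instead of excluding them. A hedged illustration of the statement shapes the revised conditions accept (table and file names are hypothetical):

    INSERT INTO t VALUES (1), (2)                       -- VALUES branch, only taken without FROM INFILE
    INSERT INTO t FROM INFILE 'data.csv' FORMAT CSV     -- FROM INFILE followed by FORMAT
    INSERT INTO t FROM INFILE 'data.csv'                -- FROM INFILE followed by nothing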
@ -80,6 +80,9 @@ static ColumnWithTypeAndName readColumnWithNumericData(std::shared_ptr<arrow::Ch
for (size_t chunk_i = 0, num_chunks = static_cast<size_t>(arrow_column->num_chunks()); chunk_i < num_chunks; ++chunk_i)
{
std::shared_ptr<arrow::Array> chunk = arrow_column->chunk(chunk_i);
if (chunk->length() == 0)
continue;

/// buffers[0] is a null bitmap and buffers[1] are actual values
std::shared_ptr<arrow::Buffer> buffer = chunk->data()->buffers[1];

@ -146,6 +149,9 @@ static ColumnWithTypeAndName readColumnWithBooleanData(std::shared_ptr<arrow::Ch
for (size_t chunk_i = 0, num_chunks = static_cast<size_t>(arrow_column->num_chunks()); chunk_i < num_chunks; ++chunk_i)
{
arrow::BooleanArray & chunk = dynamic_cast<arrow::BooleanArray &>(*(arrow_column->chunk(chunk_i)));
if (chunk.length() == 0)
continue;

/// buffers[0] is a null bitmap and buffers[1] are actual values
std::shared_ptr<arrow::Buffer> buffer = chunk.data()->buffers[1];

@ -137,7 +137,8 @@ void ParallelParsingInputFormat::onBackgroundException(size_t offset)
if (e->getLineNumber() != -1)
e->setLineNumber(e->getLineNumber() + offset);
}
tryLogCurrentException(__PRETTY_FUNCTION__);
if (is_server)
tryLogCurrentException(__PRETTY_FUNCTION__);
parsing_finished = true;
first_parser_finished.set();
reader_condvar.notify_all();

@ -82,6 +82,7 @@ public:
String format_name;
size_t max_threads;
size_t min_chunk_bytes;
bool is_server;
};

explicit ParallelParsingInputFormat(Params params)
@ -90,6 +91,7 @@ public:
, file_segmentation_engine(params.file_segmentation_engine)
, format_name(params.format_name)
, min_chunk_bytes(params.min_chunk_bytes)
, is_server(params.is_server)
, pool(params.max_threads)
{
// One unit for each thread, including segmentator and reader, plus a
@ -203,6 +205,8 @@ private:
std::atomic<bool> parsing_started{false};
std::atomic<bool> parsing_finished{false};

const bool is_server;

/// There are multiple "parsers", that's why we use thread pool.
ThreadPool pool;
/// Reading and segmenting the file
493
src/Processors/Merges/Algorithms/Graphite.cpp
Normal file
@ -0,0 +1,493 @@
#include <base/find_symbols.h>
#include <Interpreters/Context.h>
#include <AggregateFunctions/AggregateFunctionFactory.h>
#include <AggregateFunctions/parseAggregateFunctionParameters.h>
#include <DataTypes/DataTypesNumber.h>
#include <Processors/Merges/Algorithms/Graphite.h>

#include <string_view>
#include <vector>
#include <unordered_map>

#include <fmt/format.h>

#include <Poco/Util/AbstractConfiguration.h>

using namespace std::literals;

namespace DB::ErrorCodes
{
extern const int NOT_IMPLEMENTED;
extern const int BAD_ARGUMENTS;
extern const int UNKNOWN_ELEMENT_IN_CONFIG;
extern const int NO_ELEMENTS_IN_CONFIG;
}

namespace DB::Graphite
{
static std::unordered_map<RuleType, const String> ruleTypeMap =
{
{ RuleTypeAll, "all" },
{ RuleTypePlain, "plain" },
{ RuleTypeTagged, "tagged"},
{ RuleTypeTagList, "tag_list"}
};

const String & ruleTypeStr(RuleType rule_type)
{
try
{
return ruleTypeMap.at(rule_type);
}
catch (...)
{
throw Exception("invalid rule type: " + std::to_string(rule_type), DB::ErrorCodes::BAD_ARGUMENTS);
}
}

RuleType ruleType(const String & s)
{
if (s == "all")
return RuleTypeAll;
else if (s == "plain")
return RuleTypePlain;
else if (s == "tagged")
return RuleTypeTagged;
else if (s == "tag_list")
return RuleTypeTagList;
else
throw Exception("invalid rule type: " + s, DB::ErrorCodes::BAD_ARGUMENTS);
}

static const Graphite::Pattern undef_pattern =
{ /// empty pattern for selectPatternForPath
.rule_type = RuleTypeAll,
.regexp = nullptr,
.regexp_str = "",
.function = nullptr,
.retentions = Graphite::Retentions(),
.type = undef_pattern.TypeUndef,
};

inline static const Patterns & selectPatternsForMetricType(const Graphite::Params & params, const StringRef path)
{
if (params.patterns_typed)
{
std::string_view path_view = path.toView();
if (path_view.find("?"sv) == path_view.npos)
return params.patterns_plain;
else
return params.patterns_tagged;
}
else
{
return params.patterns;
}
}

Graphite::RollupRule selectPatternForPath(
const Graphite::Params & params,
const StringRef path)
{
const Graphite::Pattern * first_match = &undef_pattern;

const Patterns & patterns_check = selectPatternsForMetricType(params, path);

for (const auto & pattern : patterns_check)
{
if (!pattern.regexp)
{
/// Default pattern
if (first_match->type == first_match->TypeUndef && pattern.type == pattern.TypeAll)
{
/// There is only default pattern for both retention and aggregation
return std::pair(&pattern, &pattern);
}
if (pattern.type != first_match->type)
{
if (first_match->type == first_match->TypeRetention)
{
return std::pair(first_match, &pattern);
}
if (first_match->type == first_match->TypeAggregation)
{
return std::pair(&pattern, first_match);
}
}
}
else
{
if (pattern.regexp->match(path.data, path.size))
{
/// General pattern with matched path
if (pattern.type == pattern.TypeAll)
{
/// Only for not default patterns with both function and retention parameters
return std::pair(&pattern, &pattern);
}
if (first_match->type == first_match->TypeUndef)
{
first_match = &pattern;
continue;
}
if (pattern.type != first_match->type)
{
if (first_match->type == first_match->TypeRetention)
{
return std::pair(first_match, &pattern);
}
if (first_match->type == first_match->TypeAggregation)
{
return std::pair(&pattern, first_match);
}
}
}
}
}

return {nullptr, nullptr};
}
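A hedged usage sketch of the rule selection above (the params value and the metric path are hypothetical; the gtest further below exercises the real behaviour):

    // RollupRule is std::pair<const RetentionPattern *, const AggregationPattern *>.
    Graphite::RollupRule rule = Graphite::selectPatternForPath(params, StringRef("cpu.loadavg?env=prod"));
    const auto * retention_pattern = rule.first;     // supplies the retention steps
    const auto * aggregation_pattern = rule.second;  // supplies the aggregate function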

/** Is used to order Graphite::Retentions by age and precision descending.
* Throws an exception unless both age and precision of one element are less than, or both greater than, the other's.
*/
static bool compareRetentions(const Retention & a, const Retention & b)
{
if (a.age > b.age && a.precision > b.precision)
{
return true;
}
else if (a.age < b.age && a.precision < b.precision)
{
return false;
}
String error_msg = "age and precision should only grow up: "
+ std::to_string(a.age) + ":" + std::to_string(a.precision) + " vs "
+ std::to_string(b.age) + ":" + std::to_string(b.precision);
throw Exception(
error_msg,
DB::ErrorCodes::BAD_ARGUMENTS);
}

bool operator==(const Retention & a, const Retention & b)
{
return a.age == b.age && a.precision == b.precision;
}

std::ostream & operator<<(std::ostream & stream, const Retentions & a)
{
stream << "{ ";
for (size_t i = 0; i < a.size(); i++)
{
if (i > 0)
stream << ",";
stream << " { age = " << a[i].age << ", precision = " << a[i].precision << " }";
}
stream << " }";

return stream;
}

bool operator==(const Pattern & a, const Pattern & b)
{
// equal
// Retentions retentions; /// Must be ordered by 'age' descending.
if (a.type != b.type || a.regexp_str != b.regexp_str || a.rule_type != b.rule_type)
return false;

if (a.function == nullptr)
{
if (b.function != nullptr)
return false;
}
else if (b.function == nullptr)
{
return false;
}
else if (a.function->getName() != b.function->getName())
{
return false;
}

return a.retentions == b.retentions;
}

std::ostream & operator<<(std::ostream & stream, const Pattern & a)
{
stream << "{ rule_type = " << ruleTypeStr(a.rule_type);
if (!a.regexp_str.empty())
stream << ", regexp = '" << a.regexp_str << "'";
if (a.function != nullptr)
stream << ", function = " << a.function->getName();
if (!a.retentions.empty())
{
stream << ",\n retentions = {\n";
for (size_t i = 0; i < a.retentions.size(); i++)
{
stream << " { " << a.retentions[i].age << ", " << a.retentions[i].precision << " }";
if (i < a.retentions.size() - 1)
stream << ",";
stream << "\n";
}
stream << " }\n";
}
else
stream << " ";

stream << "}";
return stream;
}

std::string buildTaggedRegex(std::string regexp_str)
{
/*
* The tags list has the following format (a regexp may be used for the name or for any value;
* alphabetical sorting is not needed).
* Spaces are not stripped and are treated as part of the tag or value.
* The name, if used, must come first:
*
* tag1=value1; tag2=VALUE2_REGEX;tag3=value3
* or
* name;tag1=value1;tag2=VALUE2_REGEX;tag3=value3
* or for one tag
* tag1=value1
*
* The resulting regex is matched against metrics like
* name?tag1=value1&tag2=value2
*
* So,
*
* name
* produces
* name\?
*
* tag2=val2
* produces
* [\?&]tag2=val2(&.*)?$
*
* nam.* ; tag1=val1 ; tag2=val2
* produces
* nam.*\?(.*&)?tag1=val1&(.*&)?tag2=val2(&.*)?$
*/

std::vector<std::string> tags;

splitInto<';'>(tags, regexp_str);
/* remove empty elements */
using namespace std::string_literals;
tags.erase(std::remove(tags.begin(), tags.end(), ""s), tags.end());
if (tags[0].find('=') == tags[0].npos)
{
if (tags.size() == 1) /* only name */
return "^" + tags[0] + "\\?";
/* start with name value */
regexp_str = "^" + tags[0] + "\\?(.*&)?";
tags.erase(std::begin(tags));
}
else
regexp_str = "[\\?&]";

std::sort(std::begin(tags), std::end(tags)); /* sorted tag keys */
regexp_str += fmt::format(
"{}{}",
fmt::join(tags, "&(.*&)?"),
"(&.*)?$" /* close regex */
);

return regexp_str;
}
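A minimal sketch of how the tag_list form expands (values mirror the comment above; the variable name is hypothetical):

    // "name;tag1=val1" -> "^name\\?(.*&)?tag1=val1(&.*)?$"
    auto re = DB::Graphite::buildTaggedRegex("name;tag1=val1");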

/** Read the settings for Graphite rollup from config.
* Example
*
* <graphite_rollup>
* <path_column_name>Path</path_column_name>
* <pattern>
* <regexp>click_cost</regexp>
* <function>any</function>
* <retention>
* <age>0</age>
* <precision>3600</precision>
* </retention>
* <retention>
* <age>86400</age>
* <precision>60</precision>
* </retention>
* </pattern>
* <default>
* <function>max</function>
* <retention>
* <age>0</age>
* <precision>60</precision>
* </retention>
* <retention>
* <age>3600</age>
* <precision>300</precision>
* </retention>
* <retention>
* <age>86400</age>
* <precision>3600</precision>
* </retention>
* </default>
* </graphite_rollup>
*/
static const Pattern &
appendGraphitePattern(
const Poco::Util::AbstractConfiguration & config,
const String & config_element, Patterns & patterns,
bool default_rule,
ContextPtr context)
{
Pattern pattern;

Poco::Util::AbstractConfiguration::Keys keys;
config.keys(config_element, keys);

for (const auto & key : keys)
{
if (key == "regexp")
{
pattern.regexp_str = config.getString(config_element + ".regexp");
}
else if (key == "function")
{
String aggregate_function_name_with_params = config.getString(config_element + ".function");
String aggregate_function_name;
Array params_row;
getAggregateFunctionNameAndParametersArray(
aggregate_function_name_with_params, aggregate_function_name, params_row, "GraphiteMergeTree storage initialization", context);

/// TODO Not only Float64
AggregateFunctionProperties properties;
pattern.function = AggregateFunctionFactory::instance().get(
aggregate_function_name, {std::make_shared<DataTypeFloat64>()}, params_row, properties);
}
else if (key == "rule_type")
{
String rule_type = config.getString(config_element + ".rule_type");
pattern.rule_type = ruleType(rule_type);
}
else if (startsWith(key, "retention"))
{
pattern.retentions.emplace_back(Graphite::Retention{
.age = config.getUInt(config_element + "." + key + ".age"),
.precision = config.getUInt(config_element + "." + key + ".precision")});
}
else
throw Exception("Unknown element in config: " + key, DB::ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG);
}

if (!pattern.regexp_str.empty())
{
if (pattern.rule_type == RuleTypeTagList)
{
// construct tagged regexp
pattern.regexp_str = buildTaggedRegex(pattern.regexp_str);
pattern.rule_type = RuleTypeTagged;
}
pattern.regexp = std::make_shared<OptimizedRegularExpression>(pattern.regexp_str);
}

if (!pattern.function && pattern.retentions.empty())
throw Exception(
"At least one of an aggregate function or retention rules is mandatory for rollup patterns in GraphiteMergeTree",
DB::ErrorCodes::NO_ELEMENTS_IN_CONFIG);

if (default_rule && pattern.rule_type != RuleTypeAll)
{
throw Exception(
"Default must have rule_type all for rollup patterns in GraphiteMergeTree",
DB::ErrorCodes::BAD_ARGUMENTS);
}

if (!pattern.function)
{
pattern.type = pattern.TypeRetention;
}
else if (pattern.retentions.empty())
{
pattern.type = pattern.TypeAggregation;
}
else
{
pattern.type = pattern.TypeAll;
}

if (pattern.type & pattern.TypeAggregation) /// TypeAggregation or TypeAll
if (pattern.function->allocatesMemoryInArena())
throw Exception(
"Aggregate function " + pattern.function->getName() + " isn't supported in GraphiteMergeTree", DB::ErrorCodes::NOT_IMPLEMENTED);

/// retention should be in descending order of age.
if (pattern.type & pattern.TypeRetention) /// TypeRetention or TypeAll
std::sort(pattern.retentions.begin(), pattern.retentions.end(), compareRetentions);

patterns.emplace_back(pattern);
return patterns.back();
}

void setGraphitePatternsFromConfig(ContextPtr context, const String & config_element, Graphite::Params & params)
{
const auto & config = context->getConfigRef();

if (!config.has(config_element))
throw Exception("No '" + config_element + "' element in configuration file", ErrorCodes::NO_ELEMENTS_IN_CONFIG);

params.config_name = config_element;
params.path_column_name = config.getString(config_element + ".path_column_name", "Path");
params.time_column_name = config.getString(config_element + ".time_column_name", "Time");
params.value_column_name = config.getString(config_element + ".value_column_name", "Value");
params.version_column_name = config.getString(config_element + ".version_column_name", "Timestamp");

params.patterns_typed = false;

Poco::Util::AbstractConfiguration::Keys keys;
config.keys(config_element, keys);

for (const auto & key : keys)
{
if (startsWith(key, "pattern"))
{
if (appendGraphitePattern(config, config_element + "." + key, params.patterns, false, context).rule_type != RuleTypeAll)
params.patterns_typed = true;
}
else if (key == "default")
{
/// See below.
}
else if (key == "path_column_name" || key == "time_column_name" || key == "value_column_name" || key == "version_column_name")
{
/// See above.
}
else
throw Exception("Unknown element in config: " + key, ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG);
}

if (config.has(config_element + ".default"))
appendGraphitePattern(config, config_element + "." + ".default", params.patterns, true, context);

for (const auto & pattern : params.patterns)
{
if (pattern.rule_type == RuleTypeAll)
{
if (params.patterns_typed)
{
params.patterns_plain.push_back(pattern);
params.patterns_tagged.push_back(pattern);
}
}
else if (pattern.rule_type == RuleTypePlain)
{
params.patterns_plain.push_back(pattern);
}
else if (pattern.rule_type == RuleTypeTagged)
{
params.patterns_tagged.push_back(pattern);
}
else
{
throw Exception("Unhandled rule_type in config: " + ruleTypeStr(pattern.rule_type), ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG);
}
}
}

}
@ -1,13 +1,8 @@
#pragma once

#include <base/StringRef.h>
#include <Common/OptimizedRegularExpression.h>

namespace DB
{

class IAggregateFunction;
using AggregateFunctionPtr = std::shared_ptr<const IAggregateFunction>;

}
#include <AggregateFunctions/IAggregateFunction.h>

/** Intended for implementation of "rollup" - aggregation (rounding) of older data
* for a table with Graphite data (Graphite is the system for time series monitoring).

@ -97,16 +92,32 @@ using AggregateFunctionPtr = std::shared_ptr<const IAggregateFunction>;
namespace DB::Graphite
{

// Must be kept in sync with ruleTypeMap in Graphite.cpp.
enum RuleType
{
RuleTypeAll = 0, // default, with regex, compatible with old scheme
RuleTypePlain = 1, // plain metrics, with regex, compatible with old scheme
RuleTypeTagged = 2, // tagged metrics, with regex, compatible with old scheme
RuleTypeTagList = 3 // tagged metrics, with regex (converted to RuleTypeTagged from string like 'retention=10min ; env=(staging|prod)')
};

const String & ruleTypeStr(RuleType rule_type);

struct Retention
{
UInt32 age;
UInt32 precision;
};

bool operator==(const Retention & a, const Retention & b);

using Retentions = std::vector<Retention>;

std::ostream & operator<<(std::ostream & stream, const Retentions & a);

struct Pattern
{
RuleType rule_type = RuleTypeAll;
std::shared_ptr<OptimizedRegularExpression> regexp;
std::string regexp_str;
AggregateFunctionPtr function;
@ -114,6 +125,9 @@ struct Pattern
enum { TypeUndef, TypeRetention, TypeAggregation, TypeAll } type = TypeAll; /// The type of defined pattern, filled automatically
};

bool operator==(const Pattern & a, const Pattern & b);
std::ostream & operator<<(std::ostream & stream, const Pattern & a);

using Patterns = std::vector<Pattern>;
using RetentionPattern = Pattern;
using AggregationPattern = Pattern;
@ -125,9 +139,16 @@ struct Params
String time_column_name;
String value_column_name;
String version_column_name;
bool patterns_typed;
Graphite::Patterns patterns;
Graphite::Patterns patterns_plain;
Graphite::Patterns patterns_tagged;
};

using RollupRule = std::pair<const RetentionPattern *, const AggregationPattern *>;

Graphite::RollupRule selectPatternForPath(const Graphite::Params & params, const StringRef path);

void setGraphitePatternsFromConfig(ContextPtr context, const String & config_element, Graphite::Params & params);

}
@ -1,3 +1,4 @@
#include <Processors/Merges/Algorithms/Graphite.h>
#include <Processors/Merges/Algorithms/GraphiteRollupSortedAlgorithm.h>
#include <AggregateFunctions/IAggregateFunction.h>
#include <Common/DateLUTImpl.h>
@ -52,62 +53,6 @@ GraphiteRollupSortedAlgorithm::GraphiteRollupSortedAlgorithm(
columns_definition = defineColumns(header, params);
}

Graphite::RollupRule GraphiteRollupSortedAlgorithm::selectPatternForPath(StringRef path) const
{
const Graphite::Pattern * first_match = &undef_pattern;

for (const auto & pattern : params.patterns)
{
if (!pattern.regexp)
{
/// Default pattern
if (first_match->type == first_match->TypeUndef && pattern.type == pattern.TypeAll)
{
/// There is only default pattern for both retention and aggregation
return std::pair(&pattern, &pattern);
}
if (pattern.type != first_match->type)
{
if (first_match->type == first_match->TypeRetention)
{
return std::pair(first_match, &pattern);
}
if (first_match->type == first_match->TypeAggregation)
{
return std::pair(&pattern, first_match);
}
}
}
else if (pattern.regexp->match(path.data, path.size))
{
/// General pattern with matched path
if (pattern.type == pattern.TypeAll)
{
/// Only for not default patterns with both function and retention parameters
return std::pair(&pattern, &pattern);
}
if (first_match->type == first_match->TypeUndef)
{
first_match = &pattern;
continue;
}
if (pattern.type != first_match->type)
{
if (first_match->type == first_match->TypeRetention)
{
return std::pair(first_match, &pattern);
}
if (first_match->type == first_match->TypeAggregation)
{
return std::pair(&pattern, first_match);
}
}
}
}

return {nullptr, nullptr};
}

UInt32 GraphiteRollupSortedAlgorithm::selectPrecision(const Graphite::Retentions & retentions, time_t time) const
{
static_assert(is_signed_v<time_t>, "time_t must be signed type");
@ -188,7 +133,7 @@ IMergingAlgorithm::Status GraphiteRollupSortedAlgorithm::merge()

Graphite::RollupRule next_rule = merged_data.currentRule();
if (new_path)
next_rule = selectPatternForPath(next_path);
next_rule = selectPatternForPath(this->params, next_path);

const Graphite::RetentionPattern * retention_pattern = std::get<0>(next_rule);
time_t next_time_rounded;

@ -102,16 +102,6 @@ private:
time_t current_time = 0;
time_t current_time_rounded = 0;

const Graphite::Pattern undef_pattern =
{ /// temporary empty pattern for selectPatternForPath
.regexp = nullptr,
.regexp_str = "",
.function = nullptr,
.retentions = DB::Graphite::Retentions(),
.type = undef_pattern.TypeUndef,
};

Graphite::RollupRule selectPatternForPath(StringRef path) const;
UInt32 selectPrecision(const Graphite::Retentions & retentions, time_t time) const;

/// Insert the values into the resulting columns, which will not be changed in the future.
597
src/Processors/Merges/Algorithms/tests/gtest_graphite.cpp
Normal file
@ -0,0 +1,597 @@
#include <cstring>
#include <filesystem>
#include <fstream>
#include <stdexcept>

#include <gtest/gtest.h>

#include <Common/tests/gtest_global_context.h>
#include <Common/tests/gtest_global_register.h>

#include <AggregateFunctions/IAggregateFunction.h>
#include <AggregateFunctions/registerAggregateFunctions.h>
#include <Processors/Merges/Algorithms/Graphite.h>
#include <Common/Config/ConfigProcessor.h>

using namespace DB;

static int regAggregateFunctions = 0;

void tryRegisterAggregateFunctions()
{
if (!regAggregateFunctions)
{
registerAggregateFunctions();
regAggregateFunctions = 1;
}
}

static ConfigProcessor::LoadedConfig loadConfiguration(const std::string & config_path)
{
ConfigProcessor config_processor(config_path, true, true);
ConfigProcessor::LoadedConfig config = config_processor.loadConfig(false);
return config;
}

static ConfigProcessor::LoadedConfig loadConfigurationFromString(std::string & s)
{
char tmp_file[19];
strcpy(tmp_file, "/tmp/rollup-XXXXXX");
int fd = mkstemp(tmp_file);
if (fd == -1)
{
throw std::runtime_error(strerror(errno));
}
try {
if (write(fd, s.c_str(), s.size()) < static_cast<ssize_t>(s.size()))
{
throw std::runtime_error("unable to write to temp file");
}
if (write(fd, "\n", 1) != 1)
{
throw std::runtime_error("unable to write to temp file");
}
close(fd);
auto config_path = std::string(tmp_file) + ".xml";
if (std::rename(tmp_file, config_path.c_str()))
{
int err = errno;
remove(tmp_file);
throw std::runtime_error(strerror(err));
}
ConfigProcessor::LoadedConfig config = loadConfiguration(config_path);
remove(tmp_file);
return config;
}
catch (...)
{
remove(tmp_file);
throw;
}
}

static Graphite::Params setGraphitePatterns(ContextMutablePtr context, ConfigProcessor::LoadedConfig & config)
{
context->setConfig(config.configuration);

Graphite::Params params;
setGraphitePatternsFromConfig(context, "graphite_rollup", params);

return params;
}

struct PatternForCheck
{
Graphite::RuleType rule_type;
std::string regexp_str;
String function;
Graphite::Retentions retentions;
};


bool checkRule(const Graphite::Pattern & pattern, const struct PatternForCheck & pattern_check,
const std::string & typ, const std::string & path, std::string & message)
{
bool rule_type_eq = (pattern.rule_type == pattern_check.rule_type);
bool regexp_eq = (pattern.regexp_str == pattern_check.regexp_str);
bool function_eq = (pattern.function == nullptr && pattern_check.function.empty())
|| (pattern.function != nullptr && pattern.function->getName() == pattern_check.function);
bool retentions_eq = (pattern.retentions == pattern_check.retentions);

if (rule_type_eq && regexp_eq && function_eq && retentions_eq)
return true;

message = typ + " rollup rule mismatch for '" + path + "', " +
(rule_type_eq ? "" : "rule_type ") +
(regexp_eq ? "" : "regexp ") +
(function_eq ? "" : "function ") +
(retentions_eq ? "" : "retentions ");
return false;
}

std::ostream & operator<<(std::ostream & stream, const PatternForCheck & a)
{
stream << "{ rule_type = " << ruleTypeStr(a.rule_type);
if (!a.regexp_str.empty())
stream << ", regexp = '" << a.regexp_str << "'";
if (!a.function.empty())
stream << ", function = " << a.function;
if (!a.retentions.empty())
{
stream << ",\n retentions = {\n";
for (size_t i = 0; i < a.retentions.size(); i++)
{
stream << " { " << a.retentions[i].age << ", " << a.retentions[i].precision << " }";
if (i < a.retentions.size() - 1)
stream << ",";
stream << "\n";
}
stream << " }\n";
}
else
stream << " ";

stream << "}";
return stream;
}

struct PatternsForPath
{
std::string path;
PatternForCheck retention_want;
PatternForCheck aggregation_want;
};

TEST(GraphiteTest, testSelectPattern)
{
tryRegisterAggregateFunctions();

using namespace std::literals;

std::string
xml(R"END(<yandex>
<graphite_rollup>
<pattern>
<regexp>\.sum$</regexp>
<function>sum</function>
</pattern>
<pattern>
<regexp>^((.*)|.)sum\?</regexp>
<function>sum</function>
</pattern>
<pattern>
<regexp>\.max$</regexp>
<function>max</function>
</pattern>
<pattern>
<regexp>^((.*)|.)max\?</regexp>
<function>max</function>
</pattern>
<pattern>
<regexp>\.min$</regexp>
<function>min</function>
</pattern>
<pattern>
<regexp>^((.*)|.)min\?</regexp>
<function>min</function>
</pattern>
<pattern>
<regexp>\.(count|sum|sum_sq)$</regexp>
<function>sum</function>
</pattern>
<pattern>
<regexp>^((.*)|.)(count|sum|sum_sq)\?</regexp>
<function>sum</function>
</pattern>
<pattern>
<regexp>^retention\.</regexp>
<retention>
<age>0</age>
<precision>60</precision>
</retention>
<retention>
<age>86400</age>
<precision>3600</precision>
</retention>
</pattern>
<default>
<function>avg</function>
<retention>
<age>0</age>
<precision>60</precision>
</retention>
<retention>
<age>3600</age>
<precision>300</precision>
</retention>
<retention>
<age>86400</age>
<precision>3600</precision>
</retention>
</default>
</graphite_rollup>
</yandex>
)END");

// Retentions must be ordered by 'age' descending.
std::vector<struct PatternsForPath> tests
{
{
"test.sum",
{ Graphite::RuleTypeAll, "", "avg", { { 86400, 3600 }, { 3600, 300 }, { 0, 60 } } }, //default
{ Graphite::RuleTypeAll, R"END(\.sum$)END", "sum", { } }
},
{
"val.sum?env=test&tag=Fake3",
{ Graphite::RuleTypeAll, "", "avg", { { 86400, 3600 }, { 3600, 300 }, { 0, 60 } } }, //default
{ Graphite::RuleTypeAll, R"END(^((.*)|.)sum\?)END", "sum", { } }
},
{
"test.max",
{ Graphite::RuleTypeAll, "", "avg", { { 86400, 3600 }, { 3600, 300 }, { 0, 60 } } }, //default
{ Graphite::RuleTypeAll, R"END(\.max$)END", "max", { } },
},
{
"val.max?env=test&tag=Fake4",
{ Graphite::RuleTypeAll, "", "avg", { { 86400, 3600 }, { 3600, 300 }, { 0, 60 } } }, //default
{ Graphite::RuleTypeAll, R"END(^((.*)|.)max\?)END", "max", { } },
},
{
"test.min",
{ Graphite::RuleTypeAll, "", "avg", { { 86400, 3600 }, { 3600, 300 }, { 0, 60 } } }, //default
{ Graphite::RuleTypeAll, R"END(\.min$)END", "min", { } },
},
{
"val.min?env=test&tag=Fake5",
{ Graphite::RuleTypeAll, "", "avg", { { 86400, 3600 }, { 3600, 300 }, { 0, 60 } } }, //default
{ Graphite::RuleTypeAll, R"END(^((.*)|.)min\?)END", "min", { } },
},
{
"retention.count",
{ Graphite::RuleTypeAll, R"END(^retention\.)END", "", { { 86400, 3600 }, { 0, 60 } } }, // ^retention
{ Graphite::RuleTypeAll, R"END(\.(count|sum|sum_sq)$)END", "sum", { } },
},
{
"val.retention.count?env=test&tag=Fake5",
{ Graphite::RuleTypeAll, "", "avg", { { 86400, 3600 }, { 3600, 300 }, { 0, 60 } } }, //default
{ Graphite::RuleTypeAll, R"END(^((.*)|.)(count|sum|sum_sq)\?)END", "sum", { } },
},
{
"val.count?env=test&tag=Fake5",
{ Graphite::RuleTypeAll, "", "avg", { { 86400, 3600 }, { 3600, 300 }, { 0, 60 } } }, //default
{ Graphite::RuleTypeAll, R"END(^((.*)|.)(count|sum|sum_sq)\?)END", "sum", { } },
},
{
"test.p95",
{ Graphite::RuleTypeAll, "", "avg", { { 86400, 3600 }, { 3600, 300 }, { 0, 60 } } }, //default
{ Graphite::RuleTypeAll, "", "avg", { { 86400, 3600 }, { 3600, 300 }, { 0, 60 } } }, //default
},
{
"val.p95?env=test&tag=FakeNo",
{ Graphite::RuleTypeAll, "", "avg", { { 86400, 3600 }, { 3600, 300 }, { 0, 60 } } }, //default
{ Graphite::RuleTypeAll, "", "avg", { { 86400, 3600 }, { 3600, 300 }, { 0, 60 } } }, //default
},
{
"default",
{ Graphite::RuleTypeAll, "", "avg", { { 86400, 3600 }, { 3600, 300 }, { 0, 60 } } }, //default
{ Graphite::RuleTypeAll, "", "avg", { { 86400, 3600 }, { 3600, 300 }, { 0, 60 } } }, //default
},
{
"val.default?env=test&tag=FakeNo",
{ Graphite::RuleTypeAll, "", "avg", { { 86400, 3600 }, { 3600, 300 }, { 0, 60 } } }, //default
{ Graphite::RuleTypeAll, "", "avg", { { 86400, 3600 }, { 3600, 300 }, { 0, 60 } } }, //default
}
};

auto config = loadConfigurationFromString(xml);
ContextMutablePtr context = getContext().context;
Graphite::Params params = setGraphitePatterns(context, config);

for (const auto & t : tests)
{
auto rule = DB::Graphite::selectPatternForPath(params, t.path);
std::string message;
if (!checkRule(*rule.first, t.retention_want, "retention", t.path, message))
ADD_FAILURE() << message << ", got\n" << *rule.first << "\n, want\n" << t.retention_want << "\n";
if (!checkRule(*rule.second, t.aggregation_want, "aggregation", t.path, message))
ADD_FAILURE() << message << ", got\n" << *rule.second << "\n, want\n" << t.aggregation_want << "\n";
}
}

namespace DB::Graphite
{
std::string buildTaggedRegex(std::string regexp_str);
}

struct RegexCheck
{
std::string regex;
std::string regex_want;
std::string match;
std::string nomatch;
};

TEST(GraphiteTest, testBuildTaggedRegex)
{
std::vector<struct RegexCheck> tests
{
{
"cpu\\.loadavg;project=DB.*;env=st.*",
R"END(^cpu\.loadavg\?(.*&)?env=st.*&(.*&)?project=DB.*(&.*)?$)END",
R"END(cpu.loadavg?env=staging&project=DBAAS)END",
R"END(cpu.loadavg?env=staging&project=D)END"
},
{
R"END(project=DB.*;env=staging;)END",
R"END([\?&]env=staging&(.*&)?project=DB.*(&.*)?$)END",
R"END(cpu.loadavg?env=staging&project=DBPG)END",
R"END(cpu.loadavg?env=stagingN&project=DBAAS)END"
},
{
"env=staging;",
R"END([\?&]env=staging(&.*)?$)END",
R"END(cpu.loadavg?env=staging&project=DPG)END",
R"END(cpu.loadavg?env=stagingN)END"
},
{
" env = staging ;", // spaces are allowed,
R"END([\?&] env = staging (&.*)?$)END",
R"END(cpu.loadavg? env = staging &project=DPG)END",
R"END(cpu.loadavg?env=stagingN)END"
},
{
"name;",
R"END(^name\?)END",
R"END(name?env=staging&project=DPG)END",
R"END(nameN?env=stagingN)END",
},
{
"name",
R"END(^name\?)END",
R"END(name?env=staging&project=DPG)END",
R"END(nameN?env=stagingN)END",
}
};
for (const auto & t : tests)
{
auto s = DB::Graphite::buildTaggedRegex(t.regex);
EXPECT_EQ(t.regex_want, s) << "result for '" << t.regex_want << "' mismatch";
auto regexp = OptimizedRegularExpression(s);
EXPECT_TRUE(regexp.match(t.match.data(), t.match.size())) << t.match << " match for '" << s << "' failed";
EXPECT_FALSE(regexp.match(t.nomatch.data(), t.nomatch.size())) << t.nomatch << " ! match for '" << s << "' failed";
}
}

TEST(GraphiteTest, testSelectPatternTyped)
{
tryRegisterAggregateFunctions();

using namespace std::literals;

std::string
xml(R"END(<yandex>
<graphite_rollup>
<pattern>
<rule_type>plain</rule_type>
<regexp>\.sum$</regexp>
<function>sum</function>
</pattern>
<pattern>
<rule_type>tagged</rule_type>
<regexp>^((.*)|.)sum\?</regexp>
<function>sum</function>
</pattern>
<pattern>
<rule_type>plain</rule_type>
<regexp>\.max$</regexp>
<function>max</function>
</pattern>
<pattern>
<rule_type>tagged</rule_type>
<regexp>^((.*)|.)max\?</regexp>
<function>max</function>
</pattern>
<pattern>
<rule_type>plain</rule_type>
<regexp>\.min$</regexp>
<function>min</function>
</pattern>
<pattern>
<rule_type>tagged</rule_type>
<regexp>^((.*)|.)min\?</regexp>
<function>min</function>
</pattern>
<pattern>
<rule_type>plain</rule_type>
<regexp>\.(count|sum|sum_sq)$</regexp>
<function>sum</function>
</pattern>
<pattern>
<rule_type>tagged</rule_type>
<regexp>^((.*)|.)(count|sum|sum_sq)\?</regexp>
<function>sum</function>
</pattern>
<pattern>
<rule_type>plain</rule_type>
<regexp>^retention\.</regexp>
<retention>
<age>0</age>
<precision>60</precision>
</retention>
<retention>
<age>86400</age>
<precision>3600</precision>
</retention>
</pattern>
<pattern>
<rule_type>tagged</rule_type>
<regexp><![CDATA[[\?&]retention=hour(&.*)?$]]></regexp>
<retention>
<age>0</age>
<precision>60</precision>
</retention>
<retention>
<age>86400</age>
<precision>3600</precision>
</retention>
</pattern>
<pattern>
<rule_type>tag_list</rule_type>
<regexp>retention=10min;env=staging</regexp>
<retention>
<age>0</age>
<precision>600</precision>
</retention>
<retention>
<age>86400</age>
<precision>3600</precision>
</retention>
</pattern>
<pattern>
<rule_type>tag_list</rule_type>
<regexp>retention=10min;env=[A-Za-z-]+rod[A-Za-z-]+</regexp>
<retention>
<age>0</age>
<precision>600</precision>
</retention>
<retention>
<age>86400</age>
<precision>3600</precision>
</retention>
</pattern>
<pattern>
<rule_type>tag_list</rule_type>
<regexp>cpu\.loadavg</regexp>
<retention>
<age>0</age>
<precision>600</precision>
</retention>
<retention>
<age>86400</age>
<precision>3600</precision>
</retention>
</pattern>
<default>
<function>avg</function>
<retention>
<age>0</age>
<precision>60</precision>
</retention>
<retention>
<age>3600</age>
<precision>300</precision>
</retention>
<retention>
<age>86400</age>
<precision>3600</precision>
</retention>
</default>
</graphite_rollup>
</yandex>
)END");

// Retentions must be ordered by 'age' descending.
std::vector<PatternsForPath> tests
{
{
"test.sum",
{ Graphite::RuleTypeAll, "", "avg", { { 86400, 3600 }, { 3600, 300 }, { 0, 60 } } }, //default
{ Graphite::RuleTypePlain, R"END(\.sum$)END", "sum", { } }
},
{
"val.sum?env=test&tag=Fake3",
{ Graphite::RuleTypeAll, "", "avg", { { 86400, 3600 }, { 3600, 300 }, { 0, 60 } } }, //default
{ Graphite::RuleTypeTagged, R"END(^((.*)|.)sum\?)END", "sum", { } }
},
{
"test.max",
{ Graphite::RuleTypeAll, "", "avg", { { 86400, 3600 }, { 3600, 300 }, { 0, 60 } } }, //default
{ Graphite::RuleTypePlain, R"END(\.max$)END", "max", { } },
},
{
"val.max?env=test&tag=Fake4",
{ Graphite::RuleTypeAll, "", "avg", { { 86400, 3600 }, { 3600, 300 }, { 0, 60 } } }, //default
{ Graphite::RuleTypeTagged, R"END(^((.*)|.)max\?)END", "max", { } },
},
{
"test.min",
{ Graphite::RuleTypeAll, "", "avg", { { 86400, 3600 }, { 3600, 300 }, { 0, 60 } } }, //default
{ Graphite::RuleTypePlain, R"END(\.min$)END", "min", { } },
},
{
"val.min?env=test&tag=Fake5",
{ Graphite::RuleTypeAll, "", "avg", { { 86400, 3600 }, { 3600, 300 }, { 0, 60 } } }, //default
{ Graphite::RuleTypeTagged, R"END(^((.*)|.)min\?)END", "min", { } },
},
{
"retention.count",
{ Graphite::RuleTypePlain, R"END(^retention\.)END", "", { { 86400, 3600 }, { 0, 60 } } }, // ^retention
{ Graphite::RuleTypePlain, R"END(\.(count|sum|sum_sq)$)END", "sum", { } },
},
{
"val.count?env=test&retention=hour&tag=Fake5",
{ Graphite::RuleTypeTagged, R"END([\?&]retention=hour(&.*)?$)END", "", { { 86400, 3600 }, { 0, 60 } } }, // tagged retention=hour
{ Graphite::RuleTypeTagged, R"END(^((.*)|.)(count|sum|sum_sq)\?)END", "sum", { } },
},
{
"val.count?env=test&retention=hour",
{ Graphite::RuleTypeTagged, R"END([\?&]retention=hour(&.*)?$)END", "", { { 86400, 3600 }, { 0, 60 } } }, // tagged retention=hour
{ Graphite::RuleTypeTagged, R"END(^((.*)|.)(count|sum|sum_sq)\?)END", "sum", { } },
},
{
"val.count?env=staging&retention=10min",
{ Graphite::RuleTypeTagged, R"END([\?&]env=staging&(.*&)?retention=10min(&.*)?$)END", "", { { 86400, 3600 }, { 0, 600 } } }, // retention=10min ; env=staging
{ Graphite::RuleTypeTagged, R"END(^((.*)|.)(count|sum|sum_sq)\?)END", "sum", { } },
},
{
"val.count?env=production&retention=10min",
{ Graphite::RuleTypeTagged, R"END([\?&]env=[A-Za-z-]+rod[A-Za-z-]+&(.*&)?retention=10min(&.*)?$)END", "", { { 86400, 3600 }, { 0, 600 } } }, // retention=10min ; env=[A-Za-z-]+rod[A-Za-z-]+
{ Graphite::RuleTypeTagged, R"END(^((.*)|.)(count|sum|sum_sq)\?)END", "sum", { } },
},
{
"val.count?env=test&tag=Fake5",
{ Graphite::RuleTypeAll, "", "avg", { { 86400, 3600 }, { 3600, 300 }, { 0, 60 } } }, //default
{ Graphite::RuleTypeTagged, R"END(^((.*)|.)(count|sum|sum_sq)\?)END", "sum", { } },
},
{
"cpu.loadavg?env=test&tag=FakeNo",
{ Graphite::RuleTypeTagged, R"END(^cpu\.loadavg\?)END", "", { { 86400, 3600 }, { 0, 600 } } }, // name=cpu\.loadavg
{ Graphite::RuleTypeAll, "", "avg", { { 86400, 3600 }, { 3600, 300 }, { 0, 60 } } },
},
{
"test.p95",
{ Graphite::RuleTypeAll, "", "avg", { { 86400, 3600 }, { 3600, 300 }, { 0, 60 } } }, //default
{ Graphite::RuleTypeAll, "", "avg", { { 86400, 3600 }, { 3600, 300 }, { 0, 60 } } }, //default
},
{
"val.p95?env=test&tag=FakeNo",
{ Graphite::RuleTypeAll, "", "avg", { { 86400, 3600 }, { 3600, 300 }, { 0, 60 } } }, //default
{ Graphite::RuleTypeAll, "", "avg", { { 86400, 3600 }, { 3600, 300 }, { 0, 60 } } }, //default
},
{
"default",
{ Graphite::RuleTypeAll, "", "avg", { { 86400, 3600 }, { 3600, 300 }, { 0, 60 } } }, //default
{ Graphite::RuleTypeAll, "", "avg", { { 86400, 3600 }, { 3600, 300 }, { 0, 60 } } }, //default
},
{
"val.default?env=test&tag=FakeNo",
{ Graphite::RuleTypeAll, "", "avg", { { 86400, 3600 }, { 3600, 300 }, { 0, 60 } } }, //default
{ Graphite::RuleTypeAll, "", "avg", { { 86400, 3600 }, { 3600, 300 }, { 0, 60 } } }, //default
}
};

auto config = loadConfigurationFromString(xml);
ContextMutablePtr context = getContext().context;
Graphite::Params params = setGraphitePatterns(context, config);

for (const auto & t : tests)
{
auto rule = DB::Graphite::selectPatternForPath(params, t.path);
std::string message;
if (!checkRule(*rule.first, t.retention_want, "retention", t.path, message))
ADD_FAILURE() << message << ", got\n" << *rule.first << "\n, want\n" << t.retention_want << "\n";
if (!checkRule(*rule.second, t.aggregation_want, "aggregation", t.path, message))
ADD_FAILURE() << message << ", got\n" << *rule.second << "\n, want\n" << t.aggregation_want << "\n";
}
}
@ -139,8 +139,10 @@ void TTLTransform::finalize()

if (delete_algorithm)
{
size_t rows_removed = all_data_dropped ? data_part->rows_count : delete_algorithm->getNumberOfRemovedRows();
LOG_DEBUG(log, "Removed {} rows with expired TTL from part {}", rows_removed, data_part->name);
if (all_data_dropped)
LOG_DEBUG(log, "Removed all rows from part {} due to expired TTL", data_part->name);
else
LOG_DEBUG(log, "Removed {} rows with expired TTL from part {}", delete_algorithm->getNumberOfRemovedRows(), data_part->name);
}
}

@ -15,6 +15,9 @@
#if USE_RDKAFKA
#include <Storages/Kafka/KafkaSettings.h>
#endif
#if USE_MYSQL
#include <Storages/MySQL/MySQLSettings.h>
#endif

#include <re2/re2.h>

@ -26,12 +29,31 @@ namespace ErrorCodes
extern const int BAD_ARGUMENTS;
}

IMPLEMENT_SETTINGS_TRAITS(EmptySettingsTraits, EMPTY_SETTINGS)

static const std::unordered_set<std::string_view> dictionary_allowed_keys = {
"host", "port", "user", "password", "db",
"database", "table", "schema", "replica",
"update_field", "update_tag", "invalidate_query", "query",
"where", "name", "secure", "uri", "collection"};


template<typename T>
SettingsChanges getSettingsChangesFromConfig(
const BaseSettings<T> & settings, const Poco::Util::AbstractConfiguration & config, const String & config_prefix)
{
SettingsChanges config_settings;
for (const auto & setting : settings.all())
{
const auto & setting_name = setting.getName();
auto setting_value = config.getString(config_prefix + '.' + setting_name, "");
if (!setting_value.empty())
config_settings.emplace_back(setting_name, setting_value);
}
return config_settings;
}
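A hedged usage sketch of the helper above (the named-collection prefix is hypothetical):

    // Collect only the settings that are explicitly present under the prefix.
    MySQLSettings mysql_settings;
    SettingsChanges changes = getSettingsChangesFromConfig(mysql_settings, config, "named_collections.mymysql");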
|
||||
|
||||
String ExternalDataSourceConfiguration::toString() const
|
||||
{
|
||||
WriteBufferFromOwnString configuration_info;
|
||||
@ -67,7 +89,9 @@ void ExternalDataSourceConfiguration::set(const ExternalDataSourceConfiguration
|
||||
}
|
||||
|
||||
|
||||
std::optional<ExternalDataSourceConfig> getExternalDataSourceConfiguration(const ASTs & args, ContextPtr context, bool is_database_engine, bool throw_on_no_collection)
|
||||
template <typename T>
|
||||
std::optional<ExternalDataSourceInfo> getExternalDataSourceConfiguration(
|
||||
const ASTs & args, ContextPtr context, bool is_database_engine, bool throw_on_no_collection, const BaseSettings<T> & storage_settings)
|
||||
{
|
||||
if (args.empty())
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "External data source must have arguments");
|
||||
@ -90,6 +114,8 @@ std::optional<ExternalDataSourceConfig> getExternalDataSourceConfiguration(const
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "There is no collection named `{}` in config", collection->name());
|
||||
}
|
||||
|
||||
SettingsChanges config_settings = getSettingsChangesFromConfig(storage_settings, config, collection_prefix);
|
||||
|
||||
configuration.host = config.getString(collection_prefix + ".host", "");
|
||||
configuration.port = config.getInt(collection_prefix + ".port", 0);
|
||||
configuration.username = config.getString(collection_prefix + ".user", "");
|
||||
@ -131,6 +157,7 @@ std::optional<ExternalDataSourceConfig> getExternalDataSourceConfiguration(const
|
||||
if (arg_value_literal)
|
||||
{
|
||||
auto arg_value = arg_value_literal->value;
|
||||
|
||||
if (arg_name == "host")
|
||||
configuration.host = arg_value.safeGet<String>();
|
||||
else if (arg_name == "port")
|
||||
@ -147,6 +174,8 @@ std::optional<ExternalDataSourceConfig> getExternalDataSourceConfiguration(const
|
||||
configuration.schema = arg_value.safeGet<String>();
|
||||
else if (arg_name == "addresses_expr")
|
||||
configuration.addresses_expr = arg_value.safeGet<String>();
|
||||
else if (storage_settings.has(arg_name))
|
||||
config_settings.emplace_back(arg_name, arg_value);
|
||||
else
|
||||
non_common_args.emplace_back(std::make_pair(arg_name, arg_value_ast));
|
||||
}
|
||||
@ -161,8 +190,7 @@ std::optional<ExternalDataSourceConfig> getExternalDataSourceConfiguration(const
|
||||
}
|
||||
}
|
||||
|
||||
ExternalDataSourceConfig source_config{ .configuration = configuration, .specific_args = non_common_args };
|
||||
return source_config;
|
||||
return ExternalDataSourceInfo{ .configuration = configuration, .specific_args = non_common_args, .settings_changes = config_settings };
|
||||
}
|
||||
return std::nullopt;
|
||||
}
|
||||
@ -179,9 +207,10 @@ static void validateConfigKeys(
|
||||
}
|
||||
}
|
||||
|
||||
std::optional<ExternalDataSourceConfiguration> getExternalDataSourceConfiguration(
|
||||
template <typename T>
|
||||
std::optional<ExternalDataSourceInfo> getExternalDataSourceConfiguration(
|
||||
const Poco::Util::AbstractConfiguration & dict_config, const String & dict_config_prefix,
|
||||
ContextPtr context, HasConfigKeyFunc has_config_key)
|
||||
ContextPtr context, HasConfigKeyFunc has_config_key, const BaseSettings<T> & settings)
|
||||
{
|
||||
validateConfigKeys(dict_config, dict_config_prefix, has_config_key);
|
||||
ExternalDataSourceConfiguration configuration;
|
||||
@ -192,6 +221,10 @@ std::optional<ExternalDataSourceConfiguration> getExternalDataSourceConfiguratio
|
||||
const auto & config = context->getConfigRef();
|
||||
const auto & collection_prefix = fmt::format("named_collections.{}", collection_name);
|
||||
validateConfigKeys(dict_config, collection_prefix, has_config_key);
|
||||
auto config_settings = getSettingsChangesFromConfig(settings, config, collection_prefix);
|
||||
auto dict_settings = getSettingsChangesFromConfig(settings, dict_config, dict_config_prefix);
|
||||
/// dictionary config settings override collection settings.
|
||||
config_settings.insert(config_settings.end(), dict_settings.begin(), dict_settings.end());
|
||||
|
||||
if (!config.has(collection_prefix))
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "There is no collection named `{}` in config", collection_name);
|
||||
@ -210,7 +243,7 @@ std::optional<ExternalDataSourceConfiguration> getExternalDataSourceConfiguratio
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS,
|
||||
"Named collection of connection parameters is missing some of the parameters and dictionary parameters are not added");
|
||||
}
|
||||
return configuration;
|
||||
return ExternalDataSourceInfo{ .configuration = configuration, .specific_args = {}, .settings_changes = config_settings };
|
||||
}
|
||||
return std::nullopt;
|
||||
}
|
||||
@ -225,7 +258,7 @@ ExternalDataSourcesByPriority getExternalDataSourceConfigurationByPriority(
|
||||
auto named_collection = getExternalDataSourceConfiguration(dict_config, dict_config_prefix, context, has_config_key);
|
||||
if (named_collection)
|
||||
{
|
||||
common_configuration = *named_collection;
|
||||
common_configuration = named_collection->configuration;
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -391,6 +424,7 @@ std::optional<URLBasedDataSourceConfig> getURLBasedDataSourceConfiguration(const
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
|
||||
template<typename T>
|
||||
bool getExternalDataSourceConfiguration(const ASTs & args, BaseSettings<T> & settings, ContextPtr context)
|
||||
{
|
||||
@ -405,14 +439,7 @@ bool getExternalDataSourceConfiguration(const ASTs & args, BaseSettings<T> & set
|
||||
if (!config.has(config_prefix))
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "There is no collection named `{}` in config", collection->name());
|
||||
|
||||
SettingsChanges config_settings;
|
||||
for (const auto & setting : settings.all())
|
||||
{
|
||||
const auto & setting_name = setting.getName();
|
||||
auto setting_value = config.getString(config_prefix + '.' + setting_name, "");
|
||||
if (!setting_value.empty())
|
||||
config_settings.emplace_back(setting_name, setting_value);
|
||||
}
|
||||
auto config_settings = getSettingsChangesFromConfig(settings, config, config_prefix);
|
||||
|
||||
/// Check key-value arguments.
|
||||
for (size_t i = 1; i < args.size(); ++i)
@ -450,4 +477,32 @@ bool getExternalDataSourceConfiguration(const ASTs & args, BaseSettings<RabbitMQ
template
bool getExternalDataSourceConfiguration(const ASTs & args, BaseSettings<KafkaSettingsTraits> & settings, ContextPtr context);
#endif

template
std::optional<ExternalDataSourceInfo> getExternalDataSourceConfiguration(
    const ASTs & args, ContextPtr context, bool is_database_engine, bool throw_on_no_collection, const BaseSettings<EmptySettingsTraits> & storage_settings);

template
std::optional<ExternalDataSourceInfo> getExternalDataSourceConfiguration(
    const Poco::Util::AbstractConfiguration & dict_config, const String & dict_config_prefix,
    ContextPtr context, HasConfigKeyFunc has_config_key, const BaseSettings<EmptySettingsTraits> & settings);

template
SettingsChanges getSettingsChangesFromConfig(
    const BaseSettings<EmptySettingsTraits> & settings, const Poco::Util::AbstractConfiguration & config, const String & config_prefix);

#if USE_MYSQL
template
std::optional<ExternalDataSourceInfo> getExternalDataSourceConfiguration(
    const ASTs & args, ContextPtr context, bool is_database_engine, bool throw_on_no_collection, const BaseSettings<MySQLSettingsTraits> & storage_settings);

template
std::optional<ExternalDataSourceInfo> getExternalDataSourceConfiguration(
    const Poco::Util::AbstractConfiguration & dict_config, const String & dict_config_prefix,
    ContextPtr context, HasConfigKeyFunc has_config_key, const BaseSettings<MySQLSettingsTraits> & settings);

template
SettingsChanges getSettingsChangesFromConfig(
    const BaseSettings<MySQLSettingsTraits> & settings, const Poco::Util::AbstractConfiguration & config, const String & config_prefix);
#endif
}
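Because these templates are defined in the .cpp file, every settings-traits type used with them needs an explicit instantiation like the blocks above. As a hedged illustration of the pattern only, a new engine family would add its own block; PostgreSQLSettingsTraits below is a hypothetical name, not part of this commit:

    #if USE_LIBPQXX
    template
    std::optional<ExternalDataSourceInfo> getExternalDataSourceConfiguration(
        const ASTs & args, ContextPtr context, bool is_database_engine, bool throw_on_no_collection, const BaseSettings<PostgreSQLSettingsTraits> & storage_settings);
    #endif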
@ -7,6 +7,11 @@
namespace DB
{

#define EMPTY_SETTINGS(M)
DECLARE_SETTINGS_TRAITS(EmptySettingsTraits, EMPTY_SETTINGS)

struct EmptySettings : public BaseSettings<EmptySettingsTraits> {};

struct ExternalDataSourceConfiguration
{
    String host;
@ -46,10 +51,11 @@ struct StorageMongoDBConfiguration : ExternalDataSourceConfiguration

using StorageSpecificArgs = std::vector<std::pair<String, ASTPtr>>;

struct ExternalDataSourceConfig
struct ExternalDataSourceInfo
{
    ExternalDataSourceConfiguration configuration;
    StorageSpecificArgs specific_args;
    SettingsChanges settings_changes;
};

/* If there is a storage engine's configuration specified in the named_collections,
@ -62,13 +68,16 @@ struct ExternalDataSourceConfig
 * Any key-value engine argument except common (`host`, `port`, `username`, `password`, `database`)
 * is returned in EngineArgs struct.
 */
std::optional<ExternalDataSourceConfig> getExternalDataSourceConfiguration(const ASTs & args, ContextPtr context, bool is_database_engine = false, bool throw_on_no_collection = true);
template <typename T = EmptySettingsTraits>
std::optional<ExternalDataSourceInfo> getExternalDataSourceConfiguration(
    const ASTs & args, ContextPtr context, bool is_database_engine = false, bool throw_on_no_collection = true, const BaseSettings<T> & storage_settings = {});

using HasConfigKeyFunc = std::function<bool(const String &)>;

std::optional<ExternalDataSourceConfiguration> getExternalDataSourceConfiguration(
template <typename T = EmptySettingsTraits>
std::optional<ExternalDataSourceInfo> getExternalDataSourceConfiguration(
    const Poco::Util::AbstractConfiguration & dict_config, const String & dict_config_prefix,
    ContextPtr context, HasConfigKeyFunc has_config_key);
    ContextPtr context, HasConfigKeyFunc has_config_key, const BaseSettings<T> & settings = {});


/// Highest priority is 0; the bigger the number in the map, the lower the priority.
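The default template parameter keeps existing call sites source-compatible while letting engines pass their settings through; T is deduced from the derived settings object. A hedged call-site sketch (illustrative, not from this commit):

    /// MySQLSettings derives from BaseSettings<MySQLSettingsTraits>, so T is
    /// deduced as MySQLSettingsTraits and the named collection's setting
    /// overrides can presumably be collected for this engine.
    MySQLSettings mysql_settings;
    auto info = getExternalDataSourceConfiguration(
        engine_args, context, /* is_database_engine */ false, /* throw_on_no_collection */ true, mysql_settings);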
@ -1,3 +1,4 @@
#include <Databases/DatabaseReplicatedHelpers.h>
#include <Storages/MergeTree/MergeTreeIndexMinMax.h>
#include <Storages/MergeTree/MergeTreeIndexSet.h>
#include <Storages/MergeTree/MergeTreeIndices.h>
@ -22,17 +23,13 @@
#include <Interpreters/Context.h>
#include <Interpreters/evaluateConstantExpression.h>

#include <Databases/DatabaseReplicatedHelpers.h>

namespace DB
{
namespace ErrorCodes
{
    extern const int NOT_IMPLEMENTED;
    extern const int BAD_ARGUMENTS;
    extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
    extern const int UNKNOWN_ELEMENT_IN_CONFIG;
    extern const int NO_ELEMENTS_IN_CONFIG;
    extern const int UNKNOWN_STORAGE;
    extern const int NO_REPLICA_NAME_GIVEN;
    extern const int CANNOT_EXTRACT_TABLE_STRUCTURE;
@ -63,171 +60,6 @@ static Names extractColumnNames(const ASTPtr & node)
    }
}

/** Is used to order Graphite::Retentions by age and precision descending.
  * Throws an exception if age and precision are not both less than, or both greater than, the other's.
  */
static bool compareRetentions(const Graphite::Retention & a, const Graphite::Retention & b)
{
    if (a.age > b.age && a.precision > b.precision)
    {
        return true;
    }
    else if (a.age < b.age && a.precision < b.precision)
    {
        return false;
    }
    String error_msg = "age and precision should only grow up: "
        + std::to_string(a.age) + ":" + std::to_string(a.precision) + " vs "
        + std::to_string(b.age) + ":" + std::to_string(b.precision);
    throw Exception(
        error_msg,
        ErrorCodes::BAD_ARGUMENTS);
}
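To make the ordering contract concrete, a hedged sketch with illustrative values: retentions sort by age descending, and precision must shrink together with age, otherwise the comparator throws.

    std::vector<Graphite::Retention> retentions{
        {.age = 0, .precision = 60},
        {.age = 86400, .precision = 3600},
        {.age = 3600, .precision = 300}};
    /// Yields (86400, 3600), (3600, 300), (0, 60).
    std::sort(retentions.begin(), retentions.end(), compareRetentions);
    /// Mixed growth, e.g. {.age = 0, .precision = 300} vs {.age = 3600, .precision = 60},
    /// satisfies neither branch and makes compareRetentions throw BAD_ARGUMENTS.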

/** Read the settings for Graphite rollup from config.
  * Example
  *
  * <graphite_rollup>
  *     <path_column_name>Path</path_column_name>
  *     <pattern>
  *         <regexp>click_cost</regexp>
  *         <function>any</function>
  *         <retention>
  *             <age>0</age>
  *             <precision>3600</precision>
  *         </retention>
  *         <retention>
  *             <age>86400</age>
  *             <precision>60</precision>
  *         </retention>
  *     </pattern>
  *     <default>
  *         <function>max</function>
  *         <retention>
  *             <age>0</age>
  *             <precision>60</precision>
  *         </retention>
  *         <retention>
  *             <age>3600</age>
  *             <precision>300</precision>
  *         </retention>
  *         <retention>
  *             <age>86400</age>
  *             <precision>3600</precision>
  *         </retention>
  *     </default>
  * </graphite_rollup>
  */
static void appendGraphitePattern(
    const Poco::Util::AbstractConfiguration & config,
    const String & config_element,
    Graphite::Patterns & out_patterns,
    ContextPtr context)
{
    Graphite::Pattern pattern;

    Poco::Util::AbstractConfiguration::Keys keys;
    config.keys(config_element, keys);

    for (const auto & key : keys)
    {
        if (key == "regexp")
        {
            pattern.regexp_str = config.getString(config_element + ".regexp");
            pattern.regexp = std::make_shared<OptimizedRegularExpression>(pattern.regexp_str);
        }
        else if (key == "function")
        {
            String aggregate_function_name_with_params = config.getString(config_element + ".function");
            String aggregate_function_name;
            Array params_row;
            getAggregateFunctionNameAndParametersArray(
                aggregate_function_name_with_params, aggregate_function_name, params_row, "GraphiteMergeTree storage initialization", context);

            /// TODO Not only Float64
            AggregateFunctionProperties properties;
            pattern.function = AggregateFunctionFactory::instance().get(
                aggregate_function_name, {std::make_shared<DataTypeFloat64>()}, params_row, properties);
        }
        else if (startsWith(key, "retention"))
        {
            pattern.retentions.emplace_back(Graphite::Retention{
                .age = config.getUInt(config_element + "." + key + ".age"),
                .precision = config.getUInt(config_element + "." + key + ".precision")});
        }
        else
            throw Exception("Unknown element in config: " + key, ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG);
    }

    if (!pattern.function && pattern.retentions.empty())
        throw Exception(
            "At least one of an aggregate function or retention rules is mandatory for rollup patterns in GraphiteMergeTree",
            ErrorCodes::NO_ELEMENTS_IN_CONFIG);

    if (!pattern.function)
    {
        pattern.type = pattern.TypeRetention;
    }
    else if (pattern.retentions.empty())
    {
        pattern.type = pattern.TypeAggregation;
    }
    else
    {
        pattern.type = pattern.TypeAll;
    }

    if (pattern.type & pattern.TypeAggregation) /// TypeAggregation or TypeAll
        if (pattern.function->allocatesMemoryInArena())
            throw Exception(
                "Aggregate function " + pattern.function->getName() + " isn't supported in GraphiteMergeTree", ErrorCodes::NOT_IMPLEMENTED);

    /// retention should be in descending order of age.
    if (pattern.type & pattern.TypeRetention) /// TypeRetention or TypeAll
        std::sort(pattern.retentions.begin(), pattern.retentions.end(), compareRetentions);

    out_patterns.emplace_back(pattern);
}

static void setGraphitePatternsFromConfig(ContextPtr context, const String & config_element, Graphite::Params & params)
{
    const auto & config = context->getConfigRef();

    if (!config.has(config_element))
        throw Exception("No '" + config_element + "' element in configuration file", ErrorCodes::NO_ELEMENTS_IN_CONFIG);

    params.config_name = config_element;
    params.path_column_name = config.getString(config_element + ".path_column_name", "Path");
    params.time_column_name = config.getString(config_element + ".time_column_name", "Time");
    params.value_column_name = config.getString(config_element + ".value_column_name", "Value");
    params.version_column_name = config.getString(config_element + ".version_column_name", "Timestamp");

    Poco::Util::AbstractConfiguration::Keys keys;
    config.keys(config_element, keys);

    for (const auto & key : keys)
    {
        if (startsWith(key, "pattern"))
        {
            appendGraphitePattern(config, config_element + "." + key, params.patterns, context);
        }
        else if (key == "default")
        {
            /// See below.
        }
        else if (key == "path_column_name" || key == "time_column_name" || key == "value_column_name" || key == "version_column_name")
        {
            /// See above.
        }
        else
            throw Exception("Unknown element in config: " + key, ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG);
    }

    if (config.has(config_element + ".default"))
        appendGraphitePattern(config, config_element + "." + ".default", params.patterns, context);
}
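A hedged usage sketch (illustrative): engine setup resolves a config element name into the rollup params.

    Graphite::Params params;
    /// "graphite_rollup" names the <graphite_rollup> element shown in the comment
    /// above; in practice the element name comes from the GraphiteMergeTree engine argument.
    setGraphitePatternsFromConfig(context, "graphite_rollup", params);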


static String getMergeTreeVerboseHelp(bool)
{
    using namespace std::string_literals;
@ -8,9 +8,17 @@
namespace DB
{

namespace ErrorCodes
{
    extern const int BAD_ARGUMENTS;
}

mysqlxx::PoolWithFailover
createMySQLPoolWithFailover(const StorageMySQLConfiguration & configuration, const MySQLSettings & mysql_settings)
{
    if (!mysql_settings.connection_pool_size)
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Connection pool cannot have zero size");

    return mysqlxx::PoolWithFailover(
        configuration.database, configuration.addresses, configuration.username, configuration.password,
        MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_START_CONNECTIONS,
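A hedged sketch of the new zero-size guard at a call site (names illustrative; the pool construction above continues beyond this excerpt):

    MySQLSettings mysql_settings;
    mysql_settings.connection_pool_size = 16;  /// a value of 0 would throw BAD_ARGUMENTS above
    mysqlxx::PoolWithFailover pool = createMySQLPoolWithFailover(configuration, mysql_settings);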
@ -25,11 +25,14 @@ class ASTStorage;
DECLARE_SETTINGS_TRAITS(MySQLSettingsTraits, LIST_OF_MYSQL_SETTINGS)


using MySQLBaseSettings = BaseSettings<MySQLSettingsTraits>;

/** Settings for the MySQL family of engines.
  */
struct MySQLSettings : public BaseSettings<MySQLSettingsTraits>
struct MySQLSettings : public MySQLBaseSettings
{
    void loadFromQuery(ASTStorage & storage_def);
};


}
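A hedged sketch of the usual flow, assuming storage_def is the ASTStorage from the CREATE query:

    /// Pull `SETTINGS connection_pool_size = ..., ...` from the CREATE TABLE
    /// statement into the settings object before building the pool.
    MySQLSettings mysql_settings;
    mysql_settings.loadFromQuery(storage_def);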
@ -104,11 +104,16 @@ void PostgreSQLReplicationHandler::addStorage(const std::string & table_name, St
}


void PostgreSQLReplicationHandler::startup()
void PostgreSQLReplicationHandler::startup(bool delayed)
{
    /// We load tables in a separate thread, because this database is not created yet.
    /// (will get "database is currently dropped or renamed")
    startup_task->activateAndSchedule();
    if (delayed)
    {
        startup_task->activateAndSchedule();
    }
    else
    {
        startSynchronization(/* throw_on_error */ true);
    }
}
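A hedged sketch of the two startup modes (the call sites are illustrative assumptions): a database engine that is still being created would defer synchronization to the background task, while an already-registered consumer can synchronize inline and let errors propagate.

    /// Database not registered yet: schedule the startup task instead of
    /// synchronizing now (avoids "database is currently dropped or renamed").
    replication_handler->startup(/* delayed */ true);

    /// Everything already exists: run synchronization inline;
    /// startSynchronization(/* throw_on_error */ true) surfaces failures.
    replication_handler->startup(/* delayed */ false);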
Some files were not shown because too many files have changed in this diff.