Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-09-20 08:40:50 +00:00)

Commit e915ce1e95: Merge remote-tracking branch 'blessed/master' into perf_experiment

Changed files:

208  .github/workflows/backport_branches.yml (vendored)
The same three substitutions repeat across every job in this workflow (hunks at lines 12, 24, 59, 79, 94, 132, 177, 222, 265, 308, 351, 396, 436, 477, 516, 556, 594, 635, 672, and 706): the explicit "Clear repository" step is dropped, actions/checkout@v2 is replaced by ClickHouse/checkout@v1 with clear-repository: true, and actions/upload-artifact@v2 / actions/download-artifact@v2 are bumped to @v3. The first hunk is representative:

@@ -12,11 +12,10 @@ jobs:
   PythonUnitTests:
     runs-on: [self-hosted, style-checker]
     steps:
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
       - name: Check out repository code
-        uses: actions/checkout@v2
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
       - name: Python unit tests
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"

The build jobs (BUILD_NAME=package_release, package_aarch64, package_asan, package_tsan, package_debug, binary_darwin, binary_darwin_aarch64) keep their existing submodules: true and, where present, fetch-depth: 0 options under the new checkout action, and the artifact steps change only the action version:

-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
-        uses: actions/download-artifact@v2
+        uses: actions/download-artifact@v3
3  .github/workflows/cherry_pick.yml (vendored)

@@ -28,8 +28,9 @@ jobs:
           REPO_TEAM=core
           EOF
       - name: Check out repository code
-        uses: actions/checkout@v2
+        uses: ClickHouse/checkout@v1
         with:
+          clear-repository: true
           token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
           fetch-depth: 0
       - name: Cherry pick
63  .github/workflows/docs_check.yml (vendored)

Hunks at lines 21, 34, 52, 70, 90, 110, 140, and 167 apply the same substitutions as in backport_branches.yml: the "Clear repository" step is removed, actions/checkout@v2 becomes ClickHouse/checkout@v1 with clear-repository: true, and the upload/download artifact actions move from @v2 to @v3. Affected jobs include CheckLabels, the two DockerHubPush jobs, the manifest-merge job that needs [DockerHubPushAmd64, DockerHubPushAarch64], Style Check, Docs Check, and the finish-label job.
40  .github/workflows/docs_release.yml (vendored)

Hunks at lines 23, 58, 78, and 97 make the same changes: the "Clear repository" step is removed, actions/checkout@v2 becomes ClickHouse/checkout@v1 with clear-repository: true, and actions/upload-artifact / actions/download-artifact move from @v2 to @v3 (DockerHubPushAarch64, DockerHubPushAmd64, the manifest-merge job, and the docs release job that uses ROBOT_CLICKHOUSE_SSH_KEY).
12  .github/workflows/jepsen.yml (vendored)

@@ -19,12 +19,10 @@ jobs:
           TEMP_PATH=${{runner.temp}}/keeper_jepsen
           REPO_COPY=${{runner.temp}}/keeper_jepsen/ClickHouse
           EOF
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
       - name: Check out repository code
-        uses: actions/checkout@v2
+        uses: ClickHouse/checkout@v1
         with:
+          clear-repository: true
           fetch-depth: 0
       - name: Jepsen Test
         run: |

The second hunk (@@ -50,12 +48,10 @@) applies the identical change to the commented-out server_jepsen block; every line there keeps its leading "#".
894  .github/workflows/master.yml (vendored): file diff suppressed because it is too large
48  .github/workflows/nightly.yml (vendored)

Hunks at lines 16, 51, 71, 90, and 134 follow the same pattern: the "Clear repository" step is removed, actions/checkout@v2 becomes ClickHouse/checkout@v1 with clear-repository: true, and the upload/download artifact actions move from @v2 to @v3. Specific details: the Docker image checks here run docker_images_check.py with --all; the job that exports COVERITY_TOKEN switches its checkout step (id: coverity-checkout, fetch-depth: 0, submodules: true) to the new action; and the job at line 134 (CC: clang-15, CXX: clang++-15, followed by a "Set up JDK 11" step) replaces its bare "- uses: actions/checkout@v2" with a named "Check out repository code" step using ClickHouse/checkout@v1, clear-repository: true, fetch-depth: 0 (shallow clones disabled for a better relevancy of analysis), and submodules: true.
1188  .github/workflows/pull_request.yml (vendored): file diff suppressed because it is too large
8  .github/workflows/release.yml (vendored)

@@ -20,7 +20,7 @@ jobs:
           REPO_COPY=${{runner.temp}}/release_packages/ClickHouse
           EOF
       - name: Check out repository code
-        uses: actions/checkout@v2
+        uses: ClickHouse/checkout@v1
         with:
           # Always use the most recent script version
           ref: master
@@ -50,12 +50,10 @@ jobs:
   DockerServerImages:
     runs-on: [self-hosted, style-checker]
     steps:
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
       - name: Check out repository code
-        uses: actions/checkout@v2
+        uses: ClickHouse/checkout@v1
         with:
+          clear-repository: true
           fetch-depth: 0 # otherwise we will have no version info
       - name: Check docker clickhouse/clickhouse-server building
         run: |
514  .github/workflows/release_branches.yml (vendored): file diff suppressed because it is too large
2  .github/workflows/tags_stable.yml (vendored)

@@ -34,7 +34,7 @@ jobs:
         run: |
           echo "GITHUB_TAG=${GITHUB_REF#refs/tags/}" >> "$GITHUB_ENV"
       - name: Check out repository code
-        uses: actions/checkout@v2
+        uses: ClickHouse/checkout@v1
         with:
           ref: master
           fetch-depth: 0
6  .github/workflows/woboq.yml (vendored)

@@ -21,12 +21,10 @@ jobs:
           REPO_COPY=${{runner.temp}}/codebrowser/ClickHouse
           IMAGES_PATH=${{runner.temp}}/images_path
           EOF
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
       - name: Check out repository code
-        uses: actions/checkout@v2
+        uses: ClickHouse/checkout@v1
         with:
+          clear-repository: true
           submodules: 'true'
       - name: Codebrowser
         run: |
@@ -377,15 +377,15 @@ set (DEBUG_INFO_FLAGS "-g -gdwarf-4")

 set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMPILER_FLAGS}")
 set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
-set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} -fno-inline ${CMAKE_CXX_FLAGS_ADD}")
+set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")

 set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMPILER_FLAGS} ${CMAKE_C_FLAGS_ADD}")
 set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}")
-set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} -fno-inline ${CMAKE_C_FLAGS_ADD}")
+set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}")

 set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} ${COMPILER_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
 set (CMAKE_ASM_FLAGS_RELWITHDEBINFO "${CMAKE_ASM_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
-set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} -fno-inline ${CMAKE_ASM_FLAGS_ADD}")
+set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")

 if (COMPILER_CLANG)
     if (OS_DARWIN)
@@ -402,10 +402,11 @@ ReplxxLineReader::ReplxxLineReader(
         words.push_back(hs.get().text());
     }

+    std::string current_query(rx.get_state().text());
     std::string new_query;
     try
     {
-        new_query = std::string(skim(words));
+        new_query = std::string(skim(current_query, words));
     }
     catch (const std::exception & e)
     {
2  contrib/cctz (vendored)

@@ -1 +1 @@
-Subproject commit 5c8528fb35e89ee0b3a7157490423fba0d4dd7b5
+Subproject commit 7c78edd52b4d65acc103c2f195818ffcabe6fe0d
@@ -43,7 +43,10 @@ set_target_properties(unwind PROPERTIES FOLDER "contrib/libunwind-cmake")

 target_include_directories(unwind SYSTEM BEFORE PUBLIC $<BUILD_INTERFACE:${LIBUNWIND_SOURCE_DIR}/include>)
 target_compile_definitions(unwind PRIVATE -D_LIBUNWIND_NO_HEAP=1 -D_DEBUG -D_LIBUNWIND_IS_NATIVE_ONLY)
-target_compile_options(unwind PRIVATE -fno-exceptions -funwind-tables -fno-sanitize=all $<$<COMPILE_LANGUAGE:CXX>:-nostdinc++ -fno-rtti>)
+
+# We should enable optimizations (otherwise it will be too slow in debug)
+# and disable sanitizers (otherwise infinite loop may happen)
+target_compile_options(unwind PRIVATE -O3 -fno-exceptions -funwind-tables -fno-sanitize=all $<$<COMPILE_LANGUAGE:CXX>:-nostdinc++ -fno-rtti>)

 check_c_compiler_flag(-Wunused-but-set-variable HAVE_WARNING_UNUSED_BUT_SET_VARIABLE)
 if (HAVE_WARNING_UNUSED_BUT_SET_VARIABLE)
@@ -2,6 +2,7 @@
 <profiles>
     <default>
         <max_execution_time>10</max_execution_time>
+
         <!--
             Don't let the fuzzer change this setting (I've actually seen it
             do this before).
@@ -14,6 +15,11 @@
             <max_memory_usage>
                 <max>10G</max>
             </max_memory_usage>
+
+            <!-- Analyzer is unstable, not ready for testing. -->
+            <allow_experimental_analyzer>
+                <readonly/>
+            </allow_experimental_analyzer>
         </constraints>
     </default>
 </profiles>
@@ -75,6 +75,7 @@ function download
     ./clickhouse ||:
     ln -s ./clickhouse ./clickhouse-server
     ln -s ./clickhouse ./clickhouse-client
+    ln -s ./clickhouse ./clickhouse-local

     # clickhouse-server is in the current dir
     export PATH="$PWD:$PATH"
@@ -91,6 +92,12 @@ function configure
     cp -av --dereference "$script_dir"/query-fuzzer-tweaks-users.xml db/users.d
     cp -av --dereference "$script_dir"/allow-nullable-key.xml db/config.d

+    cat > db/config.d/max_server_memory_usage_to_ram_ratio.xml <<EOL
+<clickhouse>
+    <max_server_memory_usage_to_ram_ratio>0.75</max_server_memory_usage_to_ram_ratio>
+</clickhouse>
+EOL
+
     cat > db/config.d/core.xml <<EOL
 <clickhouse>
     <core_dump>
@@ -256,12 +263,21 @@ quit
     if [ "$server_died" == 1 ]
     then
         # The server has died.
-        task_exit_code=210
-        echo "failure" > status.txt
         if ! zgrep --text -ao "Received signal.*\|Logical error.*\|Assertion.*failed\|Failed assertion.*\|.*runtime error: .*\|.*is located.*\|SUMMARY: AddressSanitizer:.*\|SUMMARY: MemorySanitizer:.*\|SUMMARY: ThreadSanitizer:.*\|.*_LIBCPP_ASSERT.*" server.log.gz > description.txt
         then
             echo "Lost connection to server. See the logs." > description.txt
         fi
+
+        if grep -F --text 'Sanitizer: out-of-memory' description.txt
+        then
+            # OOM of sanitizer is not a problem we can handle - treat it as success, but preserve the description.
+            task_exit_code=0
+            echo "success" > status.txt
+        else
+            task_exit_code=210
+            echo "failure" > status.txt
+        fi
+
     elif [ "$fuzzer_exit_code" == "143" ] || [ "$fuzzer_exit_code" == "0" ]
     then
         # Variants of a normal run:
@@ -352,17 +368,25 @@ th { cursor: pointer; }
 <body>
 <div class="main">

-<h1>AST Fuzzer for PR #${PR_TO_TEST} @ ${SHA_TO_TEST}</h1>
+<h1>AST Fuzzer for PR <a href="https://github.com/ClickHouse/ClickHouse/pull/${PR_TO_TEST}">#${PR_TO_TEST}</a> @ ${SHA_TO_TEST}</h1>
 <p class="links">
-<a href="runlog.log">runlog.log</a>
-<a href="fuzzer.log">fuzzer.log</a>
-<a href="server.log.gz">server.log.gz</a>
-<a href="main.log">main.log</a>
-${CORE_LINK}
+  <a href="run.log">run.log</a>
+  <a href="fuzzer.log">fuzzer.log</a>
+  <a href="server.log.gz">server.log.gz</a>
+  <a href="main.log">main.log</a>
+  ${CORE_LINK}
 </p>
 <table>
-<tr><th>Test name</th><th>Test status</th><th>Description</th></tr>
-<tr><td>AST Fuzzer</td><td>$(cat status.txt)</td><td>$(cat description.txt)</td></tr>
+<tr>
+  <th>Test name</th>
+  <th>Test status</th>
+  <th>Description</th>
+</tr>
+<tr>
+  <td>AST Fuzzer</td>
+  <td>$(cat status.txt)</td>
+  <td style="white-space: pre;">$(clickhouse-local --input-format RawBLOB --output-format RawBLOB --query "SELECT encodeXMLComponent(*) FROM table" < description.txt)</td>
+</tr>
 </table>
 </body>
 </html>
@@ -1,6 +1,6 @@
-Allow to run simple ClickHouse stress test in Docker from debian packages.
+Allows to run simple ClickHouse stress test in Docker from debian packages.
 Actually it runs multiple copies of clickhouse-test (functional tests).
-This allows to find problems like segmentation fault which cause shutdown of server.
+This allows to find problems like failed assertions and memory safety issues.

 Usage:
 ```
@@ -123,6 +123,22 @@ EOL
     <core_path>$PWD</core_path>
 </clickhouse>
 EOL
+
+    # Analyzer is not yet ready for testing
+    cat > /etc/clickhouse-server/users.d/no_analyzer.xml <<EOL
+<clickhouse>
+<profiles>
+    <default>
+        <constraints>
+            <allow_experimental_analyzer>
+                <readonly/>
+            </allow_experimental_analyzer>
+        </constraints>
+    </default>
+</profiles>
+</clickhouse>
+EOL
+
 }

 function stop()
@@ -439,7 +455,8 @@ if [ "$DISABLE_BC_CHECK" -ne "1" ]; then
     # Start new server
     mv package_folder/clickhouse /usr/bin/
     mv package_folder/clickhouse.debug /usr/lib/debug/usr/bin/clickhouse.debug
-    export ZOOKEEPER_FAULT_INJECTION=1
+    # Disable fault injections on start (we don't test them here, and it can lead to tons of requests in case of huge number of tables).
+    export ZOOKEEPER_FAULT_INJECTION=0
     configure
     start 500
     clickhouse-client --query "SELECT 'Backward compatibility check: Server successfully started', 'OK'" >> /test_output/test_results.tsv \
@@ -683,7 +683,7 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)
         ("confidence", value<size_t>()->default_value(5), "set the level of confidence for T-test [0=80%, 1=90%, 2=95%, 3=98%, 4=99%, 5=99.5%(default)")
         ("query_id", value<std::string>()->default_value(""), "")
         ("max-consecutive-errors", value<size_t>()->default_value(0), "set number of allowed consecutive errors")
-        ("continue_on_errors", "continue testing even if a query fails")
+        ("ignore-error,continue_on_errors", "continue testing even if a query fails")
         ("reconnect", "establish new connection for every query")
         ("client-side-time", "display the time including network communication instead of server-side time; note that for server versions before 22.8 we always display client-side time")
         ;
@@ -738,7 +738,7 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)
             options["query_id"].as<std::string>(),
             options["query"].as<std::string>(),
             options["max-consecutive-errors"].as<size_t>(),
-            options.count("continue_on_errors"),
+            options.count("ignore-error"),
             options.count("reconnect"),
             options.count("client-side-time"),
             print_stacktrace,
@@ -1051,18 +1051,12 @@ namespace
         return pid;
     }

-    int stop(const fs::path & pid_file, bool force, bool do_not_kill, unsigned max_tries)
+    bool sendSignalAndWaitForStop(const fs::path & pid_file, int signal, unsigned max_tries, unsigned wait_ms, const char * signal_name)
     {
-        if (force && do_not_kill)
-            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Specified flags are incompatible");
-
         int pid = isRunning(pid_file);

         if (!pid)
-            return 0;
-
-        int signal = force ? SIGKILL : SIGTERM;
-        const char * signal_name = force ? "kill" : "terminate";
+            return true;

         if (0 == kill(pid, signal))
             fmt::print("Sent {} signal to process with pid {}.\n", signal_name, pid);
@@ -1078,46 +1072,51 @@ namespace
                 fmt::print("Server stopped\n");
                 break;
             }
-            sleepForSeconds(1);
+            sleepForMilliseconds(wait_ms);
         }

-        if (try_num == max_tries)
-        {
-            if (do_not_kill)
-            {
-                fmt::print("Process (pid = {}) is still running. Will not try to kill it.\n", pid);
-                return 1;
-            }
+        return try_num < max_tries;
+    }

-            fmt::print("Will terminate forcefully (pid = {}).\n", pid);
-            if (0 == kill(pid, 9))
-                fmt::print("Sent kill signal (pid = {}).\n", pid);
-            else
-                throwFromErrno("Cannot send kill signal", ErrorCodes::SYSTEM_ERROR);
-
-            /// Wait for the process (100 seconds).
-            constexpr size_t num_kill_check_tries = 1000;
-            constexpr size_t kill_check_delay_ms = 100;
-            for (size_t i = 0; i < num_kill_check_tries; ++i)
-            {
-                fmt::print("Waiting for server to be killed\n");
-                if (!isRunning(pid_file))
-                {
-                    fmt::print("Server exited\n");
-                    break;
-                }
-                sleepForMilliseconds(kill_check_delay_ms);
-            }
-
-            if (isRunning(pid_file))
-            {
-                throw Exception(ErrorCodes::CANNOT_KILL,
-                    "The server process still exists after {} tries (delay: {} ms)",
-                    num_kill_check_tries, kill_check_delay_ms);
-            }
+    int stop(const fs::path & pid_file, bool force, bool do_not_kill, unsigned max_tries)
+    {
+        if (force && do_not_kill)
+            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Specified flags are incompatible");
+
+        int signal = force ? SIGKILL : SIGTERM;
+        const char * signal_name = force ? "kill" : "terminate";
+
+        if (sendSignalAndWaitForStop(pid_file, signal, max_tries, 1000, signal_name))
+            return 0;
+
+        int pid = isRunning(pid_file);
+        if (!pid)
+            return 0;
+
+        if (do_not_kill)
+        {
+            fmt::print("Process (pid = {}) is still running. Will not try to kill it.\n", pid);
+            return 1;
         }

-        return 0;
+        /// Send termination signal again, the server will receive it and immediately terminate.
+        fmt::print("Will send the termination signal again to force the termination (pid = {}).\n", pid);
+        if (sendSignalAndWaitForStop(pid_file, signal, std::min(10U, max_tries), 1000, signal_name))
+            return 0;
+
+        /// Send kill signal. Total wait is 100 seconds.
+        constexpr size_t num_kill_check_tries = 1000;
+        constexpr size_t kill_check_delay_ms = 100;
+        fmt::print("Will terminate forcefully (pid = {}).\n", pid);
+        if (sendSignalAndWaitForStop(pid_file, SIGKILL, num_kill_check_tries, kill_check_delay_ms, signal_name))
+            return 0;
+
+        if (!isRunning(pid_file))
+            return 0;
+
+        throw Exception(ErrorCodes::CANNOT_KILL,
+            "The server process still exists after {} tries (delay: {} ms)",
+            num_kill_check_tries, kill_check_delay_ms);
     }
 }
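The control flow after the refactor is easier to see when the three calls to the helper are laid out on their own. Below is a minimal sketch of that escalation policy only, with the pid-file handling, fmt output, and exceptions of the real Install.cpp stripped out; send_and_wait and still_running are illustrative stand-ins, not actual ClickHouse functions.

```cpp
#include <algorithm>
#include <csignal>

// Illustrative stand-ins: a real implementation would kill() the pid from the
// pid file and poll isRunning() between attempts.
static bool send_and_wait(int /*signal*/, unsigned /*tries*/, unsigned /*wait_ms*/) { return false; }
static bool still_running() { return false; }

int stop_server(bool force, bool do_not_kill, unsigned max_tries)
{
    const int signal = force ? SIGKILL : SIGTERM;

    // Stage 1: polite termination, polled once per second, up to max_tries attempts.
    if (send_and_wait(signal, max_tries, 1000))
        return 0;
    if (!still_running())
        return 0;
    if (do_not_kill)
        return 1; // the caller asked us never to escalate

    // Stage 2: repeat the termination signal for a short while (at most 10 tries, 1 s apart).
    if (send_and_wait(signal, std::min(10u, max_tries), 1000))
        return 0;

    // Stage 3: SIGKILL, polled every 100 ms for up to 100 seconds in total.
    if (send_and_wait(SIGKILL, 1000, 100))
        return 0;

    // Corresponds to the CANNOT_KILL exception in the real code.
    return still_running() ? 2 : 0;
}
```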
@@ -703,6 +703,13 @@ try
     global_context->addWarningMessage("Server was built with sanitizer. It will work slowly.");
 #endif

+    const auto memory_amount = getMemoryAmount();
+
+    LOG_INFO(log, "Available RAM: {}; physical cores: {}; logical cores: {}.",
+        formatReadableSizeWithBinarySuffix(memory_amount),
+        getNumberOfPhysicalCPUCores(), // on ARM processors it can show only enabled at current moment cores
+        std::thread::hardware_concurrency());
+
     sanityChecks(*this);

     // Initialize global thread pool. Do it before we fetch configs from zookeeper
@@ -776,8 +783,6 @@ try

     Settings::checkNoSettingNamesAtTopLevel(config(), config_path);

-    const auto memory_amount = getMemoryAmount();
-
 #if defined(OS_LINUX)
     std::string executable_path = getExecutablePath();

@@ -1044,8 +1049,8 @@ try
         bool continue_if_corrupted = config().getBool("merge_tree_metadata_cache.continue_if_corrupted", false);
         try
         {
-            LOG_DEBUG(
-                log, "Initializing merge tree metadata cache lru_cache_size:{} continue_if_corrupted:{}", size, continue_if_corrupted);
+            LOG_DEBUG(log, "Initializing MergeTree metadata cache, lru_cache_size: {} continue_if_corrupted: {}",
+                ReadableSize(size), continue_if_corrupted);
             global_context->initializeMergeTreeMetadataCache(path_str + "/" + "rocksdb", size);
         }
         catch (...)
@@ -1718,13 +1723,6 @@ try
         main_config_reloader->start();
         access_control.startPeriodicReloading();

-        {
-            LOG_INFO(log, "Available RAM: {}; physical cores: {}; logical cores: {}.",
-                formatReadableSizeWithBinarySuffix(memory_amount),
-                getNumberOfPhysicalCPUCores(), // on ARM processors it can show only enabled at current moment cores
-                std::thread::hardware_concurrency());
-        }
-
         /// try to load dictionaries immediately, throw on error and die
         try
         {
1  programs/server/config.d/graphite.xml (new symbolic link)

@@ -0,0 +1 @@
+../../../tests/config/config.d/graphite.xml
@@ -5,4 +5,5 @@ fn main() {
     }
     build.compile("skim");
     println!("cargo:rerun-if-changed=src/lib.rs");
+    println!("cargo:rerun-if-changed=.cargo/config.toml");
 }
@@ -87,4 +87,4 @@ private:
 } // namespace cxxbridge1
 } // namespace rust

-::rust::String skim(::std::vector<::std::string> const &words);
+::rust::String skim(::std::string const &prefix, ::std::vector<::std::string> const &words);
@@ -5,7 +5,7 @@ use cxx::{CxxString, CxxVector};
 #[cxx::bridge]
 mod ffi {
     extern "Rust" {
-        fn skim(words: &CxxVector<CxxString>) -> Result<String>;
+        fn skim(prefix: &CxxString, words: &CxxVector<CxxString>) -> Result<String>;
     }
 }

@@ -18,7 +18,7 @@ impl SkimItem for Item {
     }
 }

-fn skim(words: &CxxVector<CxxString>) -> Result<String, String> {
+fn skim(prefix: &CxxString, words: &CxxVector<CxxString>) -> Result<String, String> {
     // Let's check is terminal available. To avoid panic.
     if let Err(err) = TermInfo::from_env() {
         return Err(format!("{}", err));
@@ -26,6 +26,7 @@ fn skim(words: &CxxVector<CxxString>) -> Result<String, String> {

     let options = SkimOptionsBuilder::default()
         .height(Some("30%"))
+        .query(Some(prefix.to_str().unwrap()))
         .tac(true)
         .tiebreak(Some("-score".to_string()))
         .build()
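On the C++ side of the bridge, the extra parameter means the call site passes the text the user has already typed, so the fuzzy finder opens with that query pre-filled; this is what the ReplxxLineReader hunk above does with rx.get_state().text(). The following self-contained sketch of such a call site stubs out skim(), since the real definition is generated by cxx from the Rust crate and returns ::rust::String rather than std::string.

```cpp
#include <iostream>
#include <string>
#include <vector>

// Stand-in for the cxx-generated declaration
//   ::rust::String skim(::std::string const & prefix, ::std::vector<::std::string> const & words);
// stubbed here so the sketch compiles on its own.
static std::string skim(const std::string & prefix, const std::vector<std::string> & words)
{
    for (const auto & w : words)
        if (w.compare(0, prefix.size(), prefix) == 0)
            return w; // pretend the user picked the first prefix match
    return prefix;
}

int main()
{
    std::vector<std::string> history = {"SELECT 1", "SELECT version()", "SHOW TABLES"};
    std::string current_query = "SELECT v"; // what the user has typed so far

    // With the new signature the finder opens pre-filled with current_query.
    std::cout << skim(current_query, history) << '\n';
}
```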
@@ -72,9 +72,12 @@ AggregateFunctionPtr AggregateFunctionFactory::get(
 {
     auto types_without_low_cardinality = convertLowCardinalityTypesToNested(argument_types);

-    /// If one of the types is Nullable, we apply aggregate function combinator "Null".
-
-    if (std::any_of(types_without_low_cardinality.begin(), types_without_low_cardinality.end(),
+    /// If one of the types is Nullable, we apply aggregate function combinator "Null" if it's not window function.
+    /// Window functions are not real aggregate functions. Applying combinators doesn't make sense for them,
+    /// they must handle the nullability themselves
+    auto properties = tryGetPropertiesImpl(name);
+    bool is_window_function = properties.has_value() && properties->is_window_function;
+    if (!is_window_function && std::any_of(types_without_low_cardinality.begin(), types_without_low_cardinality.end(),
         [](const auto & type) { return type->isNullable(); }))
     {
         AggregateFunctionCombinatorPtr combinator = AggregateFunctionCombinatorFactory::instance().tryFindSuffix("Null");
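To make the intent of the new guard explicit: a Nullable argument normally causes the factory to wrap the function with the "Null" combinator, but window functions are left alone because they handle NULLs themselves. The sketch below condenses that decision into a self-contained function; the Properties struct and tryGetProperties are simplified stand-ins for the factory internals, and "lagInFrame" is used only as an example of a window-function name.

```cpp
#include <algorithm>
#include <iostream>
#include <optional>
#include <string>
#include <vector>

struct Properties { bool is_window_function = false; };

// Stub standing in for the factory's property lookup by function name.
static std::optional<Properties> tryGetProperties(const std::string & name)
{
    Properties p;
    p.is_window_function = (name == "lagInFrame");
    return p;
}

static bool shouldApplyNullCombinator(const std::string & name, const std::vector<bool> & argument_is_nullable)
{
    const auto properties = tryGetProperties(name);
    const bool is_window_function = properties.has_value() && properties->is_window_function;
    const bool has_nullable_argument =
        std::any_of(argument_is_nullable.begin(), argument_is_nullable.end(), [](bool n) { return n; });

    /// Same rule as in the hunk above: wrap with the "Null" combinator only for
    /// ordinary aggregate functions; window functions deal with NULLs on their own.
    return !is_window_function && has_nullable_argument;
}

int main()
{
    std::cout << shouldApplyNullCombinator("sum", {true}) << '\n';        // 1: gets wrapped
    std::cout << shouldApplyNullCombinator("lagInFrame", {true}) << '\n'; // 0: left alone
}
```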
@@ -23,7 +23,7 @@ public:
             throw Exception("Incorrect number of arguments for aggregate function with " + getName() + " suffix",
                 ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);

-        if (!isUInt8(arguments.back()))
+        if (!isUInt8(arguments.back()) && !arguments.back()->onlyNull())
             throw Exception("Illegal type " + arguments.back()->getName() + " of last argument for aggregate function with " + getName() + " suffix",
                 ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
@@ -52,6 +52,7 @@ class AggregateFunctionIfNullUnary final
 private:
     size_t num_arguments;
     bool filter_is_nullable = false;
+    bool filter_is_only_null = false;

     /// The name of the nested function, including combinators (i.e. *If)
     ///
@@ -84,10 +85,8 @@ private:

             return assert_cast<const ColumnUInt8 &>(*filter_column).getData()[row_num] && !filter_null_map[row_num];
         }
-        else
-        {
-            return assert_cast<const ColumnUInt8 &>(*filter_column).getData()[row_num];
-        }
+
+        return assert_cast<const ColumnUInt8 &>(*filter_column).getData()[row_num];
     }

 public:
@@ -106,10 +105,14 @@ public:
                 "Aggregate function {} require at least one argument", getName());

         filter_is_nullable = arguments[num_arguments - 1]->isNullable();
+        filter_is_only_null = arguments[num_arguments - 1]->onlyNull();
     }

     void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena * arena) const override
     {
+        if (filter_is_only_null)
+            return;
+
         const ColumnNullable * column = assert_cast<const ColumnNullable *>(columns[0]);
         const IColumn * nested_column = &column->getNestedColumn();
         if (!column->isNullAt(row_num) && singleFilter(columns, row_num))
@@ -127,6 +130,9 @@ public:
         Arena * arena,
         ssize_t) const override
     {
+        if (filter_is_only_null)
+            return;
+
         const ColumnNullable * column = assert_cast<const ColumnNullable *>(columns[0]);
         const UInt8 * null_map = column->getNullMapData().data();
         const IColumn * columns_param[] = {&column->getNestedColumn()};
@@ -177,6 +183,11 @@ public:

 #if USE_EMBEDDED_COMPILER

+    bool isCompilable() const override
+    {
+        return canBeNativeType(*this->argument_types.back()) && this->nested_function->isCompilable();
+    }
+
     void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
     {
         llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
@@ -224,6 +235,9 @@ class AggregateFunctionIfNullVariadic final : public AggregateFunctionNullBase<
     serialize_flag,
     AggregateFunctionIfNullVariadic<result_is_nullable, serialize_flag>>
 {
+private:
+    bool filter_is_only_null = false;
+
 public:

     String getName() const override
@@ -243,6 +257,8 @@ public:

         for (size_t i = 0; i < number_of_arguments; ++i)
             is_nullable[i] = arguments[i]->isNullable();
+
+        filter_is_only_null = arguments.back()->onlyNull();
     }

     static inline bool singleFilter(const IColumn ** columns, size_t row_num, size_t num_arguments)
@@ -282,6 +298,9 @@ public:
     void addBatchSinglePlace(
         size_t row_begin, size_t row_end, AggregateDataPtr __restrict place, const IColumn ** columns, Arena * arena, ssize_t) const final
     {
+        if (filter_is_only_null)
+            return;
+
         std::unique_ptr<UInt8[]> final_null_flags = std::make_unique<UInt8[]>(row_end);
         const size_t filter_column_num = number_of_arguments - 1;

@@ -346,6 +365,11 @@ public:

 #if USE_EMBEDDED_COMPILER

+    bool isCompilable() const override
+    {
+        return canBeNativeType(*this->argument_types.back()) && this->nested_function->isCompilable();
+    }
+
     void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
     {
         /// TODO: Check
@@ -42,7 +42,7 @@ public:
         if (num_arguments == 0)
             throw Exception("Aggregate function " + getName() + " require at least one argument", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);

-        if (!isUInt8(types.back()))
+        if (!isUInt8(types.back()) && !types.back()->onlyNull())
             throw Exception("Last argument for aggregate function " + getName() + " must be UInt8", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
     }
@@ -199,12 +199,16 @@ public:

     AggregateFunctionPtr getNestedFunction() const override { return nested_func; }

+    std::unordered_set<size_t> getArgumentsThatCanBeOnlyNull() const override
+    {
+        return {num_arguments - 1};
+    }
+
 #if USE_EMBEDDED_COMPILER

     bool isCompilable() const override
     {
-        return nested_func->isCompilable();
+        return canBeNativeType(*this->argument_types.back()) && nested_func->isCompilable();
     }

     void compileCreate(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) const override
@ -29,7 +29,13 @@ public:
|
||||
size_t size = arguments.size();
|
||||
DataTypes res(size);
|
||||
for (size_t i = 0; i < size; ++i)
|
||||
res[i] = removeNullable(arguments[i]);
|
||||
{
|
||||
/// Nullable(Nothing) is processed separately, don't convert it to Nothing.
|
||||
if (arguments[i]->onlyNull())
|
||||
res[i] = arguments[i];
|
||||
else
|
||||
res[i] = removeNullable(arguments[i]);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
@ -41,12 +47,16 @@ public:
|
||||
{
|
||||
bool has_nullable_types = false;
|
||||
bool has_null_types = false;
|
||||
for (const auto & arg_type : arguments)
|
||||
std::unordered_set<size_t> arguments_that_can_be_only_null;
|
||||
if (nested_function)
|
||||
arguments_that_can_be_only_null = nested_function->getArgumentsThatCanBeOnlyNull();
|
||||
|
||||
for (size_t i = 0; i < arguments.size(); ++i)
|
||||
{
|
||||
if (arg_type->isNullable())
|
||||
if (arguments[i]->isNullable())
|
||||
{
|
||||
has_nullable_types = true;
|
||||
if (arg_type->onlyNull())
|
||||
if (arguments[i]->onlyNull() && !arguments_that_can_be_only_null.contains(i))
|
||||
{
|
||||
has_null_types = true;
|
||||
break;
|
||||
|
@ -468,6 +468,7 @@ public:
    bool keepKey(const T &) const { return true; }
};


template <typename T, bool overflow, bool tuple_argument>
class AggregateFunctionSumMapFiltered final :
    public AggregateFunctionMapBase<T,
@ -502,6 +503,8 @@ public:
                "Aggregate function {} requires an Array as a parameter",
                getNameImpl());

        this->parameters = params_;

        keys_to_keep.reserve(keys_to_keep_values.size());

        for (const Field & f : keys_to_keep_values)
@ -509,7 +512,16 @@ public:
    }

    static String getNameImpl()
    { return overflow ? "sumMapFilteredWithOverflow" : "sumMapFiltered"; }
    {
        if constexpr (overflow)
        {
            return "sumMapFilteredWithOverflow";
        }
        else
        {
            return "sumMapFiltered";
        }
    }

    bool keepKey(const T & key) const { return keys_to_keep.count(key); }
};

@ -345,6 +345,14 @@ public:
        return nullptr;
    }

    /// For most functions if one of arguments is always NULL, we return NULL (it's implemented in combinator Null),
    /// but in some functions we can want to process this argument somehow (for example condition argument in If combinator).
    /// This method returns the set of argument indexes that can be always NULL, they will be skipped in combinator Null.
    virtual std::unordered_set<size_t> getArgumentsThatCanBeOnlyNull() const
    {
        return {};
    }

    /** Return the nested function if this is an Aggregate Function Combinator.
      * Otherwise return nullptr.
      */
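Sketch (illustrative only, with simplified stand-in types rather than the real ClickHouse hierarchy) of how this hook is meant to be used: the base interface reports no such arguments, while an -If-style combinator exempts its condition argument, mirroring the getArgumentsThatCanBeOnlyNull override added to the If combinator earlier in this diff.

#include <cassert>
#include <cstddef>
#include <unordered_set>

// Simplified stand-in for the interface above; all names are illustrative.
struct IAggregateFunctionLike
{
    virtual ~IAggregateFunctionLike() = default;

    /// By default no argument is allowed to be "only NULL".
    virtual std::unordered_set<std::size_t> getArgumentsThatCanBeOnlyNull() const { return {}; }
};

struct IfCombinatorLike : IAggregateFunctionLike
{
    std::size_t num_arguments;
    explicit IfCombinatorLike(std::size_t num_arguments_) : num_arguments(num_arguments_) {}

    /// The last argument is the -If condition; it may legitimately be Nullable(Nothing).
    std::unordered_set<std::size_t> getArgumentsThatCanBeOnlyNull() const override { return {num_arguments - 1}; }
};

int main()
{
    IfCombinatorLike count_if(2);
    assert(count_if.getArgumentsThatCanBeOnlyNull().count(1) == 1);
}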
@ -828,6 +836,9 @@ struct AggregateFunctionProperties
|
||||
* Some may also name this property as "non-commutative".
|
||||
*/
|
||||
bool is_order_dependent = false;
|
||||
|
||||
/// Indicates if it's actually window function.
|
||||
bool is_window_function = false;
|
||||
};
|
||||
|
||||
|
||||
|
@ -81,6 +81,7 @@ public:
|
||||
if (nested_if_function_arguments_nodes.size() != 3)
|
||||
return;
|
||||
|
||||
auto & cond_argument = nested_if_function_arguments_nodes[0];
|
||||
const auto * if_true_condition_constant_node = nested_if_function_arguments_nodes[1]->as<ConstantNode>();
|
||||
const auto * if_false_condition_constant_node = nested_if_function_arguments_nodes[2]->as<ConstantNode>();
|
||||
|
||||
@ -107,8 +108,8 @@ public:
|
||||
return;
|
||||
}
|
||||
|
||||
/// Rewrite `sum(if(cond, 0, 1))` into `countIf(not(cond))`.
|
||||
if (if_true_condition_value == 0 && if_false_condition_value == 1)
|
||||
/// Rewrite `sum(if(cond, 0, 1))` into `countIf(not(cond))` if condition is not Nullable (otherwise the result can be different).
|
||||
if (if_true_condition_value == 0 && if_false_condition_value == 1 && !cond_argument->getResultType()->isNullable())
|
||||
{
|
||||
DataTypePtr not_function_result_type = std::make_shared<DataTypeUInt8>();
|
||||
|
||||
|
@ -188,15 +188,6 @@ std::optional<FileInfo> BackupCoordinationLocal::getFileInfo(const SizeAndChecks
|
||||
return it->second;
|
||||
}
|
||||
|
||||
std::optional<SizeAndChecksum> BackupCoordinationLocal::getFileSizeAndChecksum(const String & file_name) const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
auto it = file_names.find(file_name);
|
||||
if (it == file_names.end())
|
||||
return std::nullopt;
|
||||
return it->second;
|
||||
}
|
||||
|
||||
String BackupCoordinationLocal::getNextArchiveSuffix()
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
|
@ -48,7 +48,6 @@ public:
|
||||
|
||||
std::optional<FileInfo> getFileInfo(const String & file_name) const override;
|
||||
std::optional<FileInfo> getFileInfo(const SizeAndChecksum & size_and_checksum) const override;
|
||||
std::optional<SizeAndChecksum> getFileSizeAndChecksum(const String & file_name) const override;
|
||||
|
||||
String getNextArchiveSuffix() override;
|
||||
Strings getAllArchiveSuffixes() const override;
|
||||
|
@ -575,15 +575,6 @@ std::optional<FileInfo> BackupCoordinationRemote::getFileInfo(const SizeAndCheck
|
||||
return deserializeFileInfo(file_info_str);
|
||||
}
|
||||
|
||||
std::optional<SizeAndChecksum> BackupCoordinationRemote::getFileSizeAndChecksum(const String & file_name) const
|
||||
{
|
||||
auto zk = getZooKeeper();
|
||||
String size_and_checksum;
|
||||
if (!zk->tryGet(zookeeper_path + "/file_names/" + escapeForFileName(file_name), size_and_checksum))
|
||||
return std::nullopt;
|
||||
return deserializeSizeAndChecksum(size_and_checksum);
|
||||
}
|
||||
|
||||
String BackupCoordinationRemote::getNextArchiveSuffix()
|
||||
{
|
||||
auto zk = getZooKeeper();
|
||||
|
@ -51,7 +51,6 @@ public:
|
||||
bool hasFiles(const String & directory) const override;
|
||||
std::optional<FileInfo> getFileInfo(const String & file_name) const override;
|
||||
std::optional<FileInfo> getFileInfo(const SizeAndChecksum & size_and_checksum) const override;
|
||||
std::optional<SizeAndChecksum> getFileSizeAndChecksum(const String & file_name) const override;
|
||||
|
||||
String getNextArchiveSuffix() override;
|
||||
Strings getAllArchiveSuffixes() const override;
|
||||
|
@ -34,6 +34,7 @@ public:
|
||||
bool is_internal_backup = false;
|
||||
std::shared_ptr<IBackupCoordination> backup_coordination;
|
||||
std::optional<UUID> backup_uuid;
|
||||
bool deduplicate_files = true;
|
||||
};
|
||||
|
||||
static BackupFactory & instance();
|
||||
|
@ -80,6 +80,12 @@ namespace
            throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
        return outcome.GetResult().GetContents();
    }

    bool isNotFoundError(Aws::S3::S3Errors error)
    {
        return error == Aws::S3::S3Errors::RESOURCE_NOT_FOUND
            || error == Aws::S3::S3Errors::NO_SUCH_KEY;
    }
}


@ -370,7 +376,7 @@ void BackupWriterS3::removeFile(const String & file_name)
    request.SetBucket(s3_uri.bucket);
    request.SetKey(fs::path(s3_uri.key) / file_name);
    auto outcome = client->DeleteObject(request);
    if (!outcome.IsSuccess())
    if (!outcome.IsSuccess() && !isNotFoundError(outcome.GetError().GetErrorType()))
        throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
}

@ -428,7 +434,7 @@ void BackupWriterS3::removeFilesBatch(const Strings & file_names)
        request.SetDelete(delkeys);

        auto outcome = client->DeleteObjects(request);
        if (!outcome.IsSuccess())
        if (!outcome.IsSuccess() && !isNotFoundError(outcome.GetError().GetErrorType()))
            throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
    }
}

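The removeFile/removeFilesBatch hunks above make backup cleanup idempotent: a delete whose key is already gone is no longer treated as an error. A self-contained sketch of the same pattern with stubbed outcome types (hypothetical names, not the AWS SDK API used above):

#include <iostream>
#include <stdexcept>
#include <string>

// Stand-ins for the SDK types; illustrative only.
enum class S3Error { NONE, RESOURCE_NOT_FOUND, NO_SUCH_KEY, ACCESS_DENIED };

struct DeleteOutcome
{
    bool success = false;
    S3Error error = S3Error::NONE;
    std::string message;
};

static bool isNotFoundError(S3Error error)
{
    return error == S3Error::RESOURCE_NOT_FOUND || error == S3Error::NO_SUCH_KEY;
}

// Treat "object already gone" as success so retried removals stay idempotent.
void removeObject(const DeleteOutcome & outcome)
{
    if (!outcome.success && !isNotFoundError(outcome.error))
        throw std::runtime_error(outcome.message);
}

int main()
{
    removeObject({false, S3Error::NO_SUCH_KEY, "NoSuchKey"});   // tolerated: key was already deleted
    std::cout << "missing key tolerated\n";
}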
@ -167,17 +167,19 @@ BackupImpl::BackupImpl(
|
||||
const ContextPtr & context_,
|
||||
bool is_internal_backup_,
|
||||
const std::shared_ptr<IBackupCoordination> & coordination_,
|
||||
const std::optional<UUID> & backup_uuid_)
|
||||
const std::optional<UUID> & backup_uuid_,
|
||||
bool deduplicate_files_)
|
||||
: backup_name_for_logging(backup_name_for_logging_)
|
||||
, archive_params(archive_params_)
|
||||
, use_archives(!archive_params.archive_name.empty())
|
||||
, open_mode(OpenMode::WRITE)
|
||||
, writer(std::move(writer_))
|
||||
, is_internal_backup(is_internal_backup_)
|
||||
, coordination(coordination_ ? coordination_ : std::make_shared<BackupCoordinationLocal>())
|
||||
, coordination(coordination_)
|
||||
, uuid(backup_uuid_)
|
||||
, version(CURRENT_BACKUP_VERSION)
|
||||
, base_backup_info(base_backup_info_)
|
||||
, deduplicate_files(deduplicate_files_)
|
||||
, log(&Poco::Logger::get("BackupImpl"))
|
||||
{
|
||||
open(context_);
|
||||
@ -287,6 +289,7 @@ void BackupImpl::writeBackupMetadata()
|
||||
|
||||
Poco::AutoPtr<Poco::Util::XMLConfiguration> config{new Poco::Util::XMLConfiguration()};
|
||||
config->setInt("version", CURRENT_BACKUP_VERSION);
|
||||
config->setBool("deduplicate_files", deduplicate_files);
|
||||
config->setString("timestamp", toString(LocalDateTime{timestamp}));
|
||||
config->setString("uuid", toString(*uuid));
|
||||
|
||||
@ -759,7 +762,7 @@ void BackupImpl::writeFile(const String & file_name, BackupEntryPtr entry)
|
||||
};
|
||||
|
||||
/// Empty file, nothing to backup
|
||||
if (info.size == 0)
|
||||
if (info.size == 0 && deduplicate_files)
|
||||
{
|
||||
coordination->addFileInfo(info);
|
||||
return;
|
||||
@ -828,7 +831,7 @@ void BackupImpl::writeFile(const String & file_name, BackupEntryPtr entry)
|
||||
}
|
||||
|
||||
/// Maybe we have a copy of this file in the backup already.
|
||||
if (coordination->getFileInfo(std::pair{info.size, info.checksum}))
|
||||
if (coordination->getFileInfo(std::pair{info.size, info.checksum}) && deduplicate_files)
|
||||
{
|
||||
LOG_TRACE(log, "File {} already exist in current backup, adding reference", adjusted_path);
|
||||
coordination->addFileInfo(info);
|
||||
@ -861,7 +864,7 @@ void BackupImpl::writeFile(const String & file_name, BackupEntryPtr entry)
|
||||
|
||||
bool is_data_file_required;
|
||||
coordination->addFileInfo(info, is_data_file_required);
|
||||
if (!is_data_file_required)
|
||||
if (!is_data_file_required && deduplicate_files)
|
||||
{
|
||||
LOG_TRACE(log, "File {} doesn't exist in current backup, but we have file with same size and checksum", adjusted_path);
|
||||
return; /// We copy data only if it's a new combination of size & checksum.
|
||||
|
@ -47,9 +47,10 @@ public:
|
||||
const std::optional<BackupInfo> & base_backup_info_,
|
||||
std::shared_ptr<IBackupWriter> writer_,
|
||||
const ContextPtr & context_,
|
||||
bool is_internal_backup_ = false,
|
||||
const std::shared_ptr<IBackupCoordination> & coordination_ = {},
|
||||
const std::optional<UUID> & backup_uuid_ = {});
|
||||
bool is_internal_backup_,
|
||||
const std::shared_ptr<IBackupCoordination> & coordination_,
|
||||
const std::optional<UUID> & backup_uuid_,
|
||||
bool deduplicate_files_);
|
||||
|
||||
~BackupImpl() override;
|
||||
|
||||
@ -132,6 +133,7 @@ private:
|
||||
String lock_file_name;
|
||||
std::atomic<size_t> num_files_written = 0;
|
||||
bool writing_finalized = false;
|
||||
bool deduplicate_files = true;
|
||||
const Poco::Logger * log;
|
||||
};
|
||||
|
||||
|
@ -65,6 +65,7 @@ namespace
|
||||
M(String, password) \
|
||||
M(Bool, structure_only) \
|
||||
M(Bool, async) \
|
||||
M(Bool, deduplicate_files) \
|
||||
M(UInt64, shard_num) \
|
||||
M(UInt64, replica_num) \
|
||||
M(Bool, internal) \
|
||||
|
@ -32,6 +32,9 @@ struct BackupSettings
|
||||
/// Whether the BACKUP command must return immediately without waiting until the backup has completed.
|
||||
bool async = false;
|
||||
|
||||
/// Whether the BACKUP will omit similar files (within one backup only).
|
||||
bool deduplicate_files = true;
|
||||
|
||||
/// 1-based shard index to store in the backup. 0 means all shards.
|
||||
/// Can only be used with BACKUP ON CLUSTER.
|
||||
size_t shard_num = 0;
|
||||
|
@ -286,6 +286,7 @@ void BackupsWorker::doBackup(
|
||||
backup_create_params.is_internal_backup = backup_settings.internal;
|
||||
backup_create_params.backup_coordination = backup_coordination;
|
||||
backup_create_params.backup_uuid = backup_settings.backup_uuid;
|
||||
backup_create_params.deduplicate_files = backup_settings.deduplicate_files;
|
||||
BackupMutablePtr backup = BackupFactory::instance().createBackup(backup_create_params);
|
||||
|
||||
/// Write the backup.
|
||||
|
@ -108,7 +108,6 @@ public:
|
||||
|
||||
virtual std::optional<FileInfo> getFileInfo(const String & file_name) const = 0;
|
||||
virtual std::optional<FileInfo> getFileInfo(const SizeAndChecksum & size_and_checksum) const = 0;
|
||||
virtual std::optional<SizeAndChecksum> getFileSizeAndChecksum(const String & file_name) const = 0;
|
||||
|
||||
/// Generates a new archive suffix, e.g. "001", "002", "003", ...
|
||||
virtual String getNextArchiveSuffix() = 0;
|
||||
|
@ -116,7 +116,16 @@ void registerBackupEngineS3(BackupFactory & factory)
|
||||
else
|
||||
{
|
||||
auto writer = std::make_shared<BackupWriterS3>(S3::URI{s3_uri}, access_key_id, secret_access_key, params.context);
|
||||
return std::make_unique<BackupImpl>(backup_name_for_logging, archive_params, params.base_backup_info, writer, params.context, params.is_internal_backup, params.backup_coordination, params.backup_uuid);
|
||||
return std::make_unique<BackupImpl>(
|
||||
backup_name_for_logging,
|
||||
archive_params,
|
||||
params.base_backup_info,
|
||||
writer,
|
||||
params.context,
|
||||
params.is_internal_backup,
|
||||
params.backup_coordination,
|
||||
params.backup_uuid,
|
||||
params.deduplicate_files);
|
||||
}
|
||||
#else
|
||||
throw Exception("S3 support is disabled", ErrorCodes::SUPPORT_IS_DISABLED);
|
||||
|
@ -181,7 +181,16 @@ void registerBackupEnginesFileAndDisk(BackupFactory & factory)
|
||||
writer = std::make_shared<BackupWriterFile>(path);
|
||||
else
|
||||
writer = std::make_shared<BackupWriterDisk>(disk, path);
|
||||
return std::make_unique<BackupImpl>(backup_name_for_logging, archive_params, params.base_backup_info, writer, params.context, params.is_internal_backup, params.backup_coordination, params.backup_uuid);
|
||||
return std::make_unique<BackupImpl>(
|
||||
backup_name_for_logging,
|
||||
archive_params,
|
||||
params.base_backup_info,
|
||||
writer,
|
||||
params.context,
|
||||
params.is_internal_backup,
|
||||
params.backup_coordination,
|
||||
params.backup_uuid,
|
||||
params.deduplicate_files);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -344,7 +344,7 @@ set_source_files_properties(
|
||||
Common/Elf.cpp
|
||||
Common/Dwarf.cpp
|
||||
Common/SymbolIndex.cpp
|
||||
PROPERTIES COMPILE_FLAGS "-O3 ${WITHOUT_COVERAGE}")
|
||||
PROPERTIES COMPILE_FLAGS "-O2 ${WITHOUT_COVERAGE}")
|
||||
|
||||
target_link_libraries (clickhouse_common_io
|
||||
PRIVATE
|
||||
|
@ -2,12 +2,7 @@
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/setThreadName.h>
|
||||
#include <Common/CurrentMetrics.h>
|
||||
#include <Common/typeid_cast.h>
|
||||
#include <Common/filesystemHelpers.h>
|
||||
#include <Common/getCurrentProcessFDCount.h>
|
||||
#include <Common/getMaxFileDescriptorCount.h>
|
||||
#include <Interpreters/Cache/FileCache.h>
|
||||
#include <Server/ProtocolServerAdapter.h>
|
||||
#include <IO/UncompressedCache.h>
|
||||
#include <IO/MMappedFileCache.h>
|
||||
#include <IO/ReadHelpers.h>
|
||||
|
@ -156,9 +156,10 @@ ReturnType ThreadPoolImpl<Thread>::scheduleImpl(Job job, ssize_t priority, std:
            propagate_opentelemetry_tracing_context ? DB::OpenTelemetry::CurrentContext() : DB::OpenTelemetry::TracingContextOnThread());

        ++scheduled_jobs;
        new_job_or_shutdown.notify_one();
    }

    new_job_or_shutdown.notify_one();

    return static_cast<ReturnType>(true);
}

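The hunk above moves notify_one() out of the locked scope, so the woken worker does not immediately block on the mutex the scheduler still holds. A minimal generic sketch of that idiom (plain standard library, not the ClickHouse ThreadPool):

#include <condition_variable>
#include <mutex>
#include <queue>

std::mutex queue_mutex;
std::condition_variable new_job_or_shutdown;
std::queue<int> jobs;

void schedule(int job)
{
    {
        std::lock_guard<std::mutex> lock(queue_mutex);  /// mutate shared state under the lock
        jobs.push(job);
    }
    new_job_or_shutdown.notify_one();  /// notify after unlocking, so the woken thread can take the mutex at once
}

int main()
{
    schedule(42);
}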
@ -36,7 +36,7 @@ std::string ZooKeeperRequest::toString() const
|
||||
"OpNum = {}\n"
|
||||
"Additional info:\n{}",
|
||||
xid,
|
||||
getOpNum(),
|
||||
Coordination::toString(getOpNum()),
|
||||
toStringImpl());
|
||||
}
|
||||
|
||||
|
@ -342,7 +342,6 @@ ZooKeeper::ZooKeeper(
|
||||
default_acls.emplace_back(std::move(acl));
|
||||
}
|
||||
|
||||
|
||||
/// It makes sense (especially, for async requests) to inject a fault in two places:
|
||||
/// pushRequest (before request is sent) and receiveEvent (after request was executed).
|
||||
if (0 < args.send_fault_probability && args.send_fault_probability <= 1)
|
||||
@ -676,7 +675,7 @@ void ZooKeeper::receiveThread()
|
||||
if (earliest_operation)
|
||||
{
|
||||
throw Exception(Error::ZOPERATIONTIMEOUT, "Operation timeout (no response) for request {} for path: {}",
|
||||
earliest_operation->request->getOpNum(), earliest_operation->request->getPath());
|
||||
toString(earliest_operation->request->getOpNum()), earliest_operation->request->getPath());
|
||||
}
|
||||
waited_us += max_wait_us;
|
||||
if (waited_us >= args.session_timeout_ms * 1000)
|
||||
@ -870,7 +869,7 @@ void ZooKeeper::finalize(bool error_send, bool error_receive, const String & rea
|
||||
if (already_started)
|
||||
return;
|
||||
|
||||
LOG_INFO(log, "Finalizing session {}: finalization_started={}, queue_finished={}, reason={}",
|
||||
LOG_INFO(log, "Finalizing session {}. finalization_started: {}, queue_finished: {}, reason: '{}'",
|
||||
session_id, already_started, requests_queue.isFinished(), reason);
|
||||
|
||||
auto expire_session_if_not_expired = [&]
|
||||
|
@ -132,7 +132,7 @@ void assertDigest(
|
||||
"Digest for nodes is not matching after {} request of type '{}'.\nExpected digest - {}, actual digest - {} (digest "
|
||||
"{}). Keeper will terminate to avoid inconsistencies.\nExtra information about the request:\n{}",
|
||||
committing ? "committing" : "preprocessing",
|
||||
request.getOpNum(),
|
||||
Coordination::toString(request.getOpNum()),
|
||||
first.value,
|
||||
second.value,
|
||||
first.version,
|
||||
|
@ -1704,7 +1704,7 @@ struct KeeperStorageMultiRequestProcessor final : public KeeperStorageRequestPro
|
||||
break;
|
||||
default:
|
||||
throw DB::Exception(
|
||||
ErrorCodes::BAD_ARGUMENTS, "Illegal command as part of multi ZooKeeper request {}", sub_zk_request->getOpNum());
|
||||
ErrorCodes::BAD_ARGUMENTS, "Illegal command as part of multi ZooKeeper request {}", Coordination::toString(sub_zk_request->getOpNum()));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -179,12 +179,9 @@ __attribute__((__weak__)) void collectCrashLog(
|
||||
class SignalListener : public Poco::Runnable
|
||||
{
|
||||
public:
|
||||
enum Signals : int
|
||||
{
|
||||
StdTerminate = -1,
|
||||
StopThread = -2,
|
||||
SanitizerTrap = -3,
|
||||
};
|
||||
static constexpr int StdTerminate = -1;
|
||||
static constexpr int StopThread = -2;
|
||||
static constexpr int SanitizerTrap = -3;
|
||||
|
||||
explicit SignalListener(BaseDaemon & daemon_)
|
||||
: log(&Poco::Logger::get("BaseDaemon"))
|
||||
@ -209,7 +206,7 @@ public:
|
||||
// Don't use strsignal here, because it's not thread-safe.
|
||||
LOG_TRACE(log, "Received signal {}", sig);
|
||||
|
||||
if (sig == Signals::StopThread)
|
||||
if (sig == StopThread)
|
||||
{
|
||||
LOG_INFO(log, "Stop SignalListener thread");
|
||||
break;
|
||||
@ -220,7 +217,7 @@ public:
|
||||
BaseDaemon::instance().closeLogs(BaseDaemon::instance().logger());
|
||||
LOG_INFO(log, "Opened new log file after received signal.");
|
||||
}
|
||||
else if (sig == Signals::StdTerminate)
|
||||
else if (sig == StdTerminate)
|
||||
{
|
||||
UInt32 thread_num;
|
||||
std::string message;
|
||||
@ -959,7 +956,6 @@ void BaseDaemon::handleSignal(int signal_id)
|
||||
std::lock_guard lock(signal_handler_mutex);
|
||||
{
|
||||
++terminate_signals_counter;
|
||||
sigint_signals_counter += signal_id == SIGINT;
|
||||
signal_event.notify_all();
|
||||
}
|
||||
|
||||
@ -974,9 +970,9 @@ void BaseDaemon::onInterruptSignals(int signal_id)
|
||||
is_cancelled = true;
|
||||
LOG_INFO(&logger(), "Received termination signal ({})", strsignal(signal_id)); // NOLINT(concurrency-mt-unsafe) // it is not thread-safe but ok in this context
|
||||
|
||||
if (sigint_signals_counter >= 2)
|
||||
if (terminate_signals_counter >= 2)
|
||||
{
|
||||
LOG_INFO(&logger(), "Received second signal Interrupt. Immediately terminate.");
|
||||
LOG_INFO(&logger(), "This is the second termination signal. Immediately terminate.");
|
||||
call_default_signal_handler(signal_id);
|
||||
/// If the above did not help.
|
||||
_exit(128 + signal_id);
|
||||
|
@ -162,7 +162,6 @@ protected:
|
||||
std::mutex signal_handler_mutex;
|
||||
std::condition_variable signal_event;
|
||||
std::atomic_size_t terminate_signals_counter{0};
|
||||
std::atomic_size_t sigint_signals_counter{0};
|
||||
|
||||
std::string config_path;
|
||||
DB::ConfigProcessor::LoadedConfig loaded_config;
|
||||
|
@ -67,7 +67,7 @@ String DataTypeAggregateFunction::getNameImpl(bool with_version) const
|
||||
if (!parameters.empty())
|
||||
{
|
||||
stream << '(';
|
||||
for (size_t i = 0; i < parameters.size(); ++i)
|
||||
for (size_t i = 0, size = parameters.size(); i < size; ++i)
|
||||
{
|
||||
if (i)
|
||||
stream << ", ";
|
||||
|
@ -318,6 +318,12 @@ DataTypePtr tryInferDataTypeByEscapingRule(const String & field, const FormatSet
            if (auto date_type = tryInferDateOrDateTimeFromString(field, format_settings))
                return date_type;

            /// Special case when we have number that starts with 0. In TSV we don't parse such numbers,
            /// see readIntTextUnsafe in ReadHelpers.h. If we see data started with 0, we can determine it
            /// as a String, so parsing won't fail.
            if (field[0] == '0' && field.size() != 1)
                return std::make_shared<DataTypeString>();

            auto type = tryInferDataTypeForSingleField(field, format_settings);
            if (!type)
                return std::make_shared<DataTypeString>();

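A standalone sketch of the leading-zero special case above; it mirrors only the literal check shown here, while the real function tries dates, times and the other escaping rules first:

#include <cassert>
#include <string>

/// Mirrors the check above: a field that starts with '0' and has more than one
/// character is inferred as String, because readIntTextUnsafe-style parsing
/// would reject values such as "0123".
bool inferLeadingZeroFieldAsString(const std::string & field)
{
    return !field.empty() && field[0] == '0' && field.size() != 1;
}

int main()
{
    assert(!inferLeadingZeroFieldAsString("0"));      /// a single zero stays numeric
    assert(inferLeadingZeroFieldAsString("0123"));    /// would not round-trip as an integer
    assert(inferLeadingZeroFieldAsString("0.5"));     /// also matches the literal check shown above
}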
@ -250,7 +250,7 @@ namespace
|
||||
{
|
||||
if (isArray(type))
|
||||
nested_types.push_back(assert_cast<const DataTypeArray &>(*type).getNestedType());
|
||||
else
|
||||
else if (isTuple(type))
|
||||
{
|
||||
const auto & elements = assert_cast<const DataTypeTuple &>(*type).getElements();
|
||||
for (const auto & element : elements)
|
||||
@ -262,7 +262,10 @@ namespace
|
||||
if (checkIfTypesAreEqual(nested_types))
|
||||
{
|
||||
for (auto & type : data_types)
|
||||
type = std::make_shared<DataTypeArray>(nested_types.back());
|
||||
{
|
||||
if (isArray(type) || isTuple(type))
|
||||
type = std::make_shared<DataTypeArray>(nested_types.back());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -826,14 +829,40 @@ void transformInferredJSONTypesIfNeeded(
|
||||
|
||||
void transformJSONTupleToArrayIfPossible(DataTypePtr & data_type, const FormatSettings & settings, JSONInferenceInfo * json_info)
|
||||
{
|
||||
if (!data_type || !isTuple(data_type))
|
||||
if (!data_type)
|
||||
return;
|
||||
|
||||
const auto * tuple_type = assert_cast<const DataTypeTuple *>(data_type.get());
|
||||
auto nested_types = tuple_type->getElements();
|
||||
transformInferredTypesIfNeededImpl<true>(nested_types, settings, json_info);
|
||||
if (checkIfTypesAreEqual(nested_types))
|
||||
data_type = std::make_shared<DataTypeArray>(nested_types.back());
|
||||
if (const auto * array_type = typeid_cast<const DataTypeArray *>(data_type.get()))
|
||||
{
|
||||
auto nested_type = array_type->getNestedType();
|
||||
transformJSONTupleToArrayIfPossible(nested_type, settings, json_info);
|
||||
data_type = std::make_shared<DataTypeArray>(nested_type);
|
||||
return;
|
||||
}
|
||||
|
||||
if (const auto * map_type = typeid_cast<const DataTypeMap *>(data_type.get()))
|
||||
{
|
||||
auto value_type = map_type->getValueType();
|
||||
transformJSONTupleToArrayIfPossible(value_type, settings, json_info);
|
||||
data_type = std::make_shared<DataTypeMap>(map_type->getKeyType(), value_type);
|
||||
return;
|
||||
}
|
||||
|
||||
if (const auto * tuple_type = typeid_cast<const DataTypeTuple *>(data_type.get()))
|
||||
{
|
||||
auto nested_types = tuple_type->getElements();
|
||||
for (auto & nested_type : nested_types)
|
||||
transformJSONTupleToArrayIfPossible(nested_type, settings, json_info);
|
||||
|
||||
auto nested_types_copy = nested_types;
|
||||
transformInferredTypesIfNeededImpl<true>(nested_types_copy, settings, json_info);
|
||||
if (checkIfTypesAreEqual(nested_types_copy))
|
||||
data_type = std::make_shared<DataTypeArray>(nested_types_copy.back());
|
||||
else
|
||||
data_type = std::make_shared<DataTypeTuple>(nested_types);
|
||||
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
DataTypePtr tryInferNumberFromString(std::string_view field, const FormatSettings & settings)
|
||||
|
@ -176,6 +176,7 @@ public:
|
||||
ColumnPtr executeShortCircuit(ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type) const;
|
||||
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }
|
||||
size_t getNumberOfArguments() const override { return 0; }
|
||||
bool canBeExecutedOnLowCardinalityDictionary() const override { return false; }
|
||||
|
||||
bool useDefaultImplementationForNulls() const override { return !Impl::specialImplementationForNulls(); }
|
||||
|
||||
|
@ -126,7 +126,7 @@ private:
|
||||
|
||||
size_t total_values = 0;
|
||||
size_t pre_values = 0;
|
||||
std::vector<size_t> row_length(input_rows_count);
|
||||
PODArray<size_t> row_length(input_rows_count);
|
||||
|
||||
for (size_t row_idx = 0; row_idx < input_rows_count; ++row_idx)
|
||||
{
|
||||
@ -138,6 +138,8 @@ private:
|
||||
row_length[row_idx] = (static_cast<__int128_t>(end_data[row_idx]) - static_cast<__int128_t>(start) - 1) / static_cast<__int128_t>(step) + 1;
|
||||
else if (start > end_data[row_idx] && step < 0)
|
||||
row_length[row_idx] = (static_cast<__int128_t>(end_data[row_idx]) - static_cast<__int128_t>(start) + 1) / static_cast<__int128_t>(step) + 1;
|
||||
else
|
||||
row_length[row_idx] = 0;
|
||||
|
||||
pre_values += row_length[row_idx];
|
||||
|
||||
@ -161,8 +163,11 @@ private:
|
||||
IColumn::Offset offset{};
|
||||
for (size_t row_idx = 0; row_idx < input_rows_count; ++row_idx)
|
||||
{
|
||||
for (size_t idx = 0; idx < row_length[row_idx]; idx++)
|
||||
out_data[offset++] = static_cast<T>(start + offset * step);
|
||||
for (size_t idx = 0; idx < row_length[row_idx]; ++idx)
|
||||
{
|
||||
out_data[offset] = static_cast<T>(start + offset * step);
|
||||
++offset;
|
||||
}
|
||||
out_offsets[row_idx] = offset;
|
||||
}
|
||||
|
||||
@ -183,7 +188,7 @@ private:
|
||||
|
||||
size_t total_values = 0;
|
||||
size_t pre_values = 0;
|
||||
std::vector<size_t> row_length(input_rows_count);
|
||||
PODArray<size_t> row_length(input_rows_count);
|
||||
|
||||
for (size_t row_idx = 0; row_idx < input_rows_count; ++row_idx)
|
||||
{
|
||||
@ -195,7 +200,8 @@ private:
|
||||
row_length[row_idx] = (static_cast<__int128_t>(end_data[row_idx]) - static_cast<__int128_t>(start_data[row_idx]) - 1) / static_cast<__int128_t>(step) + 1;
|
||||
else if (start_data[row_idx] > end_data[row_idx] && step < 0)
|
||||
row_length[row_idx] = (static_cast<__int128_t>(end_data[row_idx]) - static_cast<__int128_t>(start_data[row_idx]) + 1) / static_cast<__int128_t>(step) + 1;
|
||||
|
||||
else
|
||||
row_length[row_idx] = 0;
|
||||
|
||||
pre_values += row_length[row_idx];
|
||||
|
||||
@ -241,7 +247,7 @@ private:
|
||||
|
||||
size_t total_values = 0;
|
||||
size_t pre_values = 0;
|
||||
std::vector<size_t> row_length(input_rows_count);
|
||||
PODArray<size_t> row_length(input_rows_count);
|
||||
|
||||
for (size_t row_idx = 0; row_idx < input_rows_count; ++row_idx)
|
||||
{
|
||||
@ -253,6 +259,8 @@ private:
|
||||
row_length[row_idx] = (static_cast<__int128_t>(end_data[row_idx]) - static_cast<__int128_t>(start) - 1) / static_cast<__int128_t>(step_data[row_idx]) + 1;
|
||||
else if (start > end_data[row_idx] && step_data[row_idx] < 0)
|
||||
row_length[row_idx] = (static_cast<__int128_t>(end_data[row_idx]) - static_cast<__int128_t>(start) + 1) / static_cast<__int128_t>(step_data[row_idx]) + 1;
|
||||
else
|
||||
row_length[row_idx] = 0;
|
||||
|
||||
pre_values += row_length[row_idx];
|
||||
|
||||
@ -301,7 +309,7 @@ private:
|
||||
|
||||
size_t total_values = 0;
|
||||
size_t pre_values = 0;
|
||||
std::vector<size_t> row_length(input_rows_count);
|
||||
PODArray<size_t> row_length(input_rows_count);
|
||||
|
||||
for (size_t row_idx = 0; row_idx < input_rows_count; ++row_idx)
|
||||
{
|
||||
@ -312,6 +320,8 @@ private:
|
||||
row_length[row_idx] = (static_cast<__int128_t>(end_start[row_idx]) - static_cast<__int128_t>(start_data[row_idx]) - 1) / static_cast<__int128_t>(step_data[row_idx]) + 1;
|
||||
else if (start_data[row_idx] > end_start[row_idx] && step_data[row_idx] < 0)
|
||||
row_length[row_idx] = (static_cast<__int128_t>(end_start[row_idx]) - static_cast<__int128_t>(start_data[row_idx]) + 1) / static_cast<__int128_t>(step_data[row_idx]) + 1;
|
||||
else
|
||||
row_length[row_idx] = 0;
|
||||
|
||||
pre_values += row_length[row_idx];
|
||||
|
||||
|
@ -1026,6 +1026,7 @@ public:
|
||||
}
|
||||
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }
|
||||
ColumnNumbers getArgumentsThatDontImplyNullableReturnType(size_t /*number_of_arguments*/) const override { return {0}; }
|
||||
bool canBeExecutedOnLowCardinalityDictionary() const override { return false; }
|
||||
|
||||
/// Get result types by argument types. If the function does not apply to these arguments, throw an exception.
|
||||
DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
|
||||
|
@ -51,6 +51,7 @@ public:
|
||||
size_t getNumberOfArguments() const override { return 0; }
|
||||
bool useDefaultImplementationForNulls() const override { return false; }
|
||||
bool useDefaultImplementationForNothing() const override { return false; }
|
||||
bool canBeExecutedOnLowCardinalityDictionary() const override { return false; }
|
||||
|
||||
ColumnNumbers getArgumentsThatDontImplyNullableReturnType(size_t number_of_arguments) const override
|
||||
{
|
||||
|
@ -1098,6 +1098,25 @@ inline String toString(const T & x)
    return buf.str();
}

template <typename T>
inline String toStringWithFinalSeparator(const std::vector<T> & x, const String & final_sep)
{
    WriteBufferFromOwnString buf;
    for (auto it = x.begin(); it != x.end(); ++it)
    {
        if (it != x.begin())
        {
            if (std::next(it) == x.end())
                writeString(final_sep, buf);
            else
                writeString(", ", buf);
        }
        writeQuoted(*it, buf);
    }

    return buf.str();
}

inline void writeNullTerminatedString(const String & s, WriteBuffer & buffer)
{
    /// c_str is guaranteed to return zero-terminated string
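For reference, the behaviour of the new helper can be approximated with nothing but the standard library; a hedged sketch with simplified quoting (the real helper above uses writeQuoted and WriteBufferFromOwnString), while the new unit test below exercises the real implementation:

#include <iostream>
#include <iterator>
#include <sstream>
#include <string>
#include <vector>

/// Join items with ", ", but use the given separator before the last item, quoting each value.
std::string toStringWithFinalSeparatorSketch(const std::vector<std::string> & items, const std::string & final_sep)
{
    std::ostringstream out;
    for (auto it = items.begin(); it != items.end(); ++it)
    {
        if (it != items.begin())
        {
            if (std::next(it) == items.end())
                out << final_sep;
            else
                out << ", ";
        }
        out << '\'' << *it << '\'';
    }
    return out.str();
}

int main()
{
    std::cout << toStringWithFinalSeparatorSketch({"AAA", "BBB", "CCC"}, " or ") << '\n';  /// 'AAA', 'BBB' or 'CCC'
}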
32
src/IO/tests/gtest_WriteHelpers.cpp
Normal file
@ -0,0 +1,32 @@
#include <gtest/gtest.h>

#include <string>
#include <vector>
#include <IO/WriteHelpers.h>

using namespace DB;


TEST(WriteHelpersTest, ToStringWithFinalSeparatorTest)
{
    {
        std::vector<std::string> v;
        EXPECT_EQ(toStringWithFinalSeparator(v, " or "), "");
    }
    {
        std::vector<std::string> v = {"AAA"};
        EXPECT_EQ(toStringWithFinalSeparator(v, " or "), "'AAA'");
    }
    {
        std::vector<std::string> v = {"AAA", "BBB"};
        EXPECT_EQ(toStringWithFinalSeparator(v, " or "), "'AAA' or 'BBB'");
    }
    {
        std::vector<std::string> v = {"AAA", "BBB", "CCC"};
        EXPECT_EQ(toStringWithFinalSeparator(v, " or "), "'AAA', 'BBB' or 'CCC'");
    }
    {
        std::vector<std::string> v = {"AAA", "BBB", "CCC", "DDD"};
        EXPECT_EQ(toStringWithFinalSeparator(v, " or "), "'AAA', 'BBB', 'CCC' or 'DDD'");
    }
}
|
@ -245,7 +245,8 @@ std::future<void> AsynchronousInsertQueue::push(ASTPtr query, ContextPtr query_c
        /// Here we check whether we hit the limit on maximum data size in the buffer.
        /// And use setting from query context.
        /// It works, because queries with the same set of settings are already grouped together.
        if (data->size_in_bytes > key.settings.async_insert_max_data_size || data->query_number > key.settings.async_insert_max_query_number)
        if (data->size_in_bytes >= key.settings.async_insert_max_data_size
            || data->query_number >= key.settings.async_insert_max_query_number)
        {
            data_to_process = std::move(data);
            shard.iterators.erase(it);

@ -763,13 +763,17 @@ try
|
||||
fs::directory_iterator dir_end;
|
||||
for (fs::directory_iterator it(path); it != dir_end; ++it)
|
||||
{
|
||||
if (it->is_regular_file() && startsWith(it->path().filename(), "tmp"))
|
||||
if (it->is_regular_file())
|
||||
{
|
||||
LOG_DEBUG(log, "Removing old temporary file {}", it->path().string());
|
||||
fs::remove(it->path());
|
||||
if (startsWith(it->path().filename(), "tmp"))
|
||||
{
|
||||
LOG_DEBUG(log, "Removing old temporary file {}", it->path().string());
|
||||
fs::remove(it->path());
|
||||
}
|
||||
else
|
||||
LOG_DEBUG(log, "Found unknown file in temporary path {}", it->path().string());
|
||||
}
|
||||
else
|
||||
LOG_DEBUG(log, "Found unknown file in temporary path {}", it->path().string());
|
||||
/// We skip directories (for example, 'http_buffers' - it's used for buffering of the results) and all other file types.
|
||||
}
|
||||
}
|
||||
catch (...)
|
||||
|
@ -1,5 +1,6 @@
|
||||
#include <algorithm>
|
||||
#include <memory>
|
||||
#include <set>
|
||||
|
||||
#include <Core/Settings.h>
|
||||
#include <Core/NamesAndTypes.h>
|
||||
@ -1229,16 +1230,24 @@ void TreeRewriterResult::collectUsedColumns(const ASTPtr & query, bool is_select
        if (storage)
        {
            std::vector<String> hint_name{};
            std::set<String> helper_hint_name{};
            for (const auto & name : columns_context.requiredColumns())
            {
                auto hints = storage->getHints(name);
                hint_name.insert(hint_name.end(), hints.begin(), hints.end());
                for (const auto & hint : hints)
                {
                    // We want to preserve the ordering of the hints
                    // (as they are ordered by Levenshtein distance)
                    auto [_, inserted] = helper_hint_name.insert(hint);
                    if (inserted)
                        hint_name.push_back(hint);
                }
            }

            if (!hint_name.empty())
            {
                ss << ", maybe you meant: ";
                ss << toString(hint_name);
                ss << toStringWithFinalSeparator(hint_name, " or ");
            }
        }
        else

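The loop above keeps the hints in their original order (they arrive sorted by Levenshtein distance) while dropping duplicates, by pairing the output vector with a lookup set. The same idiom in isolation, standard library only:

#include <iostream>
#include <set>
#include <string>
#include <vector>

/// Deduplicate while preserving the original (relevance) order:
/// the set answers "seen before?", the vector keeps the order.
std::vector<std::string> uniqueKeepOrder(const std::vector<std::string> & hints)
{
    std::vector<std::string> result;
    std::set<std::string> seen;
    for (const auto & hint : hints)
    {
        auto [_, inserted] = seen.insert(hint);
        if (inserted)
            result.push_back(hint);
    }
    return result;
}

int main()
{
    for (const auto & hint : uniqueKeepOrder({"name", "nane", "name", "names"}))
        std::cout << hint << '\n';  /// prints: name, nane, names
}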
@ -699,7 +699,7 @@ public:
|
||||
/// 2. If there is already tuple do nothing
|
||||
if (tryGetFunctionName(elements.back()) == "tuple")
|
||||
{
|
||||
pushOperand(elements.back());
|
||||
pushOperand(std::move(elements.back()));
|
||||
elements.pop_back();
|
||||
}
|
||||
/// 3. Put all elements in a single tuple
|
||||
@ -709,6 +709,19 @@ public:
|
||||
elements.clear();
|
||||
pushOperand(function);
|
||||
}
|
||||
|
||||
/// We must check that tuple arguments are identifiers
|
||||
auto * func_ptr = operands.back()->as<ASTFunction>();
|
||||
auto * args_ptr = func_ptr->arguments->as<ASTExpressionList>();
|
||||
|
||||
for (const auto & child : args_ptr->children)
|
||||
{
|
||||
if (typeid_cast<ASTIdentifier *>(child.get()))
|
||||
continue;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1062,9 +1075,7 @@ public:
|
||||
is_tuple = true;
|
||||
|
||||
// Special case for f(x, (y) -> z) = f(x, tuple(y) -> z)
|
||||
auto test_pos = pos;
|
||||
auto test_expected = expected;
|
||||
if (parseOperator(test_pos, "->", test_expected))
|
||||
if (pos->type == TokenType::Arrow)
|
||||
is_tuple = true;
|
||||
}
|
||||
|
||||
@ -1732,6 +1743,29 @@ private:
|
||||
bool parsed_interval_kind = false;
|
||||
};
|
||||
|
||||
class TupleLayer : public LayerWithSeparator<TokenType::Comma, TokenType::ClosingRoundBracket>
|
||||
{
|
||||
public:
|
||||
bool parse(IParser::Pos & pos, Expected & expected, Action & action) override
|
||||
{
|
||||
bool result = LayerWithSeparator::parse(pos, expected, action);
|
||||
|
||||
/// Check that after the tuple() function there is no lambdas operator
|
||||
if (finished && pos->type == TokenType::Arrow)
|
||||
return false;
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
protected:
|
||||
bool getResultImpl(ASTPtr & node) override
|
||||
{
|
||||
node = makeASTFunction("tuple", std::move(elements));
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
class IntervalLayer : public Layer
|
||||
{
|
||||
public:
|
||||
@ -2035,6 +2069,9 @@ std::unique_ptr<Layer> getFunctionLayer(ASTPtr identifier, bool is_table_functio
|
||||
return std::make_unique<ViewLayer>(true);
|
||||
}
|
||||
|
||||
if (function_name == "tuple")
|
||||
return std::make_unique<TupleLayer>();
|
||||
|
||||
if (function_name_lowercase == "cast")
|
||||
return std::make_unique<CastLayer>();
|
||||
else if (function_name_lowercase == "extract")
|
||||
|
@ -27,7 +27,7 @@ private:
|
||||
{
|
||||
ParserNestedTable nested_parser;
|
||||
ParserDataType data_type_parser;
|
||||
ParserLiteral literal_parser;
|
||||
ParserAllCollectionsOfLiterals literal_parser(false);
|
||||
|
||||
const char * operators[] = {"=", "equals", nullptr};
|
||||
ParserLeftAssociativeBinaryOperatorList enum_parser(operators, std::make_unique<ParserLiteral>());
|
||||
@ -145,4 +145,3 @@ bool ParserDataType::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
@ -3,6 +3,7 @@
|
||||
#include <Interpreters/OpenTelemetrySpanLog.h>
|
||||
#include <Parsers/ParserQuery.h>
|
||||
#include <Parsers/ASTInsertQuery.h>
|
||||
#include <Parsers/ASTExplainQuery.h>
|
||||
#include <Parsers/Lexer.h>
|
||||
#include <Parsers/TokenIterator.h>
|
||||
#include <Common/StringUtils/StringUtils.h>
|
||||
@ -263,7 +264,19 @@ ASTPtr tryParseQuery(
|
||||
|
||||
ASTInsertQuery * insert = nullptr;
|
||||
if (parse_res)
|
||||
insert = res->as<ASTInsertQuery>();
|
||||
{
|
||||
if (auto * explain = res->as<ASTExplainQuery>())
|
||||
{
|
||||
if (auto explained_query = explain->getExplainedQuery())
|
||||
{
|
||||
insert = explained_query->as<ASTInsertQuery>();
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
insert = res->as<ASTInsertQuery>();
|
||||
}
|
||||
}
|
||||
|
||||
// If parsed query ends at data for insertion. Data for insertion could be
|
||||
// in any format and not necessary be lexical correct, so we can't perform
|
||||
|
@ -71,7 +71,7 @@ Chunk ArrowBlockInputFormat::generate()
|
||||
|
||||
++record_batch_current;
|
||||
|
||||
arrow_column_to_ch_column->arrowTableToCHChunk(res, *table_result);
|
||||
arrow_column_to_ch_column->arrowTableToCHChunk(res, *table_result, (*table_result)->num_rows());
|
||||
|
||||
/// If defaults_for_omitted_fields is true, calculate the default values from default expression for omitted fields.
|
||||
/// Otherwise fill the missing columns with zero values of its type.
|
||||
|
@ -69,7 +69,6 @@ namespace ErrorCodes
|
||||
extern const int DUPLICATE_COLUMN;
|
||||
extern const int THERE_IS_NO_COLUMN;
|
||||
extern const int UNKNOWN_EXCEPTION;
|
||||
extern const int INCORRECT_NUMBER_OF_COLUMNS;
|
||||
extern const int INCORRECT_DATA;
|
||||
}
|
||||
|
||||
@ -810,7 +809,7 @@ ArrowColumnToCHColumn::ArrowColumnToCHColumn(
|
||||
{
|
||||
}
|
||||
|
||||
void ArrowColumnToCHColumn::arrowTableToCHChunk(Chunk & res, std::shared_ptr<arrow::Table> & table)
|
||||
void ArrowColumnToCHColumn::arrowTableToCHChunk(Chunk & res, std::shared_ptr<arrow::Table> & table, size_t num_rows)
|
||||
{
|
||||
NameToColumnPtr name_to_column_ptr;
|
||||
for (auto column_name : table->ColumnNames())
|
||||
@ -824,16 +823,12 @@ void ArrowColumnToCHColumn::arrowTableToCHChunk(Chunk & res, std::shared_ptr<arr
|
||||
name_to_column_ptr[std::move(column_name)] = arrow_column;
|
||||
}
|
||||
|
||||
arrowColumnsToCHChunk(res, name_to_column_ptr);
|
||||
arrowColumnsToCHChunk(res, name_to_column_ptr, num_rows);
|
||||
}
|
||||
|
||||
void ArrowColumnToCHColumn::arrowColumnsToCHChunk(Chunk & res, NameToColumnPtr & name_to_column_ptr)
|
||||
void ArrowColumnToCHColumn::arrowColumnsToCHChunk(Chunk & res, NameToColumnPtr & name_to_column_ptr, size_t num_rows)
|
||||
{
|
||||
if (unlikely(name_to_column_ptr.empty()))
|
||||
throw Exception(ErrorCodes::INCORRECT_NUMBER_OF_COLUMNS, "Columns is empty");
|
||||
|
||||
Columns columns_list;
|
||||
UInt64 num_rows = name_to_column_ptr.begin()->second->length();
|
||||
columns_list.reserve(header.columns());
|
||||
std::unordered_map<String, std::pair<BlockPtr, std::shared_ptr<NestedColumnExtractHelper>>> nested_tables;
|
||||
bool skipped = false;
|
||||
|
@ -28,9 +28,9 @@ public:
|
||||
bool allow_missing_columns_,
|
||||
bool case_insensitive_matching_ = false);
|
||||
|
||||
void arrowTableToCHChunk(Chunk & res, std::shared_ptr<arrow::Table> & table);
|
||||
void arrowTableToCHChunk(Chunk & res, std::shared_ptr<arrow::Table> & table, size_t num_rows);
|
||||
|
||||
void arrowColumnsToCHChunk(Chunk & res, NameToColumnPtr & name_to_column_ptr);
|
||||
void arrowColumnsToCHChunk(Chunk & res, NameToColumnPtr & name_to_column_ptr, size_t num_rows);
|
||||
|
||||
/// Get missing columns that exists in header but not in arrow::Schema
|
||||
std::vector<size_t> getMissingColumns(const arrow::Schema & schema) const;
|
||||
|
@ -54,14 +54,19 @@ Chunk ORCBlockInputFormat::generate()
|
||||
throw ParsingException(
|
||||
ErrorCodes::CANNOT_READ_ALL_DATA, "Error while reading batch of ORC data: {}", table_result.status().ToString());
|
||||
|
||||
/// We should extract the number of rows directly from the stripe, because in case when
|
||||
/// record batch contains 0 columns (for example if we requested only columns that
|
||||
/// are not presented in data) the number of rows in record batch will be 0.
|
||||
size_t num_rows = file_reader->GetRawORCReader()->getStripe(stripe_current)->getNumberOfRows();
|
||||
|
||||
auto table = table_result.ValueOrDie();
|
||||
if (!table || !table->num_rows())
|
||||
if (!table || !num_rows)
|
||||
return {};
|
||||
|
||||
++stripe_current;
|
||||
|
||||
Chunk res;
|
||||
arrow_column_to_ch_column->arrowTableToCHChunk(res, table);
|
||||
arrow_column_to_ch_column->arrowTableToCHChunk(res, table, num_rows);
|
||||
/// If defaults_for_omitted_fields is true, calculate the default values from default expression for omitted fields.
|
||||
/// Otherwise fill the missing columns with zero values of its type.
|
||||
if (format_settings.defaults_for_omitted_fields)
|
||||
|
@ -70,7 +70,7 @@ Chunk ParquetBlockInputFormat::generate()
|
||||
|
||||
++row_group_current;
|
||||
|
||||
arrow_column_to_ch_column->arrowTableToCHChunk(res, table);
|
||||
arrow_column_to_ch_column->arrowTableToCHChunk(res, table, table->num_rows());
|
||||
|
||||
/// If defaults_for_omitted_fields is true, calculate the default values from default expression for omitted fields.
|
||||
/// Otherwise fill the missing columns with zero values of its type.
|
||||
|
@ -332,8 +332,7 @@ std::string buildTaggedRegex(std::string regexp_str)
|
||||
* </default>
|
||||
* </graphite_rollup>
|
||||
*/
|
||||
static const Pattern &
|
||||
appendGraphitePattern(
|
||||
static const Pattern & appendGraphitePattern(
|
||||
const Poco::Util::AbstractConfiguration & config,
|
||||
const String & config_element, Patterns & patterns,
|
||||
bool default_rule,
|
||||
|
@ -9,6 +9,12 @@
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int BAD_ARGUMENTS;
|
||||
}
|
||||
|
||||
|
||||
static GraphiteRollupSortedAlgorithm::ColumnsDefinition defineColumns(
|
||||
const Block & header, const Graphite::Params & params)
|
||||
{
|
||||
@ -26,6 +32,9 @@ static GraphiteRollupSortedAlgorithm::ColumnsDefinition defineColumns(
|
||||
if (i != def.time_column_num && i != def.value_column_num && i != def.version_column_num)
|
||||
def.unmodified_column_numbers.push_back(i);
|
||||
|
||||
if (!WhichDataType(header.getByPosition(def.value_column_num).type).isFloat64())
|
||||
throw Exception("Only `Float64` data type is allowed for the value column of GraphiteMergeTree", ErrorCodes::BAD_ARGUMENTS);
|
||||
|
||||
return def;
|
||||
}
|
||||
|
||||
|
@ -2313,7 +2313,8 @@ void registerWindowFunctions(AggregateFunctionFactory & factory)
|
||||
.returns_default_when_only_null = true,
|
||||
// This probably doesn't make any difference for window functions because
|
||||
// it is an Aggregator-specific setting.
|
||||
.is_order_dependent = true };
|
||||
.is_order_dependent = true,
|
||||
.is_window_function = true};
|
||||
|
||||
factory.registerFunction("rank", {[](const std::string & name,
|
||||
const DataTypes & argument_types, const Array & parameters, const Settings *)
|
||||
|
@ -1013,7 +1013,7 @@ void MergeTreeData::loadDataPartsFromDisk(
|
||||
size_t suspicious_broken_parts_bytes = 0;
|
||||
std::atomic<bool> has_adaptive_parts = false;
|
||||
std::atomic<bool> has_non_adaptive_parts = false;
|
||||
std::atomic<bool> has_lightweight_in_parts = false;
|
||||
std::atomic<bool> has_lightweight_deletes_in_parts = false;
|
||||
|
||||
std::mutex mutex;
|
||||
auto load_part = [&](const String & part_name, const DiskPtr & part_disk_ptr)
|
||||
@ -1105,7 +1105,7 @@ void MergeTreeData::loadDataPartsFromDisk(
|
||||
|
||||
/// Check if there is lightweight delete in part
|
||||
if (part->hasLightweightDelete())
|
||||
has_lightweight_in_parts.store(true, std::memory_order_relaxed);
|
||||
has_lightweight_deletes_in_parts.store(true, std::memory_order_relaxed);
|
||||
|
||||
part->modification_time = part_disk_ptr->getLastModified(fs::path(relative_data_path) / part_name).epochTime();
|
||||
/// Assume that all parts are Active, covered parts will be detected and marked as Outdated later
|
||||
@ -1189,7 +1189,7 @@ void MergeTreeData::loadDataPartsFromDisk(
|
||||
|
||||
has_non_adaptive_index_granularity_parts = has_non_adaptive_parts;
|
||||
|
||||
if (has_lightweight_in_parts)
|
||||
if (has_lightweight_deletes_in_parts)
|
||||
has_lightweight_delete_parts.store(true);
|
||||
|
||||
if (suspicious_broken_parts > settings->max_suspicious_broken_parts && !skip_sanity_checks)
|
||||
@ -5982,20 +5982,25 @@ std::optional<ProjectionCandidate> MergeTreeData::getQueryProcessingStageWithAgg
|
||||
if (select_query->interpolate() && !select_query->interpolate()->children.empty())
|
||||
return std::nullopt;
|
||||
|
||||
// Currently projections don't support GROUPING SET yet.
|
||||
if (select_query->group_by_with_grouping_sets)
|
||||
// Projections don't support grouping sets yet.
|
||||
if (select_query->group_by_with_grouping_sets
|
||||
|| select_query->group_by_with_totals
|
||||
|| select_query->group_by_with_rollup
|
||||
|| select_query->group_by_with_cube)
|
||||
return std::nullopt;
|
||||
|
||||
auto query_options = SelectQueryOptions(
|
||||
QueryProcessingStage::WithMergeableState,
|
||||
/* depth */ 1,
|
||||
/* is_subquery_= */ true
|
||||
).ignoreProjections().ignoreAlias();
|
||||
).ignoreProjections().ignoreAlias();
|
||||
|
||||
InterpreterSelectQuery select(
|
||||
query_ptr,
|
||||
query_context,
|
||||
query_options,
|
||||
query_info.prepared_sets);
|
||||
|
||||
const auto & analysis_result = select.getAnalysisResult();
|
||||
|
||||
query_info.prepared_sets = select.getQueryAnalyzer()->getPreparedSets();
|
||||
|
@ -17,7 +17,7 @@ struct ReplicatedMergeTreeLogEntryData;
|
||||
/// (so instead of doing exactly the same merge cluster-wise you can do merge once and fetch ready part)
|
||||
/// Fetches may be desirable for other operational reasons (backup replica without lot of CPU resources).
|
||||
///
|
||||
/// That class allow to take a decisions about preferred strategy for a concreate merge.
|
||||
/// That class allow to take a decisions about preferred strategy for a concrete merge.
|
||||
///
|
||||
/// Since that code is used in shouldExecuteLogEntry we need to be able to:
|
||||
/// 1) make decision fast
|
||||
|
@ -476,7 +476,7 @@ static StoragePtr create(const StorageFactory::Arguments & args)
|
||||
{
|
||||
String graphite_config_name;
|
||||
String error_msg
|
||||
= "Last parameter of GraphiteMergeTree must be name (in single quotes) of element in configuration file with Graphite options";
|
||||
= "Last parameter of GraphiteMergeTree must be the name (in single quotes) of the element in configuration file with the Graphite options";
|
||||
error_msg += getMergeTreeVerboseHelp(is_extended_storage_def);
|
||||
|
||||
if (const auto * ast = engine_args[arg_cnt - 1]->as<ASTLiteral>())
|
||||
|
@ -171,7 +171,6 @@ struct ProjectionCandidate
|
||||
*/
|
||||
struct SelectQueryInfo
|
||||
{
|
||||
|
||||
SelectQueryInfo()
|
||||
: prepared_sets(std::make_shared<PreparedSets>())
|
||||
{}
|
||||
|
@ -95,7 +95,7 @@ if __name__ == "__main__":
|
||||
)
|
||||
logging.info("Going to run %s", run_command)
|
||||
|
||||
run_log_path = os.path.join(temp_path, "runlog.log")
|
||||
run_log_path = os.path.join(temp_path, "run.log")
|
||||
with open(run_log_path, "w", encoding="utf-8") as log:
|
||||
with subprocess.Popen(
|
||||
run_command, shell=True, stderr=log, stdout=log
|
||||
@ -113,7 +113,7 @@ if __name__ == "__main__":
|
||||
)
|
||||
s3_prefix = f"{pr_info.number}/{pr_info.sha}/fuzzer_{check_name_lower}/"
|
||||
paths = {
|
||||
"runlog.log": run_log_path,
|
||||
"run.log": run_log_path,
|
||||
"main.log": os.path.join(workspace_path, "main.log"),
|
||||
"server.log.gz": os.path.join(workspace_path, "server.log.gz"),
|
||||
"fuzzer.log": os.path.join(workspace_path, "fuzzer.log"),
|
||||
@ -130,8 +130,8 @@ if __name__ == "__main__":
|
||||
paths[f] = ""
|
||||
|
||||
report_url = GITHUB_RUN_URL
|
||||
if paths["runlog.log"]:
|
||||
report_url = paths["runlog.log"]
|
||||
if paths["run.log"]:
|
||||
report_url = paths["run.log"]
|
||||
if paths["main.log"]:
|
||||
report_url = paths["main.log"]
|
||||
if paths["server.log.gz"]:
|
||||
|
@ -57,7 +57,7 @@ if __name__ == "__main__":
|
||||
|
||||
logging.info("Going to run codebrowser: %s", run_command)
|
||||
|
||||
run_log_path = os.path.join(TEMP_PATH, "runlog.log")
|
||||
run_log_path = os.path.join(TEMP_PATH, "run.log")
|
||||
|
||||
with TeePopen(run_command, run_log_path) as process:
|
||||
retcode = process.wait()
|
||||
|
@ -476,7 +476,6 @@ def main():
|
||||
url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, [], NAME)
|
||||
|
||||
print(f"::notice ::Report url: {url}")
|
||||
print(f'::set-output name=url_output::"{url}"')
|
||||
|
||||
if not args.reports:
|
||||
return
|
||||
|
@ -208,7 +208,6 @@ def main():
|
||||
url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, [], NAME)
|
||||
|
||||
print(f"::notice ::Report url: {url}")
|
||||
print(f'::set-output name=url_output::"{url}"')
|
||||
|
||||
if not args.reports:
|
||||
return
|
||||
|
@ -340,7 +340,6 @@ def main():
|
||||
url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, [], NAME)
|
||||
|
||||
print(f"::notice ::Report url: {url}")
|
||||
print(f'::set-output name=url_output::"{url}"')
|
||||
|
||||
if not args.reports:
|
||||
return
|
||||
|
@ -82,7 +82,7 @@ if __name__ == "__main__":
|
||||
f"{docker_image}"
|
||||
)
|
||||
|
||||
run_log_path = os.path.join(test_output, "runlog.log")
|
||||
run_log_path = os.path.join(test_output, "run.log")
|
||||
logging.info("Running command: '%s'", cmd)
|
||||
|
||||
with TeePopen(cmd, run_log_path) as process:
|
||||
|
@ -60,7 +60,7 @@ if __name__ == "__main__":
|
||||
else:
|
||||
user = f"{os.geteuid()}:{os.getegid()}"
|
||||
|
||||
run_log_path = os.path.join(test_output, "runlog.log")
|
||||
run_log_path = os.path.join(test_output, "run.log")
|
||||
|
||||
with SSHKey("ROBOT_CLICKHOUSE_SSH_KEY"):
|
||||
cmd = (
|
||||
|
@ -155,7 +155,7 @@ if __name__ == "__main__":
|
||||
if not os.path.exists(logs_path):
|
||||
os.makedirs(logs_path)
|
||||
|
||||
run_log_path = os.path.join(logs_path, "runlog.log")
|
||||
run_log_path = os.path.join(logs_path, "run.log")
|
||||
with TeePopen(run_cmd, run_log_path, timeout=40 * 60) as process:
|
||||
retcode = process.wait()
|
||||
if retcode == 0:
|
||||
|
@ -292,7 +292,7 @@ if __name__ == "__main__":
|
||||
if not os.path.exists(result_path):
|
||||
os.makedirs(result_path)
|
||||
|
||||
run_log_path = os.path.join(result_path, "runlog.log")
|
||||
run_log_path = os.path.join(result_path, "run.log")
|
||||
|
||||
additional_envs = get_additional_envs(
|
||||
check_name, run_by_hash_num, run_by_hash_total
|
||||
|
@ -251,7 +251,7 @@ if __name__ == "__main__":
|
||||
)
|
||||
logging.info("Going to run jepsen: %s", cmd)
|
||||
|
||||
run_log_path = os.path.join(TEMP_PATH, "runlog.log")
|
||||
run_log_path = os.path.join(TEMP_PATH, "run.log")
|
||||
|
||||
with TeePopen(cmd, run_log_path) as process:
|
||||
retcode = process.wait()
|
||||
|
@ -176,7 +176,7 @@ if __name__ == "__main__":
|
||||
)
|
||||
logging.info("Going to run command %s", run_command)
|
||||
|
||||
run_log_path = os.path.join(temp_path, "runlog.log")
|
||||
run_log_path = os.path.join(temp_path, "run.log")
|
||||
|
||||
popen_env = os.environ.copy()
|
||||
popen_env.update(env_extra)
|
||||
@ -198,7 +198,7 @@ if __name__ == "__main__":
|
||||
"all-query-metrics.tsv": os.path.join(
|
||||
result_path, "report/all-query-metrics.tsv"
|
||||
),
|
||||
"runlog.log": run_log_path,
|
||||
"run.log": run_log_path,
|
||||
}
|
||||
|
||||
s3_prefix = f"{pr_info.number}/{pr_info.sha}/{check_name_prefix}/"
|
||||
@ -253,8 +253,8 @@ if __name__ == "__main__":
|
||||
|
||||
report_url = GITHUB_RUN_URL
|
||||
|
||||
if uploaded["runlog.log"]:
|
||||
report_url = uploaded["runlog.log"]
|
||||
if uploaded["run.log"]:
|
||||
report_url = uploaded["run.log"]
|
||||
|
||||
if uploaded["compare.log"]:
|
||||
report_url = uploaded["compare.log"]
|
||||
|
@ -95,7 +95,7 @@ if __name__ == "__main__":
|
||||
run_command = get_run_command(build_url, workspace_path, docker_image)
|
||||
logging.info("Going to run %s", run_command)
|
||||
|
||||
run_log_path = os.path.join(workspace_path, "runlog.log")
|
||||
run_log_path = os.path.join(workspace_path, "run.log")
|
||||
with open(run_log_path, "w", encoding="utf-8") as log:
|
||||
with subprocess.Popen(
|
||||
run_command, shell=True, stderr=log, stdout=log
|
||||
|
@ -139,7 +139,7 @@ if __name__ == "__main__":
|
||||
if not os.path.exists(result_path):
|
||||
os.makedirs(result_path)
|
||||
|
||||
run_log_path = os.path.join(temp_path, "runlog.log")
|
||||
run_log_path = os.path.join(temp_path, "run.log")
|
||||
|
||||
run_command = get_run_command(
|
||||
packages_path, result_path, repo_tests_path, server_log_path, docker_image
|
||||
|
@ -140,7 +140,7 @@ if __name__ == "__main__":
|
||||
|
||||
run_command = f"docker run --cap-add=SYS_PTRACE --volume={tests_binary_path}:/unit_tests_dbms --volume={test_output}:/test_output {docker_image}"
|
||||
|
||||
run_log_path = os.path.join(test_output, "runlog.log")
|
||||
run_log_path = os.path.join(test_output, "run.log")
|
||||
|
||||
logging.info("Going to run func tests: %s", run_command)
|
||||
|
||||
|
@ -25,7 +25,7 @@ MAX_RETRY = 5
|
||||
|
||||
# Number of times a check can re-run as a whole.
|
||||
# It is needed, because we are using AWS "spot" instances, that are terminated often
|
||||
MAX_WORKFLOW_RERUN = 20
|
||||
MAX_WORKFLOW_RERUN = 30
|
||||
|
||||
WorkflowDescription = namedtuple(
|
||||
"WorkflowDescription",
|
||||
|
@ -4,7 +4,6 @@
|
||||
<host>localhost</host>
|
||||
<port>9181</port>
|
||||
</node>
|
||||
|
||||
<!-- Settings for fault injection.
|
||||
Approximate probability of request success:
|
||||
(1 - send_fault_probability) * (1 - recv_fault_probability) = 0.99998 * 0.99998 = 0.99996
|
||||
|