mirror of https://github.com/ClickHouse/ClickHouse.git
synced 2024-11-24 00:22:29 +00:00

Commit cc39484f7f: Merge branch 'master' into system-symbols
.github/actions/clean/action.yml (vendored, new file, 11 additions)

@@ -0,0 +1,11 @@
+name: Clean runner
+description: Clean the runner's temp path on ending
+runs:
+  using: "composite"
+  steps:
+    - name: Clean
+      shell: bash
+      run: |
+        docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+        docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+        sudo rm -fr "${{runner.temp}}"
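A composite action like this is consumed from a workflow job through a repository-local "uses:" reference. The snippet below is only an illustrative sketch of such a call site; the job and step names are invented here and are not part of this commit:

jobs:
  example-build:                     # invented job name, for illustration only
    runs-on: [self-hosted, builder]
    steps:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
      # ... build steps ...
      - name: Clean the runner
        if: always()                 # run the cleanup even when earlier steps failed
        uses: ./.github/actions/clean

The trailing "||:" on the docker commands is the usual shell idiom for "ignore failures": ":" is the null command, so each cleanup line exits 0 even when there are no containers to kill.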
.github/actions/common_setup/action.yml (vendored, new file, 33 additions)

@@ -0,0 +1,33 @@
+name: Common setup
+description: Setup necessary environments
+inputs:
+  job_type:
+    description: the name to use in the TEMP_PATH and REPO_COPY
+    default: common
+    type: string
+  nested_job:
+    description: the fuse for unintended use inside of the reusable callable jobs
+    default: true
+    type: boolean
+runs:
+  using: "composite"
+  steps:
+    - name: Setup and check ENV
+      shell: bash
+      run: |
+        echo "Setup the common ENV variables"
+        cat >> "$GITHUB_ENV" << 'EOF'
+        TEMP_PATH=${{runner.temp}}/${{inputs.job_type}}
+        REPO_COPY=${{runner.temp}}/${{inputs.job_type}}/git-repo-copy
+        EOF
+        if [ -z "${{env.GITHUB_JOB_OVERRIDDEN}}" ] && [ "true" == "${{inputs.nested_job}}" ]; then
+          echo "The GITHUB_JOB_OVERRIDDEN ENV is unset, and must be set for the nested jobs"
+          exit 1
+        fi
+    - name: Setup $TEMP_PATH
+      shell: bash
+      run: |
+        # to remove every leftovers
+        sudo rm -fr "$TEMP_PATH"
+        mkdir -p "$REPO_COPY"
+        cp -a "$GITHUB_WORKSPACE"/. "$REPO_COPY"/
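How this action is invoked is not part of the diff. The step below is a hedged sketch of a possible call site; the job_type value and the nested_job override are assumptions chosen to show how the two inputs interact with the fuse check above:

      - name: Common setup
        uses: ./.github/actions/common_setup
        with:
          job_type: build_check   # TEMP_PATH becomes ${{ runner.temp }}/build_check
          nested_job: false       # disable the fuse when not called from a reusable workflow

With the defaults (nested_job: true), the first step deliberately fails unless the calling job has exported GITHUB_JOB_OVERRIDDEN, which guards against using the action outside the reusable workflows this commit wires in.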
.github/workflows/backport_branches.yml (vendored, 314 lines changed)

@@ -1,3 +1,4 @@
+# yamllint disable rule:comments-indentation
 name: BackportPR

 env:
@@ -169,320 +170,43 @@ jobs:
   #########################################################################################
   BuilderDebRelease:
     needs: [DockerHubPush]
-    runs-on: [self-hosted, builder]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/build_check
-          IMAGES_PATH=${{runner.temp}}/images_path
-          REPO_COPY=${{runner.temp}}/build_check/ClickHouse
-          CACHES_PATH=${{runner.temp}}/../ccaches
-          BUILD_NAME=package_release
-          EOF
-      - name: Download changed images
-        uses: actions/download-artifact@v3
-        with:
-          name: changed_images
-          path: ${{ env.IMAGES_PATH }}
-      - name: Check out repository code
-        uses: ClickHouse/checkout@v1
-        with:
-          clear-repository: true
-          submodules: true
-          fetch-depth: 0 # For a proper version and performance artifacts
-          filter: tree:0
-      - name: Build
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
-      - name: Upload build URLs to artifacts
-        if: ${{ success() || failure() }}
-        uses: actions/upload-artifact@v3
-        with:
-          name: ${{ env.BUILD_URLS }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
-      - name: Cleanup
-        if: always()
-        run: |
-          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
-          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
-          sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
+    uses: ./.github/workflows/reusable_build.yml
+    with:
+      build_name: package_release
+      checkout_depth: 0
   BuilderDebAarch64:
     needs: [DockerHubPush]
-    runs-on: [self-hosted, builder]
-    steps: (same inline steps as BuilderDebRelease above, with BUILD_NAME=package_aarch64)
+    uses: ./.github/workflows/reusable_build.yml
+    with:
+      build_name: package_aarch64
+      checkout_depth: 0
   BuilderDebAsan:
     needs: [DockerHubPush]
-    runs-on: [self-hosted, builder]
-    steps: (same inline steps, with BUILD_NAME=package_asan)
+    uses: ./.github/workflows/reusable_build.yml
+    with:
+      build_name: package_asan
   BuilderDebTsan:
     needs: [DockerHubPush]
-    runs-on: [self-hosted, builder]
-    steps: (same inline steps, with BUILD_NAME=package_tsan)
+    uses: ./.github/workflows/reusable_build.yml
+    with:
+      build_name: package_tsan
   BuilderDebDebug:
     needs: [DockerHubPush]
-    runs-on: [self-hosted, builder]
-    steps: (same inline steps plus an "Apply sparse checkout for contrib" step, with BUILD_NAME=package_debug)
+    uses: ./.github/workflows/reusable_build.yml
+    with:
+      build_name: package_debug
   BuilderBinDarwin:
     needs: [DockerHubPush]
-    runs-on: [self-hosted, builder]
-    steps: (same inline steps plus an "Apply sparse checkout for contrib" step, with BUILD_NAME=binary_darwin)
+    uses: ./.github/workflows/reusable_build.yml
+    with:
+      build_name: binary_darwin
+      checkout_depth: 0
   BuilderBinDarwinAarch64:
     needs: [DockerHubPush]
-    runs-on: [self-hosted, builder]
-    steps: (same inline steps plus an "Apply sparse checkout for contrib" step, with BUILD_NAME=binary_darwin_aarch64)
+    uses: ./.github/workflows/reusable_build.yml
+    with:
+      build_name: binary_darwin_aarch64
+      checkout_depth: 0
   ############################################################################################
   ##################################### Docker images #######################################
   ############################################################################################
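The target of these "uses:" lines, .github/workflows/reusable_build.yml, is not included in this diff. Judging only from the inputs the callers pass to it (build_name, checkout_depth), its workflow_call interface presumably looks roughly like the sketch below; every name, type and default here is an assumption, not text from the commit:

name: Reusable build                      # assumed name
on:
  workflow_call:
    inputs:
      build_name:
        description: build configuration handed to tests/ci/build_check.py
        required: true
        type: string
      checkout_depth:
        description: fetch depth for the checkout step (0 means full history)
        required: false
        type: number
        default: 1                        # assumed; callers that need history pass 0

Because the callers reference it by a repository-local path, the same build job definition is shared by the backport, master and pull-request workflows.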
.github/workflows/master.yml (vendored, 777 lines changed)

@@ -1,3 +1,4 @@
+# yamllint disable rule:comments-indentation
 name: MasterCI

 env:
@@ -184,789 +185,109 @@ jobs:
   #########################################################################################
   BuilderDebRelease:
     needs: [DockerHubPush]
-    runs-on: [self-hosted, builder]
-    steps: (same inline steps as in backport_branches.yml above, with BUILD_NAME=package_release)
+    uses: ./.github/workflows/reusable_build.yml
+    with:
+      checkout_depth: 0
+      build_name: package_release
   BuilderDebAarch64:
     needs: [DockerHubPush]
-    runs-on: [self-hosted, builder]
-    steps: (same inline steps, with BUILD_NAME=package_aarch64)
+    uses: ./.github/workflows/reusable_build.yml
+    with:
+      checkout_depth: 0
+      build_name: package_aarch64
   BuilderBinRelease:
     needs: [DockerHubPush]
-    runs-on: [self-hosted, builder]
-    steps: (same inline steps, with BUILD_NAME=binary_release)
+    uses: ./.github/workflows/reusable_build.yml
+    with:
+      checkout_depth: 0
+      build_name: binary_release
   BuilderDebAsan:
     needs: [DockerHubPush]
-    runs-on: [self-hosted, builder]
-    steps: (same inline steps, with BUILD_NAME=package_asan)
+    uses: ./.github/workflows/reusable_build.yml
+    with:
+      build_name: package_asan
   BuilderDebUBsan:
     needs: [DockerHubPush]
-    runs-on: [self-hosted, builder]
-    steps: (same inline steps, with BUILD_NAME=package_ubsan)
+    uses: ./.github/workflows/reusable_build.yml
+    with:
+      build_name: package_ubsan
   BuilderDebTsan:
     needs: [DockerHubPush]
-    runs-on: [self-hosted, builder]
-    steps: (same inline steps, with BUILD_NAME=package_tsan)
+    uses: ./.github/workflows/reusable_build.yml
+    with:
+      build_name: package_tsan
   BuilderDebMsan:
     needs: [DockerHubPush]
-    runs-on: [self-hosted, builder]
-    steps: (same inline steps, with BUILD_NAME=package_msan)
+    uses: ./.github/workflows/reusable_build.yml
+    with:
+      build_name: package_msan
   BuilderDebDebug:
     needs: [DockerHubPush]
-    runs-on: [self-hosted, builder]
-    steps: (same inline steps plus an "Apply sparse checkout for contrib" step, with BUILD_NAME=package_debug)
+    uses: ./.github/workflows/reusable_build.yml
+    with:
+      build_name: package_debug
   ##########################################################################################
   ##################################### SPECIAL BUILDS #####################################
   ##########################################################################################
   BuilderBinClangTidy:
     needs: [DockerHubPush]
-    runs-on: [self-hosted, builder]
-    steps: (same inline steps, with BUILD_NAME=binary_tidy)
+    uses: ./.github/workflows/reusable_build.yml
+    with:
+      build_name: binary_tidy
   BuilderBinDarwin:
     needs: [DockerHubPush]
-    runs-on: [self-hosted, builder]
-    steps: (same inline steps plus an "Apply sparse checkout for contrib" step, with BUILD_NAME=binary_darwin)
+    uses: ./.github/workflows/reusable_build.yml
+    with:
+      build_name: binary_darwin
+      checkout_depth: 0
   BuilderBinAarch64:
     needs: [DockerHubPush]
-    runs-on: [self-hosted, builder]
-    steps: (same inline steps, with BUILD_NAME=binary_aarch64)
+    uses: ./.github/workflows/reusable_build.yml
+    with:
+      build_name: binary_aarch64
+      checkout_depth: 0
   BuilderBinFreeBSD:
     needs: [DockerHubPush]
-    runs-on: [self-hosted, builder]
-    steps: (same inline steps, with BUILD_NAME=binary_freebsd)
+    uses: ./.github/workflows/reusable_build.yml
+    with:
+      build_name: binary_freebsd
+      checkout_depth: 0
   BuilderBinDarwinAarch64:
     needs: [DockerHubPush]
-    runs-on: [self-hosted, builder]
-    steps: (same inline steps plus an "Apply sparse checkout for contrib" step, with BUILD_NAME=binary_darwin_aarch64)
+    uses: ./.github/workflows/reusable_build.yml
+    with:
+      build_name: binary_darwin_aarch64
+      checkout_depth: 0
   BuilderBinPPC64:
     needs: [DockerHubPush]
-    runs-on: [self-hosted, builder]
-    steps: (same inline steps, with BUILD_NAME=binary_ppc64le)
+    uses: ./.github/workflows/reusable_build.yml
+    with:
+      build_name: binary_ppc64le
+      checkout_depth: 0
   BuilderBinAmd64Compat:
     needs: [DockerHubPush]
-    runs-on: [self-hosted, builder]
-    steps: (same inline steps, with BUILD_NAME=binary_amd64_compat)
+    uses: ./.github/workflows/reusable_build.yml
+    with:
+      build_name: binary_amd64_compat
+      checkout_depth: 0
   BuilderBinAarch64V80Compat:
     needs: [DockerHubPush]
-    runs-on: [self-hosted, builder]
-    steps: (same inline steps, with BUILD_NAME=binary_aarch64_v80compat)
+    uses: ./.github/workflows/reusable_build.yml
+    with:
+      build_name: binary_aarch64_v80compat
+      checkout_depth: 0
   BuilderBinRISCV64:
     needs: [DockerHubPush]
-    runs-on: [self-hosted, builder]
-    steps: (same inline steps, with BUILD_NAME=binary_riscv64)
+    uses: ./.github/workflows/reusable_build.yml
+    with:
+      build_name: binary_riscv64
+      checkout_depth: 0
   BuilderBinS390X:
     needs: [DockerHubPush]
-    runs-on: [self-hosted, builder]
-    steps: (same inline steps, with BUILD_NAME=binary_s390x)
+    uses: ./.github/workflows/reusable_build.yml
+    with:
+      build_name: binary_s390x
+      checkout_depth: 0
   ############################################################################################
   ##################################### Docker images #######################################
   ############################################################################################
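Every one of the removed inline "Build" steps followed the same pattern, which the reusable workflow now presumably encapsulates: wipe the scratch directory, copy the workspace, and hand the build name to the CI driver script. Restated from the removed lines above with comments added (the BUILD_NAME value shown is just one of the configurations listed in this diff):

      - name: Build
        run: |
          sudo rm -fr "$TEMP_PATH"                  # start from an empty scratch dir
          mkdir -p "$TEMP_PATH"
          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"    # build from a copy, not the live checkout
          # BUILD_NAME (e.g. package_release) selects the configuration in build_check.py
          cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"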
786
.github/workflows/pull_request.yml
vendored
786
.github/workflows/pull_request.yml
vendored
@ -1,3 +1,4 @@
|
|||||||
|
# yamllint disable rule:comments-indentation
|
||||||
name: PullRequestCI
|
name: PullRequestCI
|
||||||
|
|
||||||
env:
|
env:
|
||||||
@ -246,771 +247,100 @@ jobs:
|
|||||||
#################################### ORDINARY BUILDS ####################################
|
#################################### ORDINARY BUILDS ####################################
|
||||||
#########################################################################################
|
#########################################################################################
|
||||||
BuilderDebRelease:
|
BuilderDebRelease:
|
||||||
needs: [DockerHubPush, FastTest, StyleCheck]
|
needs: [FastTest, StyleCheck]
|
||||||
runs-on: [self-hosted, builder]
|
uses: ./.github/workflows/reusable_build.yml
|
||||||
steps:
|
|
||||||
- name: Set envs
|
|
||||||
run: |
|
|
||||||
cat >> "$GITHUB_ENV" << 'EOF'
|
|
||||||
TEMP_PATH=${{runner.temp}}/build_check
|
|
||||||
IMAGES_PATH=${{runner.temp}}/images_path
|
|
||||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
|
||||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
|
||||||
BUILD_NAME=package_release
|
|
||||||
EOF
|
|
||||||
- name: Download changed images
|
|
||||||
uses: actions/download-artifact@v3
|
|
||||||
with:
|
with:
|
||||||
name: changed_images
|
build_name: package_release
|
||||||
path: ${{ env.IMAGES_PATH }}
|
checkout_depth: 0
|
||||||
- name: Check out repository code
|
|
||||||
uses: ClickHouse/checkout@v1
|
|
||||||
with:
|
|
||||||
clear-repository: true
|
|
||||||
fetch-depth: 0 # for performance artifact
|
|
||||||
filter: tree:0
|
|
||||||
submodules: true
|
|
||||||
- name: Build
|
|
||||||
run: |
|
|
||||||
sudo rm -fr "$TEMP_PATH"
|
|
||||||
mkdir -p "$TEMP_PATH"
|
|
||||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
|
||||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
|
||||||
- name: Upload build URLs to artifacts
|
|
||||||
if: ${{ success() || failure() }}
|
|
||||||
uses: actions/upload-artifact@v3
|
|
||||||
with:
|
|
||||||
name: ${{ env.BUILD_URLS }}
|
|
||||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
|
||||||
- name: Cleanup
|
|
||||||
if: always()
|
|
||||||
run: |
|
|
||||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
|
||||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
|
||||||
sudo rm -fr "$TEMP_PATH"
|
|
||||||
BuilderBinRelease:
|
|
||||||
needs: [DockerHubPush, FastTest, StyleCheck]
|
|
||||||
runs-on: [self-hosted, builder]
|
|
||||||
steps:
|
|
||||||
- name: Set envs
|
|
||||||
run: |
|
|
||||||
cat >> "$GITHUB_ENV" << 'EOF'
|
|
||||||
TEMP_PATH=${{runner.temp}}/build_check
|
|
||||||
IMAGES_PATH=${{runner.temp}}/images_path
|
|
||||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
|
||||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
|
||||||
BUILD_NAME=binary_release
|
|
||||||
EOF
|
|
||||||
- name: Download changed images
|
|
||||||
uses: actions/download-artifact@v3
|
|
||||||
with:
|
|
||||||
name: changed_images
|
|
||||||
path: ${{ env.IMAGES_PATH }}
|
|
||||||
- name: Check out repository code
|
|
||||||
uses: ClickHouse/checkout@v1
|
|
||||||
with:
|
|
||||||
clear-repository: true
|
|
||||||
submodules: true
|
|
||||||
- name: Build
|
|
||||||
run: |
|
|
||||||
sudo rm -fr "$TEMP_PATH"
|
|
||||||
mkdir -p "$TEMP_PATH"
|
|
||||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
|
||||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
|
||||||
- name: Upload build URLs to artifacts
|
|
||||||
if: ${{ success() || failure() }}
|
|
||||||
uses: actions/upload-artifact@v3
|
|
||||||
with:
|
|
||||||
name: ${{ env.BUILD_URLS }}
|
|
||||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
|
||||||
- name: Cleanup
|
|
||||||
if: always()
|
|
||||||
run: |
|
|
||||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
|
||||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
|
||||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
|
||||||
BuilderDebAarch64:
needs: [DockerHubPush, FastTest, StyleCheck]
needs: [FastTest, StyleCheck]
runs-on: [self-hosted, builder]
uses: ./.github/workflows/reusable_build.yml
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=package_aarch64
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
build_name: package_aarch64
path: ${{ runner.temp }}/images_path
checkout_depth: 0
- name: Check out repository code
BuilderBinRelease:
uses: ClickHouse/checkout@v1
needs: [FastTest, StyleCheck]
uses: ./.github/workflows/reusable_build.yml
with:
clear-repository: true
build_name: binary_release
submodules: true
fetch-depth: 0 # for performance artifact
filter: tree:0
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebAsan:
needs: [DockerHubPush, FastTest, StyleCheck]
needs: [FastTest, StyleCheck]
runs-on: [self-hosted, builder]
uses: ./.github/workflows/reusable_build.yml
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=package_asan
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
build_name: package_asan
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebUBsan:
needs: [DockerHubPush, FastTest, StyleCheck]
needs: [FastTest, StyleCheck]
runs-on: [self-hosted, builder]
uses: ./.github/workflows/reusable_build.yml
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=package_ubsan
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
build_name: package_ubsan
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebTsan:
needs: [DockerHubPush, FastTest, StyleCheck]
needs: [FastTest, StyleCheck]
runs-on: [self-hosted, builder]
uses: ./.github/workflows/reusable_build.yml
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=package_tsan
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
build_name: package_tsan
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebMsan:
needs: [DockerHubPush, FastTest, StyleCheck]
needs: [FastTest, StyleCheck]
runs-on: [self-hosted, builder]
uses: ./.github/workflows/reusable_build.yml
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=package_msan
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
build_name: package_msan
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebDebug:
needs: [DockerHubPush, FastTest, StyleCheck]
needs: [FastTest, StyleCheck]
runs-on: [self-hosted, builder]
uses: ./.github/workflows/reusable_build.yml
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=package_debug
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
build_name: package_debug
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
run: |
rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
"$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
du -hs "$GITHUB_WORKSPACE/contrib" ||:
find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
##########################################################################################
##################################### SPECIAL BUILDS #####################################
##########################################################################################
BuilderBinClangTidy:
needs: [DockerHubPush, FastTest, StyleCheck]
needs: [FastTest, StyleCheck]
runs-on: [self-hosted, builder]
uses: ./.github/workflows/reusable_build.yml
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=binary_tidy
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
build_name: binary_tidy
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinDarwin:
needs: [DockerHubPush, FastTest, StyleCheck]
needs: [FastTest, StyleCheck]
runs-on: [self-hosted, builder]
uses: ./.github/workflows/reusable_build.yml
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=binary_darwin
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
build_name: binary_darwin
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
run: |
rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
"$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
du -hs "$GITHUB_WORKSPACE/contrib" ||:
find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinAarch64:
needs: [DockerHubPush, FastTest, StyleCheck]
needs: [FastTest, StyleCheck]
runs-on: [self-hosted, builder]
uses: ./.github/workflows/reusable_build.yml
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=binary_aarch64
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
build_name: binary_aarch64
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinFreeBSD:
needs: [DockerHubPush, FastTest, StyleCheck]
needs: [FastTest, StyleCheck]
runs-on: [self-hosted, builder]
uses: ./.github/workflows/reusable_build.yml
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=binary_freebsd
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
build_name: binary_freebsd
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinDarwinAarch64:
needs: [DockerHubPush, FastTest, StyleCheck]
needs: [FastTest, StyleCheck]
runs-on: [self-hosted, builder]
uses: ./.github/workflows/reusable_build.yml
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=binary_darwin_aarch64
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
build_name: binary_darwin_aarch64
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
run: |
rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
"$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
du -hs "$GITHUB_WORKSPACE/contrib" ||:
find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinPPC64:
needs: [DockerHubPush, FastTest, StyleCheck]
needs: [FastTest, StyleCheck]
runs-on: [self-hosted, builder]
uses: ./.github/workflows/reusable_build.yml
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=binary_ppc64le
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
build_name: binary_ppc64le
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinAmd64Compat:
needs: [DockerHubPush, FastTest, StyleCheck]
needs: [FastTest, StyleCheck]
runs-on: [self-hosted, builder]
uses: ./.github/workflows/reusable_build.yml
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=binary_amd64_compat
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
build_name: binary_amd64_compat
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinAarch64V80Compat:
needs: [DockerHubPush, FastTest, StyleCheck]
needs: [FastTest, StyleCheck]
runs-on: [self-hosted, builder]
uses: ./.github/workflows/reusable_build.yml
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=binary_aarch64_v80compat
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
build_name: binary_aarch64_v80compat
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinRISCV64:
needs: [DockerHubPush, FastTest, StyleCheck]
needs: [FastTest, StyleCheck]
runs-on: [self-hosted, builder]
uses: ./.github/workflows/reusable_build.yml
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=binary_riscv64
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
build_name: binary_riscv64
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinS390X:
needs: [DockerHubPush, FastTest, StyleCheck]
needs: [FastTest, StyleCheck]
runs-on: [self-hosted, builder]
uses: ./.github/workflows/reusable_build.yml
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=binary_s390x
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
build_name: binary_s390x
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
############################################################################################
##################################### Docker images #######################################
############################################################################################
393
.github/workflows/release_branches.yml
vendored
@ -1,3 +1,4 @@
# yamllint disable rule:comments-indentation
name: ReleaseBranchCI

env:
@ -140,401 +141,53 @@ jobs:
#########################################################################################
BuilderDebRelease:
needs: [DockerHubPush]
runs-on: [self-hosted, builder]
uses: ./.github/workflows/reusable_build.yml
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=package_release
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
build_name: package_release
path: ${{ env.IMAGES_PATH }}
checkout_depth: 0
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
fetch-depth: 0 # otherwise we will have no info about contributors
filter: tree:0
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebAarch64:
needs: [DockerHubPush]
runs-on: [self-hosted, builder]
uses: ./.github/workflows/reusable_build.yml
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=package_aarch64
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
build_name: package_aarch64
path: ${{ runner.temp }}/images_path
checkout_depth: 0
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
fetch-depth: 0 # For a proper version and performance artifacts
filter: tree:0
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ runner.temp }}/build_check/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebAsan:
needs: [DockerHubPush]
runs-on: [self-hosted, builder]
uses: ./.github/workflows/reusable_build.yml
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=package_asan
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
build_name: package_asan
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebUBsan:
needs: [DockerHubPush]
runs-on: [self-hosted, builder]
uses: ./.github/workflows/reusable_build.yml
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=package_ubsan
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
build_name: package_ubsan
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebTsan:
needs: [DockerHubPush]
runs-on: [self-hosted, builder]
uses: ./.github/workflows/reusable_build.yml
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=package_tsan
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
build_name: package_tsan
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebMsan:
needs: [DockerHubPush]
runs-on: [self-hosted, builder]
uses: ./.github/workflows/reusable_build.yml
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=package_msan
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
build_name: package_msan
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebDebug:
needs: [DockerHubPush]
runs-on: [self-hosted, builder]
uses: ./.github/workflows/reusable_build.yml
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=package_debug
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
build_name: package_debug
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
run: |
rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
"$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
du -hs "$GITHUB_WORKSPACE/contrib" ||:
find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinDarwin:
needs: [DockerHubPush]
runs-on: [self-hosted, builder]
uses: ./.github/workflows/reusable_build.yml
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=binary_darwin
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
build_name: binary_darwin
path: ${{ env.IMAGES_PATH }}
checkout_depth: 0
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
fetch-depth: 0 # otherwise we will have no info about contributors
filter: tree:0
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
run: |
rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
"$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
du -hs "$GITHUB_WORKSPACE/contrib" ||:
find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinDarwinAarch64:
needs: [DockerHubPush]
runs-on: [self-hosted, builder]
uses: ./.github/workflows/reusable_build.yml
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=binary_darwin_aarch64
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
build_name: binary_darwin_aarch64
path: ${{ env.IMAGES_PATH }}
checkout_depth: 0
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
fetch-depth: 0 # otherwise we will have no info about contributors
filter: tree:0
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
run: |
rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
"$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
du -hs "$GITHUB_WORKSPACE/contrib" ||:
find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
############################################################################################
##################################### Docker images #######################################
############################################################################################
74
.github/workflows/reusable_build.yml
vendored
Normal file
@ -0,0 +1,74 @@
### For the pure soul wishes to move it to another place
# https://github.com/orgs/community/discussions/9050

name: Build ClickHouse
'on':
workflow_call:
inputs:
build_name:
description: the value of build type from tests/ci/ci_config.py
required: true
type: string
checkout_depth:
description: the value of the git shallow checkout
required: false
type: number
default: 1
runner_type:
description: the label of runner to use
default: builder
type: string
additional_envs:
description: additional ENV variables to setup the job
type: string

jobs:
Build:
name: Build-${{inputs.build_name}}
runs-on: [self-hosted, '${{inputs.runner_type}}']
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
fetch-depth: ${{inputs.checkout_depth}}
filter: tree:0
- name: Set build envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
IMAGES_PATH=${{runner.temp}}/images_path
GITHUB_JOB_OVERRIDDEN=Build-${{inputs.build_name}}
${{inputs.additional_envs}}
EOF
python3 "$GITHUB_WORKSPACE"/tests/ci/ci_config.py --build-name "${{inputs.build_name}}" >> "$GITHUB_ENV"
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
# This step is done in GITHUB_WORKSPACE,
# because it's broken in REPO_COPY for some reason
if: ${{ env.BUILD_SPARSE_CHECKOUT == 'true' }}
run: |
rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
"$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
du -hs "$GITHUB_WORKSPACE/contrib" ||:
find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
- name: Common setup
uses: ./.github/actions/common_setup
with:
job_type: build_check
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Build
run: |
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Clean
uses: ./.github/actions/clean
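For context, a minimal sketch of how a caller job invokes this reusable workflow, assembled from the inputs declared above and the calls in the workflow diffs (the job and build names here are illustrative; `build_name` must match an entry in tests/ci/ci_config.py):

```yaml
# Fragment of a caller workflow's "jobs:" section (illustrative)
BuilderDebRelease:
  needs: [DockerHubPush]
  uses: ./.github/workflows/reusable_build.yml
  with:
    build_name: package_release
    checkout_depth: 0  # full history, e.g. for version info and performance artifacts
```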
3
.gitmodules
vendored
@ -1,3 +1,6 @@
# Please do not use 'branch = ...' tags with submodule entries. Such tags make updating submodules a
# little bit more convenient but they do *not* specify the tracked submodule branch. Thus, they are
# more confusing than useful.
[submodule "contrib/zstd"]
path = contrib/zstd
url = https://github.com/facebook/zstd
|
2
contrib/grpc
vendored
2
contrib/grpc
vendored
@ -1 +1 @@
|
|||||||
Subproject commit bef8212d1e01f99e406c282ceab3d42da08e09ce
|
Subproject commit 267af8c3a1ea4a5a4d9e5a070ad2d1ac7c701923
|
@ -6,12 +6,12 @@ FILES_TO_CHECKOUT=$(git rev-parse --git-dir)/info/sparse-checkout
echo '/*' > $FILES_TO_CHECKOUT
echo '!/test/*' >> $FILES_TO_CHECKOUT
echo '/test/build/*' >> $FILES_TO_CHECKOUT
echo '/test/core/tsi/alts/fake_handshaker/*' >> $FILES_TO_CHECKOUT
echo '!/tools/*' >> $FILES_TO_CHECKOUT
echo '/tools/codegen/*' >> $FILES_TO_CHECKOUT
echo '!/examples/*' >> $FILES_TO_CHECKOUT
echo '!/doc/*' >> $FILES_TO_CHECKOUT
# FIXME why do we need csharp?
echo '!/src/csharp/*' >> $FILES_TO_CHECKOUT
#echo '!/src/csharp/*' >> $FILES_TO_CHECKOUT
echo '!/src/python/*' >> $FILES_TO_CHECKOUT
echo '!/src/objective-c/*' >> $FILES_TO_CHECKOUT
echo '!/src/php/*' >> $FILES_TO_CHECKOUT
11
contrib/update-submodules.sh
vendored
@ -1,11 +1,12 @@
#!/bin/sh

set -e

WORKDIR=$(dirname "$0")
SCRIPT_PATH=$(realpath "$0")
WORKDIR=$(readlink -f "${WORKDIR}")
SCRIPT_DIR=$(dirname "${SCRIPT_PATH}")
GIT_DIR=$(git -C "$SCRIPT_DIR" rev-parse --show-toplevel)
cd $GIT_DIR

"$WORKDIR/sparse-checkout/setup-sparse-checkout.sh"
contrib/sparse-checkout/setup-sparse-checkout.sh
git submodule init
git submodule sync
git submodule update --depth=1
git config --file .gitmodules --get-regexp .*path | sed 's/[^ ]* //' | xargs -I _ --max-procs 64 git submodule update --depth=1 --single-branch _
|
|||||||
# lts / testing / prestable / etc
|
# lts / testing / prestable / etc
|
||||||
ARG REPO_CHANNEL="stable"
|
ARG REPO_CHANNEL="stable"
|
||||||
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
|
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
|
||||||
ARG VERSION="23.10.1.1976"
|
ARG VERSION="23.10.2.13"
|
||||||
ARG PACKAGES="clickhouse-keeper"
|
ARG PACKAGES="clickhouse-keeper"
|
||||||
|
|
||||||
# user/group precreated explicitly with fixed uid/gid on purpose.
|
# user/group precreated explicitly with fixed uid/gid on purpose.
|
||||||
|
@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
|
|||||||
# lts / testing / prestable / etc
|
# lts / testing / prestable / etc
|
||||||
ARG REPO_CHANNEL="stable"
|
ARG REPO_CHANNEL="stable"
|
||||||
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
|
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
|
||||||
ARG VERSION="23.10.1.1976"
|
ARG VERSION="23.10.2.13"
|
||||||
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
|
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
|
||||||
|
|
||||||
# user/group precreated explicitly with fixed uid/gid on purpose.
|
# user/group precreated explicitly with fixed uid/gid on purpose.
|
||||||
|
@ -30,7 +30,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
|
|||||||
|
|
||||||
ARG REPO_CHANNEL="stable"
|
ARG REPO_CHANNEL="stable"
|
||||||
ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
|
ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
|
||||||
ARG VERSION="23.10.1.1976"
|
ARG VERSION="23.10.2.13"
|
||||||
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
|
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
|
||||||
|
|
||||||
# set non-empty deb_location_url url to create a docker image
|
# set non-empty deb_location_url url to create a docker image
|
||||||
|
@ -53,31 +53,28 @@ function configure()
> /etc/clickhouse-server/config.d/keeper_port.xml.tmp
sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml

function randomize_config_boolean_value {
function randomize_keeper_config_boolean_value {
value=$(($RANDOM % 2))
sudo cat /etc/clickhouse-server/config.d/keeper_port.xml \
sudo cat /etc/clickhouse-server/config.d/$2.xml \
| sed "s|<$1>[01]</$1>|<$1>$value</$1>|" \
> /etc/clickhouse-server/config.d/keeper_port.xml.tmp
> /etc/clickhouse-server/config.d/$2.xml.tmp
sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml
sudo mv /etc/clickhouse-server/config.d/$2.xml.tmp /etc/clickhouse-server/config.d/$2.xml
}

if [[ -n "$RANDOMIZE_KEEPER_FEATURE_FLAGS" ]] && [[ "$RANDOMIZE_KEEPER_FEATURE_FLAGS" -eq 1 ]]; then
# Randomize all Keeper feature flags
randomize_config_boolean_value filtered_list
randomize_config_boolean_value filtered_list keeper_port
randomize_config_boolean_value multi_read
randomize_config_boolean_value multi_read keeper_port
randomize_config_boolean_value check_not_exists
randomize_config_boolean_value check_not_exists keeper_port
randomize_config_boolean_value create_if_not_exists
randomize_config_boolean_value create_if_not_exists keeper_port
fi

sudo chown clickhouse /etc/clickhouse-server/config.d/keeper_port.xml
sudo chgrp clickhouse /etc/clickhouse-server/config.d/keeper_port.xml

#Randomize merge tree setting allow_experimental_block_number_column
randomize_config_boolean_value use_compression zookeeper
value=$(($RANDOM % 2))
sudo cat /etc/clickhouse-server/config.d/merge_tree_settings.xml \
randomize_config_boolean_value allow_experimental_block_number_column merge_tree_settings
| sed "s|<allow_experimental_block_number_column>[01]</allow_experimental_block_number_column>|<allow_experimental_block_number_column>$value</allow_experimental_block_number_column>|" \
> /etc/clickhouse-server/config.d/merge_tree_settings.xml.tmp
sudo mv /etc/clickhouse-server/config.d/merge_tree_settings.xml.tmp /etc/clickhouse-server/config.d/merge_tree_settings.xml

# for clickhouse-server (via service)
echo "ASAN_OPTIONS='malloc_context_size=10 verbosity=1 allocator_release_to_os_interval_ms=10000'" >> /etc/environment
18
docs/changelogs/v23.10.2.13-stable.md
Normal file
@ -0,0 +1,18 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.10.2.13-stable (65d8522bb1d) FIXME as compared to v23.10.1.1976-stable (13adae0e42f)

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix a crash during table loading on startup [#56232](https://github.com/ClickHouse/ClickHouse/pull/56232) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix segfault in signal handler for Keeper [#56266](https://github.com/ClickHouse/ClickHouse/pull/56266) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix incomplete query result for UNION in view() function. [#56274](https://github.com/ClickHouse/ClickHouse/pull/56274) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix inconsistency of "cast('0' as DateTime64(3))" and "cast('0' as Nullable(DateTime64(3)))" [#56286](https://github.com/ClickHouse/ClickHouse/pull/56286) ([李扬](https://github.com/taiyang-li)).
* Fix crash in case of adding a column with type Object(JSON) [#56307](https://github.com/ClickHouse/ClickHouse/pull/56307) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix buffer overflow in T64 [#56434](https://github.com/ClickHouse/ClickHouse/pull/56434) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

14
docs/changelogs/v23.3.16.7-lts.md
Normal file
@ -0,0 +1,14 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.3.16.7-lts (fb4125cc92a) FIXME as compared to v23.3.15.29-lts (218336662e4)

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix: avoid using regex match, possibly containing alternation, as a key condition. [#54696](https://github.com/ClickHouse/ClickHouse/pull/54696) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix buffer overflow in T64 [#56434](https://github.com/ClickHouse/ClickHouse/pull/56434) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

21
docs/changelogs/v23.8.6.16-lts.md
Normal file
@ -0,0 +1,21 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.8.6.16-lts (077df679bed) FIXME as compared to v23.8.5.16-lts (e8a1af5fe2f)

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix rare case of CHECKSUM_DOESNT_MATCH error [#54549](https://github.com/ClickHouse/ClickHouse/pull/54549) ([alesapin](https://github.com/alesapin)).
* Fix: avoid using regex match, possibly containing alternation, as a key condition. [#54696](https://github.com/ClickHouse/ClickHouse/pull/54696) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix a crash during table loading on startup [#56232](https://github.com/ClickHouse/ClickHouse/pull/56232) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix segfault in signal handler for Keeper [#56266](https://github.com/ClickHouse/ClickHouse/pull/56266) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix buffer overflow in T64 [#56434](https://github.com/ClickHouse/ClickHouse/pull/56434) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Improve enrich image [#55793](https://github.com/ClickHouse/ClickHouse/pull/55793) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

17
docs/changelogs/v23.9.4.11-stable.md
Normal file
@ -0,0 +1,17 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.9.4.11-stable (74c1f49dd6a) FIXME as compared to v23.9.3.12-stable (b7230b06563)

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix wrong query result when http_write_exception_in_output_format=1 [#56135](https://github.com/ClickHouse/ClickHouse/pull/56135) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix schema cache for fallback JSON->JSONEachRow with changed settings [#56172](https://github.com/ClickHouse/ClickHouse/pull/56172) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix a crash during table loading on startup [#56232](https://github.com/ClickHouse/ClickHouse/pull/56232) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix segfault in signal handler for Keeper [#56266](https://github.com/ClickHouse/ClickHouse/pull/56266) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix buffer overflow in T64 [#56434](https://github.com/ClickHouse/ClickHouse/pull/56434) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

@ -23,43 +23,34 @@ Create a fork of ClickHouse repository. To do that please click on the “fork

 The development process consists of first committing the intended changes into your fork of ClickHouse and then creating a “pull request” for these changes to be accepted into the main repository (ClickHouse/ClickHouse).

-To work with git repositories, please install `git`.
+To work with Git repositories, please install `git`. To do that in Ubuntu you would run in the command line terminal:
-
-To do that in Ubuntu you would run in the command line terminal:

 sudo apt update
 sudo apt install git

-A brief manual on using Git can be found here: https://education.github.com/git-cheat-sheet-education.pdf.
+A brief manual on using Git can be found [here](https://education.github.com/git-cheat-sheet-education.pdf).
-For a detailed manual on Git see https://git-scm.com/book/en/v2.
+For a detailed manual on Git see [here](https://git-scm.com/book/en/v2).

 ## Cloning a Repository to Your Development Machine {#cloning-a-repository-to-your-development-machine}

 Next, you need to download the source files onto your working machine. This is called “to clone a repository” because it creates a local copy of the repository on your working machine.

-In the command line terminal run:
+Run in your terminal:

-git clone --shallow-submodules git@github.com:your_github_username/ClickHouse.git
+git clone git@github.com:your_github_username/ClickHouse.git # replace placeholder with your GitHub user name
 cd ClickHouse

-Or (if you'd like to use sparse checkout for submodules and avoid checking out unneeded files):
+This command will create a directory `ClickHouse/` containing the source code of ClickHouse. If you specify a custom checkout directory (after the URL), it is important that this path does not contain whitespaces as it may lead to problems with the build system.

-git clone git@github.com:your_github_username/ClickHouse.git
+To make library dependencies available for the build, the ClickHouse repository uses Git submodules, i.e. references to external repositories. These are not checked out by default. To do so, you can either
-cd ClickHouse
-./contrib/update-submodules.sh

-Note: please, substitute *your_github_username* with what is appropriate!
+- run `git clone` with option `--recurse-submodules`,

-This command will create a directory `ClickHouse` containing the working copy of the project.
+- if `git clone` did not check out submodules, run `git submodule update --init --jobs <N>` (e.g. `<N> = 12` to parallelize the checkout) to achieve the same as the previous alternative, or

-It is important that the path to the working directory contains no whitespaces as it may lead to problems with running the build system.
+- if `git clone` did not check out submodules and you like to use [sparse](https://github.blog/2020-01-17-bring-your-monorepo-down-to-size-with-sparse-checkout/) and [shallow](https://github.blog/2020-12-21-get-up-to-speed-with-partial-clone-and-shallow-clone/) submodule checkout to omit unneeded files and history in submodules to save space (ca. 5 GB instead of ca. 15 GB), run `./contrib/update-submodules.sh`. Not really recommended as it generally makes working with submodules less convenient and slower.

-Please note that ClickHouse repository uses `submodules`. That is what the references to additional repositories are called (i.e. external libraries on which the project depends). It means that when cloning the repository you need to specify the `--recursive` flag as in the example above. If the repository has been cloned without submodules, to download them you need to run the following:
+You can check the Git status with the command: `git submodule status`.

-git submodule init
-git submodule update

-You can check the status with the command: `git submodule status`.

 If you get the following error message:

@ -83,36 +74,6 @@ You can also add original ClickHouse repo address to your local repository to pu

 After successfully running this command you will be able to pull updates from the main ClickHouse repo by running `git pull upstream master`.

-### Working with Submodules {#working-with-submodules}
-
-Working with submodules in git could be painful. Next commands will help to manage it:
-
-# ! each command accepts
-# Update remote URLs for submodules. Barely rare case
-git submodule sync
-# Add new submodules
-git submodule init
-# Update existing submodules to the current state
-git submodule update
-# Two last commands could be merged together
-git submodule update --init
-
-The next commands would help you to reset all submodules to the initial state (!WARNING! - any changes inside will be deleted):
-
-# Synchronizes submodules' remote URL with .gitmodules
-git submodule sync
-# Update the registered submodules with initialize not yet initialized
-git submodule update --init
-# Reset all changes done after HEAD
-git submodule foreach git reset --hard
-# Clean files from .gitignore
-git submodule foreach git clean -xfd
-# Repeat last 4 commands for all submodule
-git submodule foreach git submodule sync
-git submodule foreach git submodule update --init
-git submodule foreach git submodule foreach git reset --hard
-git submodule foreach git submodule foreach git clean -xfd
-
 ## Build System {#build-system}

 ClickHouse uses CMake and Ninja for building.
@ -2,9 +2,10 @@
 slug: /en/engines/table-engines/integrations/materialized-postgresql
 sidebar_position: 130
 sidebar_label: MaterializedPostgreSQL
-title: MaterializedPostgreSQL
 ---

+# [experimental] MaterializedPostgreSQL
+
 Creates ClickHouse table with an initial data dump of PostgreSQL table and starts replication process, i.e. executes background job to apply new changes as they happen on PostgreSQL table in the remote PostgreSQL database.

 If more than one table is required, it is highly recommended to use the [MaterializedPostgreSQL](../../../engines/database-engines/materialized-postgresql.md) database engine instead of the table engine and use the `materialized_postgresql_tables_list` setting, which specifies the tables to be replicated (will also be possible to add database `schema`). It will be much better in terms of CPU, fewer connections and fewer replication slots inside the remote PostgreSQL database.
@ -11,7 +11,8 @@ ClickHouse runs sampling profiler that allows analyzing query execution. Using p

 Query profiler is automatically enabled in ClickHouse Cloud and you can run a sample query as follows

-:::note If you are running the following query in ClickHouse Cloud, make sure to change `FROM system.trace_log` to `FROM clusterAllReplicas(default, system.trace_log)` to select from all nodes of the cluster :::
+:::note If you are running the following query in ClickHouse Cloud, make sure to change `FROM system.trace_log` to `FROM clusterAllReplicas(default, system.trace_log)` to select from all nodes of the cluster
+:::

 ``` sql
 SELECT
@ -2427,6 +2427,8 @@ This section contains the following parameters:
 * hostname_levenshtein_distance - just like nearest_hostname, but it compares hostname in a levenshtein distance manner.
 * first_or_random - selects the first ZooKeeper node, if it's not available then randomly selects one of remaining ZooKeeper nodes.
 * round_robin - selects the first ZooKeeper node, if reconnection happens selects the next.
+- `use_compression` — If set to true, enables compression in Keeper protocol.

 **Example configuration**

@ -3943,6 +3943,17 @@ Possible values:

 Default value: `''`.

+## preferred_optimize_projection_name {#preferred_optimize_projection_name}
+
+If it is set to a non-empty string, ClickHouse will try to apply specified projection in query.
+
+Possible values:
+
+- string: name of preferred projection
+
+Default value: `''`.
+
 ## alter_sync {#alter-sync}

 Allows to set up waiting for actions to be executed on replicas by [ALTER](../../sql-reference/statements/alter/index.md), [OPTIMIZE](../../sql-reference/statements/optimize.md) or [TRUNCATE](../../sql-reference/statements/truncate.md) queries.
@ -20,6 +20,12 @@

 namespace fs = std::filesystem;

+namespace ProfileEvents
+{
+extern const Event BackupEntriesCollectorMicroseconds;
+extern const Event BackupEntriesCollectorForTablesDataMicroseconds;
+extern const Event BackupEntriesCollectorRunPostTasksMicroseconds;
+}

 namespace DB
 {
|
|||||||
const BackupSettings & backup_settings_,
|
const BackupSettings & backup_settings_,
|
||||||
std::shared_ptr<IBackupCoordination> backup_coordination_,
|
std::shared_ptr<IBackupCoordination> backup_coordination_,
|
||||||
const ReadSettings & read_settings_,
|
const ReadSettings & read_settings_,
|
||||||
const ContextPtr & context_)
|
const ContextPtr & context_,
|
||||||
|
ThreadPool & threadpool_)
|
||||||
: backup_query_elements(backup_query_elements_)
|
: backup_query_elements(backup_query_elements_)
|
||||||
, backup_settings(backup_settings_)
|
, backup_settings(backup_settings_)
|
||||||
, backup_coordination(backup_coordination_)
|
, backup_coordination(backup_coordination_)
|
||||||
@ -101,6 +108,7 @@ BackupEntriesCollector::BackupEntriesCollector(
|
|||||||
context->getSettingsRef().backup_restore_keeper_max_retries,
|
context->getSettingsRef().backup_restore_keeper_max_retries,
|
||||||
context->getSettingsRef().backup_restore_keeper_retry_initial_backoff_ms,
|
context->getSettingsRef().backup_restore_keeper_retry_initial_backoff_ms,
|
||||||
context->getSettingsRef().backup_restore_keeper_retry_max_backoff_ms)
|
context->getSettingsRef().backup_restore_keeper_retry_max_backoff_ms)
|
||||||
|
, threadpool(threadpool_)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -108,6 +116,8 @@ BackupEntriesCollector::~BackupEntriesCollector() = default;
|
|||||||
|
|
||||||
BackupEntries BackupEntriesCollector::run()
|
BackupEntries BackupEntriesCollector::run()
|
||||||
{
|
{
|
||||||
|
auto timer = DB::CurrentThread::getProfileEvents().timer(ProfileEvents::BackupEntriesCollectorMicroseconds);
|
||||||
|
|
||||||
/// run() can be called onle once.
|
/// run() can be called onle once.
|
||||||
if (!current_stage.empty())
|
if (!current_stage.empty())
|
||||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Already making backup entries");
|
throw Exception(ErrorCodes::LOGICAL_ERROR, "Already making backup entries");
|
||||||
@ -133,11 +143,19 @@ BackupEntries BackupEntriesCollector::run()

 /// Make backup entries for the data of the found tables.
 setStage(Stage::EXTRACTING_DATA_FROM_TABLES);

+{
+auto timer2 = DB::CurrentThread::getProfileEvents().timer(ProfileEvents::BackupEntriesCollectorForTablesDataMicroseconds);
 makeBackupEntriesForTablesData();
+}

 /// Run all the tasks added with addPostCollectingTask().
 setStage(Stage::RUNNING_POST_TASKS);

+{
+auto timer2 = DB::CurrentThread::getProfileEvents().timer(ProfileEvents::BackupEntriesCollectorRunPostTasksMicroseconds);
 runPostTasks();
+}

 /// No more backup entries or tasks are allowed after this point.

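The two hunks above wrap the whole collection run and each of its stages in scoped profile-event timers, so the elapsed time is accumulated when the timer object leaves its block. A minimal self-contained sketch of the same scoping idea, using only the standard library (the `StageTimer` class and the counters are illustrative, not ClickHouse's ProfileEvents API):

```cpp
#include <chrono>
#include <cstdio>
#include <thread>

// Illustrative scoped timer: accumulates elapsed microseconds into a counter
// when it goes out of scope, mirroring how each backup stage is measured.
struct StageTimer
{
    explicit StageTimer(long long & counter_) : counter(counter_), start(std::chrono::steady_clock::now()) {}
    ~StageTimer()
    {
        auto elapsed = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::steady_clock::now() - start);
        counter += elapsed.count();
    }
    long long & counter;
    std::chrono::steady_clock::time_point start;
};

int main()
{
    long long tables_data_us = 0;
    long long post_tasks_us = 0;

    {
        StageTimer timer(tables_data_us);                               // covers only this block
        std::this_thread::sleep_for(std::chrono::milliseconds(5));      // stand-in for the tables-data stage
    }
    {
        StageTimer timer(post_tasks_us);
        std::this_thread::sleep_for(std::chrono::milliseconds(2));      // stand-in for the post-tasks stage
    }

    std::printf("tables data: %lld us, post tasks: %lld us\n", tables_data_us, post_tasks_us);
}
```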
@ -738,8 +756,20 @@ void BackupEntriesCollector::makeBackupEntriesForTablesData()
 if (backup_settings.structure_only)
 return;

+std::vector<std::future<void>> futures;
 for (const auto & table_name : table_infos | boost::adaptors::map_keys)
+{
+futures.push_back(scheduleFromThreadPool<void>([&]()
+{
 makeBackupEntriesForTableData(table_name);
+}, threadpool, "BackupCollect"));
+}
+/// Wait for all tasks.
+for (auto & future : futures)
+future.wait();
+/// Make sure there is no exception.
+for (auto & future : futures)
+future.get();
 }

 void BackupEntriesCollector::makeBackupEntriesForTableData(const QualifiedTableName & table_name)
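The hunk above fans the per-table work out over a thread pool, waits on every future, and only then calls `get()` so the first stored exception is rethrown. A minimal sketch of the same pattern under the assumption that `std::async` stands in for ClickHouse's `scheduleFromThreadPool`; the table names and the worker function are made up for illustration:

```cpp
#include <future>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

// Stand-in for the per-table backup work; throws to show error propagation.
void make_backup_entries_for_table(const std::string & table)
{
    if (table.empty())
        throw std::runtime_error("empty table name");
    std::cout << "collected entries for " << table << '\n';
}

int main()
{
    std::vector<std::string> tables{"db.users", "db.events", "db.metrics"};

    // Fan the per-table work out, keeping one future per task.
    std::vector<std::future<void>> futures;
    for (const auto & table : tables)
        futures.push_back(std::async(std::launch::async, make_backup_entries_for_table, table));

    // First wait for everything, then call get() to rethrow the first stored exception, if any.
    for (auto & future : futures)
        future.wait();
    for (auto & future : futures)
        future.get();
}
```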
@ -775,20 +805,28 @@ void BackupEntriesCollector::makeBackupEntriesForTableData(const QualifiedTableN
 }
 }

-void BackupEntriesCollector::addBackupEntry(const String & file_name, BackupEntryPtr backup_entry)
+void BackupEntriesCollector::addBackupEntryUnlocked(const String & file_name, BackupEntryPtr backup_entry)
 {
 if (current_stage == Stage::WRITING_BACKUP)
 throw Exception(ErrorCodes::LOGICAL_ERROR, "Adding backup entries is not allowed");
 backup_entries.emplace_back(file_name, backup_entry);
 }

+void BackupEntriesCollector::addBackupEntry(const String & file_name, BackupEntryPtr backup_entry)
+{
+std::lock_guard lock(mutex);
+addBackupEntryUnlocked(file_name, backup_entry);
+}

 void BackupEntriesCollector::addBackupEntry(const std::pair<String, BackupEntryPtr> & backup_entry)
 {
-addBackupEntry(backup_entry.first, backup_entry.second);
+std::lock_guard lock(mutex);
+addBackupEntryUnlocked(backup_entry.first, backup_entry.second);
 }

 void BackupEntriesCollector::addBackupEntries(const BackupEntries & backup_entries_)
 {
+std::lock_guard lock(mutex);
 if (current_stage == Stage::WRITING_BACKUP)
 throw Exception(ErrorCodes::LOGICAL_ERROR, "Adding of backup entries is not allowed");
 insertAtEnd(backup_entries, backup_entries_);
@ -796,6 +834,7 @@ void BackupEntriesCollector::addBackupEntries(const BackupEntries & backup_entri

 void BackupEntriesCollector::addBackupEntries(BackupEntries && backup_entries_)
 {
+std::lock_guard lock(mutex);
 if (current_stage == Stage::WRITING_BACKUP)
 throw Exception(ErrorCodes::LOGICAL_ERROR, "Adding of backup entries is not allowed");
 insertAtEnd(backup_entries, std::move(backup_entries_));
@ -803,6 +842,7 @@ void BackupEntriesCollector::addBackupEntries(BackupEntries && backup_entries_)

 void BackupEntriesCollector::addPostTask(std::function<void()> task)
 {
+std::lock_guard lock(mutex);
 if (current_stage == Stage::WRITING_BACKUP)
 throw Exception(ErrorCodes::LOGICAL_ERROR, "Adding of post tasks is not allowed");
 post_tasks.push(std::move(task));
@ -824,6 +864,7 @@ void BackupEntriesCollector::runPostTasks()

 size_t BackupEntriesCollector::getAccessCounter(AccessEntityType type)
 {
+std::lock_guard lock(mutex);
 access_counters.resize(static_cast<size_t>(AccessEntityType::MAX));
 return access_counters[static_cast<size_t>(type)]++;
 }
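Because the entries are now collected from several threads, every public `add*` method takes the mutex once and delegates to an `*Unlocked` helper, so no public method ends up locking twice. A minimal sketch of that locked-wrapper/unlocked-implementation idiom (the `EntryCollector` class and its members are illustrative, not the real ClickHouse types):

```cpp
#include <mutex>
#include <string>
#include <utility>
#include <vector>

// Public methods take the lock once and delegate to a private *Unlocked helper.
class EntryCollector
{
public:
    void addEntry(const std::string & name, std::string data)
    {
        std::lock_guard lock(mutex);
        addEntryUnlocked(name, std::move(data));
    }

    void addEntry(const std::pair<std::string, std::string> & entry)
    {
        std::lock_guard lock(mutex);
        addEntryUnlocked(entry.first, entry.second);
    }

private:
    void addEntryUnlocked(const std::string & name, std::string data)
    {
        entries.emplace_back(name, std::move(data));   // caller already holds the mutex
    }

    std::mutex mutex;
    std::vector<std::pair<std::string, std::string>> entries;
};

int main()
{
    EntryCollector collector;
    collector.addEntry("metadata.sql", "CREATE TABLE ...");
    collector.addEntry({"data.bin", "..."});
}
```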
@ -31,7 +31,8 @@ public:
 const BackupSettings & backup_settings_,
 std::shared_ptr<IBackupCoordination> backup_coordination_,
 const ReadSettings & read_settings_,
-const ContextPtr & context_);
+const ContextPtr & context_,
+ThreadPool & threadpool_);
 ~BackupEntriesCollector();

 /// Collects backup entries and returns the result.
@ -90,6 +91,8 @@ private:
 void makeBackupEntriesForTablesData();
 void makeBackupEntriesForTableData(const QualifiedTableName & table_name);

+void addBackupEntryUnlocked(const String & file_name, BackupEntryPtr backup_entry);

 void runPostTasks();

 Strings setStage(const String & new_stage, const String & message = "");
@ -170,6 +173,9 @@ private:
 BackupEntries backup_entries;
 std::queue<std::function<void()>> post_tasks;
 std::vector<size_t> access_counters;

+ThreadPool & threadpool;
+std::mutex mutex;
 };

 }
@ -1,6 +1,7 @@
 #pragma once

 #include <Backups/BackupStatus.h>
+#include <Common/ProfileEvents.h>

 namespace DB
 {
@ -47,6 +48,9 @@ struct BackupOperationInfo
 std::exception_ptr exception;
 String error_message;

+/// Profile events collected during the backup.
+std::shared_ptr<ProfileEvents::Counters::Snapshot> profile_counters = nullptr;

 std::chrono::system_clock::time_point start_time;
 std::chrono::system_clock::time_point end_time;
 };
@ -550,7 +550,9 @@ void BackupsWorker::doBackup(
 /// Prepare backup entries.
 BackupEntries backup_entries;
 {
-BackupEntriesCollector backup_entries_collector{backup_query->elements, backup_settings, backup_coordination, backup_create_params.read_settings, context};
+BackupEntriesCollector backup_entries_collector(
+backup_query->elements, backup_settings, backup_coordination,
+backup_create_params.read_settings, context, getThreadPool(ThreadPoolId::BACKUP_MAKE_FILES_LIST));
 backup_entries = backup_entries_collector.run();
 }

@ -1056,6 +1058,7 @@ void BackupsWorker::setStatus(const String & id, BackupStatus status, bool throw
 auto old_status = info.status;

 info.status = status;
+info.profile_counters = std::make_shared<ProfileEvents::Counters::Snapshot>(CurrentThread::getProfileEvents().getPartiallyAtomicSnapshot());

 if (isFinalStatus(status))
 info.end_time = std::chrono::system_clock::now();
@ -89,6 +89,17 @@ add_headers_and_sources(clickhouse_common_io IO/Resource)
 add_headers_and_sources(clickhouse_common_io IO/S3)
 list (REMOVE_ITEM clickhouse_common_io_sources Common/malloc.cpp Common/new_delete.cpp)

+add_headers_and_sources(clickhouse_compression Compression)
+add_headers_and_sources(clickhouse_compression Parsers)
+add_headers_and_sources(clickhouse_compression Core)
+#Included these specific files to avoid linking grpc
+add_glob(clickhouse_compression_headers Server/ServerType.h)
+add_glob(clickhouse_compression_sources Server/ServerType.cpp)
+add_headers_and_sources(clickhouse_compression Common/SSH)
+add_library(clickhouse_compression ${clickhouse_compression_headers} ${clickhouse_compression_sources})

 add_headers_and_sources(dbms Disks/IO)
 add_headers_and_sources(dbms Disks/ObjectStorages)
 if (TARGET ch_contrib::sqlite)
@ -270,6 +281,7 @@ target_include_directories (clickhouse_common_io PUBLIC "${ClickHouse_SOURCE_DIR

 if (TARGET ch_contrib::llvm)
 dbms_target_link_libraries (PUBLIC ch_contrib::llvm)
+target_link_libraries (clickhouse_compression PUBLIC ch_contrib::llvm)
 endif ()

 if (TARGET ch_contrib::gwp_asan)
@ -293,6 +305,18 @@ target_link_libraries (clickhouse_common_io
 common
 ch_contrib::double_conversion
 ch_contrib::dragonbox_to_chars
+ch_contrib::libdivide
+)
+
+target_link_libraries (clickhouse_compression
+PUBLIC
+string_utils
+pcg_random
+clickhouse_parsers
+PRIVATE
+ch_contrib::lz4
+ch_contrib::roaring
 )

 # Use X86 AVX2/AVX512 instructions to accelerate filter operations
@ -336,6 +360,7 @@ if (TARGET ch_contrib::crc32-vpmsum)

 if (TARGET ch_contrib::ssh)
 target_link_libraries(clickhouse_common_io PUBLIC ch_contrib::ssh)
+target_link_libraries(clickhouse_compression PUBLIC ch_contrib::ssh)
 endif()

 dbms_target_link_libraries(PUBLIC ch_contrib::abseil_swiss_tables)
@ -359,10 +384,12 @@ endif()

 if (TARGET ch_contrib::krb5)
 dbms_target_link_libraries(PRIVATE ch_contrib::krb5)
+target_link_libraries (clickhouse_compression PRIVATE ch_contrib::krb5)
 endif()

 if (TARGET ch_contrib::nuraft)
 dbms_target_link_libraries(PUBLIC ch_contrib::nuraft)
+target_link_libraries (clickhouse_compression PUBLIC ch_contrib::nuraft)
 endif()

 dbms_target_link_libraries (
@ -432,6 +459,7 @@ endif ()

 if (TARGET ch_contrib::ldap)
 dbms_target_link_libraries (PRIVATE ch_contrib::ldap ch_contrib::lber)
+target_link_libraries (clickhouse_compression PRIVATE ch_contrib::ldap ch_contrib::lber)
 endif ()
 dbms_target_link_libraries (PUBLIC ch_contrib::sparsehash)

|
|||||||
M(IOUringCQEsCompleted, "Total number of successfully completed io_uring CQEs") \
|
M(IOUringCQEsCompleted, "Total number of successfully completed io_uring CQEs") \
|
||||||
M(IOUringCQEsFailed, "Total number of completed io_uring CQEs with failures") \
|
M(IOUringCQEsFailed, "Total number of completed io_uring CQEs with failures") \
|
||||||
\
|
\
|
||||||
|
M(BackupEntriesCollectorMicroseconds, "Time spent making backup entries") \
|
||||||
|
M(BackupEntriesCollectorForTablesDataMicroseconds, "Time spent making backup entries for tables data") \
|
||||||
|
M(BackupEntriesCollectorRunPostTasksMicroseconds, "Time spent running post tasks after making backup entries") \
|
||||||
|
\
|
||||||
M(ReadTaskRequestsReceived, "The number of callbacks requested from the remote server back to the initiator server to choose the read task (for s3Cluster table function and similar). Measured on the initiator server side.") \
|
M(ReadTaskRequestsReceived, "The number of callbacks requested from the remote server back to the initiator server to choose the read task (for s3Cluster table function and similar). Measured on the initiator server side.") \
|
||||||
M(MergeTreeReadTaskRequestsReceived, "The number of callbacks requested from the remote server back to the initiator server to choose the read task (for MergeTree tables). Measured on the initiator server side.") \
|
M(MergeTreeReadTaskRequestsReceived, "The number of callbacks requested from the remote server back to the initiator server to choose the read task (for MergeTree tables). Measured on the initiator server side.") \
|
||||||
\
|
\
|
||||||
|
@ -10,6 +10,7 @@ target_compile_definitions (clickhouse_common_zookeeper PRIVATE -DZOOKEEPER_LOG)
|
|||||||
target_link_libraries (clickhouse_common_zookeeper
|
target_link_libraries (clickhouse_common_zookeeper
|
||||||
PUBLIC
|
PUBLIC
|
||||||
clickhouse_common_io
|
clickhouse_common_io
|
||||||
|
clickhouse_compression
|
||||||
common
|
common
|
||||||
PRIVATE
|
PRIVATE
|
||||||
string_utils
|
string_utils
|
||||||
@ -20,6 +21,7 @@ add_library(clickhouse_common_zookeeper_no_log ${clickhouse_common_zookeeper_hea
|
|||||||
target_link_libraries (clickhouse_common_zookeeper_no_log
|
target_link_libraries (clickhouse_common_zookeeper_no_log
|
||||||
PUBLIC
|
PUBLIC
|
||||||
clickhouse_common_io
|
clickhouse_common_io
|
||||||
|
clickhouse_compression
|
||||||
common
|
common
|
||||||
PRIVATE
|
PRIVATE
|
||||||
string_utils
|
string_utils
|
||||||
|
@ -214,6 +214,10 @@ void ZooKeeperArgs::initFromKeeperSection(const Poco::Util::AbstractConfiguratio
|
|||||||
.max_sec = config.getUInt(config_name + "." + key + ".max"),
|
.max_sec = config.getUInt(config_name + "." + key + ".max"),
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
else if (key == "use_compression")
|
||||||
|
{
|
||||||
|
use_compression = config.getBool(config_name + "." + key);
|
||||||
|
}
|
||||||
else
|
else
|
||||||
throw KeeperException(Coordination::Error::ZBADARGUMENTS, "Unknown key {} in config file", key);
|
throw KeeperException(Coordination::Error::ZBADARGUMENTS, "Unknown key {} in config file", key);
|
||||||
}
|
}
|
||||||
|
@ -44,6 +44,7 @@ struct ZooKeeperArgs
 double recv_sleep_probability = 0.0;
 UInt64 send_sleep_ms = 0;
 UInt64 recv_sleep_ms = 0;
+bool use_compression = false;

 SessionLifetimeConfiguration fallback_session_lifetime = {};
 DB::GetPriorityForLoadBalancing get_priority_load_balancing;
@ -27,7 +27,6 @@ void ZooKeeperResponse::write(WriteBuffer & out) const
 if (error == Error::ZOK)
 writeImpl(buf);
 Coordination::write(buf.str(), out);
-out.next();
 }

 std::string ZooKeeperRequest::toString() const
@ -49,7 +48,6 @@ void ZooKeeperRequest::write(WriteBuffer & out) const
 Coordination::write(getOpNum(), buf);
 writeImpl(buf);
 Coordination::write(buf.str(), out);
-out.next();
 }

 void ZooKeeperSyncRequest::writeImpl(WriteBuffer & out) const
@ -46,6 +46,7 @@ enum class OpNum : int32_t
 OpNum getOpNum(int32_t raw_op_num);

 static constexpr int32_t ZOOKEEPER_PROTOCOL_VERSION = 0;
+static constexpr int32_t ZOOKEEPER_PROTOCOL_VERSION_WITH_COMPRESSION = 10;
 static constexpr int32_t KEEPER_PROTOCOL_VERSION_CONNECTION_REJECT = 42;
 static constexpr int32_t CLIENT_HANDSHAKE_LENGTH = 44;
 static constexpr int32_t CLIENT_HANDSHAKE_LENGTH_WITH_READONLY = 45;
@ -16,6 +16,9 @@
 #include <Common/ZooKeeper/ZooKeeperIO.h>
 #include <Common/logger_useful.h>
 #include <Common/setThreadName.h>
+#include <Compression/CompressedReadBuffer.h>
+#include <Compression/CompressedWriteBuffer.h>
+#include <Compression/CompressionFactory.h>

 #include "Coordination/KeeperConstants.h"
 #include "config.h"
|
|||||||
template <typename T>
|
template <typename T>
|
||||||
void ZooKeeper::write(const T & x)
|
void ZooKeeper::write(const T & x)
|
||||||
{
|
{
|
||||||
Coordination::write(x, *out);
|
Coordination::write(x, getWriteBuffer());
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename T>
|
template <typename T>
|
||||||
void ZooKeeper::read(T & x)
|
void ZooKeeper::read(T & x)
|
||||||
{
|
{
|
||||||
Coordination::read(x, *in);
|
Coordination::read(x, getReadBuffer());
|
||||||
|
}
|
||||||
|
|
||||||
|
WriteBuffer & ZooKeeper::getWriteBuffer()
|
||||||
|
{
|
||||||
|
if (compressed_out)
|
||||||
|
return *compressed_out;
|
||||||
|
return *out;
|
||||||
|
}
|
||||||
|
|
||||||
|
void ZooKeeper::flushWriteBuffer()
|
||||||
|
{
|
||||||
|
if (compressed_out)
|
||||||
|
compressed_out->next();
|
||||||
|
out->next();
|
||||||
|
}
|
||||||
|
|
||||||
|
ReadBuffer & ZooKeeper::getReadBuffer()
|
||||||
|
{
|
||||||
|
if (compressed_in)
|
||||||
|
return *compressed_in;
|
||||||
|
return *in;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void removeRootPath(String & path, const String & chroot)
|
static void removeRootPath(String & path, const String & chroot)
|
||||||
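The accessor pair above routes every read and write through the compressed buffers when they exist and through the raw socket buffers otherwise, and `flushWriteBuffer()` flushes both layers in order. A rough self-contained sketch of that selection idea, using `std::ostringstream` as a stand-in for the socket and compression buffers (no real compression happens here):

```cpp
#include <iostream>
#include <optional>
#include <sstream>

// All writes go through one accessor; it returns the "compressed" stream when it
// exists and the raw one otherwise, mirroring getWriteBuffer()/flushWriteBuffer().
struct Connection
{
    std::ostringstream raw_out;                        // always present (the socket buffer)
    std::optional<std::ostringstream> compressed_out;  // present only when compression was negotiated

    std::ostream & getWriteBuffer()
    {
        if (compressed_out)
            return *compressed_out;
        return raw_out;
    }

    void flushWriteBuffer()
    {
        if (compressed_out)
            raw_out << compressed_out->str();  // push the buffered "compressed" layer down to the raw one
        // a real implementation would also flush the socket buffer here
    }
};

int main()
{
    Connection conn;
    conn.compressed_out.emplace();
    conn.getWriteBuffer() << "request";
    conn.flushWriteBuffer();
    std::cout << conn.raw_out.str() << '\n';
}
```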
@ -345,7 +369,23 @@ ZooKeeper::ZooKeeper(
 if (args.enable_fault_injections_during_startup)
 setupFaultDistributions();

+try
+{
+use_compression = args.use_compression;
 connect(nodes, args.connection_timeout_ms * 1000);
+}
+catch (...)
+{
+/// If we get exception & compression is enabled, then its possible that keeper does not support compression,
+/// try without compression
+if (use_compression)
+{
+use_compression = false;
+connect(nodes, args.connection_timeout_ms * 1000);
+}
+else
+throw;
+}

 if (!args.auth_scheme.empty())
 sendAuth(args.auth_scheme, args.identity);
|
|||||||
|
|
||||||
in.emplace(socket);
|
in.emplace(socket);
|
||||||
out.emplace(socket);
|
out.emplace(socket);
|
||||||
|
compressed_in.reset();
|
||||||
|
compressed_out.reset();
|
||||||
|
|
||||||
try
|
try
|
||||||
{
|
{
|
||||||
@ -444,7 +486,14 @@ void ZooKeeper::connect(
|
|||||||
e.addMessage("while receiving handshake from ZooKeeper");
|
e.addMessage("while receiving handshake from ZooKeeper");
|
||||||
throw;
|
throw;
|
||||||
}
|
}
|
||||||
|
|
||||||
connected = true;
|
connected = true;
|
||||||
|
if (use_compression)
|
||||||
|
{
|
||||||
|
compressed_in.emplace(*in);
|
||||||
|
compressed_out.emplace(*out, CompressionCodecFactory::instance().get("LZ4", {}));
|
||||||
|
}
|
||||||
|
|
||||||
original_index = static_cast<Int8>(node.original_index);
|
original_index = static_cast<Int8>(node.original_index);
|
||||||
|
|
||||||
if (i != 0)
|
if (i != 0)
|
||||||
@ -511,16 +560,17 @@ void ZooKeeper::sendHandshake()
|
|||||||
std::array<char, passwd_len> passwd {};
|
std::array<char, passwd_len> passwd {};
|
||||||
|
|
||||||
write(handshake_length);
|
write(handshake_length);
|
||||||
|
if (use_compression)
|
||||||
|
write(ZOOKEEPER_PROTOCOL_VERSION_WITH_COMPRESSION);
|
||||||
|
else
|
||||||
write(ZOOKEEPER_PROTOCOL_VERSION);
|
write(ZOOKEEPER_PROTOCOL_VERSION);
|
||||||
write(last_zxid_seen);
|
write(last_zxid_seen);
|
||||||
write(timeout);
|
write(timeout);
|
||||||
write(previous_session_id);
|
write(previous_session_id);
|
||||||
write(passwd);
|
write(passwd);
|
||||||
|
flushWriteBuffer();
|
||||||
out->next();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void ZooKeeper::receiveHandshake()
|
void ZooKeeper::receiveHandshake()
|
||||||
{
|
{
|
||||||
int32_t handshake_length;
|
int32_t handshake_length;
|
||||||
@ -533,8 +583,7 @@ void ZooKeeper::receiveHandshake()
|
|||||||
throw Exception(Error::ZMARSHALLINGERROR, "Unexpected handshake length received: {}", handshake_length);
|
throw Exception(Error::ZMARSHALLINGERROR, "Unexpected handshake length received: {}", handshake_length);
|
||||||
|
|
||||||
read(protocol_version_read);
|
read(protocol_version_read);
|
||||||
if (protocol_version_read != ZOOKEEPER_PROTOCOL_VERSION)
|
|
||||||
{
|
|
||||||
/// Special way to tell a client that server is not ready to serve it.
|
/// Special way to tell a client that server is not ready to serve it.
|
||||||
/// It's better for faster failover than just connection drop.
|
/// It's better for faster failover than just connection drop.
|
||||||
/// Implemented in clickhouse-keeper.
|
/// Implemented in clickhouse-keeper.
|
||||||
@ -542,9 +591,14 @@ void ZooKeeper::receiveHandshake()
|
|||||||
throw Exception::fromMessage(Error::ZCONNECTIONLOSS,
|
throw Exception::fromMessage(Error::ZCONNECTIONLOSS,
|
||||||
"Keeper server rejected the connection during the handshake. "
|
"Keeper server rejected the connection during the handshake. "
|
||||||
"Possibly it's overloaded, doesn't see leader or stale");
|
"Possibly it's overloaded, doesn't see leader or stale");
|
||||||
else
|
|
||||||
throw Exception(Error::ZMARSHALLINGERROR, "Unexpected protocol version: {}", protocol_version_read);
|
if (use_compression)
|
||||||
|
{
|
||||||
|
if (protocol_version_read != ZOOKEEPER_PROTOCOL_VERSION_WITH_COMPRESSION)
|
||||||
|
throw Exception(Error::ZMARSHALLINGERROR,"Unexpected protocol version with compression: {}", protocol_version_read);
|
||||||
}
|
}
|
||||||
|
else if (protocol_version_read != ZOOKEEPER_PROTOCOL_VERSION)
|
||||||
|
throw Exception(Error::ZMARSHALLINGERROR, "Unexpected protocol version: {}", protocol_version_read);
|
||||||
|
|
||||||
read(timeout);
|
read(timeout);
|
||||||
if (timeout != args.session_timeout_ms)
|
if (timeout != args.session_timeout_ms)
|
||||||
@ -562,7 +616,8 @@ void ZooKeeper::sendAuth(const String & scheme, const String & data)
|
|||||||
request.scheme = scheme;
|
request.scheme = scheme;
|
||||||
request.data = data;
|
request.data = data;
|
||||||
request.xid = AUTH_XID;
|
request.xid = AUTH_XID;
|
||||||
request.write(*out);
|
request.write(getWriteBuffer());
|
||||||
|
flushWriteBuffer();
|
||||||
|
|
||||||
int32_t length;
|
int32_t length;
|
||||||
XID read_xid;
|
XID read_xid;
|
||||||
@ -578,10 +633,14 @@ void ZooKeeper::sendAuth(const String & scheme, const String & data)
|
|||||||
if (read_xid != AUTH_XID)
|
if (read_xid != AUTH_XID)
|
||||||
throw Exception(Error::ZMARSHALLINGERROR, "Unexpected event received in reply to auth request: {}", read_xid);
|
throw Exception(Error::ZMARSHALLINGERROR, "Unexpected event received in reply to auth request: {}", read_xid);
|
||||||
|
|
||||||
|
if (!use_compression)
|
||||||
|
{
|
||||||
int32_t actual_length = static_cast<int32_t>(in->count() - count_before_event);
|
int32_t actual_length = static_cast<int32_t>(in->count() - count_before_event);
|
||||||
if (length != actual_length)
|
if (length != actual_length)
|
||||||
throw Exception(Error::ZMARSHALLINGERROR, "Response length doesn't match. Expected: {}, actual: {}", length, actual_length);
|
throw Exception(Error::ZMARSHALLINGERROR, "Response length doesn't match. Expected: {}, actual: {}", length, actual_length);
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
if (err != Error::ZOK)
|
if (err != Error::ZOK)
|
||||||
throw Exception(Error::ZMARSHALLINGERROR, "Error received in reply to auth request. Code: {}. Message: {}",
|
throw Exception(Error::ZMARSHALLINGERROR, "Error received in reply to auth request. Code: {}. Message: {}",
|
||||||
static_cast<int32_t>(err), err);
|
static_cast<int32_t>(err), err);
|
||||||
@ -637,7 +696,8 @@ void ZooKeeper::sendThread()
|
|||||||
info.request->addRootPath(args.chroot);
|
info.request->addRootPath(args.chroot);
|
||||||
|
|
||||||
info.request->probably_sent = true;
|
info.request->probably_sent = true;
|
||||||
info.request->write(*out);
|
info.request->write(getWriteBuffer());
|
||||||
|
flushWriteBuffer();
|
||||||
|
|
||||||
logOperationIfNeeded(info.request);
|
logOperationIfNeeded(info.request);
|
||||||
|
|
||||||
@ -653,7 +713,8 @@ void ZooKeeper::sendThread()
|
|||||||
|
|
||||||
ZooKeeperHeartbeatRequest request;
|
ZooKeeperHeartbeatRequest request;
|
||||||
request.xid = PING_XID;
|
request.xid = PING_XID;
|
||||||
request.write(*out);
|
request.write(getWriteBuffer());
|
||||||
|
flushWriteBuffer();
|
||||||
}
|
}
|
||||||
|
|
||||||
ProfileEvents::increment(ProfileEvents::ZooKeeperBytesSent, out->count() - prev_bytes_sent);
|
ProfileEvents::increment(ProfileEvents::ZooKeeperBytesSent, out->count() - prev_bytes_sent);
|
||||||
@ -825,7 +886,7 @@ void ZooKeeper::receiveEvent()
|
|||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
response->readImpl(*in);
|
response->readImpl(getReadBuffer());
|
||||||
response->removeRootPath(args.chroot);
|
response->removeRootPath(args.chroot);
|
||||||
}
|
}
|
||||||
/// Instead of setting the watch in sendEvent, set it in receiveEvent because need to check the response.
|
/// Instead of setting the watch in sendEvent, set it in receiveEvent because need to check the response.
|
||||||
@ -858,9 +919,14 @@ void ZooKeeper::receiveEvent()
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (!use_compression)
|
||||||
|
{
|
||||||
int32_t actual_length = static_cast<int32_t>(in->count() - count_before_event);
|
int32_t actual_length = static_cast<int32_t>(in->count() - count_before_event);
|
||||||
|
|
||||||
if (length != actual_length)
|
if (length != actual_length)
|
||||||
throw Exception(Error::ZMARSHALLINGERROR, "Response length doesn't match. Expected: {}, actual: {}", length, actual_length);
|
throw Exception(Error::ZMARSHALLINGERROR, "Response length doesn't match. Expected: {}, actual: {}",
|
||||||
|
length, actual_length);
|
||||||
|
}
|
||||||
|
|
||||||
logOperationIfNeeded(request_info.request, response, /* finalize= */ false, elapsed_ms);
|
logOperationIfNeeded(request_info.request, response, /* finalize= */ false, elapsed_ms);
|
||||||
}
|
}
|
||||||
|
@ -15,6 +15,8 @@
|
|||||||
#include <IO/WriteBuffer.h>
|
#include <IO/WriteBuffer.h>
|
||||||
#include <IO/ReadBufferFromPocoSocket.h>
|
#include <IO/ReadBufferFromPocoSocket.h>
|
||||||
#include <IO/WriteBufferFromPocoSocket.h>
|
#include <IO/WriteBufferFromPocoSocket.h>
|
||||||
|
#include <Compression/CompressedReadBuffer.h>
|
||||||
|
#include <Compression/CompressedWriteBuffer.h>
|
||||||
|
|
||||||
#include <Poco/Net/StreamSocket.h>
|
#include <Poco/Net/StreamSocket.h>
|
||||||
#include <Poco/Net/SocketAddress.h>
|
#include <Poco/Net/SocketAddress.h>
|
||||||
@ -239,8 +241,13 @@ private:
|
|||||||
Poco::Net::StreamSocket socket;
|
Poco::Net::StreamSocket socket;
|
||||||
/// To avoid excessive getpeername(2) calls.
|
/// To avoid excessive getpeername(2) calls.
|
||||||
Poco::Net::SocketAddress socket_address;
|
Poco::Net::SocketAddress socket_address;
|
||||||
|
|
||||||
std::optional<ReadBufferFromPocoSocket> in;
|
std::optional<ReadBufferFromPocoSocket> in;
|
||||||
std::optional<WriteBufferFromPocoSocket> out;
|
std::optional<WriteBufferFromPocoSocket> out;
|
||||||
|
std::optional<CompressedReadBuffer> compressed_in;
|
||||||
|
std::optional<CompressedWriteBuffer> compressed_out;
|
||||||
|
|
||||||
|
bool use_compression = false;
|
||||||
|
|
||||||
int64_t session_id = 0;
|
int64_t session_id = 0;
|
||||||
|
|
||||||
@ -328,6 +335,10 @@ private:
|
|||||||
template <typename T>
|
template <typename T>
|
||||||
void read(T &);
|
void read(T &);
|
||||||
|
|
||||||
|
WriteBuffer & getWriteBuffer();
|
||||||
|
void flushWriteBuffer();
|
||||||
|
ReadBuffer & getReadBuffer();
|
||||||
|
|
||||||
void logOperationIfNeeded(const ZooKeeperRequestPtr & request, const ZooKeeperResponsePtr & response = nullptr, bool finalize = false, UInt64 elapsed_ms = 0);
|
void logOperationIfNeeded(const ZooKeeperRequestPtr & request, const ZooKeeperResponsePtr & response = nullptr, bool finalize = false, UInt64 elapsed_ms = 0);
|
||||||
|
|
||||||
void initFeatureFlags();
|
void initFeatureFlags();
|
||||||
|
@ -2,7 +2,7 @@ clickhouse_add_executable(zkutil_test_commands zkutil_test_commands.cpp)
|
|||||||
target_link_libraries(zkutil_test_commands PRIVATE clickhouse_common_zookeeper_no_log)
|
target_link_libraries(zkutil_test_commands PRIVATE clickhouse_common_zookeeper_no_log)
|
||||||
|
|
||||||
clickhouse_add_executable(zkutil_test_commands_new_lib zkutil_test_commands_new_lib.cpp)
|
clickhouse_add_executable(zkutil_test_commands_new_lib zkutil_test_commands_new_lib.cpp)
|
||||||
target_link_libraries(zkutil_test_commands_new_lib PRIVATE clickhouse_common_zookeeper_no_log string_utils)
|
target_link_libraries(zkutil_test_commands_new_lib PRIVATE clickhouse_common_zookeeper_no_log clickhouse_compression string_utils)
|
||||||
|
|
||||||
clickhouse_add_executable(zkutil_test_async zkutil_test_async.cpp)
|
clickhouse_add_executable(zkutil_test_async zkutil_test_async.cpp)
|
||||||
target_link_libraries(zkutil_test_async PRIVATE clickhouse_common_zookeeper_no_log)
|
target_link_libraries(zkutil_test_async PRIVATE clickhouse_common_zookeeper_no_log)
|
||||||
|
@ -604,6 +604,7 @@ class IColumn;
|
|||||||
M(Bool, optimize_use_implicit_projections, true, "Automatically choose implicit projections to perform SELECT query", 0) \
|
M(Bool, optimize_use_implicit_projections, true, "Automatically choose implicit projections to perform SELECT query", 0) \
|
||||||
M(Bool, force_optimize_projection, false, "If projection optimization is enabled, SELECT queries need to use projection", 0) \
|
M(Bool, force_optimize_projection, false, "If projection optimization is enabled, SELECT queries need to use projection", 0) \
|
||||||
M(String, force_optimize_projection_name, "", "If it is set to a non-empty string, check that this projection is used in the query at least once.", 0) \
|
M(String, force_optimize_projection_name, "", "If it is set to a non-empty string, check that this projection is used in the query at least once.", 0) \
|
||||||
|
M(String, preferred_optimize_projection_name, "", "If it is set to a non-empty string, ClickHouse tries to apply specified projection", 0) \
|
||||||
M(Bool, async_socket_for_remote, true, "Asynchronously read from socket executing remote query", 0) \
|
M(Bool, async_socket_for_remote, true, "Asynchronously read from socket executing remote query", 0) \
|
||||||
M(Bool, async_query_sending_for_remote, true, "Asynchronously create connections and send query to shards in remote query", 0) \
|
M(Bool, async_query_sending_for_remote, true, "Asynchronously create connections and send query to shards in remote query", 0) \
|
||||||
M(Bool, insert_null_as_default, true, "Insert DEFAULT values instead of NULL in INSERT SELECT (UNION ALL)", 0) \
|
M(Bool, insert_null_as_default, true, "Insert DEFAULT values instead of NULL in INSERT SELECT (UNION ALL)", 0) \
|
||||||
|
@@ -387,12 +387,25 @@ Model::CompleteMultipartUploadOutcome Client::CompleteMultipartUpload(const Comp
     auto outcome = doRequestWithRetryNetworkErrors</*IsReadMethod*/ false>(
         request, [this](const Model::CompleteMultipartUploadRequest & req) { return CompleteMultipartUpload(req); });
 
-    if (!outcome.IsSuccess() || provider_type != ProviderType::GCS)
-        return outcome;
-
     const auto & key = request.GetKey();
     const auto & bucket = request.GetBucket();
 
+    if (!outcome.IsSuccess()
+        && outcome.GetError().GetErrorType() == Aws::S3::S3Errors::NO_SUCH_UPLOAD)
+    {
+        auto check_request = HeadObjectRequest()
+            .WithBucket(bucket)
+            .WithKey(key);
+        auto check_outcome = HeadObject(check_request);
+
+        /// if the key exists, than MultipartUpload has been completed at some of the retries
+        /// rewrite outcome with success status
+        if (check_outcome.IsSuccess())
+            outcome = Aws::S3::Model::CompleteMultipartUploadOutcome(Aws::S3::Model::CompleteMultipartUploadResult());
+    }
+
+    if (outcome.IsSuccess() && provider_type == ProviderType::GCS)
+    {
     /// For GCS we will try to compose object at the end, otherwise we cannot do a native copy
     /// for the object (e.g. for backups)
     /// We don't care if the compose fails, because the upload was still successful, only the
@@ -407,7 +420,11 @@ Model::CompleteMultipartUploadOutcome Client::CompleteMultipartUpload(const Comp
         if (compose_outcome.IsSuccess())
             LOG_TRACE(log, "Composing object was successful");
         else
-            LOG_INFO(log, "Failed to compose object. Message: {}, Key: {}, Bucket: {}", compose_outcome.GetError().GetMessage(), key, bucket);
+            LOG_INFO(
+                log,
+                "Failed to compose object. Message: {}, Key: {}, Bucket: {}",
+                compose_outcome.GetError().GetMessage(), key, bucket);
+    }
 
     return outcome;
 }
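The CompleteMultipartUpload hunk above makes a retried completion tolerant of NO_SUCH_UPLOAD: if the upload id is gone but a HEAD request shows the key exists, an earlier attempt already finished and the outcome is rewritten to success. A simplified sketch of that idea with hypothetical helpers (not the AWS SDK API):

// If a retried "complete multipart upload" reports that the upload id no longer
// exists, probe the key; if the object is there, an earlier attempt already
// succeeded, so report success instead of an error.
#include <functional>
#include <iostream>
#include <string>

enum class Status { Ok, NoSuchUpload, OtherError };

Status completeUploadWithCheck(
    const std::string & key,
    const std::function<Status()> & complete_upload,               // assumed to issue CompleteMultipartUpload
    const std::function<bool(const std::string &)> & key_exists)   // assumed to issue a HEAD request
{
    Status status = complete_upload();
    if (status == Status::NoSuchUpload && key_exists(key))
        return Status::Ok;   // a previous retry completed the upload; treat as success
    return status;
}

int main()
{
    auto complete = [] { return Status::NoSuchUpload; };        // pretend the retry lost the upload id
    auto exists = [](const std::string &) { return true; };     // but the object is already there
    std::cout << (completeUploadWithCheck("backup/part_1", complete, exists) == Status::Ok) << '\n';
}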
@@ -582,7 +582,7 @@ void WriteBufferFromS3::completeMultipartUpload()
         if (outcome.GetError().GetErrorType() == Aws::S3::S3Errors::NO_SUCH_KEY)
         {
             /// For unknown reason, at least MinIO can respond with NO_SUCH_KEY for put requests
-            /// BTW, NO_SUCH_UPLOAD is expected error and we shouldn't retry it
+            /// BTW, NO_SUCH_UPLOAD is expected error and we shouldn't retry it here, DB::S3::Client take care of it
             LOG_INFO(log, "Multipart upload failed with NO_SUCH_KEY error, will retry. {}, Parts: {}", getVerboseLogDetails(), multipart_tags.size());
         }
         else
@@ -335,6 +335,28 @@ const ActionsDAG::Node * ActionsDAG::tryFindInOutputs(const std::string & name)
     return nullptr;
 }
 
+ActionsDAG::NodeRawConstPtrs ActionsDAG::findInOutpus(const Names & names) const
+{
+    NodeRawConstPtrs required_nodes;
+    required_nodes.reserve(names.size());
+
+    std::unordered_map<std::string_view, const Node *> names_map;
+    for (const auto * node : outputs)
+        names_map[node->result_name] = node;
+
+    for (const auto & name : names)
+    {
+        auto it = names_map.find(name);
+        if (it == names_map.end())
+            throw Exception(ErrorCodes::UNKNOWN_IDENTIFIER,
+                            "Unknown column: {}, there are only columns {}", name, dumpDAG());
+
+        required_nodes.push_back(it->second);
+    }
+
+    return required_nodes;
+}
+
 void ActionsDAG::addOrReplaceInOutputs(const Node & node)
 {
     for (auto & output_node : outputs)
@@ -441,23 +463,7 @@ void ActionsDAG::removeUnusedActions(const NameSet & required_names, bool allow_
 
 void ActionsDAG::removeUnusedActions(const Names & required_names, bool allow_remove_inputs, bool allow_constant_folding)
 {
-    NodeRawConstPtrs required_nodes;
-    required_nodes.reserve(required_names.size());
-
-    std::unordered_map<std::string_view, const Node *> names_map;
-    for (const auto * node : outputs)
-        names_map[node->result_name] = node;
-
-    for (const auto & name : required_names)
-    {
-        auto it = names_map.find(name);
-        if (it == names_map.end())
-            throw Exception(ErrorCodes::UNKNOWN_IDENTIFIER,
-                            "Unknown column: {}, there are only columns {}", name, dumpDAG());
-
-        required_nodes.push_back(it->second);
-    }
-
+    auto required_nodes = findInOutpus(required_names);
     outputs.swap(required_nodes);
     removeUnusedActions(allow_remove_inputs, allow_constant_folding);
 }
@@ -535,6 +541,62 @@ void ActionsDAG::removeUnusedActions(const std::unordered_set<const Node *> & us
     std::erase_if(inputs, [&](const Node * node) { return !visited_nodes.contains(node); });
 }
 
+ActionsDAGPtr ActionsDAG::cloneSubDAG(const NodeRawConstPtrs & outputs, bool remove_aliases)
+{
+    auto actions = std::make_shared<ActionsDAG>();
+    std::unordered_map<const Node *, Node *> copy_map;
+
+    struct Frame
+    {
+        const Node * node = nullptr;
+        size_t next_child = 0;
+    };
+
+    std::stack<Frame> stack;
+
+    for (const auto * output : outputs)
+    {
+        if (copy_map.contains(output))
+            continue;
+
+        stack.push(Frame{output});
+        while (!stack.empty())
+        {
+            auto & frame = stack.top();
+            const auto & children = frame.node->children;
+            while (frame.next_child < children.size() && copy_map.contains(children[frame.next_child]))
+                ++frame.next_child;
+
+            if (frame.next_child < children.size())
+            {
+                stack.push(Frame{children[frame.next_child]});
+                continue;
+            }
+
+            auto & copy_node = copy_map[frame.node];
+
+            if (remove_aliases && frame.node->type == ActionType::ALIAS)
+                copy_node = copy_map[frame.node->children.front()];
+            else
+                copy_node = &actions->nodes.emplace_back(*frame.node);
+
+            if (frame.node->type == ActionType::INPUT)
+                actions->inputs.push_back(copy_node);
+
+            stack.pop();
+        }
+    }
+
+    for (auto & node : actions->nodes)
+        for (auto & child : node.children)
+            child = copy_map[child];
+
+    for (const auto * output : outputs)
+        actions->outputs.push_back(copy_map[output]);
+
+    return actions;
+}
+
 static ColumnWithTypeAndName executeActionForHeader(const ActionsDAG::Node * node, ColumnsWithTypeAndName arguments)
 {
     ColumnWithTypeAndName res_column;
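cloneSubDAG above copies only the part of the graph reachable from the requested outputs, using an explicit stack instead of recursion and a map from original nodes to their copies. A generic sketch of the same traversal over a toy node type (not ClickHouse's ActionsDAG):

// Copy the sub-graph reachable from a set of output nodes without recursion,
// using an explicit stack and an original-node -> copy map.
#include <iostream>
#include <memory>
#include <stack>
#include <string>
#include <unordered_map>
#include <vector>

struct Node
{
    std::string name;
    std::vector<const Node *> children;
};

std::vector<std::unique_ptr<Node>> cloneSubDag(const std::vector<const Node *> & outputs)
{
    std::vector<std::unique_ptr<Node>> copies;
    std::unordered_map<const Node *, Node *> copy_map;

    std::stack<const Node *> stack;
    for (const auto * output : outputs)
        stack.push(output);

    while (!stack.empty())
    {
        const Node * node = stack.top();
        if (copy_map.contains(node))
        {
            stack.pop();
            continue;
        }

        // Copy a node only after all of its children have been copied (post-order).
        bool children_ready = true;
        for (const auto * child : node->children)
            if (!copy_map.contains(child))
            {
                stack.push(child);
                children_ready = false;
            }
        if (!children_ready)
            continue;

        auto copy = std::make_unique<Node>(Node{node->name, {}});
        for (const auto * child : node->children)
            copy->children.push_back(copy_map.at(child));
        copy_map[node] = copy.get();
        copies.push_back(std::move(copy));
        stack.pop();
    }
    return copies;
}

int main()
{
    Node a{"a", {}};
    Node b{"b", {&a}};
    Node c{"c", {&a, &b}};
    auto copies = cloneSubDag({&c});
    std::cout << copies.size() << " nodes copied\n";   // prints: 3 nodes copied
}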
@@ -157,6 +157,9 @@ public:
     /// Same, but return nullptr if node not found.
     const Node * tryFindInOutputs(const std::string & name) const;
 
+    /// Same, but for the list of names.
+    NodeRawConstPtrs findInOutpus(const Names & names) const;
+
     /// Find first node with the same name in output nodes and replace it.
     /// If was not found, add node to outputs end.
     void addOrReplaceInOutputs(const Node & node);
@@ -260,6 +263,8 @@ public:
 
     ActionsDAGPtr clone() const;
 
+    static ActionsDAGPtr cloneSubDAG(const NodeRawConstPtrs & outputs, bool remove_aliases);
+
     /// Execute actions for header. Input block must have empty columns.
     /// Result should be equal to the execution of ExpressionActions built from this DAG.
     /// Actions are not changed, no expressions are compiled.
@@ -444,6 +444,7 @@ AggregateProjectionCandidates getAggregateProjectionCandidates(
 
     const auto & projections = metadata->projections;
     std::vector<const ProjectionDescription *> agg_projections;
+
     for (const auto & projection : projections)
         if (projection.type == ProjectionDescription::Type::Aggregate)
             agg_projections.push_back(&projection);
@@ -584,6 +585,9 @@ bool optimizeUseAggregateProjections(QueryPlan::Node & node, QueryPlan::Nodes &
     auto ordinary_reading_select_result = reading->selectRangesToRead(parts, /* alter_conversions = */ {});
     size_t ordinary_reading_marks = ordinary_reading_select_result->marks();
 
+    const auto & proj_name_from_settings = context->getSettings().preferred_optimize_projection_name.value;
+    bool found_best_candidate = false;
+
     /// Selecting best candidate.
     for (auto & candidate : candidates.real)
     {
@@ -602,8 +606,13 @@ bool optimizeUseAggregateProjections(QueryPlan::Node & node, QueryPlan::Nodes &
         if (candidate.sum_marks > ordinary_reading_marks)
             continue;
 
-        if (best_candidate == nullptr || best_candidate->sum_marks > candidate.sum_marks)
+        if ((best_candidate == nullptr || best_candidate->sum_marks > candidate.sum_marks) && !found_best_candidate)
             best_candidate = &candidate;
+        if (!proj_name_from_settings.empty() && candidate.projection->name == proj_name_from_settings)
+        {
+            best_candidate = &candidate;
+            found_best_candidate = true;
+        }
     }
 
     if (!best_candidate)
@@ -7,6 +7,8 @@
 #include <Processors/QueryPlan/ReadFromPreparedSource.h>
 #include <Processors/Sources/NullSource.h>
 #include <Common/logger_useful.h>
+#include <Storages/ProjectionsDescription.h>
+#include <Storages/SelectQueryInfo.h>
 #include <Storages/MergeTree/MergeTreeDataSelectExecutor.h>
 #include <stack>
 
@@ -131,6 +133,21 @@ bool optimizeUseNormalProjections(Stack & stack, QueryPlan::Nodes & nodes)
 
     std::shared_ptr<PartitionIdToMaxBlock> max_added_blocks = getMaxAddedBlocks(reading);
 
+    // Here we iterate over the projections and check if we have the same projections as we specified in preferred_projection_name
+    bool is_projection_found = false;
+    const auto & proj_name_from_settings = context->getSettings().preferred_optimize_projection_name.value;
+    if (!proj_name_from_settings.empty())
+    {
+        for (const auto * projection : normal_projections)
+        {
+            if (projection->name == proj_name_from_settings)
+            {
+                is_projection_found = true;
+                break;
+            }
+        }
+    }
+
     for (const auto * projection : normal_projections)
     {
         if (!hasAllRequiredColumns(projection, required_columns))
@@ -153,7 +170,9 @@ bool optimizeUseNormalProjections(Stack & stack, QueryPlan::Nodes & nodes)
         if (candidate.sum_marks >= ordinary_reading_marks)
             continue;
 
-        if (best_candidate == nullptr || candidate.sum_marks < best_candidate->sum_marks)
+        if (!is_projection_found && (best_candidate == nullptr || candidate.sum_marks < best_candidate->sum_marks))
+            best_candidate = &candidate;
+        else if (is_projection_found && projection->name == proj_name_from_settings)
             best_candidate = &candidate;
     }
 
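Both projection optimizations above now honour the preferred_optimize_projection_name setting: the cheapest candidate by marks is still the default, but a candidate whose name matches the setting wins and is not overridden afterwards. A toy sketch of that selection rule (simplified types, not the optimizer's real structures):

// Pick the candidate reading the fewest marks by default, but let an explicitly
// preferred projection name override the cheapest choice once it is seen.
#include <iostream>
#include <string>
#include <vector>

struct Candidate
{
    std::string name;
    size_t sum_marks;
};

const Candidate * selectCandidate(const std::vector<Candidate> & candidates, const std::string & preferred_name)
{
    const Candidate * best = nullptr;
    bool preferred_found = false;
    for (const auto & candidate : candidates)
    {
        if ((best == nullptr || candidate.sum_marks < best->sum_marks) && !preferred_found)
            best = &candidate;
        if (!preferred_name.empty() && candidate.name == preferred_name)
        {
            best = &candidate;        // the requested projection overrides the cheapest one
            preferred_found = true;
        }
    }
    return best;
}

int main()
{
    std::vector<Candidate> candidates{{"proj_small", 10}, {"proj_big", 100}};
    std::cout << selectCandidate(candidates, "")->name << '\n';           // proj_small
    std::cout << selectCandidate(candidates, "proj_big")->name << '\n';   // proj_big
}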
@@ -95,7 +95,7 @@ bool allOutputsDependsOnlyOnAllowedNodes(
         {
             const auto & match = matches.at(node);
             /// Function could be mapped into its argument. In this case .monotonicity != std::nullopt (see matchTrees)
-            if (match.node && match.node->result_name == node->result_name && !match.monotonicity)
+            if (match.node && !match.monotonicity)
                 res = irreducible_nodes.contains(match.node);
         }
 
@@ -155,9 +155,10 @@ bool isPartitionKeySuitsGroupByKey(
         return false;
 
     /// We are interested only in calculations required to obtain group by keys (and not aggregate function arguments for example).
-    group_by_actions->removeUnusedActions(aggregating.getParams().keys);
+    auto key_nodes = group_by_actions->findInOutpus(aggregating.getParams().keys);
+    auto group_by_key_actions = ActionsDAG::cloneSubDAG(key_nodes, /*remove_aliases=*/ true);
 
-    const auto & gb_key_required_columns = group_by_actions->getRequiredColumnsNames();
+    const auto & gb_key_required_columns = group_by_key_actions->getRequiredColumnsNames();
 
     const auto & partition_actions = reading.getStorageMetadata()->getPartitionKey().expression->getActionsDAG();
 
@@ -166,9 +167,9 @@ bool isPartitionKeySuitsGroupByKey(
         if (std::ranges::find(gb_key_required_columns, col) == gb_key_required_columns.end())
             return false;
 
-    const auto irreducibe_nodes = removeInjectiveFunctionsFromResultsRecursively(group_by_actions);
+    const auto irreducibe_nodes = removeInjectiveFunctionsFromResultsRecursively(group_by_key_actions);
 
-    const auto matches = matchTrees(group_by_actions->getOutputs(), partition_actions);
+    const auto matches = matchTrees(group_by_key_actions->getOutputs(), partition_actions);
 
     return allOutputsDependsOnlyOnAllowedNodes(partition_actions, irreducibe_nodes, matches);
 }
@@ -206,7 +207,7 @@ size_t tryAggregatePartitionsIndependently(QueryPlan::Node * node, QueryPlan::No
         return 0;
 
     if (!reading->willOutputEachPartitionThroughSeparatePort()
-        && isPartitionKeySuitsGroupByKey(*reading, expression_step->getExpression()->clone(), *aggregating_step))
+        && isPartitionKeySuitsGroupByKey(*reading, expression_step->getExpression(), *aggregating_step))
     {
         if (reading->requestOutputEachPartitionThroughSeparatePort())
             aggregating_step->skipMerging();
@@ -161,7 +161,7 @@ namespace
             case GRPCObsoleteTransportCompression::NO_COMPRESSION: res.algorithm = GRPC_COMPRESS_NONE; break;
             case GRPCObsoleteTransportCompression::DEFLATE: res.algorithm = GRPC_COMPRESS_DEFLATE; break;
             case GRPCObsoleteTransportCompression::GZIP: res.algorithm = GRPC_COMPRESS_GZIP; break;
-            case GRPCObsoleteTransportCompression::STREAM_GZIP: res.algorithm = GRPC_COMPRESS_STREAM_GZIP; break;
+            case GRPCObsoleteTransportCompression::STREAM_GZIP: throw Exception(ErrorCodes::INVALID_GRPC_QUERY_INFO, "STREAM_GZIP is no longer supported"); /// was flagged experimental in gRPC, removed as per v1.44
             default: throw Exception(ErrorCodes::INVALID_GRPC_QUERY_INFO, "Unknown compression algorithm: {}", GRPCObsoleteTransportCompression::CompressionAlgorithm_Name(query_info.obsolete_result_compression().algorithm()));
         }
 
@@ -206,7 +206,7 @@ namespace
             else if (str == "gzip")
                 algorithm = GRPC_COMPRESS_GZIP;
             else if (str == "stream_gzip")
-                algorithm = GRPC_COMPRESS_STREAM_GZIP;
+                throw Exception(ErrorCodes::INVALID_GRPC_QUERY_INFO, "STREAM_GZIP is no longer supported"); /// was flagged experimental in gRPC, removed as per v1.44
             else
                 throw Exception(error_code, "Unknown compression algorithm: '{}'", str);
         }
@@ -20,6 +20,7 @@
 #include <queue>
 #include <mutex>
 #include <Coordination/FourLetterCommand.h>
+#include <IO/CompressionMethod.h>
 #include <base/hex.h>
 
 
@@ -110,13 +111,13 @@ struct SocketInterruptablePollWrapper
         return pipe.fds_rw[1];
     }
 
-    PollResult poll(Poco::Timespan remaining_time, const std::shared_ptr<ReadBufferFromPocoSocket> & in)
+    PollResult poll(Poco::Timespan remaining_time, const ReadBufferFromPocoSocket & in)
     {
 
        bool socket_ready = false;
        bool fd_ready = false;
 
-        if (in->available() != 0)
+        if (in.available() != 0)
            socket_ready = true;
 
        if (response_in.available() != 0)
@@ -242,11 +243,14 @@ KeeperTCPHandler::KeeperTCPHandler(
     KeeperTCPHandler::registerConnection(this);
 }
 
-void KeeperTCPHandler::sendHandshake(bool has_leader)
+void KeeperTCPHandler::sendHandshake(bool has_leader, bool & use_compression)
 {
     Coordination::write(Coordination::SERVER_HANDSHAKE_LENGTH, *out);
     if (has_leader)
     {
+        if (use_compression)
+            Coordination::write(Coordination::ZOOKEEPER_PROTOCOL_VERSION_WITH_COMPRESSION, *out);
+        else
             Coordination::write(Coordination::ZOOKEEPER_PROTOCOL_VERSION, *out);
     }
     else
@@ -269,7 +273,7 @@ void KeeperTCPHandler::run()
     runImpl();
 }
 
-Poco::Timespan KeeperTCPHandler::receiveHandshake(int32_t handshake_length)
+Poco::Timespan KeeperTCPHandler::receiveHandshake(int32_t handshake_length, bool & use_compression)
 {
     int32_t protocol_version;
     int64_t last_zxid_seen;
@@ -282,9 +286,11 @@ Poco::Timespan KeeperTCPHandler::receiveHandshake(int32_t handshake_length)
 
     Coordination::read(protocol_version, *in);
 
-    if (protocol_version != Coordination::ZOOKEEPER_PROTOCOL_VERSION)
+    if (protocol_version != Coordination::ZOOKEEPER_PROTOCOL_VERSION && protocol_version != Coordination::ZOOKEEPER_PROTOCOL_VERSION_WITH_COMPRESSION)
         throw Exception(ErrorCodes::UNEXPECTED_PACKET_FROM_CLIENT, "Unexpected protocol version: {}", toString(protocol_version));
 
+    use_compression = (protocol_version == Coordination::ZOOKEEPER_PROTOCOL_VERSION_WITH_COMPRESSION);
+
     Coordination::read(last_zxid_seen, *in);
     Coordination::read(timeout_ms, *in);
 
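receiveHandshake above accepts either the plain protocol version or the compression-capable one and records the choice in use_compression. A toy sketch of that negotiation (the version constants below are made up, not the real protocol values):

// The client advertises a protocol version; the server accepts either the base
// version or the "with compression" version and derives a use_compression flag.
#include <cstdint>
#include <iostream>
#include <stdexcept>

constexpr int32_t PROTOCOL_VERSION = 0;                     // assumed base version
constexpr int32_t PROTOCOL_VERSION_WITH_COMPRESSION = 10;   // assumed compression-capable version

bool negotiateCompression(int32_t client_protocol_version)
{
    if (client_protocol_version != PROTOCOL_VERSION
        && client_protocol_version != PROTOCOL_VERSION_WITH_COMPRESSION)
        throw std::runtime_error("Unexpected protocol version");

    // Compression is enabled only when both sides speak the extended version.
    return client_protocol_version == PROTOCOL_VERSION_WITH_COMPRESSION;
}

int main()
{
    std::cout << negotiateCompression(PROTOCOL_VERSION) << '\n';                    // 0
    std::cout << negotiateCompression(PROTOCOL_VERSION_WITH_COMPRESSION) << '\n';   // 1
}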
@@ -309,8 +315,12 @@ void KeeperTCPHandler::runImpl()
     socket().setSendTimeout(send_timeout);
     socket().setNoDelay(true);
 
-    in = std::make_shared<ReadBufferFromPocoSocket>(socket());
-    out = std::make_shared<WriteBufferFromPocoSocket>(socket());
+    in.emplace(socket());
+    out.emplace(socket());
+    compressed_in.reset();
+    compressed_out.reset();
+
+    bool use_compression = false;
 
     if (in->eof())
     {
@@ -343,7 +353,7 @@ void KeeperTCPHandler::runImpl()
         try
         {
             int32_t handshake_length = header;
-            auto client_timeout = receiveHandshake(handshake_length);
+            auto client_timeout = receiveHandshake(handshake_length, use_compression);
 
             if (client_timeout.totalMilliseconds() == 0)
                 client_timeout = Poco::Timespan(Coordination::DEFAULT_SESSION_TIMEOUT_MS * Poco::Timespan::MILLISECONDS);
@@ -367,20 +377,26 @@ void KeeperTCPHandler::runImpl()
         catch (const Exception & e)
         {
             LOG_WARNING(log, "Cannot receive session id {}", e.displayText());
-            sendHandshake(false);
+            sendHandshake(/* has_leader */ false, use_compression);
             return;
 
         }
 
-        sendHandshake(true);
+        sendHandshake(/* has_leader */ true, use_compression);
     }
     else
     {
         LOG_WARNING(log, "Ignoring user request, because the server is not active yet");
-        sendHandshake(false);
+        sendHandshake(/* has_leader */ false, use_compression);
         return;
     }
 
+    if (use_compression)
+    {
+        compressed_in.emplace(*in);
+        compressed_out.emplace(*out, CompressionCodecFactory::instance().get("LZ4",{}));
+    }
+
     auto response_fd = poll_wrapper->getResponseFD();
     auto response_callback = [responses_ = this->responses, response_fd](const Coordination::ZooKeeperResponsePtr & response)
     {
@@ -415,7 +431,7 @@ void KeeperTCPHandler::runImpl()
         {
             using namespace std::chrono_literals;
 
-            PollResult result = poll_wrapper->poll(session_timeout, in);
+            PollResult result = poll_wrapper->poll(session_timeout, *in);
             log_long_operation("Polling socket");
             if (result.has_requests && !close_received)
             {
@@ -467,7 +483,8 @@ void KeeperTCPHandler::runImpl()
                     updateStats(response);
                     packageSent();
 
-                    response->write(*out);
+                    response->write(getWriteBuffer());
+                    flushWriteBuffer();
                     log_long_operation("Sending response");
                     if (response->error == Coordination::Error::ZSESSIONEXPIRED)
                     {
@@ -525,7 +542,7 @@ bool KeeperTCPHandler::tryExecuteFourLetterWordCmd(int32_t command)
         try
         {
             String res = command_ptr->run();
-            out->write(res.data(), res.size());
+            out->write(res.data(),res.size());
             out->next();
         }
         catch (...)
@@ -537,19 +554,41 @@ bool KeeperTCPHandler::tryExecuteFourLetterWordCmd(int32_t command)
        }
    }
 
+WriteBuffer & KeeperTCPHandler::getWriteBuffer()
+{
+    if (compressed_out)
+        return *compressed_out;
+    return *out;
+}
+
+void KeeperTCPHandler::flushWriteBuffer()
+{
+    if (compressed_out)
+        compressed_out->next();
+    out->next();
+}
+
+ReadBuffer & KeeperTCPHandler::getReadBuffer()
+{
+    if (compressed_in)
+        return *compressed_in;
+    return *in;
+}
+
 std::pair<Coordination::OpNum, Coordination::XID> KeeperTCPHandler::receiveRequest()
 {
+    auto & read_buffer = getReadBuffer();
     int32_t length;
-    Coordination::read(length, *in);
+    Coordination::read(length, read_buffer);
     int32_t xid;
-    Coordination::read(xid, *in);
+    Coordination::read(xid, read_buffer);
 
     Coordination::OpNum opnum;
-    Coordination::read(opnum, *in);
+    Coordination::read(opnum, read_buffer);
 
     Coordination::ZooKeeperRequestPtr request = Coordination::ZooKeeperRequestFactory::instance().get(opnum);
     request->xid = xid;
-    request->readImpl(*in);
+    request->readImpl(read_buffer);
 
     if (!keeper_dispatcher->putRequest(request, session_id))
         throw Exception(ErrorCodes::TIMEOUT_EXCEEDED, "Session {} already disconnected", session_id);
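flushWriteBuffer above finishes the compression layer before flushing the socket buffer, so a complete compressed frame is handed down before anything is sent. A toy sketch of that two-layer flush order (stand-in types, not ClickHouse's buffer classes):

// Finish the compression layer first so it emits a complete frame into the lower
// buffer, then flush the lower (socket-like) buffer.
#include <iostream>
#include <optional>
#include <string>
#include <vector>

struct SocketBuffer
{
    std::string pending;
    std::vector<std::string> sent;
    void next() { sent.push_back(pending); pending.clear(); }   // "flush to the network"
};

struct CompressingBuffer
{
    SocketBuffer & lower;
    std::string pending;
    explicit CompressingBuffer(SocketBuffer & lower_) : lower(lower_) {}
    void next()
    {
        // Pretend to compress: wrap the pending bytes in a frame and hand them down.
        lower.pending += "[frame:" + pending + "]";
        pending.clear();
    }
};

void flushWriteBuffer(std::optional<CompressingBuffer> & compressed_out, SocketBuffer & out)
{
    if (compressed_out)
        compressed_out->next();   // complete the compressed frame first
    out.next();                   // then push everything to the socket
}

int main()
{
    SocketBuffer out;
    std::optional<CompressingBuffer> compressed_out;
    compressed_out.emplace(out);
    compressed_out->pending = "response";
    flushWriteBuffer(compressed_out, out);
    std::cout << out.sent.at(0) << '\n';   // [frame:response]
}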
@@ -17,6 +17,8 @@
 #include <unordered_map>
 #include <Coordination/KeeperConnectionStats.h>
 #include <Poco/Timestamp.h>
+#include <Compression/CompressedReadBuffer.h>
+#include <Compression/CompressedWriteBuffer.h>
 
 namespace DB
 {
@@ -78,15 +80,21 @@ private:
     Coordination::XID close_xid = Coordination::CLOSE_XID;
 
     /// Streams for reading/writing from/to client connection socket.
-    std::shared_ptr<ReadBufferFromPocoSocket> in;
-    std::shared_ptr<WriteBufferFromPocoSocket> out;
+    std::optional<ReadBufferFromPocoSocket> in;
+    std::optional<WriteBufferFromPocoSocket> out;
+    std::optional<CompressedReadBuffer> compressed_in;
+    std::optional<CompressedWriteBuffer> compressed_out;
 
     std::atomic<bool> connected{false};
 
     void runImpl();
 
-    void sendHandshake(bool has_leader);
-    Poco::Timespan receiveHandshake(int32_t handshake_length);
+    WriteBuffer & getWriteBuffer();
+    void flushWriteBuffer();
+    ReadBuffer & getReadBuffer();
+
+    void sendHandshake(bool has_leader, bool & use_compression);
+    Poco::Timespan receiveHandshake(int32_t handshake_length, bool & use_compression);
 
     static bool isHandShake(int32_t handshake_length);
     bool tryExecuteFourLetterWordCmd(int32_t command);
@@ -157,16 +157,12 @@ struct DeltaLakeMetadataParser<Configuration, MetadataReadHelper>::Impl
             if (json.has("add"))
             {
                 const auto path = json["add"]["path"].getString();
-                const auto [_, inserted] = result.insert(fs::path(configuration.getPath()) / path);
-                if (!inserted)
-                    throw Exception(ErrorCodes::INCORRECT_DATA, "File already exists {}", path);
+                result.insert(fs::path(configuration.getPath()) / path);
             }
             else if (json.has("remove"))
             {
                 const auto path = json["remove"]["path"].getString();
-                const bool erase = result.erase(fs::path(configuration.getPath()) / path);
-                if (!erase)
-                    throw Exception(ErrorCodes::INCORRECT_DATA, "File doesn't exist {}", path);
+                result.erase(fs::path(configuration.getPath()) / path);
             }
         }
     }
@@ -1,5 +1,7 @@
 #include <Storages/KVStorageUtils.h>
 
+#include <Columns/ColumnSet.h>
+
 #include <Parsers/ASTFunction.h>
 #include <Parsers/ASTIdentifier.h>
 #include <Parsers/ASTSubquery.h>
@@ -121,6 +123,121 @@ bool traverseASTFilter(
     }
     return false;
 }
 
+bool traverseDAGFilter(
+    const std::string & primary_key, const DataTypePtr & primary_key_type, const ActionsDAG::Node * elem, const ContextPtr & context, FieldVectorPtr & res)
+{
+    if (elem->type == ActionsDAG::ActionType::ALIAS)
+        return traverseDAGFilter(primary_key, primary_key_type, elem->children.at(0), context, res);
+
+    if (elem->type != ActionsDAG::ActionType::FUNCTION)
+        return false;
+
+    auto func_name = elem->function_base->getName();
+
+    if (func_name == "and")
+    {
+        // one child has the key filter condition is ok
+        for (const auto * child : elem->children)
+            if (traverseDAGFilter(primary_key, primary_key_type, child, context, res))
+                return true;
+        return false;
+    }
+    else if (func_name == "or")
+    {
+        // make sure every child has the key filter condition
+        for (const auto * child : elem->children)
+            if (!traverseDAGFilter(primary_key, primary_key_type, child, context, res))
+                return false;
+        return true;
+    }
+    else if (func_name == "equals" || func_name == "in")
+    {
+        if (elem->children.size() != 2)
+            return false;
+
+        if (func_name == "in")
+        {
+            const auto * key = elem->children.at(0);
+            while (key->type == ActionsDAG::ActionType::ALIAS)
+                key = key->children.at(0);
+
+            if (key->type != ActionsDAG::ActionType::INPUT)
+                return false;
+
+            if (key->result_name != primary_key)
+                return false;
+
+            const auto * value = elem->children.at(1);
+            if (value->type != ActionsDAG::ActionType::COLUMN)
+                return false;
+
+            const IColumn * value_col = value->column.get();
+            if (const auto * col_const = typeid_cast<const ColumnConst *>(value_col))
+                value_col = &col_const->getDataColumn();
+
+            const auto * col_set = typeid_cast<const ColumnSet *>(value_col);
+            if (!col_set)
+                return false;
+
+            auto future_set = col_set->getData();
+            future_set->buildOrderedSetInplace(context);
+
+            auto set = future_set->get();
+            if (!set)
+                return false;
+
+            if (!set->hasExplicitSetElements())
+                return false;
+
+            set->checkColumnsNumber(1);
+            const auto & set_column = *set->getSetElements()[0];
+
+            if (set_column.getDataType() != primary_key_type->getTypeId())
+                return false;
+
+            for (size_t row = 0; row < set_column.size(); ++row)
+                res->push_back(set_column[row]);
+            return true;
+        }
+        else
+        {
+            const auto * key = elem->children.at(0);
+            while (key->type == ActionsDAG::ActionType::ALIAS)
+                key = key->children.at(0);
+
+            if (key->type != ActionsDAG::ActionType::INPUT)
+                return false;
+
+            if (key->result_name != primary_key)
+                return false;
+
+            const auto * value = elem->children.at(1);
+            if (value->type != ActionsDAG::ActionType::COLUMN)
+                return false;
+
+            auto converted_field = convertFieldToType((*value->column)[0], *primary_key_type);
+            if (!converted_field.isNull())
+                res->push_back(converted_field);
+            return true;
+        }
+    }
+    return false;
+}
+}
+
+std::pair<FieldVectorPtr, bool> getFilterKeys(
+    const String & primary_key, const DataTypePtr & primary_key_type, const ActionDAGNodes & filter_nodes, const ContextPtr & context)
+{
+    if (filter_nodes.nodes.empty())
+        return {{}, true};
+
+    auto filter_actions_dag = ActionsDAG::buildFilterActionsDAG(filter_nodes.nodes, {}, context);
+    const auto * predicate = filter_actions_dag->getOutputs().at(0);
+
+    FieldVectorPtr res = std::make_shared<FieldVector>();
+    auto matched_keys = traverseDAGFilter(primary_key, primary_key_type, predicate, context, res);
+    return std::make_pair(res, !matched_keys);
 }
 
 std::pair<FieldVectorPtr, bool> getFilterKeys(
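traverseDAGFilter above turns a pushed-down filter into a list of primary-key values: under AND one constrained conjunct is enough, under OR every disjunct must constrain the key. A toy sketch of that rule over a simplified expression type (not ClickHouse's ActionsDAG):

// Collect primary-key values from a filter tree: AND succeeds if any child does,
// OR succeeds only if every child does, and an equality on another column fails.
#include <iostream>
#include <string>
#include <vector>

struct Expr
{
    std::string kind;             // "and", "or", or "eq"
    std::string column;           // used when kind == "eq"
    int value = 0;                // used when kind == "eq"
    std::vector<Expr> children;   // used for "and" / "or"
};

bool collectKeys(const Expr & e, const std::string & primary_key, std::vector<int> & keys)
{
    if (e.kind == "eq")
    {
        if (e.column != primary_key)
            return false;
        keys.push_back(e.value);
        return true;
    }
    if (e.kind == "and")
    {
        for (const auto & child : e.children)
            if (collectKeys(child, primary_key, keys))
                return true;       // one constrained conjunct is enough
        return false;
    }
    if (e.kind == "or")
    {
        for (const auto & child : e.children)
            if (!collectKeys(child, primary_key, keys))
                return false;      // every disjunct must constrain the key
        return true;
    }
    return false;
}

int main()
{
    Expr filter{"or", "", 0, {{"eq", "key", 1, {}}, {"eq", "key", 2, {}}}};
    std::vector<int> keys;
    std::cout << collectKeys(filter, "key", keys) << ' ' << keys.size() << '\n';   // 1 2
}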
@@ -21,6 +21,9 @@ using DataTypePtr = std::shared_ptr<const IDataType>;
 std::pair<FieldVectorPtr, bool> getFilterKeys(
     const std::string & primary_key, const DataTypePtr & primary_key_type, const SelectQueryInfo & query_info, const ContextPtr & context);
 
+std::pair<FieldVectorPtr, bool> getFilterKeys(
+    const String & primary_key, const DataTypePtr & primary_key_type, const ActionDAGNodes & filter_nodes, const ContextPtr & context);
+
 template <typename K, typename V>
 void fillColumns(const K & key, const V & value, size_t key_pos, const Block & header, MutableColumns & columns)
 {
@@ -381,11 +381,6 @@ void IMergeTreeDataPart::setState(MergeTreeDataPartState new_state) const
     incrementStateMetric(state);
 }
 
-MergeTreeDataPartState IMergeTreeDataPart::getState() const
-{
-    return state;
-}
-
 
 std::pair<DayNum, DayNum> IMergeTreeDataPart::getMinMaxDate() const
 {
@@ -210,6 +210,8 @@ public:
 
 private:
     String mutable_name;
+    mutable MergeTreeDataPartState state{MergeTreeDataPartState::Temporary};
+
 public:
     const String & name; // const ref to private mutable_name
     MergeTreePartInfo info;
@@ -274,7 +276,7 @@ public:
 
     /// Current state of the part. If the part is in working set already, it should be accessed via data_parts mutex
     void setState(MergeTreeDataPartState new_state) const;
-    MergeTreeDataPartState getState() const;
+    ALWAYS_INLINE MergeTreeDataPartState getState() const { return state; }
 
     static constexpr std::string_view stateString(MergeTreeDataPartState state) { return magic_enum::enum_name(state); }
     constexpr std::string_view stateString() const { return stateString(state); }
@@ -676,8 +678,6 @@ private:
     void incrementStateMetric(MergeTreeDataPartState state) const;
     void decrementStateMetric(MergeTreeDataPartState state) const;
 
-    mutable MergeTreeDataPartState state{MergeTreeDataPartState::Temporary};
-
     /// This ugly flag is needed for debug assertions only
     mutable bool part_is_probably_removed_from_disk = false;
 };
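The part-state hunks above move the state member next to its accessor and define getState() in the header as an ALWAYS_INLINE one-liner, presumably so callers can inline it instead of paying for an out-of-line call. A small illustration of the pattern (toy class, not the real part type):

// A trivial getter defined out-of-line in a .cpp file cannot be inlined into other
// translation units (without LTO); defining it in the header lets every caller
// read the member directly.
#include <iostream>

enum class State { Temporary, Active, Deleting };

class Part
{
public:
    void setState(State new_state) const { state = new_state; }

    // Header-defined accessor: callers in any translation unit can inline this.
    State getState() const { return state; }

private:
    mutable State state{State::Temporary};
};

int main()
{
    Part part;
    part.setState(State::Active);
    std::cout << static_cast<int>(part.getState()) << '\n';   // 1
}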
@@ -20,6 +20,9 @@
 #include <Interpreters/MutationsInterpreter.h>
 
 #include <Processors/Executors/PullingPipelineExecutor.h>
+#include <Processors/QueryPlan/SourceStepWithFilter.h>
+#include <Processors/QueryPlan/QueryPlan.h>
+#include <Processors/Sources/NullSource.h>
 
 #include <Poco/Logger.h>
 #include <Poco/Util/AbstractConfiguration.h>
@@ -440,7 +443,46 @@ void StorageEmbeddedRocksDB::initDB()
     }
 }
 
-Pipe StorageEmbeddedRocksDB::read(
+class ReadFromEmbeddedRocksDB : public SourceStepWithFilter
+{
+public:
+    std::string getName() const override { return "ReadFromEmbeddedRocksDB"; }
+    void initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) override;
+    void applyFilters() override;
+
+    ReadFromEmbeddedRocksDB(
+        Block sample_block,
+        StorageSnapshotPtr storage_snapshot_,
+        const StorageEmbeddedRocksDB & storage_,
+        SelectQueryInfo query_info_,
+        ContextPtr context_,
+        size_t max_block_size_,
+        size_t num_streams_)
+        : SourceStepWithFilter(DataStream{.header = std::move(sample_block)})
+        , storage_snapshot(std::move(storage_snapshot_))
+        , storage(storage_)
+        , query_info(std::move(query_info_))
+        , context(std::move(context_))
+        , max_block_size(max_block_size_)
+        , num_streams(num_streams_)
+    {
+    }
+
+private:
+    StorageSnapshotPtr storage_snapshot;
+    const StorageEmbeddedRocksDB & storage;
+    SelectQueryInfo query_info;
+    ContextPtr context;
+
+    size_t max_block_size;
+    size_t num_streams;
+
+    FieldVectorPtr keys;
+    bool all_scan = false;
+};
+
+void StorageEmbeddedRocksDB::read(
+    QueryPlan & query_plan,
     const Names & column_names,
     const StorageSnapshotPtr & storage_snapshot,
     SelectQueryInfo & query_info,
@@ -450,23 +492,39 @@ Pipe StorageEmbeddedRocksDB::read(
     size_t num_streams)
 {
     storage_snapshot->check(column_names);
 
-    FieldVectorPtr keys;
-    bool all_scan = false;
-
     Block sample_block = storage_snapshot->metadata->getSampleBlock();
-    auto primary_key_data_type = sample_block.getByName(primary_key).type;
-    std::tie(keys, all_scan) = getFilterKeys(primary_key, primary_key_data_type, query_info, context_);
+    auto reading = std::make_unique<ReadFromEmbeddedRocksDB>(
+        std::move(sample_block),
+        storage_snapshot,
+        *this,
+        query_info,
+        context_,
+        max_block_size,
+        num_streams);
+
+    query_plan.addStep(std::move(reading));
+}
+
+void ReadFromEmbeddedRocksDB::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &)
+{
+    const auto & sample_block = getOutputStream().header;
 
     if (all_scan)
     {
-        auto iterator = std::unique_ptr<rocksdb::Iterator>(rocksdb_ptr->NewIterator(rocksdb::ReadOptions()));
+        auto iterator = std::unique_ptr<rocksdb::Iterator>(storage.rocksdb_ptr->NewIterator(rocksdb::ReadOptions()));
         iterator->SeekToFirst();
-        return Pipe(std::make_shared<EmbeddedRocksDBSource>(*this, sample_block, std::move(iterator), max_block_size));
+        auto source = std::make_shared<EmbeddedRocksDBSource>(storage, sample_block, std::move(iterator), max_block_size);
+        source->setStorageLimits(query_info.storage_limits);
+        pipeline.init(Pipe(std::move(source)));
     }
     else
     {
         if (keys->empty())
-            return {};
+        {
+            pipeline.init(Pipe(std::make_shared<NullSource>(sample_block)));
+            return;
+        }
 
         ::sort(keys->begin(), keys->end());
         keys->erase(std::unique(keys->begin(), keys->end()), keys->end());
@@ -484,13 +542,22 @@ Pipe StorageEmbeddedRocksDB::read(
             size_t begin = num_keys * thread_idx / num_threads;
             size_t end = num_keys * (thread_idx + 1) / num_threads;
 
-            pipes.emplace_back(std::make_shared<EmbeddedRocksDBSource>(
-                *this, sample_block, keys, keys->begin() + begin, keys->begin() + end, max_block_size));
+            auto source = std::make_shared<EmbeddedRocksDBSource>(
+                storage, sample_block, keys, keys->begin() + begin, keys->begin() + end, max_block_size);
+            source->setStorageLimits(query_info.storage_limits);
+            pipes.emplace_back(std::move(source));
         }
-        return Pipe::unitePipes(std::move(pipes));
+        pipeline.init(Pipe::unitePipes(std::move(pipes)));
     }
 }
 
+void ReadFromEmbeddedRocksDB::applyFilters()
+{
+    const auto & sample_block = getOutputStream().header;
+    auto primary_key_data_type = sample_block.getByName(storage.primary_key).type;
+    std::tie(keys, all_scan) = getFilterKeys(storage.primary_key, primary_key_data_type, filter_nodes, context);
+}
+
 SinkToStoragePtr StorageEmbeddedRocksDB::write(
     const ASTPtr & /*query*/, const StorageMetadataPtr & metadata_snapshot, ContextPtr /*context*/, bool /*async_insert*/)
 {
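The RocksDB hunks above replace the Pipe-returning read() with a query-plan step: key extraction moves into applyFilters(), which the planner calls with the pushed-down predicate, and initializePipeline() then chooses between point lookups and a full scan. A toy sketch of that flow (made-up interfaces, not ClickHouse's):

// The storage no longer analyses the filter inside read(); it adds a plan step,
// the planner later calls applyFilters() with the pushed-down predicate, and only
// initializePipeline() decides between a key lookup and a full scan.
#include <iostream>
#include <optional>
#include <vector>

struct ReadStep
{
    std::optional<std::vector<int>> keys;   // filled by applyFilters(); empty optional => full scan

    void applyFilters(const std::vector<int> & keys_from_filter)
    {
        if (!keys_from_filter.empty())
            keys = keys_from_filter;
    }

    void initializePipeline()
    {
        if (keys)
            std::cout << "point lookups for " << keys->size() << " keys\n";
        else
            std::cout << "full scan\n";
    }
};

int main()
{
    ReadStep step;                  // created by the storage's read()
    step.applyFilters({1, 2, 3});   // the planner pushes the WHERE clause down later
    step.initializePipeline();      // prints: point lookups for 3 keys
}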
@@ -26,6 +26,7 @@ class Context;
 class StorageEmbeddedRocksDB final : public IStorage, public IKeyValueEntity, WithContext
 {
     friend class EmbeddedRocksDBSink;
+    friend class ReadFromEmbeddedRocksDB;
 public:
     StorageEmbeddedRocksDB(const StorageID & table_id_,
         const String & relative_data_path_,
@@ -39,7 +40,8 @@ public:
 
     std::string getName() const override { return "EmbeddedRocksDB"; }
 
-    Pipe read(
+    void read(
+        QueryPlan & query_plan,
         const Names & column_names,
         const StorageSnapshotPtr & storage_snapshot,
         SelectQueryInfo & query_info,
@@ -435,7 +435,13 @@ void StorageMaterializedView::backupData(BackupEntriesCollector & backup_entries
 {
     /// We backup the target table's data only if it's inner.
     if (hasInnerTable())
-        getTargetTable()->backupData(backup_entries_collector, data_path_in_backup, partitions);
+    {
+        if (auto table = tryGetTargetTable())
+            table->backupData(backup_entries_collector, data_path_in_backup, partitions);
+        else
+            LOG_WARNING(&Poco::Logger::get("StorageMaterializedView"),
+                        "Inner table does not exist, will not backup any data");
+    }
 }
 
 void StorageMaterializedView::restoreDataFromBackup(RestorerFromBackup & restorer, const String & data_path_in_backup, const std::optional<ASTs> & partitions)
@@ -1,48 +1,55 @@
-#include <QueryPipeline/narrowPipe.h>
-#include <QueryPipeline/QueryPipelineBuilder.h>
-#include <Storages/StorageMerge.h>
-#include <Storages/StorageFactory.h>
-#include <Storages/StorageView.h>
-#include <Storages/VirtualColumnUtils.h>
-#include <Storages/AlterCommands.h>
-#include <Storages/checkAndGetLiteralArgument.h>
-#include <Interpreters/Context.h>
-#include <Interpreters/TreeRewriter.h>
-#include <Interpreters/ExpressionActions.h>
-#include <Interpreters/evaluateConstantExpression.h>
-#include <Interpreters/InterpreterSelectQuery.h>
-#include <Interpreters/InterpreterSelectQueryAnalyzer.h>
-#include <Interpreters/IdentifierSemantic.h>
-#include <Interpreters/getHeaderForProcessingStage.h>
-#include <Interpreters/addTypeConversionToAST.h>
-#include <Interpreters/replaceAliasColumnsInQuery.h>
-#include <Planner/Utils.h>
-#include <Analyzer/Utils.h>
+#include <algorithm>
+#include <functional>
+
 #include <Analyzer/ConstantNode.h>
 #include <Analyzer/TableNode.h>
-#include <Parsers/ASTFunction.h>
-#include <Parsers/ASTSelectQuery.h>
-#include <Parsers/ASTLiteral.h>
-#include <Parsers/ASTIdentifier.h>
-#include <Parsers/ASTExpressionList.h>
-#include <DataTypes/DataTypeString.h>
+#include <Analyzer/Utils.h>
+#include <Columns/ColumnSet.h>
 #include <Columns/ColumnString.h>
-#include <Common/typeid_cast.h>
-#include <Common/checkStackSize.h>
-#include "DataTypes/IDataType.h"
-#include <Processors/QueryPlan/ReadFromMergeTree.h>
-#include <Processors/Sources/NullSource.h>
+#include <Core/SortDescription.h>
+#include <DataTypes/DataTypeString.h>
+#include <DataTypes/IDataType.h>
+#include <Databases/IDatabase.h>
+#include <IO/WriteBufferFromString.h>
+#include <Interpreters/Context.h>
+#include <Interpreters/ExpressionActions.h>
+#include <Interpreters/IdentifierSemantic.h>
+#include <Interpreters/InterpreterSelectQuery.h>
+#include <Interpreters/InterpreterSelectQueryAnalyzer.h>
+#include <Interpreters/TreeRewriter.h>
+#include <Interpreters/addTypeConversionToAST.h>
+#include <Interpreters/evaluateConstantExpression.h>
+#include <Interpreters/getHeaderForProcessingStage.h>
+#include <Interpreters/replaceAliasColumnsInQuery.h>
+#include <Parsers/ASTExpressionList.h>
+#include <Parsers/ASTFunction.h>
+#include <Parsers/ASTIdentifier.h>
+#include <Parsers/ASTLiteral.h>
+#include <Parsers/ASTSelectQuery.h>
+#include <Planner/Utils.h>
+#include <Processors/ConcatProcessor.h>
 #include <Processors/QueryPlan/BuildQueryPipelineSettings.h>
 #include <Processors/QueryPlan/Optimizations/QueryPlanOptimizationSettings.h>
-#include <Processors/Transforms/MaterializingTransform.h>
-#include <Processors/ConcatProcessor.h>
-#include <Processors/Transforms/ExpressionTransform.h>
 #include <Processors/QueryPlan/QueryPlan.h>
+#include <Processors/QueryPlan/ReadFromMergeTree.h>
+#include <Processors/Sources/NullSource.h>
 #include <Processors/Sources/SourceFromSingleChunk.h>
-#include <Databases/IDatabase.h>
+#include <Processors/Transforms/ExpressionTransform.h>
+#include <Processors/Transforms/MaterializingTransform.h>
+#include <QueryPipeline/QueryPipelineBuilder.h>
+#include <QueryPipeline/narrowPipe.h>
+#include <Storages/AlterCommands.h>
+#include <Storages/SelectQueryInfo.h>
+#include <Storages/StorageFactory.h>
+#include <Storages/StorageMerge.h>
+#include <Storages/StorageView.h>
+#include <Storages/VirtualColumnUtils.h>
+#include <Storages/checkAndGetLiteralArgument.h>
+#include <base/defines.h>
 #include <base/range.h>
-#include <algorithm>
+#include <Common/Exception.h>
+#include <Common/assert_cast.h>
+#include <Common/checkStackSize.h>
+#include <Common/typeid_cast.h>
 
 namespace
 {
@@ -398,6 +405,7 @@ ReadFromMerge::ReadFromMerge(
     , context(std::move(context_))
     , common_processed_stage(processed_stage)
 {
+    createChildPlans();
 }

 void ReadFromMerge::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &)
@@ -408,6 +416,65 @@ void ReadFromMerge::initializePipeline(QueryPipelineBuilder & pipeline, const Bu
         return;
     }

+    QueryPlanResourceHolder resources;
+    std::vector<std::unique_ptr<QueryPipelineBuilder>> pipelines;
+
+    chassert(selected_tables.size() == child_plans.size());
+    chassert(selected_tables.size() == table_aliases.size());
+    auto table_it = selected_tables.begin();
+    for (size_t i = 0; i < selected_tables.size(); ++i, ++table_it)
+    {
+        auto & plan = child_plans.at(i);
+        const auto & table = *table_it;
+
+        const auto storage = std::get<1>(table);
+        const auto storage_metadata_snapshot = storage->getInMemoryMetadataPtr();
+        const auto nested_storage_snaphsot = storage->getStorageSnapshot(storage_metadata_snapshot, context);
+
+        auto modified_query_info = getModifiedQueryInfo(query_info, context, table, nested_storage_snaphsot);
+
+        auto source_pipeline = createSources(
+            plan, nested_storage_snaphsot, modified_query_info, common_processed_stage, common_header, table_aliases.at(i), table, context);
+
+        if (source_pipeline && source_pipeline->initialized())
+        {
+            resources.storage_holders.push_back(std::get<1>(table));
+            resources.table_locks.push_back(std::get<2>(table));
+
+            pipelines.emplace_back(std::move(source_pipeline));
+        }
+    }
+
+    if (pipelines.empty())
+    {
+        pipeline.init(Pipe(std::make_shared<NullSource>(output_stream->header)));
+        return;
+    }
+
+    pipeline = QueryPipelineBuilder::unitePipelines(std::move(pipelines));
+
+    if (!query_info.input_order_info)
+    {
+        size_t tables_count = selected_tables.size();
+        Float64 num_streams_multiplier = std::min(
+            static_cast<size_t>(tables_count),
+            std::max(1UL, static_cast<size_t>(context->getSettingsRef().max_streams_multiplier_for_merge_tables)));
+        size_t num_streams = static_cast<size_t>(requested_num_streams * num_streams_multiplier);
+
+        // It's possible to have many tables read from merge, resize(num_streams) might open too many files at the same time.
+        // Using narrowPipe instead. But in case of reading in order of primary key, we cannot do it,
+        // because narrowPipe doesn't preserve order.
+        pipeline.narrow(num_streams);
+    }
+
+    pipeline.addResources(std::move(resources));
+}
+
+void ReadFromMerge::createChildPlans()
+{
+    if (selected_tables.empty())
+        return;
+
     size_t tables_count = selected_tables.size();
     Float64 num_streams_multiplier
         = std::min(static_cast<size_t>(tables_count), std::max(1UL, static_cast<size_t>(context->getSettingsRef().max_streams_multiplier_for_merge_tables)));
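To make the stream arithmetic in the hunk above concrete, here is a small self-contained Python sketch of the same formula; the table count, requested stream count, and `max_streams_multiplier_for_merge_tables` value are made-up example inputs rather than values taken from this commit.

```python
# Illustrative sketch only: mirrors the num_streams formula above with hypothetical inputs.
def merge_num_streams(tables_count: int, requested_num_streams: int, multiplier_setting: int) -> int:
    # The multiplier is capped by the number of tables and is at least 1.
    num_streams_multiplier = min(tables_count, max(1, multiplier_setting))
    return int(requested_num_streams * num_streams_multiplier)


# 10 tables, 16 requested streams, setting = 5 -> 16 * 5 = 80 streams,
# which the pipeline then narrows again so that too many files are not opened at once.
print(merge_num_streams(10, 16, 5))  # 80
```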
@@ -438,11 +505,6 @@ void ReadFromMerge::initializePipeline(QueryPipelineBuilder & pipeline, const Bu
         query_info.input_order_info = input_sorting_info;
     }

-    auto sample_block = merge_storage_snapshot->getMetadataForQuery()->getSampleBlock();
-
-    std::vector<std::unique_ptr<QueryPipelineBuilder>> pipelines;
-    QueryPlanResourceHolder resources;
-
     for (const auto & table : selected_tables)
     {
         size_t current_need_streams = tables_count >= num_streams ? 1 : (num_streams / tables_count);
@@ -460,7 +522,7 @@ void ReadFromMerge::initializePipeline(QueryPipelineBuilder & pipeline, const Bu
         if (sampling_requested && !storage->supportsSampling())
             throw Exception(ErrorCodes::SAMPLING_NOT_SUPPORTED, "Illegal SAMPLE: table {} doesn't support sampling", storage->getStorageID().getNameForLogs());

-        Aliases aliases;
+        auto & aliases = table_aliases.emplace_back();
         auto storage_metadata_snapshot = storage->getInMemoryMetadataPtr();
         auto nested_storage_snaphsot = storage->getStorageSnapshot(storage_metadata_snapshot, context);

@@ -479,6 +541,8 @@ void ReadFromMerge::initializePipeline(QueryPipelineBuilder & pipeline, const Bu
         ASTPtr required_columns_expr_list = std::make_shared<ASTExpressionList>();
         ASTPtr column_expr;

+        auto sample_block = merge_storage_snapshot->getMetadataForQuery()->getSampleBlock();
+
         for (const auto & column : column_names)
         {
             const auto column_default = storage_columns.getDefault(column);
@@ -515,42 +579,16 @@ void ReadFromMerge::initializePipeline(QueryPipelineBuilder & pipeline, const Bu
             }
         }

-        auto source_pipeline = createSources(
+        child_plans.emplace_back(createPlanForTable(
             nested_storage_snaphsot,
             modified_query_info,
             common_processed_stage,
             required_max_block_size,
-            common_header,
-            aliases,
             table,
             column_names_as_aliases.empty() ? column_names : column_names_as_aliases,
             context,
-            current_streams);
-
-        if (source_pipeline && source_pipeline->initialized())
-        {
-            resources.storage_holders.push_back(std::get<1>(table));
-            resources.table_locks.push_back(std::get<2>(table));
-
-            pipelines.emplace_back(std::move(source_pipeline));
-        }
+            current_streams));
     }
-
-    if (pipelines.empty())
-    {
-        pipeline.init(Pipe(std::make_shared<NullSource>(output_stream->header)));
-        return;
-    }
-
-    pipeline = QueryPipelineBuilder::unitePipelines(std::move(pipelines));
-
-    if (!query_info.input_order_info)
-        // It's possible to have many tables read from merge, resize(num_streams) might open too many files at the same time.
-        // Using narrowPipe instead. But in case of reading in order of primary key, we cannot do it,
-        // because narrowPipe doesn't preserve order.
-        pipeline.narrow(num_streams);
-
-    pipeline.addResources(std::move(resources));
 }

 SelectQueryInfo ReadFromMerge::getModifiedQueryInfo(const SelectQueryInfo & query_info,
@@ -616,23 +654,121 @@ SelectQueryInfo ReadFromMerge::getModifiedQueryInfo(const SelectQueryInfo & quer
     return modified_query_info;
 }

+bool recursivelyApplyToReadingSteps(QueryPlan::Node * node, const std::function<bool(ReadFromMergeTree &)> & func)
+{
+    bool ok = true;
+    for (auto * child : node->children)
+        ok &= recursivelyApplyToReadingSteps(child, func);
+
+    // This code is mainly meant to be used to call `requestReadingInOrder` on child steps.
+    // In this case it is ok if one child will read in order and other will not (though I don't know when it is possible),
+    // the only important part is to acknowledge this at the parent and don't rely on any particular ordering of input data.
+    if (!ok)
+        return false;
+
+    if (auto * read_from_merge_tree = typeid_cast<ReadFromMergeTree *>(node->step.get()))
+        ok &= func(*read_from_merge_tree);
+
+    return ok;
+}
+
 QueryPipelineBuilderPtr ReadFromMerge::createSources(
+    QueryPlan & plan,
     const StorageSnapshotPtr & storage_snapshot,
     SelectQueryInfo & modified_query_info,
     const QueryProcessingStage::Enum & processed_stage,
-    const UInt64 max_block_size,
     const Block & header,
     const Aliases & aliases,
     const StorageWithLockAndName & storage_with_lock,
+    ContextMutablePtr modified_context,
+    bool concat_streams) const
+{
+    if (!plan.isInitialized())
+        return std::make_unique<QueryPipelineBuilder>();
+
+    QueryPipelineBuilderPtr builder;
+
+    const auto & [database_name, storage, _, table_name] = storage_with_lock;
+    bool allow_experimental_analyzer = modified_context->getSettingsRef().allow_experimental_analyzer;
+    auto storage_stage
+        = storage->getQueryProcessingStage(modified_context, QueryProcessingStage::Complete, storage_snapshot, modified_query_info);
+
+    builder = plan.buildQueryPipeline(
+        QueryPlanOptimizationSettings::fromContext(modified_context), BuildQueryPipelineSettings::fromContext(modified_context));
+
+    if (processed_stage > storage_stage || (allow_experimental_analyzer && processed_stage != QueryProcessingStage::FetchColumns))
+    {
+        /** Materialization is needed, since from distributed storage the constants come materialized.
+          * If you do not do this, different types (Const and non-Const) columns will be produced in different threads,
+          * And this is not allowed, since all code is based on the assumption that in the block stream all types are the same.
+          */
+        builder->addSimpleTransform([](const Block & stream_header) { return std::make_shared<MaterializingTransform>(stream_header); });
+    }
+
+    if (builder->initialized())
+    {
+        if (concat_streams && builder->getNumStreams() > 1)
+        {
+            // It's possible to have many tables read from merge, resize(1) might open too many files at the same time.
+            // Using concat instead.
+            builder->addTransform(std::make_shared<ConcatProcessor>(builder->getHeader(), builder->getNumStreams()));
+        }
+
+        /// Add virtual columns if we don't already have them.
+
+        Block pipe_header = builder->getHeader();
+
+        if (has_database_virtual_column && !pipe_header.has("_database"))
+        {
+            ColumnWithTypeAndName column;
+            column.name = "_database";
+            column.type = std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>());
+            column.column = column.type->createColumnConst(0, Field(database_name));
+
+            auto adding_column_dag = ActionsDAG::makeAddingColumnActions(std::move(column));
+            auto adding_column_actions = std::make_shared<ExpressionActions>(
+                std::move(adding_column_dag), ExpressionActionsSettings::fromContext(modified_context, CompileExpressions::yes));
+
+            builder->addSimpleTransform([&](const Block & stream_header)
+                { return std::make_shared<ExpressionTransform>(stream_header, adding_column_actions); });
+        }
+
+        if (has_table_virtual_column && !pipe_header.has("_table"))
+        {
+            ColumnWithTypeAndName column;
+            column.name = "_table";
+            column.type = std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>());
+            column.column = column.type->createColumnConst(0, Field(table_name));
+
+            auto adding_column_dag = ActionsDAG::makeAddingColumnActions(std::move(column));
+            auto adding_column_actions = std::make_shared<ExpressionActions>(
+                std::move(adding_column_dag), ExpressionActionsSettings::fromContext(modified_context, CompileExpressions::yes));
+
+            builder->addSimpleTransform([&](const Block & stream_header)
+                { return std::make_shared<ExpressionTransform>(stream_header, adding_column_actions); });
+        }
+
+        /// Subordinary tables could have different but convertible types, like numeric types of different width.
+        /// We must return streams with structure equals to structure of Merge table.
+        convertingSourceStream(header, storage_snapshot->metadata, aliases, modified_context, *builder, processed_stage);
+    }
+
+    return builder;
+}
+
+QueryPlan ReadFromMerge::createPlanForTable(
+    const StorageSnapshotPtr & storage_snapshot,
+    SelectQueryInfo & modified_query_info,
+    const QueryProcessingStage::Enum & processed_stage,
+    UInt64 max_block_size,
+    const StorageWithLockAndName & storage_with_lock,
     Names real_column_names,
     ContextMutablePtr modified_context,
-    size_t streams_num,
-    bool concat_streams)
+    size_t streams_num)
 {
     const auto & [database_name, storage, _, table_name] = storage_with_lock;
     auto & modified_select = modified_query_info.query->as<ASTSelectQuery &>();

-    QueryPipelineBuilderPtr builder;
     if (!InterpreterSelectQuery::isQueryWithFinal(modified_query_info) && storage->needRewriteQueryWithFinal(real_column_names))
     {
         /// NOTE: It may not work correctly in some cases, because query was analyzed without final.
@@ -647,14 +783,14 @@ QueryPipelineBuilderPtr ReadFromMerge::createSources(
             storage_snapshot,
             modified_query_info);

+    QueryPlan plan;
+
     if (processed_stage <= storage_stage || (allow_experimental_analyzer && processed_stage == QueryProcessingStage::FetchColumns))
     {
         /// If there are only virtual columns in query, you must request at least one other column.
         if (real_column_names.empty())
             real_column_names.push_back(ExpressionActions::getSmallestColumn(storage_snapshot->metadata->getColumns().getAllPhysical()).name);

-        QueryPlan & plan = child_plans.emplace_back();
-
         StorageView * view = dynamic_cast<StorageView *>(storage.get());
         if (!view || allow_experimental_analyzer)
         {
@@ -688,16 +824,7 @@ QueryPipelineBuilderPtr ReadFromMerge::createSources(
             if (!plan.isInitialized())
                 return {};

-            if (auto * read_from_merge_tree = typeid_cast<ReadFromMergeTree *>(plan.getRootNode()->step.get()))
-            {
-                size_t filters_dags_size = filter_dags.size();
-                for (size_t i = 0; i < filters_dags_size; ++i)
-                    read_from_merge_tree->addFilter(filter_dags[i], filter_nodes.nodes[i]);
-            }
-
-            builder = plan.buildQueryPipeline(
-                QueryPlanOptimizationSettings::fromContext(modified_context),
-                BuildQueryPipelineSettings::fromContext(modified_context));
+            applyFilters(plan);
         }
         else if (processed_stage > storage_stage || (allow_experimental_analyzer && processed_stage != QueryProcessingStage::FetchColumns))
         {
@@ -705,15 +832,14 @@ QueryPipelineBuilderPtr ReadFromMerge::createSources(
         modified_context->setSetting("max_threads", streams_num);
         modified_context->setSetting("max_streams_to_max_threads_ratio", 1);

-        QueryPlan & plan = child_plans.emplace_back();
-
         if (allow_experimental_analyzer)
         {
             InterpreterSelectQueryAnalyzer interpreter(modified_query_info.query_tree,
                 modified_context,
                 SelectQueryOptions(processed_stage).ignoreProjections());
-            builder = std::make_unique<QueryPipelineBuilder>(interpreter.buildQueryPipeline());
-            plan = std::move(interpreter.getPlanner()).extractQueryPlan();
+            auto & planner = interpreter.getPlanner();
+            planner.buildQueryPlanIfNeeded();
+            plan = std::move(planner).extractQueryPlan();
         }
         else
         {
@@ -722,71 +848,11 @@ QueryPipelineBuilderPtr ReadFromMerge::createSources(
             InterpreterSelectQuery interpreter{modified_query_info.query,
                 modified_context,
                 SelectQueryOptions(processed_stage).ignoreProjections()};
-            builder = std::make_unique<QueryPipelineBuilder>(interpreter.buildQueryPipeline(plan));
+            interpreter.buildQueryPlan(plan);
+        }
     }

-        /** Materialization is needed, since from distributed storage the constants come materialized.
-          * If you do not do this, different types (Const and non-Const) columns will be produced in different threads,
-          * And this is not allowed, since all code is based on the assumption that in the block stream all types are the same.
-          */
-        builder->addSimpleTransform([](const Block & stream_header) { return std::make_shared<MaterializingTransform>(stream_header); });
-    }
-
-    if (builder->initialized())
-    {
-        if (concat_streams && builder->getNumStreams() > 1)
-        {
-            // It's possible to have many tables read from merge, resize(1) might open too many files at the same time.
-            // Using concat instead.
-            builder->addTransform(std::make_shared<ConcatProcessor>(builder->getHeader(), builder->getNumStreams()));
-        }
-
-        /// Add virtual columns if we don't already have them.
-
-        Block pipe_header = builder->getHeader();
-
-        if (has_database_virtual_column && !pipe_header.has("_database"))
-        {
-            ColumnWithTypeAndName column;
-            column.name = "_database";
-            column.type = std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>());
-            column.column = column.type->createColumnConst(0, Field(database_name));
-
-            auto adding_column_dag = ActionsDAG::makeAddingColumnActions(std::move(column));
-            auto adding_column_actions = std::make_shared<ExpressionActions>(
-                std::move(adding_column_dag),
-                ExpressionActionsSettings::fromContext(modified_context, CompileExpressions::yes));
-
-            builder->addSimpleTransform([&](const Block & stream_header)
-            {
-                return std::make_shared<ExpressionTransform>(stream_header, adding_column_actions);
-            });
-        }
-
-        if (has_table_virtual_column && !pipe_header.has("_table"))
-        {
-            ColumnWithTypeAndName column;
-            column.name = "_table";
-            column.type = std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>());
-            column.column = column.type->createColumnConst(0, Field(table_name));
-
-            auto adding_column_dag = ActionsDAG::makeAddingColumnActions(std::move(column));
-            auto adding_column_actions = std::make_shared<ExpressionActions>(
-                std::move(adding_column_dag),
-                ExpressionActionsSettings::fromContext(modified_context, CompileExpressions::yes));
-
-            builder->addSimpleTransform([&](const Block & stream_header)
-            {
-                return std::make_shared<ExpressionTransform>(stream_header, adding_column_actions);
-            });
-        }
-
-        /// Subordinary tables could have different but convertible types, like numeric types of different width.
-        /// We must return streams with structure equals to structure of Merge table.
-        convertingSourceStream(header, storage_snapshot->metadata, aliases, modified_context, *builder, processed_stage);
-    }
-
-    return builder;
+    return plan;
 }

 StorageMerge::StorageListWithLocks StorageMerge::getSelectedTables(
@@ -1014,10 +1080,47 @@ bool ReadFromMerge::requestReadingInOrder(InputOrderInfoPtr order_info_)
     if (order_info_->direction != 1 && InterpreterSelectQuery::isQueryWithFinal(query_info))
         return false;

+    auto request_read_in_order = [order_info_](ReadFromMergeTree & read_from_merge_tree)
+    {
+        return read_from_merge_tree.requestReadingInOrder(
+            order_info_->used_prefix_of_sorting_key_size, order_info_->direction, order_info_->limit);
+    };
+
+    bool ok = true;
+    for (const auto & plan : child_plans)
+        if (plan.isInitialized())
+            ok &= recursivelyApplyToReadingSteps(plan.getRootNode(), request_read_in_order);
+
+    if (!ok)
+        return false;
+
     order_info = order_info_;
+    query_info.input_order_info = order_info;
     return true;
 }

+void ReadFromMerge::applyFilters(const QueryPlan & plan) const
+{
+    auto apply_filters = [this](ReadFromMergeTree & read_from_merge_tree)
+    {
+        size_t filters_dags_size = filter_dags.size();
+        for (size_t i = 0; i < filters_dags_size; ++i)
+            read_from_merge_tree.addFilter(filter_dags[i], filter_nodes.nodes[i]);
+
+        read_from_merge_tree.applyFilters();
+        return true;
+    };
+
+    recursivelyApplyToReadingSteps(plan.getRootNode(), apply_filters);
+}
+
+void ReadFromMerge::applyFilters()
+{
+    for (const auto & plan : child_plans)
+        if (plan.isInitialized())
+            applyFilters(plan);
+}
+
 IStorage::ColumnSizeByName StorageMerge::getColumnSizes() const
 {
     ColumnSizeByName column_sizes;
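Both `applyFilters` overloads and `requestReadingInOrder` lean on the same recursive walk over the per-table child plans. The following Python sketch restates that traversal pattern on a toy node structure; the `Node` class and its `is_reading_step` flag are invented for illustration and merely stand in for `QueryPlan::Node` and the `ReadFromMergeTree` cast.

```python
# Toy restatement of the recursivelyApplyToReadingSteps pattern (not ClickHouse code).
from dataclasses import dataclass, field
from typing import Callable, List


@dataclass
class Node:
    is_reading_step: bool = False  # stands in for typeid_cast<ReadFromMergeTree *>
    children: List["Node"] = field(default_factory=list)


def recursively_apply(node: Node, func: Callable[[Node], bool]) -> bool:
    ok = True
    for child in node.children:
        ok &= recursively_apply(child, func)
    if not ok:
        return False  # a child already refused; the parent must not rely on the requested property
    if node.is_reading_step:
        ok &= func(node)  # e.g. request in-order reading or push down filters
    return ok


plan = Node(children=[Node(is_reading_step=True), Node(children=[Node(is_reading_step=True)])])
print(recursively_apply(plan, lambda step: True))  # True
```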
@@ -1,9 +1,10 @@
 #pragma once

-#include <Common/OptimizedRegularExpression.h>
-#include <Storages/SelectQueryInfo.h>
-#include <Storages/IStorage.h>
+#include <Processors/QueryPlan/QueryPlan.h>
 #include <Processors/QueryPlan/SourceStepWithFilter.h>
+#include <Storages/IStorage.h>
+#include <Storages/SelectQueryInfo.h>
+#include <Common/OptimizedRegularExpression.h>


 namespace DB
@@ -146,6 +147,8 @@ public:
     /// Returns `false` if requested reading cannot be performed.
     bool requestReadingInOrder(InputOrderInfoPtr order_info_);

+    void applyFilters() override;
+
 private:
     const size_t required_max_block_size;
     const size_t requested_num_streams;
@@ -177,23 +180,37 @@ private:

     using Aliases = std::vector<AliasData>;

-    static SelectQueryInfo getModifiedQueryInfo(const SelectQueryInfo & query_info,
-        const ContextPtr & modified_context,
-        const StorageWithLockAndName & storage_with_lock_and_name,
-        const StorageSnapshotPtr & storage_snapshot);
+    std::vector<Aliases> table_aliases;

-    QueryPipelineBuilderPtr createSources(
+    void createChildPlans();
+
+    void applyFilters(const QueryPlan & plan) const;
+
+    QueryPlan createPlanForTable(
         const StorageSnapshotPtr & storage_snapshot,
         SelectQueryInfo & query_info,
         const QueryProcessingStage::Enum & processed_stage,
         UInt64 max_block_size,
-        const Block & header,
-        const Aliases & aliases,
         const StorageWithLockAndName & storage_with_lock,
         Names real_column_names,
         ContextMutablePtr modified_context,
-        size_t streams_num,
-        bool concat_streams = false);
+        size_t streams_num);
+
+    QueryPipelineBuilderPtr createSources(
+        QueryPlan & plan,
+        const StorageSnapshotPtr & storage_snapshot,
+        SelectQueryInfo & modified_query_info,
+        const QueryProcessingStage::Enum & processed_stage,
+        const Block & header,
+        const Aliases & aliases,
+        const StorageWithLockAndName & storage_with_lock,
+        ContextMutablePtr modified_context,
+        bool concat_streams = false) const;
+
+    static SelectQueryInfo getModifiedQueryInfo(const SelectQueryInfo & query_info,
+        const ContextPtr & modified_context,
+        const StorageWithLockAndName & storage_with_lock_and_name,
+        const StorageSnapshotPtr & storage_snapshot);
+
     static void convertingSourceStream(
         const Block & header,
@@ -8,6 +8,7 @@
 #include <Columns/ColumnString.h>
 #include <Columns/ColumnsNumber.h>
 #include <Interpreters/Context.h>
+#include <Interpreters/ProfileEventsExt.h>


 namespace DB
@@ -29,6 +30,7 @@ NamesAndTypesList StorageSystemBackups::getNamesAndTypes()
         {"compressed_size", std::make_shared<DataTypeUInt64>()},
         {"files_read", std::make_shared<DataTypeUInt64>()},
         {"bytes_read", std::make_shared<DataTypeUInt64>()},
+        {"ProfileEvents", std::make_shared<DataTypeMap>(std::make_shared<DataTypeString>(), std::make_shared<DataTypeUInt64>())},
     };
     return names_and_types;
 }
@@ -50,6 +52,7 @@ void StorageSystemBackups::fillData(MutableColumns & res_columns, ContextPtr con
     auto & column_compressed_size = assert_cast<ColumnUInt64 &>(*res_columns[column_index++]);
     auto & column_num_read_files = assert_cast<ColumnUInt64 &>(*res_columns[column_index++]);
     auto & column_num_read_bytes = assert_cast<ColumnUInt64 &>(*res_columns[column_index++]);
+    auto & column_profile_events = assert_cast<ColumnMap &>(*res_columns[column_index++]);

     auto add_row = [&](const BackupOperationInfo & info)
     {
@@ -66,6 +69,10 @@ void StorageSystemBackups::fillData(MutableColumns & res_columns, ContextPtr con
         column_compressed_size.insertValue(info.compressed_size);
         column_num_read_files.insertValue(info.num_read_files);
         column_num_read_bytes.insertValue(info.num_read_bytes);
+        if (info.profile_counters)
+            ProfileEvents::dumpToMapColumn(*info.profile_counters, &column_profile_events, true);
+        else
+            column_profile_events.insertDefault();
     };

     for (const auto & entry : context->getBackupsWorker().getAllInfos())
@@ -92,7 +92,7 @@ StoragePtr TableFunctionMySQL::executeImpl(
     const ASTPtr & /*ast_function*/,
     ContextPtr context,
     const std::string & table_name,
-    ColumnsDescription /*cached_columns*/,
+    ColumnsDescription cached_columns,
     bool /*is_insert_query*/) const
 {
     auto res = std::make_shared<StorageMySQL>(
@@ -102,7 +102,7 @@ StoragePtr TableFunctionMySQL::executeImpl(
         configuration->table,
         configuration->replace_query,
         configuration->on_duplicate_clause,
-        ColumnsDescription{},
+        cached_columns,
         ConstraintsDescription{},
         String{},
         context,
@@ -44,13 +44,13 @@ private:
 };

 StoragePtr TableFunctionPostgreSQL::executeImpl(const ASTPtr & /*ast_function*/,
-    ContextPtr context, const std::string & table_name, ColumnsDescription /*cached_columns*/, bool /*is_insert_query*/) const
+    ContextPtr context, const std::string & table_name, ColumnsDescription cached_columns, bool /*is_insert_query*/) const
 {
     auto result = std::make_shared<StoragePostgreSQL>(
         StorageID(getDatabaseName(), table_name),
         connection_pool,
         configuration->table,
-        ColumnsDescription{},
+        cached_columns,
         ConstraintsDescription{},
         String{},
         context,
@@ -336,7 +336,7 @@ bool TableFunctionS3::supportsReadingSubsetOfColumns(const ContextPtr & context)
     return FormatFactory::instance().checkIfFormatSupportsSubsetOfColumns(configuration.format, context);
 }

-StoragePtr TableFunctionS3::executeImpl(const ASTPtr & /*ast_function*/, ContextPtr context, const std::string & table_name, ColumnsDescription /*cached_columns*/, bool /*is_insert_query*/) const
+StoragePtr TableFunctionS3::executeImpl(const ASTPtr & /*ast_function*/, ContextPtr context, const std::string & table_name, ColumnsDescription cached_columns, bool /*is_insert_query*/) const
 {
     S3::URI s3_uri (configuration.url);

@@ -345,6 +345,8 @@ StoragePtr TableFunctionS3::executeImpl(const ASTPtr & /*ast_function*/, Context
         columns = parseColumnsListFromString(configuration.structure, context);
     else if (!structure_hint.empty())
         columns = structure_hint;
+    else if (!cached_columns.empty())
+        columns = cached_columns;

     StoragePtr storage = std::make_shared<StorageS3>(
         configuration,
@@ -51,13 +51,13 @@ private:
 };

 StoragePtr TableFunctionSQLite::executeImpl(const ASTPtr & /*ast_function*/,
-    ContextPtr context, const String & table_name, ColumnsDescription /*cached_columns*/, bool /*is_insert_query*/) const
+    ContextPtr context, const String & table_name, ColumnsDescription cached_columns, bool /*is_insert_query*/) const
 {
     auto storage = std::make_shared<StorageSQLite>(StorageID(getDatabaseName(), table_name),
         sqlite_db,
         database_path,
         remote_table_name,
-        ColumnsDescription{}, ConstraintsDescription{}, context);
+        cached_columns, ConstraintsDescription{}, context);

     storage->startup();
     return storage;
@@ -14,7 +14,6 @@
 01268_shard_avgweighted
 01455_shard_leaf_max_rows_bytes_to_read
 01495_subqueries_in_with_statement
-01504_rocksdb
 01560_merge_distributed_join
 01584_distributed_buffer_cannot_find_column
 01586_columns_pruning
@@ -37,7 +36,6 @@
 02345_implicit_transaction
 02352_grouby_shadows_arg
 02354_annoy
-02375_rocksdb_with_filters
 02402_merge_engine_with_view
 02404_memory_bound_merging
 02426_orc_bug
@@ -45,7 +43,6 @@
 02458_use_structure_from_insertion_table
 02479_race_condition_between_insert_and_droppin_mv
 02493_inconsistent_hex_and_binary_number
-02521_aggregation_by_partitions
 02554_fix_grouping_sets_predicate_push_down
 02575_merge_prewhere_different_default_kind
 02003_WithMergeableStateAfterAggregationAndLimit_LIMIT_BY_LIMIT_OFFSET
@@ -11,7 +11,7 @@ from ci_config import CI_CONFIG, BuildConfig
 from ccache_utils import CargoCache
 from docker_pull_helper import get_image_with_version
 from env_helper import (
-    GITHUB_JOB,
+    GITHUB_JOB_API_URL,
     IMAGES_PATH,
     REPO_COPY,
     S3_BUILDS_BUCKET,
@@ -163,9 +163,14 @@ def check_for_success_run(
             version.describe,
             SUCCESS if success else FAILURE,
             0,
-            GITHUB_JOB,
+            GITHUB_JOB_API_URL(),
+        )
+        result_json_path = build_result.write_json(Path(TEMP_PATH))
+        logging.info(
+            "Build result file %s is written, content:\n %s",
+            result_json_path,
+            result_json_path.read_text(encoding="utf-8"),
         )
-        build_result.write_json(Path(TEMP_PATH))
         # Fail build job if not successeded
         if not success:
             sys.exit(1)
@@ -348,7 +353,7 @@ def main():
         version.describe,
         build_status,
         elapsed,
-        GITHUB_JOB,
+        GITHUB_JOB_API_URL(),
     )
     result_json_path = build_result.write_json(temp_path)
     logging.info(
@@ -94,7 +94,7 @@ def main():
     missing_job_names = [
         name
        for name in needs_data
-        if not any(1 for build_result in build_results if build_result.job_name == name)
+        if not any(1 for br in build_results if br.job_name.startswith(name))
     ]
     missing_builds = len(missing_job_names)
     for job_name in reversed(missing_job_names):
@@ -2,21 +2,34 @@

 import logging

+from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
 from dataclasses import dataclass
-from typing import Callable, Dict, List, Literal
+from typing import Callable, Dict, List, Literal, Union


 @dataclass
 class BuildConfig:
+    name: str
     compiler: str
     package_type: Literal["deb", "binary", "fuzzers"]
     additional_pkgs: bool = False
     debug_build: bool = False
     sanitizer: str = ""
     tidy: bool = False
+    sparse_checkout: bool = False
     comment: str = ""
     static_binary_name: str = ""

+    def export_env(self, export: bool = False) -> str:
+        def process(field_name: str, field: Union[bool, str]) -> str:
+            if isinstance(field, bool):
+                field = str(field).lower()
+            if export:
+                return f"export BUILD_{field_name.upper()}={repr(field)}"
+            return f"BUILD_{field_name.upper()}={field}"
+
+        return "\n".join(process(k, v) for k, v in self.__dict__.items())
+

 @dataclass
 class TestConfig:
@@ -87,48 +100,57 @@ class CiConfig:
 CI_CONFIG = CiConfig(
     build_config={
         "package_release": BuildConfig(
+            name="package_release",
             compiler="clang-17",
             package_type="deb",
             static_binary_name="amd64",
             additional_pkgs=True,
         ),
         "package_aarch64": BuildConfig(
+            name="package_aarch64",
             compiler="clang-17-aarch64",
             package_type="deb",
             static_binary_name="aarch64",
             additional_pkgs=True,
         ),
         "package_asan": BuildConfig(
+            name="package_asan",
             compiler="clang-17",
             sanitizer="address",
             package_type="deb",
         ),
         "package_ubsan": BuildConfig(
+            name="package_ubsan",
             compiler="clang-17",
             sanitizer="undefined",
             package_type="deb",
         ),
         "package_tsan": BuildConfig(
+            name="package_tsan",
             compiler="clang-17",
             sanitizer="thread",
             package_type="deb",
         ),
         "package_msan": BuildConfig(
+            name="package_msan",
             compiler="clang-17",
             sanitizer="memory",
             package_type="deb",
         ),
         "package_debug": BuildConfig(
+            name="package_debug",
             compiler="clang-17",
             debug_build=True,
             package_type="deb",
-            comment="Note: sparse checkout was used",
+            sparse_checkout=True,
         ),
         "binary_release": BuildConfig(
+            name="binary_release",
             compiler="clang-17",
             package_type="binary",
         ),
         "binary_tidy": BuildConfig(
+            name="binary_tidy",
             compiler="clang-17",
             debug_build=True,
             package_type="binary",
@@ -137,52 +159,63 @@ CI_CONFIG = CiConfig(
             comment="clang-tidy is used for static analysis",
         ),
         "binary_darwin": BuildConfig(
+            name="binary_darwin",
             compiler="clang-17-darwin",
             package_type="binary",
             static_binary_name="macos",
+            sparse_checkout=True,
         ),
         "binary_aarch64": BuildConfig(
+            name="binary_aarch64",
             compiler="clang-17-aarch64",
             package_type="binary",
         ),
         "binary_aarch64_v80compat": BuildConfig(
+            name="binary_aarch64_v80compat",
             compiler="clang-17-aarch64-v80compat",
             package_type="binary",
             static_binary_name="aarch64v80compat",
             comment="For ARMv8.1 and older",
         ),
         "binary_freebsd": BuildConfig(
+            name="binary_freebsd",
             compiler="clang-17-freebsd",
             package_type="binary",
             static_binary_name="freebsd",
         ),
         "binary_darwin_aarch64": BuildConfig(
+            name="binary_darwin_aarch64",
             compiler="clang-17-darwin-aarch64",
             package_type="binary",
             static_binary_name="macos-aarch64",
         ),
         "binary_ppc64le": BuildConfig(
+            name="binary_ppc64le",
             compiler="clang-17-ppc64le",
             package_type="binary",
             static_binary_name="powerpc64le",
         ),
         "binary_amd64_compat": BuildConfig(
+            name="binary_amd64_compat",
             compiler="clang-17-amd64-compat",
             package_type="binary",
             static_binary_name="amd64compat",
             comment="SSE2-only build",
         ),
         "binary_riscv64": BuildConfig(
+            name="binary_riscv64",
             compiler="clang-17-riscv64",
             package_type="binary",
             static_binary_name="riscv64",
         ),
         "binary_s390x": BuildConfig(
+            name="binary_s390x",
             compiler="clang-17-s390x",
             package_type="binary",
             static_binary_name="s390x",
         ),
         "fuzzers": BuildConfig(
+            name="fuzzers",
             compiler="clang-16",
             package_type="fuzzers",
         ),
@@ -467,3 +500,24 @@ CHECK_DESCRIPTIONS = [
         lambda x: True,
     ),
 ]
+
+
+def main() -> None:
+    parser = ArgumentParser(
+        formatter_class=ArgumentDefaultsHelpFormatter,
+        description="The script provides build config for GITHUB_ENV or shell export",
+    )
+    parser.add_argument("--build-name", help="the build config to export")
+    parser.add_argument(
+        "--export",
+        action="store_true",
+        help="if set, the ENV parameters are provided for shell export",
+    )
+    args = parser.parse_args()
+    build_config = CI_CONFIG.build_config.get(args.build_name)
+    if build_config:
+        print(build_config.export_env(args.export))
+
+
+if __name__ == "__main__":
+    main()
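As a rough illustration of what the new `export_env` helper emits, here is a standalone Python sketch that re-implements the same field-to-variable mapping on a hypothetical config; the field values are invented, and the real script would be invoked along the lines of `python3 ci_config.py --build-name package_release --export`.

```python
# Standalone sketch of the export_env mapping added above; the dataclass below is a
# hypothetical subset of BuildConfig and its values are made up for illustration.
from dataclasses import dataclass
from typing import Union


@dataclass
class BuildConfigSketch:
    name: str
    compiler: str
    debug_build: bool = False

    def export_env(self, export: bool = False) -> str:
        def process(field_name: str, field: Union[bool, str]) -> str:
            if isinstance(field, bool):
                field = str(field).lower()
            if export:
                return f"export BUILD_{field_name.upper()}={repr(field)}"
            return f"BUILD_{field_name.upper()}={field}"

        return "\n".join(process(k, v) for k, v in self.__dict__.items())


print(BuildConfigSketch(name="package_release", compiler="clang-17").export_env())
# BUILD_NAME=package_release
# BUILD_COMPILER=clang-17
# BUILD_DEBUG_BUILD=false
```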
@@ -16,7 +16,7 @@ TEMP_PATH = os.getenv("TEMP_PATH", p.abspath(p.join(module_dir, "./tmp")))
 CACHES_PATH = os.getenv("CACHES_PATH", TEMP_PATH)
 CLOUDFLARE_TOKEN = os.getenv("CLOUDFLARE_TOKEN")
 GITHUB_EVENT_PATH = os.getenv("GITHUB_EVENT_PATH", "")
-GITHUB_JOB = os.getenv("GITHUB_JOB", "local")
+GITHUB_JOB = os.getenv("GITHUB_JOB_OVERRIDDEN", "") or os.getenv("GITHUB_JOB", "local")
 GITHUB_REPOSITORY = os.getenv("GITHUB_REPOSITORY", "ClickHouse/ClickHouse")
 GITHUB_RUN_ID = os.getenv("GITHUB_RUN_ID", "0")
 GITHUB_SERVER_URL = os.getenv("GITHUB_SERVER_URL", "https://github.com")
@@ -38,14 +38,16 @@ S3_ARTIFACT_DOWNLOAD_TEMPLATE = (
 # These parameters are set only on demand, and only once
 _GITHUB_JOB_ID = ""
 _GITHUB_JOB_URL = ""
+_GITHUB_JOB_API_URL = ""


 def GITHUB_JOB_ID() -> str:
     global _GITHUB_JOB_ID
     global _GITHUB_JOB_URL
+    global _GITHUB_JOB_API_URL
     if _GITHUB_JOB_ID:
         return _GITHUB_JOB_ID
-    _GITHUB_JOB_ID, _GITHUB_JOB_URL = get_job_id_url(GITHUB_JOB)
+    _GITHUB_JOB_ID, _GITHUB_JOB_URL, _GITHUB_JOB_API_URL = get_job_id_url(GITHUB_JOB)
     return _GITHUB_JOB_ID


@@ -54,13 +56,19 @@ def GITHUB_JOB_URL() -> str:
     return _GITHUB_JOB_URL


-def get_job_id_url(job_name: str) -> Tuple[str, str]:
+def GITHUB_JOB_API_URL() -> str:
+    GITHUB_JOB_ID()
+    return _GITHUB_JOB_API_URL
+
+
+def get_job_id_url(job_name: str) -> Tuple[str, str, str]:
     job_id = ""
     job_url = ""
+    job_api_url = ""
     if GITHUB_RUN_ID == "0":
         job_id = "0"
     if job_id:
-        return job_id, job_url
+        return job_id, job_url, job_api_url
     jobs = []
     page = 1
     while not job_id:
@@ -76,7 +84,8 @@ def get_job_id_url(job_name: str) -> Tuple[str, str]:
                 continue
             job_id = job["id"]
             job_url = job["html_url"]
-            return job_id, job_url
+            job_api_url = job["url"]
+            return job_id, job_url, job_api_url
         if (
             len(jobs) >= data["total_count"]  # just in case of inconsistency
             or len(data["jobs"]) == 0  # if we excided pages
@@ -100,7 +109,8 @@ def get_job_id_url(job_name: str) -> Tuple[str, str]:
         # The best case scenario
         job_id = matched_jobs[0]["id"]
         job_url = matched_jobs[0]["html_url"]
-        return job_id, job_url
+        job_api_url = matched_jobs[0]["url"]
+        return job_id, job_url, job_api_url
     if matched_jobs:
         logging.error(
             "We could not get the ID and URL for the current job name %s, there "
@@ -109,4 +119,4 @@ def get_job_id_url(job_name: str) -> Tuple[str, str]:
             job_name,
         )

-    return job_id, job_url
+    return job_id, job_url, job_api_url
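The env_helper change keeps the existing cache-once pattern and simply threads a third value (the job's API URL) through it. The sketch below shows that pattern in isolation; the `_lookup` stub and its return values are invented for illustration, while the real code pages through the GitHub Actions jobs API.

```python
# Minimal sketch of the lazily cached (id, html_url, api_url) triple; _lookup is a stub.
from typing import Tuple

_JOB_ID = ""
_JOB_URL = ""
_JOB_API_URL = ""


def _lookup(job_name: str) -> Tuple[str, str, str]:
    # Stand-in for get_job_id_url(); the values below are fake.
    return "12345", f"https://example.invalid/{job_name}", f"https://api.example.invalid/{job_name}"


def job_id(job_name: str) -> str:
    global _JOB_ID, _JOB_URL, _JOB_API_URL
    if _JOB_ID:
        return _JOB_ID
    _JOB_ID, _JOB_URL, _JOB_API_URL = _lookup(job_name)
    return _JOB_ID


def job_api_url(job_name: str) -> str:
    job_id(job_name)  # populate the cache on first use
    return _JOB_API_URL


print(job_api_url("build"))
```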
@@ -10,8 +10,8 @@ import json
 import logging
 import os

+from build_download_helper import get_gh_api
 from ci_config import BuildConfig, CI_CONFIG
-from env_helper import get_job_id_url


 logger = logging.getLogger(__name__)
@@ -298,8 +298,10 @@ class BuildResult:
     version: str
     status: StatusType
     elapsed_seconds: int
-    job_name: str
-    _job_link: Optional[str] = None
+    job_api_url: str
+    _job_name: Optional[str] = None
+    _job_html_url: Optional[str] = None
+    _job_html_link: Optional[str] = None
     _grouped_urls: Optional[List[List[str]]] = None

     @property
@@ -387,11 +389,39 @@ class BuildResult:

     @property
     def job_link(self) -> str:
-        if self._job_link is not None:
-            return self._job_link
-        _, job_url = get_job_id_url(self.job_name)
-        self._job_link = f'<a href="{job_url}">{self.job_name}</a>'
-        return self._job_link
+        if self._job_html_link is not None:
+            return self._job_html_link
+        self._job_html_link = f'<a href="{self.job_html_url}">{self.job_name}</a>'
+        return self._job_html_link
+
+    @property
+    def job_html_url(self) -> str:
+        if self._job_html_url is not None:
+            return self._job_html_url
+        self._set_properties()
+        return self._job_html_url or ""
+
+    @property
+    def job_name(self) -> str:
+        if self._job_name is not None:
+            return self._job_name
+        self._set_properties()
+        return self._job_name or ""
+
+    @job_name.setter
+    def job_name(self, job_name: str) -> None:
+        self._job_name = job_name
+
+    def _set_properties(self) -> None:
+        if all(p is not None for p in (self._job_name, self._job_html_url)):
+            return
+        try:
+            job_data = get_gh_api(self.job_api_url).json()
+        except Exception:
+            job_data = {}
+        # job_name can be set manually
+        self._job_name = self._job_name or job_data.get("name", "unknown")
+        self._job_html_url = job_data.get("html_url", "")

     @staticmethod
     def get_report_name(name: str) -> Path:
@ -416,7 +446,7 @@ class BuildResult:
|
|||||||
data.get("version", ""),
|
data.get("version", ""),
|
||||||
data.get("status", ERROR),
|
data.get("status", ERROR),
|
||||||
data.get("elapsed_seconds", 0),
|
data.get("elapsed_seconds", 0),
|
||||||
data.get("job_name", ""),
|
data.get("job_api_url", ""),
|
||||||
)
|
)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
@ -434,7 +464,7 @@ class BuildResult:
|
|||||||
"version": self.version,
|
"version": self.version,
|
||||||
"status": self.status,
|
"status": self.status,
|
||||||
"elapsed_seconds": self.elapsed_seconds,
|
"elapsed_seconds": self.elapsed_seconds,
|
||||||
"job_name": self.job_name,
|
"job_api_url": self.job_api_url,
|
||||||
}
|
}
|
||||||
),
|
),
|
||||||
encoding="utf-8",
|
encoding="utf-8",
|
||||||
@ -706,7 +736,13 @@ def create_build_html_report(
|
|||||||
)
|
)
|
||||||
row.append(f"<td>{link_separator.join(links)}</td>")
|
row.append(f"<td>{link_separator.join(links)}</td>")
|
||||||
|
|
||||||
row.append(f"<td>{build_result.comment}</td>")
|
comment = build_result.comment
|
||||||
|
if (
|
||||||
|
build_result.build_config is not None
|
||||||
|
and build_result.build_config.sparse_checkout
|
||||||
|
):
|
||||||
|
comment += " (note: sparse checkout is used)"
|
||||||
|
row.append(f"<td>{comment}</td>")
|
||||||
|
|
||||||
row.append("</tr>")
|
row.append("</tr>")
|
||||||
rows.append("".join(row))
|
rows.append("".join(row))
|
||||||
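
For context on the report change above: BuildResult now serializes only job_api_url and resolves the human-readable job name and HTML link lazily, caching them after a single GitHub API call. The following standalone sketch illustrates the same cache-on-first-access pattern; it is illustrative only, swapping the repo's get_gh_api helper for a plain requests call, and the URL format shown is just the public GitHub Actions jobs endpoint:

from dataclasses import dataclass
from typing import Optional

import requests  # stand-in for the CI helper get_gh_api used in report.py


@dataclass
class JobInfo:
    # e.g. https://api.github.com/repos/<owner>/<repo>/actions/jobs/<id>
    job_api_url: str
    _job_name: Optional[str] = None
    _job_html_url: Optional[str] = None

    @property
    def job_name(self) -> str:
        if self._job_name is None:
            self._fetch()
        return self._job_name or "unknown"

    @property
    def job_html_url(self) -> str:
        if self._job_html_url is None:
            self._fetch()
        return self._job_html_url or ""

    @property
    def job_link(self) -> str:
        # Both lazy fields are filled by at most one API round-trip
        return f'<a href="{self.job_html_url}">{self.job_name}</a>'

    def _fetch(self) -> None:
        # Network or auth failures degrade to placeholder values instead of raising
        try:
            data = requests.get(self.job_api_url, timeout=10).json()
        except Exception:
            data = {}
        self._job_name = self._job_name or data.get("name", "unknown")
        self._job_html_url = data.get("html_url", "")

One likely benefit of storing job_api_url rather than a display name is that both the name and the HTML link can be derived on demand from a single persisted value.
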
@@ -2,6 +2,7 @@
     <zookeeper>
         <!--<zookeeper_load_balancing>random / in_order / nearest_hostname / hostname_levenshtein_distance / first_or_random / round_robin</zookeeper_load_balancing>-->
         <zookeeper_load_balancing>random</zookeeper_load_balancing>
+        <use_compression>true</use_compression>
         <node index="1">
             <host>127.0.0.1</host>
             <port>9181</port>
29 tests/integration/test_keeper_compression/configs/keeper.xml Normal file
@@ -0,0 +1,29 @@
+<?xml version="1.0" encoding="utf-8"?>
+<clickhouse>
+    <keeper_server>
+        <tcp_port>9181</tcp_port>
+        <server_id>1</server_id>
+        <log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>
+        <snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>
+        <coordination_settings>
+            <operation_timeout_ms>5000</operation_timeout_ms>
+            <raft_logs_level>trace</raft_logs_level>
+            <session_timeout_ms>10000</session_timeout_ms>
+        </coordination_settings>
+        <raft_configuration>
+            <server>
+                <can_become_leader>true</can_become_leader>
+                <hostname>node1</hostname>
+                <id>1</id>
+                <port>2888</port>
+                <priority>1</priority>
+            </server>
+        </raft_configuration>
+    </keeper_server>
+
+    <user_directories>
+        <replicated>
+            <zookeeper_path>/clickhouse/access</zookeeper_path>
+        </replicated>
+    </user_directories>
+</clickhouse>
@@ -0,0 +1,9 @@
+<clickhouse>
+    <zookeeper>
+        <use_compression>true</use_compression>
+        <node index="1">
+            <host>node1</host>
+            <port>9181</port>
+        </node>
+    </zookeeper>
+</clickhouse>
@@ -0,0 +1,9 @@
+<clickhouse>
+    <zookeeper>
+        <use_compression>false</use_compression>
+        <node index="1">
+            <host>node1</host>
+            <port>9181</port>
+        </node>
+    </zookeeper>
+</clickhouse>
@@ -0,0 +1,28 @@
+#!/usr/bin/env python3
+
+import pytest
+
+from helpers.cluster import ClickHouseCluster
+
+cluster = ClickHouseCluster(__file__)
+
+node1 = cluster.add_instance(
+    "node1",
+    main_configs=["configs/keeper.xml", "configs/keeper_with_compression.xml"],
+    stay_alive=True,
+)
+
+
+# test that server is able to start
+@pytest.fixture(scope="module")
+def started_cluster():
+    try:
+        cluster.start()
+
+        yield cluster
+    finally:
+        cluster.shutdown()
+
+
+def test_select(started_cluster):
+    assert node1.query("SELECT 1") == "1\n"
@@ -0,0 +1,28 @@
+#!/usr/bin/env python3
+
+import pytest
+
+from helpers.cluster import ClickHouseCluster
+
+cluster = ClickHouseCluster(__file__)
+
+node1 = cluster.add_instance(
+    "node1",
+    main_configs=["configs/keeper.xml", "configs/keeper_without_compression.xml"],
+    stay_alive=True,
+)
+
+
+# test that server is able to start
+@pytest.fixture(scope="module")
+def started_cluster():
+    try:
+        cluster.start()
+
+        yield cluster
+    finally:
+        cluster.shutdown()
+
+
+def test_select(started_cluster):
+    assert node1.query("SELECT 1") == "1\n"
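
The two integration tests above are identical except for the Keeper client config they load. If they were ever merged, the same coverage could be expressed once with pytest parametrization; the sketch below is purely illustrative (the single-module layout and instance names are assumptions, not part of this change), reusing only the helpers.cluster API already shown above:

#!/usr/bin/env python3
# Illustrative only: one parametrized module instead of two near-identical ones.
import pytest

from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)

# One instance per compression setting (instance names are hypothetical).
nodes = {
    flag: cluster.add_instance(
        f"node_{flag}",
        main_configs=["configs/keeper.xml", f"configs/keeper_{flag}_compression.xml"],
        stay_alive=True,
    )
    for flag in ("with", "without")
}


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()


@pytest.mark.parametrize("flag", ["with", "without"])
def test_select(started_cluster, flag):
    # The server must start and answer a trivial query in both modes.
    assert nodes[flag].query("SELECT 1") == "1\n"
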
@@ -0,0 +1 @@
+3
65 tests/queries/0_stateless/02875_merge_engine_set_index.sh Executable file
@@ -0,0 +1,65 @@
+#!/usr/bin/env bash
+
+# shellcheck disable=SC2154
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CURDIR"/../shell_config.sh
+
+
+$CLICKHOUSE_CLIENT -nq "
+  CREATE TABLE t1
+  (
+    a UInt32,
+    b UInt32
+  )
+  ENGINE = MergeTree
+  ORDER BY (a, b)
+  SETTINGS index_granularity = 8192;
+
+  INSERT INTO t1 SELECT number, number FROM numbers_mt(1e6);
+
+  CREATE TABLE t2
+  (
+    a UInt32,
+    b UInt32
+  )
+  ENGINE = MergeTree
+  ORDER BY (a, b)
+  SETTINGS index_granularity = 8192;
+
+  INSERT INTO t2 VALUES (1, 1) (2, 2) (3, 3);
+
+  CREATE TABLE t
+  (
+    a UInt32,
+    b UInt32
+  )
+  ENGINE = Merge(currentDatabase(), 't*');"
+
+query_id="${CLICKHOUSE_DATABASE}_merge_engine_set_index_$RANDOM$RANDOM"
+$CLICKHOUSE_CLIENT --query_id="$query_id" --multiquery -q "
+  SELECT
+    a,
+    b
+  FROM t
+  WHERE (a, b) IN (
+    SELECT DISTINCT
+      a,
+      b
+    FROM t2
+  )
+  GROUP BY
+    a,
+    b
+  ORDER BY
+    a ASC,
+    b DESC
+  FORMAT Null;"
+
+$CLICKHOUSE_CLIENT -nq "
+  SYSTEM FLUSH LOGS;
+
+  SELECT ProfileEvents['SelectedMarks']
+  FROM system.query_log
+  WHERE event_date >= yesterday() AND current_database = currentDatabase() AND (query_id = '$query_id') AND (type = 'QueryFinish');"
@@ -0,0 +1,12 @@
+tablefunc01  StorageProxy  CREATE TABLE default.tablefunc01 (`x` Int32) AS postgresql(\'127.121.0.1:5432\', \'postgres_db\', \'postgres_table\', \'postgres_user\', \'[HIDDEN]\')  []  1  1
+tablefunc02  StorageProxy  CREATE TABLE default.tablefunc02 (`x` Int32) AS mysql(\'127.123.0.1:3306\', \'mysql_db\', \'mysql_table\', \'mysql_user\', \'[HIDDEN]\')  []  1  1
+tablefunc03  StorageProxy  CREATE TABLE default.tablefunc03 (`a` Int32) AS sqlite(\'db_path\', \'table_name\')  []  1  1
+tablefunc04  StorageProxy  CREATE TABLE default.tablefunc04 (`a` Int32) AS mongodb(\'127.0.0.1:27017\', \'test\', \'my_collection\', \'test_user\', \'[HIDDEN]\', \'a Int\')  []  1  1
+tablefunc05  StorageProxy  CREATE TABLE default.tablefunc05 (`a` Int32) AS redis(\'127.0.0.1:6379\', \'key\', \'key UInt32\')  []  1  1
+tablefunc06  StorageProxy  CREATE TABLE default.tablefunc06 (`a` Int32) AS s3(\'http://some_addr:9000/cloud-storage-01/data.tsv\', \'M9O7o0SX5I4udXhWxI12\', \'[HIDDEN]\', \'TSV\')  []  1  1
+tablefunc01  StorageProxy  CREATE TABLE default.tablefunc01 (`x` Int32) AS postgresql(\'127.121.0.1:5432\', \'postgres_db\', \'postgres_table\', \'postgres_user\', \'[HIDDEN]\')  []  1  1
+tablefunc02  StorageProxy  CREATE TABLE default.tablefunc02 (`x` Int32) AS mysql(\'127.123.0.1:3306\', \'mysql_db\', \'mysql_table\', \'mysql_user\', \'[HIDDEN]\')  []  1  1
+tablefunc03  StorageProxy  CREATE TABLE default.tablefunc03 (`a` Int32) AS sqlite(\'db_path\', \'table_name\')  []  1  1
+tablefunc04  StorageProxy  CREATE TABLE default.tablefunc04 (`a` Int32) AS mongodb(\'127.0.0.1:27017\', \'test\', \'my_collection\', \'test_user\', \'[HIDDEN]\', \'a Int\')  []  1  1
+tablefunc05  StorageProxy  CREATE TABLE default.tablefunc05 (`a` Int32) AS redis(\'127.0.0.1:6379\', \'key\', \'key UInt32\')  []  1  1
+tablefunc06  StorageProxy  CREATE TABLE default.tablefunc06 (`a` Int32) AS s3(\'http://some_addr:9000/cloud-storage-01/data.tsv\', \'M9O7o0SX5I4udXhWxI12\', \'[HIDDEN]\', \'TSV\')  []  1  1
@@ -0,0 +1,43 @@
+-- Tags: no-fasttest
+
+DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier};
+
+CREATE DATABASE {CLICKHOUSE_DATABASE:Identifier};
+
+
+CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc01 (x int) AS postgresql('127.121.0.1:5432', 'postgres_db', 'postgres_table', 'postgres_user', '124444');
+CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc02 (x int) AS mysql('127.123.0.1:3306', 'mysql_db', 'mysql_table', 'mysql_user','123123');
+CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc03 (a int) AS sqlite('db_path', 'table_name');
+CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc04 (a int) AS mongodb('127.0.0.1:27017','test', 'my_collection', 'test_user', 'password', 'a Int');
+CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc05 (a int) AS redis('127.0.0.1:6379', 'key', 'key UInt32');
+CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc06 (a int) AS s3('http://some_addr:9000/cloud-storage-01/data.tsv', 'M9O7o0SX5I4udXhWxI12', '9ijqzmVN83fzD9XDkEAAAAAAAA', 'TSV');
+
+
+CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc01_without_schema AS postgresql('127.121.0.1:5432', 'postgres_db', 'postgres_table', 'postgres_user', '124444'); -- { serverError 614 }
+CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc02_without_schema AS mysql('127.123.0.1:3306', 'mysql_db', 'mysql_table', 'mysql_user','123123'); -- {serverError 279 }
+
+SELECT name, engine, engine_full, create_table_query, data_paths, notEmpty([metadata_path]), notEmpty([uuid])
+FROM system.tables
+WHERE name like '%tablefunc%' and database=currentDatabase()
+ORDER BY name;
+
+DETACH TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc01;
+DETACH TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc02;
+DETACH TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc03;
+DETACH TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc04;
+DETACH TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc05;
+DETACH TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc06;
+
+ATTACH TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc01;
+ATTACH TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc02;
+ATTACH TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc03;
+ATTACH TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc04;
+ATTACH TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc05;
+ATTACH TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc06;
+
+SELECT name, engine, engine_full, create_table_query, data_paths, notEmpty([metadata_path]), notEmpty([uuid])
+FROM system.tables
+WHERE name like '%tablefunc%' and database=currentDatabase()
+ORDER BY name;
+
+DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier};
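
The SQL test above relies on ClickHouse masking credentials as [HIDDEN] in system.tables metadata, before and after DETACH/ATTACH. A hedged companion check written against the clickhouse-client CLI (a locally running server, a client on PATH, and the secret literals from the test are the only assumptions):

import subprocess

# Plaintext secrets from the CREATE TABLE statements above; none of them
# should ever appear in table metadata.
SECRETS = ["124444", "123123", "password", "9ijqzmVN83fzD9XDkEAAAAAAAA"]


def query(sql: str) -> str:
    # Assumes clickhouse-client can reach the server with default settings
    return subprocess.run(
        ["clickhouse-client", "-q", sql], capture_output=True, text=True, check=True
    ).stdout


metadata = query(
    "SELECT engine_full, create_table_query FROM system.tables "
    "WHERE name LIKE '%tablefunc%' FORMAT TSVRaw"
)
assert "[HIDDEN]" in metadata, "expected masked credentials in metadata"
assert not any(secret in metadata for secret in SECRETS), "credential leaked"
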
@@ -0,0 +1,3 @@
+BACKUP_CREATED
+RESTORED
+0
30 tests/queries/0_stateless/02907_backup_mv_with_no_inner_table.sh Executable file
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+# Tags: no-ordinary-database
+
+CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CUR_DIR"/../shell_config.sh
+
+${CLICKHOUSE_CLIENT} -nm --query "
+drop table if exists src;
+create table src (a Int32) engine = MergeTree() order by tuple();
+
+drop table if exists mv;
+create materialized view mv (a Int32) engine = MergeTree() order by tuple() as select * from src;
+"
+
+uuid=$(${CLICKHOUSE_CLIENT} --query "select uuid from system.tables where table='mv' and database == currentDatabase()")
+inner_table=".inner_id.${uuid}"
+${CLICKHOUSE_CLIENT} -nm --query "drop table \`$inner_table\` sync"
+
+${CLICKHOUSE_CLIENT} -nm --query "
+set send_logs_level = 'error';
+backup table ${CLICKHOUSE_DATABASE}.\`mv\` to Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}');
+" | grep -o "BACKUP_CREATED"
+
+${CLICKHOUSE_CLIENT} -nm --query "
+drop table mv;
+restore table ${CLICKHOUSE_DATABASE}.\`mv\` from Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}');
+" | grep -o "RESTORED"
+
+${CLICKHOUSE_CLIENT} --query "select count() from mv;"
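
The backup test above depends on the naming convention for a materialized view's implicit target table in an Atomic database: ".inner_id.<uuid>". A small hedged Python sketch of the same lookup, driving the clickhouse-client CLI directly (a client on PATH, a default connection, and an existing view named mv in the current database are assumptions, not guarantees):

import subprocess


def inner_table_name(mv_uuid: str) -> str:
    # Atomic databases store a MV's implicit target table as ".inner_id.<uuid>"
    return f".inner_id.{mv_uuid}"


def query(sql: str) -> str:
    # Assumes a reachable server and clickhouse-client with default settings
    return subprocess.run(
        ["clickhouse-client", "-q", sql], capture_output=True, text=True, check=True
    ).stdout.strip()


if __name__ == "__main__":
    uuid = query(
        "SELECT uuid FROM system.tables "
        "WHERE table = 'mv' AND database = currentDatabase()"
    )
    print(inner_table_name(uuid))
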
@@ -0,0 +1,8 @@
+test
+projection_test_by_string
+Executing query with setting
+test
+projection_test_by_more
+Executing query with wrong projection
+test
+projection_test_by_string
107 tests/queries/0_stateless/02907_preferred_optimize_projection_name.sh Executable file
@@ -0,0 +1,107 @@
+#!/usr/bin/env bash
+# Tags: no-fasttest
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CURDIR"/../shell_config.sh
+
+$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS test_opt_proj;"
+
+$CLICKHOUSE_CLIENT -q "
+CREATE TABLE test_opt_proj (
+    test_id UInt64,
+    test_name String,
+    test_count Nullable(Float64),
+    test_string String,
+    PROJECTION projection_test_by_string (
+        SELECT test_string,
+            sum(test_count)
+        GROUP BY test_id,
+            test_string,
+            test_name
+    ),
+    PROJECTION projection_test_by_more (
+        SELECT test_string,
+            test_name,
+            sum(test_count)
+        GROUP BY test_id,
+            test_string,
+            test_name
+    )
+) ENGINE = MergeTree
+ORDER BY test_string;"
+
+$CLICKHOUSE_CLIENT -q "
+INSERT INTO test_opt_proj
+SELECT number,
+    'test',
+    1.* (number / 2),
+    'test'
+FROM numbers(100, 500);"
+
+$CLICKHOUSE_CLIENT --query_id 02907_test_$CLICKHOUSE_DATABASE -q "
+SELECT test_string
+FROM test_opt_proj
+WHERE (test_id > 50)
+    AND (test_id < 150)
+GROUP BY test_string;"
+
+$CLICKHOUSE_CLIENT -q "SYSTEM FLUSH LOGS;"
+
+$CLICKHOUSE_CLIENT -q "
+SELECT projections
+FROM system.query_log
+WHERE query_id = '02907_test_$CLICKHOUSE_DATABASE' AND current_database=currentDatabase()
+LIMIT 1;" | grep -o "projection_test_by_string" || true
+
+$CLICKHOUSE_CLIENT -q "
+SELECT projections
+FROM system.query_log
+WHERE query_id = '02907_test_$CLICKHOUSE_DATABASE' AND current_database=currentDatabase()
+LIMIT 1;" | grep -o "projection_test_by_more" || true
+
+echo "Executing query with setting"
+
+$CLICKHOUSE_CLIENT --query_id 02907_test_1_$CLICKHOUSE_DATABASE --preferred_optimize_projection_name 'projection_test_by_more' -q "
+SELECT test_string
+FROM test_opt_proj
+WHERE (test_id > 50)
+    AND (test_id < 150)
+GROUP BY test_string;"
+
+$CLICKHOUSE_CLIENT -q "SYSTEM FLUSH LOGS;"
+
+$CLICKHOUSE_CLIENT -q "
+SELECT projections
+FROM system.query_log
+WHERE query_id = '02907_test_1_$CLICKHOUSE_DATABASE' AND current_database=currentDatabase()
+LIMIT 1;" | grep -o "projection_test_by_more" || true
+
+$CLICKHOUSE_CLIENT -q "
+SELECT projections
+FROM system.query_log
+WHERE query_id = '02907_test_1_$CLICKHOUSE_DATABASE' AND current_database=currentDatabase()
+LIMIT 1" | grep -o "projection_test_by_string" || true
+
+echo "Executing query with wrong projection"
+
+$CLICKHOUSE_CLIENT --query_id 02907_test_2_$CLICKHOUSE_DATABASE --preferred_optimize_projection_name 'non_existing_projection' -q "
+SELECT test_string
+FROM test_opt_proj
+WHERE (test_id > 50)
+    AND (test_id < 150)
+GROUP BY test_string;"
+
+$CLICKHOUSE_CLIENT -q "SYSTEM FLUSH LOGS;"
+
+$CLICKHOUSE_CLIENT -q "
+SELECT projections
+FROM system.query_log
+WHERE query_id = '02907_test_2_$CLICKHOUSE_DATABASE' AND current_database=currentDatabase()
+LIMIT 1;" | grep -o "projection_test_by_string" || true
+
+$CLICKHOUSE_CLIENT -q "
+SELECT projections
+FROM system.query_log
+WHERE query_id = '02907_test_2_$CLICKHOUSE_DATABASE' AND current_database=currentDatabase()
+LIMIT 1;" | grep -o "projection_test_by_more" || true
@@ -0,0 +1,2 @@
+BACKUP_CREATED
+1
21 tests/queries/0_stateless/02907_system_backups_profile_events.sh Executable file
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+
+CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CUR_DIR"/../shell_config.sh
+
+${CLICKHOUSE_CLIENT} -nm --query "
+drop table if exists test;
+create table test (a Int32) engine = MergeTree() order by tuple();
+"
+
+backup_id=${CLICKHOUSE_TEST_UNIQUE_NAME}
+backup_name="Disk('backups', '$backup_id')";
+
+${CLICKHOUSE_CLIENT} -nm --query "
+backup table ${CLICKHOUSE_DATABASE}.test to $backup_name;
+" | grep -o "BACKUP_CREATED"
+
+${CLICKHOUSE_CLIENT} -nm --query "
+select ProfileEvents['BackupEntriesCollectorMicroseconds'] > 10 from system.backups where name='Disk(\'backups\', \'$backup_id\')'
+"
@@ -127,6 +127,9 @@ void Runner::parseHostsFromConfig(const Poco::Util::AbstractConfiguration & conf
         if (config.has(key + ".connection_timeout_ms"))
             connection_info.connection_timeout_ms = config.getInt(key + ".connection_timeout_ms");
+
+        if (config.has(key + ".use_compression"))
+            connection_info.use_compression = config.getBool(key + ".use_compression");
     };

     fill_connection_details("connections", default_connection_info);
@@ -430,8 +433,9 @@ std::shared_ptr<Coordination::ZooKeeper> Runner::getConnection(const ConnectionI
         nodes.push_back(node);
     zkutil::ZooKeeperArgs args;
     args.session_timeout_ms = connection_info.session_timeout_ms;
-    args.connection_timeout_ms = connection_info.operation_timeout_ms;
-    args.operation_timeout_ms = connection_info.connection_timeout_ms;
+    args.connection_timeout_ms = connection_info.connection_timeout_ms;
+    args.operation_timeout_ms = connection_info.operation_timeout_ms;
+    args.use_compression = connection_info.use_compression;
     return std::make_shared<Coordination::ZooKeeper>(nodes, args, nullptr);
 }
@@ -79,6 +79,7 @@ private:
     int32_t session_timeout_ms = Coordination::DEFAULT_SESSION_TIMEOUT_MS;
    int32_t connection_timeout_ms = Coordination::DEFAULT_CONNECTION_TIMEOUT_MS;
    int32_t operation_timeout_ms = Coordination::DEFAULT_OPERATION_TIMEOUT_MS;
+    bool use_compression = false;

    size_t sessions = 1;
 };
@@ -1,7 +1,10 @@
+v23.10.2.13-stable 2023-11-08
 v23.10.1.1976-stable 2023-11-02
+v23.9.4.11-stable 2023-11-08
 v23.9.3.12-stable 2023-10-31
 v23.9.2.56-stable 2023-10-19
 v23.9.1.1854-stable 2023-09-29
+v23.8.6.16-lts 2023-11-08
 v23.8.5.16-lts 2023-10-31
 v23.8.4.69-lts 2023-10-19
 v23.8.3.48-lts 2023-09-27
@@ -27,6 +30,7 @@ v23.4.4.16-stable 2023-06-17
 v23.4.3.48-stable 2023-06-12
 v23.4.2.11-stable 2023-05-02
 v23.4.1.1943-stable 2023-04-27
+v23.3.16.7-lts 2023-11-08
 v23.3.15.29-lts 2023-10-31
 v23.3.14.78-lts 2023-10-18
 v23.3.13.6-lts 2023-09-05