Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-27 01:51:59 +00:00)
Commit 636487b93e: Merge branch 'master' into taming-query-profiler
.github/actions/clean/action.yml | 11 (vendored, new file)
@@ -0,0 +1,11 @@
name: Clean runner
description: Clean the runner's temp path on ending
runs:
  using: "composite"
  steps:
    - name: Clean
      shell: bash
      run: |
        docker ps --quiet | xargs --no-run-if-empty docker kill ||:
        docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
        sudo rm -fr "${{runner.temp}}"
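For orientation, a job would consume this composite action as its final step. The job name and runner labels in the sketch below are illustrative assumptions, not part of this commit:

jobs:
  ExampleCheck:                          # hypothetical caller job, for illustration only
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Check out repository code  # the local action path only resolves after checkout
        uses: ClickHouse/checkout@v1
      # ... job-specific steps ...
      - name: Clean
        if: always()                     # run the cleanup even when earlier steps fail
        uses: ./.github/actions/clean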
.github/actions/common_setup/action.yml | 33 (vendored, new file)
@@ -0,0 +1,33 @@
name: Common setup
description: Setup necessary environments
inputs:
  job_type:
    description: the name to use in the TEMP_PATH and REPO_COPY
    default: common
    type: string
  nested_job:
    description: the fuse for unintended use inside of the reusable callable jobs
    default: true
    type: boolean
runs:
  using: "composite"
  steps:
    - name: Setup and check ENV
      shell: bash
      run: |
        echo "Setup the common ENV variables"
        cat >> "$GITHUB_ENV" << 'EOF'
        TEMP_PATH=${{runner.temp}}/${{inputs.job_type}}
        REPO_COPY=${{runner.temp}}/${{inputs.job_type}}/git-repo-copy
        EOF
        if [ -z "${{env.GITHUB_JOB_OVERRIDDEN}}" ] && [ "true" == "${{inputs.nested_job}}" ]; then
          echo "The GITHUB_JOB_OVERRIDDEN ENV is unset, and must be set for the nested jobs"
          exit 1
        fi
    - name: Setup $TEMP_PATH
      shell: bash
      run: |
        # to remove every leftovers
        sudo rm -fr "$TEMP_PATH"
        mkdir -p "$REPO_COPY"
        cp -a "$GITHUB_WORKSPACE"/. "$REPO_COPY"/
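A minimal sketch of how a nested job would call this action: it checks out the repository, exports GITHUB_JOB_OVERRIDDEN (otherwise the guard step above fails the job), then runs the setup with a job_type. The job layout and variable wiring here are assumptions for illustration, not part of this commit:

jobs:
  Build:                                    # hypothetical nested job
    runs-on: [self-hosted, builder]
    env:
      GITHUB_JOB_OVERRIDDEN: Build-example  # must be non-empty while nested_job stays true
    steps:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
      - name: Common setup
        uses: ./.github/actions/common_setup
        with:
          job_type: build_check             # TEMP_PATH and REPO_COPY land under ${{ runner.temp }}/build_check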
.github/workflows/backport_branches.yml | 323 (vendored)
@@ -1,3 +1,4 @@
+# yamllint disable rule:comments-indentation
 name: BackportPR
 
 env:
@@ -33,7 +34,12 @@ jobs:
       - name: Python unit tests
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 -m unittest discover -s . -p '*_test.py'
+          echo "Testing the main ci directory"
+          python3 -m unittest discover -s . -p 'test_*.py'
+          for dir in *_lambda/; do
+            echo "Testing $dir"
+            python3 -m unittest discover -s "$dir" -p 'test_*.py'
+          done
   DockerHubPushAarch64:
     runs-on: [self-hosted, style-checker-aarch64]
     needs: CheckLabels
@@ -69,7 +75,7 @@ jobs:
           name: changed_images_amd64
           path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
   DockerHubPush:
-    needs: [DockerHubPushAmd64, DockerHubPushAarch64]
+    needs: [DockerHubPushAmd64, DockerHubPushAarch64, PythonUnitTests]
     runs-on: [self-hosted, style-checker]
     steps:
       - name: Check out repository code
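The builder jobs below are collapsed into calls to a shared reusable workflow. That file, .github/workflows/reusable_build.yml, is not included in this diff; inferred only from the call sites (build_name and checkout_depth inputs plus the new composite actions), its interface would look roughly like the sketch below, which is an assumption for orientation rather than the file's actual contents:

name: BuildCI
'on':
  workflow_call:
    inputs:
      build_name:                      # which build configuration to run, e.g. package_release
        required: true
        type: string
      checkout_depth:                  # 0 requests full history for version/performance artifacts
        required: false
        type: number
        default: 1
jobs:
  Build:
    runs-on: [self-hosted, builder]
    env:
      BUILD_NAME: ${{ inputs.build_name }}
      GITHUB_JOB_OVERRIDDEN: Build-${{ inputs.build_name }}   # satisfies the common_setup guard
    steps:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          submodules: true
          fetch-depth: ${{ inputs.checkout_depth }}
      - name: Common setup
        uses: ./.github/actions/common_setup
        with:
          job_type: build_check
      - name: Build
        run: |
          cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
      - name: Clean
        if: always()
        uses: ./.github/actions/clean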
@ -164,320 +170,43 @@ jobs:
|
||||
#########################################################################################
|
||||
BuilderDebRelease:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=package_release
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
fetch-depth: 0 # For a proper version and performance artifacts
|
||||
filter: tree:0
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: package_release
|
||||
checkout_depth: 0
|
||||
BuilderDebAarch64:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=package_aarch64
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
fetch-depth: 0 # For a proper version and performance artifacts
|
||||
filter: tree:0
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: package_aarch64
|
||||
checkout_depth: 0
|
||||
BuilderDebAsan:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=package_asan
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: package_asan
|
||||
BuilderDebTsan:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=package_tsan
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: package_tsan
|
||||
BuilderDebDebug:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=package_debug
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
|
||||
run: |
|
||||
rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
|
||||
git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
|
||||
"$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
|
||||
du -hs "$GITHUB_WORKSPACE/contrib" ||:
|
||||
find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: package_debug
|
||||
BuilderBinDarwin:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=binary_darwin
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
fetch-depth: 0 # otherwise we will have no info about contributors
|
||||
filter: tree:0
|
||||
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
|
||||
run: |
|
||||
rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
|
||||
git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
|
||||
"$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
|
||||
du -hs "$GITHUB_WORKSPACE/contrib" ||:
|
||||
find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: binary_darwin
|
||||
checkout_depth: 0
|
||||
BuilderBinDarwinAarch64:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=binary_darwin_aarch64
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
fetch-depth: 0 # otherwise we will have no info about contributors
|
||||
filter: tree:0
|
||||
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
|
||||
run: |
|
||||
rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
|
||||
git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
|
||||
"$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
|
||||
du -hs "$GITHUB_WORKSPACE/contrib" ||:
|
||||
find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: binary_darwin_aarch64
|
||||
checkout_depth: 0
|
||||
############################################################################################
|
||||
##################################### Docker images #######################################
|
||||
############################################################################################
|
||||
|
.github/workflows/master.yml | 784 (vendored)
@@ -1,3 +1,4 @@
+# yamllint disable rule:comments-indentation
 name: MasterCI
 
 env:
@@ -19,7 +20,12 @@ jobs:
       - name: Python unit tests
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 -m unittest discover -s . -p '*_test.py'
+          echo "Testing the main ci directory"
+          python3 -m unittest discover -s . -p 'test_*.py'
+          for dir in *_lambda/; do
+            echo "Testing $dir"
+            python3 -m unittest discover -s "$dir" -p 'test_*.py'
+          done
   DockerHubPushAarch64:
     runs-on: [self-hosted, style-checker-aarch64]
     steps:
@ -179,789 +185,109 @@ jobs:
|
||||
#########################################################################################
|
||||
BuilderDebRelease:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=package_release
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
fetch-depth: 0 # For a proper version and performance artifacts
|
||||
filter: tree:0
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
checkout_depth: 0
|
||||
build_name: package_release
|
||||
BuilderDebAarch64:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=package_aarch64
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ runner.temp }}/images_path
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
fetch-depth: 0 # For a proper version and performance artifacts
|
||||
filter: tree:0
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ runner.temp }}/build_check/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
checkout_depth: 0
|
||||
build_name: package_aarch64
|
||||
BuilderBinRelease:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=binary_release
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
fetch-depth: 0 # otherwise we will have no info about contributors
|
||||
filter: tree:0
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
checkout_depth: 0
|
||||
build_name: binary_release
|
||||
BuilderDebAsan:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=package_asan
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: package_asan
|
||||
BuilderDebUBsan:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=package_ubsan
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: package_ubsan
|
||||
BuilderDebTsan:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=package_tsan
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: package_tsan
|
||||
BuilderDebMsan:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=package_msan
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: package_msan
|
||||
BuilderDebDebug:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=package_debug
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
|
||||
run: |
|
||||
rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
|
||||
git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
|
||||
"$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
|
||||
du -hs "$GITHUB_WORKSPACE/contrib" ||:
|
||||
find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: package_debug
|
||||
##########################################################################################
|
||||
##################################### SPECIAL BUILDS #####################################
|
||||
##########################################################################################
|
||||
BuilderBinClangTidy:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=binary_tidy
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: binary_tidy
|
||||
BuilderBinDarwin:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=binary_darwin
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
fetch-depth: 0 # otherwise we will have no info about contributors
|
||||
filter: tree:0
|
||||
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
|
||||
run: |
|
||||
rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
|
||||
git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
|
||||
"$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
|
||||
du -hs "$GITHUB_WORKSPACE/contrib" ||:
|
||||
find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: binary_darwin
|
||||
checkout_depth: 0
|
||||
BuilderBinAarch64:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=binary_aarch64
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
fetch-depth: 0 # otherwise we will have no info about contributors
|
||||
filter: tree:0
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: binary_aarch64
|
||||
checkout_depth: 0
|
||||
BuilderBinFreeBSD:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=binary_freebsd
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
fetch-depth: 0 # otherwise we will have no info about contributors
|
||||
filter: tree:0
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: binary_freebsd
|
||||
checkout_depth: 0
|
||||
BuilderBinDarwinAarch64:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=binary_darwin_aarch64
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
fetch-depth: 0 # otherwise we will have no info about contributors
|
||||
filter: tree:0
|
||||
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
|
||||
run: |
|
||||
rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
|
||||
git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
|
||||
"$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
|
||||
du -hs "$GITHUB_WORKSPACE/contrib" ||:
|
||||
find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: binary_darwin_aarch64
|
||||
checkout_depth: 0
|
||||
BuilderBinPPC64:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=binary_ppc64le
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
fetch-depth: 0 # otherwise we will have no info about contributors
|
||||
filter: tree:0
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: binary_ppc64le
|
||||
checkout_depth: 0
|
||||
BuilderBinAmd64Compat:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=binary_amd64_compat
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
fetch-depth: 0 # otherwise we will have no info about contributors
|
||||
filter: tree:0
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: binary_amd64_compat
|
||||
checkout_depth: 0
|
||||
BuilderBinAarch64V80Compat:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=binary_aarch64_v80compat
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
fetch-depth: 0 # otherwise we will have no info about contributors
|
||||
filter: tree:0
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: binary_aarch64_v80compat
|
||||
checkout_depth: 0
|
||||
BuilderBinRISCV64:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=binary_riscv64
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
fetch-depth: 0 # otherwise we will have no info about contributors
|
||||
filter: tree:0
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: binary_riscv64
|
||||
checkout_depth: 0
|
||||
BuilderBinS390X:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=binary_s390x
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
fetch-depth: 0 # otherwise we will have no info about contributors
|
||||
filter: tree:0
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: binary_s390x
|
||||
checkout_depth: 0
|
||||
############################################################################################
|
||||
##################################### Docker images #######################################
|
||||
############################################################################################
|
||||
|
.github/workflows/pull_request.yml | 790 (vendored)
@@ -1,3 +1,4 @@
+# yamllint disable rule:comments-indentation
 name: PullRequestCI
 
 env:
@@ -47,10 +48,10 @@ jobs:
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
           echo "Testing the main ci directory"
-          python3 -m unittest discover -s . -p '*_test.py'
+          python3 -m unittest discover -s . -p 'test_*.py'
           for dir in *_lambda/; do
             echo "Testing $dir"
-            python3 -m unittest discover -s "$dir" -p '*_test.py'
+            python3 -m unittest discover -s "$dir" -p 'test_*.py'
           done
   DockerHubPushAarch64:
     needs: CheckLabels
@ -246,771 +247,100 @@ jobs:
|
||||
#################################### ORDINARY BUILDS ####################################
|
||||
#########################################################################################
|
||||
BuilderDebRelease:
|
||||
needs: [DockerHubPush, FastTest, StyleCheck]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=package_release
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
needs: [FastTest, StyleCheck]
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
fetch-depth: 0 # for performance artifact
|
||||
filter: tree:0
|
||||
submodules: true
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
BuilderBinRelease:
|
||||
needs: [DockerHubPush, FastTest, StyleCheck]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=binary_release
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: package_release
|
||||
checkout_depth: 0
|
||||
BuilderDebAarch64:
|
||||
needs: [DockerHubPush, FastTest, StyleCheck]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=package_aarch64
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
needs: [FastTest, StyleCheck]
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ runner.temp }}/images_path
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
build_name: package_aarch64
|
||||
checkout_depth: 0
|
||||
BuilderBinRelease:
|
||||
needs: [FastTest, StyleCheck]
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
fetch-depth: 0 # for performance artifact
|
||||
filter: tree:0
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: binary_release
|
||||
BuilderDebAsan:
|
||||
needs: [DockerHubPush, FastTest, StyleCheck]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=package_asan
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
needs: [FastTest, StyleCheck]
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: package_asan
|
||||
BuilderDebUBsan:
|
||||
needs: [DockerHubPush, FastTest, StyleCheck]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=package_ubsan
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
needs: [FastTest, StyleCheck]
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: package_ubsan
|
||||
BuilderDebTsan:
|
||||
needs: [DockerHubPush, FastTest, StyleCheck]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=package_tsan
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
needs: [FastTest, StyleCheck]
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: package_tsan
|
||||
BuilderDebMsan:
|
||||
needs: [DockerHubPush, FastTest, StyleCheck]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=package_msan
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
needs: [FastTest, StyleCheck]
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: package_msan
|
||||
BuilderDebDebug:
|
||||
needs: [DockerHubPush, FastTest, StyleCheck]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=package_debug
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
needs: [FastTest, StyleCheck]
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
|
||||
run: |
|
||||
rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
|
||||
git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
|
||||
"$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
|
||||
du -hs "$GITHUB_WORKSPACE/contrib" ||:
|
||||
find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: package_debug
|
||||
##########################################################################################
##################################### SPECIAL BUILDS #####################################
##########################################################################################
BuilderBinClangTidy:
|
||||
needs: [DockerHubPush, FastTest, StyleCheck]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=binary_tidy
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
needs: [FastTest, StyleCheck]
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: binary_tidy
|
||||
BuilderBinDarwin:
|
||||
needs: [DockerHubPush, FastTest, StyleCheck]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=binary_darwin
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
needs: [FastTest, StyleCheck]
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
|
||||
run: |
|
||||
rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
|
||||
git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
|
||||
"$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
|
||||
du -hs "$GITHUB_WORKSPACE/contrib" ||:
|
||||
find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: binary_darwin
|
||||
BuilderBinAarch64:
|
||||
needs: [DockerHubPush, FastTest, StyleCheck]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=binary_aarch64
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
needs: [FastTest, StyleCheck]
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: binary_aarch64
|
||||
BuilderBinFreeBSD:
|
||||
needs: [DockerHubPush, FastTest, StyleCheck]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=binary_freebsd
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
needs: [FastTest, StyleCheck]
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: binary_freebsd
|
||||
BuilderBinDarwinAarch64:
|
||||
needs: [DockerHubPush, FastTest, StyleCheck]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=binary_darwin_aarch64
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
needs: [FastTest, StyleCheck]
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
|
||||
run: |
|
||||
rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
|
||||
git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
|
||||
"$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
|
||||
du -hs "$GITHUB_WORKSPACE/contrib" ||:
|
||||
find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: binary_darwin_aarch64
|
||||
BuilderBinPPC64:
|
||||
needs: [DockerHubPush, FastTest, StyleCheck]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=binary_ppc64le
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
needs: [FastTest, StyleCheck]
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: binary_ppc64le
|
||||
BuilderBinAmd64Compat:
|
||||
needs: [DockerHubPush, FastTest, StyleCheck]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=binary_amd64_compat
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
needs: [FastTest, StyleCheck]
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: binary_amd64_compat
|
||||
BuilderBinAarch64V80Compat:
|
||||
needs: [DockerHubPush, FastTest, StyleCheck]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=binary_aarch64_v80compat
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
needs: [FastTest, StyleCheck]
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: binary_aarch64_v80compat
|
||||
BuilderBinRISCV64:
|
||||
needs: [DockerHubPush, FastTest, StyleCheck]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=binary_riscv64
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
needs: [FastTest, StyleCheck]
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: binary_riscv64
|
||||
BuilderBinS390X:
|
||||
needs: [DockerHubPush, FastTest, StyleCheck]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=binary_s390x
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
needs: [FastTest, StyleCheck]
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: binary_s390x
|
||||
############################################################################################
##################################### Docker images #######################################
############################################################################################

393  .github/workflows/release_branches.yml  vendored
@@ -1,3 +1,4 @@
# yamllint disable rule:comments-indentation
name: ReleaseBranchCI

env:
@@ -140,401 +141,53 @@ jobs:
#########################################################################################
BuilderDebRelease:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=package_release
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
fetch-depth: 0 # otherwise we will have no info about contributors
|
||||
filter: tree:0
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: package_release
|
||||
checkout_depth: 0
|
||||
BuilderDebAarch64:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=package_aarch64
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ runner.temp }}/images_path
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
fetch-depth: 0 # For a proper version and performance artifacts
|
||||
filter: tree:0
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ runner.temp }}/build_check/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: package_aarch64
|
||||
checkout_depth: 0
|
||||
BuilderDebAsan:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=package_asan
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: package_asan
|
||||
BuilderDebUBsan:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=package_ubsan
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: package_ubsan
|
||||
BuilderDebTsan:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=package_tsan
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: package_tsan
|
||||
BuilderDebMsan:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=package_msan
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: package_msan
|
||||
BuilderDebDebug:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=package_debug
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
|
||||
run: |
|
||||
rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
|
||||
git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
|
||||
"$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
|
||||
du -hs "$GITHUB_WORKSPACE/contrib" ||:
|
||||
find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: package_debug
|
||||
BuilderBinDarwin:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=binary_darwin
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
fetch-depth: 0 # otherwise we will have no info about contributors
|
||||
filter: tree:0
|
||||
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
|
||||
run: |
|
||||
rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
|
||||
git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
|
||||
"$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
|
||||
du -hs "$GITHUB_WORKSPACE/contrib" ||:
|
||||
find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: binary_darwin
|
||||
checkout_depth: 0
|
||||
BuilderBinDarwinAarch64:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=binary_darwin_aarch64
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v3
|
||||
uses: ./.github/workflows/reusable_build.yml
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
fetch-depth: 0 # otherwise we will have no info about contributors
|
||||
filter: tree:0
|
||||
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
|
||||
run: |
|
||||
rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
|
||||
git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
|
||||
"$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
|
||||
du -hs "$GITHUB_WORKSPACE/contrib" ||:
|
||||
find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
|
||||
- name: Build
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
|
||||
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
build_name: binary_darwin_aarch64
|
||||
checkout_depth: 0
|
||||
############################################################################################
##################################### Docker images #######################################
############################################################################################

74  .github/workflows/reusable_build.yml  vendored  Normal file
@@ -0,0 +1,74 @@
### For the pure soul wishes to move it to another place
# https://github.com/orgs/community/discussions/9050

name: Build ClickHouse
'on':
  workflow_call:
    inputs:
      build_name:
        description: the value of build type from tests/ci/ci_config.py
        required: true
        type: string
      checkout_depth:
        description: the value of the git shallow checkout
        required: false
        type: number
        default: 1
      runner_type:
        description: the label of runner to use
        default: builder
        type: string
      additional_envs:
        description: additional ENV variables to setup the job
        type: string

jobs:
  Build:
    name: Build-${{inputs.build_name}}
    runs-on: [self-hosted, '${{inputs.runner_type}}']
    steps:
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
          submodules: true
          fetch-depth: ${{inputs.checkout_depth}}
          filter: tree:0
      - name: Set build envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          IMAGES_PATH=${{runner.temp}}/images_path
          GITHUB_JOB_OVERRIDDEN=Build-${{inputs.build_name}}
          ${{inputs.additional_envs}}
          EOF
          python3 "$GITHUB_WORKSPACE"/tests/ci/ci_config.py --build-name "${{inputs.build_name}}" >> "$GITHUB_ENV"
      - name: Apply sparse checkout for contrib # in order to check that it doesn't break build
        # This step is done in GITHUB_WORKSPACE,
        # because it's broken in REPO_COPY for some reason
        if: ${{ env.BUILD_SPARSE_CHECKOUT == 'true' }}
        run: |
          rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
          git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
          "$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
          du -hs "$GITHUB_WORKSPACE/contrib" ||:
          find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
      - name: Common setup
        uses: ./.github/actions/common_setup
        with:
          job_type: build_check
      - name: Download changed images
        uses: actions/download-artifact@v3
        with:
          name: changed_images
          path: ${{ env.IMAGES_PATH }}
      - name: Build
        run: |
          cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
      - name: Upload build URLs to artifacts
        if: ${{ success() || failure() }}
        uses: actions/upload-artifact@v3
        with:
          name: ${{ env.BUILD_URLS }}
          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
      - name: Clean
        uses: ./.github/actions/clean
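To show how the file above is consumed, here is a minimal caller sketch assembled from the `uses:`/`with:` stanzas added elsewhere in this commit (for example `build_name: package_release` with `checkout_depth: 0` in release_branches.yml); it is illustrative rather than quoted from one file, and the optional `runner_type` and `additional_envs` inputs simply fall back to their defaults when omitted:

```yaml
  # Sketch of a job calling the reusable workflow; values mirror stanzas visible in this diff.
  BuilderDebRelease:
    needs: [DockerHubPush]
    uses: ./.github/workflows/reusable_build.yml
    with:
      build_name: package_release
      checkout_depth: 0  # full clone, as the old job did for contributor/version info
```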
3  .gitmodules  vendored
@@ -1,3 +1,6 @@
# Please do not use 'branch = ...' tags with submodule entries. Such tags make updating submodules a
# little bit more convenient but they do *not* specify the tracked submodule branch. Thus, they are
# more confusing than useful.
[submodule "contrib/zstd"]
path = contrib/zstd
url = https://github.com/facebook/zstd
20  README.md
@@ -1,6 +1,17 @@
[<img alt="ClickHouse — open source distributed column-oriented DBMS" width="400px" src="https://clickhouse.com/images/ch_gh_logo_rounded.png" />](https://clickhouse.com?utm_source=github)
<div align=center>

ClickHouse® is an open-source column-oriented database management system that allows generating analytical data reports in real-time.
[![Website](https://img.shields.io/website?up_message=AVAILABLE&down_message=DOWN&url=https%3A%2F%2Fclickhouse.com&style=for-the-badge)](https://clickhouse.com)
[![Apache 2.0 License](https://img.shields.io/badge/license-Apache%202.0-blueviolet?style=for-the-badge)](https://www.apache.org/licenses/LICENSE-2.0)

<picture align=center>
<source media="(prefers-color-scheme: dark)" srcset="https://github.com/ClickHouse/clickhouse-docs/assets/9611008/4ef9c104-2d3f-4646-b186-507358d2fe28">
<source media="(prefers-color-scheme: light)" srcset="https://github.com/ClickHouse/clickhouse-docs/assets/9611008/b001dc7b-5a45-4dcd-9275-e03beb7f9177">
<img alt="The ClickHouse company logo." src="https://github.com/ClickHouse/clickhouse-docs/assets/9611008/b001dc7b-5a45-4dcd-9275-e03beb7f9177">
</picture>

<h4>ClickHouse® is an open-source column-oriented database management system that allows generating analytical data reports in real-time.</h4>

</div>

## How To Install (Linux, macOS, FreeBSD)
```
@@ -22,8 +33,7 @@ curl https://clickhouse.com/ | sh

## Upcoming Events

* [**ClickHouse Meetup in Beijing**](https://www.meetup.com/clickhouse-beijing-user-group/events/296334856/) - Nov 4
* [**ClickHouse Meetup in San Francisco**](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/296334923/) - Nov 8
* [**ClickHouse Meetup in San Francisco**](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/296334923/) - Nov 14
* [**ClickHouse Meetup in Singapore**](https://www.meetup.com/clickhouse-singapore-meetup-group/events/296334976/) - Nov 15
* [**ClickHouse Meetup in Berlin**](https://www.meetup.com/clickhouse-berlin-user-group/events/296488501/) - Nov 30
* [**ClickHouse Meetup in NYC**](https://www.meetup.com/clickhouse-new-york-user-group/events/296488779/) - Dec 11
@@ -33,7 +43,7 @@ Also, keep an eye out for upcoming meetups around the world. Somewhere else you

## Recent Recordings
* **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Current featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"
* **Recording available**: [**v23.6 Release Webinar**](https://www.youtube.com/watch?v=cuf_hYn7dqU) All the features of 23.6, one convenient video! Watch it now!
* **Recording available**: [**v23.10 Release Webinar**](https://www.youtube.com/watch?v=PGQS6uPb970) All the features of 23.10, one convenient video! Watch it now!
* **All release webinar recordings**: [YouTube playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3jAlSy1JxyP8zluvXaN3nxU)
@@ -119,17 +119,16 @@
#include <base/types.h>
namespace DB
{
void abortOnFailedAssertion(const String & description);
[[noreturn]] void abortOnFailedAssertion(const String & description);
}
#define chassert(x) static_cast<bool>(x) ? void(0) : ::DB::abortOnFailedAssertion(#x)
#define chassert(x) do { static_cast<bool>(x) ? void(0) : ::DB::abortOnFailedAssertion(#x); } while (0)
#define UNREACHABLE() abort()
// clang-format off
#else
/// Here sizeof() trick is used to suppress unused warning for result,
/// since simple "(void)x" will evaluate the expression, while
/// "sizeof(!(x))" will not.
#define NIL_EXPRESSION(x) (void)sizeof(!(x))
#define chassert(x) NIL_EXPRESSION(x)
#define chassert(x) (void)sizeof(!(x))
#define UNREACHABLE() __builtin_unreachable()
#endif
#endif
@@ -1,3 +1,5 @@
# Generates a separate file with debug symbols while stripping it from the main binary.
# This is needed for Debian packages.
macro(clickhouse_split_debug_symbols)
set(oneValueArgs TARGET DESTINATION_DIR BINARY_PATH)
2  contrib/NuRaft  vendored
@@ -1 +1 @@
Subproject commit eb1572129c71beb2156dcdaadc3fb136954aed96
Subproject commit b7ea89b817a18dc0eafc1f909d568869f02d2d04
2  contrib/grpc  vendored
@@ -1 +1 @@
Subproject commit bef8212d1e01f99e406c282ceab3d42da08e09ce
Subproject commit b723ecae0991bb873fe87a595dfb187178733fde
@@ -1,3 +1,10 @@
option (ENABLE_SSH "Enable support for SSH keys and protocol" ON)

if (NOT ENABLE_SSH)
    message(STATUS "Not using SSH")
    return()
endif()

set(LIB_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libssh")
set(LIB_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/libssh")
# Specify search path for CMake modules to be loaded by include()
@@ -6,12 +6,13 @@ FILES_TO_CHECKOUT=$(git rev-parse --git-dir)/info/sparse-checkout
echo '/*' > $FILES_TO_CHECKOUT
echo '!/test/*' >> $FILES_TO_CHECKOUT
echo '/test/build/*' >> $FILES_TO_CHECKOUT
echo '/test/core/tsi/alts/fake_handshaker/*' >> $FILES_TO_CHECKOUT
echo '/test/core/event_engine/fuzzing_event_engine/*' >> $FILES_TO_CHECKOUT
echo '!/tools/*' >> $FILES_TO_CHECKOUT
echo '/tools/codegen/*' >> $FILES_TO_CHECKOUT
echo '!/examples/*' >> $FILES_TO_CHECKOUT
echo '!/doc/*' >> $FILES_TO_CHECKOUT
# FIXME why do we need csharp?
#echo '!/src/csharp/*' >> $FILES_TO_CHECKOUT
echo '!/src/csharp/*' >> $FILES_TO_CHECKOUT
echo '!/src/python/*' >> $FILES_TO_CHECKOUT
echo '!/src/objective-c/*' >> $FILES_TO_CHECKOUT
echo '!/src/php/*' >> $FILES_TO_CHECKOUT
11  contrib/update-submodules.sh  vendored
@@ -1,11 +1,12 @@
#!/bin/sh

set -e

WORKDIR=$(dirname "$0")
WORKDIR=$(readlink -f "${WORKDIR}")
SCRIPT_PATH=$(realpath "$0")
SCRIPT_DIR=$(dirname "${SCRIPT_PATH}")
GIT_DIR=$(git -C "$SCRIPT_DIR" rev-parse --show-toplevel)
cd $GIT_DIR

"$WORKDIR/sparse-checkout/setup-sparse-checkout.sh"
contrib/sparse-checkout/setup-sparse-checkout.sh
git submodule init
git submodule sync
git submodule update --depth=1
git config --file .gitmodules --get-regexp .*path | sed 's/[^ ]* //' | xargs -I _ --max-procs 64 git submodule update --depth=1 --single-branch _
@@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="23.10.1.1976"
ARG VERSION="23.10.3.5"
ARG PACKAGES="clickhouse-keeper"

# user/group precreated explicitly with fixed uid/gid on purpose.
@@ -126,6 +126,7 @@ fi

mv ./programs/clickhouse* /output || mv ./programs/*_fuzzer /output
[ -x ./programs/self-extracting/clickhouse ] && mv ./programs/self-extracting/clickhouse /output
[ -x ./programs/self-extracting/clickhouse-stripped ] && mv ./programs/self-extracting/clickhouse-stripped /output
mv ./src/unit_tests_dbms /output ||: # may not exist for some binary builds
mv ./programs/*.dict ./programs/*.options ./programs/*_seed_corpus.zip /output ||: # libFuzzer oss-fuzz compatible infrastructure
@@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="23.10.1.1976"
ARG VERSION="23.10.3.5"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

# user/group precreated explicitly with fixed uid/gid on purpose.
@@ -30,7 +30,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list

ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
ARG VERSION="23.10.1.1976"
ARG VERSION="23.10.3.5"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

# set non-empty deb_location_url url to create a docker image
@@ -15,10 +15,15 @@ CLICKHOUSE_CI_LOGS_USER=${CLICKHOUSE_CI_LOGS_USER:-ci}
# Pre-configured destination cluster, where to export the data
CLICKHOUSE_CI_LOGS_CLUSTER=${CLICKHOUSE_CI_LOGS_CLUSTER:-system_logs_export}

EXTRA_COLUMNS=${EXTRA_COLUMNS:-"pull_request_number UInt32, commit_sha String, check_start_time DateTime('UTC'), check_name String, instance_type String, instance_id String, "}
EXTRA_COLUMNS_EXPRESSION=${EXTRA_COLUMNS_EXPRESSION:-"CAST(0 AS UInt32) AS pull_request_number, '' AS commit_sha, now() AS check_start_time, '' AS check_name, '' AS instance_type, '' AS instance_id"}
EXTRA_COLUMNS=${EXTRA_COLUMNS:-"pull_request_number UInt32, commit_sha String, check_start_time DateTime('UTC'), check_name LowCardinality(String), instance_type LowCardinality(String), instance_id String, INDEX ix_pr (pull_request_number) TYPE set(100), INDEX ix_commit (commit_sha) TYPE set(100), INDEX ix_check_time (check_start_time) TYPE minmax, "}
EXTRA_COLUMNS_EXPRESSION=${EXTRA_COLUMNS_EXPRESSION:-"CAST(0 AS UInt32) AS pull_request_number, '' AS commit_sha, now() AS check_start_time, toLowCardinality('') AS check_name, toLowCardinality('') AS instance_type, '' AS instance_id"}
EXTRA_ORDER_BY_COLUMNS=${EXTRA_ORDER_BY_COLUMNS:-"check_name, "}

# trace_log needs more columns for symbolization
EXTRA_COLUMNS_TRACE_LOG="${EXTRA_COLUMNS} symbols Array(LowCardinality(String)), lines Array(LowCardinality(String)), "
EXTRA_COLUMNS_EXPRESSION_TRACE_LOG="${EXTRA_COLUMNS_EXPRESSION}, arrayMap(x -> toLowCardinality(demangle(addressToSymbol(x))), trace) AS symbols, arrayMap(x -> toLowCardinality(addressToLine(x)), trace) AS lines"

function __set_connection_args
{
# It's impossible to use generous $CONNECTION_ARGS string, it's unsafe from word splitting perspective.
@@ -125,9 +130,18 @@ function setup_logs_replication
echo 'Create %_log tables'
clickhouse-client --query "SHOW TABLES FROM system LIKE '%\\_log'" | while read -r table
do
if [[ "$table" = "trace_log" ]]
then
EXTRA_COLUMNS_FOR_TABLE="${EXTRA_COLUMNS_TRACE_LOG}"
EXTRA_COLUMNS_EXPRESSION_FOR_TABLE="${EXTRA_COLUMNS_EXPRESSION_TRACE_LOG}"
else
EXTRA_COLUMNS_FOR_TABLE="${EXTRA_COLUMNS}"
EXTRA_COLUMNS_EXPRESSION_FOR_TABLE="${EXTRA_COLUMNS_EXPRESSION}"
fi

# Calculate hash of its structure. Note: 4 is the version of extra columns - increment it if extra columns are changed:
hash=$(clickhouse-client --query "
SELECT sipHash64(4, groupArray((name, type)))
SELECT sipHash64(9, groupArray((name, type)))
FROM (SELECT name, type FROM system.columns
WHERE database = 'system' AND table = '$table'
ORDER BY position)
@@ -135,7 +149,7 @@ function setup_logs_replication

# Create the destination table with adapted name and structure:
statement=$(clickhouse-client --format TSVRaw --query "SHOW CREATE TABLE system.${table}" | sed -r -e '
s/^\($/('"$EXTRA_COLUMNS"'/;
s/^\($/('"$EXTRA_COLUMNS_FOR_TABLE"'/;
s/ORDER BY \(/ORDER BY ('"$EXTRA_ORDER_BY_COLUMNS"'/;
s/^CREATE TABLE system\.\w+_log$/CREATE TABLE IF NOT EXISTS '"$table"'_'"$hash"'/;
/^TTL /d
@@ -155,7 +169,7 @@ function setup_logs_replication
ENGINE = Distributed(${CLICKHOUSE_CI_LOGS_CLUSTER}, default, ${table}_${hash})
SETTINGS flush_on_detach=0
EMPTY AS
SELECT ${EXTRA_COLUMNS_EXPRESSION}, *
SELECT ${EXTRA_COLUMNS_EXPRESSION_FOR_TABLE}, *
FROM system.${table}
" || continue

@@ -163,7 +177,7 @@ function setup_logs_replication

clickhouse-client --query "
CREATE MATERIALIZED VIEW system.${table}_watcher TO system.${table}_sender AS
SELECT ${EXTRA_COLUMNS_EXPRESSION}, *
SELECT ${EXTRA_COLUMNS_EXPRESSION_FOR_TABLE}, *
FROM system.${table}
" || continue
done
@@ -19,6 +19,11 @@ dpkg -i package_folder/clickhouse-common-static-dbg_*.deb
dpkg -i package_folder/clickhouse-server_*.deb
dpkg -i package_folder/clickhouse-client_*.deb

# Check that the tools are available under short names
ch --query "SELECT 1" || exit 1
chl --query "SELECT 1" || exit 1
chc --version || exit 1

ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test

# shellcheck disable=SC1091
@@ -62,7 +67,7 @@ if [ "$NUM_TRIES" -gt "1" ]; then
export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_TIME_US=10000

mkdir -p /var/run/clickhouse-server
# simpliest way to forward env variables to server
# simplest way to forward env variables to server
sudo -E -u clickhouse /usr/bin/clickhouse-server --config /etc/clickhouse-server/config.xml --daemon --pid-file /var/run/clickhouse-server/clickhouse-server.pid
else
sudo clickhouse start
@@ -53,31 +53,28 @@ function configure()
> /etc/clickhouse-server/config.d/keeper_port.xml.tmp
sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml

function randomize_config_boolean_value {
function randomize_keeper_config_boolean_value {
value=$(($RANDOM % 2))
sudo cat /etc/clickhouse-server/config.d/keeper_port.xml \
sudo cat /etc/clickhouse-server/config.d/$2.xml \
| sed "s|<$1>[01]</$1>|<$1>$value</$1>|" \
> /etc/clickhouse-server/config.d/keeper_port.xml.tmp
sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml
> /etc/clickhouse-server/config.d/$2.xml.tmp
sudo mv /etc/clickhouse-server/config.d/$2.xml.tmp /etc/clickhouse-server/config.d/$2.xml
}

if [[ -n "$RANDOMIZE_KEEPER_FEATURE_FLAGS" ]] && [[ "$RANDOMIZE_KEEPER_FEATURE_FLAGS" -eq 1 ]]; then
# Randomize all Keeper feature flags
randomize_config_boolean_value filtered_list
randomize_config_boolean_value multi_read
randomize_config_boolean_value check_not_exists
randomize_config_boolean_value create_if_not_exists
randomize_config_boolean_value filtered_list keeper_port
randomize_config_boolean_value multi_read keeper_port
randomize_config_boolean_value check_not_exists keeper_port
randomize_config_boolean_value create_if_not_exists keeper_port
fi

sudo chown clickhouse /etc/clickhouse-server/config.d/keeper_port.xml
sudo chgrp clickhouse /etc/clickhouse-server/config.d/keeper_port.xml

#Randomize merge tree setting allow_experimental_block_number_column
value=$(($RANDOM % 2))
sudo cat /etc/clickhouse-server/config.d/merge_tree_settings.xml \
| sed "s|<allow_experimental_block_number_column>[01]</allow_experimental_block_number_column>|<allow_experimental_block_number_column>$value</allow_experimental_block_number_column>|" \
> /etc/clickhouse-server/config.d/merge_tree_settings.xml.tmp
sudo mv /etc/clickhouse-server/config.d/merge_tree_settings.xml.tmp /etc/clickhouse-server/config.d/merge_tree_settings.xml
randomize_config_boolean_value use_compression zookeeper

randomize_config_boolean_value allow_experimental_block_number_column merge_tree_settings

# for clickhouse-server (via service)
echo "ASAN_OPTIONS='malloc_context_size=10 verbosity=1 allocator_release_to_os_interval_ms=10000'" >> /etc/environment
18
docs/changelogs/v23.10.2.13-stable.md
Normal file
@ -0,0 +1,18 @@
|
||||
---
|
||||
sidebar_position: 1
|
||||
sidebar_label: 2023
|
||||
---
|
||||
|
||||
# 2023 Changelog
|
||||
|
||||
### ClickHouse release v23.10.2.13-stable (65d8522bb1d) FIXME as compared to v23.10.1.1976-stable (13adae0e42f)
|
||||
|
||||
#### Bug Fix (user-visible misbehavior in an official stable release)
|
||||
|
||||
* Fix a crash during table loading on startup [#56232](https://github.com/ClickHouse/ClickHouse/pull/56232) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||
* Fix segfault in signal handler for Keeper [#56266](https://github.com/ClickHouse/ClickHouse/pull/56266) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Fix incomplete query result for UNION in view() function. [#56274](https://github.com/ClickHouse/ClickHouse/pull/56274) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Fix inconsistency of "cast('0' as DateTime64(3))" and "cast('0' as Nullable(DateTime64(3)))" [#56286](https://github.com/ClickHouse/ClickHouse/pull/56286) ([李扬](https://github.com/taiyang-li)).
|
||||
* Fix crash in case of adding a column with type Object(JSON) [#56307](https://github.com/ClickHouse/ClickHouse/pull/56307) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||
* Fix buffer overflow in T64 [#56434](https://github.com/ClickHouse/ClickHouse/pull/56434) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
|
16
docs/changelogs/v23.10.3.5-stable.md
Normal file
@ -0,0 +1,16 @@
|
||||
---
|
||||
sidebar_position: 1
|
||||
sidebar_label: 2023
|
||||
---
|
||||
|
||||
# 2023 Changelog
|
||||
|
||||
### ClickHouse release v23.10.3.5-stable (b2ba7637a41) FIXME as compared to v23.10.2.13-stable (65d8522bb1d)
|
||||
|
||||
#### Improvement
|
||||
* Backported in [#56513](https://github.com/ClickHouse/ClickHouse/issues/56513): Allow backup of materialized view with dropped inner table instead of failing the backup. [#56387](https://github.com/ClickHouse/ClickHouse/pull/56387) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
|
||||
#### NO CL CATEGORY
|
||||
|
||||
* Backported in [#56605](https://github.com/ClickHouse/ClickHouse/issues/56605):. [#56598](https://github.com/ClickHouse/ClickHouse/pull/56598) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
|
14
docs/changelogs/v23.3.16.7-lts.md
Normal file
@ -0,0 +1,14 @@
|
||||
---
|
||||
sidebar_position: 1
|
||||
sidebar_label: 2023
|
||||
---
|
||||
|
||||
# 2023 Changelog
|
||||
|
||||
### ClickHouse release v23.3.16.7-lts (fb4125cc92a) FIXME as compared to v23.3.15.29-lts (218336662e4)
|
||||
|
||||
#### Bug Fix (user-visible misbehavior in an official stable release)
|
||||
|
||||
* Fix: avoid using regex match, possibly containing alternation, as a key condition. [#54696](https://github.com/ClickHouse/ClickHouse/pull/54696) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||
* Fix buffer overflow in T64 [#56434](https://github.com/ClickHouse/ClickHouse/pull/56434) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
|
21
docs/changelogs/v23.8.6.16-lts.md
Normal file
@ -0,0 +1,21 @@
|
||||
---
|
||||
sidebar_position: 1
|
||||
sidebar_label: 2023
|
||||
---
|
||||
|
||||
# 2023 Changelog
|
||||
|
||||
### ClickHouse release v23.8.6.16-lts (077df679bed) FIXME as compared to v23.8.5.16-lts (e8a1af5fe2f)
|
||||
|
||||
#### Bug Fix (user-visible misbehavior in an official stable release)
|
||||
|
||||
* Fix rare case of CHECKSUM_DOESNT_MATCH error [#54549](https://github.com/ClickHouse/ClickHouse/pull/54549) ([alesapin](https://github.com/alesapin)).
|
||||
* Fix: avoid using regex match, possibly containing alternation, as a key condition. [#54696](https://github.com/ClickHouse/ClickHouse/pull/54696) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||
* Fix a crash during table loading on startup [#56232](https://github.com/ClickHouse/ClickHouse/pull/56232) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||
* Fix segfault in signal handler for Keeper [#56266](https://github.com/ClickHouse/ClickHouse/pull/56266) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Fix buffer overflow in T64 [#56434](https://github.com/ClickHouse/ClickHouse/pull/56434) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
|
||||
#### NOT FOR CHANGELOG / INSIGNIFICANT
|
||||
|
||||
* Improve enrich image [#55793](https://github.com/ClickHouse/ClickHouse/pull/55793) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
|
17
docs/changelogs/v23.9.4.11-stable.md
Normal file
@ -0,0 +1,17 @@
|
||||
---
|
||||
sidebar_position: 1
|
||||
sidebar_label: 2023
|
||||
---
|
||||
|
||||
# 2023 Changelog
|
||||
|
||||
### ClickHouse release v23.9.4.11-stable (74c1f49dd6a) FIXME as compared to v23.9.3.12-stable (b7230b06563)
|
||||
|
||||
#### Bug Fix (user-visible misbehavior in an official stable release)
|
||||
|
||||
* Fix wrong query result when http_write_exception_in_output_format=1 [#56135](https://github.com/ClickHouse/ClickHouse/pull/56135) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix schema cache for fallback JSON->JSONEachRow with changed settings [#56172](https://github.com/ClickHouse/ClickHouse/pull/56172) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix a crash during table loading on startup [#56232](https://github.com/ClickHouse/ClickHouse/pull/56232) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||
* Fix segfault in signal handler for Keeper [#56266](https://github.com/ClickHouse/ClickHouse/pull/56266) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Fix buffer overflow in T64 [#56434](https://github.com/ClickHouse/ClickHouse/pull/56434) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
|
@ -67,22 +67,30 @@ Implementations of `ReadBuffer`/`WriteBuffer` are used for working with files an
|
||||
|
||||
Read/WriteBuffers only deal with bytes. There are functions from `ReadHelpers` and `WriteHelpers` header files to help with formatting input/output. For example, there are helpers to write a number in decimal format.
|
||||
|
||||
Let’s look at what happens when you want to write a result set in `JSON` format to stdout. You have a result set ready to be fetched from `IBlockInputStream`. You create `WriteBufferFromFileDescriptor(STDOUT_FILENO)` to write bytes to stdout. You create `JSONRowOutputStream`, initialized with that `WriteBuffer`, to write rows in `JSON` to stdout. You create `BlockOutputStreamFromRowOutputStream` on top of it, to represent it as `IBlockOutputStream`. Then you call `copyData` to transfer data from `IBlockInputStream` to `IBlockOutputStream`, and everything works. Internally, `JSONRowOutputStream` will write various JSON delimiters and call the `IDataType::serializeTextJSON` method with a reference to `IColumn` and the row number as arguments. Consequently, `IDataType::serializeTextJSON` will call a method from `WriteHelpers.h`: for example, `writeText` for numeric types and `writeJSONString` for `DataTypeString`.
|
||||
Let's examine what happens when you want to write a result set in `JSON` format to stdout.
|
||||
You have a result set ready to be fetched from a pulling `QueryPipeline`.
|
||||
First, you create a `WriteBufferFromFileDescriptor(STDOUT_FILENO)` to write bytes to stdout.
|
||||
Next, you connect the result from the query pipeline to `JSONRowOutputFormat`, which is initialized with that `WriteBuffer`, to write rows in `JSON` format to stdout.
|
||||
This can be done via the `complete` method, which turns a pulling `QueryPipeline` into a completed `QueryPipeline`.
|
||||
Internally, `JSONRowOutputFormat` will write various JSON delimiters and call the `IDataType::serializeTextJSON` method with a reference to `IColumn` and the row number as arguments. Consequently, `IDataType::serializeTextJSON` will call a method from `WriteHelpers.h`: for example, `writeText` for numeric types and `writeJSONString` for `DataTypeString`.
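As an illustration, the output produced by this format can be seen by running any query with `FORMAT JSON`:

```sql
-- The JSON output format is implemented by JSONRowOutputFormat on the server side.
SELECT number AS x, toString(number) AS s
FROM numbers(2)
FORMAT JSON
```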
|
||||
|
||||
## Tables {#tables}
|
||||
|
||||
The `IStorage` interface represents tables. Different implementations of that interface are different table engines. Examples are `StorageMergeTree`, `StorageMemory`, and so on. Instances of these classes are just tables.
|
||||
|
||||
The key `IStorage` methods are `read` and `write`. There are also `alter`, `rename`, `drop`, and so on. The `read` method accepts the following arguments: the set of columns to read from a table, the `AST` query to consider, and the desired number of streams to return. It returns one or multiple `IBlockInputStream` objects and information about the stage of data processing that was completed inside a table engine during query execution.
|
||||
The key methods in `IStorage` are `read` and `write`, along with others such as `alter`, `rename`, and `drop`. The `read` method accepts the following arguments: a set of columns to read from a table, the `AST` query to consider, and the desired number of streams. It returns a `Pipe`.
|
||||
|
||||
In most cases, the read method is only responsible for reading the specified columns from a table, not for any further data processing. All further data processing is done by the query interpreter and is outside the responsibility of `IStorage`.
|
||||
In most cases, the read method is responsible only for reading the specified columns from a table, not for any further data processing.
|
||||
All subsequent data processing is handled by another part of the pipeline, which falls outside the responsibility of `IStorage`.
|
||||
|
||||
But there are notable exceptions:
|
||||
|
||||
- The AST query is passed to the `read` method, and the table engine can use it to derive index usage and to read less data from a table.
|
||||
- Sometimes the table engine can process data itself to a specific stage. For example, `StorageDistributed` can send a query to remote servers, ask them to process data to a stage where data from different remote servers can be merged, and return that preprocessed data. The query interpreter then finishes processing the data.
|
||||
|
||||
The table’s `read` method can return multiple `IBlockInputStream` objects to allow parallel data processing. These multiple block input streams can read from a table in parallel. Then you can wrap these streams with various transformations (such as expression evaluation or filtering) that can be calculated independently and create a `UnionBlockInputStream` on top of them, to read from multiple streams in parallel.
|
||||
The table’s `read` method can return a `Pipe` consisting of multiple `Processors`. These `Processors` can read from a table in parallel.
|
||||
Then, you can connect these processors with various other transformations (such as expression evaluation or filtering), which can be calculated independently.
|
||||
And then, create a `QueryPipeline` on top of them, and execute it via `PipelineExecutor`.
|
||||
|
||||
There are also `TableFunction`s. These are functions that return a temporary `IStorage` object to use in the `FROM` clause of a query.
|
||||
|
||||
@ -98,9 +106,19 @@ A hand-written recursive descent parser parses a query. For example, `ParserSele
|
||||
|
||||
## Interpreters {#interpreters}
|
||||
|
||||
Interpreters are responsible for creating the query execution pipeline from an `AST`. There are simple interpreters, such as `InterpreterExistsQuery` and `InterpreterDropQuery`, or the more sophisticated `InterpreterSelectQuery`. The query execution pipeline is a combination of block input or output streams. For example, the result of interpreting the `SELECT` query is the `IBlockInputStream` to read the result set from; the result of the `INSERT` query is the `IBlockOutputStream` to write data for insertion to, and the result of interpreting the `INSERT SELECT` query is the `IBlockInputStream` that returns an empty result set on the first read, but that copies data from `SELECT` to `INSERT` at the same time.
|
||||
Interpreters are responsible for creating the query execution pipeline from an AST. There are simple interpreters, such as `InterpreterExistsQuery` and `InterpreterDropQuery`, as well as the more sophisticated `InterpreterSelectQuery`.
|
||||
|
||||
`InterpreterSelectQuery` uses `ExpressionAnalyzer` and `ExpressionActions` machinery for query analysis and transformations. This is where most rule-based query optimizations are done. `ExpressionAnalyzer` is quite messy and should be rewritten: various query transformations and optimizations should be extracted to separate classes to allow modular transformations of query.
|
||||
The query execution pipeline is a combination of processors that can consume and produce chunks (sets of columns with specific types).
|
||||
A processor communicates via ports and can have multiple input ports and multiple output ports.
|
||||
A more detailed description can be found in [src/Processors/IProcessor.h](https://github.com/ClickHouse/ClickHouse/blob/master/src/Processors/IProcessor.h).
|
||||
|
||||
For example, the result of interpreting the `SELECT` query is a "pulling" `QueryPipeline` which has a special output port to read the result set from.
|
||||
The result of the `INSERT` query is a "pushing" `QueryPipeline` with an input port to write data for insertion.
|
||||
And the result of interpreting the `INSERT SELECT` query is a "completed" `QueryPipeline` that has no inputs or outputs but copies data from `SELECT` to `INSERT` simultaneously.
|
||||
|
||||
`InterpreterSelectQuery` uses `ExpressionAnalyzer` and `ExpressionActions` machinery for query analysis and transformations. This is where most rule-based query optimizations are performed. `ExpressionAnalyzer` is quite messy and should be rewritten: various query transformations and optimizations should be extracted into separate classes to allow for modular transformations of the query.
|
||||
|
||||
To address current problems that exist in interpreters, a new `InterpreterSelectQueryAnalyzer` is being developed. It is a new version of `InterpreterSelectQuery` that does not use `ExpressionAnalyzer` and introduces an additional abstraction level between `AST` and `QueryPipeline` called `QueryTree`. It is not production-ready yet, but it can be tested with the `allow_experimental_analyzer` flag.
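For example, assuming a build where the analyzer is available, it can be enabled per session:

```sql
SET allow_experimental_analyzer = 1;

-- EXPLAIN QUERY TREE shows the intermediate QueryTree representation
-- built between the AST and the QueryPipeline.
EXPLAIN QUERY TREE
SELECT sum(number)
FROM numbers(10)
WHERE number > 3;
```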
|
||||
|
||||
## Functions {#functions}
|
||||
|
||||
|
@ -23,43 +23,34 @@ Create a fork of ClickHouse repository. To do that please click on the “fork
|
||||
|
||||
The development process consists of first committing the intended changes into your fork of ClickHouse and then creating a “pull request” for these changes to be accepted into the main repository (ClickHouse/ClickHouse).
|
||||
|
||||
To work with git repositories, please install `git`.
|
||||
|
||||
To do that in Ubuntu you would run in the command line terminal:
|
||||
To work with Git repositories, please install `git`. To do that in Ubuntu you would run in the command line terminal:
|
||||
|
||||
sudo apt update
|
||||
sudo apt install git
|
||||
|
||||
A brief manual on using Git can be found here: https://education.github.com/git-cheat-sheet-education.pdf.
|
||||
For a detailed manual on Git see https://git-scm.com/book/en/v2.
|
||||
A brief manual on using Git can be found [here](https://education.github.com/git-cheat-sheet-education.pdf).
|
||||
For a detailed manual on Git see [here](https://git-scm.com/book/en/v2).
|
||||
|
||||
## Cloning a Repository to Your Development Machine {#cloning-a-repository-to-your-development-machine}
|
||||
|
||||
Next, you need to download the source files onto your working machine. This is called “to clone a repository” because it creates a local copy of the repository on your working machine.
|
||||
|
||||
In the command line terminal run:
|
||||
Run in your terminal:
|
||||
|
||||
git clone --shallow-submodules git@github.com:your_github_username/ClickHouse.git
|
||||
git clone git@github.com:your_github_username/ClickHouse.git # replace placeholder with your GitHub user name
|
||||
cd ClickHouse
|
||||
|
||||
Or (if you'd like to use sparse checkout for submodules and avoid checking out unneeded files):
|
||||
This command will create a directory `ClickHouse/` containing the source code of ClickHouse. If you specify a custom checkout directory (after the URL), it is important that this path does not contain whitespaces as it may lead to problems with the build system.
|
||||
|
||||
git clone git@github.com:your_github_username/ClickHouse.git
|
||||
cd ClickHouse
|
||||
./contrib/update-submodules.sh
|
||||
To make library dependencies available for the build, the ClickHouse repository uses Git submodules, i.e. references to external repositories. These are not checked out by default. To do so, you can either
|
||||
|
||||
Note: please, substitute *your_github_username* with what is appropriate!
|
||||
- run `git clone` with option `--recurse-submodules`,
|
||||
|
||||
This command will create a directory `ClickHouse` containing the working copy of the project.
|
||||
- if `git clone` did not check out submodules, run `git submodule update --init --jobs <N>` (e.g. `<N> = 12` to parallelize the checkout) to achieve the same as the previous alternative, or
|
||||
|
||||
It is important that the path to the working directory contains no whitespaces as it may lead to problems with running the build system.
|
||||
- if `git clone` did not check out submodules and you like to use [sparse](https://github.blog/2020-01-17-bring-your-monorepo-down-to-size-with-sparse-checkout/) and [shallow](https://github.blog/2020-12-21-get-up-to-speed-with-partial-clone-and-shallow-clone/) submodule checkout to omit unneeded files and history in submodules to save space (ca. 5 GB instead of ca. 15 GB), run `./contrib/update-submodules.sh`. Not really recommended as it generally makes working with submodules less convenient and slower.
|
||||
|
||||
Please note that ClickHouse repository uses `submodules`. That is what the references to additional repositories are called (i.e. external libraries on which the project depends). It means that when cloning the repository you need to specify the `--recursive` flag as in the example above. If the repository has been cloned without submodules, to download them you need to run the following:
|
||||
|
||||
git submodule init
|
||||
git submodule update
|
||||
|
||||
You can check the status with the command: `git submodule status`.
|
||||
You can check the Git status with the command: `git submodule status`.
|
||||
|
||||
If you get the following error message:
|
||||
|
||||
@ -83,36 +74,6 @@ You can also add original ClickHouse repo address to your local repository to pu
|
||||
|
||||
After successfully running this command you will be able to pull updates from the main ClickHouse repo by running `git pull upstream master`.
|
||||
|
||||
### Working with Submodules {#working-with-submodules}
|
||||
|
||||
Working with submodules in git could be painful. Next commands will help to manage it:
|
||||
|
||||
# ! each command accepts
|
||||
# Update remote URLs for submodules. Barely rare case
|
||||
git submodule sync
|
||||
# Add new submodules
|
||||
git submodule init
|
||||
# Update existing submodules to the current state
|
||||
git submodule update
|
||||
# Two last commands could be merged together
|
||||
git submodule update --init
|
||||
|
||||
The next commands would help you to reset all submodules to the initial state (!WARNING! - any changes inside will be deleted):
|
||||
|
||||
# Synchronizes submodules' remote URL with .gitmodules
|
||||
git submodule sync
|
||||
# Update the registered submodules with initialize not yet initialized
|
||||
git submodule update --init
|
||||
# Reset all changes done after HEAD
|
||||
git submodule foreach git reset --hard
|
||||
# Clean files from .gitignore
|
||||
git submodule foreach git clean -xfd
|
||||
# Repeat last 4 commands for all submodule
|
||||
git submodule foreach git submodule sync
|
||||
git submodule foreach git submodule update --init
|
||||
git submodule foreach git submodule foreach git reset --hard
|
||||
git submodule foreach git submodule foreach git clean -xfd
|
||||
|
||||
## Build System {#build-system}
|
||||
|
||||
ClickHouse uses CMake and Ninja for building.
|
||||
|
@ -345,7 +345,7 @@ struct ExtractDomain
|
||||
**7.** For abstract classes (interfaces) you can add the `I` prefix.
|
||||
|
||||
``` cpp
|
||||
class IBlockInputStream
|
||||
class IProcessor
|
||||
```
|
||||
|
||||
**8.** If you use a variable locally, you can use the short name.
|
||||
|
@ -2,9 +2,10 @@
|
||||
slug: /en/engines/table-engines/integrations/materialized-postgresql
|
||||
sidebar_position: 130
|
||||
sidebar_label: MaterializedPostgreSQL
|
||||
title: MaterializedPostgreSQL
|
||||
---
|
||||
|
||||
# [experimental] MaterializedPostgreSQL
|
||||
|
||||
Creates a ClickHouse table with an initial data dump of a PostgreSQL table and starts the replication process, i.e. it executes a background job to apply new changes as they happen on the PostgreSQL table in the remote PostgreSQL database.

If more than one table is required, it is highly recommended to use the [MaterializedPostgreSQL](../../../engines/database-engines/materialized-postgresql.md) database engine instead of the table engine, together with the `materialized_postgresql_tables_list` setting, which specifies the tables to be replicated (it will also be possible to add a database `schema`). This is much better in terms of CPU and results in fewer connections and fewer replication slots inside the remote PostgreSQL database.
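A minimal creation sketch (host, credentials, and object names below are placeholders):

```sql
CREATE TABLE postgresql_replica (key UInt64, value UInt64)
ENGINE = MaterializedPostgreSQL('postgres-host:5432', 'postgres_database', 'postgresql_table', 'postgres_user', 'postgres_password')
PRIMARY KEY key;
```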
|
||||
|
@ -46,6 +46,11 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] AS [db2.]name2
|
||||
|
||||
`sharding_key` - (optionally) sharding key
|
||||
|
||||
Specifying the `sharding_key` is necessary for the following:
|
||||
|
||||
- For `INSERTs` into a distributed table (as the table engine needs the `sharding_key` to determine how to split the data). However, if `insert_distributed_one_random_shard` setting is enabled, then `INSERTs` do not need the sharding key
|
||||
- For use with `optimize_skip_unused_shards` as the `sharding_key` is necessary to determine what shards should be queried
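For example, a sketch of a distributed table that uses `rand()` as the sharding key (cluster and table names are placeholders):

```sql
-- Rows written to the distributed table are routed to the shards of
-- 'my_cluster' according to the value of the sharding key (here: rand()).
CREATE TABLE hits_distributed AS hits_local
ENGINE = Distributed(my_cluster, default, hits_local, rand());
```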
|
||||
|
||||
#### policy_name
|
||||
|
||||
`policy_name` - (optionally) policy name, it will be used to store temporary files for background send
|
||||
|
@ -1,5 +1,4 @@
|
||||
---
|
||||
slug: /en/getting-started/example-datasets/wikistat
|
||||
sidebar_label: WikiStat
|
||||
---
|
||||
|
||||
@ -41,7 +40,8 @@ CREATE TABLE wikistat
|
||||
project LowCardinality(String),
|
||||
subproject LowCardinality(String),
|
||||
path String CODEC(ZSTD(3)),
|
||||
hits UInt64 CODEC(ZSTD(3))
|
||||
hits UInt64 CODEC(ZSTD(3)),
|
||||
size UInt64 CODEC(ZSTD(3))
|
||||
)
|
||||
ENGINE = MergeTree
|
||||
ORDER BY (path, time);
|
||||
|
@ -2156,7 +2156,7 @@ To exchange data with Hadoop, you can use [HDFS table engine](/docs/en/engines/t
|
||||
- [input_format_parquet_local_file_min_bytes_for_seek](/docs/en/operations/settings/settings-formats.md/#input_format_parquet_local_file_min_bytes_for_seek) - min bytes required for local read (file) to do seek, instead of read with ignore in Parquet input format. Default value - `8192`.
|
||||
- [output_format_parquet_fixed_string_as_fixed_byte_array](/docs/en/operations/settings/settings-formats.md/#output_format_parquet_fixed_string_as_fixed_byte_array) - use Parquet FIXED_LENGTH_BYTE_ARRAY type instead of Binary/String for FixedString columns. Default value - `true`.
|
||||
- [output_format_parquet_version](/docs/en/operations/settings/settings-formats.md/#output_format_parquet_version) - The version of Parquet format used in output format. Default value - `2.latest`.
|
||||
- [output_format_parquet_compression_method](/docs/en/operations/settings/settings-formats.md/#output_format_parquet_compression_method) - compression method used in output Parquet format. Default value - `snappy`.
|
||||
- [output_format_parquet_compression_method](/docs/en/operations/settings/settings-formats.md/#output_format_parquet_compression_method) - compression method used in output Parquet format. Default value - `lz4`.
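For example, a sketch of exporting data to Parquet with a non-default compression method (the file name is a placeholder):

```sql
SET output_format_parquet_compression_method = 'zstd';

SELECT *
FROM system.numbers
LIMIT 1000
INTO OUTFILE 'numbers.parquet'
FORMAT Parquet;
```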
|
||||
|
||||
## ParquetMetadata {data-format-parquet-metadata}
|
||||
|
||||
|
@ -438,7 +438,7 @@ $ curl -v 'http://localhost:8123/predefined_query'
|
||||
< X-ClickHouse-Query-Id: 96fe0052-01e6-43ce-b12a-6b7370de6e8a
|
||||
< X-ClickHouse-Format: Template
|
||||
< X-ClickHouse-Timezone: Asia/Shanghai
|
||||
< Keep-Alive: timeout=3
|
||||
< Keep-Alive: timeout=10
|
||||
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
|
||||
<
|
||||
# HELP "Query" "Number of executing queries"
|
||||
@ -603,7 +603,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/hi'
|
||||
< Connection: Keep-Alive
|
||||
< Content-Type: text/html; charset=UTF-8
|
||||
< Transfer-Encoding: chunked
|
||||
< Keep-Alive: timeout=3
|
||||
< Keep-Alive: timeout=10
|
||||
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
|
||||
<
|
||||
* Connection #0 to host localhost left intact
|
||||
@ -643,7 +643,7 @@ $ curl -v -H 'XXX:xxx' 'http://localhost:8123/get_config_static_handler'
|
||||
< Connection: Keep-Alive
|
||||
< Content-Type: text/plain; charset=UTF-8
|
||||
< Transfer-Encoding: chunked
|
||||
< Keep-Alive: timeout=3
|
||||
< Keep-Alive: timeout=10
|
||||
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
|
||||
<
|
||||
* Connection #0 to host localhost left intact
|
||||
@ -695,7 +695,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_absolute_path_static_handler'
|
||||
< Connection: Keep-Alive
|
||||
< Content-Type: text/html; charset=UTF-8
|
||||
< Transfer-Encoding: chunked
|
||||
< Keep-Alive: timeout=3
|
||||
< Keep-Alive: timeout=10
|
||||
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
|
||||
<
|
||||
<html><body>Absolute Path File</body></html>
|
||||
@ -714,7 +714,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_relative_path_static_handler'
|
||||
< Connection: Keep-Alive
|
||||
< Content-Type: text/html; charset=UTF-8
|
||||
< Transfer-Encoding: chunked
|
||||
< Keep-Alive: timeout=3
|
||||
< Keep-Alive: timeout=10
|
||||
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
|
||||
<
|
||||
<html><body>Relative Path File</body></html>
|
||||
|
@ -74,6 +74,7 @@ ClickHouse Inc does **not** maintain the libraries listed below and hasn’t don
|
||||
### Elixir
|
||||
- [clickhousex](https://github.com/appodeal/clickhousex/)
|
||||
- [pillar](https://github.com/sofakingworld/pillar)
|
||||
- [ecto_ch](https://github.com/plausible/ecto_ch)
|
||||
### Nim
|
||||
- [nim-clickhouse](https://github.com/leonardoce/nim-clickhouse)
|
||||
### Haskell
|
||||
|
@ -1,5 +1,4 @@
|
||||
---
|
||||
slug: /en/operations/optimizing-performance/profile-guided-optimization
|
||||
sidebar_position: 54
|
||||
sidebar_label: Profile Guided Optimization (PGO)
|
||||
---
|
||||
|
@ -11,7 +11,8 @@ ClickHouse runs sampling profiler that allows analyzing query execution. Using p
|
||||
|
||||
Query profiler is automatically enabled in ClickHouse Cloud and you can run a sample query as follows
|
||||
|
||||
:::note If you are running the following query in ClickHouse Cloud, make sure to change `FROM system.trace_log` to `FROM clusterAllReplicas(default, system.trace_log)` to select from all nodes of the cluster :::
|
||||
:::note If you are running the following query in ClickHouse Cloud, make sure to change `FROM system.trace_log` to `FROM clusterAllReplicas(default, system.trace_log)` to select from all nodes of the cluster
|
||||
:::
|
||||
|
||||
``` sql
|
||||
SELECT
|
||||
|
@ -214,7 +214,7 @@ Max consecutive resolving failures before dropping a host from ClickHouse DNS ca
|
||||
|
||||
Type: UInt32
|
||||
|
||||
Default: 1024
|
||||
Default: 10
|
||||
|
||||
|
||||
## index_mark_cache_policy
|
||||
@ -2427,6 +2427,8 @@ This section contains the following parameters:
|
||||
* hostname_levenshtein_distance - just like nearest_hostname, but it compares hostname in a levenshtein distance manner.
|
||||
* first_or_random - selects the first ZooKeeper node, if it's not available then randomly selects one of remaining ZooKeeper nodes.
|
||||
* round_robin - selects the first ZooKeeper node, if reconnection happens selects the next.
|
||||
- `use_compression` — If set to true, enables compression in Keeper protocol.
|
||||
|
||||
|
||||
**Example configuration**
|
||||
|
||||
|
@ -897,6 +897,12 @@ Use DOS/Windows-style line separator (CRLF) in CSV instead of Unix style (LF).
|
||||
|
||||
Disabled by default.
|
||||
|
||||
### input_format_csv_allow_cr_end_of_line {#input_format_csv_allow_cr_end_of_line}
|
||||
|
||||
If set to true, CR (\\r) is allowed at the end of a line even when it is not followed by LF (\\n).
|
||||
|
||||
Disabled by default.
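A possible way to try this setting is via the `format` table function (a sketch; the inline data is only illustrative):

```sql
-- The trailing \r is accepted as the end of each line when the setting is enabled.
SELECT *
FROM format(CSV, 'hello,1\rworld,2\r')
SETTINGS input_format_csv_allow_cr_end_of_line = 1;
```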
|
||||
|
||||
### input_format_csv_enum_as_number {#input_format_csv_enum_as_number}
|
||||
|
||||
When enabled, always treat enum values as enum ids for CSV input format. It's recommended to enable this setting if data contains only enum ids to optimize enum parsing.
|
||||
|
@ -3310,22 +3310,11 @@ Possible values:
|
||||
|
||||
Default value: `0`.
|
||||
|
||||
## use_mysql_types_in_show_columns {#use_mysql_types_in_show_columns}
|
||||
|
||||
Show the names of MySQL data types corresponding to ClickHouse data types in [SHOW COLUMNS](../../sql-reference/statements/show.md#show_columns).
|
||||
|
||||
Possible values:
|
||||
|
||||
- 0 - Show names of native ClickHouse data types.
|
||||
- 1 - Show names of MySQL data types corresponding to ClickHouse data types.
|
||||
|
||||
Default value: `0`.
|
||||
|
||||
## mysql_map_string_to_text_in_show_columns {#mysql_map_string_to_text_in_show_columns}
|
||||
|
||||
When enabled, [String](../../sql-reference/data-types/string.md) ClickHouse data type will be displayed as `TEXT` in [SHOW COLUMNS](../../sql-reference/statements/show.md#show_columns).
|
||||
|
||||
Has effect only when [use_mysql_types_in_show_columns](#use_mysql_types_in_show_columns) is enabled.
|
||||
Has an effect only when the connection is made through the MySQL wire protocol.
|
||||
|
||||
- 0 - Use `BLOB`.
|
||||
- 1 - Use `TEXT`.
|
||||
@ -3336,7 +3325,7 @@ Default value: `0`.
|
||||
|
||||
When enabled, [FixedString](../../sql-reference/data-types/fixedstring.md) ClickHouse data type will be displayed as `TEXT` in [SHOW COLUMNS](../../sql-reference/statements/show.md#show_columns).
|
||||
|
||||
Has effect only when [use_mysql_types_in_show_columns](#use_mysql_types_in_show_columns) is enabled.
|
||||
Has an effect only when the connection is made through the MySQL wire protocol.
|
||||
|
||||
- 0 - Use `BLOB`.
|
||||
- 1 - Use `TEXT`.
|
||||
@ -3954,6 +3943,17 @@ Possible values:
|
||||
|
||||
Default value: `''`.
|
||||
|
||||
## preferred_optimize_projection_name {#preferred_optimize_projection_name}
|
||||
|
||||
If set to a non-empty string, ClickHouse will try to apply the specified projection in the query.
|
||||
|
||||
|
||||
Possible values:
|
||||
|
||||
- string: name of preferred projection
|
||||
|
||||
Default value: `''`.
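For example (a sketch; the table and projection names are placeholders):

```sql
-- Assuming the table has a projection named 'sales_by_region_projection'.
SELECT region, sum(amount)
FROM sales
GROUP BY region
SETTINGS preferred_optimize_projection_name = 'sales_by_region_projection';
```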
|
||||
|
||||
## alter_sync {#alter-sync}
|
||||
|
||||
Allows to set up waiting for actions to be executed on replicas by [ALTER](../../sql-reference/statements/alter/index.md), [OPTIMIZE](../../sql-reference/statements/optimize.md) or [TRUNCATE](../../sql-reference/statements/truncate.md) queries.
|
||||
@ -4812,3 +4812,10 @@ LIFETIME(MIN 0 MAX 3600)
|
||||
LAYOUT(COMPLEX_KEY_HASHED_ARRAY())
|
||||
SETTINGS(dictionary_use_async_executor=1, max_threads=8);
|
||||
```
|
||||
|
||||
## storage_metadata_write_full_object_key {#storage_metadata_write_full_object_key}
|
||||
|
||||
When set to `true`, the metadata files are written with the `VERSION_FULL_OBJECT_KEY` format version. With that format, full object storage key names are written to the metadata files.

When set to `false`, the metadata files are written with the previous format version, `VERSION_INLINE_DATA`. With that format, only suffixes of object storage key names are written to the metadata files. The prefix for all object storage key names is set in the configuration files in the `storage_configuration.disks` section.
|
||||
|
||||
Default value: `false`.
|
||||
|
@ -18,12 +18,14 @@ SHOW TABLES FROM information_schema;
|
||||
│ KEY_COLUMN_USAGE │
|
||||
│ REFERENTIAL_CONSTRAINTS │
|
||||
│ SCHEMATA │
|
||||
│ STATISTICS │
|
||||
│ TABLES │
|
||||
│ VIEWS │
|
||||
│ columns │
|
||||
│ key_column_usage │
|
||||
│ referential_constraints │
|
||||
│ schemata │
|
||||
│ statistics │
|
||||
│ tables │
|
||||
│ views │
|
||||
└─────────────────────────┘
|
||||
@ -32,11 +34,12 @@ SHOW TABLES FROM information_schema;
|
||||
`INFORMATION_SCHEMA` contains the following views:
|
||||
|
||||
- [COLUMNS](#columns)
|
||||
- [SCHEMATA](#schemata)
|
||||
- [TABLES](#tables)
|
||||
- [VIEWS](#views)
|
||||
- [KEY_COLUMN_USAGE](#key_column_usage)
|
||||
- [REFERENTIAL_CONSTRAINTS](#referential_constraints)
|
||||
- [SCHEMATA](#schemata)
|
||||
- [STATISTICS](#statistics)
|
||||
- [TABLES](#tables)
|
||||
- [VIEWS](#views)
|
||||
|
||||
Case-insensitive equivalent views, e.g. `INFORMATION_SCHEMA.columns` are provided for reasons of compatibility with other databases. The same applies to all the columns in these views - both lowercase (for example, `table_name`) and uppercase (`TABLE_NAME`) variants are provided.
|
||||
|
||||
@ -372,3 +375,28 @@ Columns:
|
||||
- `delete_rule` ([String](../../sql-reference/data-types/string.md)) — Currently unused.
|
||||
- `table_name` ([String](../../sql-reference/data-types/string.md)) — Currently unused.
|
||||
- `referenced_table_name` ([String](../../sql-reference/data-types/string.md)) — Currently unused.
|
||||
|
||||
## STATISTICS {#statistics}
|
||||
|
||||
Provides information about table indexes. Currently it returns an empty result (no rows), which is just enough to provide compatibility with 3rd-party tools like Tableau Online.
|
||||
|
||||
Columns:
|
||||
|
||||
- `table_catalog` ([String](../../sql-reference/data-types/string.md)) — Currently unused.
|
||||
- `table_schema` ([String](../../sql-reference/data-types/string.md)) — Currently unused.
|
||||
- `table_name` ([String](../../sql-reference/data-types/string.md)) — Currently unused.
|
||||
- `non_unique` ([Int32](../../sql-reference/data-types/int-uint.md)) — Currently unused.
|
||||
- `index_schema` ([String](../../sql-reference/data-types/string.md)) — Currently unused.
|
||||
- `index_name` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — Currently unused.
|
||||
- `seq_in_index` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Currently unused.
|
||||
- `column_name` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — Currently unused.
|
||||
- `collation` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — Currently unused.
|
||||
- `cardinality` ([Nullable](../../sql-reference/data-types/nullable.md)([Int64](../../sql-reference/data-types/int-uint.md))) — Currently unused.
|
||||
- `sub_part` ([Nullable](../../sql-reference/data-types/nullable.md)([Int64](../../sql-reference/data-types/int-uint.md))) — Currently unused.
|
||||
- `packed` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — Currently unused.
|
||||
- `nullable` ([String](../../sql-reference/data-types/string.md)) — Currently unused.
|
||||
- `index_type` ([String](../../sql-reference/data-types/string.md)) — Currently unused.
|
||||
- `comment` ([String](../../sql-reference/data-types/string.md)) — Currently unused.
|
||||
- `index_comment` ([String](../../sql-reference/data-types/string.md)) — Currently unused.
|
||||
- `is_visible` ([String](../../sql-reference/data-types/string.md)) — Currently unused.
|
||||
- `expression` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — Currently unused.
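For example, a compatibility probe against this view simply returns no rows:

```sql
-- Returns an empty result set (0 rows), which is sufficient for compatibility checks.
SELECT table_name, index_name, column_name
FROM information_schema.statistics;
```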
|
||||
|
@ -35,27 +35,25 @@ WITH arrayMap(x -> demangle(addressToSymbol(x)), trace) AS all SELECT thread_nam
|
||||
``` text
|
||||
Row 1:
|
||||
──────
|
||||
thread_name: clickhouse-serv
|
||||
|
||||
thread_id: 686
|
||||
query_id: 1a11f70b-626d-47c1-b948-f9c7b206395d
|
||||
res: sigqueue
|
||||
DB::StorageSystemStackTrace::fillData(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::Context const&, DB::SelectQueryInfo const&) const
|
||||
DB::IStorageSystemOneBlock<DB::StorageSystemStackTrace>::read(std::__1::vector<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::allocator<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > > > const&, DB::SelectQueryInfo const&, DB::Context const&, DB::QueryProcessingStage::Enum, unsigned long, unsigned int)
|
||||
DB::InterpreterSelectQuery::executeFetchColumns(DB::QueryProcessingStage::Enum, DB::QueryPipeline&, std::__1::shared_ptr<DB::PrewhereInfo> const&, std::__1::vector<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::allocator<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > > > const&)
|
||||
DB::InterpreterSelectQuery::executeImpl(DB::QueryPipeline&, std::__1::shared_ptr<DB::IBlockInputStream> const&, std::__1::optional<DB::Pipe>)
|
||||
DB::InterpreterSelectQuery::execute()
|
||||
DB::InterpreterSelectWithUnionQuery::execute()
|
||||
DB::executeQueryImpl(char const*, char const*, DB::Context&, bool, DB::QueryProcessingStage::Enum, bool, DB::ReadBuffer*)
|
||||
DB::executeQuery(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, DB::Context&, bool, DB::QueryProcessingStage::Enum, bool)
|
||||
DB::TCPHandler::runImpl()
|
||||
DB::TCPHandler::run()
|
||||
Poco::Net::TCPServerConnection::start()
|
||||
Poco::Net::TCPServerDispatcher::run()
|
||||
Poco::PooledThread::run()
|
||||
Poco::ThreadImpl::runnableEntry(void*)
|
||||
start_thread
|
||||
__clone
|
||||
thread_name: QueryPipelineEx
|
||||
thread_id: 743490
|
||||
query_id: dc55a564-febb-4e37-95bb-090ef182c6f1
|
||||
res: memcpy
|
||||
large_ralloc
|
||||
arena_ralloc
|
||||
do_rallocx
|
||||
Allocator<true, true>::realloc(void*, unsigned long, unsigned long, unsigned long)
|
||||
HashTable<unsigned long, HashMapCell<unsigned long, char*, HashCRC32<unsigned long>, HashTableNoState, PairNoInit<unsigned long, char*>>, HashCRC32<unsigned long>, HashTableGrowerWithPrecalculation<8ul>, Allocator<true, true>>::resize(unsigned long, unsigned long)
|
||||
void DB::Aggregator::executeImplBatch<false, false, true, DB::AggregationMethodOneNumber<unsigned long, HashMapTable<unsigned long, HashMapCell<unsigned long, char*, HashCRC32<unsigned long>, HashTableNoState, PairNoInit<unsigned long, char*>>, HashCRC32<unsigned long>, HashTableGrowerWithPrecalculation<8ul>, Allocator<true, true>>, true, false>>(DB::AggregationMethodOneNumber<unsigned long, HashMapTable<unsigned long, HashMapCell<unsigned long, char*, HashCRC32<unsigned long>, HashTableNoState, PairNoInit<unsigned long, char*>>, HashCRC32<unsigned long>, HashTableGrowerWithPrecalculation<8ul>, Allocator<true, true>>, true, false>&, DB::AggregationMethodOneNumber<unsigned long, HashMapTable<unsigned long, HashMapCell<unsigned long, char*, HashCRC32<unsigned long>, HashTableNoState, PairNoInit<unsigned long, char*>>, HashCRC32<unsigned long>, HashTableGrowerWithPrecalculation<8ul>, Allocator<true, true>>, true, false>::State&, DB::Arena*, unsigned long, unsigned long, DB::Aggregator::AggregateFunctionInstruction*, bool, char*) const
|
||||
DB::Aggregator::executeImpl(DB::AggregatedDataVariants&, unsigned long, unsigned long, std::__1::vector<DB::IColumn const*, std::__1::allocator<DB::IColumn const*>>&, DB::Aggregator::AggregateFunctionInstruction*, bool, bool, char*) const
|
||||
DB::Aggregator::executeOnBlock(std::__1::vector<COW<DB::IColumn>::immutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::immutable_ptr<DB::IColumn>>>, unsigned long, unsigned long, DB::AggregatedDataVariants&, std::__1::vector<DB::IColumn const*, std::__1::allocator<DB::IColumn const*>>&, std::__1::vector<std::__1::vector<DB::IColumn const*, std::__1::allocator<DB::IColumn const*>>, std::__1::allocator<std::__1::vector<DB::IColumn const*, std::__1::allocator<DB::IColumn const*>>>>&, bool&) const
|
||||
DB::AggregatingTransform::work()
|
||||
DB::ExecutionThreadContext::executeTask()
|
||||
DB::PipelineExecutor::executeStepImpl(unsigned long, std::__1::atomic<bool>*)
|
||||
void std::__1::__function::__policy_invoker<void ()>::__call_impl<std::__1::__function::__default_alloc_func<DB::PipelineExecutor::spawnThreads()::$_0, void ()>>(std::__1::__function::__policy_storage const*)
|
||||
ThreadPoolImpl<ThreadFromGlobalPoolImpl<false>>::worker(std::__1::__list_iterator<ThreadFromGlobalPoolImpl<false>, void*>)
|
||||
void std::__1::__function::__policy_invoker<void ()>::__call_impl<std::__1::__function::__default_alloc_func<ThreadFromGlobalPoolImpl<false>::ThreadFromGlobalPoolImpl<void ThreadPoolImpl<ThreadFromGlobalPoolImpl<false>>::scheduleImpl<void>(std::__1::function<void ()>, Priority, std::__1::optional<unsigned long>, bool)::'lambda0'()>(void&&)::'lambda'(), void ()>>(std::__1::__function::__policy_storage const*)
|
||||
void* std::__1::__thread_proxy[abi:v15000]<std::__1::tuple<std::__1::unique_ptr<std::__1::__thread_struct, std::__1::default_delete<std::__1::__thread_struct>>, void ThreadPoolImpl<std::__1::thread>::scheduleImpl<void>(std::__1::function<void ()>, Priority, std::__1::optional<unsigned long>, bool)::'lambda0'()>>(void*)
|
||||
```
|
||||
|
||||
Getting filenames and line numbers in ClickHouse source code:
|
||||
|
35
docs/en/operations/system-tables/symbols.md
Normal file
@ -0,0 +1,35 @@
|
||||
---
|
||||
slug: /en/operations/system-tables/symbols
|
||||
---
|
||||
# symbols
|
||||
|
||||
Contains information for introspection of `clickhouse` binary. It requires the introspection privilege to access.
|
||||
This table is only useful for C++ experts and ClickHouse engineers.
|
||||
|
||||
Columns:
|
||||
|
||||
- `symbol` ([String](../../sql-reference/data-types/string.md)) — Symbol name in the binary. It is mangled. You can apply `demangle(symbol)` to obtain a readable name.
|
||||
- `address_begin` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Start address of the symbol in the binary.
|
||||
- `address_end` ([UInt64](../../sql-reference/data-types/int-uint.md)) — End address of the symbol in the binary.
|
||||
- `name` ([String](../../sql-reference/data-types/string.md)) — Alias for `event`.
|
||||
|
||||
**Example**
|
||||
|
||||
``` sql
|
||||
SELECT address_begin, address_end - address_begin AS size, demangle(symbol) FROM system.symbols ORDER BY size DESC LIMIT 10
|
||||
```
|
||||
|
||||
``` text
|
||||
┌─address_begin─┬─────size─┬─demangle(symbol)──────────────────────────────────────────────────────────────────┐
|
||||
│ 25000976 │ 29466000 │ icudt70_dat │
|
||||
│ 400605288 │ 2097272 │ arena_emap_global │
|
||||
│ 18760592 │ 1048576 │ CLD2::kQuadChrome1015_2 │
|
||||
│ 9807152 │ 884808 │ TopLevelDomainLookupHash::isValid(char const*, unsigned long)::wordlist │
|
||||
│ 57442432 │ 850608 │ llvm::X86Insts │
|
||||
│ 55682944 │ 681360 │ (anonymous namespace)::X86DAGToDAGISel::SelectCode(llvm::SDNode*)::MatcherTable │
|
||||
│ 55130368 │ 502840 │ (anonymous namespace)::X86InstructionSelector::getMatchTable() const::MatchTable0 │
|
||||
│ 402930616 │ 404032 │ qpl::ml::dispatcher::hw_dispatcher::get_instance()::instance │
|
||||
│ 274131872 │ 356795 │ DB::SettingsTraits::Accessor::instance()::$_0::operator()() const │
|
||||
│ 58293040 │ 249424 │ llvm::X86InstrNameData │
|
||||
└───────────────┴──────────┴───────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
@ -12,6 +12,7 @@ A client application to interact with clickhouse-keeper by its native protocol.
|
||||
- `-q QUERY`, `--query=QUERY` — Query to execute. If this parameter is not passed, `clickhouse-keeper-client` will start in interactive mode.
|
||||
- `-h HOST`, `--host=HOST` — Server host. Default value: `localhost`.
|
||||
- `-p N`, `--port=N` — Server port. Default value: 9181
|
||||
- `-c FILE_PATH`, `--config-file=FILE_PATH` — Set path of config file to get the connection string. Default value: `config.xml`.
|
||||
- `--connection-timeout=TIMEOUT` — Set connection timeout in seconds. Default value: 10s.
|
||||
- `--session-timeout=TIMEOUT` — Set session timeout in seconds. Default value: 10s.
|
||||
- `--operation-timeout=TIMEOUT` — Set operation timeout in seconds. Default value: 10s.
|
||||
|
@ -16,7 +16,7 @@ CREATE TABLE IF NOT EXISTS float_vs_decimal
|
||||
my_decimal Decimal64(3)
|
||||
)Engine=MergeTree ORDER BY tuple()
|
||||
|
||||
INSERT INTO float_vs_decimal SELECT round(canonicalRand(), 3) AS res, res FROM system.numbers LIMIT 1000000; # Generate 1 000 000 random number with 2 decimal places and store them as a float and as a decimal
|
||||
INSERT INTO float_vs_decimal SELECT round(randCanonical(), 3) AS res, res FROM system.numbers LIMIT 1000000; # Generate 1 000 000 random number with 2 decimal places and store them as a float and as a decimal
|
||||
|
||||
SELECT sum(my_float), sum(my_decimal) FROM float_vs_decimal;
|
||||
> 500279.56300000014 500279.563
|
||||
|
@ -2175,7 +2175,7 @@ Result:
|
||||
|
||||
## arrayRandomSample
|
||||
|
||||
Function `arrayRandomSample` returns a subset with `samples`-many random elements of an input array. If `samples` exceeds the size of the input array, the sample size is limited to the size of the array. In this case, all elements of the input array are returned, but the order is not guaranteed. The function can handle both flat arrays and nested arrays.
|
||||
Function `arrayRandomSample` returns a subset with `samples`-many random elements of an input array. If `samples` exceeds the size of the input array, the sample size is limited to the size of the array, i.e. all array elements are returned but their order is not guaranteed. The function can handle both flat arrays and nested arrays.
|
||||
|
||||
**Syntax**
|
||||
|
||||
@ -2185,13 +2185,15 @@ arrayRandomSample(arr, samples)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `arr` — The input array from which to sample elements. This may be flat or nested arrays.
|
||||
- `samples` — An unsigned integer specifying the number of elements to include in the random sample.
|
||||
- `arr` — The input array from which to sample elements. ([Array(T)](../data-types/array.md))
|
||||
- `samples` — The number of elements to include in the random sample ([UInt*](../data-types/int-uint.md))
|
||||
|
||||
**Returned Value**
|
||||
|
||||
- An array containing a random sample of elements from the input array.
|
||||
|
||||
Type: [Array](../data-types/array.md).
|
||||
|
||||
**Examples**
|
||||
|
||||
Query:
|
||||
@ -2201,9 +2203,10 @@ SELECT arrayRandomSample(['apple', 'banana', 'cherry', 'date'], 2) as res;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```
|
||||
┌─res────────────────┐
|
||||
│ ['banana','apple'] │
|
||||
│ ['cherry','apple'] │
|
||||
└────────────────────┘
|
||||
```
|
||||
|
||||
@ -2214,6 +2217,7 @@ SELECT arrayRandomSample([[1, 2], [3, 4], [5, 6]], 2) as res;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```
|
||||
┌─res───────────┐
|
||||
│ [[3,4],[5,6]] │
|
||||
@ -2222,24 +2226,12 @@ Result:
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT arrayRandomSample([1, 2, 3, 4, 5], 0) as res;
|
||||
```
|
||||
|
||||
Result:
|
||||
```
|
||||
┌─res─┐
|
||||
│ [] │
|
||||
└─────┘
|
||||
```
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT arrayRandomSample([1, 2, 3], 5) as res;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```
|
||||
┌─res─────┐
|
||||
│ [3,1,2] │
|
||||
|
@ -2766,9 +2766,11 @@ Result:
|
||||
|
||||
## fromUnixTimestamp
|
||||
|
||||
Function converts Unix timestamp to a calendar date and a time of a day. When there is only a single argument of [Integer](../../sql-reference/data-types/int-uint.md) type, it acts in the same way as [toDateTime](../../sql-reference/functions/type-conversion-functions.md#todatetime) and return [DateTime](../../sql-reference/data-types/datetime.md) type.
|
||||
This function converts a Unix timestamp to a calendar date and a time of a day.
|
||||
|
||||
fromUnixTimestamp uses MySQL datetime format style, refer to https://dev.mysql.com/doc/refman/8.0/en/date-and-time-functions.html#function_date-format.
|
||||
It can be called in two ways:
|
||||
|
||||
When given a single argument of type [Integer](../../sql-reference/data-types/int-uint.md), it returns a value of type [DateTime](../../sql-reference/data-types/datetime.md), i.e. behaves like [toDateTime](../../sql-reference/functions/type-conversion-functions.md#todatetime).
|
||||
|
||||
Alias: `FROM_UNIXTIME`.
|
||||
|
||||
@ -2786,14 +2788,16 @@ Result:
|
||||
└──────────────────────────────┘
|
||||
```
|
||||
|
||||
When there are two or three arguments, the first an [Integer](../../sql-reference/data-types/int-uint.md), [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md), the second a constant format string and the third an optional constant time zone string — it acts in the same way as [formatDateTime](#formatdatetime) and return [String](../../sql-reference/data-types/string.md#string) type.
|
||||
When given two or three arguments where the first argument is a value of type [Integer](../../sql-reference/data-types/int-uint.md), [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md), the second argument is a constant format string and the third argument is an optional constant time zone string, the function returns a value of type [String](../../sql-reference/data-types/string.md#string), i.e. it behaves like [formatDateTime](#formatdatetime). In this case, [MySQL's datetime format style](https://dev.mysql.com/doc/refman/8.0/en/date-and-time-functions.html#function_date-format) is used.
|
||||
|
||||
For example:
|
||||
**Example:**
|
||||
|
||||
```sql
|
||||
SELECT fromUnixTimestamp(1234334543, '%Y-%m-%d %R:%S') AS DateTime;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```text
|
||||
┌─DateTime────────────┐
|
||||
│ 2009-02-11 14:42:23 │
|
||||
@ -2806,19 +2810,20 @@ SELECT fromUnixTimestamp(1234334543, '%Y-%m-%d %R:%S') AS DateTime;
|
||||
|
||||
## fromUnixTimestampInJodaSyntax
|
||||
|
||||
Similar to fromUnixTimestamp, except that it formats time in Joda style instead of MySQL style. Refer to https://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html.
|
||||
Same as [fromUnixTimestamp](#fromUnixTimestamp) but when called in the second way (two or three arguments), the formatting is performed using [Joda style](https://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html) instead of MySQL style.
|
||||
|
||||
**Example:**
|
||||
|
||||
``` sql
|
||||
SELECT fromUnixTimestampInJodaSyntax(1669804872, 'yyyy-MM-dd HH:mm:ss', 'UTC');
|
||||
SELECT fromUnixTimestampInJodaSyntax(1234334543, 'yyyy-MM-dd HH:mm:ss', 'UTC') AS DateTime;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```
|
||||
┌─fromUnixTimestampInJodaSyntax(1669804872, 'yyyy-MM-dd HH:mm:ss', 'UTC')────┐
|
||||
│ 2022-11-30 10:41:12 │
|
||||
└────────────────────────────────────────────────────────────────────────────┘
|
||||
┌─DateTime────────────┐
|
||||
│ 2009-02-11 06:42:23 │
|
||||
└─────────────────────┘
|
||||
```
|
||||
|
||||
## toModifiedJulianDay
|
||||
|
@ -2760,10 +2760,13 @@ message Root
|
||||
|
||||
Returns a formatted, possibly multi-line, version of the given SQL query.
|
||||
|
||||
Throws an exception if the query is not well-formed. To return `NULL` instead, function `formatQueryOrNull()` may be used.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
formatQuery(query)
|
||||
formatQueryOrNull(query)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
@ -2796,10 +2799,13 @@ WHERE (a > 3) AND (b < 3) │
|
||||
|
||||
Like formatQuery() but the returned formatted string contains no line breaks.
|
||||
|
||||
Throws an exception if the query is not well-formed. To return `NULL` instead, function `formatQuerySingleLineOrNull()` may be used.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
formatQuerySingleLine(query)
|
||||
formatQuerySingleLineOrNull(query)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
@ -107,11 +107,7 @@ round(3.65, 1) = 3.6
|
||||
|
||||
Rounds a number to a specified decimal position.
|
||||
|
||||
- If the rounding number is halfway between two numbers, the function uses banker’s rounding.
|
||||
|
||||
Banker's rounding is a method of rounding fractional numbers. When the rounding number is halfway between two numbers, it's rounded to the nearest even digit at the specified decimal position. For example: 3.5 rounds up to 4, 2.5 rounds down to 2.
|
||||
|
||||
It's the default rounding method for floating point numbers defined in [IEEE 754](https://en.wikipedia.org/wiki/IEEE_754#Roundings_to_nearest). The [round](#rounding_functions-round) function performs the same rounding for floating point numbers. The `roundBankers` function also rounds integers the same way, for example, `roundBankers(45, -1) = 40`.
|
||||
- If the rounding number is halfway between two numbers, the function uses banker’s rounding. Banker's rounding is a method of rounding fractional numbers. When the rounding number is halfway between two numbers, it's rounded to the nearest even digit at the specified decimal position. For example: 3.5 rounds up to 4, 2.5 rounds down to 2. It's the default rounding method for floating point numbers defined in [IEEE 754](https://en.wikipedia.org/wiki/IEEE_754#Roundings_to_nearest). The [round](#rounding_functions-round) function performs the same rounding for floating point numbers. The `roundBankers` function also rounds integers the same way, for example, `roundBankers(45, -1) = 40`.
|
||||
|
||||
- In other cases, the function rounds numbers to the nearest integer.
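For example, the halfway cases described above behave as follows:

```sql
SELECT
    roundBankers(3.5),    -- 4  (3.5 is halfway; 4 is the nearest even digit)
    roundBankers(2.5),    -- 2
    roundBankers(45, -1); -- 40
```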
|
||||
|
||||
|
@ -1371,6 +1371,86 @@ Result:
|
||||
└──────────────────┘
|
||||
```
|
||||
|
||||
## byteHammingDistance
|
||||
|
||||
Calculates the [hamming distance](https://en.wikipedia.org/wiki/Hamming_distance) between two byte strings.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
byteHammingDistance(string1, string2)
|
||||
```
|
||||
|
||||
**Examples**
|
||||
|
||||
``` sql
|
||||
SELECT byteHammingDistance('karolin', 'kathrin');
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─byteHammingDistance('karolin', 'kathrin')─┐
|
||||
│ 3 │
|
||||
└───────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
Alias: mismatches
|
||||
|
||||
## stringJaccardIndex
|
||||
|
||||
Calculates the [Jaccard similarity index](https://en.wikipedia.org/wiki/Jaccard_index) between two byte strings.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
stringJaccardIndex(string1, string2)
|
||||
```
|
||||
|
||||
**Examples**
|
||||
|
||||
``` sql
|
||||
SELECT stringJaccardIndex('clickhouse', 'mouse');
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─stringJaccardIndex('clickhouse', 'mouse')─┐
|
||||
│ 0.4 │
|
||||
└───────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## stringJaccardIndexUTF8
|
||||
|
||||
Like [stringJaccardIndex](#stringJaccardIndex) but for UTF8-encoded strings.
|
||||
|
||||
## editDistance
|
||||
|
||||
Calculates the [edit distance](https://en.wikipedia.org/wiki/Edit_distance) between two byte strings.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
editDistance(string1, string2)
|
||||
```
|
||||
|
||||
**Examples**
|
||||
|
||||
``` sql
|
||||
SELECT editDistance('clickhouse', 'mouse');
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─editDistance('clickhouse', 'mouse')─┐
|
||||
│ 6 │
|
||||
└─────────────────────────────────────┘
|
||||
```
|
||||
|
||||
Alias: levenshteinDistance
|
||||
|
||||
## initcap
|
||||
|
||||
Convert the first letter of each word to upper case and the rest to lower case. Words are sequences of alphanumeric characters separated by non-alphanumeric characters.
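A short hedged example of the behaviour described above:

```sql
SELECT initcap('hello WORLD of clickhouse');
-- Expected (sketch): 'Hello World Of Clickhouse'
```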
|
||||
|
@ -681,79 +681,3 @@ Like [hasSubsequence](#hasSubsequence) but assumes `haystack` and `needle` are U
|
||||
## hasSubsequenceCaseInsensitiveUTF8
|
||||
|
||||
Like [hasSubsequenceUTF8](#hasSubsequenceUTF8) but searches case-insensitively.
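A hedged example: the `hasSubsequence` family returns 1 when the needle characters occur in the haystack in the same order, so the case-insensitive UTF8 variant should behave as follows:

```sql
SELECT hasSubsequenceCaseInsensitiveUTF8('ClickHouse', 'chs');
-- Expected (sketch): 1 — 'c', 'h', 's' appear in this order when case is ignored
```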
|
||||
|
||||
## byteHammingDistance
|
||||
|
||||
Calculates the [hamming distance](https://en.wikipedia.org/wiki/Hamming_distance) between two byte strings.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
byteHammingDistance(string1, string2)
|
||||
```
|
||||
|
||||
**Examples**
|
||||
|
||||
``` sql
|
||||
SELECT byteHammingDistance('abc', 'ab') ;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─byteHammingDistance('abc', 'ab')─┐
|
||||
│ 1 │
|
||||
└──────────────────────────────────┘
|
||||
```
|
||||
|
||||
- Alias: mismatches
|
||||
|
||||
## jaccardIndex
|
||||
|
||||
Calculates the [Jaccard similarity index](https://en.wikipedia.org/wiki/Jaccard_index) between two byte strings.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
byteJaccardIndex(string1, string2)
|
||||
```
|
||||
|
||||
**Examples**
|
||||
|
||||
``` sql
|
||||
SELECT jaccardIndex('clickhouse', 'mouse');
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─jaccardIndex('clickhouse', 'mouse')─┐
|
||||
│ 0.4 │
|
||||
└─────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## editDistance
|
||||
|
||||
Calculates the [edit distance](https://en.wikipedia.org/wiki/Edit_distance) between two byte strings.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
editDistance(string1, string2)
|
||||
```
|
||||
|
||||
**Examples**
|
||||
|
||||
``` sql
|
||||
SELECT editDistance('clickhouse', 'mouse');
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─editDistance('clickhouse', 'mouse')─┐
|
||||
│ 6 │
|
||||
└─────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
- Alias: levenshteinDistance
|
||||
|
@ -171,7 +171,8 @@ Result:
|
||||
Can be used with [MinHash](../../sql-reference/functions/hash-functions.md#ngramminhash) functions for detection of semi-duplicate strings:
|
||||
|
||||
``` sql
|
||||
SELECT tupleHammingDistance(wordShingleMinHash(string), wordShingleMinHashCaseInsensitive(string)) as HammingDistance FROM (SELECT 'ClickHouse is a column-oriented database management system for online analytical processing of queries.' AS string);
|
||||
SELECT tupleHammingDistance(wordShingleMinHash(string), wordShingleMinHashCaseInsensitive(string)) AS HammingDistance
|
||||
FROM (SELECT 'ClickHouse is a column-oriented database management system for online analytical processing of queries.' AS string);
|
||||
```
|
||||
|
||||
Result:
|
||||
|
@ -103,7 +103,7 @@ INSERT INTO holdings VALUES
|
||||
('Bitcoin', 200),
|
||||
('Ethereum', 250),
|
||||
('Ethereum', 5000),
|
||||
('DOGEFI', 10);
|
||||
('DOGEFI', 10),
|
||||
('Bitcoin Diamond', 5000);
|
||||
```
|
||||
|
||||
|
@ -207,7 +207,7 @@ The optional keyword `FULL` causes the output to include the collation, comment
|
||||
|
||||
The statement produces a result table with the following structure:
|
||||
- `field` - The name of the column (String)
|
||||
- `type` - The column data type. If setting `[use_mysql_types_in_show_columns](../../operations/settings/settings.md#use_mysql_types_in_show_columns) = 1` (default: 0), then the equivalent type name in MySQL is shown. (String)
|
||||
- `type` - The column data type. If the query was made through the MySQL wire protocol, then the equivalent type name in MySQL is shown. (String)
|
||||
- `null` - `YES` if the column data type is Nullable, `NO` otherwise (String)
|
||||
- `key` - `PRI` if the column is part of the primary key, `SOR` if the column is part of the sorting key, empty otherwise (String)
|
||||
- `default` - Default expression of the column if it is of type `ALIAS`, `DEFAULT`, or `MATERIALIZED`, otherwise `NULL`. (Nullable(String))
|
||||
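A hedged illustration of how those fields are populated, using a hypothetical table (the table name, columns, and engine are chosen only for this example):

```sql
-- Hypothetical table for illustration only.
CREATE TABLE t (id UInt64, name Nullable(String)) ENGINE = MergeTree ORDER BY id;

SHOW COLUMNS FROM t;
-- Expected (sketch):
--   id   -> key = 'PRI' (part of the primary key), null = 'NO'
--   name -> null = 'YES' (Nullable type), default = NULL (no default expression)
```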
|
@ -7,7 +7,7 @@ keywords: [gcs, bucket]
|
||||
|
||||
# gcs Table Function
|
||||
|
||||
Provides a table-like interface to select/insert files in [Google Cloud Storage](https://cloud.google.com/storage/).
|
||||
Provides a table-like interface to `SELECT` and `INSERT` data from [Google Cloud Storage](https://cloud.google.com/storage/). Requires the [`Storage Object User` IAM role](https://cloud.google.com/storage/docs/access-control/iam-roles).
|
||||
|
||||
**Syntax**
|
||||
|
||||
|
@ -49,21 +49,9 @@ ClickHouse — полноценная столбцовая СУБД. Данны
|
||||
|
||||
Блоки создаются для всех обработанных фрагментов данных. Напоминаем, что одни и те же типы вычислений, имена столбцов и типы переиспользуются в разных блоках и только данные колонок изменяются. Лучше разделить данные и заголовок блока потому, что в блоках маленького размера мы имеем большой оверхэд по временным строкам при копировании умных указателей (`shared_ptrs`) и имен столбцов.
|
||||
|
||||
## Потоки блоков (Block Streams) {#block-streams}
|
||||
## Процессоры
|
||||
|
||||
Потоки блоков обрабатывают данные. Мы используем потоки блоков для чтения данных, трансформации или записи данных куда-либо. `IBlockInputStream` предоставляет метод `read` для получения следующего блока, пока это возможно, и метод `write`, чтобы продвигать (push) блок куда-либо.
|
||||
|
||||
Потоки отвечают за:
|
||||
|
||||
1. Чтение и запись в таблицу. Таблица лишь возвращает поток для чтения или записи блоков.
|
||||
2. Реализацию форматов данных. Например, при выводе данных в терминал в формате `Pretty`, вы создаете выходной поток блоков, который форматирует поступающие в него блоки.
|
||||
3. Трансформацию данных. Допустим, у вас есть `IBlockInputStream` и вы хотите создать отфильтрованный поток. Вы создаете `FilterBlockInputStream` и инициализируете его вашим потоком. Затем вы тянете (pull) блоки из `FilterBlockInputStream`, а он тянет блоки исходного потока, фильтрует их и возвращает отфильтрованные блоки вам. Таким образом построены конвейеры выполнения запросов.
|
||||
|
||||
Имеются и более сложные трансформации. Например, когда вы тянете блоки из `AggregatingBlockInputStream`, он считывает все данные из своего источника, агрегирует их, и возвращает поток агрегированных данных вам. Другой пример: конструктор `UnionBlockInputStream` принимает множество источников входных данных и число потоков. Такой `Stream` работает в несколько потоков и читает данные источников параллельно.
|
||||
|
||||
> Потоки блоков используют «втягивающий» (pull) подход к управлению потоком выполнения: когда вы вытягиваете блок из первого потока, он, следовательно, вытягивает необходимые блоки из вложенных потоков, так и работает весь конвейер выполнения. Ни «pull» ни «push» не имеют явного преимущества, потому что поток управления неявный, и это ограничивает в реализации различных функций, таких как одновременное выполнение нескольких запросов (слияние нескольких конвейеров вместе). Это ограничение можно преодолеть с помощью сопрограмм (coroutines) или просто запуском дополнительных потоков, которые ждут друг друга. У нас может быть больше возможностей, если мы сделаем поток управления явным: если мы локализуем логику для передачи данных из одной расчетной единицы в другую вне этих расчетных единиц. Читайте эту [статью](http://journal.stuffwithstuff.com/2013/01/13/iteration-inside-and-out/) для углубленного изучения.
|
||||
|
||||
Следует отметить, что конвейер выполнения запроса создает временные данные на каждом шаге. Мы стараемся сохранить размер блока достаточно маленьким, чтобы временные данные помещались в кэш процессора. При таком допущении запись и чтение временных данных практически бесплатны по сравнению с другими расчетами. Мы могли бы рассмотреть альтернативу, которая заключается в том, чтобы объединить многие операции в конвейере вместе. Это может сделать конвейер как можно короче и удалить большую часть временных данных, что может быть преимуществом, но у такого подхода также есть недостатки. Например, разделенный конвейер позволяет легко реализовать кэширование промежуточных данных, использование промежуточных данных из аналогичных запросов, выполняемых одновременно, и объединение конвейеров для аналогичных запросов.
|
||||
Смотрите описание в файле [src/Processors/IProcessor.h](https://github.com/ClickHouse/ClickHouse/blob/master/src/Processors/IProcessor.h) исходного кода.
|
||||
|
||||
## Форматы {#formats}
|
||||
|
||||
@ -81,13 +69,16 @@ ClickHouse — полноценная столбцовая СУБД. Данны
|
||||
|
||||
Буферы чтения-записи имеют дело только с байтами. В заголовочных файлах `ReadHelpers` и `WriteHelpers` объявлены некоторые функции, чтобы помочь с форматированием ввода-вывода. Например, есть помощники для записи числа в десятичном формате.
|
||||
|
||||
Давайте посмотрим, что происходит, когда вы хотите вывести результат в `JSON` формате в стандартный вывод (stdout). У вас есть результирующий набор данных, готовый к извлечению из `IBlockInputStream`. Вы создаете `WriteBufferFromFileDescriptor(STDOUT_FILENO)` чтобы записать байты в stdout. Вы создаете `JSONRowOutputStream`, инициализируете с этим `WriteBuffer`'ом, чтобы записать строки `JSON` в stdout. Кроме того вы создаете `BlockOutputStreamFromRowOutputStream`, реализуя `IBlockOutputStream`. Затем вызывается `copyData` для передачи данных из `IBlockInputStream` в `IBlockOutputStream` и все работает. Внутренний `JSONRowOutputStream` будет писать в формате `JSON` различные разделители и вызвать `IDataType::serializeTextJSON` метод со ссылкой на `IColumn` и номер строки в качестве аргументов. Следовательно, `IDataType::serializeTextJSON` вызовет метод из `WriteHelpers.h`: например, `writeText` для числовых типов и `writeJSONString` для `DataTypeString`.
|
||||
Давайте посмотрим, что происходит, когда вы хотите вывести результат в `JSON` формате в стандартный вывод (stdout). У вас есть результирующий набор данных, готовый к извлечению из `QueryPipeline`. Вы создаете `WriteBufferFromFileDescriptor(STDOUT_FILENO)` чтобы записать байты в stdout. Вы создаете `JSONRowOutputFormat`, инициализируете с этим `WriteBuffer`'ом, чтобы записать строки `JSON` в stdout.
|
||||
Чтобы соединить выход `QueryPipeline` с форматом, можно использовать метод `complete`, который превращает `QueryPipeline` в завершенный `QueryPipeline`.
|
||||
Внутренний `JSONRowOutputStream` будет писать в формате `JSON` различные разделители и вызвать `IDataType::serializeTextJSON` метод со ссылкой на `IColumn` и номер строки в качестве аргументов. Следовательно, `IDataType::serializeTextJSON` вызовет метод из `WriteHelpers.h`: например, `writeText` для числовых типов и `writeJSONString` для `DataTypeString`.
|
||||
|
||||
## Таблицы {#tables}
|
||||
|
||||
Интерфейс `IStorage` служит для отображения таблицы. Различные движки таблиц являются реализациями этого интерфейса. Примеры `StorageMergeTree`, `StorageMemory` и так далее. Экземпляры этих классов являются просто таблицами.
|
||||
|
||||
Ключевые методы `IStorage` это `read` и `write`. Есть и другие варианты — `alter`, `rename`, `drop` и так далее. Метод `read` принимает следующие аргументы: набор столбцов для чтения из таблицы, `AST` запрос и желаемое количество потоков для вывода. Он возвращает один или несколько объектов `IBlockInputStream` и информацию о стадии обработки данных, которая была завершена внутри табличного движка во время выполнения запроса.
|
||||
Ключевые методы `IStorage` это `read` и `write`. Есть и другие варианты — `alter`, `rename`, `drop` и так далее.
|
||||
Метод `read` принимает следующие аргументы: набор столбцов для чтения из таблицы, `AST` запрос и желаемое количество потоков для вывода и возвращает `Pipe`.
|
||||
|
||||
В большинстве случаев метод read отвечает только за чтение указанных столбцов из таблицы, а не за дальнейшую обработку данных. Вся дальнейшая обработка данных осуществляется интерпретатором запросов и не входит в сферу ответственности `IStorage`.
|
||||
|
||||
@ -96,7 +87,9 @@ ClickHouse — полноценная столбцовая СУБД. Данны
|
||||
- AST-запрос, передающийся в метод `read`, может использоваться движком таблицы для получения информации о возможности использования индекса и считывания меньшего количества данных из таблицы.
|
||||
- Иногда движок таблиц может сам обрабатывать данные до определенного этапа. Например, `StorageDistributed` можно отправить запрос на удаленные серверы, попросить их обработать данные до этапа, когда данные с разных удаленных серверов могут быть объединены, и вернуть эти предварительно обработанные данные. Затем интерпретатор запросов завершает обработку данных.
|
||||
|
||||
Метод `read` может возвращать несколько объектов `IBlockInputStream`, позволяя осуществлять параллельную обработку данных. Эти несколько блочных входных потоков могут считываться из таблицы параллельно. Затем вы можете обернуть эти потоки различными преобразованиями (такими как вычисление выражений или фильтрация), которые могут быть вычислены независимо, и создать `UnionBlockInputStream` поверх них, чтобы читать из нескольких потоков параллельно.
|
||||
Метод `read` может возвращать `Pipe`, состоящий из нескольких процессоров. Каждый из этих процессоров может читать данные параллельно.
|
||||
Затем вы можете соединить эти процессоры с другими преобразованиями (такими как вычисление выражений или фильтрация), которые могут быть вычислены независимо.
|
||||
Далее, создав `QueryPipeline` поверх них, можно выполнить конвейер с помощью `PipelineExecutor`.
|
||||
|
||||
Есть и другие варианты. Например, `TableFunction` возвращает временный объект `IStorage`, который можно подставить во `FROM`.
|
||||
|
||||
@ -112,10 +105,18 @@ ClickHouse — полноценная столбцовая СУБД. Данны
|
||||
|
||||
## Интерпретаторы {#interpreters}
|
||||
|
||||
Интерпретаторы отвечают за создание конвейера выполнения запроса из `AST`. Есть простые интерпретаторы, такие как `InterpreterExistsQuery` и `InterpreterDropQuery` или более сложный `InterpreterSelectQuery`. Конвейер выполнения запроса представляет собой комбинацию входных и выходных потоков блоков. Например, результатом интерпретации `SELECT` запроса является `IBlockInputStream` для чтения результирующего набора данных; результат интерпретации `INSERT` запроса — это `IBlockOutputStream`, для записи данных, предназначенных для вставки; результат интерпретации `INSERT SELECT` запроса — это `IBlockInputStream`, который возвращает пустой результирующий набор при первом чтении, но копирует данные из `SELECT` к `INSERT`.
|
||||
Интерпретаторы отвечают за создание конвейера выполнения запроса из `AST`. Есть простые интерпретаторы, такие как `InterpreterExistsQuery` и `InterpreterDropQuery` или более сложный `InterpreterSelectQuery`.
|
||||
|
||||
Конвейер выполнения запроса представляет собой комбинацию процессоров, которые принимают чанки (наборы колонок с их типами) на вход и возвращают их на выходе.
|
||||
Процессоры обмениваются данными через порты и могут иметь несколько входных и выходных портов.
|
||||
Более подробное описание можно найти в файле [src/Processors/IProcessor.h](https://github.com/ClickHouse/ClickHouse/blob/master/src/Processors/IProcessor.h).
|
||||
|
||||
Например, результатом интерпретации `SELECT` запроса является `QueryPipeline`, который имеет специальный выходной порт для чтения результирующего набора данных. Результатом интерпретации `INSERT` запроса является `QueryPipeline` с входным портом для записи данных для вставки. Результатом интерпретации `INSERT SELECT` запроса является завершенный `QueryPipeline`, который не имеет входов или выходов, но копирует данные из `SELECT` в `INSERT` одновременно.
|
||||
|
||||
`InterpreterSelectQuery` использует `ExpressionAnalyzer` и `ExpressionActions` механизмы для анализа запросов и преобразований. Именно здесь выполняется большинство оптимизаций запросов на основе правил. `ExpressionAnalyzer` написан довольно грязно и должен быть переписан: различные преобразования запросов и оптимизации должны быть извлечены в отдельные классы, чтобы позволить модульные преобразования или запросы.
|
||||
|
||||
Для решения текущих проблем, существующих в интерпретаторах, разрабатывается новый `InterpreterSelectQueryAnalyzer`. Это новая версия `InterpreterSelectQuery`, которая не использует `ExpressionAnalyzer` и вводит дополнительный уровень абстракции между `AST` и `QueryPipeline`, называемый `QueryTree`. Он еще не готов к использованию в продакшене, но его можно протестировать с помощью флага `allow_experimental_analyzer`.
|
||||
|
||||
## Функции {#functions}
|
||||
|
||||
Существуют обычные функции и агрегатные функции. Агрегатные функции смотрите в следующем разделе.
|
||||
|
@ -345,7 +345,7 @@ struct ExtractDomain
|
||||
**7.** Для абстрактных классов (интерфейсов) можно добавить в начало имени букву `I`.
|
||||
|
||||
``` cpp
|
||||
class IBlockInputStream
|
||||
class IProcessor
|
||||
```
|
||||
|
||||
**8.** Если переменная используется достаточно локально, то можно использовать короткое имя.
|
||||
|
@ -366,7 +366,7 @@ $ curl -v 'http://localhost:8123/predefined_query'
|
||||
< X-ClickHouse-Query-Id: 96fe0052-01e6-43ce-b12a-6b7370de6e8a
|
||||
< X-ClickHouse-Format: Template
|
||||
< X-ClickHouse-Timezone: Asia/Shanghai
|
||||
< Keep-Alive: timeout=3
|
||||
< Keep-Alive: timeout=10
|
||||
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}
|
||||
<
|
||||
# HELP "Query" "Number of executing queries"
|
||||
@ -529,7 +529,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/hi'
|
||||
< Connection: Keep-Alive
|
||||
< Content-Type: text/html; charset=UTF-8
|
||||
< Transfer-Encoding: chunked
|
||||
< Keep-Alive: timeout=3
|
||||
< Keep-Alive: timeout=10
|
||||
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
|
||||
<
|
||||
* Connection #0 to host localhost left intact
|
||||
@ -569,7 +569,7 @@ $ curl -v -H 'XXX:xxx' 'http://localhost:8123/get_config_static_handler'
|
||||
< Connection: Keep-Alive
|
||||
< Content-Type: text/plain; charset=UTF-8
|
||||
< Transfer-Encoding: chunked
|
||||
< Keep-Alive: timeout=3
|
||||
< Keep-Alive: timeout=10
|
||||
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
|
||||
<
|
||||
* Connection #0 to host localhost left intact
|
||||
@ -621,7 +621,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_absolute_path_static_handler'
|
||||
< Connection: Keep-Alive
|
||||
< Content-Type: text/html; charset=UTF-8
|
||||
< Transfer-Encoding: chunked
|
||||
< Keep-Alive: timeout=3
|
||||
< Keep-Alive: timeout=10
|
||||
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
|
||||
<
|
||||
<html><body>Absolute Path File</body></html>
|
||||
@ -640,7 +640,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_relative_path_static_handler'
|
||||
< Connection: Keep-Alive
|
||||
< Content-Type: text/html; charset=UTF-8
|
||||
< Transfer-Encoding: chunked
|
||||
< Keep-Alive: timeout=3
|
||||
< Keep-Alive: timeout=10
|
||||
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
|
||||
<
|
||||
<html><body>Relative Path File</body></html>
|
||||
|
@ -31,27 +31,25 @@ WITH arrayMap(x -> demangle(addressToSymbol(x)), trace) AS all SELECT thread_nam
|
||||
``` text
|
||||
Row 1:
|
||||
──────
|
||||
thread_name: clickhouse-serv
|
||||
|
||||
thread_id: 686
|
||||
query_id: 1a11f70b-626d-47c1-b948-f9c7b206395d
|
||||
res: sigqueue
|
||||
DB::StorageSystemStackTrace::fillData(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::Context const&, DB::SelectQueryInfo const&) const
|
||||
DB::IStorageSystemOneBlock<DB::StorageSystemStackTrace>::read(std::__1::vector<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::allocator<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > > > const&, DB::SelectQueryInfo const&, DB::Context const&, DB::QueryProcessingStage::Enum, unsigned long, unsigned int)
|
||||
DB::InterpreterSelectQuery::executeFetchColumns(DB::QueryProcessingStage::Enum, DB::QueryPipeline&, std::__1::shared_ptr<DB::PrewhereInfo> const&, std::__1::vector<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::allocator<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > > > const&)
|
||||
DB::InterpreterSelectQuery::executeImpl(DB::QueryPipeline&, std::__1::shared_ptr<DB::IBlockInputStream> const&, std::__1::optional<DB::Pipe>)
|
||||
DB::InterpreterSelectQuery::execute()
|
||||
DB::InterpreterSelectWithUnionQuery::execute()
|
||||
DB::executeQueryImpl(char const*, char const*, DB::Context&, bool, DB::QueryProcessingStage::Enum, bool, DB::ReadBuffer*)
|
||||
DB::executeQuery(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, DB::Context&, bool, DB::QueryProcessingStage::Enum, bool)
|
||||
DB::TCPHandler::runImpl()
|
||||
DB::TCPHandler::run()
|
||||
Poco::Net::TCPServerConnection::start()
|
||||
Poco::Net::TCPServerDispatcher::run()
|
||||
Poco::PooledThread::run()
|
||||
Poco::ThreadImpl::runnableEntry(void*)
|
||||
start_thread
|
||||
__clone
|
||||
thread_name: QueryPipelineEx
|
||||
thread_id: 743490
|
||||
query_id: dc55a564-febb-4e37-95bb-090ef182c6f1
|
||||
res: memcpy
|
||||
large_ralloc
|
||||
arena_ralloc
|
||||
do_rallocx
|
||||
Allocator<true, true>::realloc(void*, unsigned long, unsigned long, unsigned long)
|
||||
HashTable<unsigned long, HashMapCell<unsigned long, char*, HashCRC32<unsigned long>, HashTableNoState, PairNoInit<unsigned long, char*>>, HashCRC32<unsigned long>, HashTableGrowerWithPrecalculation<8ul>, Allocator<true, true>>::resize(unsigned long, unsigned long)
|
||||
void DB::Aggregator::executeImplBatch<false, false, true, DB::AggregationMethodOneNumber<unsigned long, HashMapTable<unsigned long, HashMapCell<unsigned long, char*, HashCRC32<unsigned long>, HashTableNoState, PairNoInit<unsigned long, char*>>, HashCRC32<unsigned long>, HashTableGrowerWithPrecalculation<8ul>, Allocator<true, true>>, true, false>>(DB::AggregationMethodOneNumber<unsigned long, HashMapTable<unsigned long, HashMapCell<unsigned long, char*, HashCRC32<unsigned long>, HashTableNoState, PairNoInit<unsigned long, char*>>, HashCRC32<unsigned long>, HashTableGrowerWithPrecalculation<8ul>, Allocator<true, true>>, true, false>&, DB::AggregationMethodOneNumber<unsigned long, HashMapTable<unsigned long, HashMapCell<unsigned long, char*, HashCRC32<unsigned long>, HashTableNoState, PairNoInit<unsigned long, char*>>, HashCRC32<unsigned long>, HashTableGrowerWithPrecalculation<8ul>, Allocator<true, true>>, true, false>::State&, DB::Arena*, unsigned long, unsigned long, DB::Aggregator::AggregateFunctionInstruction*, bool, char*) const
|
||||
DB::Aggregator::executeImpl(DB::AggregatedDataVariants&, unsigned long, unsigned long, std::__1::vector<DB::IColumn const*, std::__1::allocator<DB::IColumn const*>>&, DB::Aggregator::AggregateFunctionInstruction*, bool, bool, char*) const
|
||||
DB::Aggregator::executeOnBlock(std::__1::vector<COW<DB::IColumn>::immutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::immutable_ptr<DB::IColumn>>>, unsigned long, unsigned long, DB::AggregatedDataVariants&, std::__1::vector<DB::IColumn const*, std::__1::allocator<DB::IColumn const*>>&, std::__1::vector<std::__1::vector<DB::IColumn const*, std::__1::allocator<DB::IColumn const*>>, std::__1::allocator<std::__1::vector<DB::IColumn const*, std::__1::allocator<DB::IColumn const*>>>>&, bool&) const
|
||||
DB::AggregatingTransform::work()
|
||||
DB::ExecutionThreadContext::executeTask()
|
||||
DB::PipelineExecutor::executeStepImpl(unsigned long, std::__1::atomic<bool>*)
|
||||
void std::__1::__function::__policy_invoker<void ()>::__call_impl<std::__1::__function::__default_alloc_func<DB::PipelineExecutor::spawnThreads()::$_0, void ()>>(std::__1::__function::__policy_storage const*)
|
||||
ThreadPoolImpl<ThreadFromGlobalPoolImpl<false>>::worker(std::__1::__list_iterator<ThreadFromGlobalPoolImpl<false>, void*>)
|
||||
void std::__1::__function::__policy_invoker<void ()>::__call_impl<std::__1::__function::__default_alloc_func<ThreadFromGlobalPoolImpl<false>::ThreadFromGlobalPoolImpl<void ThreadPoolImpl<ThreadFromGlobalPoolImpl<false>>::scheduleImpl<void>(std::__1::function<void ()>, Priority, std::__1::optional<unsigned long>, bool)::'lambda0'()>(void&&)::'lambda'(), void ()>>(std::__1::__function::__policy_storage const*)
|
||||
void* std::__1::__thread_proxy[abi:v15000]<std::__1::tuple<std::__1::unique_ptr<std::__1::__thread_struct, std::__1::default_delete<std::__1::__thread_struct>>, void ThreadPoolImpl<std::__1::thread>::scheduleImpl<void>(std::__1::function<void ()>, Priority, std::__1::optional<unsigned long>, bool)::'lambda0'()>>(void*)
|
||||
```
|
||||
|
||||
Получение имен файлов и номеров строк в исходном коде ClickHouse:
|
||||
|
@ -362,7 +362,7 @@ $ curl -v 'http://localhost:8123/predefined_query'
|
||||
< X-ClickHouse-Query-Id: 96fe0052-01e6-43ce-b12a-6b7370de6e8a
|
||||
< X-ClickHouse-Format: Template
|
||||
< X-ClickHouse-Timezone: Asia/Shanghai
|
||||
< Keep-Alive: timeout=3
|
||||
< Keep-Alive: timeout=10
|
||||
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
|
||||
<
|
||||
# HELP "Query" "Number of executing queries"
|
||||
@ -520,7 +520,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/hi'
|
||||
< Connection: Keep-Alive
|
||||
< Content-Type: text/html; charset=UTF-8
|
||||
< Transfer-Encoding: chunked
|
||||
< Keep-Alive: timeout=3
|
||||
< Keep-Alive: timeout=10
|
||||
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
|
||||
<
|
||||
* Connection #0 to host localhost left intact
|
||||
@ -560,7 +560,7 @@ $ curl -v -H 'XXX:xxx' 'http://localhost:8123/get_config_static_handler'
|
||||
< Connection: Keep-Alive
|
||||
< Content-Type: text/plain; charset=UTF-8
|
||||
< Transfer-Encoding: chunked
|
||||
< Keep-Alive: timeout=3
|
||||
< Keep-Alive: timeout=10
|
||||
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
|
||||
<
|
||||
* Connection #0 to host localhost left intact
|
||||
@ -612,7 +612,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_absolute_path_static_handler'
|
||||
< Connection: Keep-Alive
|
||||
< Content-Type: text/html; charset=UTF-8
|
||||
< Transfer-Encoding: chunked
|
||||
< Keep-Alive: timeout=3
|
||||
< Keep-Alive: timeout=10
|
||||
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
|
||||
<
|
||||
<html><body>Absolute Path File</body></html>
|
||||
@ -631,7 +631,7 @@ $ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_relative_path_static_handler'
|
||||
< Connection: Keep-Alive
|
||||
< Content-Type: text/html; charset=UTF-8
|
||||
< Transfer-Encoding: chunked
|
||||
< Keep-Alive: timeout=3
|
||||
< Keep-Alive: timeout=10
|
||||
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0","elapsed_ns":"662334"}
|
||||
<
|
||||
<html><body>Relative Path File</body></html>
|
||||
|
@ -439,6 +439,13 @@ else()
|
||||
install (TARGETS clickhouse RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
|
||||
endif()
|
||||
|
||||
# A target to get stripped binary.
|
||||
# Note: this is different to the above (extract debug symbols to a separate place)
|
||||
add_custom_target(clickhouse-stripped ALL
|
||||
COMMAND "${STRIP_PATH}" -o "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-stripped" --strip-debug --remove-section=.comment --remove-section=.note "${CMAKE_CURRENT_BINARY_DIR}/clickhouse"
|
||||
DEPENDS clickhouse
|
||||
COMMENT "Stripping clickhouse binary" VERBATIM)
|
||||
|
||||
if (ENABLE_TESTS)
|
||||
set (CLICKHOUSE_UNIT_TESTS_TARGETS unit_tests_dbms)
|
||||
add_custom_target (clickhouse-tests ALL DEPENDS ${CLICKHOUSE_UNIT_TESTS_TARGETS})
|
||||
|
@ -5,6 +5,7 @@
|
||||
|
||||
#include <Common/ZooKeeper/ZooKeeper.h>
|
||||
#include <Common/ZooKeeper/KeeperException.h>
|
||||
#include <Common/randomSeed.h>
|
||||
#include <Common/setThreadName.h>
|
||||
#include <Common/CurrentMetrics.h>
|
||||
#include <Interpreters/InterpreterInsertQuery.h>
|
||||
@ -59,7 +60,7 @@ void ClusterCopier::init()
|
||||
getContext()->setClustersConfig(task_cluster_current_config, false, task_cluster->clusters_prefix);
|
||||
|
||||
/// Set up shards and their priority
|
||||
task_cluster->random_engine.seed(task_cluster->random_device());
|
||||
task_cluster->random_engine.seed(randomSeed());
|
||||
for (auto & task_table : task_cluster->table_tasks)
|
||||
{
|
||||
task_table.cluster_pull = getContext()->getCluster(task_table.cluster_pull_name);
|
||||
|
@ -7,7 +7,7 @@
|
||||
|
||||
#include <Poco/Util/AbstractConfiguration.h>
|
||||
|
||||
#include <random>
|
||||
#include <pcg_random.hpp>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -45,7 +45,6 @@ struct TaskCluster
|
||||
/// Subtasks
|
||||
TasksTable table_tasks;
|
||||
|
||||
std::random_device random_device;
|
||||
pcg64 random_engine;
|
||||
};
|
||||
|
||||
|
@ -420,7 +420,7 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
|
||||
|
||||
/// Create symlinks.
|
||||
|
||||
std::initializer_list<const char *> tools
|
||||
std::initializer_list<std::string_view> tools
|
||||
{
|
||||
"clickhouse-server",
|
||||
"clickhouse-client",
|
||||
@ -435,6 +435,9 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
|
||||
"clickhouse-keeper",
|
||||
"clickhouse-keeper-converter",
|
||||
"clickhouse-disks",
|
||||
"ch",
|
||||
"chl",
|
||||
"chc",
|
||||
};
|
||||
|
||||
for (const auto & tool : tools)
|
||||
@ -443,6 +446,15 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
|
||||
fs::path symlink_path = bin_dir / tool;
|
||||
|
||||
if (fs::exists(symlink_path))
|
||||
{
|
||||
/// Do not replace short named symlinks if they are already present in the system
|
||||
/// to avoid collision with other tools.
|
||||
if (!tool.starts_with("clickhouse"))
|
||||
{
|
||||
fmt::print("Symlink {} already exists. Will keep it.\n", symlink_path.string());
|
||||
need_to_create = false;
|
||||
}
|
||||
else
|
||||
{
|
||||
bool is_symlink = FS::isSymlink(symlink_path);
|
||||
fs::path points_to;
|
||||
@ -470,6 +482,7 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (need_to_create)
|
||||
{
|
||||
|
@ -2,15 +2,12 @@
|
||||
#include <csetjmp>
|
||||
#include <unistd.h>
|
||||
|
||||
#ifdef OS_LINUX
|
||||
#include <sys/mman.h>
|
||||
#endif
|
||||
|
||||
#include <new>
|
||||
#include <iostream>
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include <tuple>
|
||||
#include <string_view>
|
||||
#include <utility> /// pair
|
||||
|
||||
#include <fmt/format.h>
|
||||
@ -22,7 +19,6 @@
|
||||
#include <Common/IO.h>
|
||||
|
||||
#include <base/phdr_cache.h>
|
||||
#include <base/scope_guard.h>
|
||||
|
||||
|
||||
/// Universal executable for various clickhouse applications
|
||||
@ -98,7 +94,7 @@ using MainFunc = int (*)(int, char**);
|
||||
#if !defined(FUZZING_MODE)
|
||||
|
||||
/// Add an item here to register new application
|
||||
std::pair<const char *, MainFunc> clickhouse_applications[] =
|
||||
std::pair<std::string_view, MainFunc> clickhouse_applications[] =
|
||||
{
|
||||
#if ENABLE_CLICKHOUSE_LOCAL
|
||||
{"local", mainEntryClickHouseLocal},
|
||||
@ -158,6 +154,18 @@ std::pair<const char *, MainFunc> clickhouse_applications[] =
|
||||
#endif
|
||||
};
|
||||
|
||||
/// Add an item here to register a new short name
|
||||
std::pair<std::string_view, std::string_view> clickhouse_short_names[] =
|
||||
{
|
||||
#if ENABLE_CLICKHOUSE_LOCAL
|
||||
{"ch", "local"},
|
||||
{"chl", "local"},
|
||||
#endif
|
||||
#if ENABLE_CLICKHOUSE_CLIENT
|
||||
{"chc", "client"},
|
||||
#endif
|
||||
};
|
||||
|
||||
int printHelp(int, char **)
|
||||
{
|
||||
std::cerr << "Use one of the following commands:" << std::endl;
|
||||
@ -387,15 +395,21 @@ void checkHarmfulEnvironmentVariables(char ** argv)
|
||||
|
||||
}
|
||||
|
||||
bool isClickhouseApp(const std::string & app_suffix, std::vector<char *> & argv)
|
||||
bool isClickhouseApp(std::string_view app_suffix, std::vector<char *> & argv)
|
||||
{
|
||||
for (const auto & [alias, name] : clickhouse_short_names)
|
||||
if (app_suffix == name
|
||||
&& !argv.empty() && (alias == argv[0] || endsWith(argv[0], "/" + std::string(alias))))
|
||||
return true;
|
||||
|
||||
/// Use app if the first arg 'app' is passed (the arg should be quietly removed)
|
||||
if (argv.size() >= 2)
|
||||
{
|
||||
auto first_arg = argv.begin() + 1;
|
||||
|
||||
/// 'clickhouse --client ...' and 'clickhouse client ...' are Ok
|
||||
if (*first_arg == "--" + app_suffix || *first_arg == app_suffix)
|
||||
if (*first_arg == app_suffix
|
||||
|| (std::string_view(*first_arg).starts_with("--") && std::string_view(*first_arg).substr(2) == app_suffix))
|
||||
{
|
||||
argv.erase(first_arg);
|
||||
return true;
|
||||
@ -403,7 +417,7 @@ bool isClickhouseApp(const std::string & app_suffix, std::vector<char *> & argv)
|
||||
}
|
||||
|
||||
/// Use app if clickhouse binary is run through symbolic link with name clickhouse-app
|
||||
std::string app_name = "clickhouse-" + app_suffix;
|
||||
std::string app_name = "clickhouse-" + std::string(app_suffix);
|
||||
return !argv.empty() && (app_name == argv[0] || endsWith(argv[0], "/" + app_name));
|
||||
}
|
||||
|
||||
|
@ -1106,7 +1106,7 @@ public:
|
||||
{
|
||||
if (isInteger(data_type))
|
||||
{
|
||||
if (isUnsignedInteger(data_type))
|
||||
if (isUInt(data_type))
|
||||
return std::make_unique<UnsignedIntegerModel>(seed);
|
||||
else
|
||||
return std::make_unique<SignedIntegerModel>(seed);
|
||||
|
@ -11,8 +11,8 @@ else ()
|
||||
endif ()
|
||||
|
||||
add_custom_target (self-extracting ALL
|
||||
${CMAKE_COMMAND} -E remove clickhouse
|
||||
${CMAKE_COMMAND} -E remove clickhouse clickhouse-stripped
|
||||
COMMAND ${COMPRESSOR} ${DECOMPRESSOR} clickhouse ../clickhouse
|
||||
DEPENDS clickhouse compressor
|
||||
COMMAND ${COMPRESSOR} ${DECOMPRESSOR} clickhouse-stripped ../clickhouse-stripped
|
||||
DEPENDS clickhouse clickhouse-stripped compressor
|
||||
)
|
||||
|
||||
|
@ -104,15 +104,14 @@
|
||||
</url_scheme_mappers>
|
||||
|
||||
<!-- Add headers to response in options request. OPTIONS method is used in CORS preflight requests. -->
|
||||
<!-- It is off by default. Next headers are obligate for CORS.-->
|
||||
<!-- http_options_response>
|
||||
<http_options_response>
|
||||
<header>
|
||||
<name>Access-Control-Allow-Origin</name>
|
||||
<value>*</value>
|
||||
</header>
|
||||
<header>
|
||||
<name>Access-Control-Allow-Headers</name>
|
||||
<value>origin, x-requested-with</value>
|
||||
<value>origin, x-requested-with, x-clickhouse-format, x-clickhouse-user, x-clickhouse-key, Authorization</value>
|
||||
</header>
|
||||
<header>
|
||||
<name>Access-Control-Allow-Methods</name>
|
||||
@ -122,7 +121,7 @@
|
||||
<name>Access-Control-Max-Age</name>
|
||||
<value>86400</value>
|
||||
</header>
|
||||
</http_options_response -->
|
||||
</http_options_response>
|
||||
|
||||
<!-- It is the name that will be shown in the clickhouse-client.
|
||||
By default, anything with "production" will be highlighted in red in query prompt.
|
||||
@ -245,7 +244,7 @@
|
||||
<max_connections>4096</max_connections>
|
||||
|
||||
<!-- For 'Connection: keep-alive' in HTTP 1.1 -->
|
||||
<keep_alive_timeout>3</keep_alive_timeout>
|
||||
<keep_alive_timeout>10</keep_alive_timeout>
|
||||
|
||||
<!-- gRPC protocol (see src/Server/grpc_protos/clickhouse_grpc.proto for the API) -->
|
||||
<!-- <grpc_port>9100</grpc_port> -->
|
||||
|
@ -1,7 +0,0 @@
|
||||
<clickhouse>
|
||||
<profiles>
|
||||
<default>
|
||||
<allow_introspection_functions>1</allow_introspection_functions>
|
||||
</default>
|
||||
</profiles>
|
||||
</clickhouse>
|
1
programs/server/users.d/allow_introspection_functions.yaml
Symbolic link
@ -0,0 +1 @@
|
||||
../../../tests/config/users.d/allow_introspection_functions.yaml
|
@ -86,6 +86,13 @@
|
||||
|
||||
<!-- User can create other users and grant rights to them. -->
|
||||
<!-- <access_management>1</access_management> -->
|
||||
|
||||
<!-- User permissions can be granted here -->
|
||||
<!--
|
||||
<grants>
|
||||
<query>GRANT ALL ON *.*</query>
|
||||
</grants>
|
||||
-->
|
||||
</default>
|
||||
</users>
|
||||
|
||||
|
@ -91,6 +91,10 @@ users:
|
||||
# User can create other users and grant rights to them.
|
||||
# access_management: 1
|
||||
|
||||
# SQL expressions for grants available for that user - https://clickhouse.com/docs/en/sql-reference/statements/grant
|
||||
# grants:
|
||||
# - query: GRANT ALL ON *.*
|
||||
|
||||
# Quotas.
|
||||
quotas:
|
||||
# Name of quota.
|
||||
|
@ -84,7 +84,7 @@ public:
|
||||
}
|
||||
}
|
||||
|
||||
if (!isUnsignedInteger(arguments[1]))
|
||||
if (!isUInt(arguments[1]))
|
||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Second argument of aggregate function {} must be unsigned integer.", getName());
|
||||
|
||||
if (default_value.isNull())
|
||||
|
@ -238,7 +238,7 @@ public:
|
||||
if constexpr (has_second_arg)
|
||||
{
|
||||
assertBinary(Name::name, types);
|
||||
if (!isUnsignedInteger(types[1]))
|
||||
if (!isUInt(types[1]))
|
||||
throw Exception(
|
||||
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
|
||||
"Second argument (weight) for function {} must be unsigned integer, but it has type {}",
|
||||
|
@ -466,7 +466,7 @@ public:
|
||||
std::vector<DataSet *> data_vec;
|
||||
data_vec.resize(places.size());
|
||||
|
||||
for (unsigned long i = 0; i < data_vec.size(); i++)
|
||||
for (size_t i = 0; i < data_vec.size(); ++i)
|
||||
data_vec[i] = &this->data(places[i]).set;
|
||||
|
||||
DataSet::parallelizeMergePrepare(data_vec, thread_pool);
|
||||
|
@ -143,7 +143,6 @@ namespace
|
||||
|
||||
void registerAggregateFunctionUniqCombined(AggregateFunctionFactory & factory)
|
||||
{
|
||||
using namespace std::placeholders;
|
||||
factory.registerFunction("uniqCombined",
|
||||
[](const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings *)
|
||||
{
|
||||
|
@ -1,4 +1,4 @@
|
||||
#include <Analyzer/Passes/AnyFunctionPass.h>
|
||||
#include <Analyzer/Passes/MoveFunctionsOutOfAnyPass.h>
|
||||
|
||||
#include <AggregateFunctions/AggregateFunctionFactory.h>
|
||||
#include <AggregateFunctions/IAggregateFunction.h>
|
||||
@ -14,8 +14,80 @@ namespace DB
|
||||
namespace
|
||||
{
|
||||
|
||||
class AnyFunctionVisitor : public InDepthQueryTreeVisitorWithContext<AnyFunctionVisitor>
|
||||
class MoveFunctionsOutOfAnyVisitor : public InDepthQueryTreeVisitorWithContext<MoveFunctionsOutOfAnyVisitor>
|
||||
{
|
||||
public:
|
||||
using Base = InDepthQueryTreeVisitorWithContext<MoveFunctionsOutOfAnyVisitor>;
|
||||
using Base::Base;
|
||||
|
||||
void enterImpl(QueryTreeNodePtr & node)
|
||||
{
|
||||
if (!getSettings().optimize_move_functions_out_of_any)
|
||||
return;
|
||||
|
||||
auto * function_node = node->as<FunctionNode>();
|
||||
if (!function_node)
|
||||
return;
|
||||
|
||||
/// check function is any
|
||||
const auto & function_name = function_node->getFunctionName();
|
||||
if (function_name != "any" && function_name != "anyLast")
|
||||
return;
|
||||
|
||||
auto & arguments = function_node->getArguments().getNodes();
|
||||
if (arguments.size() != 1)
|
||||
return;
|
||||
|
||||
auto * inside_function_node = arguments[0]->as<FunctionNode>();
|
||||
|
||||
/// check argument is a function
|
||||
if (!inside_function_node)
|
||||
return;
|
||||
|
||||
/// check arguments can not contain arrayJoin or lambda
|
||||
if (!canRewrite(inside_function_node))
|
||||
return;
|
||||
|
||||
auto & inside_function_node_arguments = inside_function_node->getArguments().getNodes();
|
||||
|
||||
/// case any(f())
|
||||
if (inside_function_node_arguments.empty())
|
||||
return;
|
||||
|
||||
auto it = node_to_rewritten_node.find(node.get());
|
||||
if (it != node_to_rewritten_node.end())
|
||||
{
|
||||
node = it->second;
|
||||
return;
|
||||
}
|
||||
|
||||
/// checking done, rewrite function
|
||||
bool changed_argument = false;
|
||||
for (auto & inside_argument : inside_function_node_arguments)
|
||||
{
|
||||
if (inside_argument->as<ConstantNode>()) /// skip constant node
|
||||
break;
|
||||
|
||||
AggregateFunctionProperties properties;
|
||||
auto aggregate_function = AggregateFunctionFactory::instance().get(function_name, {inside_argument->getResultType()}, {}, properties);
|
||||
|
||||
auto any_function = std::make_shared<FunctionNode>(function_name);
|
||||
any_function->resolveAsAggregateFunction(std::move(aggregate_function));
|
||||
|
||||
auto & any_function_arguments = any_function->getArguments().getNodes();
|
||||
any_function_arguments.push_back(std::move(inside_argument));
|
||||
|
||||
inside_argument = std::move(any_function);
|
||||
changed_argument = true;
|
||||
}
|
||||
|
||||
if (changed_argument)
|
||||
{
|
||||
node_to_rewritten_node.emplace(node.get(), arguments[0]);
|
||||
node = arguments[0];
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
bool canRewrite(const FunctionNode * function_node)
|
||||
{
|
||||
@ -45,90 +117,17 @@ private:
|
||||
return true;
|
||||
}
|
||||
|
||||
public:
|
||||
using Base = InDepthQueryTreeVisitorWithContext<AnyFunctionVisitor>;
|
||||
using Base::Base;
|
||||
|
||||
void enterImpl(QueryTreeNodePtr & node)
|
||||
{
|
||||
if (!getSettings().optimize_move_functions_out_of_any)
|
||||
return;
|
||||
|
||||
auto * function_node = node->as<FunctionNode>();
|
||||
if (!function_node)
|
||||
return;
|
||||
|
||||
/// check function is any
|
||||
const auto & function_name = function_node->getFunctionName();
|
||||
if (!(function_name == "any" || function_name == "anyLast"))
|
||||
return;
|
||||
|
||||
auto & arguments = function_node->getArguments().getNodes();
|
||||
if (arguments.size() != 1)
|
||||
return;
|
||||
|
||||
auto * inside_function_node = arguments[0]->as<FunctionNode>();
|
||||
|
||||
/// check argument is a function
|
||||
if (!inside_function_node)
|
||||
return;
|
||||
|
||||
/// check arguments can not contain arrayJoin or lambda
|
||||
if (!canRewrite(inside_function_node))
|
||||
return;
|
||||
|
||||
auto & inside_arguments = inside_function_node->getArguments().getNodes();
|
||||
|
||||
/// case any(f())
|
||||
if (inside_arguments.empty())
|
||||
return;
|
||||
|
||||
if (rewritten.contains(node.get()))
|
||||
{
|
||||
node = rewritten.at(node.get());
|
||||
return;
|
||||
}
|
||||
|
||||
/// checking done, rewrite function
|
||||
bool pushed = false;
|
||||
for (auto & inside_argument : inside_arguments)
|
||||
{
|
||||
if (inside_argument->as<ConstantNode>()) /// skip constant node
|
||||
break;
|
||||
|
||||
AggregateFunctionProperties properties;
|
||||
auto aggregate_function = AggregateFunctionFactory::instance().get(function_name, {inside_argument->getResultType()}, {}, properties);
|
||||
|
||||
auto any_function = std::make_shared<FunctionNode>(function_name);
|
||||
any_function->resolveAsAggregateFunction(std::move(aggregate_function));
|
||||
|
||||
auto & any_function_arguments = any_function->getArguments().getNodes();
|
||||
any_function_arguments.push_back(std::move(inside_argument));
|
||||
|
||||
inside_argument = std::move(any_function);
|
||||
pushed = true;
|
||||
}
|
||||
|
||||
if (pushed)
|
||||
{
|
||||
rewritten.insert({node.get(), arguments[0]});
|
||||
node = arguments[0];
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
/// After query analysis alias will be rewritten to QueryTreeNode
|
||||
/// whose memory address is same with the original one.
|
||||
/// So we can reuse the rewritten one.
|
||||
std::unordered_map<IQueryTreeNode *, QueryTreeNodePtr > rewritten;
|
||||
/// After query analysis, an alias identifier will be resolved to a node whose memory address is the same as the original one.
|
||||
/// So we can reuse the rewritten function.
|
||||
std::unordered_map<IQueryTreeNode *, QueryTreeNodePtr> node_to_rewritten_node;
|
||||
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
void AnyFunctionPass::run(QueryTreeNodePtr query_tree_node, ContextPtr context)
|
||||
void MoveFunctionsOutOfAnyPass::run(QueryTreeNodePtr query_tree_node, ContextPtr context)
|
||||
{
|
||||
AnyFunctionVisitor visitor(context);
|
||||
MoveFunctionsOutOfAnyVisitor visitor(context);
|
||||
visitor.visit(query_tree_node);
|
||||
}
|
||||
|
@ -7,13 +7,13 @@ namespace DB
|
||||
|
||||
/** Rewrite 'any' and 'anyLast' functions pushing them inside original function.
|
||||
*
|
||||
* Example: any(f(x, y, g(z)))
|
||||
* Result: f(any(x), any(y), g(any(z)))
|
||||
* Example: SELECT any(f(x, y, g(z)));
|
||||
* Result: SELECT f(any(x), any(y), g(any(z)));
|
||||
*/
|
||||
class AnyFunctionPass final : public IQueryTreePass
|
||||
class MoveFunctionsOutOfAnyPass final : public IQueryTreePass
|
||||
{
|
||||
public:
|
||||
String getName() override { return "AnyFunction"; }
|
||||
String getName() override { return "MoveFunctionsOutOfAnyPass"; }
|
||||
|
||||
String getDescription() override
|
||||
{
|
@ -1467,9 +1467,15 @@ ProjectionName QueryAnalyzer::calculateFunctionProjectionName(const QueryTreeNod
|
||||
const ProjectionNames & arguments_projection_names)
|
||||
{
|
||||
const auto & function_node_typed = function_node->as<FunctionNode &>();
|
||||
const auto & function_node_name = function_node_typed.getFunctionName();
|
||||
|
||||
bool is_array_function = function_node_name == "array";
|
||||
bool is_tuple_function = function_node_name == "tuple";
|
||||
|
||||
WriteBufferFromOwnString buffer;
|
||||
buffer << function_node_typed.getFunctionName();
|
||||
|
||||
if (!is_array_function && !is_tuple_function)
|
||||
buffer << function_node_name;
|
||||
|
||||
if (!parameters_projection_names.empty())
|
||||
{
|
||||
@ -1487,7 +1493,16 @@ ProjectionName QueryAnalyzer::calculateFunctionProjectionName(const QueryTreeNod
|
||||
buffer << ')';
|
||||
}
|
||||
|
||||
buffer << '(';
|
||||
char open_bracket = '(';
|
||||
char close_bracket = ')';
|
||||
|
||||
if (is_array_function)
|
||||
{
|
||||
open_bracket = '[';
|
||||
close_bracket = ']';
|
||||
}
|
||||
|
||||
buffer << open_bracket;
|
||||
|
||||
size_t function_arguments_projection_names_size = arguments_projection_names.size();
|
||||
for (size_t i = 0; i < function_arguments_projection_names_size; ++i)
|
||||
@ -1498,7 +1513,7 @@ ProjectionName QueryAnalyzer::calculateFunctionProjectionName(const QueryTreeNod
|
||||
buffer << ", ";
|
||||
}
|
||||
|
||||
buffer << ')';
|
||||
buffer << close_bracket;
|
||||
|
||||
return buffer.str();
|
||||
}
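In SQL terms, the intent of the change above is that automatically generated column names for `array` and `tuple` literals use bracket notation instead of the function-call form when the analyzer is used; a hedged sketch:

```sql
SELECT [1, 2, 3], (1, 'a');
-- Expected column names (sketch): '[1, 2, 3]' and '(1, 'a')'
-- rather than 'array(1, 2, 3)' and 'tuple(1, 'a')'
```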
|
||||
|
@ -43,7 +43,7 @@
|
||||
#include <Analyzer/Passes/CrossToInnerJoinPass.h>
|
||||
#include <Analyzer/Passes/ShardNumColumnToFunctionPass.h>
|
||||
#include <Analyzer/Passes/ConvertQueryToCNFPass.h>
|
||||
#include <Analyzer/Passes/AnyFunctionPass.h>
|
||||
#include <Analyzer/Passes/MoveFunctionsOutOfAnyPass.h>
|
||||
#include <Analyzer/Passes/OptimizeDateOrDateTimeConverterWithPreimagePass.h>
|
||||
|
||||
|
||||
@ -164,9 +164,7 @@ private:
|
||||
*
|
||||
* TODO: Support setting optimize_substitute_columns.
|
||||
* TODO: Support GROUP BY injective function elimination.
|
||||
* TODO: Support setting optimize_move_functions_out_of_any.
|
||||
* TODO: Support setting optimize_aggregators_of_group_by_keys.
|
||||
* TODO: Support setting optimize_duplicate_order_by_and_distinct.
|
||||
* TODO: Support setting optimize_monotonous_functions_in_order_by.
|
||||
* TODO: Add optimizations based on function semantics. Example: SELECT * FROM test_table WHERE id != id. (id is not nullable column).
|
||||
*/
|
||||
@ -284,7 +282,7 @@ void addQueryTreePasses(QueryTreePassManager & manager)
|
||||
manager.addPass(std::make_unique<CrossToInnerJoinPass>());
|
||||
manager.addPass(std::make_unique<ShardNumColumnToFunctionPass>());
|
||||
|
||||
manager.addPass(std::make_unique<AnyFunctionPass>());
|
||||
manager.addPass(std::make_unique<MoveFunctionsOutOfAnyPass>());
|
||||
manager.addPass(std::make_unique<OptimizeDateOrDateTimeConverterWithPreimagePass>());
|
||||
|
||||
}
|
||||
|
@ -20,6 +20,12 @@
|
||||
|
||||
namespace fs = std::filesystem;
|
||||
|
||||
namespace ProfileEvents
|
||||
{
|
||||
extern const Event BackupEntriesCollectorMicroseconds;
|
||||
extern const Event BackupEntriesCollectorForTablesDataMicroseconds;
|
||||
extern const Event BackupEntriesCollectorRunPostTasksMicroseconds;
|
||||
}
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -82,7 +88,8 @@ BackupEntriesCollector::BackupEntriesCollector(
|
||||
const BackupSettings & backup_settings_,
|
||||
std::shared_ptr<IBackupCoordination> backup_coordination_,
|
||||
const ReadSettings & read_settings_,
|
||||
const ContextPtr & context_)
|
||||
const ContextPtr & context_,
|
||||
ThreadPool & threadpool_)
|
||||
: backup_query_elements(backup_query_elements_)
|
||||
, backup_settings(backup_settings_)
|
||||
, backup_coordination(backup_coordination_)
|
||||
@ -101,6 +108,7 @@ BackupEntriesCollector::BackupEntriesCollector(
|
||||
context->getSettingsRef().backup_restore_keeper_max_retries,
|
||||
context->getSettingsRef().backup_restore_keeper_retry_initial_backoff_ms,
|
||||
context->getSettingsRef().backup_restore_keeper_retry_max_backoff_ms)
|
||||
, threadpool(threadpool_)
|
||||
{
|
||||
}
|
||||
|
||||
@ -108,6 +116,8 @@ BackupEntriesCollector::~BackupEntriesCollector() = default;
|
||||
|
||||
BackupEntries BackupEntriesCollector::run()
|
||||
{
|
||||
auto timer = DB::CurrentThread::getProfileEvents().timer(ProfileEvents::BackupEntriesCollectorMicroseconds);
|
||||
|
||||
/// run() can be called only once.
|
||||
if (!current_stage.empty())
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Already making backup entries");
|
||||
@ -133,11 +143,19 @@ BackupEntries BackupEntriesCollector::run()
|
||||
|
||||
/// Make backup entries for the data of the found tables.
|
||||
setStage(Stage::EXTRACTING_DATA_FROM_TABLES);
|
||||
|
||||
{
|
||||
auto timer2 = DB::CurrentThread::getProfileEvents().timer(ProfileEvents::BackupEntriesCollectorForTablesDataMicroseconds);
|
||||
makeBackupEntriesForTablesData();
|
||||
}
|
||||
|
||||
/// Run all the tasks added with addPostCollectingTask().
|
||||
setStage(Stage::RUNNING_POST_TASKS);
|
||||
|
||||
{
|
||||
auto timer2 = DB::CurrentThread::getProfileEvents().timer(ProfileEvents::BackupEntriesCollectorRunPostTasksMicroseconds);
|
||||
runPostTasks();
|
||||
}
|
||||
|
||||
/// No more backup entries or tasks are allowed after this point.
|
||||
|
||||
@ -738,8 +756,20 @@ void BackupEntriesCollector::makeBackupEntriesForTablesData()
|
||||
if (backup_settings.structure_only)
|
||||
return;
|
||||
|
||||
std::vector<std::future<void>> futures;
|
||||
for (const auto & table_name : table_infos | boost::adaptors::map_keys)
|
||||
{
|
||||
futures.push_back(scheduleFromThreadPool<void>([&]()
|
||||
{
|
||||
makeBackupEntriesForTableData(table_name);
|
||||
}, threadpool, "BackupCollect"));
|
||||
}
|
||||
/// Wait for all tasks.
|
||||
for (auto & future : futures)
|
||||
future.wait();
|
||||
/// Make sure there is no exception.
|
||||
for (auto & future : futures)
|
||||
future.get();
|
||||
}
|
||||
|
||||
void BackupEntriesCollector::makeBackupEntriesForTableData(const QualifiedTableName & table_name)
|
||||
@ -775,20 +805,28 @@ void BackupEntriesCollector::makeBackupEntriesForTableData(const QualifiedTableN
|
||||
}
|
||||
}
|
||||
|
||||
void BackupEntriesCollector::addBackupEntry(const String & file_name, BackupEntryPtr backup_entry)
|
||||
void BackupEntriesCollector::addBackupEntryUnlocked(const String & file_name, BackupEntryPtr backup_entry)
|
||||
{
|
||||
if (current_stage == Stage::WRITING_BACKUP)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Adding backup entries is not allowed");
|
||||
backup_entries.emplace_back(file_name, backup_entry);
|
||||
}
|
||||
|
||||
void BackupEntriesCollector::addBackupEntry(const String & file_name, BackupEntryPtr backup_entry)
|
||||
{
|
||||
std::lock_guard lock(mutex);
|
||||
addBackupEntryUnlocked(file_name, backup_entry);
|
||||
}
|
||||
|
||||
void BackupEntriesCollector::addBackupEntry(const std::pair<String, BackupEntryPtr> & backup_entry)
|
||||
{
|
||||
addBackupEntry(backup_entry.first, backup_entry.second);
|
||||
std::lock_guard lock(mutex);
|
||||
addBackupEntryUnlocked(backup_entry.first, backup_entry.second);
|
||||
}
|
||||
|
||||
void BackupEntriesCollector::addBackupEntries(const BackupEntries & backup_entries_)
|
||||
{
|
||||
std::lock_guard lock(mutex);
|
||||
if (current_stage == Stage::WRITING_BACKUP)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Adding of backup entries is not allowed");
|
||||
insertAtEnd(backup_entries, backup_entries_);
|
||||
@ -796,6 +834,7 @@ void BackupEntriesCollector::addBackupEntries(const BackupEntries & backup_entri
|
||||
|
||||
void BackupEntriesCollector::addBackupEntries(BackupEntries && backup_entries_)
|
||||
{
|
||||
std::lock_guard lock(mutex);
|
||||
if (current_stage == Stage::WRITING_BACKUP)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Adding of backup entries is not allowed");
|
||||
insertAtEnd(backup_entries, std::move(backup_entries_));
|
||||
@ -803,6 +842,7 @@ void BackupEntriesCollector::addBackupEntries(BackupEntries && backup_entries_)
|
||||
|
||||
void BackupEntriesCollector::addPostTask(std::function<void()> task)
|
||||
{
|
||||
std::lock_guard lock(mutex);
|
||||
if (current_stage == Stage::WRITING_BACKUP)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Adding of post tasks is not allowed");
|
||||
post_tasks.push(std::move(task));
|
||||
@ -824,6 +864,7 @@ void BackupEntriesCollector::runPostTasks()
|
||||
|
||||
size_t BackupEntriesCollector::getAccessCounter(AccessEntityType type)
|
||||
{
|
||||
std::lock_guard lock(mutex);
|
||||
access_counters.resize(static_cast<size_t>(AccessEntityType::MAX));
|
||||
return access_counters[static_cast<size_t>(type)]++;
|
||||
}
|
||||
|
@ -31,7 +31,8 @@ public:
|
||||
const BackupSettings & backup_settings_,
|
||||
std::shared_ptr<IBackupCoordination> backup_coordination_,
|
||||
const ReadSettings & read_settings_,
|
||||
const ContextPtr & context_);
|
||||
const ContextPtr & context_,
|
||||
ThreadPool & threadpool_);
|
||||
~BackupEntriesCollector();
|
||||
|
||||
/// Collects backup entries and returns the result.
|
||||
@ -90,6 +91,8 @@ private:
|
||||
void makeBackupEntriesForTablesData();
|
||||
void makeBackupEntriesForTableData(const QualifiedTableName & table_name);
|
||||
|
||||
void addBackupEntryUnlocked(const String & file_name, BackupEntryPtr backup_entry);
|
||||
|
||||
void runPostTasks();
|
||||
|
||||
Strings setStage(const String & new_stage, const String & message = "");
|
||||
@ -170,6 +173,9 @@ private:
|
||||
BackupEntries backup_entries;
|
||||
std::queue<std::function<void()>> post_tasks;
|
||||
std::vector<size_t> access_counters;
|
||||
|
||||
ThreadPool & threadpool;
|
||||
std::mutex mutex;
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -215,13 +215,12 @@ BackupFileInfos buildFileInfosForBackupEntries(const BackupEntries & backup_entr
            ++num_active_jobs;
        }

        auto job = [&mutex, &num_active_jobs, &event, &exception, &infos, &backup_entries, &read_settings, &base_backup, &thread_group, i, log](bool async)
        auto job = [&mutex, &num_active_jobs, &event, &exception, &infos, &backup_entries, &read_settings, &base_backup, &thread_group, i, log]()
        {
            SCOPE_EXIT_SAFE({
                std::lock_guard lock{mutex};
                if (!--num_active_jobs)
                    event.notify_all();
                if (async)
                    CurrentThread::detachFromGroupIfNotDetached();
            });

@ -230,10 +229,9 @@ BackupFileInfos buildFileInfosForBackupEntries(const BackupEntries & backup_entr
            const auto & name = backup_entries[i].first;
            const auto & entry = backup_entries[i].second;

            if (async && thread_group)
            if (thread_group)
                CurrentThread::attachToGroup(thread_group);

            if (async)
                setThreadName("BackupWorker");

            {
@ -252,8 +250,7 @@ BackupFileInfos buildFileInfosForBackupEntries(const BackupEntries & backup_entr
            }
        };

        if (!thread_pool.trySchedule([job] { job(true); }))
            job(false);
        thread_pool.scheduleOrThrowOnError(job);
    }

    {
@ -3,6 +3,7 @@
#include <Backups/BackupFileInfo.h>
#include <Backups/BackupIO.h>
#include <Backups/IBackupEntry.h>
#include <Common/ProfileEvents.h>
#include <Common/StringUtils/StringUtils.h>
#include <base/hex.h>
#include <Common/logger_useful.h>
@ -24,6 +25,14 @@
#include <Poco/DOM/DOMParser.h>


namespace ProfileEvents
{
    extern const Event BackupsOpenedForRead;
    extern const Event BackupsOpenedForWrite;
    extern const Event BackupReadMetadataMicroseconds;
    extern const Event BackupWriteMetadataMicroseconds;
}

namespace DB
{
namespace ErrorCodes
@ -89,12 +98,14 @@ BackupImpl::BackupImpl(
    , archive_params(archive_params_)
    , open_mode(OpenMode::READ)
    , reader(std::move(reader_))
    , context(context_)
    , is_internal_backup(false)
    , version(INITIAL_BACKUP_VERSION)
    , base_backup_info(base_backup_info_)
    , use_same_s3_credentials_for_base_backup(use_same_s3_credentials_for_base_backup_)
    , log(&Poco::Logger::get("BackupImpl"))
{
    open(context_);
    open();
}


@ -115,6 +126,7 @@ BackupImpl::BackupImpl(
    , archive_params(archive_params_)
    , open_mode(OpenMode::WRITE)
    , writer(std::move(writer_))
    , context(context_)
    , is_internal_backup(is_internal_backup_)
    , coordination(coordination_)
    , uuid(backup_uuid_)
@ -124,7 +136,7 @@ BackupImpl::BackupImpl(
    , use_same_s3_credentials_for_base_backup(use_same_s3_credentials_for_base_backup_)
    , log(&Poco::Logger::get("BackupImpl"))
{
    open(context_);
    open();
}


@ -140,9 +152,11 @@ BackupImpl::~BackupImpl()
    }
}

void BackupImpl::open(const ContextPtr & context)
void BackupImpl::open()
{
    std::lock_guard lock{mutex};
    LOG_INFO(log, "{} backup: {}", ((open_mode == OpenMode::WRITE) ? "Writing" : "Reading"), backup_name_for_logging);
    ProfileEvents::increment((open_mode == OpenMode::WRITE) ? ProfileEvents::BackupsOpenedForWrite : ProfileEvents::BackupsOpenedForRead);

    if (open_mode == OpenMode::WRITE)
    {
@ -166,35 +180,8 @@ void BackupImpl::open(const ContextPtr & context)
    if (open_mode == OpenMode::READ)
        readBackupMetadata();

    if (base_backup_info)
    {
        if (use_same_s3_credentials_for_base_backup)
            backup_info.copyS3CredentialsTo(*base_backup_info);

        BackupFactory::CreateParams params;
        params.backup_info = *base_backup_info;
        params.open_mode = OpenMode::READ;
        params.context = context;
        /// use_same_s3_credentials_for_base_backup should be inherited for base backups
        params.use_same_s3_credentials_for_base_backup = use_same_s3_credentials_for_base_backup;

        base_backup = BackupFactory::instance().createBackup(params);

        if (open_mode == OpenMode::WRITE)
        {
            base_backup_uuid = base_backup->getUUID();
        }
        else if (base_backup_uuid != base_backup->getUUID())
        {
            throw Exception(
                ErrorCodes::WRONG_BASE_BACKUP,
                "Backup {}: The base backup {} has different UUID ({} != {})",
                backup_name_for_logging,
                base_backup->getNameForLogging(),
                toString(base_backup->getUUID()),
                (base_backup_uuid ? toString(*base_backup_uuid) : ""));
        }
    }
    if ((open_mode == OpenMode::WRITE) && base_backup_info)
        base_backup_uuid = getBaseBackupUnlocked()->getUUID();
}

void BackupImpl::close()
@ -239,6 +226,42 @@ void BackupImpl::closeArchive()
    archive_writer.reset();
}

std::shared_ptr<const IBackup> BackupImpl::getBaseBackup() const
{
    std::lock_guard lock{mutex};
    return getBaseBackupUnlocked();
}

std::shared_ptr<const IBackup> BackupImpl::getBaseBackupUnlocked() const
{
    if (!base_backup && base_backup_info)
    {
        if (use_same_s3_credentials_for_base_backup)
            backup_info.copyS3CredentialsTo(*base_backup_info);

        BackupFactory::CreateParams params;
        params.backup_info = *base_backup_info;
        params.open_mode = OpenMode::READ;
        params.context = context;
        /// use_same_s3_credentials_for_base_backup should be inherited for base backups
        params.use_same_s3_credentials_for_base_backup = use_same_s3_credentials_for_base_backup;

        base_backup = BackupFactory::instance().createBackup(params);

        if ((open_mode == OpenMode::READ) && (base_backup_uuid != base_backup->getUUID()))
        {
            throw Exception(
                ErrorCodes::WRONG_BASE_BACKUP,
                "Backup {}: The base backup {} has different UUID ({} != {})",
                backup_name_for_logging,
                base_backup->getNameForLogging(),
                toString(base_backup->getUUID()),
                (base_backup_uuid ? toString(*base_backup_uuid) : ""));
        }
    }
    return base_backup;
}

size_t BackupImpl::getNumFiles() const
{
    std::lock_guard lock{mutex};
@ -289,8 +312,10 @@ UInt64 BackupImpl::getNumReadBytes() const

void BackupImpl::writeBackupMetadata()
{
    assert(!is_internal_backup);
    LOG_TRACE(log, "Backup {}: Writing metadata", backup_name_for_logging);
    auto timer = DB::CurrentThread::getProfileEvents().timer(ProfileEvents::BackupWriteMetadataMicroseconds);

    assert(!is_internal_backup);
    checkLockFile(true);

    std::unique_ptr<WriteBuffer> out;
@ -374,11 +399,16 @@ void BackupImpl::writeBackupMetadata()
    out->finalize();

    uncompressed_size = size_of_entries + out->count();

    LOG_TRACE(log, "Backup {}: Metadata was written", backup_name_for_logging);
}


void BackupImpl::readBackupMetadata()
{
    LOG_TRACE(log, "Backup {}: Reading metadata", backup_name_for_logging);
    auto timer = DB::CurrentThread::getProfileEvents().timer(ProfileEvents::BackupReadMetadataMicroseconds);

    using namespace XMLUtils;

    std::unique_ptr<ReadBuffer> in;
@ -482,6 +512,8 @@ void BackupImpl::readBackupMetadata()
    compressed_size = uncompressed_size;
    if (!use_archive)
        setCompressedSize();

    LOG_TRACE(log, "Backup {}: Metadata was read", backup_name_for_logging);
}

void BackupImpl::checkBackupDoesntExist() const
@ -705,7 +737,8 @@ std::unique_ptr<SeekableReadBuffer> BackupImpl::readFileImpl(const SizeAndChecks
    if (info.base_size)
    {
        /// Make `base_read_buffer` if there is data for this backup entry in the base backup.
        if (!base_backup)
        auto base = getBaseBackup();
        if (!base)
        {
            throw Exception(
                ErrorCodes::NO_BASE_BACKUP,
@ -713,7 +746,7 @@ std::unique_ptr<SeekableReadBuffer> BackupImpl::readFileImpl(const SizeAndChecks
                backup_name_for_logging, formatSizeAndChecksum(size_and_checksum));
        }

        if (!base_backup->fileExists(std::pair(info.base_size, info.base_checksum)))
        if (!base->fileExists(std::pair(info.base_size, info.base_checksum)))
        {
            throw Exception(
                ErrorCodes::WRONG_BASE_BACKUP,
@ -721,7 +754,7 @@ std::unique_ptr<SeekableReadBuffer> BackupImpl::readFileImpl(const SizeAndChecks
                backup_name_for_logging, formatSizeAndChecksum(size_and_checksum));
        }

        base_read_buffer = base_backup->readFile(std::pair{info.base_size, info.base_checksum});
        base_read_buffer = base->readFile(std::pair{info.base_size, info.base_checksum});
    }

    {
@ -809,7 +842,7 @@ size_t BackupImpl::copyFileToDisk(const SizeAndChecksum & size_and_checksum,
    else if (info.size && (info.size == info.base_size))
    {
        /// Data comes completely from the base backup (nothing comes from this backup).
        base_backup->copyFileToDisk(std::pair{info.base_size, info.base_checksum}, destination_disk, destination_path, write_mode);
        getBaseBackup()->copyFileToDisk(std::pair{info.base_size, info.base_checksum}, destination_disk, destination_path, write_mode);
        file_copied = true;
    }

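The refactoring above replaces eager opening of the base backup with lazy creation: getBaseBackup() takes the mutex and getBaseBackupUnlocked() constructs the object only on first use. A minimal sketch of that lazy-initialization-under-mutex idiom (hypothetical types, not the actual BackupImpl code):

#include <memory>
#include <mutex>

struct Resource { /* expensive to open */ };

class Holder
{
public:
    std::shared_ptr<const Resource> get() const
    {
        std::lock_guard lock(mutex);
        return getUnlocked();
    }

private:
    std::shared_ptr<const Resource> getUnlocked() const
    {
        if (!resource)
            resource = std::make_shared<Resource>();   /// created only on first access
        return resource;
    }

    mutable std::shared_ptr<const Resource> resource;  /// mutable, like base_backup in the diff
    mutable std::mutex mutex;
};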
@ -60,7 +60,7 @@ public:
    OpenMode getOpenMode() const override { return open_mode; }
    time_t getTimestamp() const override { return timestamp; }
    UUID getUUID() const override { return *uuid; }
    BackupPtr getBaseBackup() const override { return base_backup; }
    BackupPtr getBaseBackup() const override;
    size_t getNumFiles() const override;
    UInt64 getTotalSize() const override;
    size_t getNumEntries() const override;
@ -85,7 +85,7 @@ public:
    bool supportsWritingInMultipleThreads() const override { return !use_archive; }

private:
    void open(const ContextPtr & context);
    void open();
    void close();

    void openArchive();
@ -95,6 +95,9 @@ private:
    void writeBackupMetadata() TSA_REQUIRES(mutex);
    void readBackupMetadata() TSA_REQUIRES(mutex);

    /// Returns the base backup or null if there is no base backup.
    std::shared_ptr<const IBackup> getBaseBackupUnlocked() const TSA_REQUIRES(mutex);

    /// Checks that a new backup doesn't exist yet.
    void checkBackupDoesntExist() const;

@ -118,6 +121,7 @@ private:
    const OpenMode open_mode;
    std::shared_ptr<IBackupWriter> writer;
    std::shared_ptr<IBackupReader> reader;
    const ContextPtr context;
    const bool is_internal_backup;
    std::shared_ptr<IBackupCoordination> coordination;

@ -138,8 +142,8 @@ private:
    mutable size_t num_read_files = 0;
    mutable UInt64 num_read_bytes = 0;
    int version;
    std::optional<BackupInfo> base_backup_info;
    std::shared_ptr<const IBackup> base_backup;
    mutable std::optional<BackupInfo> base_backup_info;
    mutable std::shared_ptr<const IBackup> base_backup;
    std::optional<UUID> base_backup_uuid;
    std::shared_ptr<IArchiveReader> archive_reader;
    std::shared_ptr<IArchiveWriter> archive_writer;
@ -1,6 +1,7 @@
#pragma once

#include <Backups/BackupStatus.h>
#include <Common/ProfileEvents.h>

namespace DB
{
@ -47,6 +48,9 @@ struct BackupOperationInfo
    std::exception_ptr exception;
    String error_message;

    /// Profile events collected during the backup.
    std::shared_ptr<ProfileEvents::Counters::Snapshot> profile_counters = nullptr;

    std::chrono::system_clock::time_point start_time;
    std::chrono::system_clock::time_point end_time;
};
@ -218,42 +218,145 @@ namespace
}


BackupsWorker::BackupsWorker(
    ContextPtr global_context,
    size_t num_backup_threads,
    size_t num_restore_threads,
    bool allow_concurrent_backups_,
    bool allow_concurrent_restores_)
    : backups_thread_pool(std::make_unique<ThreadPool>(
        CurrentMetrics::BackupsThreads,
        CurrentMetrics::BackupsThreadsActive,
        num_backup_threads,
        /* max_free_threads = */ 0,
        num_backup_threads))
    , restores_thread_pool(std::make_unique<ThreadPool>(
        CurrentMetrics::RestoreThreads,
        CurrentMetrics::RestoreThreadsActive,
        num_restore_threads,
        /* max_free_threads = */ 0,
        num_restore_threads))
    , backup_async_executor_pool(std::make_unique<ThreadPool>(
        CurrentMetrics::BackupsThreads,
        CurrentMetrics::BackupsThreadsActive,
        num_backup_threads,
        num_backup_threads,
        num_backup_threads))
    , restore_async_executor_pool(std::make_unique<ThreadPool>(
        CurrentMetrics::RestoreThreads,
        CurrentMetrics::RestoreThreadsActive,
        num_restore_threads,
        num_restore_threads,
        num_restore_threads))
    , log(&Poco::Logger::get("BackupsWorker"))
/// We have to use multiple thread pools because
/// 1) there should be separate thread pools for BACKUP and RESTORE;
/// 2) a task from a thread pool can't wait another task from the same thread pool. (Because if it schedules and waits
/// while the thread pool is still occupied with the waiting task then a scheduled task can be never executed).
enum class BackupsWorker::ThreadPoolId
{
    /// "BACKUP ON CLUSTER ASYNC" waits in background while "BACKUP ASYNC" is finished on the nodes of the cluster, then finalizes the backup.
    BACKUP_ASYNC_ON_CLUSTER,

    /// "BACKUP ASYNC" waits in background while all file infos are built and then it copies the backup's files.
    BACKUP_ASYNC,

    /// Making a list of files to copy and copying of those files is always sequential, so those operations can share one thread pool.
    BACKUP_MAKE_FILES_LIST,
    BACKUP_COPY_FILES = BACKUP_MAKE_FILES_LIST,

    /// "RESTORE ON CLUSTER ASYNC" waits in background while "BACKUP ASYNC" is finished on the nodes of the cluster, then finalizes the backup.
    RESTORE_ASYNC_ON_CLUSTER,

    /// "RESTORE ASYNC" waits in background while the data of all tables are restored.
    RESTORE_ASYNC,

    /// Restores the data of tables.
    RESTORE_TABLES_DATA,
};

/// Keeps thread pools for BackupsWorker.
class BackupsWorker::ThreadPools
{
public:
    ThreadPools(size_t num_backup_threads_, size_t num_restore_threads_)
        : num_backup_threads(num_backup_threads_), num_restore_threads(num_restore_threads_)
    {
    }

    /// Returns a thread pool, creates it if it's not created yet.
    ThreadPool & getThreadPool(ThreadPoolId thread_pool_id)
    {
        std::lock_guard lock{mutex};
        auto it = thread_pools.find(thread_pool_id);
        if (it != thread_pools.end())
            return *it->second;

        CurrentMetrics::Metric metric_threads;
        CurrentMetrics::Metric metric_active_threads;
        size_t max_threads = 0;

        /// What to do with a new job if a corresponding thread pool is already running `max_threads` jobs:
        /// `use_queue == true` - put into the thread pool's queue,
        /// `use_queue == false` - schedule() should wait until some of the jobs finish.
        bool use_queue = false;

        switch (thread_pool_id)
        {
            case ThreadPoolId::BACKUP_ASYNC:
            case ThreadPoolId::BACKUP_ASYNC_ON_CLUSTER:
            case ThreadPoolId::BACKUP_COPY_FILES:
            {
                metric_threads = CurrentMetrics::BackupsThreads;
                metric_active_threads = CurrentMetrics::BackupsThreadsActive;
                max_threads = num_backup_threads;
                /// We don't use thread pool queues for thread pools with a lot of tasks otherwise that queue could be memory-wasting.
                use_queue = (thread_pool_id != ThreadPoolId::BACKUP_COPY_FILES);
                break;
            }

            case ThreadPoolId::RESTORE_ASYNC:
            case ThreadPoolId::RESTORE_ASYNC_ON_CLUSTER:
            case ThreadPoolId::RESTORE_TABLES_DATA:
            {
                metric_threads = CurrentMetrics::RestoreThreads;
                metric_active_threads = CurrentMetrics::RestoreThreadsActive;
                max_threads = num_restore_threads;
                use_queue = (thread_pool_id != ThreadPoolId::RESTORE_TABLES_DATA);
                break;
            }
        }

        /// We set max_free_threads = 0 because we don't want to keep any threads if there is no BACKUP or RESTORE query running right now.
        chassert(max_threads != 0);
        size_t max_free_threads = 0;
        size_t queue_size = use_queue ? 0 : max_threads;
        auto thread_pool = std::make_unique<ThreadPool>(metric_threads, metric_active_threads, max_threads, max_free_threads, queue_size);
        auto * thread_pool_ptr = thread_pool.get();
        thread_pools.emplace(thread_pool_id, std::move(thread_pool));
        return *thread_pool_ptr;
    }

    /// Waits for all threads to finish.
    void wait()
    {
        auto wait_sequence = {
            ThreadPoolId::RESTORE_ASYNC_ON_CLUSTER,
            ThreadPoolId::RESTORE_ASYNC,
            ThreadPoolId::RESTORE_TABLES_DATA,
            ThreadPoolId::BACKUP_ASYNC_ON_CLUSTER,
            ThreadPoolId::BACKUP_ASYNC,
            ThreadPoolId::BACKUP_COPY_FILES,
        };

        for (auto thread_pool_id : wait_sequence)
        {
            ThreadPool * thread_pool = nullptr;
            {
                std::lock_guard lock{mutex};
                auto it = thread_pools.find(thread_pool_id);
                if (it != thread_pools.end())
                    thread_pool = it->second.get();
            }
            if (thread_pool)
                thread_pool->wait();
        }
    }

private:
    const size_t num_backup_threads;
    const size_t num_restore_threads;
    std::map<ThreadPoolId, std::unique_ptr<ThreadPool>> thread_pools TSA_GUARDED_BY(mutex);
    std::mutex mutex;
};

BackupsWorker::BackupsWorker(ContextPtr global_context, size_t num_backup_threads, size_t num_restore_threads, bool allow_concurrent_backups_, bool allow_concurrent_restores_)
    : thread_pools(std::make_unique<ThreadPools>(num_backup_threads, num_restore_threads))
    , allow_concurrent_backups(allow_concurrent_backups_)
    , allow_concurrent_restores(allow_concurrent_restores_)
    , log(&Poco::Logger::get("BackupsWorker"))
{
    backup_log = global_context->getBackupLog();
    /// We set max_free_threads = 0 because we don't want to keep any threads if there is no BACKUP or RESTORE query running right now.
}


BackupsWorker::~BackupsWorker() = default;


ThreadPool & BackupsWorker::getThreadPool(ThreadPoolId thread_pool_id)
{
    return thread_pools->getThreadPool(thread_pool_id);
}

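The new ThreadPools helper replaces the fixed member pools with pools that are created on first request for a given ThreadPoolId and then looked up under a mutex. A condensed sketch of that create-on-first-use map pattern (hypothetical names and a placeholder Pool type, not the actual ClickHouse ThreadPool):

#include <map>
#include <memory>
#include <mutex>

enum class PoolId { BACKUP, RESTORE };

struct Pool { explicit Pool(size_t max_threads_) : max_threads(max_threads_) {} size_t max_threads; };

class Pools
{
public:
    /// Returns the pool for `id`, creating it lazily (same idea as ThreadPools::getThreadPool above).
    Pool & get(PoolId id, size_t max_threads)
    {
        std::lock_guard lock(mutex);
        auto it = pools.find(id);
        if (it != pools.end())
            return *it->second;
        return *pools.emplace(id, std::make_unique<Pool>(max_threads)).first->second;
    }

private:
    std::map<PoolId, std::unique_ptr<Pool>> pools;
    std::mutex mutex;
};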
@ -313,16 +416,9 @@ OperationID BackupsWorker::startMakingBackup(const ASTPtr & query, const Context

    if (backup_settings.async)
    {
        backup_async_executor_pool->scheduleOrThrowOnError(
            [this,
             backup_query,
             backup_id,
             backup_name_for_logging,
             backup_info,
             backup_settings,
             backup_coordination,
             context_in_use,
             mutable_context]
        auto & thread_pool = getThreadPool(on_cluster ? ThreadPoolId::BACKUP_ASYNC_ON_CLUSTER : ThreadPoolId::BACKUP_ASYNC);
        thread_pool.scheduleOrThrowOnError(
            [this, backup_query, backup_id, backup_name_for_logging, backup_info, backup_settings, backup_coordination, context_in_use, mutable_context]
            {
                doBackup(
                    backup_query,
@ -454,7 +550,9 @@ void BackupsWorker::doBackup(
    /// Prepare backup entries.
    BackupEntries backup_entries;
    {
        BackupEntriesCollector backup_entries_collector{backup_query->elements, backup_settings, backup_coordination, backup_create_params.read_settings, context};
        BackupEntriesCollector backup_entries_collector(
            backup_query->elements, backup_settings, backup_coordination,
            backup_create_params.read_settings, context, getThreadPool(ThreadPoolId::BACKUP_MAKE_FILES_LIST));
        backup_entries = backup_entries_collector.run();
    }

@ -515,7 +613,7 @@ void BackupsWorker::buildFileInfosForBackupEntries(const BackupPtr & backup, con
    LOG_TRACE(log, "{}", Stage::BUILDING_FILE_INFOS);
    backup_coordination->setStage(Stage::BUILDING_FILE_INFOS, "");
    backup_coordination->waitForStage(Stage::BUILDING_FILE_INFOS);
    backup_coordination->addFileInfos(::DB::buildFileInfosForBackupEntries(backup_entries, backup->getBaseBackup(), read_settings, *backups_thread_pool));
    backup_coordination->addFileInfos(::DB::buildFileInfosForBackupEntries(backup_entries, backup->getBaseBackup(), read_settings, getThreadPool(ThreadPoolId::BACKUP_MAKE_FILES_LIST)));
}

@ -541,6 +639,7 @@ void BackupsWorker::writeBackupEntries(BackupMutablePtr backup, BackupEntries &&
    std::exception_ptr exception;

    bool always_single_threaded = !backup->supportsWritingInMultipleThreads();
    auto & thread_pool = getThreadPool(ThreadPoolId::BACKUP_COPY_FILES);
    auto thread_group = CurrentThread::getGroup();

    for (size_t i = 0; i != backup_entries.size(); ++i)
@ -608,7 +707,7 @@ void BackupsWorker::writeBackupEntries(BackupMutablePtr backup, BackupEntries &&
            continue;
        }

        backups_thread_pool->scheduleOrThrowOnError([job] { job(true); });
        thread_pool.scheduleOrThrowOnError([job] { job(true); });
    }

    {
@ -666,15 +765,9 @@ OperationID BackupsWorker::startRestoring(const ASTPtr & query, ContextMutablePt

    if (restore_settings.async)
    {
        restore_async_executor_pool->scheduleOrThrowOnError(
            [this,
             restore_query,
             restore_id,
             backup_name_for_logging,
             backup_info,
             restore_settings,
             restore_coordination,
             context_in_use]
        auto & thread_pool = getThreadPool(on_cluster ? ThreadPoolId::RESTORE_ASYNC_ON_CLUSTER : ThreadPoolId::RESTORE_ASYNC);
        thread_pool.scheduleOrThrowOnError(
            [this, restore_query, restore_id, backup_name_for_logging, backup_info, restore_settings, restore_coordination, context_in_use]
            {
                doRestore(
                    restore_query,
@ -818,7 +911,7 @@ void BackupsWorker::doRestore(
    }

    /// Execute the data restoring tasks.
    restoreTablesData(restore_id, backup, std::move(data_restore_tasks), *restores_thread_pool);
    restoreTablesData(restore_id, backup, std::move(data_restore_tasks), getThreadPool(ThreadPoolId::RESTORE_TABLES_DATA));

    /// We have restored everything, we need to tell other hosts (they could be waiting for it).
    restore_coordination->setStage(Stage::COMPLETED, "");
@ -863,22 +956,20 @@ void BackupsWorker::restoreTablesData(const OperationID & restore_id, BackupPtr
            ++num_active_jobs;
        }

        auto job = [&](bool async)
        auto job = [&]()
        {
            SCOPE_EXIT_SAFE(
                std::lock_guard lock{mutex};
                if (!--num_active_jobs)
                    event.notify_all();
                if (async)
                    CurrentThread::detachFromGroupIfNotDetached();
            );

            try
            {
                if (async && thread_group)
                if (thread_group)
                    CurrentThread::attachToGroup(thread_group);

                if (async)
                    setThreadName("RestoreWorker");

                {
@ -906,7 +997,7 @@ void BackupsWorker::restoreTablesData(const OperationID & restore_id, BackupPtr
            }
        };

        thread_pool.scheduleOrThrowOnError([job] { job(true); });
        thread_pool.scheduleOrThrowOnError(job);
    }

    {
@ -967,6 +1058,7 @@ void BackupsWorker::setStatus(const String & id, BackupStatus status, bool throw
    auto old_status = info.status;

    info.status = status;
    info.profile_counters = std::make_shared<ProfileEvents::Counters::Snapshot>(CurrentThread::getProfileEvents().getPartiallyAtomicSnapshot());

    if (isFinalStatus(status))
        info.end_time = std::chrono::system_clock::now();
@ -1049,10 +1141,7 @@ void BackupsWorker::shutdown()
    if (has_active_backups_and_restores)
        LOG_INFO(log, "Waiting for {} backups and {} restores to be finished", num_active_backups, num_active_restores);

    backups_thread_pool->wait();
    restores_thread_pool->wait();
    backup_async_executor_pool->wait();
    restore_async_executor_pool->wait();
    thread_pools->wait();

    if (has_active_backups_and_restores)
        LOG_INFO(log, "All backup and restore tasks have finished");
@ -33,6 +33,7 @@ class BackupsWorker
{
public:
    BackupsWorker(ContextPtr global_context, size_t num_backup_threads, size_t num_restore_threads, bool allow_concurrent_backups_, bool allow_concurrent_restores_);
    ~BackupsWorker();

    /// Waits until all tasks have been completed.
    void shutdown();
@ -88,11 +89,15 @@ private:
    void setNumFilesAndSize(const BackupOperationID & id, size_t num_files, UInt64 total_size, size_t num_entries,
                            UInt64 uncompressed_size, UInt64 compressed_size, size_t num_read_files, UInt64 num_read_bytes);

    std::unique_ptr<ThreadPool> backups_thread_pool;
    std::unique_ptr<ThreadPool> restores_thread_pool;
    enum class ThreadPoolId;
    ThreadPool & getThreadPool(ThreadPoolId thread_pool_id);

    std::unique_ptr<ThreadPool> backup_async_executor_pool;
    std::unique_ptr<ThreadPool> restore_async_executor_pool;
    class ThreadPools;
    std::unique_ptr<ThreadPools> thread_pools;

    const bool allow_concurrent_backups;
    const bool allow_concurrent_restores;
    Poco::Logger * log;

    std::unordered_map<BackupOperationID, BackupOperationInfo> infos;
    std::shared_ptr<BackupLog> backup_log;
@ -100,9 +105,6 @@ private:
    std::atomic<size_t> num_active_backups = 0;
    std::atomic<size_t> num_active_restores = 0;
    mutable std::mutex infos_mutex;
    Poco::Logger * log;
    const bool allow_concurrent_backups;
    const bool allow_concurrent_restores;
};

}
@ -43,7 +43,7 @@ public:
    /// Returns UUID of the backup.
    virtual UUID getUUID() const = 0;

    /// Returns the base backup (can be null).
    /// Returns the base backup or null if there is no base backup.
    virtual std::shared_ptr<const IBackup> getBaseBackup() const = 0;

    /// Returns the number of files stored in the backup. Compare with getNumEntries().
@ -89,6 +89,17 @@ add_headers_and_sources(clickhouse_common_io IO/Resource)
add_headers_and_sources(clickhouse_common_io IO/S3)
list (REMOVE_ITEM clickhouse_common_io_sources Common/malloc.cpp Common/new_delete.cpp)


add_headers_and_sources(clickhouse_compression Compression)
add_headers_and_sources(clickhouse_compression Parsers)
add_headers_and_sources(clickhouse_compression Core)
#Included these specific files to avoid linking grpc
add_glob(clickhouse_compression_headers Server/ServerType.h)
add_glob(clickhouse_compression_sources Server/ServerType.cpp)
add_headers_and_sources(clickhouse_compression Common/SSH)
add_library(clickhouse_compression ${clickhouse_compression_headers} ${clickhouse_compression_sources})


add_headers_and_sources(dbms Disks/IO)
add_headers_and_sources(dbms Disks/ObjectStorages)
if (TARGET ch_contrib::sqlite)
@ -270,6 +281,7 @@ target_include_directories (clickhouse_common_io PUBLIC "${ClickHouse_SOURCE_DIR

if (TARGET ch_contrib::llvm)
    dbms_target_link_libraries (PUBLIC ch_contrib::llvm)
    target_link_libraries (clickhouse_compression PUBLIC ch_contrib::llvm)
endif ()

if (TARGET ch_contrib::gwp_asan)
@ -293,6 +305,18 @@ target_link_libraries (clickhouse_common_io
        common
        ch_contrib::double_conversion
        ch_contrib::dragonbox_to_chars
        ch_contrib::libdivide
)


target_link_libraries (clickhouse_compression
    PUBLIC
        string_utils
        pcg_random
        clickhouse_parsers
    PRIVATE
        ch_contrib::lz4
        ch_contrib::roaring
)

# Use X86 AVX2/AVX512 instructions to accelerate filter operations
@ -336,6 +360,7 @@ if (TARGET ch_contrib::crc32-vpmsum)

if (TARGET ch_contrib::ssh)
    target_link_libraries(clickhouse_common_io PUBLIC ch_contrib::ssh)
    target_link_libraries(clickhouse_compression PUBLIC ch_contrib::ssh)
endif()

dbms_target_link_libraries(PUBLIC ch_contrib::abseil_swiss_tables)
@ -359,10 +384,12 @@ endif()

if (TARGET ch_contrib::krb5)
    dbms_target_link_libraries(PRIVATE ch_contrib::krb5)
    target_link_libraries (clickhouse_compression PRIVATE ch_contrib::krb5)
endif()

if (TARGET ch_contrib::nuraft)
    dbms_target_link_libraries(PUBLIC ch_contrib::nuraft)
    target_link_libraries (clickhouse_compression PUBLIC ch_contrib::nuraft)
endif()

dbms_target_link_libraries (
@ -432,6 +459,7 @@ endif ()

if (TARGET ch_contrib::ldap)
    dbms_target_link_libraries (PRIVATE ch_contrib::ldap ch_contrib::lber)
    target_link_libraries (clickhouse_compression PRIVATE ch_contrib::ldap ch_contrib::lber)
endif ()
dbms_target_link_libraries (PUBLIC ch_contrib::sparsehash)
@ -30,7 +30,8 @@ ConnectionParameters::ConnectionParameters(const Poco::Util::AbstractConfigurati
    , port(connection_port.value_or(getPortFromConfig(config)))
{
    bool is_secure = config.getBool("secure", false);
    security = is_secure ? Protocol::Secure::Enable : Protocol::Secure::Disable;
    bool is_clickhouse_cloud = connection_host.ends_with(".clickhouse.cloud") || connection_host.ends_with(".clickhouse-staging.com");
    security = (is_secure || is_clickhouse_cloud) ? Protocol::Secure::Enable : Protocol::Secure::Disable;

    default_database = config.getString("database", "");
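The client change above enables the secure protocol automatically when the target host looks like a ClickHouse Cloud endpoint. A standalone sketch of that host-suffix check (hypothetical helper name, not the actual ConnectionParameters code):

#include <string>

/// Returns true if the host should default to a secure (TLS) connection.
static bool isCloudHost(const std::string & host)
{
    auto ends_with = [&](const std::string & suffix)
    {
        return host.size() >= suffix.size() && host.compare(host.size() - suffix.size(), suffix.size(), suffix) == 0;
    };
    return ends_with(".clickhouse.cloud") || ends_with(".clickhouse-staging.com");
}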
@ -131,7 +131,7 @@ void LocalConnection::sendQuery(

    try
    {
        state->io = executeQuery(state->query, query_context, false, state->stage).second;
        state->io = executeQuery(state->query, query_context, QueryFlags{}, state->stage).second;

        if (state->io.pipeline.pushing())
        {
@ -293,7 +293,6 @@ ReplxxLineReader::ReplxxLineReader(
    , word_break_characters(word_break_characters_)
    , editor(getEditor())
{
    using namespace std::placeholders;
    using Replxx = replxx::Replxx;

    if (!history_file_path.empty())
@ -6,9 +6,7 @@
#include <Common/HashTable/Hash.h>
#include <Common/RadixSort.h>

#include <base/unaligned.h>
#include <base/sort.h>
#include <base/scope_guard.h>

#include <IO/WriteHelpers.h>

@ -20,8 +18,6 @@
#include <Processors/Transforms/ColumnGathererTransform.h>


template <typename T> bool decimalLess(T x, T y, UInt32 x_scale, UInt32 y_scale);

namespace DB
{

@ -41,12 +41,6 @@ namespace ErrorCodes
void abortOnFailedAssertion(const String & description)
{
    LOG_FATAL(&Poco::Logger::root(), "Logical error: '{}'.", description);

    /// This is to suppress -Wmissing-noreturn
    volatile bool always_false = false;
    if (always_false)
        return;

    abort();
}

@ -19,7 +19,7 @@ namespace Poco { class Logger; }
namespace DB
{

void abortOnFailedAssertion(const String & description);
[[noreturn]] void abortOnFailedAssertion(const String & description);

/// This flag can be set for testing purposes - to check that no exceptions are thrown.
extern bool terminate_on_any_exception;
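Marking the declaration [[noreturn]] is what lets the definition above drop the volatile always_false workaround for -Wmissing-noreturn. A minimal illustration of the attribute (hypothetical function name, not the ClickHouse code):

#include <cstdlib>

[[noreturn]] void fatalError(const char * /*message*/)
{
    /// The attribute tells the compiler that control never returns,
    /// so no artificial return path is needed to silence warnings.
    std::abort();
}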
68
src/Common/ObjectStorageKey.cpp
Normal file
@ -0,0 +1,68 @@
#include "ObjectStorageKey.h"

#include <Common/Exception.h>

#include <filesystem>

namespace fs = std::filesystem;

namespace DB
{

namespace ErrorCodes
{
    extern const int LOGICAL_ERROR;
}

const String & ObjectStorageKey::getPrefix() const
{
    if (!is_relative)
        throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "object key has no prefix, key: {}", key);

    return prefix;
}

const String & ObjectStorageKey::getSuffix() const
{
    if (!is_relative)
        throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "object key has no suffix, key: {}", key);
    return suffix;
}

const String & ObjectStorageKey::serialize() const
{
    return key;
}

ObjectStorageKey ObjectStorageKey::createAsRelative(String key_)
{
    ObjectStorageKey object_key;
    object_key.suffix = std::move(key_);
    object_key.key = object_key.suffix;
    object_key.is_relative = true;
    return object_key;
}

ObjectStorageKey ObjectStorageKey::createAsRelative(String prefix_, String suffix_)
{
    ObjectStorageKey object_key;
    object_key.prefix = std::move(prefix_);
    object_key.suffix = std::move(suffix_);

    if (object_key.prefix.empty())
        object_key.key = object_key.suffix;
    else
        object_key.key = fs::path(object_key.prefix) / object_key.suffix;

    object_key.is_relative = true;
    return object_key;
}

ObjectStorageKey ObjectStorageKey::createAsAbsolute(String key_)
{
    ObjectStorageKey object_key;
    object_key.key = std::move(key_);
    object_key.is_relative = false;
    return object_key;
}
}
29
src/Common/ObjectStorageKey.h
Normal file
@ -0,0 +1,29 @@
#pragma once

#include <base/types.h>

#include <memory>

namespace DB
{
struct ObjectStorageKey
{
    ObjectStorageKey() = default;

    bool hasPrefix() const { return is_relative; }
    const String & getPrefix() const;
    const String & getSuffix() const;
    const String & serialize() const;

    static ObjectStorageKey createAsRelative(String prefix_, String suffix_);
    static ObjectStorageKey createAsRelative(String key_);
    static ObjectStorageKey createAsAbsolute(String key_);

private:
    String prefix;
    String suffix;
    String key;
    bool is_relative = false;
};

}
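A short usage sketch for the new ObjectStorageKey class (illustrative only; the paths are made up and the include assumes the repository's normal include path): relative keys carry a prefix/suffix pair, absolute keys are stored verbatim, and serialize() returns the full key in both cases.

#include <Common/ObjectStorageKey.h>
#include <iostream>

int main()
{
    auto relative = DB::ObjectStorageKey::createAsRelative("data/store", "abc/def.bin");
    auto absolute = DB::ObjectStorageKey::createAsAbsolute("bucket-root/raw/object.bin");

    std::cout << relative.serialize() << '\n';   // "data/store/abc/def.bin"
    std::cout << relative.getPrefix() << '\n';   // "data/store"
    std::cout << absolute.serialize() << '\n';   // "bucket-root/raw/object.bin"
    // absolute.getPrefix() would throw LOGICAL_ERROR because an absolute key has no prefix.
    return 0;
}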