Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-24 08:32:02 +00:00)

Merge branch 'master' into s3_optimize
Commit: d5eece7408

.github/workflows/backport_branches.yml (vendored): 208 changed lines
@@ -12,11 +12,10 @@ jobs:
PythonUnitTests:
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Python unit tests
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
@@ -24,34 +23,32 @@ jobs:
DockerHubPushAarch64:
runs-on: [self-hosted, style-checker-aarch64]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
DockerHubPushAmd64:
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix amd64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
@@ -59,18 +56,17 @@ jobs:
needs: [DockerHubPushAmd64, DockerHubPushAarch64]
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Download changed aarch64 images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}
- name: Download changed amd64 images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}
@@ -79,7 +75,7 @@ jobs:
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: changed_images
path: ${{ runner.temp }}/changed_images.json
@@ -94,13 +90,12 @@ jobs:
REPO_COPY=${{runner.temp}}/compatibility_check/ClickHouse
REPORTS_PATH=${{runner.temp}}/reports_dir
EOF
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Download json reports
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: CompatibilityCheck
@@ -132,28 +127,25 @@ jobs:
BUILD_NAME=package_release
EOF
- name: Download changed images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
fetch-depth: 0 # For a proper version and performance artifacts
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
@@ -177,28 +169,25 @@ jobs:
BUILD_NAME=package_aarch64
EOF
- name: Download changed images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
fetch-depth: 0 # For a proper version and performance artifacts
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
@@ -222,26 +211,24 @@ jobs:
BUILD_NAME=package_asan
EOF
- name: Download changed images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
@@ -265,26 +252,24 @@ jobs:
BUILD_NAME=package_tsan
EOF
- name: Download changed images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
@@ -308,26 +293,24 @@ jobs:
BUILD_NAME=package_debug
EOF
- name: Download changed images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
@@ -351,28 +334,25 @@ jobs:
BUILD_NAME=binary_darwin
EOF
- name: Download changed images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
fetch-depth: 0 # otherwise we will have no info about contributors
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
@@ -396,28 +376,25 @@ jobs:
BUILD_NAME=binary_darwin_aarch64
EOF
- name: Download changed images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
fetch-depth: 0 # otherwise we will have no info about contributors
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
@@ -436,12 +413,10 @@ jobs:
- BuilderDebAarch64
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0 # It MUST BE THE SAME for all dependencies and the job itself
- name: Check docker clickhouse/clickhouse-server building
run: |
@@ -477,14 +452,13 @@ jobs:
NEEDS_DATA_PATH=${{runner.temp}}/needs.json
EOF
- name: Download json reports
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Report Builder
run: |
sudo rm -fr "$TEMP_PATH"
@@ -516,14 +490,13 @@ jobs:
NEEDS_DATA_PATH=${{runner.temp}}/needs.json
EOF
- name: Download json reports
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Report Builder
run: |
sudo rm -fr "$TEMP_PATH"
@@ -556,14 +529,13 @@ jobs:
KILL_TIMEOUT=10800
EOF
- name: Download json reports
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
@@ -594,14 +566,13 @@ jobs:
KILL_TIMEOUT=3600
EOF
- name: Download json reports
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
@@ -635,14 +606,13 @@ jobs:
REPO_COPY=${{runner.temp}}/stress_thread/ClickHouse
EOF
- name: Download json reports
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Stress test
run: |
sudo rm -fr "$TEMP_PATH"
@@ -672,14 +642,13 @@ jobs:
REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse
EOF
- name: Download json reports
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Integration test
run: |
sudo rm -fr "$TEMP_PATH"
@@ -706,11 +675,10 @@ jobs:
- CompatibilityCheck
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Finish label
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
.github/workflows/cherry_pick.yml (vendored): 3 changed lines
@@ -28,8 +28,9 @@ jobs:
REPO_TEAM=core
EOF
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
fetch-depth: 0
- name: Cherry pick
.github/workflows/docs_check.yml (vendored): 63 changed lines
@@ -21,11 +21,10 @@ jobs:
CheckLabels:
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -rf "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Labels check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
@@ -34,17 +33,16 @@ jobs:
needs: CheckLabels
runs-on: [self-hosted, style-checker-aarch64]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
@@ -52,17 +50,16 @@ jobs:
needs: CheckLabels
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix amd64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
@@ -70,18 +67,17 @@ jobs:
needs: [DockerHubPushAmd64, DockerHubPushAarch64]
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Download changed aarch64 images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}
- name: Download changed amd64 images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}
@@ -90,7 +86,7 @@ jobs:
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: changed_images
path: ${{ runner.temp }}/changed_images.json
@@ -110,15 +106,14 @@ jobs:
- name: Download changed images
# even if artifact does not exist, e.g. on `do not test` label or failed Docker job
continue-on-error: true
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.TEMP_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Style Check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
@@ -140,15 +135,14 @@ jobs:
REPO_COPY=${{runner.temp}}/docs_check/ClickHouse
EOF
- name: Download changed images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.TEMP_PATH }}
- name: Clear repository
run: |
sudo rm -rf "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Docs Check
run: |
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -167,11 +161,10 @@ jobs:
- DocsCheck
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Finish label
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
.github/workflows/docs_release.yml (vendored): 40 changed lines
@@ -23,34 +23,32 @@ jobs:
DockerHubPushAarch64:
runs-on: [self-hosted, style-checker-aarch64]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
DockerHubPushAmd64:
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix amd64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
@@ -58,18 +56,17 @@ jobs:
needs: [DockerHubPushAmd64, DockerHubPushAarch64]
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Download changed aarch64 images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}
- name: Download changed amd64 images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}
@@ -78,7 +75,7 @@ jobs:
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: changed_images
path: ${{ runner.temp }}/changed_images.json
@@ -97,13 +94,12 @@ jobs:
${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
RCSK
EOF
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Download changed images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.TEMP_PATH }}
.github/workflows/jepsen.yml (vendored): 12 changed lines
@@ -19,12 +19,10 @@ jobs:
TEMP_PATH=${{runner.temp}}/keeper_jepsen
REPO_COPY=${{runner.temp}}/keeper_jepsen/ClickHouse
EOF
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0
- name: Jepsen Test
run: |
@@ -50,12 +48,10 @@ jobs:
# TEMP_PATH=${{runner.temp}}/server_jepsen
# REPO_COPY=${{runner.temp}}/server_jepsen/ClickHouse
# EOF
# - name: Clear repository
# run: |
# sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
# - name: Check out repository code
# uses: actions/checkout@v2
# uses: ClickHouse/checkout@v1
# with:
# clear-repository: true
# fetch-depth: 0
# - name: Jepsen Test
# run: |
.github/workflows/master.yml (vendored): 894 changed lines (file diff suppressed because it is too large)

.github/workflows/nightly.yml (vendored): 48 changed lines
@@ -16,34 +16,32 @@ jobs:
DockerHubPushAarch64:
runs-on: [self-hosted, style-checker-aarch64]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix aarch64 --all
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
DockerHubPushAmd64:
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix amd64 --all
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
@@ -51,18 +49,17 @@ jobs:
needs: [DockerHubPushAmd64, DockerHubPushAarch64]
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Download changed aarch64 images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}
- name: Download changed amd64 images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}
@@ -71,7 +68,7 @@ jobs:
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: changed_images
path: ${{ runner.temp }}/changed_images.json
@@ -90,22 +87,17 @@ jobs:
EOF
echo "COVERITY_TOKEN=${{ secrets.COVERITY_TOKEN }}" >> "$GITHUB_ENV"
- name: Download changed images
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
id: coverity-checkout
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
fetch-depth: 0 # otherwise we will have no info about contributors
clear-repository: true
submodules: true
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -134,8 +126,10 @@ jobs:
CC: clang-15
CXX: clang++-15
steps:
- uses: actions/checkout@v2
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0 # Shallow clones should be disabled for a better relevancy of analysis
submodules: true
- name: Set up JDK 11
.github/workflows/pull_request.yml (vendored): 1188 changed lines (file diff suppressed because it is too large)

.github/workflows/release.yml (vendored): 8 changed lines
@@ -20,7 +20,7 @@ jobs:
REPO_COPY=${{runner.temp}}/release_packages/ClickHouse
EOF
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
# Always use the most recent script version
ref: master
@@ -50,12 +50,10 @@ jobs:
DockerServerImages:
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0 # otherwise we will have no version info
- name: Check docker clickhouse/clickhouse-server building
run: |
.github/workflows/release_branches.yml (vendored): 514 changed lines (file diff suppressed because it is too large)

.github/workflows/tags_stable.yml (vendored): 2 changed lines
@@ -34,7 +34,7 @@ jobs:
run: |
echo "GITHUB_TAG=${GITHUB_REF#refs/tags/}" >> "$GITHUB_ENV"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
ref: master
fetch-depth: 0
.github/workflows/woboq.yml (vendored): 6 changed lines
@@ -21,12 +21,10 @@ jobs:
REPO_COPY=${{runner.temp}}/codebrowser/ClickHouse
IMAGES_PATH=${{runner.temp}}/images_path
EOF
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: 'true'
- name: Codebrowser
run: |
@@ -377,15 +377,15 @@ set (DEBUG_INFO_FLAGS "-g -gdwarf-4")

set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMPILER_FLAGS}")
set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} -fno-inline ${CMAKE_CXX_FLAGS_ADD}")
set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")

set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMPILER_FLAGS} ${CMAKE_C_FLAGS_ADD}")
set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}")
set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} -fno-inline ${CMAKE_C_FLAGS_ADD}")
set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}")

set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} ${COMPILER_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
set (CMAKE_ASM_FLAGS_RELWITHDEBINFO "${CMAKE_ASM_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} -fno-inline ${CMAKE_ASM_FLAGS_ADD}")
set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")

if (COMPILER_CLANG)
if (OS_DARWIN)
@@ -402,10 +402,11 @@ ReplxxLineReader::ReplxxLineReader(
words.push_back(hs.get().text());
}

std::string current_query(rx.get_state().text());
std::string new_query;
try
{
new_query = std::string(skim(words));
new_query = std::string(skim(current_query, words));
}
catch (const std::exception & e)
{
@@ -43,7 +43,10 @@ set_target_properties(unwind PROPERTIES FOLDER "contrib/libunwind-cmake")

target_include_directories(unwind SYSTEM BEFORE PUBLIC $<BUILD_INTERFACE:${LIBUNWIND_SOURCE_DIR}/include>)
target_compile_definitions(unwind PRIVATE -D_LIBUNWIND_NO_HEAP=1 -D_DEBUG -D_LIBUNWIND_IS_NATIVE_ONLY)
target_compile_options(unwind PRIVATE -fno-exceptions -funwind-tables -fno-sanitize=all $<$<COMPILE_LANGUAGE:CXX>:-nostdinc++ -fno-rtti>)

# We should enable optimizations (otherwise it will be too slow in debug)
# and disable sanitizers (otherwise infinite loop may happen)
target_compile_options(unwind PRIVATE -O3 -fno-exceptions -funwind-tables -fno-sanitize=all $<$<COMPILE_LANGUAGE:CXX>:-nostdinc++ -fno-rtti>)

check_c_compiler_flag(-Wunused-but-set-variable HAVE_WARNING_UNUSED_BUT_SET_VARIABLE)
if (HAVE_WARNING_UNUSED_BUT_SET_VARIABLE)
contrib/sysroot (vendored): 2 changed lines
@@ -1 +1 @@
Subproject commit e9fb375d0a1e5ebfd74c043f088f2342552103f8
Subproject commit 0f41651860fa4a530ecd68b93a15b8fd77397adf
@@ -120,8 +120,8 @@ if [ -n "$(ls /docker-entrypoint-initdb.d/)" ] || [ -n "$CLICKHOUSE_DB" ]; then
pid="$!"

# check if clickhouse is ready to accept connections
# will try to send ping clickhouse via http_port (max 12 retries by default, with 1 sec timeout and 1 sec delay between retries)
tries=${CLICKHOUSE_INIT_TIMEOUT:-12}
# will try to send ping clickhouse via http_port (max 1000 retries by default, with 1 sec timeout and 1 sec delay between retries)
tries=${CLICKHOUSE_INIT_TIMEOUT:-1000}
while ! wget --spider --no-check-certificate -T 1 -q "$URL" 2>/dev/null; do
if [ "$tries" -le "0" ]; then
echo >&2 'ClickHouse init process failed.'
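This hunk only raises the default retry budget for the entrypoint's readiness probe: CLICKHOUSE_INIT_TIMEOUT now defaults to 1000 one-second attempts instead of 12. A minimal standalone sketch of the same wait-for-ping pattern follows; the URL default shown here is an assumption for illustration, not taken from the diff:

```
#!/usr/bin/env bash
# Illustrative sketch of the readiness wait, not the entrypoint itself.
URL="${URL:-http://localhost:8123/ping}"   # assumed HTTP ping endpoint
tries=${CLICKHOUSE_INIT_TIMEOUT:-1000}     # roughly one attempt per second
while ! wget --spider -T 1 -q "$URL" 2>/dev/null; do
    if [ "$tries" -le 0 ]; then
        echo >&2 'ClickHouse did not become ready in time.'
        exit 1
    fi
    tries=$((tries - 1))
    sleep 1
done
echo 'ClickHouse is ready to accept connections.'
```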
@@ -2,6 +2,7 @@
<profiles>
<default>
<max_execution_time>10</max_execution_time>

<!--
Don't let the fuzzer change this setting (I've actually seen it
do this before).
@@ -14,6 +15,11 @@
<max_memory_usage>
<max>10G</max>
</max_memory_usage>

<!-- Analyzer is unstable, not ready for testing. -->
<allow_experimental_analyzer>
<readonly/>
</allow_experimental_analyzer>
</constraints>
</default>
</profiles>
@@ -75,6 +75,7 @@ function download
./clickhouse ||:
ln -s ./clickhouse ./clickhouse-server
ln -s ./clickhouse ./clickhouse-client
ln -s ./clickhouse ./clickhouse-local

# clickhouse-server is in the current dir
export PATH="$PWD:$PATH"
@@ -91,6 +92,12 @@ function configure
cp -av --dereference "$script_dir"/query-fuzzer-tweaks-users.xml db/users.d
cp -av --dereference "$script_dir"/allow-nullable-key.xml db/config.d

cat > db/config.d/max_server_memory_usage_to_ram_ratio.xml <<EOL
<clickhouse>
<max_server_memory_usage_to_ram_ratio>0.75</max_server_memory_usage_to_ram_ratio>
</clickhouse>
EOL

cat > db/config.d/core.xml <<EOL
<clickhouse>
<core_dump>
@@ -256,12 +263,21 @@ quit
if [ "$server_died" == 1 ]
then
# The server has died.
task_exit_code=210
echo "failure" > status.txt
if ! zgrep --text -ao "Received signal.*\|Logical error.*\|Assertion.*failed\|Failed assertion.*\|.*runtime error: .*\|.*is located.*\|SUMMARY: AddressSanitizer:.*\|SUMMARY: MemorySanitizer:.*\|SUMMARY: ThreadSanitizer:.*\|.*_LIBCPP_ASSERT.*" server.log.gz > description.txt
then
echo "Lost connection to server. See the logs." > description.txt
fi

if grep -F --text 'Sanitizer: out-of-memory' description.txt
then
# OOM of sanitizer is not a problem we can handle - treat it as success, but preserve the description.
task_exit_code=0
echo "success" > status.txt
else
task_exit_code=210
echo "failure" > status.txt
fi

elif [ "$fuzzer_exit_code" == "143" ] || [ "$fuzzer_exit_code" == "0" ]
then
# Variants of a normal run:
@@ -352,17 +368,25 @@ th { cursor: pointer; }
<body>
<div class="main">

<h1>AST Fuzzer for PR #${PR_TO_TEST} @ ${SHA_TO_TEST}</h1>
<h1>AST Fuzzer for PR <a href="https://github.com/ClickHouse/ClickHouse/pull/${PR_TO_TEST}">#${PR_TO_TEST}</a> @ ${SHA_TO_TEST}</h1>
<p class="links">
<a href="runlog.log">runlog.log</a>
<a href="fuzzer.log">fuzzer.log</a>
<a href="server.log.gz">server.log.gz</a>
<a href="main.log">main.log</a>
${CORE_LINK}
<a href="run.log">run.log</a>
<a href="fuzzer.log">fuzzer.log</a>
<a href="server.log.gz">server.log.gz</a>
<a href="main.log">main.log</a>
${CORE_LINK}
</p>
<table>
<tr><th>Test name</th><th>Test status</th><th>Description</th></tr>
<tr><td>AST Fuzzer</td><td>$(cat status.txt)</td><td>$(cat description.txt)</td></tr>
<tr>
<th>Test name</th>
<th>Test status</th>
<th>Description</th>
</tr>
<tr>
<td>AST Fuzzer</td>
<td>$(cat status.txt)</td>
<td style="white-space: pre;">$(clickhouse-local --input-format RawBLOB --output-format RawBLOB --query "SELECT encodeXMLComponent(*) FROM table" < description.txt)</td>
</tr>
</table>
</body>
</html>
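The reworked report row above escapes the fuzzer's description through clickhouse-local before embedding it in HTML. A minimal sketch of that escaping pattern, using an illustrative input file name:

```
# Escape an arbitrary text file for safe embedding into HTML/XML.
# RawBLOB treats the whole of stdin as a single value, exposed as the implicit `table`.
clickhouse-local \
    --input-format RawBLOB --output-format RawBLOB \
    --query "SELECT encodeXMLComponent(*) FROM table" < description.txt
```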
@@ -1,6 +1,6 @@
Allow to run simple ClickHouse stress test in Docker from debian packages.
Allows to run simple ClickHouse stress test in Docker from debian packages.
Actually it runs multiple copies of clickhouse-test (functional tests).
This allows to find problems like segmentation fault which cause shutdown of server.
This allows to find problems like failed assertions and memory safety issues.

Usage:
```
@@ -123,6 +123,22 @@ EOL
<core_path>$PWD</core_path>
</clickhouse>
EOL

# Analyzer is not yet ready for testing
cat > /etc/clickhouse-server/users.d/no_analyzer.xml <<EOL
<clickhouse>
<profiles>
<default>
<constraints>
<allow_experimental_analyzer>
<readonly/>
</allow_experimental_analyzer>
</constraints>
</default>
</profiles>
</clickhouse>
EOL

}

function stop()
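The profile written above pins allow_experimental_analyzer behind a readonly constraint so the stress run cannot toggle it. A quick, illustrative way to confirm such a constraint is in effect (the exact error code and text depend on the server version):

```
# Reading the setting is still allowed:
clickhouse-client --query "SELECT getSetting('allow_experimental_analyzer')"
# Changing it should be rejected by the readonly constraint:
clickhouse-client --query "SET allow_experimental_analyzer = 1" \
    || echo "change rejected: constraint is in effect"
```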
@@ -334,219 +350,221 @@ zgrep -Fa "########################################" /test_output/* > /dev/null
zgrep -Fa " received signal " /test_output/gdb.log > /dev/null \
&& echo -e 'Found signal in gdb.log\tFAIL' >> /test_output/test_results.tsv

echo -e "Backward compatibility check\n"
if [ "$DISABLE_BC_CHECK" -ne "1" ]; then
echo -e "Backward compatibility check\n"

echo "Get previous release tag"
previous_release_tag=$(clickhouse-client --version | grep -o "[0-9]*\.[0-9]*\.[0-9]*\.[0-9]*" | get_previous_release_tag)
echo $previous_release_tag
echo "Get previous release tag"
previous_release_tag=$(clickhouse-client --version | grep -o "[0-9]*\.[0-9]*\.[0-9]*\.[0-9]*" | get_previous_release_tag)
echo $previous_release_tag

echo "Clone previous release repository"
git clone https://github.com/ClickHouse/ClickHouse.git --no-tags --progress --branch=$previous_release_tag --no-recurse-submodules --depth=1 previous_release_repository
echo "Clone previous release repository"
git clone https://github.com/ClickHouse/ClickHouse.git --no-tags --progress --branch=$previous_release_tag --no-recurse-submodules --depth=1 previous_release_repository

echo "Download previous release server"
mkdir previous_release_package_folder
echo "Download previous release server"
mkdir previous_release_package_folder

echo $previous_release_tag | download_release_packets && echo -e 'Download script exit code\tOK' >> /test_output/test_results.tsv \
|| echo -e 'Download script failed\tFAIL' >> /test_output/test_results.tsv
echo $previous_release_tag | download_release_packets && echo -e 'Download script exit code\tOK' >> /test_output/test_results.tsv \
|| echo -e 'Download script failed\tFAIL' >> /test_output/test_results.tsv

mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.clean.log
for table in query_log trace_log
do
clickhouse-local --path /var/lib/clickhouse/ --only-system-tables -q "select * from system.$table format TSVWithNamesAndTypes" | pigz > /test_output/$table.tsv.gz ||:
done

tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||:

# Check if we cloned previous release repository successfully
if ! [ "$(ls -A previous_release_repository/tests/queries)" ]
then
echo -e "Backward compatibility check: Failed to clone previous release tests\tFAIL" >> /test_output/test_results.tsv
elif ! [ "$(ls -A previous_release_package_folder/clickhouse-common-static_*.deb && ls -A previous_release_package_folder/clickhouse-server_*.deb)" ]
then
echo -e "Backward compatibility check: Failed to download previous release packets\tFAIL" >> /test_output/test_results.tsv
else
echo -e "Successfully cloned previous release tests\tOK" >> /test_output/test_results.tsv
echo -e "Successfully downloaded previous release packets\tOK" >> /test_output/test_results.tsv

# Uninstall current packages
dpkg --remove clickhouse-client
dpkg --remove clickhouse-server
dpkg --remove clickhouse-common-static-dbg
dpkg --remove clickhouse-common-static

rm -rf /var/lib/clickhouse/*

# Make BC check more funny by forcing Ordinary engine for system database
mkdir /var/lib/clickhouse/metadata
echo "ATTACH DATABASE system ENGINE=Ordinary" > /var/lib/clickhouse/metadata/system.sql

# Install previous release packages
install_packages previous_release_package_folder

# Start server from previous release
# Previous version may not be ready for fault injections
export ZOOKEEPER_FAULT_INJECTION=0
configure

# Avoid "Setting s3_check_objects_after_upload is neither a builtin setting..."
rm -f /etc/clickhouse-server/users.d/enable_blobs_check.xml ||:
rm -f /etc/clickhouse-server/users.d/marks.xml ||:

# Remove s3 related configs to avoid "there is no disk type `cache`"
rm -f /etc/clickhouse-server/config.d/storage_conf.xml ||:
rm -f /etc/clickhouse-server/config.d/azure_storage_conf.xml ||:

# Turn on after 22.12
rm -f /etc/clickhouse-server/config.d/compressed_marks_and_index.xml ||:
# it uses recently introduced settings which previous versions may not have
rm -f /etc/clickhouse-server/users.d/insert_keeper_retries.xml ||:

start

clickhouse-client --query="SELECT 'Server version: ', version()"

# Install new package before running stress test because we should use new
# clickhouse-client and new clickhouse-test.
#
# But we should leave old binary in /usr/bin/ and debug symbols in
# /usr/lib/debug/usr/bin (if any) for gdb and internal DWARF parser, so it
# will print sane stacktraces and also to avoid possible crashes.
#
# FIXME: those files can be extracted directly from debian package, but
# actually better solution will be to use different PATH instead of playing
# games with files from packages.
mv /usr/bin/clickhouse previous_release_package_folder/
mv /usr/lib/debug/usr/bin/clickhouse.debug previous_release_package_folder/
install_packages package_folder
mv /usr/bin/clickhouse package_folder/
mv /usr/lib/debug/usr/bin/clickhouse.debug package_folder/
mv previous_release_package_folder/clickhouse /usr/bin/
mv previous_release_package_folder/clickhouse.debug /usr/lib/debug/usr/bin/clickhouse.debug

mkdir tmp_stress_output

./stress --test-cmd="/usr/bin/clickhouse-test --queries=\"previous_release_repository/tests/queries\"" --backward-compatibility-check --output-folder tmp_stress_output --global-time-limit=1200 \
&& echo -e 'Backward compatibility check: Test script exit code\tOK' >> /test_output/test_results.tsv \
|| echo -e 'Backward compatibility check: Test script failed\tFAIL' >> /test_output/test_results.tsv
rm -rf tmp_stress_output

clickhouse-client --query="SELECT 'Tables count:', count() FROM system.tables"

stop 1
mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.backward.stress.log

# Start new server
mv package_folder/clickhouse /usr/bin/
mv package_folder/clickhouse.debug /usr/lib/debug/usr/bin/clickhouse.debug
export ZOOKEEPER_FAULT_INJECTION=1
configure
start 500
clickhouse-client --query "SELECT 'Backward compatibility check: Server successfully started', 'OK'" >> /test_output/test_results.tsv \
|| (echo -e 'Backward compatibility check: Server failed to start\tFAIL' >> /test_output/test_results.tsv \
&& grep -a "<Error>.*Application" /var/log/clickhouse-server/clickhouse-server.log >> /test_output/bc_check_application_errors.txt)

clickhouse-client --query="SELECT 'Server version: ', version()"

# Let the server run for a while before checking log.
sleep 60

stop
mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.backward.clean.log

# Error messages (we should ignore some errors)
# FIXME https://github.com/ClickHouse/ClickHouse/issues/38643 ("Unknown index: idx.")
# FIXME https://github.com/ClickHouse/ClickHouse/issues/39174 ("Cannot parse string 'Hello' as UInt64")
# FIXME Not sure if it's expected, but some tests from BC check may not be finished yet when we restarting server.
# Let's just ignore all errors from queries ("} <Error> TCPHandler: Code:", "} <Error> executeQuery: Code:")
# FIXME https://github.com/ClickHouse/ClickHouse/issues/39197 ("Missing columns: 'v3' while processing query: 'v3, k, v1, v2, p'")
# NOTE Incompatibility was introduced in https://github.com/ClickHouse/ClickHouse/pull/39263, it's expected
# ("This engine is deprecated and is not supported in transactions", "[Queue = DB::MergeMutateRuntimeQueue]: Code: 235. DB::Exception: Part")
# FIXME https://github.com/ClickHouse/ClickHouse/issues/39174 - bad mutation does not indicate backward incompatibility
echo "Check for Error messages in server log:"
zgrep -Fav -e "Code: 236. DB::Exception: Cancelled merging parts" \
-e "Code: 236. DB::Exception: Cancelled mutating parts" \
-e "REPLICA_IS_ALREADY_ACTIVE" \
-e "REPLICA_ALREADY_EXISTS" \
-e "ALL_REPLICAS_LOST" \
-e "DDLWorker: Cannot parse DDL task query" \
-e "RaftInstance: failed to accept a rpc connection due to error 125" \
-e "UNKNOWN_DATABASE" \
-e "NETWORK_ERROR" \
-e "UNKNOWN_TABLE" \
-e "ZooKeeperClient" \
-e "KEEPER_EXCEPTION" \
-e "DirectoryMonitor" \
-e "TABLE_IS_READ_ONLY" \
-e "Code: 1000, e.code() = 111, Connection refused" \
-e "UNFINISHED" \
-e "NETLINK_ERROR" \
-e "Renaming unexpected part" \
-e "PART_IS_TEMPORARILY_LOCKED" \
-e "and a merge is impossible: we didn't find" \
-e "found in queue and some source parts for it was lost" \
-e "is lost forever." \
-e "Unknown index: idx." \
-e "Cannot parse string 'Hello' as UInt64" \
-e "} <Error> TCPHandler: Code:" \
-e "} <Error> executeQuery: Code:" \
-e "Missing columns: 'v3' while processing query: 'v3, k, v1, v2, p'" \
-e "This engine is deprecated and is not supported in transactions" \
-e "[Queue = DB::MergeMutateRuntimeQueue]: Code: 235. DB::Exception: Part" \
-e "The set of parts restored in place of" \
-e "(ReplicatedMergeTreeAttachThread): Initialization failed. Error" \
-e "Code: 269. DB::Exception: Destination table is myself" \
-e "Coordination::Exception: Connection loss" \
-e "MutateFromLogEntryTask" \
-e "No connection to ZooKeeper, cannot get shared table ID" \
-e "Session expired" \
/var/log/clickhouse-server/clickhouse-server.backward.clean.log | zgrep -Fa "<Error>" > /test_output/bc_check_error_messages.txt \
&& echo -e 'Backward compatibility check: Error message in clickhouse-server.log (see bc_check_error_messages.txt)\tFAIL' >> /test_output/test_results.tsv \
|| echo -e 'Backward compatibility check: No Error messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv

# Remove file bc_check_error_messages.txt if it's empty
[ -s /test_output/bc_check_error_messages.txt ] || rm /test_output/bc_check_error_messages.txt

# Sanitizer asserts
zgrep -Fa "==================" /var/log/clickhouse-server/stderr.log >> /test_output/tmp
zgrep -Fa "WARNING" /var/log/clickhouse-server/stderr.log >> /test_output/tmp
zgrep -Fav -e "ASan doesn't fully support makecontext/swapcontext functions" -e "DB::Exception" /test_output/tmp > /dev/null \
&& echo -e 'Backward compatibility check: Sanitizer assert (in stderr.log)\tFAIL' >> /test_output/test_results.tsv \
|| echo -e 'Backward compatibility check: No sanitizer asserts\tOK' >> /test_output/test_results.tsv
rm -f /test_output/tmp

# OOM
zgrep -Fa " <Fatal> Application: Child process was terminated by signal 9" /var/log/clickhouse-server/clickhouse-server.backward.*.log > /dev/null \
&& echo -e 'Backward compatibility check: OOM killer (or signal 9) in clickhouse-server.log\tFAIL' >> /test_output/test_results.tsv \
|| echo -e 'Backward compatibility check: No OOM messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv

# Logical errors
echo "Check for Logical errors in server log:"
zgrep -Fa -A20 "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-server/clickhouse-server.backward.*.log > /test_output/bc_check_logical_errors.txt \
&& echo -e 'Backward compatibility check: Logical error thrown (see clickhouse-server.log or bc_check_logical_errors.txt)\tFAIL' >> /test_output/test_results.tsv \
|| echo -e 'Backward compatibility check: No logical errors\tOK' >> /test_output/test_results.tsv

# Remove file bc_check_logical_errors.txt if it's empty
[ -s /test_output/bc_check_logical_errors.txt ] || rm /test_output/bc_check_logical_errors.txt

# Crash
zgrep -Fa "########################################" /var/log/clickhouse-server/clickhouse-server.backward.*.log > /dev/null \
&& echo -e 'Backward compatibility check: Killed by signal (in clickhouse-server.log)\tFAIL' >> /test_output/test_results.tsv \
|| echo -e 'Backward compatibility check: Not crashed\tOK' >> /test_output/test_results.tsv

# It also checks for crash without stacktrace (printed by watchdog)
echo "Check for Fatal message in server log:"
zgrep -Fa " <Fatal> " /var/log/clickhouse-server/clickhouse-server.backward.*.log > /test_output/bc_check_fatal_messages.txt \
&& echo -e 'Backward compatibility check: Fatal message in clickhouse-server.log (see bc_check_fatal_messages.txt)\tFAIL' >> /test_output/test_results.tsv \
|| echo -e 'Backward compatibility check: No fatal messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv

# Remove file bc_check_fatal_messages.txt if it's empty
[ -s /test_output/bc_check_fatal_messages.txt ] || rm /test_output/bc_check_fatal_messages.txt

tar -chf /test_output/coordination.backward.tar /var/lib/clickhouse/coordination ||:
mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.clean.log
for table in query_log trace_log
do
clickhouse-local --path /var/lib/clickhouse/ --only-system-tables -q "select * from system.$table format TSVWithNamesAndTypes" | pigz > /test_output/$table.backward.tsv.gz ||:
clickhouse-local --path /var/lib/clickhouse/ --only-system-tables -q "select * from system.$table format TSVWithNamesAndTypes" | pigz > /test_output/$table.tsv.gz ||:
done

tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||:

# Check if we cloned previous release repository successfully
if ! [ "$(ls -A previous_release_repository/tests/queries)" ]
then
echo -e "Backward compatibility check: Failed to clone previous release tests\tFAIL" >> /test_output/test_results.tsv
elif ! [ "$(ls -A previous_release_package_folder/clickhouse-common-static_*.deb && ls -A previous_release_package_folder/clickhouse-server_*.deb)" ]
then
echo -e "Backward compatibility check: Failed to download previous release packets\tFAIL" >> /test_output/test_results.tsv
else
echo -e "Successfully cloned previous release tests\tOK" >> /test_output/test_results.tsv
echo -e "Successfully downloaded previous release packets\tOK" >> /test_output/test_results.tsv

# Uninstall current packages
dpkg --remove clickhouse-client
dpkg --remove clickhouse-server
dpkg --remove clickhouse-common-static-dbg
dpkg --remove clickhouse-common-static

rm -rf /var/lib/clickhouse/*

# Make BC check more funny by forcing Ordinary engine for system database
mkdir /var/lib/clickhouse/metadata
echo "ATTACH DATABASE system ENGINE=Ordinary" > /var/lib/clickhouse/metadata/system.sql

# Install previous release packages
install_packages previous_release_package_folder

# Start server from previous release
# Previous version may not be ready for fault injections
export ZOOKEEPER_FAULT_INJECTION=0
configure

# Avoid "Setting s3_check_objects_after_upload is neither a builtin setting..."
rm -f /etc/clickhouse-server/users.d/enable_blobs_check.xml ||:
rm -f /etc/clickhouse-server/users.d/marks.xml ||:

# Remove s3 related configs to avoid "there is no disk type `cache`"
rm -f /etc/clickhouse-server/config.d/storage_conf.xml ||:
rm -f /etc/clickhouse-server/config.d/azure_storage_conf.xml ||:

# Turn on after 22.12
rm -f /etc/clickhouse-server/config.d/compressed_marks_and_index.xml ||:
# it uses recently introduced settings which previous versions may not have
rm -f /etc/clickhouse-server/users.d/insert_keeper_retries.xml ||:

start

clickhouse-client --query="SELECT 'Server version: ', version()"

# Install new package before running stress test because we should use new
# clickhouse-client and new clickhouse-test.
#
# But we should leave old binary in /usr/bin/ and debug symbols in
# /usr/lib/debug/usr/bin (if any) for gdb and internal DWARF parser, so it
# will print sane stacktraces and also to avoid possible crashes.
#
# FIXME: those files can be extracted directly from debian package, but
# actually better solution will be to use different PATH instead of playing
# games with files from packages.
mv /usr/bin/clickhouse previous_release_package_folder/
mv /usr/lib/debug/usr/bin/clickhouse.debug previous_release_package_folder/
install_packages package_folder
mv /usr/bin/clickhouse package_folder/
mv /usr/lib/debug/usr/bin/clickhouse.debug package_folder/
mv previous_release_package_folder/clickhouse /usr/bin/
mv previous_release_package_folder/clickhouse.debug /usr/lib/debug/usr/bin/clickhouse.debug

mkdir tmp_stress_output

./stress --test-cmd="/usr/bin/clickhouse-test --queries=\"previous_release_repository/tests/queries\"" --backward-compatibility-check --output-folder tmp_stress_output --global-time-limit=1200 \
|
||||
&& echo -e 'Backward compatibility check: Test script exit code\tOK' >> /test_output/test_results.tsv \
|
||||
|| echo -e 'Backward compatibility check: Test script failed\tFAIL' >> /test_output/test_results.tsv
|
||||
rm -rf tmp_stress_output
|
||||
|
||||
clickhouse-client --query="SELECT 'Tables count:', count() FROM system.tables"
|
||||
|
||||
stop 1
|
||||
mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.backward.stress.log
|
||||
|
||||
# Start new server
|
||||
mv package_folder/clickhouse /usr/bin/
|
||||
mv package_folder/clickhouse.debug /usr/lib/debug/usr/bin/clickhouse.debug
|
||||
export ZOOKEEPER_FAULT_INJECTION=1
|
||||
configure
|
||||
start 500
|
||||
clickhouse-client --query "SELECT 'Backward compatibility check: Server successfully started', 'OK'" >> /test_output/test_results.tsv \
|
||||
|| (echo -e 'Backward compatibility check: Server failed to start\tFAIL' >> /test_output/test_results.tsv \
|
||||
&& grep -a "<Error>.*Application" /var/log/clickhouse-server/clickhouse-server.log >> /test_output/bc_check_application_errors.txt)
|
||||
|
||||
clickhouse-client --query="SELECT 'Server version: ', version()"
|
||||
|
||||
# Let the server run for a while before checking log.
|
||||
sleep 60
|
||||
|
||||
stop
|
||||
mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.backward.clean.log
|
||||
|
||||
# Error messages (we should ignore some errors)
|
||||
# FIXME https://github.com/ClickHouse/ClickHouse/issues/38643 ("Unknown index: idx.")
|
||||
# FIXME https://github.com/ClickHouse/ClickHouse/issues/39174 ("Cannot parse string 'Hello' as UInt64")
|
||||
# FIXME Not sure if it's expected, but some tests from BC check may not be finished yet when we restarting server.
|
||||
# Let's just ignore all errors from queries ("} <Error> TCPHandler: Code:", "} <Error> executeQuery: Code:")
|
||||
# FIXME https://github.com/ClickHouse/ClickHouse/issues/39197 ("Missing columns: 'v3' while processing query: 'v3, k, v1, v2, p'")
|
||||
# NOTE Incompatibility was introduced in https://github.com/ClickHouse/ClickHouse/pull/39263, it's expected
|
||||
# ("This engine is deprecated and is not supported in transactions", "[Queue = DB::MergeMutateRuntimeQueue]: Code: 235. DB::Exception: Part")
|
||||
# FIXME https://github.com/ClickHouse/ClickHouse/issues/39174 - bad mutation does not indicate backward incompatibility
|
||||
echo "Check for Error messages in server log:"
|
||||
zgrep -Fav -e "Code: 236. DB::Exception: Cancelled merging parts" \
|
||||
-e "Code: 236. DB::Exception: Cancelled mutating parts" \
|
||||
-e "REPLICA_IS_ALREADY_ACTIVE" \
|
||||
-e "REPLICA_ALREADY_EXISTS" \
|
||||
-e "ALL_REPLICAS_LOST" \
|
||||
-e "DDLWorker: Cannot parse DDL task query" \
|
||||
-e "RaftInstance: failed to accept a rpc connection due to error 125" \
|
||||
-e "UNKNOWN_DATABASE" \
|
||||
-e "NETWORK_ERROR" \
|
||||
-e "UNKNOWN_TABLE" \
|
||||
-e "ZooKeeperClient" \
|
||||
-e "KEEPER_EXCEPTION" \
|
||||
-e "DirectoryMonitor" \
|
||||
-e "TABLE_IS_READ_ONLY" \
|
||||
-e "Code: 1000, e.code() = 111, Connection refused" \
|
||||
-e "UNFINISHED" \
|
||||
-e "NETLINK_ERROR" \
|
||||
-e "Renaming unexpected part" \
|
||||
-e "PART_IS_TEMPORARILY_LOCKED" \
|
||||
-e "and a merge is impossible: we didn't find" \
|
||||
-e "found in queue and some source parts for it was lost" \
|
||||
-e "is lost forever." \
|
||||
-e "Unknown index: idx." \
|
||||
-e "Cannot parse string 'Hello' as UInt64" \
|
||||
-e "} <Error> TCPHandler: Code:" \
|
||||
-e "} <Error> executeQuery: Code:" \
|
||||
-e "Missing columns: 'v3' while processing query: 'v3, k, v1, v2, p'" \
|
||||
-e "This engine is deprecated and is not supported in transactions" \
|
||||
-e "[Queue = DB::MergeMutateRuntimeQueue]: Code: 235. DB::Exception: Part" \
|
||||
-e "The set of parts restored in place of" \
|
||||
-e "(ReplicatedMergeTreeAttachThread): Initialization failed. Error" \
|
||||
-e "Code: 269. DB::Exception: Destination table is myself" \
|
||||
-e "Coordination::Exception: Connection loss" \
|
||||
-e "MutateFromLogEntryTask" \
|
||||
-e "No connection to ZooKeeper, cannot get shared table ID" \
|
||||
-e "Session expired" \
|
||||
/var/log/clickhouse-server/clickhouse-server.backward.clean.log | zgrep -Fa "<Error>" > /test_output/bc_check_error_messages.txt \
|
||||
&& echo -e 'Backward compatibility check: Error message in clickhouse-server.log (see bc_check_error_messages.txt)\tFAIL' >> /test_output/test_results.tsv \
|
||||
|| echo -e 'Backward compatibility check: No Error messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv
|
||||
|
||||
# Remove file bc_check_error_messages.txt if it's empty
|
||||
[ -s /test_output/bc_check_error_messages.txt ] || rm /test_output/bc_check_error_messages.txt
|
||||
|
||||
# Sanitizer asserts
|
||||
zgrep -Fa "==================" /var/log/clickhouse-server/stderr.log >> /test_output/tmp
|
||||
zgrep -Fa "WARNING" /var/log/clickhouse-server/stderr.log >> /test_output/tmp
|
||||
zgrep -Fav -e "ASan doesn't fully support makecontext/swapcontext functions" -e "DB::Exception" /test_output/tmp > /dev/null \
|
||||
&& echo -e 'Backward compatibility check: Sanitizer assert (in stderr.log)\tFAIL' >> /test_output/test_results.tsv \
|
||||
|| echo -e 'Backward compatibility check: No sanitizer asserts\tOK' >> /test_output/test_results.tsv
|
||||
rm -f /test_output/tmp
|
||||
|
||||
# OOM
|
||||
zgrep -Fa " <Fatal> Application: Child process was terminated by signal 9" /var/log/clickhouse-server/clickhouse-server.backward.*.log > /dev/null \
|
||||
&& echo -e 'Backward compatibility check: OOM killer (or signal 9) in clickhouse-server.log\tFAIL' >> /test_output/test_results.tsv \
|
||||
|| echo -e 'Backward compatibility check: No OOM messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv
|
||||
|
||||
# Logical errors
|
||||
echo "Check for Logical errors in server log:"
|
||||
zgrep -Fa -A20 "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-server/clickhouse-server.backward.*.log > /test_output/bc_check_logical_errors.txt \
|
||||
&& echo -e 'Backward compatibility check: Logical error thrown (see clickhouse-server.log or bc_check_logical_errors.txt)\tFAIL' >> /test_output/test_results.tsv \
|
||||
|| echo -e 'Backward compatibility check: No logical errors\tOK' >> /test_output/test_results.tsv
|
||||
|
||||
# Remove file bc_check_logical_errors.txt if it's empty
|
||||
[ -s /test_output/bc_check_logical_errors.txt ] || rm /test_output/bc_check_logical_errors.txt
|
||||
|
||||
# Crash
|
||||
zgrep -Fa "########################################" /var/log/clickhouse-server/clickhouse-server.backward.*.log > /dev/null \
|
||||
&& echo -e 'Backward compatibility check: Killed by signal (in clickhouse-server.log)\tFAIL' >> /test_output/test_results.tsv \
|
||||
|| echo -e 'Backward compatibility check: Not crashed\tOK' >> /test_output/test_results.tsv
|
||||
|
||||
# It also checks for crash without stacktrace (printed by watchdog)
|
||||
echo "Check for Fatal message in server log:"
|
||||
zgrep -Fa " <Fatal> " /var/log/clickhouse-server/clickhouse-server.backward.*.log > /test_output/bc_check_fatal_messages.txt \
|
||||
&& echo -e 'Backward compatibility check: Fatal message in clickhouse-server.log (see bc_check_fatal_messages.txt)\tFAIL' >> /test_output/test_results.tsv \
|
||||
|| echo -e 'Backward compatibility check: No fatal messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv
|
||||
|
||||
# Remove file bc_check_fatal_messages.txt if it's empty
|
||||
[ -s /test_output/bc_check_fatal_messages.txt ] || rm /test_output/bc_check_fatal_messages.txt
|
||||
|
||||
tar -chf /test_output/coordination.backward.tar /var/lib/clickhouse/coordination ||:
|
||||
for table in query_log trace_log
|
||||
do
|
||||
clickhouse-local --path /var/lib/clickhouse/ --only-system-tables -q "select * from system.$table format TSVWithNamesAndTypes" | pigz > /test_output/$table.backward.tsv.gz ||:
|
||||
done
|
||||
fi
|
||||
fi
|
||||
|
||||
dmesg -T > /test_output/dmesg.log
|
||||
|
@ -2,11 +2,10 @@
slug: /en/interfaces/cli
sidebar_position: 17
sidebar_label: Command-Line Client
title: Command-Line Client
---
import ConnectionDetails from '@site/docs/en/_snippets/_gather_your_details_native.md';

# Command-line Client

## clickhouse-client

ClickHouse provides a native command-line client: `clickhouse-client`. The client supports command-line options and configuration files. For more information, see [Configuring](#interfaces_cli_configuration).
@ -825,6 +825,23 @@ Setting fields:
The `table` or `where` fields cannot be used together with the `query` field, and either the `table` or the `query` field must be declared.
:::

## Null

A special source that can be used to create dummy (empty) dictionaries. Such dictionaries can be useful for tests, or in setups where data and query processing are separated across nodes with Distributed tables.

``` sql
CREATE DICTIONARY null_dict (
    id UInt64,
    val UInt8,
    default_val UInt8 DEFAULT 123,
    nullable_val Nullable(UInt8)
)
PRIMARY KEY id
SOURCE(NULL())
LAYOUT(FLAT())
LIFETIME(0);
```
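Because a dictionary with the `Null` source never contains any rows, every lookup falls back to the attribute defaults. A minimal usage sketch based on the dictionary defined above (the key `1` is an arbitrary value chosen only for illustration):

``` sql
-- The dictionary is empty, so dictHas() returns 0 and dictGet()
-- returns the attribute defaults: 0 for `val`, 123 for `default_val`.
SELECT
    dictHas('null_dict', toUInt64(1)) AS key_exists,
    dictGet('null_dict', 'val', toUInt64(1)) AS val,
    dictGet('null_dict', 'default_val', toUInt64(1)) AS default_val;
```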

## Related Content

- [Using dictionaries to accelerate queries](https://clickhouse.com/blog/faster-queries-dictionaries-clickhouse)
@ -76,6 +76,49 @@ These functions are available starting from 22.10.

## randUniform

Returns a random number drawn from a [continuous uniform distribution](https://en.wikipedia.org/wiki/Continuous_uniform_distribution) in the specified range from `min` to `max`.

**Syntax**

``` sql
randUniform(min, max)
```

**Arguments**

- `min` - `Float64` - minimum value of the range.
- `max` - `Float64` - maximum value of the range.

**Returned value**

- Pseudo-random number.

Type: [Float64](/docs/en/sql-reference/data-types/float.md).

**Example**

Query:

``` sql
SELECT randUniform(5.5, 10) FROM numbers(5)
```

Result:

``` text
┌─randUniform(5.5, 10)─┐
│    8.094978491443102 │
│   7.3181248914450885 │
│    7.177741903868262 │
│    6.483347380953762 │
│    6.122286382885112 │
└──────────────────────┘
```

## randNormal

Returns a random number drawn from a [normal distribution](https://en.wikipedia.org/wiki/Normal_distribution).
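For symmetry with the `randUniform` example, a hedged usage sketch follows. It assumes `randNormal` takes the distribution's mean and variance as its two arguments (check the function reference for the exact signature); the returned values are random and will differ on every run.

``` sql
-- Assumed signature: randNormal(mean, variance); output shown is illustrative only.
SELECT round(randNormal(10, 2), 3) AS value
FROM numbers(3)
```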
@ -683,7 +683,7 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)
|
||||
("confidence", value<size_t>()->default_value(5), "set the level of confidence for T-test [0=80%, 1=90%, 2=95%, 3=98%, 4=99%, 5=99.5%(default)")
|
||||
("query_id", value<std::string>()->default_value(""), "")
|
||||
("max-consecutive-errors", value<size_t>()->default_value(0), "set number of allowed consecutive errors")
|
||||
("continue_on_errors", "continue testing even if a query fails")
|
||||
("ignore-error,continue_on_errors", "continue testing even if a query fails")
|
||||
("reconnect", "establish new connection for every query")
|
||||
("client-side-time", "display the time including network communication instead of server-side time; note that for server versions before 22.8 we always display client-side time")
|
||||
;
|
||||
@ -738,7 +738,7 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)
|
||||
options["query_id"].as<std::string>(),
|
||||
options["query"].as<std::string>(),
|
||||
options["max-consecutive-errors"].as<size_t>(),
|
||||
options.count("continue_on_errors"),
|
||||
options.count("ignore-error"),
|
||||
options.count("reconnect"),
|
||||
options.count("client-side-time"),
|
||||
print_stacktrace,
|
||||
|
@ -1051,18 +1051,12 @@ namespace
|
||||
return pid;
|
||||
}
|
||||
|
||||
int stop(const fs::path & pid_file, bool force, bool do_not_kill, unsigned max_tries)
|
||||
bool sendSignalAndWaitForStop(const fs::path & pid_file, int signal, unsigned max_tries, unsigned wait_ms, const char * signal_name)
|
||||
{
|
||||
if (force && do_not_kill)
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Specified flags are incompatible");
|
||||
|
||||
int pid = isRunning(pid_file);
|
||||
|
||||
if (!pid)
|
||||
return 0;
|
||||
|
||||
int signal = force ? SIGKILL : SIGTERM;
|
||||
const char * signal_name = force ? "kill" : "terminate";
|
||||
return true;
|
||||
|
||||
if (0 == kill(pid, signal))
|
||||
fmt::print("Sent {} signal to process with pid {}.\n", signal_name, pid);
|
||||
@ -1078,46 +1072,51 @@ namespace
|
||||
fmt::print("Server stopped\n");
|
||||
break;
|
||||
}
|
||||
sleepForSeconds(1);
|
||||
sleepForMilliseconds(wait_ms);
|
||||
}
|
||||
|
||||
if (try_num == max_tries)
|
||||
return try_num < max_tries;
|
||||
}
|
||||
|
||||
int stop(const fs::path & pid_file, bool force, bool do_not_kill, unsigned max_tries)
|
||||
{
|
||||
if (force && do_not_kill)
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Specified flags are incompatible");
|
||||
|
||||
int signal = force ? SIGKILL : SIGTERM;
|
||||
const char * signal_name = force ? "kill" : "terminate";
|
||||
|
||||
if (sendSignalAndWaitForStop(pid_file, signal, max_tries, 1000, signal_name))
|
||||
return 0;
|
||||
|
||||
int pid = isRunning(pid_file);
|
||||
if (!pid)
|
||||
return 0;
|
||||
|
||||
if (do_not_kill)
|
||||
{
|
||||
if (do_not_kill)
|
||||
{
|
||||
fmt::print("Process (pid = {}) is still running. Will not try to kill it.\n", pid);
|
||||
return 1;
|
||||
}
|
||||
|
||||
fmt::print("Will terminate forcefully (pid = {}).\n", pid);
|
||||
if (0 == kill(pid, 9))
|
||||
fmt::print("Sent kill signal (pid = {}).\n", pid);
|
||||
else
|
||||
throwFromErrno("Cannot send kill signal", ErrorCodes::SYSTEM_ERROR);
|
||||
|
||||
/// Wait for the process (100 seconds).
|
||||
constexpr size_t num_kill_check_tries = 1000;
|
||||
constexpr size_t kill_check_delay_ms = 100;
|
||||
for (size_t i = 0; i < num_kill_check_tries; ++i)
|
||||
{
|
||||
fmt::print("Waiting for server to be killed\n");
|
||||
if (!isRunning(pid_file))
|
||||
{
|
||||
fmt::print("Server exited\n");
|
||||
break;
|
||||
}
|
||||
sleepForMilliseconds(kill_check_delay_ms);
|
||||
}
|
||||
|
||||
if (isRunning(pid_file))
|
||||
{
|
||||
throw Exception(ErrorCodes::CANNOT_KILL,
|
||||
"The server process still exists after {} tries (delay: {} ms)",
|
||||
num_kill_check_tries, kill_check_delay_ms);
|
||||
}
|
||||
fmt::print("Process (pid = {}) is still running. Will not try to kill it.\n", pid);
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
/// Send termination signal again, the server will receive it and immediately terminate.
|
||||
fmt::print("Will send the termination signal again to force the termination (pid = {}).\n", pid);
|
||||
if (sendSignalAndWaitForStop(pid_file, signal, std::min(10U, max_tries), 1000, signal_name))
|
||||
return 0;
|
||||
|
||||
/// Send kill signal. Total wait is 100 seconds.
|
||||
constexpr size_t num_kill_check_tries = 1000;
|
||||
constexpr size_t kill_check_delay_ms = 100;
|
||||
fmt::print("Will terminate forcefully (pid = {}).\n", pid);
|
||||
if (sendSignalAndWaitForStop(pid_file, SIGKILL, num_kill_check_tries, kill_check_delay_ms, signal_name))
|
||||
return 0;
|
||||
|
||||
if (!isRunning(pid_file))
|
||||
return 0;
|
||||
|
||||
throw Exception(ErrorCodes::CANNOT_KILL,
|
||||
"The server process still exists after {} tries (delay: {} ms)",
|
||||
num_kill_check_tries, kill_check_delay_ms);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -703,6 +703,13 @@ try
|
||||
global_context->addWarningMessage("Server was built with sanitizer. It will work slowly.");
|
||||
#endif
|
||||
|
||||
const auto memory_amount = getMemoryAmount();
|
||||
|
||||
LOG_INFO(log, "Available RAM: {}; physical cores: {}; logical cores: {}.",
|
||||
formatReadableSizeWithBinarySuffix(memory_amount),
|
||||
getNumberOfPhysicalCPUCores(), // on ARM processors it can show only enabled at current moment cores
|
||||
std::thread::hardware_concurrency());
|
||||
|
||||
sanityChecks(*this);
|
||||
|
||||
// Initialize global thread pool. Do it before we fetch configs from zookeeper
|
||||
@ -776,8 +783,6 @@ try
|
||||
|
||||
Settings::checkNoSettingNamesAtTopLevel(config(), config_path);
|
||||
|
||||
const auto memory_amount = getMemoryAmount();
|
||||
|
||||
#if defined(OS_LINUX)
|
||||
std::string executable_path = getExecutablePath();
|
||||
|
||||
@ -1044,8 +1049,8 @@ try
|
||||
bool continue_if_corrupted = config().getBool("merge_tree_metadata_cache.continue_if_corrupted", false);
|
||||
try
|
||||
{
|
||||
LOG_DEBUG(
|
||||
log, "Initializing merge tree metadata cache lru_cache_size:{} continue_if_corrupted:{}", size, continue_if_corrupted);
|
||||
LOG_DEBUG(log, "Initializing MergeTree metadata cache, lru_cache_size: {} continue_if_corrupted: {}",
|
||||
ReadableSize(size), continue_if_corrupted);
|
||||
global_context->initializeMergeTreeMetadataCache(path_str + "/" + "rocksdb", size);
|
||||
}
|
||||
catch (...)
|
||||
@ -1718,13 +1723,6 @@ try
|
||||
main_config_reloader->start();
|
||||
access_control.startPeriodicReloading();
|
||||
|
||||
{
|
||||
LOG_INFO(log, "Available RAM: {}; physical cores: {}; logical cores: {}.",
|
||||
formatReadableSizeWithBinarySuffix(memory_amount),
|
||||
getNumberOfPhysicalCPUCores(), // on ARM processors it can show only enabled at current moment cores
|
||||
std::thread::hardware_concurrency());
|
||||
}
|
||||
|
||||
/// try to load dictionaries immediately, throw on error and die
|
||||
try
|
||||
{
|
||||
|
1
programs/server/config.d/graphite.xml
Symbolic link
1
programs/server/config.d/graphite.xml
Symbolic link
@ -0,0 +1 @@
|
||||
../../../tests/config/config.d/graphite.xml
|
@ -5,4 +5,5 @@ fn main() {
|
||||
}
|
||||
build.compile("skim");
|
||||
println!("cargo:rerun-if-changed=src/lib.rs");
|
||||
println!("cargo:rerun-if-changed=.cargo/config.toml");
|
||||
}
|
||||
|
@ -87,4 +87,4 @@ private:
|
||||
} // namespace cxxbridge1
|
||||
} // namespace rust
|
||||
|
||||
::rust::String skim(::std::vector<::std::string> const &words);
|
||||
::rust::String skim(::std::string const &prefix, ::std::vector<::std::string> const &words);
|
||||
|
@ -5,7 +5,7 @@ use cxx::{CxxString, CxxVector};
|
||||
#[cxx::bridge]
|
||||
mod ffi {
|
||||
extern "Rust" {
|
||||
fn skim(words: &CxxVector<CxxString>) -> Result<String>;
|
||||
fn skim(prefix: &CxxString, words: &CxxVector<CxxString>) -> Result<String>;
|
||||
}
|
||||
}
|
||||
|
||||
@ -18,7 +18,7 @@ impl SkimItem for Item {
|
||||
}
|
||||
}
|
||||
|
||||
fn skim(words: &CxxVector<CxxString>) -> Result<String, String> {
|
||||
fn skim(prefix: &CxxString, words: &CxxVector<CxxString>) -> Result<String, String> {
|
||||
// Let's check is terminal available. To avoid panic.
|
||||
if let Err(err) = TermInfo::from_env() {
|
||||
return Err(format!("{}", err));
|
||||
@ -26,6 +26,7 @@ fn skim(words: &CxxVector<CxxString>) -> Result<String, String> {
|
||||
|
||||
let options = SkimOptionsBuilder::default()
|
||||
.height(Some("30%"))
|
||||
.query(Some(prefix.to_str().unwrap()))
|
||||
.tac(true)
|
||||
.tiebreak(Some("-score".to_string()))
|
||||
.build()
|
||||
|
@ -72,9 +72,12 @@ AggregateFunctionPtr AggregateFunctionFactory::get(
|
||||
{
|
||||
auto types_without_low_cardinality = convertLowCardinalityTypesToNested(argument_types);
|
||||
|
||||
/// If one of the types is Nullable, we apply aggregate function combinator "Null".
|
||||
|
||||
if (std::any_of(types_without_low_cardinality.begin(), types_without_low_cardinality.end(),
|
||||
/// If one of the types is Nullable, we apply aggregate function combinator "Null" if it's not window function.
|
||||
/// Window functions are not real aggregate functions. Applying combinators doesn't make sense for them,
|
||||
/// they must handle the nullability themselves
|
||||
auto properties = tryGetPropertiesImpl(name);
|
||||
bool is_window_function = properties.has_value() && properties->is_window_function;
|
||||
if (!is_window_function && std::any_of(types_without_low_cardinality.begin(), types_without_low_cardinality.end(),
|
||||
[](const auto & type) { return type->isNullable(); }))
|
||||
{
|
||||
AggregateFunctionCombinatorPtr combinator = AggregateFunctionCombinatorFactory::instance().tryFindSuffix("Null");
|
||||
|
@ -23,7 +23,7 @@ public:
|
||||
throw Exception("Incorrect number of arguments for aggregate function with " + getName() + " suffix",
|
||||
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
|
||||
|
||||
if (!isUInt8(arguments.back()))
|
||||
if (!isUInt8(arguments.back()) && !arguments.back()->onlyNull())
|
||||
throw Exception("Illegal type " + arguments.back()->getName() + " of last argument for aggregate function with " + getName() + " suffix",
|
||||
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
|
||||
|
||||
@ -52,6 +52,7 @@ class AggregateFunctionIfNullUnary final
|
||||
private:
|
||||
size_t num_arguments;
|
||||
bool filter_is_nullable = false;
|
||||
bool filter_is_only_null = false;
|
||||
|
||||
/// The name of the nested function, including combinators (i.e. *If)
|
||||
///
|
||||
@ -84,10 +85,8 @@ private:
|
||||
|
||||
return assert_cast<const ColumnUInt8 &>(*filter_column).getData()[row_num] && !filter_null_map[row_num];
|
||||
}
|
||||
else
|
||||
{
|
||||
return assert_cast<const ColumnUInt8 &>(*filter_column).getData()[row_num];
|
||||
}
|
||||
|
||||
return assert_cast<const ColumnUInt8 &>(*filter_column).getData()[row_num];
|
||||
}
|
||||
|
||||
public:
|
||||
@ -106,10 +105,14 @@ public:
|
||||
"Aggregate function {} require at least one argument", getName());
|
||||
|
||||
filter_is_nullable = arguments[num_arguments - 1]->isNullable();
|
||||
filter_is_only_null = arguments[num_arguments - 1]->onlyNull();
|
||||
}
|
||||
|
||||
void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena * arena) const override
|
||||
{
|
||||
if (filter_is_only_null)
|
||||
return;
|
||||
|
||||
const ColumnNullable * column = assert_cast<const ColumnNullable *>(columns[0]);
|
||||
const IColumn * nested_column = &column->getNestedColumn();
|
||||
if (!column->isNullAt(row_num) && singleFilter(columns, row_num))
|
||||
@ -127,6 +130,9 @@ public:
|
||||
Arena * arena,
|
||||
ssize_t) const override
|
||||
{
|
||||
if (filter_is_only_null)
|
||||
return;
|
||||
|
||||
const ColumnNullable * column = assert_cast<const ColumnNullable *>(columns[0]);
|
||||
const UInt8 * null_map = column->getNullMapData().data();
|
||||
const IColumn * columns_param[] = {&column->getNestedColumn()};
|
||||
@ -177,6 +183,11 @@ public:
|
||||
|
||||
#if USE_EMBEDDED_COMPILER
|
||||
|
||||
bool isCompilable() const override
|
||||
{
|
||||
return canBeNativeType(*this->argument_types.back()) && this->nested_function->isCompilable();
|
||||
}
|
||||
|
||||
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
|
||||
{
|
||||
llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
|
||||
@ -224,6 +235,9 @@ class AggregateFunctionIfNullVariadic final : public AggregateFunctionNullBase<
|
||||
serialize_flag,
|
||||
AggregateFunctionIfNullVariadic<result_is_nullable, serialize_flag>>
|
||||
{
|
||||
private:
|
||||
bool filter_is_only_null = false;
|
||||
|
||||
public:
|
||||
|
||||
String getName() const override
|
||||
@ -243,6 +257,8 @@ public:
|
||||
|
||||
for (size_t i = 0; i < number_of_arguments; ++i)
|
||||
is_nullable[i] = arguments[i]->isNullable();
|
||||
|
||||
filter_is_only_null = arguments.back()->onlyNull();
|
||||
}
|
||||
|
||||
static inline bool singleFilter(const IColumn ** columns, size_t row_num, size_t num_arguments)
|
||||
@ -282,6 +298,9 @@ public:
|
||||
void addBatchSinglePlace(
|
||||
size_t row_begin, size_t row_end, AggregateDataPtr __restrict place, const IColumn ** columns, Arena * arena, ssize_t) const final
|
||||
{
|
||||
if (filter_is_only_null)
|
||||
return;
|
||||
|
||||
std::unique_ptr<UInt8[]> final_null_flags = std::make_unique<UInt8[]>(row_end);
|
||||
const size_t filter_column_num = number_of_arguments - 1;
|
||||
|
||||
@ -346,6 +365,11 @@ public:
|
||||
|
||||
#if USE_EMBEDDED_COMPILER
|
||||
|
||||
bool isCompilable() const override
|
||||
{
|
||||
return canBeNativeType(*this->argument_types.back()) && this->nested_function->isCompilable();
|
||||
}
|
||||
|
||||
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
|
||||
{
|
||||
/// TODO: Check
|
||||
|
@ -42,7 +42,7 @@ public:
|
||||
if (num_arguments == 0)
|
||||
throw Exception("Aggregate function " + getName() + " require at least one argument", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
|
||||
|
||||
if (!isUInt8(types.back()))
|
||||
if (!isUInt8(types.back()) && !types.back()->onlyNull())
|
||||
throw Exception("Last argument for aggregate function " + getName() + " must be UInt8", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
|
||||
}
|
||||
|
||||
@ -199,12 +199,16 @@ public:
|
||||
|
||||
AggregateFunctionPtr getNestedFunction() const override { return nested_func; }
|
||||
|
||||
std::unordered_set<size_t> getArgumentsThatCanBeOnlyNull() const override
|
||||
{
|
||||
return {num_arguments - 1};
|
||||
}
|
||||
|
||||
#if USE_EMBEDDED_COMPILER
|
||||
|
||||
bool isCompilable() const override
|
||||
{
|
||||
return nested_func->isCompilable();
|
||||
return canBeNativeType(*this->argument_types.back()) && nested_func->isCompilable();
|
||||
}
|
||||
|
||||
void compileCreate(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr) const override
|
||||
|
@ -29,7 +29,13 @@ public:
|
||||
size_t size = arguments.size();
|
||||
DataTypes res(size);
|
||||
for (size_t i = 0; i < size; ++i)
|
||||
res[i] = removeNullable(arguments[i]);
|
||||
{
|
||||
/// Nullable(Nothing) is processed separately, don't convert it to Nothing.
|
||||
if (arguments[i]->onlyNull())
|
||||
res[i] = arguments[i];
|
||||
else
|
||||
res[i] = removeNullable(arguments[i]);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
@ -41,12 +47,16 @@ public:
|
||||
{
|
||||
bool has_nullable_types = false;
|
||||
bool has_null_types = false;
|
||||
for (const auto & arg_type : arguments)
|
||||
std::unordered_set<size_t> arguments_that_can_be_only_null;
|
||||
if (nested_function)
|
||||
arguments_that_can_be_only_null = nested_function->getArgumentsThatCanBeOnlyNull();
|
||||
|
||||
for (size_t i = 0; i < arguments.size(); ++i)
|
||||
{
|
||||
if (arg_type->isNullable())
|
||||
if (arguments[i]->isNullable())
|
||||
{
|
||||
has_nullable_types = true;
|
||||
if (arg_type->onlyNull())
|
||||
if (arguments[i]->onlyNull() && !arguments_that_can_be_only_null.contains(i))
|
||||
{
|
||||
has_null_types = true;
|
||||
break;
|
||||
|
@ -345,6 +345,14 @@ public:
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
/// For most functions if one of arguments is always NULL, we return NULL (it's implemented in combinator Null),
|
||||
/// but in some functions we can want to process this argument somehow (for example condition argument in If combinator).
|
||||
/// This method returns the set of argument indexes that can be always NULL, they will be skipped in combinator Null.
|
||||
virtual std::unordered_set<size_t> getArgumentsThatCanBeOnlyNull() const
|
||||
{
|
||||
return {};
|
||||
}
|
||||
|
||||
/** Return the nested function if this is an Aggregate Function Combinator.
|
||||
* Otherwise return nullptr.
|
||||
*/
|
||||
@ -828,6 +836,9 @@ struct AggregateFunctionProperties
|
||||
* Some may also name this property as "non-commutative".
|
||||
*/
|
||||
bool is_order_dependent = false;
|
||||
|
||||
/// Indicates if it's actually window function.
|
||||
bool is_window_function = false;
|
||||
};
|
||||
|
||||
|
||||
|
@ -81,6 +81,7 @@ public:
|
||||
if (nested_if_function_arguments_nodes.size() != 3)
|
||||
return;
|
||||
|
||||
auto & cond_argument = nested_if_function_arguments_nodes[0];
|
||||
const auto * if_true_condition_constant_node = nested_if_function_arguments_nodes[1]->as<ConstantNode>();
|
||||
const auto * if_false_condition_constant_node = nested_if_function_arguments_nodes[2]->as<ConstantNode>();
|
||||
|
||||
@ -107,8 +108,8 @@ public:
|
||||
return;
|
||||
}
|
||||
|
||||
/// Rewrite `sum(if(cond, 0, 1))` into `countIf(not(cond))`.
|
||||
if (if_true_condition_value == 0 && if_false_condition_value == 1)
|
||||
/// Rewrite `sum(if(cond, 0, 1))` into `countIf(not(cond))` if condition is not Nullable (otherwise the result can be different).
|
||||
if (if_true_condition_value == 0 && if_false_condition_value == 1 && !cond_argument->getResultType()->isNullable())
|
||||
{
|
||||
DataTypePtr not_function_result_type = std::make_shared<DataTypeUInt8>();
|
||||
|
||||
|
@ -344,7 +344,7 @@ set_source_files_properties(
|
||||
Common/Elf.cpp
|
||||
Common/Dwarf.cpp
|
||||
Common/SymbolIndex.cpp
|
||||
PROPERTIES COMPILE_FLAGS "-O3 ${WITHOUT_COVERAGE}")
|
||||
PROPERTIES COMPILE_FLAGS "-O2 ${WITHOUT_COVERAGE}")
|
||||
|
||||
target_link_libraries (clickhouse_common_io
|
||||
PRIVATE
|
||||
|
@ -2,12 +2,7 @@
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/setThreadName.h>
|
||||
#include <Common/CurrentMetrics.h>
|
||||
#include <Common/typeid_cast.h>
|
||||
#include <Common/filesystemHelpers.h>
|
||||
#include <Common/getCurrentProcessFDCount.h>
|
||||
#include <Common/getMaxFileDescriptorCount.h>
|
||||
#include <Interpreters/Cache/FileCache.h>
|
||||
#include <Server/ProtocolServerAdapter.h>
|
||||
#include <IO/UncompressedCache.h>
|
||||
#include <IO/MMappedFileCache.h>
|
||||
#include <IO/ReadHelpers.h>
|
||||
|
@ -156,9 +156,10 @@ ReturnType ThreadPoolImpl<Thread>::scheduleImpl(Job job, ssize_t priority, std::
|
||||
propagate_opentelemetry_tracing_context ? DB::OpenTelemetry::CurrentContext() : DB::OpenTelemetry::TracingContextOnThread());
|
||||
|
||||
++scheduled_jobs;
|
||||
new_job_or_shutdown.notify_one();
|
||||
}
|
||||
|
||||
new_job_or_shutdown.notify_one();
|
||||
|
||||
return static_cast<ReturnType>(true);
|
||||
}
|
||||
|
||||
|
@ -36,7 +36,7 @@ std::string ZooKeeperRequest::toString() const
|
||||
"OpNum = {}\n"
|
||||
"Additional info:\n{}",
|
||||
xid,
|
||||
getOpNum(),
|
||||
Coordination::toString(getOpNum()),
|
||||
toStringImpl());
|
||||
}
|
||||
|
||||
|
@ -676,7 +676,7 @@ void ZooKeeper::receiveThread()
|
||||
if (earliest_operation)
|
||||
{
|
||||
throw Exception(Error::ZOPERATIONTIMEOUT, "Operation timeout (no response) for request {} for path: {}",
|
||||
earliest_operation->request->getOpNum(), earliest_operation->request->getPath());
|
||||
toString(earliest_operation->request->getOpNum()), earliest_operation->request->getPath());
|
||||
}
|
||||
waited_us += max_wait_us;
|
||||
if (waited_us >= args.session_timeout_ms * 1000)
|
||||
@ -870,7 +870,7 @@ void ZooKeeper::finalize(bool error_send, bool error_receive, const String & rea
|
||||
if (already_started)
|
||||
return;
|
||||
|
||||
LOG_INFO(log, "Finalizing session {}: finalization_started={}, queue_finished={}, reason={}",
|
||||
LOG_INFO(log, "Finalizing session {}. finalization_started: {}, queue_finished: {}, reason: '{}'",
|
||||
session_id, already_started, requests_queue.isFinished(), reason);
|
||||
|
||||
auto expire_session_if_not_expired = [&]
|
||||
|
@ -132,7 +132,7 @@ void assertDigest(
|
||||
"Digest for nodes is not matching after {} request of type '{}'.\nExpected digest - {}, actual digest - {} (digest "
|
||||
"{}). Keeper will terminate to avoid inconsistencies.\nExtra information about the request:\n{}",
|
||||
committing ? "committing" : "preprocessing",
|
||||
request.getOpNum(),
|
||||
Coordination::toString(request.getOpNum()),
|
||||
first.value,
|
||||
second.value,
|
||||
first.version,
|
||||
|
@ -1704,7 +1704,7 @@ struct KeeperStorageMultiRequestProcessor final : public KeeperStorageRequestPro
|
||||
break;
|
||||
default:
|
||||
throw DB::Exception(
|
||||
ErrorCodes::BAD_ARGUMENTS, "Illegal command as part of multi ZooKeeper request {}", sub_zk_request->getOpNum());
|
||||
ErrorCodes::BAD_ARGUMENTS, "Illegal command as part of multi ZooKeeper request {}", Coordination::toString(sub_zk_request->getOpNum()));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -565,6 +565,7 @@ static constexpr UInt64 operator""_GiB(unsigned long long value)
|
||||
M(UInt64, max_distributed_depth, 5, "Maximum distributed query depth", 0) \
|
||||
M(Bool, database_replicated_always_detach_permanently, false, "Execute DETACH TABLE as DETACH TABLE PERMANENTLY if database engine is Replicated", 0) \
|
||||
M(Bool, database_replicated_allow_only_replicated_engine, false, "Allow to create only Replicated tables in database with engine Replicated", 0) \
|
||||
M(Bool, database_replicated_allow_replicated_engine_arguments, true, "Allow to create only Replicated tables in database with engine Replicated with explicit arguments", 0) \
|
||||
M(DistributedDDLOutputMode, distributed_ddl_output_mode, DistributedDDLOutputMode::THROW, "Format of distributed DDL query result", 0) \
|
||||
M(UInt64, distributed_ddl_entry_format_version, 3, "Compatibility version of distributed DDL (ON CLUSTER) queries", 0) \
|
||||
\
|
||||
|
@ -178,12 +178,9 @@ __attribute__((__weak__)) void collectCrashLog(
|
||||
class SignalListener : public Poco::Runnable
|
||||
{
|
||||
public:
|
||||
enum Signals : int
|
||||
{
|
||||
StdTerminate = -1,
|
||||
StopThread = -2,
|
||||
SanitizerTrap = -3,
|
||||
};
|
||||
static constexpr int StdTerminate = -1;
|
||||
static constexpr int StopThread = -2;
|
||||
static constexpr int SanitizerTrap = -3;
|
||||
|
||||
explicit SignalListener(BaseDaemon & daemon_)
|
||||
: log(&Poco::Logger::get("BaseDaemon"))
|
||||
@ -208,7 +205,7 @@ public:
|
||||
// Don't use strsignal here, because it's not thread-safe.
|
||||
LOG_TRACE(log, "Received signal {}", sig);
|
||||
|
||||
if (sig == Signals::StopThread)
|
||||
if (sig == StopThread)
|
||||
{
|
||||
LOG_INFO(log, "Stop SignalListener thread");
|
||||
break;
|
||||
@ -219,7 +216,7 @@ public:
|
||||
BaseDaemon::instance().closeLogs(BaseDaemon::instance().logger());
|
||||
LOG_INFO(log, "Opened new log file after received signal.");
|
||||
}
|
||||
else if (sig == Signals::StdTerminate)
|
||||
else if (sig == StdTerminate)
|
||||
{
|
||||
UInt32 thread_num;
|
||||
std::string message;
|
||||
@ -909,7 +906,7 @@ void BaseDaemon::initializeTerminationAndSignalProcessing()
|
||||
|
||||
void BaseDaemon::logRevision() const
|
||||
{
|
||||
Poco::Logger::root().information("Starting " + std::string{VERSION_FULL}
|
||||
logger().information("Starting " + std::string{VERSION_FULL}
|
||||
+ " (revision: " + std::to_string(ClickHouseRevision::getVersionRevision())
|
||||
+ ", git hash: " + (git_hash.empty() ? "<unknown>" : git_hash)
|
||||
+ ", build id: " + (build_id.empty() ? "<unknown>" : build_id) + ")"
|
||||
@ -958,7 +955,6 @@ void BaseDaemon::handleSignal(int signal_id)
|
||||
std::lock_guard lock(signal_handler_mutex);
|
||||
{
|
||||
++terminate_signals_counter;
|
||||
sigint_signals_counter += signal_id == SIGINT;
|
||||
signal_event.notify_all();
|
||||
}
|
||||
|
||||
@ -973,9 +969,9 @@ void BaseDaemon::onInterruptSignals(int signal_id)
|
||||
is_cancelled = true;
|
||||
LOG_INFO(&logger(), "Received termination signal ({})", strsignal(signal_id)); // NOLINT(concurrency-mt-unsafe) // it is not thread-safe but ok in this context
|
||||
|
||||
if (sigint_signals_counter >= 2)
|
||||
if (terminate_signals_counter >= 2)
|
||||
{
|
||||
LOG_INFO(&logger(), "Received second signal Interrupt. Immediately terminate.");
|
||||
LOG_INFO(&logger(), "This is the second termination signal. Immediately terminate.");
|
||||
call_default_signal_handler(signal_id);
|
||||
/// If the above did not help.
|
||||
_exit(128 + signal_id);
|
||||
|
@ -162,7 +162,6 @@ protected:
|
||||
std::mutex signal_handler_mutex;
|
||||
std::condition_variable signal_event;
|
||||
std::atomic_size_t terminate_signals_counter{0};
|
||||
std::atomic_size_t sigint_signals_counter{0};
|
||||
|
||||
std::string config_path;
|
||||
DB::ConfigProcessor::LoadedConfig loaded_config;
|
||||
|
@ -259,7 +259,7 @@ void SerializationBool::deserializeTextCSV(IColumn & column, ReadBuffer & istr,
|
||||
if (istr.eof())
|
||||
throw Exception("Expected boolean value but get EOF.", ErrorCodes::CANNOT_PARSE_BOOL);
|
||||
|
||||
deserializeImpl(column, istr, settings, [&](ReadBuffer & buf){ return buf.eof() || *buf.position() == settings.csv.delimiter || *buf.position() == '\n'; });
|
||||
deserializeImpl(column, istr, settings, [&](ReadBuffer & buf){ return buf.eof() || *buf.position() == settings.csv.delimiter || *buf.position() == '\n' || *buf.position() == '\r'; });
|
||||
}
|
||||
|
||||
void SerializationBool::serializeTextRaw(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const
|
||||
|
@ -584,7 +584,14 @@ void DatabaseReplicated::checkQueryValid(const ASTPtr & query, ContextPtr query_
|
||||
bool enable_functional_tests_helper = getContext()->getConfigRef().has("_functional_tests_helper_database_replicated_replace_args_macros");
|
||||
|
||||
if (!enable_functional_tests_helper)
|
||||
LOG_WARNING(log, "It's not recommended to explicitly specify zookeeper_path and replica_name in ReplicatedMergeTree arguments");
|
||||
{
|
||||
if (query_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments)
|
||||
LOG_WARNING(log, "It's not recommended to explicitly specify zookeeper_path and replica_name in ReplicatedMergeTree arguments");
|
||||
else
|
||||
throw Exception(ErrorCodes::INCORRECT_QUERY,
|
||||
"It's not allowed to specify explicit zookeeper_path and replica_name for ReplicatedMergeTree arguments in Replicated database. "
|
||||
"If you really want to specify them explicitly, enable setting database_replicated_allow_replicated_engine_arguments.");
|
||||
}
|
||||
|
||||
if (maybe_shard_macros && maybe_replica_macros)
|
||||
return;
|
||||
|
48
src/Dictionaries/NullDictionarySource.cpp
Normal file
48
src/Dictionaries/NullDictionarySource.cpp
Normal file
@ -0,0 +1,48 @@
|
||||
#include "NullDictionarySource.h"
|
||||
#include <Interpreters/Context.h>
|
||||
#include <Processors/Sources/NullSource.h>
|
||||
#include <Common/logger_useful.h>
|
||||
#include "DictionarySourceFactory.h"
|
||||
#include "DictionarySourceHelpers.h"
|
||||
#include "DictionaryStructure.h"
|
||||
#include "registerDictionaries.h"
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
NullDictionarySource::NullDictionarySource(Block & sample_block_) : sample_block(sample_block_)
|
||||
{
|
||||
}
|
||||
|
||||
NullDictionarySource::NullDictionarySource(const NullDictionarySource & other) : sample_block(other.sample_block)
|
||||
{
|
||||
}
|
||||
|
||||
QueryPipeline NullDictionarySource::loadAll()
|
||||
{
|
||||
LOG_TRACE(&Poco::Logger::get("NullDictionarySource"), "loadAll {}", toString());
|
||||
return QueryPipeline(std::make_shared<NullSource>(sample_block));
|
||||
}
|
||||
|
||||
|
||||
std::string NullDictionarySource::toString() const
|
||||
{
|
||||
return "Null";
|
||||
}
|
||||
|
||||
|
||||
void registerDictionarySourceNull(DictionarySourceFactory & factory)
|
||||
{
|
||||
auto create_table_source
|
||||
= [=](const DictionaryStructure & /* dict_struct */,
|
||||
const Poco::Util::AbstractConfiguration & /* config */,
|
||||
const std::string & /* config_prefix */,
|
||||
Block & sample_block,
|
||||
ContextPtr /* global_context */,
|
||||
const std::string & /* default_database */,
|
||||
bool /* created_from_ddl*/) -> DictionarySourcePtr { return std::make_unique<NullDictionarySource>(sample_block); };
|
||||
|
||||
factory.registerSource("null", create_table_source);
|
||||
}
|
||||
|
||||
}
|
53
src/Dictionaries/NullDictionarySource.h
Normal file
53
src/Dictionaries/NullDictionarySource.h
Normal file
@ -0,0 +1,53 @@
|
||||
#pragma once
|
||||
|
||||
#include <Core/Block.h>
|
||||
#include "IDictionarySource.h"
|
||||
|
||||
namespace DB
|
||||
{
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int NOT_IMPLEMENTED;
|
||||
}
|
||||
|
||||
/// Allows creating empty dictionary
|
||||
class NullDictionarySource final : public IDictionarySource
|
||||
{
|
||||
public:
|
||||
NullDictionarySource(Block & sample_block_);
|
||||
|
||||
NullDictionarySource(const NullDictionarySource & other);
|
||||
|
||||
QueryPipeline loadAll() override;
|
||||
|
||||
QueryPipeline loadUpdatedAll() override
|
||||
{
|
||||
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method loadUpdatedAll is unsupported for NullDictionarySource");
|
||||
}
|
||||
|
||||
QueryPipeline loadIds(const std::vector<UInt64> & /*ids*/) override
|
||||
{
|
||||
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method loadIds is unsupported for NullDictionarySource");
|
||||
}
|
||||
|
||||
QueryPipeline loadKeys(const Columns & /*key_columns*/, const std::vector<size_t> & /*requested_rows*/) override
|
||||
{
|
||||
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method loadKeys is unsupported for NullDictionarySource");
|
||||
}
|
||||
|
||||
bool isModified() const override { return false; }
|
||||
|
||||
bool supportsSelectiveLoad() const override { return false; }
|
||||
|
||||
///Not supported for NullDictionarySource
|
||||
bool hasUpdateField() const override { return false; }
|
||||
|
||||
DictionarySourcePtr clone() const override { return std::make_shared<NullDictionarySource>(*this); }
|
||||
|
||||
std::string toString() const override;
|
||||
|
||||
private:
|
||||
Block sample_block;
|
||||
};
|
||||
|
||||
}
|
@ -6,6 +6,7 @@ namespace DB
|
||||
|
||||
class DictionarySourceFactory;
|
||||
|
||||
void registerDictionarySourceNull(DictionarySourceFactory & factory);
|
||||
void registerDictionarySourceFile(DictionarySourceFactory & source_factory);
|
||||
void registerDictionarySourceMysql(DictionarySourceFactory & source_factory);
|
||||
void registerDictionarySourceClickHouse(DictionarySourceFactory & source_factory);
|
||||
@ -36,6 +37,7 @@ void registerDictionaries()
|
||||
{
|
||||
{
|
||||
auto & source_factory = DictionarySourceFactory::instance();
|
||||
registerDictionarySourceNull(source_factory);
|
||||
registerDictionarySourceFile(source_factory);
|
||||
registerDictionarySourceMysql(source_factory);
|
||||
registerDictionarySourceClickHouse(source_factory);
|
||||
|
@ -318,6 +318,12 @@ DataTypePtr tryInferDataTypeByEscapingRule(const String & field, const FormatSet
|
||||
if (auto date_type = tryInferDateOrDateTimeFromString(field, format_settings))
|
||||
return date_type;
|
||||
|
||||
/// Special case when we have number that starts with 0. In TSV we don't parse such numbers,
|
||||
/// see readIntTextUnsafe in ReadHelpers.h. If we see data started with 0, we can determine it
|
||||
/// as a String, so parsing won't fail.
|
||||
if (field[0] == '0' && field.size() != 1)
|
||||
return std::make_shared<DataTypeString>();
|
||||
|
||||
auto type = tryInferDataTypeForSingleField(field, format_settings);
|
||||
if (!type)
|
||||
return std::make_shared<DataTypeString>();
|
||||
|
@ -250,7 +250,7 @@ namespace
|
||||
{
|
||||
if (isArray(type))
|
||||
nested_types.push_back(assert_cast<const DataTypeArray &>(*type).getNestedType());
|
||||
else
|
||||
else if (isTuple(type))
|
||||
{
|
||||
const auto & elements = assert_cast<const DataTypeTuple &>(*type).getElements();
|
||||
for (const auto & element : elements)
|
||||
@ -262,7 +262,10 @@ namespace
|
||||
if (checkIfTypesAreEqual(nested_types))
|
||||
{
|
||||
for (auto & type : data_types)
|
||||
type = std::make_shared<DataTypeArray>(nested_types.back());
|
||||
{
|
||||
if (isArray(type) || isTuple(type))
|
||||
type = std::make_shared<DataTypeArray>(nested_types.back());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -826,14 +829,40 @@ void transformInferredJSONTypesIfNeeded(
|
||||
|
||||
void transformJSONTupleToArrayIfPossible(DataTypePtr & data_type, const FormatSettings & settings, JSONInferenceInfo * json_info)
|
||||
{
|
||||
if (!data_type || !isTuple(data_type))
|
||||
if (!data_type)
|
||||
return;
|
||||
|
||||
const auto * tuple_type = assert_cast<const DataTypeTuple *>(data_type.get());
|
||||
auto nested_types = tuple_type->getElements();
|
||||
transformInferredTypesIfNeededImpl<true>(nested_types, settings, json_info);
|
||||
if (checkIfTypesAreEqual(nested_types))
|
||||
data_type = std::make_shared<DataTypeArray>(nested_types.back());
|
||||
if (const auto * array_type = typeid_cast<const DataTypeArray *>(data_type.get()))
|
||||
{
|
||||
auto nested_type = array_type->getNestedType();
|
||||
transformJSONTupleToArrayIfPossible(nested_type, settings, json_info);
|
||||
data_type = std::make_shared<DataTypeArray>(nested_type);
|
||||
return;
|
||||
}
|
||||
|
||||
if (const auto * map_type = typeid_cast<const DataTypeMap *>(data_type.get()))
|
||||
{
|
||||
auto value_type = map_type->getValueType();
|
||||
transformJSONTupleToArrayIfPossible(value_type, settings, json_info);
|
||||
data_type = std::make_shared<DataTypeMap>(map_type->getKeyType(), value_type);
|
||||
return;
|
||||
}
|
||||
|
||||
if (const auto * tuple_type = typeid_cast<const DataTypeTuple *>(data_type.get()))
|
||||
{
|
||||
auto nested_types = tuple_type->getElements();
|
||||
for (auto & nested_type : nested_types)
|
||||
transformJSONTupleToArrayIfPossible(nested_type, settings, json_info);
|
||||
|
||||
auto nested_types_copy = nested_types;
|
||||
transformInferredTypesIfNeededImpl<true>(nested_types_copy, settings, json_info);
|
||||
if (checkIfTypesAreEqual(nested_types_copy))
|
||||
data_type = std::make_shared<DataTypeArray>(nested_types_copy.back());
|
||||
else
|
||||
data_type = std::make_shared<DataTypeTuple>(nested_types);
|
||||
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
DataTypePtr tryInferNumberFromString(std::string_view field, const FormatSettings & settings)
|
||||
|
@ -176,6 +176,7 @@ public:
|
||||
ColumnPtr executeShortCircuit(ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type) const;
|
||||
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }
|
||||
size_t getNumberOfArguments() const override { return 0; }
|
||||
bool canBeExecutedOnLowCardinalityDictionary() const override { return false; }
|
||||
|
||||
bool useDefaultImplementationForNulls() const override { return !Impl::specialImplementationForNulls(); }
|
||||
|
||||
|
@ -126,7 +126,7 @@ private:
|
||||
|
||||
size_t total_values = 0;
|
||||
size_t pre_values = 0;
|
||||
std::vector<size_t> row_length(input_rows_count);
|
||||
PODArray<size_t> row_length(input_rows_count);
|
||||
|
||||
for (size_t row_idx = 0; row_idx < input_rows_count; ++row_idx)
|
||||
{
|
||||
@ -138,6 +138,8 @@ private:
|
||||
row_length[row_idx] = (static_cast<__int128_t>(end_data[row_idx]) - static_cast<__int128_t>(start) - 1) / static_cast<__int128_t>(step) + 1;
|
||||
else if (start > end_data[row_idx] && step < 0)
|
||||
row_length[row_idx] = (static_cast<__int128_t>(end_data[row_idx]) - static_cast<__int128_t>(start) + 1) / static_cast<__int128_t>(step) + 1;
|
||||
else
|
||||
row_length[row_idx] = 0;
|
||||
|
||||
pre_values += row_length[row_idx];
|
||||
|
||||
@ -161,8 +163,11 @@ private:
|
||||
IColumn::Offset offset{};
|
||||
for (size_t row_idx = 0; row_idx < input_rows_count; ++row_idx)
|
||||
{
|
||||
for (size_t idx = 0; idx < row_length[row_idx]; idx++)
|
||||
out_data[offset++] = static_cast<T>(start + offset * step);
|
||||
for (size_t idx = 0; idx < row_length[row_idx]; ++idx)
|
||||
{
|
||||
out_data[offset] = static_cast<T>(start + offset * step);
|
||||
++offset;
|
||||
}
|
||||
out_offsets[row_idx] = offset;
|
||||
}
|
||||
|
||||
@ -183,7 +188,7 @@ private:
|
||||
|
||||
size_t total_values = 0;
|
||||
size_t pre_values = 0;
|
||||
std::vector<size_t> row_length(input_rows_count);
|
||||
PODArray<size_t> row_length(input_rows_count);
|
||||
|
||||
for (size_t row_idx = 0; row_idx < input_rows_count; ++row_idx)
|
||||
{
|
||||
@ -195,7 +200,8 @@ private:
|
||||
row_length[row_idx] = (static_cast<__int128_t>(end_data[row_idx]) - static_cast<__int128_t>(start_data[row_idx]) - 1) / static_cast<__int128_t>(step) + 1;
|
||||
else if (start_data[row_idx] > end_data[row_idx] && step < 0)
|
||||
row_length[row_idx] = (static_cast<__int128_t>(end_data[row_idx]) - static_cast<__int128_t>(start_data[row_idx]) + 1) / static_cast<__int128_t>(step) + 1;
|
||||
|
||||
else
|
||||
row_length[row_idx] = 0;
|
||||
|
||||
pre_values += row_length[row_idx];
|
||||
|
||||
@ -241,7 +247,7 @@ private:
|
||||
|
||||
size_t total_values = 0;
|
||||
size_t pre_values = 0;
|
||||
std::vector<size_t> row_length(input_rows_count);
|
||||
PODArray<size_t> row_length(input_rows_count);
|
||||
|
||||
for (size_t row_idx = 0; row_idx < input_rows_count; ++row_idx)
|
||||
{
|
||||
@ -253,6 +259,8 @@ private:
|
||||
row_length[row_idx] = (static_cast<__int128_t>(end_data[row_idx]) - static_cast<__int128_t>(start) - 1) / static_cast<__int128_t>(step_data[row_idx]) + 1;
|
||||
else if (start > end_data[row_idx] && step_data[row_idx] < 0)
|
||||
row_length[row_idx] = (static_cast<__int128_t>(end_data[row_idx]) - static_cast<__int128_t>(start) + 1) / static_cast<__int128_t>(step_data[row_idx]) + 1;
|
||||
else
|
||||
row_length[row_idx] = 0;
|
||||
|
||||
pre_values += row_length[row_idx];
|
||||
|
||||
@ -301,7 +309,7 @@ private:
|
||||
|
||||
size_t total_values = 0;
|
||||
size_t pre_values = 0;
|
||||
std::vector<size_t> row_length(input_rows_count);
|
||||
PODArray<size_t> row_length(input_rows_count);
|
||||
|
||||
for (size_t row_idx = 0; row_idx < input_rows_count; ++row_idx)
|
||||
{
|
||||
@ -312,6 +320,8 @@ private:
|
||||
row_length[row_idx] = (static_cast<__int128_t>(end_start[row_idx]) - static_cast<__int128_t>(start_data[row_idx]) - 1) / static_cast<__int128_t>(step_data[row_idx]) + 1;
|
||||
else if (start_data[row_idx] > end_start[row_idx] && step_data[row_idx] < 0)
|
||||
row_length[row_idx] = (static_cast<__int128_t>(end_start[row_idx]) - static_cast<__int128_t>(start_data[row_idx]) + 1) / static_cast<__int128_t>(step_data[row_idx]) + 1;
|
||||
else
|
||||
row_length[row_idx] = 0;
|
||||
|
||||
pre_values += row_length[row_idx];
|
||||
|
||||
|
@ -1026,6 +1026,7 @@ public:
|
||||
}
|
||||
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }
|
||||
ColumnNumbers getArgumentsThatDontImplyNullableReturnType(size_t /*number_of_arguments*/) const override { return {0}; }
|
||||
bool canBeExecutedOnLowCardinalityDictionary() const override { return false; }
|
||||
|
||||
/// Get result types by argument types. If the function does not apply to these arguments, throw an exception.
|
||||
DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
|
||||
|
@ -51,6 +51,7 @@ public:
|
||||
size_t getNumberOfArguments() const override { return 0; }
|
||||
bool useDefaultImplementationForNulls() const override { return false; }
|
||||
bool useDefaultImplementationForNothing() const override { return false; }
|
||||
bool canBeExecutedOnLowCardinalityDictionary() const override { return false; }
|
||||
|
||||
ColumnNumbers getArgumentsThatDontImplyNullableReturnType(size_t number_of_arguments) const override
|
||||
{
|
||||
|
@ -1098,6 +1098,25 @@ inline String toString(const T & x)
|
||||
return buf.str();
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
inline String toStringWithFinalSeparator(const std::vector<T> & x, const String & final_sep)
|
||||
{
|
||||
WriteBufferFromOwnString buf;
|
||||
for (auto it = x.begin(); it != x.end(); ++it)
|
||||
{
|
||||
if (it != x.begin())
|
||||
{
|
||||
if (std::next(it) == x.end())
|
||||
writeString(final_sep, buf);
|
||||
else
|
||||
writeString(", ", buf);
|
||||
}
|
||||
writeQuoted(*it, buf);
|
||||
}
|
||||
|
||||
return buf.str();
|
||||
}
|
||||
|
||||
inline void writeNullTerminatedString(const String & s, WriteBuffer & buffer)
|
||||
{
|
||||
/// c_str is guaranteed to return zero-terminated string
|
||||
|
32 src/IO/tests/gtest_WriteHelpers.cpp Normal file

@ -0,0 +1,32 @@
#include <gtest/gtest.h>

#include <string>
#include <vector>
#include <IO/WriteHelpers.h>

using namespace DB;


TEST(WriteHelpersTest, ToStringWithFinalSeparatorTest)
{
{
std::vector<std::string> v;
EXPECT_EQ(toStringWithFinalSeparator(v, " or "), "");
}
{
std::vector<std::string> v = {"AAA"};
EXPECT_EQ(toStringWithFinalSeparator(v, " or "), "'AAA'");
}
{
std::vector<std::string> v = {"AAA", "BBB"};
EXPECT_EQ(toStringWithFinalSeparator(v, " or "), "'AAA' or 'BBB'");
}
{
std::vector<std::string> v = {"AAA", "BBB", "CCC"};
EXPECT_EQ(toStringWithFinalSeparator(v, " or "), "'AAA', 'BBB' or 'CCC'");
}
{
std::vector<std::string> v = {"AAA", "BBB", "CCC", "DDD"};
EXPECT_EQ(toStringWithFinalSeparator(v, " or "), "'AAA', 'BBB', 'CCC' or 'DDD'");
}
}
@ -763,13 +763,17 @@ try
fs::directory_iterator dir_end;
for (fs::directory_iterator it(path); it != dir_end; ++it)
{
if (it->is_regular_file() && startsWith(it->path().filename(), "tmp"))
if (it->is_regular_file())
{
LOG_DEBUG(log, "Removing old temporary file {}", it->path().string());
fs::remove(it->path());
if (startsWith(it->path().filename(), "tmp"))
{
LOG_DEBUG(log, "Removing old temporary file {}", it->path().string());
fs::remove(it->path());
}
else
LOG_DEBUG(log, "Found unknown file in temporary path {}", it->path().string());
}
else
LOG_DEBUG(log, "Found unknown file in temporary path {}", it->path().string());
/// We skip directories (for example, 'http_buffers' - it's used for buffering of the results) and all other file types.
}
}
catch (...)
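The restructured loop above now distinguishes three cases: regular files named tmp* are removed, other regular files are only reported, and directories are skipped. A stand-alone sketch of the same pattern using only std::filesystem and std::cout (no ClickHouse logger; the function name is invented for illustration):

#include <filesystem>
#include <iostream>
#include <string>

namespace fs = std::filesystem;

void cleanupTemporaryPath(const fs::path & path)
{
    for (const auto & entry : fs::directory_iterator(path))
    {
        if (entry.is_regular_file())
        {
            if (entry.path().filename().string().starts_with("tmp"))  /// C++20 starts_with
            {
                std::cout << "Removing old temporary file " << entry.path() << '\n';
                fs::remove(entry.path());
            }
            else
                std::cout << "Found unknown file in temporary path " << entry.path() << '\n';
        }
        /// Directories (e.g. 'http_buffers') and other file types are skipped.
    }
}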
@ -404,6 +404,8 @@ ASTPtr InterpreterCreateQuery::formatColumns(const ColumnsDescription & columns)
column_declaration->children.push_back(column_declaration->default_expression);
}
column_declaration->ephemeral_default = column.default_desc.ephemeral_default;
if (!column.comment.empty())
{
column_declaration->comment = std::make_shared<ASTLiteral>(Field(column.comment));

@ -540,11 +542,7 @@ ColumnsDescription InterpreterCreateQuery::getColumnsDescription(
final_column_name));
default_expr_list->children.emplace_back(
setAlias(
col_decl.default_specifier == "EPHEMERAL" ? /// can be ASTLiteral::value NULL
std::make_shared<ASTLiteral>(data_type_ptr->getDefault()) :
col_decl.default_expression->clone(),
tmp_column_name));
setAlias(col_decl.default_expression->clone(), tmp_column_name));
}
else
default_expr_list->children.emplace_back(setAlias(col_decl.default_expression->clone(), col_decl.name));

@ -590,10 +588,7 @@ ColumnsDescription InterpreterCreateQuery::getColumnsDescription(
visitor.visit(col_decl.default_expression);
}
ASTPtr default_expr =
col_decl.default_specifier == "EPHEMERAL" && col_decl.default_expression->as<ASTLiteral>()->value.isNull() ?
std::make_shared<ASTLiteral>(DataTypeFactory::instance().get(col_decl.type)->getDefault()) :
col_decl.default_expression->clone();
ASTPtr default_expr = col_decl.default_expression->clone();
if (col_decl.type)
column.type = name_type_it->type;

@ -607,6 +602,7 @@ ColumnsDescription InterpreterCreateQuery::getColumnsDescription(
column.default_desc.kind = columnDefaultKindFromString(col_decl.default_specifier);
column.default_desc.expression = default_expr;
column.default_desc.ephemeral_default = col_decl.ephemeral_default;
}
else if (col_decl.type)
column.type = name_type_it->type;
@ -1,5 +1,6 @@
#include <algorithm>
#include <memory>
#include <set>

#include <Core/Settings.h>
#include <Core/NamesAndTypes.h>

@ -1229,16 +1230,24 @@ void TreeRewriterResult::collectUsedColumns(const ASTPtr & query, bool is_select
if (storage)
{
std::vector<String> hint_name{};
std::set<String> helper_hint_name{};
for (const auto & name : columns_context.requiredColumns())
{
auto hints = storage->getHints(name);
hint_name.insert(hint_name.end(), hints.begin(), hints.end());
for (const auto & hint : hints)
{
// We want to preserve the ordering of the hints
// (as they are ordered by Levenshtein distance)
auto [_, inserted] = helper_hint_name.insert(hint);
if (inserted)
hint_name.push_back(hint);
}
}
if (!hint_name.empty())
{
ss << ", maybe you meant: ";
ss << toString(hint_name);
ss << toStringWithFinalSeparator(hint_name, " or ");
}
}
else
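The new loop above de-duplicates the collected hints while keeping their original order (they arrive sorted by Levenshtein distance), using an auxiliary std::set only as a "seen" marker. A minimal sketch of that pattern in isolation; the function name is invented for illustration:

#include <cassert>
#include <set>
#include <string>
#include <vector>

std::vector<std::string> deduplicateKeepingOrder(const std::vector<std::string> & hints)
{
    std::vector<std::string> result;
    std::set<std::string> seen;
    for (const auto & hint : hints)
        if (seen.insert(hint).second)   /// insert() reports whether the value was new
            result.push_back(hint);
    return result;
}

int main()
{
    assert((deduplicateKeepingOrder({"id", "uid", "id"}) == std::vector<std::string>{"id", "uid"}));
}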
@ -2,6 +2,7 @@
#include <Common/quoteString.h>
#include <IO/Operators.h>
#include <Parsers/ASTLiteral.h>
#include <DataTypes/DataTypeFactory.h>

namespace DB

@ -78,7 +79,7 @@ void ASTColumnDeclaration::formatImpl(const FormatSettings & settings, FormatSta
if (default_expression)
{
settings.ostr << ' ' << (settings.hilite ? hilite_keyword : "") << default_specifier << (settings.hilite ? hilite_none : "");
if (default_specifier != "EPHEMERAL" || !default_expression->as<ASTLiteral>()->value.isNull())
if (!ephemeral_default)
{
settings.ostr << ' ';
default_expression->formatImpl(settings, state, frame);

@ -16,6 +16,7 @@ public:
std::optional<bool> null_modifier;
String default_specifier;
ASTPtr default_expression;
bool ephemeral_default;
ASTPtr comment;
ASTPtr codec;
ASTPtr ttl;

@ -1,5 +1,6 @@
#pragma once

#include <Parsers/ASTFunction.h>
#include <Parsers/ASTColumnDeclaration.h>
#include <Parsers/ASTIdentifier_fwd.h>
#include <Parsers/ASTLiteral.h>

@ -170,6 +171,7 @@ bool IParserColumnDeclaration<NameParser>::parseImpl(Pos & pos, ASTPtr & node, E
ASTPtr type;
String default_specifier;
std::optional<bool> null_modifier;
bool ephemeral_default = false;
ASTPtr default_expression;
ASTPtr comment_expression;
ASTPtr codec_expression;

@ -235,8 +237,16 @@ bool IParserColumnDeclaration<NameParser>::parseImpl(Pos & pos, ASTPtr & node, E
else if (s_ephemeral.ignore(pos, expected))
{
default_specifier = s_ephemeral.getName();
if (!literal_parser.parse(pos, default_expression, expected) && type)
default_expression = std::make_shared<ASTLiteral>(Field());
if (!expr_parser.parse(pos, default_expression, expected) && type)
{
ephemeral_default = true;

auto default_function = std::make_shared<ASTFunction>();
default_function->name = "defaultValueOfTypeName";
default_function->arguments = std::make_shared<ASTExpressionList>();
default_function->arguments->children.emplace_back(std::make_shared<ASTLiteral>(type->as<ASTFunction>()->formatWithSecretsHidden()));
default_expression = default_function;
}

if (!default_expression && !type)
return false;

@ -302,6 +312,7 @@ bool IParserColumnDeclaration<NameParser>::parseImpl(Pos & pos, ASTPtr & node, E
column_declaration->default_specifier = default_specifier;
if (default_expression)
{
column_declaration->ephemeral_default = ephemeral_default;
column_declaration->default_expression = default_expression;
column_declaration->children.push_back(std::move(default_expression));
}
@ -71,7 +71,7 @@ Chunk ArrowBlockInputFormat::generate()
++record_batch_current;
arrow_column_to_ch_column->arrowTableToCHChunk(res, *table_result);
arrow_column_to_ch_column->arrowTableToCHChunk(res, *table_result, (*table_result)->num_rows());
/// If defaults_for_omitted_fields is true, calculate the default values from default expression for omitted fields.
/// Otherwise fill the missing columns with zero values of its type.

@ -69,7 +69,6 @@ namespace ErrorCodes
extern const int DUPLICATE_COLUMN;
extern const int THERE_IS_NO_COLUMN;
extern const int UNKNOWN_EXCEPTION;
extern const int INCORRECT_NUMBER_OF_COLUMNS;
extern const int INCORRECT_DATA;
}

@ -810,7 +809,7 @@ ArrowColumnToCHColumn::ArrowColumnToCHColumn(
{
}
void ArrowColumnToCHColumn::arrowTableToCHChunk(Chunk & res, std::shared_ptr<arrow::Table> & table)
void ArrowColumnToCHColumn::arrowTableToCHChunk(Chunk & res, std::shared_ptr<arrow::Table> & table, size_t num_rows)
{
NameToColumnPtr name_to_column_ptr;
for (auto column_name : table->ColumnNames())

@ -824,16 +823,12 @@ void ArrowColumnToCHColumn::arrowTableToCHChunk(Chunk & res, std::shared_ptr<arr
name_to_column_ptr[std::move(column_name)] = arrow_column;
}
arrowColumnsToCHChunk(res, name_to_column_ptr);
arrowColumnsToCHChunk(res, name_to_column_ptr, num_rows);
}
void ArrowColumnToCHColumn::arrowColumnsToCHChunk(Chunk & res, NameToColumnPtr & name_to_column_ptr)
void ArrowColumnToCHColumn::arrowColumnsToCHChunk(Chunk & res, NameToColumnPtr & name_to_column_ptr, size_t num_rows)
{
if (unlikely(name_to_column_ptr.empty()))
throw Exception(ErrorCodes::INCORRECT_NUMBER_OF_COLUMNS, "Columns is empty");
Columns columns_list;
UInt64 num_rows = name_to_column_ptr.begin()->second->length();
columns_list.reserve(header.columns());
std::unordered_map<String, std::pair<BlockPtr, std::shared_ptr<NestedColumnExtractHelper>>> nested_tables;
bool skipped = false;

@ -28,9 +28,9 @@ public:
bool allow_missing_columns_,
bool case_insensitive_matching_ = false);
void arrowTableToCHChunk(Chunk & res, std::shared_ptr<arrow::Table> & table);
void arrowTableToCHChunk(Chunk & res, std::shared_ptr<arrow::Table> & table, size_t num_rows);
void arrowColumnsToCHChunk(Chunk & res, NameToColumnPtr & name_to_column_ptr);
void arrowColumnsToCHChunk(Chunk & res, NameToColumnPtr & name_to_column_ptr, size_t num_rows);
/// Get missing columns that exists in header but not in arrow::Schema
std::vector<size_t> getMissingColumns(const arrow::Schema & schema) const;

@ -54,14 +54,19 @@ Chunk ORCBlockInputFormat::generate()
throw ParsingException(
ErrorCodes::CANNOT_READ_ALL_DATA, "Error while reading batch of ORC data: {}", table_result.status().ToString());
/// We should extract the number of rows directly from the stripe, because in case when
/// record batch contains 0 columns (for example if we requested only columns that
/// are not presented in data) the number of rows in record batch will be 0.
size_t num_rows = file_reader->GetRawORCReader()->getStripe(stripe_current)->getNumberOfRows();
auto table = table_result.ValueOrDie();
if (!table || !table->num_rows())
if (!table || !num_rows)
return {};
++stripe_current;
Chunk res;
arrow_column_to_ch_column->arrowTableToCHChunk(res, table);
arrow_column_to_ch_column->arrowTableToCHChunk(res, table, num_rows);
/// If defaults_for_omitted_fields is true, calculate the default values from default expression for omitted fields.
/// Otherwise fill the missing columns with zero values of its type.
if (format_settings.defaults_for_omitted_fields)
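The comment in the ORC hunk explains why the row count is now threaded through arrowTableToCHChunk and arrowColumnsToCHChunk: if the query requests zero columns, there is no first column to take the length from, yet the chunk must still report the right number of rows. A small illustrative sketch of that failure mode using plain standard containers instead of Arrow types; the names are hypothetical:

#include <cstddef>
#include <map>
#include <string>
#include <vector>

/// Old approach: derive the row count from the first column (breaks when no
/// columns were requested). New approach: pass the count taken from file metadata.
size_t chunkRows(const std::map<std::string, std::vector<int>> & columns, size_t num_rows_from_metadata)
{
    if (columns.empty())
        return num_rows_from_metadata;        /// e.g. only columns absent from the data were requested
    return columns.begin()->second.size();    /// the old derivation, kept here only for comparison
}

int main()
{
    return chunkRows({}, 42) == 42 ? 0 : 1;
}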
@ -70,7 +70,7 @@ Chunk ParquetBlockInputFormat::generate()
++row_group_current;
arrow_column_to_ch_column->arrowTableToCHChunk(res, table);
arrow_column_to_ch_column->arrowTableToCHChunk(res, table, table->num_rows());
/// If defaults_for_omitted_fields is true, calculate the default values from default expression for omitted fields.
/// Otherwise fill the missing columns with zero values of its type.

@ -332,8 +332,7 @@ std::string buildTaggedRegex(std::string regexp_str)
* </default>
* </graphite_rollup>
*/
static const Pattern &
appendGraphitePattern(
static const Pattern & appendGraphitePattern(
const Poco::Util::AbstractConfiguration & config,
const String & config_element, Patterns & patterns,
bool default_rule,

@ -9,6 +9,12 @@
namespace DB
{

namespace ErrorCodes
{
extern const int BAD_ARGUMENTS;
}

static GraphiteRollupSortedAlgorithm::ColumnsDefinition defineColumns(
const Block & header, const Graphite::Params & params)
{

@ -26,6 +32,9 @@ static GraphiteRollupSortedAlgorithm::ColumnsDefinition defineColumns(
if (i != def.time_column_num && i != def.value_column_num && i != def.version_column_num)
def.unmodified_column_numbers.push_back(i);
if (!WhichDataType(header.getByPosition(def.value_column_num).type).isFloat64())
throw Exception("Only `Float64` data type is allowed for the value column of GraphiteMergeTree", ErrorCodes::BAD_ARGUMENTS);
return def;
}
@ -66,7 +66,7 @@ static MergeTreeReaderSettings getMergeTreeReaderSettings(
.read_in_order = query_info.input_order_info != nullptr,
.apply_deleted_mask = context->applyDeletedMask(),
.use_asynchronous_read_from_pool = settings.allow_asynchronous_read_from_io_pool_for_merge_tree
&& (settings.max_streams_to_max_threads_ratio > 1 || settings.allow_asynchronous_read_from_io_pool_for_merge_tree),
&& (settings.max_streams_to_max_threads_ratio > 1 || settings.max_streams_for_merge_tree_reading > 1),
};
}

@ -2313,7 +2313,8 @@ void registerWindowFunctions(AggregateFunctionFactory & factory)
.returns_default_when_only_null = true,
// This probably doesn't make any difference for window functions because
// it is an Aggregator-specific setting.
.is_order_dependent = true };
.is_order_dependent = true,
.is_window_function = true};
factory.registerFunction("rank", {[](const std::string & name,
const DataTypes & argument_types, const Array & parameters, const Settings *)

@ -26,6 +26,7 @@ struct ColumnDefault
{
ColumnDefaultKind kind = ColumnDefaultKind::Default;
ASTPtr expression;
bool ephemeral_default = false;
};

@ -123,6 +123,7 @@ void ColumnDescription::readText(ReadBuffer & buf)
{
default_desc.kind = columnDefaultKindFromString(col_ast->default_specifier);
default_desc.expression = std::move(col_ast->default_expression);
default_desc.ephemeral_default = col_ast->ephemeral_default;
}
if (col_ast->comment)
@ -1,34 +1,23 @@
#include <Storages/MergeTree/MergeTreeData.h>

#include <Backups/BackupEntriesCollector.h>
#include <Backups/BackupEntryFromImmutableFile.h>
#include <Backups/BackupEntryFromSmallFile.h>
#include <Backups/BackupEntryWrappedWith.h>
#include <Backups/IBackup.h>
#include <Backups/RestorerFromBackup.h>
#include <Compression/CompressedReadBuffer.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeDate.h>
#include <DataTypes/DataTypeDateTime.h>
#include <DataTypes/DataTypeEnum.h>
#include <DataTypes/DataTypeLowCardinality.h>
#include <DataTypes/DataTypeNullable.h>
#include <DataTypes/DataTypeUUID.h>
#include <DataTypes/DataTypeTuple.h>
#include <DataTypes/NestedUtils.h>
#include <DataTypes/DataTypeObject.h>
#include <DataTypes/ObjectUtils.h>
#include <Columns/ColumnObject.h>
#include <DataTypes/hasNullable.h>
#include <Disks/createVolume.h>
#include <Disks/ObjectStorages/DiskObjectStorage.h>
#include <Functions/FunctionFactory.h>
#include <Functions/IFunction.h>
#include <IO/ConcatReadBuffer.h>
#include <IO/Operators.h>
#include <IO/ReadBufferFromMemory.h>
#include <IO/WriteBufferFromString.h>
#include <IO/S3Common.h>
#include <Interpreters/Aggregator.h>
#include <Interpreters/ExpressionAnalyzer.h>
#include <Interpreters/PartLog.h>

@ -56,7 +45,6 @@
#include <Storages/MergeTree/MergeTreeDataPartWide.h>
#include <Storages/MergeTree/DataPartStorageOnDisk.h>
#include <Storages/MergeTree/checkDataPart.h>
#include <Storages/MergeTree/localBackup.h>
#include <Storages/StorageMergeTree.h>
#include <Storages/StorageReplicatedMergeTree.h>
#include <Storages/VirtualColumnUtils.h>

@ -70,22 +58,17 @@
#include <Common/quoteString.h>
#include <Common/typeid_cast.h>
#include <Common/noexcept_scope.h>
#include <Processors/QueryPlan/ReadFromMergeTree.h>
#include <Processors/Formats/IInputFormat.h>
#include <AggregateFunctions/AggregateFunctionCount.h>
#include <Common/scope_guard_safe.h>

#include <boost/range/adaptor/filtered.hpp>
#include <boost/range/algorithm_ext/erase.hpp>
#include <boost/algorithm/string/join.hpp>
#include <boost/algorithm/string/replace.hpp>

#include <base/insertAtEnd.h>
#include <base/sort.h>

#include <algorithm>
#include <atomic>
#include <iomanip>
#include <optional>
#include <set>
#include <thread>

@ -1030,7 +1013,7 @@ void MergeTreeData::loadDataPartsFromDisk(
size_t suspicious_broken_parts_bytes = 0;
std::atomic<bool> has_adaptive_parts = false;
std::atomic<bool> has_non_adaptive_parts = false;
std::atomic<bool> has_lightweight_in_parts = false;
std::atomic<bool> has_lightweight_deletes_in_parts = false;
std::mutex mutex;
auto load_part = [&](const String & part_name, const DiskPtr & part_disk_ptr)

@ -1122,7 +1105,7 @@ void MergeTreeData::loadDataPartsFromDisk(
/// Check if there is lightweight delete in part
if (part->hasLightweightDelete())
has_lightweight_in_parts.store(true, std::memory_order_relaxed);
has_lightweight_deletes_in_parts.store(true, std::memory_order_relaxed);
part->modification_time = part_disk_ptr->getLastModified(fs::path(relative_data_path) / part_name).epochTime();
/// Assume that all parts are Active, covered parts will be detected and marked as Outdated later

@ -1206,7 +1189,7 @@ void MergeTreeData::loadDataPartsFromDisk(
has_non_adaptive_index_granularity_parts = has_non_adaptive_parts;
if (has_lightweight_in_parts)
if (has_lightweight_deletes_in_parts)
has_lightweight_delete_parts.store(true);
if (suspicious_broken_parts > settings->max_suspicious_broken_parts && !skip_sanity_checks)
@ -5999,20 +5982,25 @@ std::optional<ProjectionCandidate> MergeTreeData::getQueryProcessingStageWithAgg
if (select_query->interpolate() && !select_query->interpolate()->children.empty())
return std::nullopt;
// Currently projections don't support GROUPING SET yet.
if (select_query->group_by_with_grouping_sets)
// Projections don't support grouping sets yet.
if (select_query->group_by_with_grouping_sets
|| select_query->group_by_with_totals
|| select_query->group_by_with_rollup
|| select_query->group_by_with_cube)
return std::nullopt;
auto query_options = SelectQueryOptions(
QueryProcessingStage::WithMergeableState,
/* depth */ 1,
/* is_subquery_= */ true
).ignoreProjections().ignoreAlias();
).ignoreProjections().ignoreAlias();
InterpreterSelectQuery select(
query_ptr,
query_context,
query_options,
query_info.prepared_sets);
const auto & analysis_result = select.getAnalysisResult();
query_info.prepared_sets = select.getQueryAnalyzer()->getPreparedSets();
@ -110,7 +110,7 @@ Granules getGranulesToWrite(const MergeTreeIndexGranularity & index_granularity,
.is_complete = (rows_left_in_block >= expected_rows_in_mark)
});
current_row += result.back().rows_to_write;
current_mark++;
++current_mark;
}
return result;

@ -146,6 +146,7 @@ void MergeTreeDataPartWriterCompact::write(const Block & block, const IColumn::P
if (compute_granularity)
{
size_t index_granularity_for_block = computeIndexGranularity(block);
assert(index_granularity_for_block >= 1);
fillIndexGranularity(index_granularity_for_block, block.rows());
}

@ -135,7 +135,9 @@ static size_t computeIndexGranularityImpl(
size_t rows_in_block = block.rows();
size_t index_granularity_for_block;
if (!can_use_adaptive_index_granularity)
{
index_granularity_for_block = fixed_index_granularity_rows;
}
else
{
size_t block_size_in_memory = block.bytes();

@ -152,11 +154,13 @@ static size_t computeIndexGranularityImpl(
index_granularity_for_block = index_granularity_bytes / size_of_row_in_bytes;
}
}
if (index_granularity_for_block == 0) /// very rare case when index granularity bytes less then single row
index_granularity_for_block = 1;
/// We should be less or equal than fixed index granularity
index_granularity_for_block = std::min(fixed_index_granularity_rows, index_granularity_for_block);
/// very rare case when index granularity bytes less then single row
if (index_granularity_for_block == 0)
index_granularity_for_block = 1;
return index_granularity_for_block;
}
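To make the reordered logic above concrete: the block-derived granularity is clamped to the fixed setting first, and the zero case (a single row larger than index_granularity_bytes) is handled afterwards, so the result is always at least 1. A small numeric sketch with made-up block sizes, not taken from the commit:

#include <algorithm>
#include <cstddef>
#include <iostream>

int main()
{
    const size_t fixed_index_granularity_rows = 8192;
    const size_t index_granularity_bytes = 10 * 1024 * 1024;

    const size_t rows_in_block = 100;
    const size_t block_size_in_memory = 50 * 1024 * 1024;   /// hypothetical 50 MiB block
    const size_t size_of_row_in_bytes = block_size_in_memory / rows_in_block;

    size_t index_granularity_for_block = index_granularity_bytes / size_of_row_in_bytes;

    /// Clamp to the fixed index granularity first.
    index_granularity_for_block = std::min(fixed_index_granularity_rows, index_granularity_for_block);

    /// Then handle the rare case of a row bigger than index_granularity_bytes.
    if (index_granularity_for_block == 0)
        index_granularity_for_block = 1;

    std::cout << index_granularity_for_block << '\n';   /// 20 rows per granule for these numbers
}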
@ -99,6 +99,15 @@ void MergeTreeSettings::sanityCheck(size_t background_pool_tasks) const
background_pool_tasks);
}
// Zero index_granularity is nonsensical.
if (index_granularity < 1)
{
throw Exception(
ErrorCodes::BAD_ARGUMENTS,
"index_granularity: value {} makes no sense",
index_granularity);
}
// The min_index_granularity_bytes value is 1024 b and index_granularity_bytes is 10 mb by default.
// If index_granularity_bytes is not disabled i.e > 0 b, then always ensure that it's greater than
// min_index_granularity_bytes. This is mainly a safeguard against accidents whereby a really low

@ -17,7 +17,7 @@ struct ReplicatedMergeTreeLogEntryData;
/// (so instead of doing exactly the same merge cluster-wise you can do merge once and fetch ready part)
/// Fetches may be desirable for other operational reasons (backup replica without lot of CPU resources).
///
/// That class allow to take a decisions about preferred strategy for a concreate merge.
/// That class allow to take a decisions about preferred strategy for a concrete merge.
///
/// Since that code is used in shouldExecuteLogEntry we need to be able to:
/// 1) make decision fast

@ -476,7 +476,7 @@ static StoragePtr create(const StorageFactory::Arguments & args)
{
String graphite_config_name;
String error_msg
= "Last parameter of GraphiteMergeTree must be name (in single quotes) of element in configuration file with Graphite options";
= "Last parameter of GraphiteMergeTree must be the name (in single quotes) of the element in configuration file with the Graphite options";
error_msg += getMergeTreeVerboseHelp(is_extended_storage_def);
if (const auto * ast = engine_args[arg_cnt - 1]->as<ASTLiteral>())

@ -171,7 +171,6 @@ struct ProjectionCandidate
*/
struct SelectQueryInfo
{
SelectQueryInfo()
: prepared_sets(std::make_shared<PreparedSets>())
{}
@ -95,7 +95,7 @@ if __name__ == "__main__":
)
logging.info("Going to run %s", run_command)
run_log_path = os.path.join(temp_path, "runlog.log")
run_log_path = os.path.join(temp_path, "run.log")
with open(run_log_path, "w", encoding="utf-8") as log:
with subprocess.Popen(
run_command, shell=True, stderr=log, stdout=log

@ -113,7 +113,7 @@ if __name__ == "__main__":
)
s3_prefix = f"{pr_info.number}/{pr_info.sha}/fuzzer_{check_name_lower}/"
paths = {
"runlog.log": run_log_path,
"run.log": run_log_path,
"main.log": os.path.join(workspace_path, "main.log"),
"server.log.gz": os.path.join(workspace_path, "server.log.gz"),
"fuzzer.log": os.path.join(workspace_path, "fuzzer.log"),

@ -130,8 +130,8 @@ if __name__ == "__main__":
paths[f] = ""
report_url = GITHUB_RUN_URL
if paths["runlog.log"]:
report_url = paths["runlog.log"]
if paths["run.log"]:
report_url = paths["run.log"]
if paths["main.log"]:
report_url = paths["main.log"]
if paths["server.log.gz"]:

@ -57,7 +57,7 @@ if __name__ == "__main__":
logging.info("Going to run codebrowser: %s", run_command)
run_log_path = os.path.join(TEMP_PATH, "runlog.log")
run_log_path = os.path.join(TEMP_PATH, "run.log")
with TeePopen(run_command, run_log_path) as process:
retcode = process.wait()

@ -476,7 +476,6 @@ def main():
url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, [], NAME)
print(f"::notice ::Report url: {url}")
print(f'::set-output name=url_output::"{url}"')
if not args.reports:
return

@ -208,7 +208,6 @@ def main():
url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, [], NAME)
print(f"::notice ::Report url: {url}")
print(f'::set-output name=url_output::"{url}"')
if not args.reports:
return

@ -340,7 +340,6 @@ def main():
url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, [], NAME)
print(f"::notice ::Report url: {url}")
print(f'::set-output name=url_output::"{url}"')
if not args.reports:
return
@ -82,7 +82,7 @@ if __name__ == "__main__":
f"{docker_image}"
)
run_log_path = os.path.join(test_output, "runlog.log")
run_log_path = os.path.join(test_output, "run.log")
logging.info("Running command: '%s'", cmd)
with TeePopen(cmd, run_log_path) as process:

@ -60,7 +60,7 @@ if __name__ == "__main__":
else:
user = f"{os.geteuid()}:{os.getegid()}"
run_log_path = os.path.join(test_output, "runlog.log")
run_log_path = os.path.join(test_output, "run.log")
with SSHKey("ROBOT_CLICKHOUSE_SSH_KEY"):
cmd = (

@ -155,7 +155,7 @@ if __name__ == "__main__":
if not os.path.exists(logs_path):
os.makedirs(logs_path)
run_log_path = os.path.join(logs_path, "runlog.log")
run_log_path = os.path.join(logs_path, "run.log")
with TeePopen(run_cmd, run_log_path, timeout=40 * 60) as process:
retcode = process.wait()
if retcode == 0:

@ -292,7 +292,7 @@ if __name__ == "__main__":
if not os.path.exists(result_path):
os.makedirs(result_path)
run_log_path = os.path.join(result_path, "runlog.log")
run_log_path = os.path.join(result_path, "run.log")
additional_envs = get_additional_envs(
check_name, run_by_hash_num, run_by_hash_total

@ -251,7 +251,7 @@ if __name__ == "__main__":
)
logging.info("Going to run jepsen: %s", cmd)
run_log_path = os.path.join(TEMP_PATH, "runlog.log")
run_log_path = os.path.join(TEMP_PATH, "run.log")
with TeePopen(cmd, run_log_path) as process:
retcode = process.wait()

@ -176,7 +176,7 @@ if __name__ == "__main__":
)
logging.info("Going to run command %s", run_command)
run_log_path = os.path.join(temp_path, "runlog.log")
run_log_path = os.path.join(temp_path, "run.log")
popen_env = os.environ.copy()
popen_env.update(env_extra)

@ -198,7 +198,7 @@ if __name__ == "__main__":
"all-query-metrics.tsv": os.path.join(
result_path, "report/all-query-metrics.tsv"
),
"runlog.log": run_log_path,
"run.log": run_log_path,
}
s3_prefix = f"{pr_info.number}/{pr_info.sha}/{check_name_prefix}/"

@ -253,8 +253,8 @@ if __name__ == "__main__":
report_url = GITHUB_RUN_URL
if uploaded["runlog.log"]:
report_url = uploaded["runlog.log"]
if uploaded["run.log"]:
report_url = uploaded["run.log"]
if uploaded["compare.log"]:
report_url = uploaded["compare.log"]
|
||||
run_command = get_run_command(build_url, workspace_path, docker_image)
|
||||
logging.info("Going to run %s", run_command)
|
||||
|
||||
run_log_path = os.path.join(workspace_path, "runlog.log")
|
||||
run_log_path = os.path.join(workspace_path, "run.log")
|
||||
with open(run_log_path, "w", encoding="utf-8") as log:
|
||||
with subprocess.Popen(
|
||||
run_command, shell=True, stderr=log, stdout=log
|
||||
|
@ -34,6 +34,7 @@ def get_run_command(
|
||||
"docker run --cap-add=SYS_PTRACE "
|
||||
# a static link, don't use S3_URL or S3_DOWNLOAD
|
||||
"-e S3_URL='https://s3.amazonaws.com/clickhouse-datasets' "
|
||||
f"-e DISABLE_BC_CHECK={os.environ.get('DISABLE_BC_CHECK', '0')} "
|
||||
# For dmesg and sysctl
|
||||
"--privileged "
|
||||
f"--volume={build_path}:/package_folder "
|
||||
@ -138,7 +139,7 @@ if __name__ == "__main__":
|
||||
if not os.path.exists(result_path):
|
||||
os.makedirs(result_path)
|
||||
|
||||
run_log_path = os.path.join(temp_path, "runlog.log")
|
||||
run_log_path = os.path.join(temp_path, "run.log")
|
||||
|
||||
run_command = get_run_command(
|
||||
packages_path, result_path, repo_tests_path, server_log_path, docker_image
|
||||
|
@ -140,7 +140,7 @@ if __name__ == "__main__":
|
||||
|
||||
run_command = f"docker run --cap-add=SYS_PTRACE --volume={tests_binary_path}:/unit_tests_dbms --volume={test_output}:/test_output {docker_image}"
|
||||
|
||||
run_log_path = os.path.join(test_output, "runlog.log")
|
||||
run_log_path = os.path.join(test_output, "run.log")
|
||||
|
||||
logging.info("Going to run func tests: %s", run_command)
|
||||
|
||||
|
@ -25,7 +25,7 @@ MAX_RETRY = 5
|
||||
|
||||
# Number of times a check can re-run as a whole.
|
||||
# It is needed, because we are using AWS "spot" instances, that are terminated often
|
||||
MAX_WORKFLOW_RERUN = 20
|
||||
MAX_WORKFLOW_RERUN = 30
|
||||
|
||||
WorkflowDescription = namedtuple(
|
||||
"WorkflowDescription",
|
||||
|
@ -358,10 +358,15 @@ class ClickhouseIntegrationTestsRunner:
|
||||
subprocess.check_call( # STYLE_CHECK_ALLOW_SUBPROCESS_CHECK_CALL
|
||||
"sync", shell=True
|
||||
)
|
||||
subprocess.check_call( # STYLE_CHECK_ALLOW_SUBPROCESS_CHECK_CALL
|
||||
retcode = subprocess.call( # STYLE_CHECK_ALLOW_SUBPROCESS_CHECK_CALL
|
||||
"tar czf {} -C {} {}".format(result_path, dir, " ".join(relpaths)),
|
||||
shell=True,
|
||||
)
|
||||
# tar return 1 when the files are changed on compressing, we ignore it
|
||||
if retcode in (0, 1):
|
||||
return
|
||||
# but even on the fatal errors it's better to retry
|
||||
logging.error("Fatal error on compressing %s: %s", result_path, retcode)
|
||||
|
||||
def _get_runner_opts(self):
|
||||
result = []
|
||||
|
Some files were not shown because too many files have changed in this diff.