mirror of
https://github.com/ClickHouse/ClickHouse.git
synced 2024-12-03 13:02:00 +00:00
Merge remote-tracking branch 'upstream/master' into try-perf
This commit is contained in:
commit
9014c818ae
208
.github/workflows/backport_branches.yml
vendored
208
.github/workflows/backport_branches.yml
vendored
@ -12,11 +12,10 @@ jobs:
|
||||
PythonUnitTests:
|
||||
runs-on: [self-hosted, style-checker]
|
||||
steps:
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
- name: Python unit tests
|
||||
run: |
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
@ -24,34 +23,32 @@ jobs:
|
||||
DockerHubPushAarch64:
|
||||
runs-on: [self-hosted, style-checker-aarch64]
|
||||
steps:
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
- name: Images check
|
||||
run: |
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
python3 docker_images_check.py --suffix aarch64
|
||||
- name: Upload images files to artifacts
|
||||
uses: actions/upload-artifact@v2
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: changed_images_aarch64
|
||||
path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
|
||||
DockerHubPushAmd64:
|
||||
runs-on: [self-hosted, style-checker]
|
||||
steps:
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
- name: Images check
|
||||
run: |
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
python3 docker_images_check.py --suffix amd64
|
||||
- name: Upload images files to artifacts
|
||||
uses: actions/upload-artifact@v2
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: changed_images_amd64
|
||||
path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
|
||||
@ -59,18 +56,17 @@ jobs:
|
||||
needs: [DockerHubPushAmd64, DockerHubPushAarch64]
|
||||
runs-on: [self-hosted, style-checker]
|
||||
steps:
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
- name: Download changed aarch64 images
|
||||
uses: actions/download-artifact@v2
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: changed_images_aarch64
|
||||
path: ${{ runner.temp }}
|
||||
- name: Download changed amd64 images
|
||||
uses: actions/download-artifact@v2
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: changed_images_amd64
|
||||
path: ${{ runner.temp }}
|
||||
@ -79,7 +75,7 @@ jobs:
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
|
||||
- name: Upload images files to artifacts
|
||||
uses: actions/upload-artifact@v2
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ runner.temp }}/changed_images.json
|
||||
@ -94,13 +90,12 @@ jobs:
|
||||
REPO_COPY=${{runner.temp}}/compatibility_check/ClickHouse
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
EOF
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
- name: Download json reports
|
||||
uses: actions/download-artifact@v2
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
path: ${{ env.REPORTS_PATH }}
|
||||
- name: CompatibilityCheck
|
||||
@ -132,28 +127,25 @@ jobs:
|
||||
BUILD_NAME=package_release
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v2
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
fetch-depth: 0 # For a proper version and performance artifacts
|
||||
- name: Build
|
||||
run: |
|
||||
git -C "$GITHUB_WORKSPACE" submodule sync
|
||||
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
@ -177,28 +169,25 @@ jobs:
|
||||
BUILD_NAME=package_aarch64
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v2
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
fetch-depth: 0 # For a proper version and performance artifacts
|
||||
- name: Build
|
||||
run: |
|
||||
git -C "$GITHUB_WORKSPACE" submodule sync
|
||||
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
@ -222,26 +211,24 @@ jobs:
|
||||
BUILD_NAME=package_asan
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v2
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
- name: Build
|
||||
run: |
|
||||
git -C "$GITHUB_WORKSPACE" submodule sync
|
||||
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
@ -265,26 +252,24 @@ jobs:
|
||||
BUILD_NAME=package_tsan
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v2
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
- name: Build
|
||||
run: |
|
||||
git -C "$GITHUB_WORKSPACE" submodule sync
|
||||
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
@ -308,26 +293,24 @@ jobs:
|
||||
BUILD_NAME=package_debug
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v2
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
- name: Build
|
||||
run: |
|
||||
git -C "$GITHUB_WORKSPACE" submodule sync
|
||||
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
@ -351,28 +334,25 @@ jobs:
|
||||
BUILD_NAME=binary_darwin
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v2
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
fetch-depth: 0 # otherwise we will have no info about contributors
|
||||
- name: Build
|
||||
run: |
|
||||
git -C "$GITHUB_WORKSPACE" submodule sync
|
||||
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
@ -396,28 +376,25 @@ jobs:
|
||||
BUILD_NAME=binary_darwin_aarch64
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v2
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
fetch-depth: 0 # otherwise we will have no info about contributors
|
||||
- name: Build
|
||||
run: |
|
||||
git -C "$GITHUB_WORKSPACE" submodule sync
|
||||
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
@ -436,12 +413,10 @@ jobs:
|
||||
- BuilderDebAarch64
|
||||
runs-on: [self-hosted, style-checker]
|
||||
steps:
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
fetch-depth: 0 # It MUST BE THE SAME for all dependencies and the job itself
|
||||
- name: Check docker clickhouse/clickhouse-server building
|
||||
run: |
|
||||
@ -477,14 +452,13 @@ jobs:
|
||||
NEEDS_DATA_PATH=${{runner.temp}}/needs.json
|
||||
EOF
|
||||
- name: Download json reports
|
||||
uses: actions/download-artifact@v2
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
path: ${{ env.REPORTS_PATH }}
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
- name: Report Builder
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
@ -516,14 +490,13 @@ jobs:
|
||||
NEEDS_DATA_PATH=${{runner.temp}}/needs.json
|
||||
EOF
|
||||
- name: Download json reports
|
||||
uses: actions/download-artifact@v2
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
path: ${{ env.REPORTS_PATH }}
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
- name: Report Builder
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
@ -556,14 +529,13 @@ jobs:
|
||||
KILL_TIMEOUT=10800
|
||||
EOF
|
||||
- name: Download json reports
|
||||
uses: actions/download-artifact@v2
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
path: ${{ env.REPORTS_PATH }}
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
- name: Functional test
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
@ -594,14 +566,13 @@ jobs:
|
||||
KILL_TIMEOUT=3600
|
||||
EOF
|
||||
- name: Download json reports
|
||||
uses: actions/download-artifact@v2
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
path: ${{ env.REPORTS_PATH }}
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
- name: Functional test
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
@ -635,14 +606,13 @@ jobs:
|
||||
REPO_COPY=${{runner.temp}}/stress_thread/ClickHouse
|
||||
EOF
|
||||
- name: Download json reports
|
||||
uses: actions/download-artifact@v2
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
path: ${{ env.REPORTS_PATH }}
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
- name: Stress test
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
@ -672,14 +642,13 @@ jobs:
|
||||
REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse
|
||||
EOF
|
||||
- name: Download json reports
|
||||
uses: actions/download-artifact@v2
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
path: ${{ env.REPORTS_PATH }}
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
- name: Integration test
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
@ -706,11 +675,10 @@ jobs:
|
||||
- CompatibilityCheck
|
||||
runs-on: [self-hosted, style-checker]
|
||||
steps:
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
- name: Finish label
|
||||
run: |
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
|
3
.github/workflows/cherry_pick.yml
vendored
3
.github/workflows/cherry_pick.yml
vendored
@ -28,8 +28,9 @@ jobs:
|
||||
REPO_TEAM=core
|
||||
EOF
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
|
||||
fetch-depth: 0
|
||||
- name: Cherry pick
|
||||
|
63
.github/workflows/docs_check.yml
vendored
63
.github/workflows/docs_check.yml
vendored
@ -21,11 +21,10 @@ jobs:
|
||||
CheckLabels:
|
||||
runs-on: [self-hosted, style-checker]
|
||||
steps:
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -rf "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
- name: Labels check
|
||||
run: |
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
@ -34,17 +33,16 @@ jobs:
|
||||
needs: CheckLabels
|
||||
runs-on: [self-hosted, style-checker-aarch64]
|
||||
steps:
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
- name: Images check
|
||||
run: |
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
python3 docker_images_check.py --suffix aarch64
|
||||
- name: Upload images files to artifacts
|
||||
uses: actions/upload-artifact@v2
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: changed_images_aarch64
|
||||
path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
|
||||
@ -52,17 +50,16 @@ jobs:
|
||||
needs: CheckLabels
|
||||
runs-on: [self-hosted, style-checker]
|
||||
steps:
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
- name: Images check
|
||||
run: |
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
python3 docker_images_check.py --suffix amd64
|
||||
- name: Upload images files to artifacts
|
||||
uses: actions/upload-artifact@v2
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: changed_images_amd64
|
||||
path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
|
||||
@ -70,18 +67,17 @@ jobs:
|
||||
needs: [DockerHubPushAmd64, DockerHubPushAarch64]
|
||||
runs-on: [self-hosted, style-checker]
|
||||
steps:
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
- name: Download changed aarch64 images
|
||||
uses: actions/download-artifact@v2
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: changed_images_aarch64
|
||||
path: ${{ runner.temp }}
|
||||
- name: Download changed amd64 images
|
||||
uses: actions/download-artifact@v2
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: changed_images_amd64
|
||||
path: ${{ runner.temp }}
|
||||
@ -90,7 +86,7 @@ jobs:
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
|
||||
- name: Upload images files to artifacts
|
||||
uses: actions/upload-artifact@v2
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ runner.temp }}/changed_images.json
|
||||
@ -110,15 +106,14 @@ jobs:
|
||||
- name: Download changed images
|
||||
# even if artifact does not exist, e.g. on `do not test` label or failed Docker job
|
||||
continue-on-error: true
|
||||
uses: actions/download-artifact@v2
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.TEMP_PATH }}
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
- name: Style Check
|
||||
run: |
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
@ -140,15 +135,14 @@ jobs:
|
||||
REPO_COPY=${{runner.temp}}/docs_check/ClickHouse
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v2
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.TEMP_PATH }}
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -rf "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
- name: Docs Check
|
||||
run: |
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
@ -167,11 +161,10 @@ jobs:
|
||||
- DocsCheck
|
||||
runs-on: [self-hosted, style-checker]
|
||||
steps:
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
- name: Finish label
|
||||
run: |
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
|
40
.github/workflows/docs_release.yml
vendored
40
.github/workflows/docs_release.yml
vendored
@ -23,34 +23,32 @@ jobs:
|
||||
DockerHubPushAarch64:
|
||||
runs-on: [self-hosted, style-checker-aarch64]
|
||||
steps:
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
- name: Images check
|
||||
run: |
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
python3 docker_images_check.py --suffix aarch64
|
||||
- name: Upload images files to artifacts
|
||||
uses: actions/upload-artifact@v2
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: changed_images_aarch64
|
||||
path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
|
||||
DockerHubPushAmd64:
|
||||
runs-on: [self-hosted, style-checker]
|
||||
steps:
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
- name: Images check
|
||||
run: |
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
python3 docker_images_check.py --suffix amd64
|
||||
- name: Upload images files to artifacts
|
||||
uses: actions/upload-artifact@v2
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: changed_images_amd64
|
||||
path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
|
||||
@ -58,18 +56,17 @@ jobs:
|
||||
needs: [DockerHubPushAmd64, DockerHubPushAarch64]
|
||||
runs-on: [self-hosted, style-checker]
|
||||
steps:
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
- name: Download changed aarch64 images
|
||||
uses: actions/download-artifact@v2
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: changed_images_aarch64
|
||||
path: ${{ runner.temp }}
|
||||
- name: Download changed amd64 images
|
||||
uses: actions/download-artifact@v2
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: changed_images_amd64
|
||||
path: ${{ runner.temp }}
|
||||
@ -78,7 +75,7 @@ jobs:
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
|
||||
- name: Upload images files to artifacts
|
||||
uses: actions/upload-artifact@v2
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ runner.temp }}/changed_images.json
|
||||
@ -97,13 +94,12 @@ jobs:
|
||||
${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
|
||||
RCSK
|
||||
EOF
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v2
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.TEMP_PATH }}
|
||||
|
12
.github/workflows/jepsen.yml
vendored
12
.github/workflows/jepsen.yml
vendored
@ -19,12 +19,10 @@ jobs:
|
||||
TEMP_PATH=${{runner.temp}}/keeper_jepsen
|
||||
REPO_COPY=${{runner.temp}}/keeper_jepsen/ClickHouse
|
||||
EOF
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
fetch-depth: 0
|
||||
- name: Jepsen Test
|
||||
run: |
|
||||
@ -50,12 +48,10 @@ jobs:
|
||||
# TEMP_PATH=${{runner.temp}}/server_jepsen
|
||||
# REPO_COPY=${{runner.temp}}/server_jepsen/ClickHouse
|
||||
# EOF
|
||||
# - name: Clear repository
|
||||
# run: |
|
||||
# sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
# - name: Check out repository code
|
||||
# uses: actions/checkout@v2
|
||||
# uses: ClickHouse/checkout@v1
|
||||
# with:
|
||||
# clear-repository: true
|
||||
# fetch-depth: 0
|
||||
# - name: Jepsen Test
|
||||
# run: |
|
||||
|
948
.github/workflows/master.yml
vendored
948
.github/workflows/master.yml
vendored
File diff suppressed because it is too large
Load Diff
48
.github/workflows/nightly.yml
vendored
48
.github/workflows/nightly.yml
vendored
@ -16,34 +16,32 @@ jobs:
|
||||
DockerHubPushAarch64:
|
||||
runs-on: [self-hosted, style-checker-aarch64]
|
||||
steps:
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
- name: Images check
|
||||
run: |
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
python3 docker_images_check.py --suffix aarch64 --all
|
||||
- name: Upload images files to artifacts
|
||||
uses: actions/upload-artifact@v2
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: changed_images_aarch64
|
||||
path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
|
||||
DockerHubPushAmd64:
|
||||
runs-on: [self-hosted, style-checker]
|
||||
steps:
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
- name: Images check
|
||||
run: |
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
python3 docker_images_check.py --suffix amd64 --all
|
||||
- name: Upload images files to artifacts
|
||||
uses: actions/upload-artifact@v2
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: changed_images_amd64
|
||||
path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
|
||||
@ -51,18 +49,17 @@ jobs:
|
||||
needs: [DockerHubPushAmd64, DockerHubPushAarch64]
|
||||
runs-on: [self-hosted, style-checker]
|
||||
steps:
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
- name: Download changed aarch64 images
|
||||
uses: actions/download-artifact@v2
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: changed_images_aarch64
|
||||
path: ${{ runner.temp }}
|
||||
- name: Download changed amd64 images
|
||||
uses: actions/download-artifact@v2
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: changed_images_amd64
|
||||
path: ${{ runner.temp }}
|
||||
@ -71,7 +68,7 @@ jobs:
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
|
||||
- name: Upload images files to artifacts
|
||||
uses: actions/upload-artifact@v2
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ runner.temp }}/changed_images.json
|
||||
@ -90,22 +87,17 @@ jobs:
|
||||
EOF
|
||||
echo "COVERITY_TOKEN=${{ secrets.COVERITY_TOKEN }}" >> "$GITHUB_ENV"
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v2
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
id: coverity-checkout
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
fetch-depth: 0 # otherwise we will have no info about contributors
|
||||
clear-repository: true
|
||||
submodules: true
|
||||
- name: Build
|
||||
run: |
|
||||
git -C "$GITHUB_WORKSPACE" submodule sync
|
||||
git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
@ -134,8 +126,10 @@ jobs:
|
||||
CC: clang-15
|
||||
CXX: clang++-15
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Check out repository code
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
fetch-depth: 0 # Shallow clones should be disabled for a better relevancy of analysis
|
||||
submodules: true
|
||||
- name: Set up JDK 11
|
||||
|
1247
.github/workflows/pull_request.yml
vendored
1247
.github/workflows/pull_request.yml
vendored
File diff suppressed because it is too large
Load Diff
8
.github/workflows/release.yml
vendored
8
.github/workflows/release.yml
vendored
@ -20,7 +20,7 @@ jobs:
|
||||
REPO_COPY=${{runner.temp}}/release_packages/ClickHouse
|
||||
EOF
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
# Always use the most recent script version
|
||||
ref: master
|
||||
@ -50,12 +50,10 @@ jobs:
|
||||
DockerServerImages:
|
||||
runs-on: [self-hosted, style-checker]
|
||||
steps:
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
fetch-depth: 0 # otherwise we will have no version info
|
||||
- name: Check docker clickhouse/clickhouse-server building
|
||||
run: |
|
||||
|
514
.github/workflows/release_branches.yml
vendored
514
.github/workflows/release_branches.yml
vendored
File diff suppressed because it is too large
Load Diff
2
.github/workflows/tags_stable.yml
vendored
2
.github/workflows/tags_stable.yml
vendored
@ -34,7 +34,7 @@ jobs:
|
||||
run: |
|
||||
echo "GITHUB_TAG=${GITHUB_REF#refs/tags/}" >> "$GITHUB_ENV"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
ref: master
|
||||
fetch-depth: 0
|
||||
|
6
.github/workflows/woboq.yml
vendored
6
.github/workflows/woboq.yml
vendored
@ -21,12 +21,10 @@ jobs:
|
||||
REPO_COPY=${{runner.temp}}/codebrowser/ClickHouse
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
EOF
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: ClickHouse/checkout@v1
|
||||
with:
|
||||
clear-repository: true
|
||||
submodules: 'true'
|
||||
- name: Codebrowser
|
||||
run: |
|
||||
|
34
.gitmodules
vendored
34
.gitmodules
vendored
@ -104,13 +104,13 @@
|
||||
url = https://github.com/ClickHouse/aws-sdk-cpp.git
|
||||
[submodule "aws-c-event-stream"]
|
||||
path = contrib/aws-c-event-stream
|
||||
url = https://github.com/ClickHouse/aws-c-event-stream.git
|
||||
url = https://github.com/awslabs/aws-c-event-stream.git
|
||||
[submodule "aws-c-common"]
|
||||
path = contrib/aws-c-common
|
||||
url = https://github.com/ClickHouse/aws-c-common.git
|
||||
[submodule "aws-checksums"]
|
||||
path = contrib/aws-checksums
|
||||
url = https://github.com/ClickHouse/aws-checksums.git
|
||||
url = https://github.com/awslabs/aws-checksums.git
|
||||
[submodule "contrib/curl"]
|
||||
path = contrib/curl
|
||||
url = https://github.com/curl/curl.git
|
||||
@ -294,3 +294,33 @@
|
||||
[submodule "contrib/libdivide"]
|
||||
path = contrib/libdivide
|
||||
url = https://github.com/ridiculousfish/libdivide.git
|
||||
[submodule "contrib/aws-crt-cpp"]
|
||||
path = contrib/aws-crt-cpp
|
||||
url = https://github.com/ClickHouse/aws-crt-cpp.git
|
||||
[submodule "contrib/aws-c-io"]
|
||||
path = contrib/aws-c-io
|
||||
url = https://github.com/ClickHouse/aws-c-io.git
|
||||
[submodule "contrib/aws-c-mqtt"]
|
||||
path = contrib/aws-c-mqtt
|
||||
url = https://github.com/awslabs/aws-c-mqtt.git
|
||||
[submodule "contrib/aws-c-auth"]
|
||||
path = contrib/aws-c-auth
|
||||
url = https://github.com/awslabs/aws-c-auth.git
|
||||
[submodule "contrib/aws-c-cal"]
|
||||
path = contrib/aws-c-cal
|
||||
url = https://github.com/ClickHouse/aws-c-cal.git
|
||||
[submodule "contrib/aws-c-sdkutils"]
|
||||
path = contrib/aws-c-sdkutils
|
||||
url = https://github.com/awslabs/aws-c-sdkutils.git
|
||||
[submodule "contrib/aws-c-http"]
|
||||
path = contrib/aws-c-http
|
||||
url = https://github.com/awslabs/aws-c-http.git
|
||||
[submodule "contrib/aws-c-s3"]
|
||||
path = contrib/aws-c-s3
|
||||
url = https://github.com/awslabs/aws-c-s3.git
|
||||
[submodule "contrib/aws-c-compression"]
|
||||
path = contrib/aws-c-compression
|
||||
url = https://github.com/awslabs/aws-c-compression.git
|
||||
[submodule "contrib/aws-s2n-tls"]
|
||||
path = contrib/aws-s2n-tls
|
||||
url = https://github.com/aws/s2n-tls.git
|
||||
|
@ -73,22 +73,7 @@ message (STATUS "CMAKE_BUILD_TYPE: ${CMAKE_BUILD_TYPE}")
|
||||
|
||||
string (TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UC)
|
||||
|
||||
option(USE_STATIC_LIBRARIES "Disable to use shared libraries" ON)
|
||||
# DEVELOPER ONLY.
|
||||
# Faster linking if turned on.
|
||||
option(SPLIT_SHARED_LIBRARIES "Keep all internal libraries as separate .so files" OFF)
|
||||
|
||||
if (USE_STATIC_LIBRARIES AND SPLIT_SHARED_LIBRARIES)
|
||||
message(FATAL_ERROR "SPLIT_SHARED_LIBRARIES=1 must not be used together with USE_STATIC_LIBRARIES=1")
|
||||
endif()
|
||||
|
||||
if (NOT USE_STATIC_LIBRARIES AND SPLIT_SHARED_LIBRARIES)
|
||||
set(BUILD_SHARED_LIBS 1 CACHE INTERNAL "")
|
||||
endif ()
|
||||
|
||||
if (USE_STATIC_LIBRARIES)
|
||||
list(REVERSE CMAKE_FIND_LIBRARY_SUFFIXES)
|
||||
endif ()
|
||||
list(REVERSE CMAKE_FIND_LIBRARY_SUFFIXES)
|
||||
|
||||
option (ENABLE_FUZZING "Fuzzy testing using libfuzzer" OFF)
|
||||
|
||||
@ -171,7 +156,7 @@ option(ENABLE_TESTS "Provide unit_test_dbms target with Google.Test unit tests"
|
||||
option(ENABLE_EXAMPLES "Build all example programs in 'examples' subdirectories" OFF)
|
||||
option(ENABLE_BENCHMARKS "Build all benchmark programs in 'benchmarks' subdirectories" OFF)
|
||||
|
||||
if (OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64) AND USE_STATIC_LIBRARIES AND NOT SPLIT_SHARED_LIBRARIES AND NOT USE_MUSL)
|
||||
if (OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64) AND NOT USE_MUSL)
|
||||
# Only for Linux, x86_64 or aarch64.
|
||||
option(GLIBC_COMPATIBILITY "Enable compatibility with older glibc libraries." ON)
|
||||
elseif(GLIBC_COMPATIBILITY)
|
||||
@ -377,15 +362,15 @@ set (DEBUG_INFO_FLAGS "-g -gdwarf-4")
|
||||
|
||||
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMPILER_FLAGS}")
|
||||
set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
|
||||
set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} -fno-inline ${CMAKE_CXX_FLAGS_ADD}")
|
||||
set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
|
||||
|
||||
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMPILER_FLAGS} ${CMAKE_C_FLAGS_ADD}")
|
||||
set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}")
|
||||
set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} -fno-inline ${CMAKE_C_FLAGS_ADD}")
|
||||
set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}")
|
||||
|
||||
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} ${COMPILER_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
|
||||
set (CMAKE_ASM_FLAGS_RELWITHDEBINFO "${CMAKE_ASM_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
|
||||
set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} -fno-inline ${CMAKE_ASM_FLAGS_ADD}")
|
||||
set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
|
||||
|
||||
if (COMPILER_CLANG)
|
||||
if (OS_DARWIN)
|
||||
@ -467,22 +452,13 @@ endif ()
|
||||
|
||||
set (CMAKE_POSTFIX_VARIABLE "CMAKE_${CMAKE_BUILD_TYPE_UC}_POSTFIX")
|
||||
|
||||
if (USE_STATIC_LIBRARIES)
|
||||
set (CMAKE_POSITION_INDEPENDENT_CODE OFF)
|
||||
if (OS_LINUX AND NOT ARCH_AARCH64)
|
||||
# Slightly more efficient code can be generated
|
||||
# It's disabled for ARM because otherwise ClickHouse cannot run on Android.
|
||||
set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -fno-pie")
|
||||
set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -fno-pie")
|
||||
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -no-pie -Wl,-no-pie")
|
||||
endif ()
|
||||
else ()
|
||||
set (CMAKE_POSITION_INDEPENDENT_CODE ON)
|
||||
# This is required for clang on Arch linux, that uses PIE by default.
|
||||
# See enable-SSP-and-PIE-by-default.patch [1].
|
||||
#
|
||||
# [1]: https://github.com/archlinux/svntogit-packages/blob/6e681aa860e65ad46a1387081482eb875c2200f2/trunk/enable-SSP-and-PIE-by-default.patch
|
||||
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -no-pie")
|
||||
set (CMAKE_POSITION_INDEPENDENT_CODE OFF)
|
||||
if (OS_LINUX AND NOT ARCH_AARCH64)
|
||||
# Slightly more efficient code can be generated
|
||||
# It's disabled for ARM because otherwise ClickHouse cannot run on Android.
|
||||
set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -fno-pie")
|
||||
set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -fno-pie")
|
||||
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -no-pie -Wl,-no-pie")
|
||||
endif ()
|
||||
|
||||
if (ENABLE_TESTS)
|
||||
@ -504,10 +480,7 @@ else ()
|
||||
set (CLICKHOUSE_ETC_DIR "${CMAKE_INSTALL_PREFIX}/etc")
|
||||
endif ()
|
||||
|
||||
message (STATUS
|
||||
"Building for: ${CMAKE_SYSTEM} ${CMAKE_SYSTEM_PROCESSOR} ${CMAKE_LIBRARY_ARCHITECTURE} ;
|
||||
USE_STATIC_LIBRARIES=${USE_STATIC_LIBRARIES}
|
||||
SPLIT_SHARED_LIBRARIES=${SPLIT_SHARED_LIBRARIES}")
|
||||
message (STATUS "Building for: ${CMAKE_SYSTEM} ${CMAKE_SYSTEM_PROCESSOR} ${CMAKE_LIBRARY_ARCHITECTURE}")
|
||||
|
||||
include (GNUInstallDirs)
|
||||
|
||||
@ -553,7 +526,7 @@ macro (clickhouse_add_executable target)
|
||||
# - _je_zone_register due to JEMALLOC_PRIVATE_NAMESPACE=je_ under OS X.
|
||||
# - but jemalloc-cmake does not run private_namespace.sh
|
||||
# so symbol name should be _zone_register
|
||||
if (ENABLE_JEMALLOC AND USE_STATIC_LIBRARIES AND OS_DARWIN)
|
||||
if (ENABLE_JEMALLOC AND OS_DARWIN)
|
||||
set_property(TARGET ${target} APPEND PROPERTY LINK_OPTIONS -u_zone_register)
|
||||
endif()
|
||||
endif()
|
||||
|
4
LICENSE
4
LICENSE
@ -1,4 +1,4 @@
|
||||
Copyright 2016-2022 ClickHouse, Inc.
|
||||
Copyright 2016-2023 ClickHouse, Inc.
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
@ -188,7 +188,7 @@ Copyright 2016-2022 ClickHouse, Inc.
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 2016-2022 ClickHouse, Inc.
|
||||
Copyright 2016-2023 ClickHouse, Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
@ -10,7 +10,7 @@
|
||||
#include <base/MoveOrCopyIfThrow.h>
|
||||
|
||||
/** Pool for limited size objects that cannot be used from different threads simultaneously.
|
||||
* The main use case is to have fixed size of objects that can be reused in difference threads during their lifetime
|
||||
* The main use case is to have fixed size of objects that can be reused in different threads during their lifetime
|
||||
* and have to be initialized on demand.
|
||||
* Two main properties of pool are allocated objects size and borrowed objects size.
|
||||
* Allocated objects size is size of objects that are currently allocated by the pool.
|
||||
|
@ -8,16 +8,13 @@ set (SRCS
|
||||
getPageSize.cpp
|
||||
getThreadId.cpp
|
||||
JSON.cpp
|
||||
LineReader.cpp
|
||||
mremap.cpp
|
||||
phdr_cache.cpp
|
||||
preciseExp10.cpp
|
||||
setTerminalEcho.cpp
|
||||
shift10.cpp
|
||||
sleep.cpp
|
||||
terminalColors.cpp
|
||||
errnoToString.cpp
|
||||
ReplxxLineReader.cpp
|
||||
StringRef.cpp
|
||||
safeExit.cpp
|
||||
throwError.cpp
|
||||
@ -40,17 +37,8 @@ else ()
|
||||
target_compile_definitions(common PUBLIC WITH_COVERAGE=0)
|
||||
endif ()
|
||||
|
||||
# FIXME: move libraries for line reading out from base
|
||||
if (TARGET ch_rust::skim)
|
||||
target_link_libraries(common PUBLIC ch_rust::skim)
|
||||
endif()
|
||||
|
||||
target_include_directories(common PUBLIC .. "${CMAKE_CURRENT_BINARY_DIR}/..")
|
||||
|
||||
if (OS_DARWIN AND NOT USE_STATIC_LIBRARIES)
|
||||
target_link_libraries(common PUBLIC -Wl,-U,_inside_main)
|
||||
endif()
|
||||
|
||||
target_link_libraries (common
|
||||
PUBLIC
|
||||
ch_contrib::cityhash
|
||||
|
@ -1,28 +0,0 @@
|
||||
#include <base/setTerminalEcho.h>
|
||||
#include <base/errnoToString.h>
|
||||
#include <stdexcept>
|
||||
#include <cstring>
|
||||
#include <string>
|
||||
#include <termios.h>
|
||||
#include <unistd.h>
|
||||
|
||||
|
||||
void setTerminalEcho(bool enable)
|
||||
{
|
||||
/// Obtain terminal attributes,
|
||||
/// toggle the ECHO flag
|
||||
/// and set them back.
|
||||
|
||||
struct termios tty{};
|
||||
|
||||
if (0 != tcgetattr(STDIN_FILENO, &tty))
|
||||
throw std::runtime_error(std::string("setTerminalEcho failed get: ") + errnoToString());
|
||||
|
||||
if (enable)
|
||||
tty.c_lflag |= ECHO;
|
||||
else
|
||||
tty.c_lflag &= ~ECHO;
|
||||
|
||||
if (0 != tcsetattr(STDIN_FILENO, TCSANOW, &tty))
|
||||
throw std::runtime_error(std::string("setTerminalEcho failed set: ") + errnoToString());
|
||||
}
|
@ -1,4 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
/// Enable or disable echoing of typed characters. Throws std::runtime_error on error.
|
||||
void setTerminalEcho(bool enable);
|
@ -37,7 +37,7 @@ if (GLIBC_COMPATIBILITY)
|
||||
|
||||
target_include_directories(glibc-compatibility PRIVATE libcxxabi ${musl_arch_include_dir})
|
||||
|
||||
if (( NOT USE_STATIC_LIBRARIES AND NOT USE_STATIC_LIBRARIES ) OR ENABLE_OPENSSL_DYNAMIC)
|
||||
if (ENABLE_OPENSSL_DYNAMIC)
|
||||
target_compile_options(glibc-compatibility PRIVATE -fPIC)
|
||||
endif ()
|
||||
|
||||
|
@ -102,6 +102,11 @@ elseif (ARCH_AMD64)
|
||||
SET(ENABLE_AVX512_FOR_SPEC_OP 0)
|
||||
endif()
|
||||
|
||||
# ClickHouse can be cross-compiled (e.g. on an ARM host for x86) but it is also possible to build ClickHouse on x86 w/o AVX for x86 w/
|
||||
# AVX. We only check that the compiler can emit certain SIMD instructions, we don't care if the host system is able to run the binary.
|
||||
# Therefore, use check_cxx_source_compiles (= does the code compile+link?) instead of check_cxx_source_runs (= does the code
|
||||
# compile+link+run).
|
||||
|
||||
set (TEST_FLAG "-mssse3")
|
||||
set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
|
||||
check_cxx_source_compiles("
|
||||
|
@ -25,7 +25,7 @@ if (SANITIZE)
|
||||
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
|
||||
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${ASAN_FLAGS}")
|
||||
endif()
|
||||
if (USE_STATIC_LIBRARIES AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
|
||||
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
|
||||
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libasan")
|
||||
endif ()
|
||||
if (COMPILER_GCC)
|
||||
@ -50,7 +50,7 @@ if (SANITIZE)
|
||||
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
|
||||
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=memory")
|
||||
endif()
|
||||
if (USE_STATIC_LIBRARIES AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
|
||||
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
|
||||
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libmsan")
|
||||
endif ()
|
||||
|
||||
@ -71,7 +71,7 @@ if (SANITIZE)
|
||||
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
|
||||
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=thread")
|
||||
endif()
|
||||
if (USE_STATIC_LIBRARIES AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
|
||||
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
|
||||
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libtsan")
|
||||
endif ()
|
||||
if (COMPILER_GCC)
|
||||
@ -103,7 +103,7 @@ if (SANITIZE)
|
||||
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
|
||||
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=undefined")
|
||||
endif()
|
||||
if (USE_STATIC_LIBRARIES AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
|
||||
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
|
||||
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libubsan")
|
||||
endif ()
|
||||
if (COMPILER_GCC)
|
||||
|
15
contrib/CMakeLists.txt
vendored
15
contrib/CMakeLists.txt
vendored
@ -115,12 +115,25 @@ endif()
|
||||
add_contrib (llvm-project-cmake llvm-project)
|
||||
add_contrib (libfuzzer-cmake llvm-project)
|
||||
add_contrib (libxml2-cmake libxml2)
|
||||
add_contrib (aws-s3-cmake
|
||||
|
||||
add_contrib (aws-cmake
|
||||
aws
|
||||
aws-c-auth
|
||||
aws-c-cal
|
||||
aws-c-common
|
||||
aws-c-compression
|
||||
aws-c-event-stream
|
||||
aws-c-http
|
||||
aws-c-io
|
||||
aws-c-mqtt
|
||||
aws-c-s3
|
||||
aws-c-sdkutils
|
||||
aws-s2n-tls
|
||||
aws-checksums
|
||||
aws-crt-cpp
|
||||
aws-cmake
|
||||
)
|
||||
|
||||
add_contrib (base64-cmake base64)
|
||||
add_contrib (simdjson-cmake simdjson)
|
||||
add_contrib (rapidjson-cmake rapidjson)
|
||||
|
@ -78,23 +78,14 @@ set(FLATBUFFERS_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/flatbuffers")
|
||||
set(FLATBUFFERS_INCLUDE_DIR "${FLATBUFFERS_SRC_DIR}/include")
|
||||
|
||||
# set flatbuffers CMake options
|
||||
if (USE_STATIC_LIBRARIES)
|
||||
set(FLATBUFFERS_BUILD_FLATLIB ON CACHE BOOL "Enable the build of the flatbuffers library")
|
||||
set(FLATBUFFERS_BUILD_SHAREDLIB OFF CACHE BOOL "Disable the build of the flatbuffers shared library")
|
||||
else ()
|
||||
set(FLATBUFFERS_BUILD_SHAREDLIB ON CACHE BOOL "Enable the build of the flatbuffers shared library")
|
||||
set(FLATBUFFERS_BUILD_FLATLIB OFF CACHE BOOL "Disable the build of the flatbuffers library")
|
||||
endif ()
|
||||
set(FLATBUFFERS_BUILD_FLATLIB ON CACHE BOOL "Enable the build of the flatbuffers library")
|
||||
set(FLATBUFFERS_BUILD_SHAREDLIB OFF CACHE BOOL "Disable the build of the flatbuffers shared library")
|
||||
set(FLATBUFFERS_BUILD_TESTS OFF CACHE BOOL "Skip flatbuffers tests")
|
||||
|
||||
add_subdirectory(${FLATBUFFERS_SRC_DIR} "${FLATBUFFERS_BINARY_DIR}")
|
||||
|
||||
add_library(_flatbuffers INTERFACE)
|
||||
if(USE_STATIC_LIBRARIES)
|
||||
target_link_libraries(_flatbuffers INTERFACE flatbuffers)
|
||||
else()
|
||||
target_link_libraries(_flatbuffers INTERFACE flatbuffers_shared)
|
||||
endif()
|
||||
target_link_libraries(_flatbuffers INTERFACE flatbuffers)
|
||||
target_include_directories(_flatbuffers INTERFACE ${FLATBUFFERS_INCLUDE_DIR})
|
||||
|
||||
# === hdfs
|
||||
|
2
contrib/aws
vendored
2
contrib/aws
vendored
@ -1 +1 @@
|
||||
Subproject commit 00b03604543367d7e310cb0993973fdcb723ea79
|
||||
Subproject commit 4a12641211d4dbc8e2fdb2dd0f1eea0927db9252
|
1
contrib/aws-c-auth
vendored
Submodule
1
contrib/aws-c-auth
vendored
Submodule
@ -0,0 +1 @@
|
||||
Subproject commit 30df6c407e2df43bd244e2c34c9b4a4b87372bfb
|
1
contrib/aws-c-cal
vendored
Submodule
1
contrib/aws-c-cal
vendored
Submodule
@ -0,0 +1 @@
|
||||
Subproject commit 85dd7664b786a389c6fb1a6f031ab4bb2282133d
|
2
contrib/aws-c-common
vendored
2
contrib/aws-c-common
vendored
@ -1 +1 @@
|
||||
Subproject commit 736a82d1697c108b04a277e66438a7f4e19b6857
|
||||
Subproject commit 324fd1d973ccb25c813aa747bf1759cfde5121c5
|
1
contrib/aws-c-compression
vendored
Submodule
1
contrib/aws-c-compression
vendored
Submodule
@ -0,0 +1 @@
|
||||
Subproject commit b517b7decd0dac30be2162f5186c250221c53aff
|
2
contrib/aws-c-event-stream
vendored
2
contrib/aws-c-event-stream
vendored
@ -1 +1 @@
|
||||
Subproject commit 3bc33662f9ccff4f4cbcf9509cc78c26e022fde0
|
||||
Subproject commit 39bfa94a14b7126bf0c1330286ef8db452d87e66
|
1
contrib/aws-c-http
vendored
Submodule
1
contrib/aws-c-http
vendored
Submodule
@ -0,0 +1 @@
|
||||
Subproject commit 2c5a2a7d5556600b9782ffa6c9d7e09964df1abc
|
1
contrib/aws-c-io
vendored
Submodule
1
contrib/aws-c-io
vendored
Submodule
@ -0,0 +1 @@
|
||||
Subproject commit 5d32c453560d0823df521a686bf7fbacde7f9be3
|
1
contrib/aws-c-mqtt
vendored
Submodule
1
contrib/aws-c-mqtt
vendored
Submodule
@ -0,0 +1 @@
|
||||
Subproject commit 882c689561a3db1466330ccfe3b63637e0a575d3
|
1
contrib/aws-c-s3
vendored
Submodule
1
contrib/aws-c-s3
vendored
Submodule
@ -0,0 +1 @@
|
||||
Subproject commit a41255ece72a7c887bba7f9d998ca3e14f4c8a1b
|
1
contrib/aws-c-sdkutils
vendored
Submodule
1
contrib/aws-c-sdkutils
vendored
Submodule
@ -0,0 +1 @@
|
||||
Subproject commit 25bf5cf225f977c3accc6a05a0a7a181ef2a4a30
|
2
contrib/aws-checksums
vendored
2
contrib/aws-checksums
vendored
@ -1 +1 @@
|
||||
Subproject commit 519d6d9093819b6cf89ffff589a27ef8f83d0f65
|
||||
Subproject commit 48e7c0e01479232f225c8044d76c84e74192889d
|
114
contrib/aws-cmake/AwsFeatureTests.cmake
Normal file
114
contrib/aws-cmake/AwsFeatureTests.cmake
Normal file
@ -0,0 +1,114 @@
|
||||
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0.
|
||||
|
||||
include(CheckCSourceRuns)
|
||||
|
||||
option(USE_CPU_EXTENSIONS "Whenever possible, use functions optimized for CPUs with specific extensions (ex: SSE, AVX)." ON)
|
||||
|
||||
# In the current (11/2/21) state of mingw64, the packaged gcc is not capable of emitting properly aligned avx2 instructions under certain circumstances.
|
||||
# This leads to crashes for windows builds using mingw64 when invoking the avx2-enabled versions of certain functions. Until we can find a better
|
||||
# work-around, disable avx2 (and all other extensions) in mingw builds.
|
||||
#
|
||||
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=54412
|
||||
#
|
||||
if (MINGW)
|
||||
message(STATUS "MINGW detected! Disabling avx2 and other CPU extensions")
|
||||
set(USE_CPU_EXTENSIONS OFF)
|
||||
endif()
|
||||
|
||||
if(NOT CMAKE_CROSSCOMPILING)
|
||||
check_c_source_runs("
|
||||
#include <stdbool.h>
|
||||
bool foo(int a, int b, int *c) {
|
||||
return __builtin_mul_overflow(a, b, c);
|
||||
}
|
||||
|
||||
int main() {
|
||||
int out;
|
||||
if (foo(1, 2, &out)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}" AWS_HAVE_GCC_OVERFLOW_MATH_EXTENSIONS)
|
||||
|
||||
if (USE_CPU_EXTENSIONS)
|
||||
check_c_source_runs("
|
||||
int main() {
|
||||
int foo = 42;
|
||||
_mulx_u32(1, 2, &foo);
|
||||
return foo != 2;
|
||||
}" AWS_HAVE_MSVC_MULX)
|
||||
endif()
|
||||
|
||||
endif()
|
||||
|
||||
check_c_source_compiles("
|
||||
#include <Windows.h>
|
||||
#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
|
||||
int main() {
|
||||
return 0;
|
||||
}
|
||||
#else
|
||||
it's not windows desktop
|
||||
#endif
|
||||
" AWS_HAVE_WINAPI_DESKTOP)
|
||||
|
||||
check_c_source_compiles("
|
||||
int main() {
|
||||
#if !(defined(__x86_64__) || defined(__i386__) || defined(_M_X64) || defined(_M_IX86))
|
||||
# error \"not intel\"
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
" AWS_ARCH_INTEL)
|
||||
|
||||
check_c_source_compiles("
|
||||
int main() {
|
||||
#if !(defined(__aarch64__) || defined(_M_ARM64))
|
||||
# error \"not arm64\"
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
" AWS_ARCH_ARM64)
|
||||
|
||||
check_c_source_compiles("
|
||||
int main() {
|
||||
#if !(defined(__arm__) || defined(_M_ARM))
|
||||
# error \"not arm\"
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
" AWS_ARCH_ARM32)
|
||||
|
||||
check_c_source_compiles("
|
||||
int main() {
|
||||
int foo = 42, bar = 24;
|
||||
__asm__ __volatile__(\"\":\"=r\"(foo):\"r\"(bar):\"memory\");
|
||||
}" AWS_HAVE_GCC_INLINE_ASM)
|
||||
|
||||
check_c_source_compiles("
|
||||
#include <sys/auxv.h>
|
||||
int main() {
|
||||
#ifdef __linux__
|
||||
getauxval(AT_HWCAP);
|
||||
getauxval(AT_HWCAP2);
|
||||
#endif
|
||||
return 0;
|
||||
}" AWS_HAVE_AUXV)
|
||||
|
||||
string(REGEX MATCH "^(aarch64|arm)" ARM_CPU "${CMAKE_SYSTEM_PROCESSOR}")
|
||||
if(NOT LEGACY_COMPILER_SUPPORT OR ARM_CPU)
|
||||
check_c_source_compiles("
|
||||
#include <execinfo.h>
|
||||
int main() {
|
||||
backtrace(NULL, 0);
|
||||
return 0;
|
||||
}" AWS_HAVE_EXECINFO)
|
||||
endif()
|
||||
|
||||
check_c_source_compiles("
|
||||
#include <linux/if_link.h>
|
||||
int main() {
|
||||
return 1;
|
||||
}" AWS_HAVE_LINUX_IF_LINK_H)
|
74
contrib/aws-cmake/AwsSIMD.cmake
Normal file
74
contrib/aws-cmake/AwsSIMD.cmake
Normal file
@ -0,0 +1,74 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.

include(CheckCCompilerFlag)
include(CheckIncludeFile)

if (USE_CPU_EXTENSIONS)
if (MSVC)
check_c_compiler_flag("/arch:AVX2" HAVE_M_AVX2_FLAG)
if (HAVE_M_AVX2_FLAG)
set(AVX2_CFLAGS "/arch:AVX2")
endif()
else()
check_c_compiler_flag(-mavx2 HAVE_M_AVX2_FLAG)
if (HAVE_M_AVX2_FLAG)
set(AVX2_CFLAGS "-mavx -mavx2")
endif()
endif()

cmake_push_check_state()
set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${AVX2_CFLAGS}")

check_c_source_compiles("
#include <immintrin.h>
#include <emmintrin.h>
#include <string.h>

int main() {
__m256i vec;
memset(&vec, 0, sizeof(vec));

_mm256_shuffle_epi8(vec, vec);
_mm256_set_epi32(1,2,3,4,5,6,7,8);
_mm256_permutevar8x32_epi32(vec, vec);

return 0;
}" HAVE_AVX2_INTRINSICS)

check_c_source_compiles("
#include <immintrin.h>
#include <string.h>

int main() {
__m256i vec;
memset(&vec, 0, sizeof(vec));
return (int)_mm256_extract_epi64(vec, 2);
}" HAVE_MM256_EXTRACT_EPI64)

cmake_pop_check_state()
endif() # USE_CPU_EXTENSIONS

macro(simd_add_definition_if target definition)
if(${definition})
target_compile_definitions(${target} PRIVATE -D${definition})
endif(${definition})
endmacro(simd_add_definition_if)

# Configure private preprocessor definitions for SIMD-related features
# Does not set any processor feature codegen flags
function(simd_add_definitions target)
simd_add_definition_if(${target} HAVE_AVX2_INTRINSICS)
simd_add_definition_if(${target} HAVE_MM256_EXTRACT_EPI64)
endfunction(simd_add_definitions)

# Adds source files only if AVX2 is supported. These files will be built with
# avx2 intrinsics enabled.
# Usage: simd_add_source_avx2(target file1.c file2.c ...)
function(simd_add_source_avx2 target)
foreach(file ${ARGN})
target_sources(${target} PRIVATE ${file})
set_source_files_properties(${file} PROPERTIES COMPILE_FLAGS "${AVX2_CFLAGS}")
endforeach()
endfunction(simd_add_source_avx2)
50
contrib/aws-cmake/AwsThreadAffinity.cmake
Normal file
@ -0,0 +1,50 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.

include(CheckSymbolExists)

# Check if the platform supports setting thread affinity
# (important for hitting full NIC entitlement on NUMA architectures)
function(aws_set_thread_affinity_method target)

# Non-POSIX, Android, and Apple platforms do not support thread affinity.
if (NOT UNIX OR ANDROID OR APPLE)
target_compile_definitions(${target} PRIVATE
-DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_NONE)
return()
endif()

cmake_push_check_state()
list(APPEND CMAKE_REQUIRED_DEFINITIONS -D_GNU_SOURCE)
list(APPEND CMAKE_REQUIRED_LIBRARIES pthread)

set(headers "pthread.h")
# BSDs put nonportable pthread declarations in a separate header.
if(CMAKE_SYSTEM_NAME MATCHES BSD)
set(headers "${headers};pthread_np.h")
endif()

# Using pthread attrs is the preferred method, but is glibc-specific.
check_symbol_exists(pthread_attr_setaffinity_np "${headers}" USE_PTHREAD_ATTR_SETAFFINITY)
if (USE_PTHREAD_ATTR_SETAFFINITY)
target_compile_definitions(${target} PRIVATE
-DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_PTHREAD_ATTR)
return()
endif()

# This method is still nonportable, but is supported by musl and BSDs.
check_symbol_exists(pthread_setaffinity_np "${headers}" USE_PTHREAD_SETAFFINITY)
if (USE_PTHREAD_SETAFFINITY)
target_compile_definitions(${target} PRIVATE
-DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_PTHREAD)
return()
endif()

# If we got here, we expected thread affinity support but didn't find it.
# We still build with degraded NUMA performance, but show a warning.
message(WARNING "No supported method for setting thread affinity")
target_compile_definitions(${target} PRIVATE
-DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_NONE)

cmake_pop_check_state()
endfunction()
61
contrib/aws-cmake/AwsThreadName.cmake
Normal file
@ -0,0 +1,61 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.

include(CheckSymbolExists)

# Check how the platform supports setting thread name
function(aws_set_thread_name_method target)

if (WINDOWS)
# On Windows we do a runtime check, instead of compile-time check
return()
elseif (APPLE)
# All Apple platforms we support have the same function, so no need for compile-time check.
return()
endif()

cmake_push_check_state()
list(APPEND CMAKE_REQUIRED_DEFINITIONS -D_GNU_SOURCE)
list(APPEND CMAKE_REQUIRED_LIBRARIES pthread)

# The start of the test program
set(c_source_start "
#define _GNU_SOURCE
#include <pthread.h>

#if defined(__FreeBSD__) || defined(__NETBSD__)
#include <pthread_np.h>
#endif

int main() {
pthread_t thread_id;
")

# The end of the test program
set(c_source_end "}")

# pthread_setname_np() usually takes 2 args
check_c_source_compiles("
${c_source_start}
pthread_setname_np(thread_id, \"asdf\");
${c_source_end}"
PTHREAD_SETNAME_TAKES_2ARGS)
if (PTHREAD_SETNAME_TAKES_2ARGS)
target_compile_definitions(${target} PRIVATE -DAWS_PTHREAD_SETNAME_TAKES_2ARGS)
return()
endif()

# But on NetBSD it takes 3!
check_c_source_compiles("
${c_source_start}
pthread_setname_np(thread_id, \"asdf\", NULL);
${c_source_end}
" PTHREAD_SETNAME_TAKES_3ARGS)
if (PTHREAD_SETNAME_TAKES_3ARGS)
target_compile_definitions(${target} PRIVATE -DAWS_PTHREAD_SETNAME_TAKES_3ARGS)
return()
endif()

# And on many older/weirder platforms it's just not supported
cmake_pop_check_state()
endfunction()
376
contrib/aws-cmake/CMakeLists.txt
Normal file
@ -0,0 +1,376 @@
set(ENABLE_AWS_S3_DEFAULT OFF)

if(ENABLE_LIBRARIES AND (OS_LINUX OR OS_DARWIN) AND TARGET OpenSSL::Crypto)
set(ENABLE_AWS_S3_DEFAULT ON)
endif()

option(ENABLE_AWS_S3 "Enable AWS S3" ${ENABLE_AWS_S3_DEFAULT})

if(ENABLE_AWS_S3)
if(NOT TARGET OpenSSL::Crypto)
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use AWS SDK without OpenSSL")
elseif(NOT (OS_LINUX OR OS_DARWIN))
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use AWS SDK with platform ${CMAKE_SYSTEM_NAME}")
endif()
endif()

if(NOT ENABLE_AWS_S3)
message(STATUS "Not using AWS S3")
return()
endif()

# Utilities.
include("${ClickHouse_SOURCE_DIR}/contrib/aws-cmake/AwsFeatureTests.cmake")
include("${ClickHouse_SOURCE_DIR}/contrib/aws-cmake/AwsThreadAffinity.cmake")
include("${ClickHouse_SOURCE_DIR}/contrib/aws-cmake/AwsThreadName.cmake")
include("${ClickHouse_SOURCE_DIR}/contrib/aws-cmake/AwsSIMD.cmake")

# Gather sources and options.
set(AWS_SOURCES)
set(AWS_PUBLIC_INCLUDES)
set(AWS_PRIVATE_INCLUDES)
set(AWS_PUBLIC_COMPILE_DEFS)
set(AWS_PRIVATE_COMPILE_DEFS)
set(AWS_PRIVATE_LIBS)

if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
list(APPEND AWS_PRIVATE_COMPILE_DEFS "-DDEBUG_BUILD")
endif()

set(ENABLE_OPENSSL_ENCRYPTION ON)
if (ENABLE_OPENSSL_ENCRYPTION)
list(APPEND AWS_PRIVATE_COMPILE_DEFS "-DENABLE_OPENSSL_ENCRYPTION")
endif()

set(USE_S2N ON)
if (USE_S2N)
list(APPEND AWS_PRIVATE_COMPILE_DEFS "-DUSE_S2N")
endif()

# Directories.
SET(AWS_SDK_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws")
SET(AWS_SDK_CORE_DIR "${AWS_SDK_DIR}/aws-cpp-sdk-core")
SET(AWS_SDK_S3_DIR "${AWS_SDK_DIR}/aws-cpp-sdk-s3")

SET(AWS_AUTH_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-auth")
SET(AWS_CAL_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-cal")
SET(AWS_CHECKSUMS_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-checksums")
SET(AWS_COMMON_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-common")
SET(AWS_COMPRESSION_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-compression")
SET(AWS_CRT_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-crt-cpp")
SET(AWS_EVENT_STREAM_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-event-stream")
SET(AWS_HTTP_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-http")
SET(AWS_IO_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-io")
SET(AWS_MQTT_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-mqtt")
SET(AWS_S2N_TLS_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-s2n-tls")
SET(AWS_S3_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-s3")
SET(AWS_SDKUTILS_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-sdkutils")

# aws-cpp-sdk-core
file(GLOB AWS_SDK_CORE_SRC
"${AWS_SDK_CORE_DIR}/source/*.cpp"
"${AWS_SDK_CORE_DIR}/source/auth/*.cpp"
"${AWS_SDK_CORE_DIR}/source/auth/bearer-token-provider/*.cpp"
"${AWS_SDK_CORE_DIR}/source/auth/signer/*.cpp"
"${AWS_SDK_CORE_DIR}/source/auth/signer-provider/*.cpp"
"${AWS_SDK_CORE_DIR}/source/client/*.cpp"
"${AWS_SDK_CORE_DIR}/source/config/*.cpp"
"${AWS_SDK_CORE_DIR}/source/config/defaults/*.cpp"
"${AWS_SDK_CORE_DIR}/source/endpoint/*.cpp"
"${AWS_SDK_CORE_DIR}/source/endpoint/internal/*.cpp"
"${AWS_SDK_CORE_DIR}/source/external/cjson/*.cpp"
"${AWS_SDK_CORE_DIR}/source/external/tinyxml2/*.cpp"
"${AWS_SDK_CORE_DIR}/source/http/*.cpp"
"${AWS_SDK_CORE_DIR}/source/http/standard/*.cpp"
"${AWS_SDK_CORE_DIR}/source/internal/*.cpp"
"${AWS_SDK_CORE_DIR}/source/monitoring/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/base64/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/crypto/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/crypto/openssl/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/crypto/factory/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/event/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/json/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/logging/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/memory/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/memory/stl/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/stream/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/threading/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/xml/*.cpp"
)

if(OS_LINUX OR OS_DARWIN)
file(GLOB AWS_SDK_CORE_NET_SRC "${AWS_SDK_CORE_DIR}/source/net/linux-shared/*.cpp")
file(GLOB AWS_SDK_CORE_PLATFORM_SRC "${AWS_SDK_CORE_DIR}/source/platform/linux-shared/*.cpp")
else()
file(GLOB AWS_SDK_CORE_NET_SRC "${AWS_SDK_CORE_DIR}/source/net/*.cpp")
set(AWS_SDK_CORE_PLATFORM_SRC)
endif()

OPTION(USE_AWS_MEMORY_MANAGEMENT "Aws memory management" OFF)
configure_file("${AWS_SDK_CORE_DIR}/include/aws/core/SDKConfig.h.in"
"${CMAKE_CURRENT_BINARY_DIR}/include/aws/core/SDKConfig.h" @ONLY)

list(APPEND AWS_PUBLIC_COMPILE_DEFS "-DAWS_SDK_VERSION_MAJOR=1")
list(APPEND AWS_PUBLIC_COMPILE_DEFS "-DAWS_SDK_VERSION_MINOR=10")
list(APPEND AWS_PUBLIC_COMPILE_DEFS "-DAWS_SDK_VERSION_PATCH=36")

list(APPEND AWS_SOURCES ${AWS_SDK_CORE_SRC} ${AWS_SDK_CORE_NET_SRC} ${AWS_SDK_CORE_PLATFORM_SRC})

list(APPEND AWS_PUBLIC_INCLUDES
"${AWS_SDK_CORE_DIR}/include/"
"${CMAKE_CURRENT_BINARY_DIR}/include"
)

# aws-cpp-sdk-s3
file(GLOB AWS_SDK_S3_SRC
"${AWS_SDK_S3_DIR}/source/*.cpp"
"${AWS_SDK_S3_DIR}/source/model/*.cpp"
)

list(APPEND AWS_SOURCES ${AWS_SDK_S3_SRC})
list(APPEND AWS_PUBLIC_INCLUDES "${AWS_SDK_S3_DIR}/include/")

# aws-c-auth
file(GLOB AWS_AUTH_SRC
"${AWS_AUTH_DIR}/source/*.c"
)

list(APPEND AWS_SOURCES ${AWS_AUTH_SRC})
list(APPEND AWS_PUBLIC_INCLUDES "${AWS_AUTH_DIR}/include/")

# aws-c-cal
file(GLOB AWS_CAL_SRC
"${AWS_CAL_DIR}/source/*.c"
)

if (ENABLE_OPENSSL_ENCRYPTION)
file(GLOB AWS_CAL_OS_SRC
"${AWS_CAL_DIR}/source/unix/*.c"
)
list(APPEND AWS_PRIVATE_LIBS OpenSSL::Crypto)
endif()

list(APPEND AWS_SOURCES ${AWS_CAL_SRC} ${AWS_CAL_OS_SRC})
list(APPEND AWS_PRIVATE_INCLUDES "${AWS_CAL_DIR}/include/")

# aws-c-event-stream
file(GLOB AWS_EVENT_STREAM_SRC
"${AWS_EVENT_STREAM_DIR}/source/*.c"
)

list(APPEND AWS_SOURCES ${AWS_EVENT_STREAM_SRC})
list(APPEND AWS_PRIVATE_INCLUDES "${AWS_EVENT_STREAM_DIR}/include/")

# aws-c-common
file(GLOB AWS_COMMON_SRC
"${AWS_COMMON_DIR}/source/*.c"
"${AWS_COMMON_DIR}/source/external/*.c"
"${AWS_COMMON_DIR}/source/posix/*.c"
)

file(GLOB AWS_COMMON_ARCH_SRC
"${AWS_COMMON_DIR}/source/arch/generic/*.c"
)

if (AWS_ARCH_INTEL)
file(GLOB AWS_COMMON_ARCH_SRC
"${AWS_COMMON_DIR}/source/arch/intel/cpuid.c"
"${AWS_COMMON_DIR}/source/arch/intel/asm/*.c"
)
elseif (AWS_ARCH_ARM64 OR AWS_ARCH_ARM32)
if (AWS_HAVE_AUXV)
file(GLOB AWS_COMMON_ARCH_SRC
"${AWS_COMMON_DIR}/source/arch/arm/asm/*.c"
)
endif()
endif()

set(AWS_COMMON_AVX2_SRC)
if (HAVE_AVX2_INTRINSICS)
list(APPEND AWS_PRIVATE_COMPILE_DEFS "-DUSE_SIMD_ENCODING")
set(AWS_COMMON_AVX2_SRC "${AWS_COMMON_DIR}/source/arch/intel/encoding_avx2.c")
set_source_files_properties(${AWS_COMMON_AVX2_SRC} PROPERTIES COMPILE_FLAGS "${AVX2_CFLAGS}")
endif()

configure_file("${AWS_COMMON_DIR}/include/aws/common/config.h.in"
"${CMAKE_CURRENT_BINARY_DIR}/include/aws/common/config.h" @ONLY)

list(APPEND AWS_SOURCES ${AWS_COMMON_SRC} ${AWS_COMMON_ARCH_SRC} ${AWS_COMMON_AVX2_SRC})

list(APPEND AWS_PUBLIC_INCLUDES
"${AWS_COMMON_DIR}/include/"
"${CMAKE_CURRENT_BINARY_DIR}/include"
)

# aws-checksums
file(GLOB AWS_CHECKSUMS_SRC
"${AWS_CHECKSUMS_DIR}/source/*.c"
"${AWS_CHECKSUMS_DIR}/source/intel/*.c"
"${AWS_CHECKSUMS_DIR}/source/intel/asm/*.c"
"${AWS_CHECKSUMS_DIR}/source/arm/*.c"
)

if(AWS_ARCH_INTEL AND AWS_HAVE_GCC_INLINE_ASM)
file(GLOB AWS_CHECKSUMS_ARCH_SRC
"${AWS_CHECKSUMS_DIR}/source/intel/asm/*.c"
)
endif()

if (AWS_ARCH_ARM64)
file(GLOB AWS_CHECKSUMS_ARCH_SRC
"${AWS_CHECKSUMS_DIR}/source/arm/*.c"
)
set_source_files_properties("${AWS_CHECKSUMS_DIR}/source/arm/crc32c_arm.c" PROPERTIES COMPILE_FLAGS -march=armv8-a+crc)
elseif (AWS_ARCH_ARM32)
if (AWS_ARM32_CRC)
file(GLOB AWS_CHECKSUMS_ARCH_SRC
"${AWS_CHECKSUMS_DIR}/source/arm/*.c"
"${AWS_CHECKSUMS_DIR}/source/arm/asm/*.c"
)
set_source_files_properties(source/arm/crc32c_arm.c PROPERTIES COMPILE_FLAGS -march=armv8-a+crc)
endif()
endif()

list(APPEND AWS_SOURCES ${AWS_CHECKSUMS_SRC} ${AWS_CHECKSUMS_ARCH_SRC})
list(APPEND AWS_PRIVATE_INCLUDES "${AWS_CHECKSUMS_DIR}/include/")

# aws-c-io
file(GLOB AWS_IO_SRC
"${AWS_IO_DIR}/source/*.c"
)

if (OS_LINUX)
file(GLOB AWS_IO_OS_SRC
"${AWS_IO_DIR}/source/linux/*.c"
"${AWS_IO_DIR}/source/posix/*.c"
)
elseif (OS_DARWIN)
file(GLOB AWS_IO_OS_SRC
"${AWS_IO_DIR}/source/bsd/*.c"
"${AWS_IO_DIR}/source/posix/*.c"
)
endif()

set(AWS_IO_TLS_SRC)
if (USE_S2N)
file(GLOB AWS_IO_TLS_SRC
"${AWS_IO_DIR}/source/s2n/*.c"
)
endif()

list(APPEND AWS_SOURCES ${AWS_IO_SRC} ${AWS_IO_OS_SRC} ${AWS_IO_TLS_SRC})
list(APPEND AWS_PUBLIC_INCLUDES "${AWS_IO_DIR}/include/")

# aws-s2n-tls
if (USE_S2N)
file(GLOB AWS_S2N_TLS_SRC
"${AWS_S2N_TLS_DIR}/crypto/*.c"
"${AWS_S2N_TLS_DIR}/error/*.c"
"${AWS_S2N_TLS_DIR}/stuffer/*.c"
"${AWS_S2N_TLS_DIR}/pq-crypto/*.c"
"${AWS_S2N_TLS_DIR}/pq-crypto/kyber_r3/*.c"
"${AWS_S2N_TLS_DIR}/tls/*.c"
"${AWS_S2N_TLS_DIR}/tls/extensions/*.c"
"${AWS_S2N_TLS_DIR}/utils/*.c"
)

list(APPEND AWS_SOURCES ${AWS_S2N_TLS_SRC})

list(APPEND AWS_PRIVATE_INCLUDES
"${AWS_S2N_TLS_DIR}/"
"${AWS_S2N_TLS_DIR}/api/"
)
endif()

# aws-crt-cpp
file(GLOB AWS_CRT_SRC
"${AWS_CRT_DIR}/source/*.cpp"
"${AWS_CRT_DIR}/source/auth/*.cpp"
"${AWS_CRT_DIR}/source/crypto/*.cpp"
"${AWS_CRT_DIR}/source/endpoints/*.cpp"
"${AWS_CRT_DIR}/source/external/*.cpp"
"${AWS_CRT_DIR}/source/http/*.cpp"
"${AWS_CRT_DIR}/source/io/*.cpp"
)

list(APPEND AWS_SOURCES ${AWS_CRT_SRC})
list(APPEND AWS_PUBLIC_INCLUDES "${AWS_CRT_DIR}/include/")

# aws-c-mqtt
file(GLOB AWS_MQTT_SRC
"${AWS_MQTT_DIR}/source/*.c"
)

list(APPEND AWS_SOURCES ${AWS_MQTT_SRC})
list(APPEND AWS_PUBLIC_INCLUDES "${AWS_MQTT_DIR}/include/")

# aws-c-http
file(GLOB AWS_HTTP_SRC
"${AWS_HTTP_DIR}/source/*.c"
)

list(APPEND AWS_SOURCES ${AWS_HTTP_SRC})
list(APPEND AWS_PRIVATE_INCLUDES "${AWS_HTTP_DIR}/include/")

# aws-c-compression
file(GLOB AWS_COMPRESSION_SRC
"${AWS_COMPRESSION_DIR}/source/*.c"
)

list(APPEND AWS_SOURCES ${AWS_COMPRESSION_SRC})
list(APPEND AWS_PRIVATE_INCLUDES "${AWS_COMPRESSION_DIR}/include/")

# aws-c-s3
file(GLOB AWS_S3_SRC
"${AWS_S3_DIR}/source/*.c"
)

list(APPEND AWS_SOURCES ${AWS_S3_SRC})
list(APPEND AWS_PRIVATE_INCLUDES "${AWS_S3_DIR}/include/")

# aws-c-sdkutils
file(GLOB AWS_SDKUTILS_SRC
"${AWS_SDKUTILS_DIR}/source/*.c"
)

list(APPEND AWS_SOURCES ${AWS_SDKUTILS_SRC})
list(APPEND AWS_PUBLIC_INCLUDES "${AWS_SDKUTILS_DIR}/include/")

# Add library.
add_library(_aws ${AWS_SOURCES})

target_include_directories(_aws SYSTEM BEFORE PUBLIC ${AWS_PUBLIC_INCLUDES})
target_include_directories(_aws SYSTEM BEFORE PRIVATE ${AWS_PRIVATE_INCLUDES})
target_compile_definitions(_aws PUBLIC ${AWS_PUBLIC_COMPILE_DEFS})
target_compile_definitions(_aws PRIVATE ${AWS_PRIVATE_COMPILE_DEFS})
target_link_libraries(_aws PRIVATE ${AWS_PRIVATE_LIBS})

aws_set_thread_affinity_method(_aws)
aws_set_thread_name_method(_aws)

# The library is large - avoid bloat.
if (OMIT_HEAVY_DEBUG_SYMBOLS)
target_compile_options (_aws PRIVATE -g0)
endif()

add_library(ch_contrib::aws_s3 ALIAS _aws)
1
contrib/aws-crt-cpp
vendored
Submodule
@ -0,0 +1 @@
Subproject commit ec0bea288f451d884c0d80d534bc5c66241c39a4
1
contrib/aws-s2n-tls
vendored
Submodule
@ -0,0 +1 @@
Subproject commit 15d534e8a9ca1eda6bacee514e37d08b4f38a526
@ -1,122 +0,0 @@
|
||||
if(NOT OS_FREEBSD)
|
||||
option(ENABLE_S3 "Enable S3" ${ENABLE_LIBRARIES})
|
||||
elseif(ENABLE_S3)
|
||||
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use S3 on FreeBSD")
|
||||
endif()
|
||||
|
||||
if(NOT ENABLE_S3)
|
||||
message(STATUS "Not using S3")
|
||||
return()
|
||||
endif()
|
||||
|
||||
SET(AWS_S3_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws/aws-cpp-sdk-s3")
|
||||
SET(AWS_CORE_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws/aws-cpp-sdk-core")
|
||||
SET(AWS_CHECKSUMS_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-checksums")
|
||||
SET(AWS_COMMON_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-common")
|
||||
SET(AWS_EVENT_STREAM_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-event-stream")
|
||||
|
||||
OPTION(USE_AWS_MEMORY_MANAGEMENT "Aws memory management" OFF)
|
||||
configure_file("${AWS_CORE_LIBRARY_DIR}/include/aws/core/SDKConfig.h.in"
|
||||
"${CMAKE_CURRENT_BINARY_DIR}/include/aws/core/SDKConfig.h" @ONLY)
|
||||
|
||||
configure_file("${AWS_COMMON_LIBRARY_DIR}/include/aws/common/config.h.in"
|
||||
"${CMAKE_CURRENT_BINARY_DIR}/include/aws/common/config.h" @ONLY)
|
||||
|
||||
|
||||
file(GLOB AWS_CORE_SOURCES
|
||||
"${AWS_CORE_LIBRARY_DIR}/source/*.cpp"
|
||||
"${AWS_CORE_LIBRARY_DIR}/source/auth/*.cpp"
|
||||
"${AWS_CORE_LIBRARY_DIR}/source/client/*.cpp"
|
||||
"${AWS_CORE_LIBRARY_DIR}/source/http/*.cpp"
|
||||
"${AWS_CORE_LIBRARY_DIR}/source/http/standard/*.cpp"
|
||||
"${AWS_CORE_LIBRARY_DIR}/source/config/*.cpp"
|
||||
"${AWS_CORE_LIBRARY_DIR}/source/external/cjson/*.cpp"
|
||||
"${AWS_CORE_LIBRARY_DIR}/source/external/tinyxml2/*.cpp"
|
||||
"${AWS_CORE_LIBRARY_DIR}/source/internal/*.cpp"
|
||||
"${AWS_CORE_LIBRARY_DIR}/source/monitoring/*.cpp"
|
||||
"${AWS_CORE_LIBRARY_DIR}/source/net/*.cpp"
|
||||
"${AWS_CORE_LIBRARY_DIR}/source/linux-shared/*.cpp"
|
||||
"${AWS_CORE_LIBRARY_DIR}/source/platform/linux-shared/*.cpp"
|
||||
"${AWS_CORE_LIBRARY_DIR}/source/utils/*.cpp"
|
||||
"${AWS_CORE_LIBRARY_DIR}/source/utils/base64/*.cpp"
|
||||
"${AWS_CORE_LIBRARY_DIR}/source/utils/event/*.cpp"
|
||||
"${AWS_CORE_LIBRARY_DIR}/source/utils/crypto/*.cpp"
|
||||
"${AWS_CORE_LIBRARY_DIR}/source/utils/crypto/openssl/*.cpp"
|
||||
"${AWS_CORE_LIBRARY_DIR}/source/utils/crypto/factory/*.cpp"
|
||||
"${AWS_CORE_LIBRARY_DIR}/source/utils/json/*.cpp"
|
||||
"${AWS_CORE_LIBRARY_DIR}/source/utils/logging/*.cpp"
|
||||
"${AWS_CORE_LIBRARY_DIR}/source/utils/memory/*.cpp"
|
||||
"${AWS_CORE_LIBRARY_DIR}/source/utils/memory/stl/*.cpp"
|
||||
"${AWS_CORE_LIBRARY_DIR}/source/utils/stream/*.cpp"
|
||||
"${AWS_CORE_LIBRARY_DIR}/source/utils/threading/*.cpp"
|
||||
"${AWS_CORE_LIBRARY_DIR}/source/utils/xml/*.cpp"
|
||||
)
|
||||
|
||||
file(GLOB AWS_S3_SOURCES
|
||||
"${AWS_S3_LIBRARY_DIR}/source/*.cpp"
|
||||
)
|
||||
|
||||
file(GLOB AWS_S3_MODEL_SOURCES
|
||||
"${AWS_S3_LIBRARY_DIR}/source/model/*.cpp"
|
||||
)
|
||||
|
||||
file(GLOB AWS_EVENT_STREAM_SOURCES
|
||||
"${AWS_EVENT_STREAM_LIBRARY_DIR}/source/*.c"
|
||||
)
|
||||
|
||||
file(GLOB AWS_COMMON_SOURCES
|
||||
"${AWS_COMMON_LIBRARY_DIR}/source/*.c"
|
||||
"${AWS_COMMON_LIBRARY_DIR}/source/posix/*.c"
|
||||
)
|
||||
|
||||
file(GLOB AWS_CHECKSUMS_SOURCES
|
||||
"${AWS_CHECKSUMS_LIBRARY_DIR}/source/*.c"
|
||||
"${AWS_CHECKSUMS_LIBRARY_DIR}/source/intel/*.c"
|
||||
"${AWS_CHECKSUMS_LIBRARY_DIR}/source/arm/*.c"
|
||||
)
|
||||
|
||||
file(GLOB S3_UNIFIED_SRC
|
||||
${AWS_EVENT_STREAM_SOURCES}
|
||||
${AWS_COMMON_SOURCES}
|
||||
${AWS_S3_SOURCES}
|
||||
${AWS_S3_MODEL_SOURCES}
|
||||
${AWS_CORE_SOURCES}
|
||||
)
|
||||
|
||||
set(S3_INCLUDES
|
||||
"${AWS_COMMON_LIBRARY_DIR}/include/"
|
||||
"${AWS_EVENT_STREAM_LIBRARY_DIR}/include/"
|
||||
"${AWS_S3_LIBRARY_DIR}/include/"
|
||||
"${AWS_CORE_LIBRARY_DIR}/include/"
|
||||
"${CMAKE_CURRENT_BINARY_DIR}/include/"
|
||||
)
|
||||
|
||||
add_library(_aws_s3_checksums ${AWS_CHECKSUMS_SOURCES})
|
||||
target_include_directories(_aws_s3_checksums SYSTEM PUBLIC "${AWS_CHECKSUMS_LIBRARY_DIR}/include/")
|
||||
if(CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
|
||||
target_compile_definitions(_aws_s3_checksums PRIVATE "-DDEBUG_BUILD")
|
||||
endif()
|
||||
set_target_properties(_aws_s3_checksums PROPERTIES LINKER_LANGUAGE C)
|
||||
set_property(TARGET _aws_s3_checksums PROPERTY C_STANDARD 99)
|
||||
|
||||
add_library(_aws_s3 ${S3_UNIFIED_SRC})
|
||||
|
||||
target_compile_definitions(_aws_s3 PUBLIC "AWS_SDK_VERSION_MAJOR=1")
|
||||
target_compile_definitions(_aws_s3 PUBLIC "AWS_SDK_VERSION_MINOR=7")
|
||||
target_compile_definitions(_aws_s3 PUBLIC "AWS_SDK_VERSION_PATCH=231")
|
||||
target_include_directories(_aws_s3 SYSTEM BEFORE PUBLIC ${S3_INCLUDES})
|
||||
|
||||
if (TARGET OpenSSL::SSL)
|
||||
target_compile_definitions(_aws_s3 PUBLIC -DENABLE_OPENSSL_ENCRYPTION)
|
||||
target_link_libraries(_aws_s3 PRIVATE OpenSSL::Crypto OpenSSL::SSL)
|
||||
endif()
|
||||
|
||||
target_link_libraries(_aws_s3 PRIVATE _aws_s3_checksums)
|
||||
|
||||
# The library is large - avoid bloat.
|
||||
if (OMIT_HEAVY_DEBUG_SYMBOLS)
|
||||
target_compile_options (_aws_s3 PRIVATE -g0)
|
||||
target_compile_options (_aws_s3_checksums PRIVATE -g0)
|
||||
endif()
|
||||
|
||||
add_library(ch_contrib::aws_s3 ALIAS _aws_s3)
|
@ -139,13 +139,6 @@ if(NOT OPENSSL_NO_ASM)
endif()
endif()

if(BUILD_SHARED_LIBS)
add_definitions(-DBORINGSSL_SHARED_LIBRARY)
# Enable position-independent code globally. This is needed because
# some library targets are OBJECT libraries.
set(CMAKE_POSITION_INDEPENDENT_CODE TRUE)
endif()

set(
CRYPTO_ios_aarch64_SOURCES
@ -63,13 +63,8 @@ SET(SRCS
"${LIBRARY_DIR}/src/lib/windows_port.c"
)

if (USE_STATIC_LIBRARIES)
add_library(_c-ares STATIC ${SRCS})
target_compile_definitions(_c-ares PUBLIC CARES_STATICLIB)
else()
add_library(_c-ares SHARED ${SRCS})
target_compile_definitions(_c-ares PUBLIC CARES_BUILDING_LIBRARY)
endif()
add_library(_c-ares STATIC ${SRCS})
target_compile_definitions(_c-ares PUBLIC CARES_STATICLIB)

target_compile_definitions(_c-ares PRIVATE HAVE_CONFIG_H=1)
2
contrib/cctz
vendored
@ -1 +1 @@
Subproject commit 5c8528fb35e89ee0b3a7157490423fba0d4dd7b5
Subproject commit 7c78edd52b4d65acc103c2f195818ffcabe6fe0d
@ -43,7 +43,10 @@ set_target_properties(unwind PROPERTIES FOLDER "contrib/libunwind-cmake")

target_include_directories(unwind SYSTEM BEFORE PUBLIC $<BUILD_INTERFACE:${LIBUNWIND_SOURCE_DIR}/include>)
target_compile_definitions(unwind PRIVATE -D_LIBUNWIND_NO_HEAP=1 -D_DEBUG -D_LIBUNWIND_IS_NATIVE_ONLY)
target_compile_options(unwind PRIVATE -fno-exceptions -funwind-tables -fno-sanitize=all $<$<COMPILE_LANGUAGE:CXX>:-nostdinc++ -fno-rtti>)

# We should enable optimizations (otherwise it will be too slow in debug)
# and disable sanitizers (otherwise infinite loop may happen)
target_compile_options(unwind PRIVATE -O3 -fno-exceptions -funwind-tables -fno-sanitize=all $<$<COMPILE_LANGUAGE:CXX>:-nostdinc++ -fno-rtti>)

check_c_compiler_flag(-Wunused-but-set-variable HAVE_WARNING_UNUSED_BUT_SET_VARIABLE)
if (HAVE_WARNING_UNUSED_BUT_SET_VARIABLE)
@ -136,11 +136,6 @@ add_library(ch_contrib::uv ALIAS _uv)
target_compile_definitions(_uv PRIVATE ${uv_defines})
target_include_directories(_uv SYSTEM PUBLIC ${SOURCE_DIR}/include PRIVATE ${SOURCE_DIR}/src)
target_link_libraries(_uv ${uv_libraries})
if (NOT USE_STATIC_LIBRARIES)
target_compile_definitions(_uv
INTERFACE USING_UV_SHARED=1
PRIVATE BUILDING_UV_SHARED=1)
endif()

if(UNIX)
# Now for some gibbering horrors from beyond the stars...
@ -6,8 +6,6 @@ endif()

option (ENABLE_EMBEDDED_COMPILER "Enable support for 'compile_expressions' option for query execution" ${ENABLE_EMBEDDED_COMPILER_DEFAULT})

# If USE_STATIC_LIBRARIES=0 was passed to CMake, we'll still build LLVM statically to keep complexity minimal.

if (NOT ENABLE_EMBEDDED_COMPILER)
message(STATUS "Not using LLVM")
return()
@ -1,4 +1,4 @@
if (NOT OS_FREEBSD AND NOT SPLIT_SHARED_LIBRARIES AND NOT (OS_DARWIN AND COMPILER_CLANG))
if (NOT OS_FREEBSD AND NOT (OS_DARWIN AND COMPILER_CLANG))
option (ENABLE_SENTRY "Enable Sentry" ${ENABLE_LIBRARIES})
else()
option (ENABLE_SENTRY "Enable Sentry" OFF)
@ -51,11 +51,7 @@ endif()

add_library(_sentry ${SRCS})

if(BUILD_SHARED_LIBS)
target_compile_definitions(_sentry PRIVATE SENTRY_BUILD_SHARED)
else()
target_compile_definitions(_sentry PUBLIC SENTRY_BUILD_STATIC)
endif()
target_compile_definitions(_sentry PUBLIC SENTRY_BUILD_STATIC)

target_link_libraries(_sentry PRIVATE ch_contrib::curl pthread)
target_include_directories(_sentry PUBLIC "${SRC_DIR}/include" PRIVATE "${SRC_DIR}/src")
2
contrib/sysroot
vendored
@ -1 +1 @@
Subproject commit e9fb375d0a1e5ebfd74c043f088f2342552103f8
Subproject commit f0081b2649b94837855f3bc7d05ef326b100bad8
@ -2,7 +2,6 @@
"docker/packager/binary": {
"name": "clickhouse/binary-builder",
"dependent": [
"docker/test/split_build_smoke_test",
"docker/test/codebrowser"
]
},
@ -55,10 +54,6 @@
"name": "clickhouse/stress-test",
"dependent": []
},
"docker/test/split_build_smoke_test": {
"name": "clickhouse/split-build-smoke-test",
"dependent": []
},
"docker/test/codebrowser": {
"name": "clickhouse/codebrowser",
"dependent": []
@ -107,8 +107,6 @@ fi
mv ./programs/clickhouse* /output
[ -x ./programs/self-extracting/clickhouse ] && mv ./programs/self-extracting/clickhouse /output
mv ./src/unit_tests_dbms /output ||: # may not exist for some binary builds
find . -name '*.so' -print -exec mv '{}' /output \;
find . -name '*.so.*' -print -exec mv '{}' /output \;

prepare_combined_output () {
local OUTPUT
@ -165,7 +163,7 @@ then
)
fi

# May be set for split build or for performance test.
# May be set for performance test.
if [ "" != "$COMBINED_OUTPUT" ]
then
prepare_combined_output /output
@ -100,12 +100,11 @@ def run_docker_image_with_env(
|
||||
subprocess.check_call(cmd, shell=True)
|
||||
|
||||
|
||||
def is_release_build(build_type, package_type, sanitizer, shared_libraries):
|
||||
def is_release_build(build_type, package_type, sanitizer):
|
||||
return (
|
||||
build_type == ""
|
||||
and package_type == "deb"
|
||||
and sanitizer == ""
|
||||
and not shared_libraries
|
||||
)
|
||||
|
||||
|
||||
@ -116,7 +115,6 @@ def parse_env_variables(
|
||||
package_type,
|
||||
cache,
|
||||
distcc_hosts,
|
||||
shared_libraries,
|
||||
clang_tidy,
|
||||
version,
|
||||
author,
|
||||
@ -218,7 +216,7 @@ def parse_env_variables(
|
||||
cmake_flags.append("-DCMAKE_INSTALL_PREFIX=/usr")
|
||||
cmake_flags.append("-DCMAKE_INSTALL_SYSCONFDIR=/etc")
|
||||
cmake_flags.append("-DCMAKE_INSTALL_LOCALSTATEDIR=/var")
|
||||
if is_release_build(build_type, package_type, sanitizer, shared_libraries):
|
||||
if is_release_build(build_type, package_type, sanitizer):
|
||||
cmake_flags.append("-DSPLIT_DEBUG_SYMBOLS=ON")
|
||||
result.append("WITH_PERFORMANCE=1")
|
||||
if is_cross_arm:
|
||||
@ -231,12 +229,10 @@ def parse_env_variables(
|
||||
cmake_flags.append(f"-DCMAKE_C_COMPILER={cc}")
|
||||
cmake_flags.append(f"-DCMAKE_CXX_COMPILER={cxx}")
|
||||
|
||||
# Create combined output archive for shared library build and for performance tests.
|
||||
# Create combined output archive for performance tests.
|
||||
if package_type == "coverity":
|
||||
result.append("COMBINED_OUTPUT=coverity")
|
||||
result.append('COVERITY_TOKEN="$COVERITY_TOKEN"')
|
||||
elif shared_libraries:
|
||||
result.append("COMBINED_OUTPUT=shared_build")
|
||||
|
||||
if sanitizer:
|
||||
result.append(f"SANITIZER={sanitizer}")
|
||||
@ -285,15 +281,6 @@ def parse_env_variables(
|
||||
result.append("BINARY_OUTPUT=tests")
|
||||
cmake_flags.append("-DENABLE_TESTS=1")
|
||||
|
||||
if shared_libraries:
|
||||
cmake_flags.append("-DUSE_STATIC_LIBRARIES=0 -DSPLIT_SHARED_LIBRARIES=1")
|
||||
# We can't always build utils because it requires too much space, but
|
||||
# we have to build them at least in some way in CI. The shared library
|
||||
# build is probably the least heavy disk-wise.
|
||||
cmake_flags.append("-DENABLE_UTILS=1")
|
||||
# utils are not included into clickhouse-bundle, so build everything
|
||||
build_target = "all"
|
||||
|
||||
if clang_tidy:
|
||||
cmake_flags.append("-DENABLE_CLANG_TIDY=1")
|
||||
cmake_flags.append("-DENABLE_TESTS=1")
|
||||
@ -371,7 +358,6 @@ if __name__ == "__main__":
|
||||
default="",
|
||||
)
|
||||
|
||||
parser.add_argument("--shared-libraries", action="store_true")
|
||||
parser.add_argument("--clang-tidy", action="store_true")
|
||||
parser.add_argument("--cache", choices=("ccache", "distcc", ""), default="")
|
||||
parser.add_argument(
|
||||
@ -424,7 +410,6 @@ if __name__ == "__main__":
|
||||
args.package_type,
|
||||
args.cache,
|
||||
args.distcc_hosts,
|
||||
args.shared_libraries,
|
||||
args.clang_tidy,
|
||||
args.version,
|
||||
args.author,
|
||||
|
@ -80,7 +80,7 @@ do
done

# if clickhouse user is defined - create it (user "default" already exists out of box)
if [ -n "$CLICKHOUSE_USER" ] && [ "$CLICKHOUSE_USER" != "default" ] || [ -n "$CLICKHOUSE_PASSWORD" ]; then
if [ -n "$CLICKHOUSE_USER" ] && [ "$CLICKHOUSE_USER" != "default" ] || [ -n "$CLICKHOUSE_PASSWORD" ] || [ "$CLICKHOUSE_ACCESS_MANAGEMENT" != "0" ]; then
    echo "$0: create new user '$CLICKHOUSE_USER' instead 'default'"
    cat <<EOT > /etc/clickhouse-server/users.d/default-user.xml
    <clickhouse>
@ -120,8 +120,8 @@ if [ -n "$(ls /docker-entrypoint-initdb.d/)" ] || [ -n "$CLICKHOUSE_DB" ]; then
    pid="$!"

    # check if clickhouse is ready to accept connections
    # will try to send ping clickhouse via http_port (max 12 retries by default, with 1 sec timeout and 1 sec delay between retries)
    tries=${CLICKHOUSE_INIT_TIMEOUT:-12}
    # will try to send ping clickhouse via http_port (max 1000 retries by default, with 1 sec timeout and 1 sec delay between retries)
    tries=${CLICKHOUSE_INIT_TIMEOUT:-1000}
    while ! wget --spider --no-check-certificate -T 1 -q "$URL" 2>/dev/null; do
        if [ "$tries" -le "0" ]; then
            echo >&2 'ClickHouse init process failed.'
@ -2,6 +2,7 @@
<profiles>
    <default>
        <max_execution_time>10</max_execution_time>

        <!--
            Don't let the fuzzer change this setting (I've actually seen it
            do this before).
@ -14,6 +15,11 @@
            <max_memory_usage>
                <max>10G</max>
            </max_memory_usage>

            <!-- Analyzer is unstable, not ready for testing. -->
            <allow_experimental_analyzer>
                <readonly/>
            </allow_experimental_analyzer>
        </constraints>
    </default>
</profiles>
@ -51,7 +51,6 @@ function clone
|
||||
)
|
||||
|
||||
ls -lath ||:
|
||||
|
||||
}
|
||||
|
||||
function wget_with_retry
|
||||
@ -75,6 +74,7 @@ function download
|
||||
./clickhouse ||:
|
||||
ln -s ./clickhouse ./clickhouse-server
|
||||
ln -s ./clickhouse ./clickhouse-client
|
||||
ln -s ./clickhouse ./clickhouse-local
|
||||
|
||||
# clickhouse-server is in the current dir
|
||||
export PATH="$PWD:$PATH"
|
||||
@ -91,6 +91,12 @@ function configure
|
||||
cp -av --dereference "$script_dir"/query-fuzzer-tweaks-users.xml db/users.d
|
||||
cp -av --dereference "$script_dir"/allow-nullable-key.xml db/config.d
|
||||
|
||||
cat > db/config.d/max_server_memory_usage_to_ram_ratio.xml <<EOL
|
||||
<clickhouse>
|
||||
<max_server_memory_usage_to_ram_ratio>0.75</max_server_memory_usage_to_ram_ratio>
|
||||
</clickhouse>
|
||||
EOL
|
||||
|
||||
cat > db/config.d/core.xml <<EOL
|
||||
<clickhouse>
|
||||
<core_dump>
|
||||
@ -151,7 +157,7 @@ function fuzz
|
||||
mkdir -p /var/run/clickhouse-server
|
||||
|
||||
# NOTE: we use process substitution here to preserve keep $! as a pid of clickhouse-server
|
||||
clickhouse-server --config-file db/config.xml --pid-file /var/run/clickhouse-server/clickhouse-server.pid -- --path db 2>&1 | pigz > server.log.gz &
|
||||
clickhouse-server --config-file db/config.xml --pid-file /var/run/clickhouse-server/clickhouse-server.pid -- --path db > server.log 2>&1 &
|
||||
server_pid=$!
|
||||
|
||||
kill -0 $server_pid
|
||||
@ -256,12 +262,21 @@ quit
|
||||
if [ "$server_died" == 1 ]
|
||||
then
|
||||
# The server has died.
|
||||
task_exit_code=210
|
||||
echo "failure" > status.txt
|
||||
if ! zgrep --text -ao "Received signal.*\|Logical error.*\|Assertion.*failed\|Failed assertion.*\|.*runtime error: .*\|.*is located.*\|SUMMARY: AddressSanitizer:.*\|SUMMARY: MemorySanitizer:.*\|SUMMARY: ThreadSanitizer:.*\|.*_LIBCPP_ASSERT.*" server.log.gz > description.txt
|
||||
if ! grep --text -ao "Received signal.*\|Logical error.*\|Assertion.*failed\|Failed assertion.*\|.*runtime error: .*\|.*is located.*\|SUMMARY: AddressSanitizer:.*\|SUMMARY: MemorySanitizer:.*\|SUMMARY: ThreadSanitizer:.*\|.*_LIBCPP_ASSERT.*" server.log > description.txt
|
||||
then
|
||||
echo "Lost connection to server. See the logs." > description.txt
|
||||
fi
|
||||
|
||||
if grep -E --text 'Sanitizer: (out-of-memory|failed to allocate)' description.txt
|
||||
then
|
||||
# OOM of sanitizer is not a problem we can handle - treat it as success, but preserve the description.
|
||||
task_exit_code=0
|
||||
echo "success" > status.txt
|
||||
else
|
||||
task_exit_code=210
|
||||
echo "failure" > status.txt
|
||||
fi
|
||||
|
||||
elif [ "$fuzzer_exit_code" == "143" ] || [ "$fuzzer_exit_code" == "0" ]
|
||||
then
|
||||
# Variants of a normal run:
|
||||
@ -327,24 +342,28 @@ case "$stage" in
|
||||
time fuzz
|
||||
;&
|
||||
"report")
|
||||
|
||||
CORE_LINK=''
|
||||
if [ -f core.gz ]; then
|
||||
CORE_LINK='<a href="core.gz">core.gz</a>'
|
||||
fi
|
||||
|
||||
grep --text -F '<Fatal>' server.log > fatal.log ||:
|
||||
|
||||
pigz server.log
|
||||
|
||||
cat > report.html <<EOF ||:
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<style>
|
||||
body { font-family: "DejaVu Sans", "Noto Sans", Arial, sans-serif; background: #EEE; }
|
||||
h1 { margin-left: 10px; }
|
||||
th, td { border: 0; padding: 5px 10px 5px 10px; text-align: left; vertical-align: top; line-height: 1.5; background-color: #FFF;
|
||||
td { white-space: pre; font-family: Monospace, Courier New; }
|
||||
border: 0; box-shadow: 0 0 0 1px rgba(0, 0, 0, 0.05), 0 8px 25px -5px rgba(0, 0, 0, 0.1); }
|
||||
th, td { border: 0; padding: 5px 10px 5px 10px; text-align: left; vertical-align: top; line-height: 1.5; background-color: #FFF; }
|
||||
td { white-space: pre; font-family: Monospace, Courier New; box-shadow: 0 0 0 1px rgba(0, 0, 0, 0.05), 0 8px 25px -5px rgba(0, 0, 0, 0.1); }
|
||||
a { color: #06F; text-decoration: none; }
|
||||
a:hover, a:active { color: #F40; text-decoration: underline; }
|
||||
table { border: 0; }
|
||||
p.links a { padding: 5px; margin: 3px; background: #FFF; line-height: 2; white-space: nowrap; box-shadow: 0 0 0 1px rgba(0, 0, 0, 0.05), 0 8px 25px -5px rgba(0, 0, 0, 0.1); }
|
||||
th { cursor: pointer; }
|
||||
|
||||
</style>
|
||||
<title>AST Fuzzer for PR #${PR_TO_TEST} @ ${SHA_TO_TEST}</title>
|
||||
@ -352,17 +371,32 @@ th { cursor: pointer; }
|
||||
<body>
|
||||
<div class="main">
|
||||
|
||||
<h1>AST Fuzzer for PR #${PR_TO_TEST} @ ${SHA_TO_TEST}</h1>
|
||||
<h1>AST Fuzzer for PR <a href="https://github.com/ClickHouse/ClickHouse/pull/${PR_TO_TEST}">#${PR_TO_TEST}</a> @ ${SHA_TO_TEST}</h1>
|
||||
<p class="links">
|
||||
<a href="runlog.log">runlog.log</a>
|
||||
<a href="fuzzer.log">fuzzer.log</a>
|
||||
<a href="server.log.gz">server.log.gz</a>
|
||||
<a href="main.log">main.log</a>
|
||||
${CORE_LINK}
|
||||
<a href="run.log">run.log</a>
|
||||
<a href="fuzzer.log">fuzzer.log</a>
|
||||
<a href="server.log.gz">server.log.gz</a>
|
||||
<a href="main.log">main.log</a>
|
||||
${CORE_LINK}
|
||||
</p>
|
||||
<table>
|
||||
<tr><th>Test name</th><th>Test status</th><th>Description</th></tr>
|
||||
<tr><td>AST Fuzzer</td><td>$(cat status.txt)</td><td>$(cat description.txt)</td></tr>
|
||||
<tr>
|
||||
<th>Test name</th>
|
||||
<th>Test status</th>
|
||||
<th>Description</th>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>AST Fuzzer</td>
|
||||
<td>$(cat status.txt)</td>
|
||||
<td>$(
|
||||
clickhouse-local --input-format RawBLOB --output-format RawBLOB --query "SELECT encodeXMLComponent(*) FROM table" < description.txt || cat description.txt
|
||||
)</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td colspan="3" style="white-space: pre-wrap;">$(
|
||||
clickhouse-local --input-format RawBLOB --output-format RawBLOB --query "SELECT encodeXMLComponent(*) FROM table" < fatal.log || cat fatal.log
|
||||
)</td>
|
||||
</tr>
|
||||
</table>
|
||||
</body>
|
||||
</html>
|
||||
|
@ -83,6 +83,7 @@ RUN python3 -m pip install \
    pytest \
    pytest-order==1.0.0 \
    pytest-timeout \
    pytest-random \
    pytest-xdist \
    pytest-repeat \
    pytz \
@ -297,6 +297,7 @@ if not args.use_existing_tables:

# Let's sync the data to avoid writeback affects performance
os.system("sync")
reportStageEnd("sync")

# By default, test all queries.
queries_to_run = range(0, len(test_queries))
@ -1,9 +0,0 @@
# rebuild in #33610
# docker build -t clickhouse/split-build-smoke-test .
ARG FROM_TAG=latest
FROM clickhouse/binary-builder:$FROM_TAG

COPY run.sh /run.sh
COPY process_split_build_smoke_test_result.py /

CMD /run.sh
@ -1,64 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import os
|
||||
import logging
|
||||
import argparse
|
||||
import csv
|
||||
|
||||
RESULT_LOG_NAME = "run.log"
|
||||
|
||||
|
||||
def process_result(result_folder):
|
||||
|
||||
status = "success"
|
||||
description = "Server started and responded"
|
||||
summary = [("Smoke test", "OK")]
|
||||
with open(os.path.join(result_folder, RESULT_LOG_NAME), "r") as run_log:
|
||||
lines = run_log.read().split("\n")
|
||||
if not lines or lines[0].strip() != "OK":
|
||||
status = "failure"
|
||||
logging.info("Lines is not ok: %s", str("\n".join(lines)))
|
||||
summary = [("Smoke test", "FAIL")]
|
||||
description = "Server failed to respond, see result in logs"
|
||||
|
||||
result_logs = []
|
||||
server_log_path = os.path.join(result_folder, "clickhouse-server.log")
|
||||
stderr_log_path = os.path.join(result_folder, "stderr.log")
|
||||
client_stderr_log_path = os.path.join(result_folder, "clientstderr.log")
|
||||
|
||||
if os.path.exists(server_log_path):
|
||||
result_logs.append(server_log_path)
|
||||
|
||||
if os.path.exists(stderr_log_path):
|
||||
result_logs.append(stderr_log_path)
|
||||
|
||||
if os.path.exists(client_stderr_log_path):
|
||||
result_logs.append(client_stderr_log_path)
|
||||
|
||||
return status, description, summary, result_logs
|
||||
|
||||
|
||||
def write_results(results_file, status_file, results, status):
|
||||
with open(results_file, "w") as f:
|
||||
out = csv.writer(f, delimiter="\t")
|
||||
out.writerows(results)
|
||||
with open(status_file, "w") as f:
|
||||
out = csv.writer(f, delimiter="\t")
|
||||
out.writerow(status)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
|
||||
parser = argparse.ArgumentParser(
|
||||
description="ClickHouse script for parsing results of split build smoke test"
|
||||
)
|
||||
parser.add_argument("--in-results-dir", default="/test_output/")
|
||||
parser.add_argument("--out-results-file", default="/test_output/test_results.tsv")
|
||||
parser.add_argument("--out-status-file", default="/test_output/check_status.tsv")
|
||||
args = parser.parse_args()
|
||||
|
||||
state, description, test_results, logs = process_result(args.in_results_dir)
|
||||
logging.info("Result parsed")
|
||||
status = (state, description)
|
||||
write_results(args.out_results_file, args.out_status_file, test_results, status)
|
||||
logging.info("Result written")
|
@ -1,22 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -x
|
||||
|
||||
install_and_run_server() {
|
||||
mkdir /unpacked
|
||||
tar -xzf /package_folder/shared_build.tgz -C /unpacked --strip 1
|
||||
LD_LIBRARY_PATH=/unpacked /unpacked/clickhouse-server --config /unpacked/config/config.xml >/test_output/stderr.log 2>&1 &
|
||||
}
|
||||
|
||||
run_client() {
|
||||
for i in {1..100}; do
|
||||
sleep 1
|
||||
LD_LIBRARY_PATH=/unpacked /unpacked/clickhouse-client --query "select 'OK'" > /test_output/run.log 2> /test_output/clientstderr.log && break
|
||||
[[ $i == 100 ]] && echo 'FAIL'
|
||||
done
|
||||
}
|
||||
|
||||
install_and_run_server
|
||||
run_client
|
||||
mv /var/log/clickhouse-server/clickhouse-server.log /test_output/clickhouse-server.log
|
||||
/process_split_build_smoke_test_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
|
@ -1,90 +1,151 @@
|
||||
#!/bin/bash
|
||||
|
||||
USAGE='Usage for local run:
|
||||
set -euxf -o pipefail
|
||||
|
||||
./docker/test/stateless/setup_minio.sh { stateful | stateless } ./tests/
|
||||
export MINIO_ROOT_USER=${MINIO_ROOT_USER:-clickhouse}
|
||||
export MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD:-clickhouse}
|
||||
|
||||
'
|
||||
usage() {
|
||||
echo $"Usage: $0 <stateful|stateless> <test_path> (default path: /usr/share/clickhouse-test)"
|
||||
exit 1
|
||||
}
|
||||
|
||||
set -e -x -a -u
|
||||
|
||||
TEST_TYPE="$1"
|
||||
shift
|
||||
|
||||
case $TEST_TYPE in
|
||||
stateless) QUERY_DIR=0_stateless ;;
|
||||
stateful) QUERY_DIR=1_stateful ;;
|
||||
*) echo "unknown test type $TEST_TYPE"; echo "${USAGE}"; exit 1 ;;
|
||||
esac
|
||||
|
||||
ls -lha
|
||||
|
||||
mkdir -p ./minio_data
|
||||
|
||||
if [ ! -f ./minio ]; then
|
||||
MINIO_SERVER_VERSION=${MINIO_SERVER_VERSION:-2022-09-07T22-25-02Z}
|
||||
MINIO_CLIENT_VERSION=${MINIO_CLIENT_VERSION:-2022-08-28T20-08-11Z}
|
||||
case $(uname -m) in
|
||||
x86_64) BIN_ARCH=amd64 ;;
|
||||
aarch64) BIN_ARCH=arm64 ;;
|
||||
*) echo "unknown architecture $(uname -m)"; exit 1 ;;
|
||||
esac
|
||||
echo 'MinIO binary not found, downloading...'
|
||||
|
||||
BINARY_TYPE=$(uname -s | tr '[:upper:]' '[:lower:]')
|
||||
|
||||
wget "https://dl.min.io/server/minio/release/${BINARY_TYPE}-${BIN_ARCH}/archive/minio.RELEASE.${MINIO_SERVER_VERSION}" -O ./minio \
|
||||
&& wget "https://dl.min.io/client/mc/release/${BINARY_TYPE}-${BIN_ARCH}/archive/mc.RELEASE.${MINIO_CLIENT_VERSION}" -O ./mc \
|
||||
&& chmod +x ./mc ./minio
|
||||
fi
|
||||
|
||||
MINIO_ROOT_USER=${MINIO_ROOT_USER:-clickhouse}
|
||||
MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD:-clickhouse}
|
||||
|
||||
./minio --version
|
||||
./minio server --address ":11111" ./minio_data &
|
||||
|
||||
i=0
|
||||
while ! curl -v --silent http://localhost:11111 2>&1 | grep AccessDenied
|
||||
do
|
||||
if [[ $i == 60 ]]; then
|
||||
echo "Failed to setup minio"
|
||||
exit 0
|
||||
check_arg() {
|
||||
local query_dir
|
||||
if [ ! $# -eq 1 ]; then
|
||||
if [ ! $# -eq 2 ]; then
|
||||
echo "ERROR: need either one or two arguments, <stateful|stateless> <test_path> (default path: /usr/share/clickhouse-test)"
|
||||
usage
|
||||
fi
|
||||
fi
|
||||
echo "Trying to connect to minio"
|
||||
sleep 1
|
||||
i=$((i + 1))
|
||||
done
|
||||
case "$1" in
|
||||
stateless)
|
||||
query_dir="0_stateless"
|
||||
;;
|
||||
stateful)
|
||||
query_dir="1_stateful"
|
||||
;;
|
||||
*)
|
||||
echo "unknown test type ${test_type}"
|
||||
usage
|
||||
;;
|
||||
esac
|
||||
echo ${query_dir}
|
||||
}
|
||||
|
||||
lsof -i :11111
|
||||
find_arch() {
|
||||
local arch
|
||||
case $(uname -m) in
|
||||
x86_64)
|
||||
arch="amd64"
|
||||
;;
|
||||
aarch64)
|
||||
arch="arm64"
|
||||
;;
|
||||
*)
|
||||
echo "unknown architecture $(uname -m)";
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
echo ${arch}
|
||||
}
|
||||
|
||||
sleep 5
|
||||
find_os() {
|
||||
local os
|
||||
os=$(uname -s | tr '[:upper:]' '[:lower:]')
|
||||
echo "${os}"
|
||||
}
|
||||
|
||||
./mc alias set clickminio http://localhost:11111 clickhouse clickhouse
|
||||
./mc admin user add clickminio test testtest
|
||||
./mc admin policy set clickminio readwrite user=test
|
||||
./mc mb clickminio/test
|
||||
if [ "$TEST_TYPE" = "stateless" ]; then
|
||||
./mc policy set public clickminio/test
|
||||
fi
|
||||
download_minio() {
|
||||
local os
|
||||
local arch
|
||||
local minio_server_version=${MINIO_SERVER_VERSION:-2022-09-07T22-25-02Z}
|
||||
local minio_client_version=${MINIO_CLIENT_VERSION:-2022-08-28T20-08-11Z}
|
||||
|
||||
os=$(find_os)
|
||||
arch=$(find_arch)
|
||||
wget "https://dl.min.io/server/minio/release/${os}-${arch}/archive/minio.RELEASE.${minio_server_version}" -O ./minio
|
||||
wget "https://dl.min.io/client/mc/release/${os}-${arch}/archive/mc.RELEASE.${minio_client_version}" -O ./mc
|
||||
chmod +x ./mc ./minio
|
||||
}
|
||||
|
||||
# Upload data to Minio. By default after unpacking all tests will in
|
||||
# /usr/share/clickhouse-test/queries
|
||||
start_minio() {
|
||||
mkdir -p ./minio_data
|
||||
./minio --version
|
||||
./minio server --address ":11111" ./minio_data &
|
||||
wait_for_it
|
||||
lsof -i :11111
|
||||
sleep 5
|
||||
}
|
||||
|
||||
TEST_PATH=${1:-/usr/share/clickhouse-test}
|
||||
MINIO_DATA_PATH=${TEST_PATH}/queries/${QUERY_DIR}/data_minio
|
||||
setup_minio() {
|
||||
local test_type=$1
|
||||
./mc alias set clickminio http://localhost:11111 clickhouse clickhouse
|
||||
./mc admin user add clickminio test testtest
|
||||
./mc admin policy set clickminio readwrite user=test
|
||||
./mc mb clickminio/test
|
||||
if [ "$test_type" = "stateless" ]; then
|
||||
./mc policy set public clickminio/test
|
||||
fi
|
||||
}
|
||||
|
||||
# Iterating over globs will cause redudant FILE variale to be a path to a file, not a filename
|
||||
# shellcheck disable=SC2045
|
||||
for FILE in $(ls "${MINIO_DATA_PATH}"); do
|
||||
echo "$FILE";
|
||||
./mc cp "${MINIO_DATA_PATH}"/"$FILE" clickminio/test/"$FILE";
|
||||
done
|
||||
# uploads data to minio, by default after unpacking all tests
|
||||
# will be in /usr/share/clickhouse-test/queries
|
||||
upload_data() {
|
||||
local query_dir=$1
|
||||
local test_path=$2
|
||||
local data_path=${test_path}/queries/${query_dir}/data_minio
|
||||
|
||||
mkdir -p ~/.aws
|
||||
cat <<EOT >> ~/.aws/credentials
|
||||
# iterating over globs will cause redundant file variable to be
|
||||
# a path to a file, not a filename
|
||||
# shellcheck disable=SC2045
|
||||
for file in $(ls "${data_path}"); do
|
||||
echo "${file}";
|
||||
./mc cp "${data_path}"/"${file}" clickminio/test/"${file}";
|
||||
done
|
||||
}
|
||||
|
||||
setup_aws_credentials() {
|
||||
local minio_root_user=${MINIO_ROOT_USER:-clickhouse}
|
||||
local minio_root_password=${MINIO_ROOT_PASSWORD:-clickhouse}
|
||||
mkdir -p ~/.aws
|
||||
cat <<EOT >> ~/.aws/credentials
|
||||
[default]
|
||||
aws_access_key_id=${MINIO_ROOT_USER}
|
||||
aws_secret_access_key=${MINIO_ROOT_PASSWORD}
|
||||
aws_access_key_id=${minio_root_user}
|
||||
aws_secret_access_key=${minio_root_password}
|
||||
EOT
|
||||
}
|
||||
|
||||
wait_for_it() {
|
||||
local counter=0
|
||||
local max_counter=60
|
||||
local url="http://localhost:11111"
|
||||
local params=(
|
||||
--silent
|
||||
--verbose
|
||||
)
|
||||
while ! curl "${params[@]}" "${url}" 2>&1 | grep AccessDenied
|
||||
do
|
||||
if [[ ${counter} == "${max_counter}" ]]; then
|
||||
echo "failed to setup minio"
|
||||
exit 0
|
||||
fi
|
||||
echo "trying to connect to minio"
|
||||
sleep 1
|
||||
counter=$((counter + 1))
|
||||
done
|
||||
}
|
||||
|
||||
main() {
|
||||
local query_dir
|
||||
query_dir=$(check_arg "$@")
|
||||
if [ ! -f ./minio ]; then
|
||||
download_minio
|
||||
fi
|
||||
start_minio
|
||||
setup_minio "$1"
|
||||
upload_data "${query_dir}" "${2:-/usr/share/clickhouse-test}"
|
||||
setup_aws_credentials
|
||||
}
|
||||
|
||||
main "$@"
|
@ -1,6 +1,6 @@
Allow to run simple ClickHouse stress test in Docker from debian packages.
Allows to run simple ClickHouse stress test in Docker from debian packages.
Actually it runs multiple copies of clickhouse-test (functional tests).
This allows to find problems like segmentation fault which cause shutdown of server.
This allows to find problems like failed assertions and memory safety issues.

Usage:
```
@ -11,31 +11,6 @@ set -x
|
||||
# core.COMM.PID-TID
|
||||
sysctl kernel.core_pattern='core.%e.%p-%P'
|
||||
|
||||
# Thread Fuzzer allows to check more permutations of possible thread scheduling
|
||||
# and find more potential issues.
|
||||
# Temporarily disable ThreadFuzzer with tsan because of https://github.com/google/sanitizers/issues/1540
|
||||
is_tsan_build=$(clickhouse local -q "select value like '% -fsanitize=thread %' from system.build_options where name='CXX_FLAGS'")
|
||||
if [ "$is_tsan_build" -eq "0" ]; then
|
||||
export THREAD_FUZZER_CPU_TIME_PERIOD_US=1000
|
||||
export THREAD_FUZZER_SLEEP_PROBABILITY=0.1
|
||||
export THREAD_FUZZER_SLEEP_TIME_US=100000
|
||||
|
||||
export THREAD_FUZZER_pthread_mutex_lock_BEFORE_MIGRATE_PROBABILITY=1
|
||||
export THREAD_FUZZER_pthread_mutex_lock_AFTER_MIGRATE_PROBABILITY=1
|
||||
export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_MIGRATE_PROBABILITY=1
|
||||
export THREAD_FUZZER_pthread_mutex_unlock_AFTER_MIGRATE_PROBABILITY=1
|
||||
|
||||
export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_PROBABILITY=0.001
|
||||
export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_PROBABILITY=0.001
|
||||
export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_PROBABILITY=0.001
|
||||
export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_PROBABILITY=0.001
|
||||
export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_TIME_US=10000
|
||||
|
||||
export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_TIME_US=10000
|
||||
export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_TIME_US=10000
|
||||
export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_TIME_US=10000
|
||||
fi
|
||||
|
||||
|
||||
function install_packages()
|
||||
{
|
||||
@ -54,7 +29,7 @@ function configure()
|
||||
|
||||
# we mount tests folder from repo to /usr/share
|
||||
ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test
|
||||
ln -s /usr/share/clickhouse-test/ci/download_release_packets.py /usr/bin/download_release_packets
|
||||
ln -s /usr/share/clickhouse-test/ci/download_release_packages.py /usr/bin/download_release_packages
|
||||
ln -s /usr/share/clickhouse-test/ci/get_previous_release_tag.py /usr/bin/get_previous_release_tag
|
||||
|
||||
# avoid too slow startup
|
||||
@ -78,6 +53,7 @@ function configure()
|
||||
local total_mem
|
||||
total_mem=$(awk '/MemTotal/ { print $(NF-1) }' /proc/meminfo) # KiB
|
||||
total_mem=$(( total_mem*1024 )) # bytes
|
||||
|
||||
# Set maximum memory usage as half of total memory (less chance of OOM).
|
||||
#
|
||||
# But not via max_server_memory_usage but via max_memory_usage_for_user,
|
||||
@ -90,16 +66,17 @@ function configure()
# max_server_memory_usage will be hard limit, and queries that should be
# executed regardless memory limits will use max_memory_usage_for_user=0,
# instead of relying on max_untracked_memory
local max_server_mem
max_server_mem=$((total_mem*75/100)) # 75%
echo "Setting max_server_memory_usage=$max_server_mem"

max_server_memory_usage_to_ram_ratio=0.5
echo "Setting max_server_memory_usage_to_ram_ratio to ${max_server_memory_usage_to_ram_ratio}"
cat > /etc/clickhouse-server/config.d/max_server_memory_usage.xml <<EOL
<clickhouse>
<max_server_memory_usage>${max_server_mem}</max_server_memory_usage>
<max_server_memory_usage_to_ram_ratio>${max_server_memory_usage_to_ram_ratio}</max_server_memory_usage_to_ram_ratio>
</clickhouse>
EOL

local max_users_mem
max_users_mem=$((total_mem*50/100)) # 50%
max_users_mem=$((total_mem*30/100)) # 30%
echo "Setting max_memory_usage_for_user=$max_users_mem"
cat > /etc/clickhouse-server/users.d/max_memory_usage_for_user.xml <<EOL
<clickhouse>
@ -123,6 +100,29 @@ EOL
<core_path>$PWD</core_path>
</clickhouse>
EOL

# Let OOM killer terminate other processes before clickhouse-server:
cat > /etc/clickhouse-server/config.d/oom_score.xml <<EOL
<clickhouse>
<oom_score>-1000</oom_score>
</clickhouse>
EOL

# Analyzer is not yet ready for testing
cat > /etc/clickhouse-server/users.d/no_analyzer.xml <<EOL
<clickhouse>
<profiles>
<default>
<constraints>
<allow_experimental_analyzer>
<readonly/>
</allow_experimental_analyzer>
</constraints>
</default>
</profiles>
</clickhouse>
EOL

}

function stop()
@ -210,6 +210,31 @@ quit

install_packages package_folder

# Thread Fuzzer allows to check more permutations of possible thread scheduling
# and find more potential issues.
# Temporarily disable ThreadFuzzer with tsan because of https://github.com/google/sanitizers/issues/1540
is_tsan_build=$(clickhouse local -q "select value like '% -fsanitize=thread %' from system.build_options where name='CXX_FLAGS'")
if [ "$is_tsan_build" -eq "0" ]; then
export THREAD_FUZZER_CPU_TIME_PERIOD_US=1000
export THREAD_FUZZER_SLEEP_PROBABILITY=0.1
export THREAD_FUZZER_SLEEP_TIME_US=100000

export THREAD_FUZZER_pthread_mutex_lock_BEFORE_MIGRATE_PROBABILITY=1
export THREAD_FUZZER_pthread_mutex_lock_AFTER_MIGRATE_PROBABILITY=1
export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_MIGRATE_PROBABILITY=1
export THREAD_FUZZER_pthread_mutex_unlock_AFTER_MIGRATE_PROBABILITY=1

export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_PROBABILITY=0.001
export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_PROBABILITY=0.001
export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_PROBABILITY=0.001
export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_PROBABILITY=0.001
export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_TIME_US=10000

export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_TIME_US=10000
export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_TIME_US=10000
export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_TIME_US=10000
fi

export ZOOKEEPER_FAULT_INJECTION=1
configure

@ -334,219 +359,228 @@ zgrep -Fa "########################################" /test_output/* > /dev/null
|
||||
zgrep -Fa " received signal " /test_output/gdb.log > /dev/null \
|
||||
&& echo -e 'Found signal in gdb.log\tFAIL' >> /test_output/test_results.tsv
|
||||
|
||||
echo -e "Backward compatibility check\n"
|
||||
if [ "$DISABLE_BC_CHECK" -ne "1" ]; then
|
||||
echo -e "Backward compatibility check\n"
|
||||
|
||||
echo "Get previous release tag"
|
||||
previous_release_tag=$(clickhouse-client --version | grep -o "[0-9]*\.[0-9]*\.[0-9]*\.[0-9]*" | get_previous_release_tag)
|
||||
echo $previous_release_tag
|
||||
echo "Get previous release tag"
|
||||
previous_release_tag=$(clickhouse-client --version | grep -o "[0-9]*\.[0-9]*\.[0-9]*\.[0-9]*" | get_previous_release_tag)
|
||||
echo $previous_release_tag
|
||||
|
||||
echo "Clone previous release repository"
|
||||
git clone https://github.com/ClickHouse/ClickHouse.git --no-tags --progress --branch=$previous_release_tag --no-recurse-submodules --depth=1 previous_release_repository
|
||||
echo "Clone previous release repository"
|
||||
git clone https://github.com/ClickHouse/ClickHouse.git --no-tags --progress --branch=$previous_release_tag --no-recurse-submodules --depth=1 previous_release_repository
|
||||
|
||||
echo "Download previous release server"
|
||||
mkdir previous_release_package_folder
|
||||
echo "Download clickhouse-server from the previous release"
|
||||
mkdir previous_release_package_folder
|
||||
|
||||
echo $previous_release_tag | download_release_packets && echo -e 'Download script exit code\tOK' >> /test_output/test_results.tsv \
|
||||
|| echo -e 'Download script failed\tFAIL' >> /test_output/test_results.tsv
|
||||
echo $previous_release_tag | download_release_packages && echo -e 'Download script exit code\tOK' >> /test_output/test_results.tsv \
|
||||
|| echo -e 'Download script failed\tFAIL' >> /test_output/test_results.tsv
|
||||
|
||||
mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.clean.log
|
||||
for table in query_log trace_log
|
||||
do
|
||||
clickhouse-local --path /var/lib/clickhouse/ --only-system-tables -q "select * from system.$table format TSVWithNamesAndTypes" | pigz > /test_output/$table.tsv.gz ||:
|
||||
done
|
||||
|
||||
tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||:
|
||||
|
||||
# Check if we cloned previous release repository successfully
|
||||
if ! [ "$(ls -A previous_release_repository/tests/queries)" ]
|
||||
then
|
||||
echo -e "Backward compatibility check: Failed to clone previous release tests\tFAIL" >> /test_output/test_results.tsv
|
||||
elif ! [ "$(ls -A previous_release_package_folder/clickhouse-common-static_*.deb && ls -A previous_release_package_folder/clickhouse-server_*.deb)" ]
|
||||
then
|
||||
echo -e "Backward compatibility check: Failed to download previous release packets\tFAIL" >> /test_output/test_results.tsv
|
||||
else
|
||||
echo -e "Successfully cloned previous release tests\tOK" >> /test_output/test_results.tsv
|
||||
echo -e "Successfully downloaded previous release packets\tOK" >> /test_output/test_results.tsv
|
||||
|
||||
# Uninstall current packages
|
||||
dpkg --remove clickhouse-client
|
||||
dpkg --remove clickhouse-server
|
||||
dpkg --remove clickhouse-common-static-dbg
|
||||
dpkg --remove clickhouse-common-static
|
||||
|
||||
rm -rf /var/lib/clickhouse/*
|
||||
|
||||
# Make BC check more funny by forcing Ordinary engine for system database
|
||||
mkdir /var/lib/clickhouse/metadata
|
||||
echo "ATTACH DATABASE system ENGINE=Ordinary" > /var/lib/clickhouse/metadata/system.sql
|
||||
|
||||
# Install previous release packages
|
||||
install_packages previous_release_package_folder
|
||||
|
||||
# Start server from previous release
|
||||
# Previous version may not be ready for fault injections
|
||||
export ZOOKEEPER_FAULT_INJECTION=0
|
||||
configure
|
||||
|
||||
# Avoid "Setting s3_check_objects_after_upload is neither a builtin setting..."
|
||||
rm -f /etc/clickhouse-server/users.d/enable_blobs_check.xml ||:
|
||||
rm -f /etc/clickhouse-server/users.d/marks.xml ||:
|
||||
|
||||
# Remove s3 related configs to avoid "there is no disk type `cache`"
|
||||
rm -f /etc/clickhouse-server/config.d/storage_conf.xml ||:
|
||||
rm -f /etc/clickhouse-server/config.d/azure_storage_conf.xml ||:
|
||||
|
||||
# Turn on after 22.12
|
||||
rm -f /etc/clickhouse-server/config.d/compressed_marks_and_index.xml ||:
|
||||
# it uses recently introduced settings which previous versions may not have
|
||||
rm -f /etc/clickhouse-server/users.d/insert_keeper_retries.xml ||:
|
||||
|
||||
start
|
||||
|
||||
clickhouse-client --query="SELECT 'Server version: ', version()"
|
||||
|
||||
# Install new package before running stress test because we should use new
|
||||
# clickhouse-client and new clickhouse-test.
|
||||
#
|
||||
# But we should leave old binary in /usr/bin/ and debug symbols in
|
||||
# /usr/lib/debug/usr/bin (if any) for gdb and internal DWARF parser, so it
|
||||
# will print sane stacktraces and also to avoid possible crashes.
|
||||
#
|
||||
# FIXME: those files can be extracted directly from debian package, but
|
||||
# actually better solution will be to use different PATH instead of playing
|
||||
# games with files from packages.
|
||||
mv /usr/bin/clickhouse previous_release_package_folder/
|
||||
mv /usr/lib/debug/usr/bin/clickhouse.debug previous_release_package_folder/
|
||||
install_packages package_folder
|
||||
mv /usr/bin/clickhouse package_folder/
|
||||
mv /usr/lib/debug/usr/bin/clickhouse.debug package_folder/
|
||||
mv previous_release_package_folder/clickhouse /usr/bin/
|
||||
mv previous_release_package_folder/clickhouse.debug /usr/lib/debug/usr/bin/clickhouse.debug
|
||||
|
||||
mkdir tmp_stress_output
|
||||
|
||||
./stress --test-cmd="/usr/bin/clickhouse-test --queries=\"previous_release_repository/tests/queries\"" --backward-compatibility-check --output-folder tmp_stress_output --global-time-limit=1200 \
|
||||
&& echo -e 'Backward compatibility check: Test script exit code\tOK' >> /test_output/test_results.tsv \
|
||||
|| echo -e 'Backward compatibility check: Test script failed\tFAIL' >> /test_output/test_results.tsv
|
||||
rm -rf tmp_stress_output
|
||||
|
||||
clickhouse-client --query="SELECT 'Tables count:', count() FROM system.tables"
|
||||
|
||||
stop 1
|
||||
mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.backward.stress.log
|
||||
|
||||
# Start new server
|
||||
mv package_folder/clickhouse /usr/bin/
|
||||
mv package_folder/clickhouse.debug /usr/lib/debug/usr/bin/clickhouse.debug
|
||||
export ZOOKEEPER_FAULT_INJECTION=1
|
||||
configure
|
||||
start 500
|
||||
clickhouse-client --query "SELECT 'Backward compatibility check: Server successfully started', 'OK'" >> /test_output/test_results.tsv \
|
||||
|| (echo -e 'Backward compatibility check: Server failed to start\tFAIL' >> /test_output/test_results.tsv \
|
||||
&& grep -a "<Error>.*Application" /var/log/clickhouse-server/clickhouse-server.log >> /test_output/bc_check_application_errors.txt)
|
||||
|
||||
clickhouse-client --query="SELECT 'Server version: ', version()"
|
||||
|
||||
# Let the server run for a while before checking log.
|
||||
sleep 60
|
||||
|
||||
stop
|
||||
mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.backward.clean.log
|
||||
|
||||
# Error messages (we should ignore some errors)
|
||||
# FIXME https://github.com/ClickHouse/ClickHouse/issues/38643 ("Unknown index: idx.")
|
||||
# FIXME https://github.com/ClickHouse/ClickHouse/issues/39174 ("Cannot parse string 'Hello' as UInt64")
|
||||
# FIXME Not sure if it's expected, but some tests from BC check may not be finished yet when we restarting server.
|
||||
# Let's just ignore all errors from queries ("} <Error> TCPHandler: Code:", "} <Error> executeQuery: Code:")
|
||||
# FIXME https://github.com/ClickHouse/ClickHouse/issues/39197 ("Missing columns: 'v3' while processing query: 'v3, k, v1, v2, p'")
|
||||
# NOTE Incompatibility was introduced in https://github.com/ClickHouse/ClickHouse/pull/39263, it's expected
|
||||
# ("This engine is deprecated and is not supported in transactions", "[Queue = DB::MergeMutateRuntimeQueue]: Code: 235. DB::Exception: Part")
|
||||
# FIXME https://github.com/ClickHouse/ClickHouse/issues/39174 - bad mutation does not indicate backward incompatibility
|
||||
echo "Check for Error messages in server log:"
|
||||
zgrep -Fav -e "Code: 236. DB::Exception: Cancelled merging parts" \
|
||||
-e "Code: 236. DB::Exception: Cancelled mutating parts" \
|
||||
-e "REPLICA_IS_ALREADY_ACTIVE" \
|
||||
-e "REPLICA_ALREADY_EXISTS" \
|
||||
-e "ALL_REPLICAS_LOST" \
|
||||
-e "DDLWorker: Cannot parse DDL task query" \
|
||||
-e "RaftInstance: failed to accept a rpc connection due to error 125" \
|
||||
-e "UNKNOWN_DATABASE" \
|
||||
-e "NETWORK_ERROR" \
|
||||
-e "UNKNOWN_TABLE" \
|
||||
-e "ZooKeeperClient" \
|
||||
-e "KEEPER_EXCEPTION" \
|
||||
-e "DirectoryMonitor" \
|
||||
-e "TABLE_IS_READ_ONLY" \
|
||||
-e "Code: 1000, e.code() = 111, Connection refused" \
|
||||
-e "UNFINISHED" \
|
||||
-e "NETLINK_ERROR" \
|
||||
-e "Renaming unexpected part" \
|
||||
-e "PART_IS_TEMPORARILY_LOCKED" \
|
||||
-e "and a merge is impossible: we didn't find" \
|
||||
-e "found in queue and some source parts for it was lost" \
|
||||
-e "is lost forever." \
|
||||
-e "Unknown index: idx." \
|
||||
-e "Cannot parse string 'Hello' as UInt64" \
|
||||
-e "} <Error> TCPHandler: Code:" \
|
||||
-e "} <Error> executeQuery: Code:" \
|
||||
-e "Missing columns: 'v3' while processing query: 'v3, k, v1, v2, p'" \
|
||||
-e "This engine is deprecated and is not supported in transactions" \
|
||||
-e "[Queue = DB::MergeMutateRuntimeQueue]: Code: 235. DB::Exception: Part" \
|
||||
-e "The set of parts restored in place of" \
|
||||
-e "(ReplicatedMergeTreeAttachThread): Initialization failed. Error" \
|
||||
-e "Code: 269. DB::Exception: Destination table is myself" \
|
||||
-e "Coordination::Exception: Connection loss" \
|
||||
-e "MutateFromLogEntryTask" \
|
||||
-e "No connection to ZooKeeper, cannot get shared table ID" \
|
||||
-e "Session expired" \
|
||||
/var/log/clickhouse-server/clickhouse-server.backward.clean.log | zgrep -Fa "<Error>" > /test_output/bc_check_error_messages.txt \
|
||||
&& echo -e 'Backward compatibility check: Error message in clickhouse-server.log (see bc_check_error_messages.txt)\tFAIL' >> /test_output/test_results.tsv \
|
||||
|| echo -e 'Backward compatibility check: No Error messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv
|
||||
|
||||
# Remove file bc_check_error_messages.txt if it's empty
|
||||
[ -s /test_output/bc_check_error_messages.txt ] || rm /test_output/bc_check_error_messages.txt
|
||||
|
||||
# Sanitizer asserts
|
||||
zgrep -Fa "==================" /var/log/clickhouse-server/stderr.log >> /test_output/tmp
|
||||
zgrep -Fa "WARNING" /var/log/clickhouse-server/stderr.log >> /test_output/tmp
|
||||
zgrep -Fav -e "ASan doesn't fully support makecontext/swapcontext functions" -e "DB::Exception" /test_output/tmp > /dev/null \
|
||||
&& echo -e 'Backward compatibility check: Sanitizer assert (in stderr.log)\tFAIL' >> /test_output/test_results.tsv \
|
||||
|| echo -e 'Backward compatibility check: No sanitizer asserts\tOK' >> /test_output/test_results.tsv
|
||||
rm -f /test_output/tmp
|
||||
|
||||
# OOM
|
||||
zgrep -Fa " <Fatal> Application: Child process was terminated by signal 9" /var/log/clickhouse-server/clickhouse-server.backward.*.log > /dev/null \
|
||||
&& echo -e 'Backward compatibility check: OOM killer (or signal 9) in clickhouse-server.log\tFAIL' >> /test_output/test_results.tsv \
|
||||
|| echo -e 'Backward compatibility check: No OOM messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv
|
||||
|
||||
# Logical errors
|
||||
echo "Check for Logical errors in server log:"
|
||||
zgrep -Fa -A20 "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-server/clickhouse-server.backward.*.log > /test_output/bc_check_logical_errors.txt \
|
||||
&& echo -e 'Backward compatibility check: Logical error thrown (see clickhouse-server.log or bc_check_logical_errors.txt)\tFAIL' >> /test_output/test_results.tsv \
|
||||
|| echo -e 'Backward compatibility check: No logical errors\tOK' >> /test_output/test_results.tsv
|
||||
|
||||
# Remove file bc_check_logical_errors.txt if it's empty
|
||||
[ -s /test_output/bc_check_logical_errors.txt ] || rm /test_output/bc_check_logical_errors.txt
|
||||
|
||||
# Crash
|
||||
zgrep -Fa "########################################" /var/log/clickhouse-server/clickhouse-server.backward.*.log > /dev/null \
|
||||
&& echo -e 'Backward compatibility check: Killed by signal (in clickhouse-server.log)\tFAIL' >> /test_output/test_results.tsv \
|
||||
|| echo -e 'Backward compatibility check: Not crashed\tOK' >> /test_output/test_results.tsv
|
||||
|
||||
# It also checks for crash without stacktrace (printed by watchdog)
|
||||
echo "Check for Fatal message in server log:"
|
||||
zgrep -Fa " <Fatal> " /var/log/clickhouse-server/clickhouse-server.backward.*.log > /test_output/bc_check_fatal_messages.txt \
|
||||
&& echo -e 'Backward compatibility check: Fatal message in clickhouse-server.log (see bc_check_fatal_messages.txt)\tFAIL' >> /test_output/test_results.tsv \
|
||||
|| echo -e 'Backward compatibility check: No fatal messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv
|
||||
|
||||
# Remove file bc_check_fatal_messages.txt if it's empty
|
||||
[ -s /test_output/bc_check_fatal_messages.txt ] || rm /test_output/bc_check_fatal_messages.txt
|
||||
|
||||
tar -chf /test_output/coordination.backward.tar /var/lib/clickhouse/coordination ||:
|
||||
mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.clean.log
|
||||
for table in query_log trace_log
|
||||
do
|
||||
clickhouse-local --path /var/lib/clickhouse/ --only-system-tables -q "select * from system.$table format TSVWithNamesAndTypes" | pigz > /test_output/$table.backward.tsv.gz ||:
|
||||
clickhouse-local --path /var/lib/clickhouse/ --only-system-tables -q "select * from system.$table format TSVWithNamesAndTypes" | pigz > /test_output/$table.tsv.gz ||:
|
||||
done
|
||||
|
||||
tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||:
|
||||
|
||||
# Check if we cloned previous release repository successfully
|
||||
if ! [ "$(ls -A previous_release_repository/tests/queries)" ]
|
||||
then
|
||||
echo -e "Backward compatibility check: Failed to clone previous release tests\tFAIL" >> /test_output/test_results.tsv
|
||||
elif ! [ "$(ls -A previous_release_package_folder/clickhouse-common-static_*.deb && ls -A previous_release_package_folder/clickhouse-server_*.deb)" ]
|
||||
then
|
||||
echo -e "Backward compatibility check: Failed to download previous release packages\tFAIL" >> /test_output/test_results.tsv
|
||||
else
|
||||
echo -e "Successfully cloned previous release tests\tOK" >> /test_output/test_results.tsv
|
||||
echo -e "Successfully downloaded previous release packages\tOK" >> /test_output/test_results.tsv
|
||||
|
||||
# Uninstall current packages
|
||||
dpkg --remove clickhouse-client
|
||||
dpkg --remove clickhouse-server
|
||||
dpkg --remove clickhouse-common-static-dbg
|
||||
dpkg --remove clickhouse-common-static
|
||||
|
||||
rm -rf /var/lib/clickhouse/*
|
||||
|
||||
# Make BC check more funny by forcing Ordinary engine for system database
|
||||
mkdir /var/lib/clickhouse/metadata
|
||||
echo "ATTACH DATABASE system ENGINE=Ordinary" > /var/lib/clickhouse/metadata/system.sql
|
||||
|
||||
# Install previous release packages
|
||||
install_packages previous_release_package_folder
|
||||
|
||||
# Start server from previous release
|
||||
# Previous version may not be ready for fault injections
|
||||
export ZOOKEEPER_FAULT_INJECTION=0
|
||||
configure
|
||||
|
||||
# Avoid "Setting s3_check_objects_after_upload is neither a builtin setting..."
|
||||
rm -f /etc/clickhouse-server/users.d/enable_blobs_check.xml ||:
|
||||
rm -f /etc/clickhouse-server/users.d/marks.xml ||:
|
||||
|
||||
# Remove s3 related configs to avoid "there is no disk type `cache`"
|
||||
rm -f /etc/clickhouse-server/config.d/storage_conf.xml ||:
|
||||
rm -f /etc/clickhouse-server/config.d/azure_storage_conf.xml ||:
|
||||
|
||||
# Turn on after 22.12
|
||||
rm -f /etc/clickhouse-server/config.d/compressed_marks_and_index.xml ||:
|
||||
# it uses recently introduced settings which previous versions may not have
|
||||
rm -f /etc/clickhouse-server/users.d/insert_keeper_retries.xml ||:
|
||||
|
||||
start
|
||||
|
||||
clickhouse-client --query="SELECT 'Server version: ', version()"
|
||||
|
||||
# Install new package before running stress test because we should use new
|
||||
# clickhouse-client and new clickhouse-test.
|
||||
#
|
||||
# But we should leave old binary in /usr/bin/ and debug symbols in
|
||||
# /usr/lib/debug/usr/bin (if any) for gdb and internal DWARF parser, so it
|
||||
# will print sane stacktraces and also to avoid possible crashes.
|
||||
#
|
||||
# FIXME: those files can be extracted directly from debian package, but
|
||||
# actually better solution will be to use different PATH instead of playing
|
||||
# games with files from packages.
|
||||
mv /usr/bin/clickhouse previous_release_package_folder/
|
||||
mv /usr/lib/debug/usr/bin/clickhouse.debug previous_release_package_folder/
|
||||
install_packages package_folder
|
||||
mv /usr/bin/clickhouse package_folder/
|
||||
mv /usr/lib/debug/usr/bin/clickhouse.debug package_folder/
|
||||
mv previous_release_package_folder/clickhouse /usr/bin/
|
||||
mv previous_release_package_folder/clickhouse.debug /usr/lib/debug/usr/bin/clickhouse.debug
|
||||
|
||||
mkdir tmp_stress_output
|
||||
|
||||
./stress --test-cmd="/usr/bin/clickhouse-test --queries=\"previous_release_repository/tests/queries\"" --backward-compatibility-check --output-folder tmp_stress_output --global-time-limit=1200 \
|
||||
&& echo -e 'Backward compatibility check: Test script exit code\tOK' >> /test_output/test_results.tsv \
|
||||
|| echo -e 'Backward compatibility check: Test script failed\tFAIL' >> /test_output/test_results.tsv
|
||||
rm -rf tmp_stress_output
|
||||
|
||||
# We experienced deadlocks in this command in very rare cases. Let's debug it:
|
||||
timeout 10m clickhouse-client --query="SELECT 'Tables count:', count() FROM system.tables" ||
|
||||
(
|
||||
echo "thread apply all backtrace (on select tables count)" >> /test_output/gdb.log
|
||||
timeout 30m gdb -batch -ex 'thread apply all backtrace' -p "$(cat /var/run/clickhouse-server/clickhouse-server.pid)" | ts '%Y-%m-%d %H:%M:%S' >> /test_output/gdb.log
|
||||
clickhouse stop --force
|
||||
)
|
||||
|
||||
stop 1
|
||||
mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.backward.stress.log
|
||||
|
||||
# Start new server
|
||||
mv package_folder/clickhouse /usr/bin/
|
||||
mv package_folder/clickhouse.debug /usr/lib/debug/usr/bin/clickhouse.debug
|
||||
# Disable fault injections on start (we don't test them here, and it can lead to tons of requests in case of huge number of tables).
|
||||
export ZOOKEEPER_FAULT_INJECTION=0
|
||||
configure
|
||||
start 500
|
||||
clickhouse-client --query "SELECT 'Backward compatibility check: Server successfully started', 'OK'" >> /test_output/test_results.tsv \
|
||||
|| (echo -e 'Backward compatibility check: Server failed to start\tFAIL' >> /test_output/test_results.tsv \
|
||||
&& grep -a "<Error>.*Application" /var/log/clickhouse-server/clickhouse-server.log >> /test_output/bc_check_application_errors.txt)
|
||||
|
||||
clickhouse-client --query="SELECT 'Server version: ', version()"
|
||||
|
||||
# Let the server run for a while before checking log.
|
||||
sleep 60
|
||||
|
||||
stop
|
||||
mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.backward.dirty.log
|
||||
|
||||
# Error messages (we should ignore some errors)
|
||||
# FIXME https://github.com/ClickHouse/ClickHouse/issues/38643 ("Unknown index: idx.")
|
||||
# FIXME https://github.com/ClickHouse/ClickHouse/issues/39174 ("Cannot parse string 'Hello' as UInt64")
|
||||
# FIXME Not sure if it's expected, but some tests from BC check may not be finished yet when we restarting server.
|
||||
# Let's just ignore all errors from queries ("} <Error> TCPHandler: Code:", "} <Error> executeQuery: Code:")
|
||||
# FIXME https://github.com/ClickHouse/ClickHouse/issues/39197 ("Missing columns: 'v3' while processing query: 'v3, k, v1, v2, p'")
|
||||
# NOTE Incompatibility was introduced in https://github.com/ClickHouse/ClickHouse/pull/39263, it's expected
|
||||
# ("This engine is deprecated and is not supported in transactions", "[Queue = DB::MergeMutateRuntimeQueue]: Code: 235. DB::Exception: Part")
|
||||
# FIXME https://github.com/ClickHouse/ClickHouse/issues/39174 - bad mutation does not indicate backward incompatibility
|
||||
echo "Check for Error messages in server log:"
|
||||
zgrep -Fav -e "Code: 236. DB::Exception: Cancelled merging parts" \
|
||||
-e "Code: 236. DB::Exception: Cancelled mutating parts" \
|
||||
-e "REPLICA_IS_ALREADY_ACTIVE" \
|
||||
-e "REPLICA_ALREADY_EXISTS" \
|
||||
-e "ALL_REPLICAS_LOST" \
|
||||
-e "DDLWorker: Cannot parse DDL task query" \
|
||||
-e "RaftInstance: failed to accept a rpc connection due to error 125" \
|
||||
-e "UNKNOWN_DATABASE" \
|
||||
-e "NETWORK_ERROR" \
|
||||
-e "UNKNOWN_TABLE" \
|
||||
-e "ZooKeeperClient" \
|
||||
-e "KEEPER_EXCEPTION" \
|
||||
-e "DirectoryMonitor" \
|
||||
-e "TABLE_IS_READ_ONLY" \
|
||||
-e "Code: 1000, e.code() = 111, Connection refused" \
|
||||
-e "UNFINISHED" \
|
||||
-e "NETLINK_ERROR" \
|
||||
-e "Renaming unexpected part" \
|
||||
-e "PART_IS_TEMPORARILY_LOCKED" \
|
||||
-e "and a merge is impossible: we didn't find" \
|
||||
-e "found in queue and some source parts for it was lost" \
|
||||
-e "is lost forever." \
|
||||
-e "Unknown index: idx." \
|
||||
-e "Cannot parse string 'Hello' as UInt64" \
|
||||
-e "} <Error> TCPHandler: Code:" \
|
||||
-e "} <Error> executeQuery: Code:" \
|
||||
-e "Missing columns: 'v3' while processing query: 'v3, k, v1, v2, p'" \
|
||||
-e "This engine is deprecated and is not supported in transactions" \
|
||||
-e "[Queue = DB::MergeMutateRuntimeQueue]: Code: 235. DB::Exception: Part" \
|
||||
-e "The set of parts restored in place of" \
|
||||
-e "(ReplicatedMergeTreeAttachThread): Initialization failed. Error" \
|
||||
-e "Code: 269. DB::Exception: Destination table is myself" \
|
||||
-e "Coordination::Exception: Connection loss" \
|
||||
-e "MutateFromLogEntryTask" \
|
||||
-e "No connection to ZooKeeper, cannot get shared table ID" \
|
||||
-e "Session expired" \
|
||||
/var/log/clickhouse-server/clickhouse-server.backward.dirty.log | zgrep -Fa "<Error>" > /test_output/bc_check_error_messages.txt \
|
||||
&& echo -e 'Backward compatibility check: Error message in clickhouse-server.log (see bc_check_error_messages.txt)\tFAIL' >> /test_output/test_results.tsv \
|
||||
|| echo -e 'Backward compatibility check: No Error messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv
|
||||
|
||||
# Remove file bc_check_error_messages.txt if it's empty
|
||||
[ -s /test_output/bc_check_error_messages.txt ] || rm /test_output/bc_check_error_messages.txt
|
||||
|
||||
# Sanitizer asserts
|
||||
zgrep -Fa "==================" /var/log/clickhouse-server/stderr.log >> /test_output/tmp
|
||||
zgrep -Fa "WARNING" /var/log/clickhouse-server/stderr.log >> /test_output/tmp
|
||||
zgrep -Fav -e "ASan doesn't fully support makecontext/swapcontext functions" -e "DB::Exception" /test_output/tmp > /dev/null \
|
||||
&& echo -e 'Backward compatibility check: Sanitizer assert (in stderr.log)\tFAIL' >> /test_output/test_results.tsv \
|
||||
|| echo -e 'Backward compatibility check: No sanitizer asserts\tOK' >> /test_output/test_results.tsv
|
||||
rm -f /test_output/tmp
|
||||
|
||||
# OOM
|
||||
zgrep -Fa " <Fatal> Application: Child process was terminated by signal 9" /var/log/clickhouse-server/clickhouse-server.backward.*.log > /dev/null \
|
||||
&& echo -e 'Backward compatibility check: OOM killer (or signal 9) in clickhouse-server.log\tFAIL' >> /test_output/test_results.tsv \
|
||||
|| echo -e 'Backward compatibility check: No OOM messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv
|
||||
|
||||
# Logical errors
|
||||
echo "Check for Logical errors in server log:"
|
||||
zgrep -Fa -A20 "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-server/clickhouse-server.backward.*.log > /test_output/bc_check_logical_errors.txt \
|
||||
&& echo -e 'Backward compatibility check: Logical error thrown (see clickhouse-server.log or bc_check_logical_errors.txt)\tFAIL' >> /test_output/test_results.tsv \
|
||||
|| echo -e 'Backward compatibility check: No logical errors\tOK' >> /test_output/test_results.tsv
|
||||
|
||||
# Remove file bc_check_logical_errors.txt if it's empty
|
||||
[ -s /test_output/bc_check_logical_errors.txt ] || rm /test_output/bc_check_logical_errors.txt
|
||||
|
||||
# Crash
|
||||
zgrep -Fa "########################################" /var/log/clickhouse-server/clickhouse-server.backward.*.log > /dev/null \
|
||||
&& echo -e 'Backward compatibility check: Killed by signal (in clickhouse-server.log)\tFAIL' >> /test_output/test_results.tsv \
|
||||
|| echo -e 'Backward compatibility check: Not crashed\tOK' >> /test_output/test_results.tsv
|
||||
|
||||
# It also checks for crash without stacktrace (printed by watchdog)
|
||||
echo "Check for Fatal message in server log:"
|
||||
zgrep -Fa " <Fatal> " /var/log/clickhouse-server/clickhouse-server.backward.*.log > /test_output/bc_check_fatal_messages.txt \
|
||||
&& echo -e 'Backward compatibility check: Fatal message in clickhouse-server.log (see bc_check_fatal_messages.txt)\tFAIL' >> /test_output/test_results.tsv \
|
||||
|| echo -e 'Backward compatibility check: No fatal messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv
|
||||
|
||||
# Remove file bc_check_fatal_messages.txt if it's empty
|
||||
[ -s /test_output/bc_check_fatal_messages.txt ] || rm /test_output/bc_check_fatal_messages.txt
|
||||
|
||||
tar -chf /test_output/coordination.backward.tar /var/lib/clickhouse/coordination ||:
|
||||
for table in query_log trace_log
|
||||
do
|
||||
clickhouse-local --path /var/lib/clickhouse/ --only-system-tables -q "select * from system.$table format TSVWithNamesAndTypes" | pigz > /test_output/$table.backward.tsv.gz ||:
|
||||
done
|
||||
fi
|
||||
fi
|
||||
|
||||
dmesg -T > /test_output/dmesg.log
|
||||
|
@ -14,9 +14,6 @@ def get_options(i, backward_compatibility_check):
    if 0 < i:
        options.append("--order=random")

    if i % 3 == 1:
        options.append("--db-engine=Ordinary")

    if i % 3 == 2 and not backward_compatibility_check:
        options.append(
            '''--db-engine="Replicated('/test/db/test_{}', 's1', 'r1')"'''.format(i)
@ -1,82 +0,0 @@
|
||||
# docker build -t clickhouse/testflows-runner .
|
||||
FROM ubuntu:20.04
|
||||
|
||||
# ARG for quick switch to a given ubuntu mirror
|
||||
ARG apt_archive="http://archive.ubuntu.com"
|
||||
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
|
||||
|
||||
RUN apt-get update \
|
||||
&& env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
|
||||
ca-certificates \
|
||||
bash \
|
||||
btrfs-progs \
|
||||
e2fsprogs \
|
||||
iptables \
|
||||
xfsprogs \
|
||||
tar \
|
||||
pigz \
|
||||
wget \
|
||||
git \
|
||||
iproute2 \
|
||||
cgroupfs-mount \
|
||||
python3-pip \
|
||||
tzdata \
|
||||
libicu-dev \
|
||||
bsdutils \
|
||||
curl \
|
||||
liblua5.1-dev \
|
||||
luajit \
|
||||
libssl-dev \
|
||||
libcurl4-openssl-dev \
|
||||
gdb \
|
||||
&& rm -rf \
|
||||
/var/lib/apt/lists/* \
|
||||
/var/cache/debconf \
|
||||
/tmp/* \
|
||||
&& apt-get clean
|
||||
|
||||
ENV TZ=Europe/Moscow
|
||||
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
|
||||
|
||||
RUN pip3 install urllib3 testflows==1.7.20 docker-compose==1.29.2 docker==5.0.0 dicttoxml kazoo tzlocal==2.1 pytz python-dateutil numpy
|
||||
|
||||
ENV DOCKER_CHANNEL stable
|
||||
ENV DOCKER_VERSION 20.10.6
|
||||
|
||||
# Architecture of the image when BuildKit/buildx is used
|
||||
ARG TARGETARCH
|
||||
|
||||
# Install docker
|
||||
RUN arch=${TARGETARCH:-amd64} \
|
||||
&& case $arch in \
|
||||
amd64) rarch=x86_64 ;; \
|
||||
arm64) rarch=aarch64 ;; \
|
||||
esac \
|
||||
&& set -eux \
|
||||
&& if ! wget -nv -O docker.tgz "https://download.docker.com/linux/static/${DOCKER_CHANNEL}/${rarch}/docker-${DOCKER_VERSION}.tgz"; then \
|
||||
echo >&2 "error: failed to download 'docker-${DOCKER_VERSION}' from '${DOCKER_CHANNEL}' for '${rarch}'" \
|
||||
&& exit 1; \
|
||||
fi \
|
||||
&& tar --extract \
|
||||
--file docker.tgz \
|
||||
--strip-components 1 \
|
||||
--directory /usr/local/bin/ \
|
||||
&& rm docker.tgz \
|
||||
&& dockerd --version \
|
||||
&& docker --version
|
||||
|
||||
COPY modprobe.sh /usr/local/bin/modprobe
|
||||
COPY dockerd-entrypoint.sh /usr/local/bin/
|
||||
COPY process_testflows_result.py /usr/local/bin/
|
||||
|
||||
RUN set -x \
|
||||
&& addgroup --system dockremap \
|
||||
&& adduser --system dockremap \
|
||||
&& adduser dockremap dockremap \
|
||||
&& echo 'dockremap:165536:65536' >> /etc/subuid \
|
||||
&& echo 'dockremap:165536:65536' >> /etc/subgid
|
||||
|
||||
VOLUME /var/lib/docker
|
||||
EXPOSE 2375
|
||||
ENTRYPOINT ["dockerd-entrypoint.sh"]
|
||||
CMD ["sh", "-c", "python3 regression.py --no-color -o new-fails --local --clickhouse-binary-path ${CLICKHOUSE_TESTS_SERVER_BIN_PATH} --log test.log ${TESTFLOWS_OPTS}; cat test.log | tfs report results --format json > results.json; /usr/local/bin/process_testflows_result.py || echo -e 'failure\tCannot parse results' > check_status.tsv; find * -type f | grep _instances | grep clickhouse-server | xargs -n1 tar -rvf clickhouse_logs.tar; gzip -9 clickhouse_logs.tar"]
|
@ -1,39 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
echo "Configure to use Yandex dockerhub-proxy"
|
||||
mkdir -p /etc/docker/
|
||||
cat > /etc/docker/daemon.json << EOF
|
||||
{
|
||||
"insecure-registries" : ["dockerhub-proxy.dockerhub-proxy-zone:5000"],
|
||||
"registry-mirrors" : ["http://dockerhub-proxy.dockerhub-proxy-zone:5000"]
|
||||
}
|
||||
EOF
|
||||
|
||||
# In case of test hung it is convenient to use pytest --pdb to debug it,
|
||||
# and on hung you can simply press Ctrl-C and it will spawn a python pdb,
|
||||
# but on SIGINT dockerd will exit, so ignore it to preserve the daemon.
|
||||
trap '' INT
|
||||
dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 &>/var/log/somefile &
|
||||
|
||||
set +e
|
||||
reties=0
|
||||
while true; do
|
||||
docker info &>/dev/null && break
|
||||
reties=$((reties+1))
|
||||
if [[ $reties -ge 100 ]]; then # 10 sec max
|
||||
echo "Can't start docker daemon, timeout exceeded." >&2
|
||||
exit 1;
|
||||
fi
|
||||
sleep 0.1
|
||||
done
|
||||
set -e
|
||||
|
||||
echo "Start tests"
|
||||
export CLICKHOUSE_TESTS_SERVER_BIN_PATH=/clickhouse
|
||||
export CLICKHOUSE_TESTS_CLIENT_BIN_PATH=/clickhouse
|
||||
export CLICKHOUSE_TESTS_BASE_CONFIG_DIR=/clickhouse-config
|
||||
export CLICKHOUSE_ODBC_BRIDGE_BINARY_PATH=/clickhouse-odbc-bridge
|
||||
|
||||
cd /ClickHouse/tests/testflows
|
||||
exec "$@"
|
@ -1,20 +0,0 @@
|
||||
#!/bin/sh
|
||||
set -eu
|
||||
|
||||
# "modprobe" without modprobe
|
||||
# https://twitter.com/lucabruno/status/902934379835662336
|
||||
|
||||
# this isn't 100% fool-proof, but it'll have a much higher success rate than simply using the "real" modprobe
|
||||
|
||||
# Docker often uses "modprobe -va foo bar baz"
|
||||
# so we ignore modules that start with "-"
|
||||
for module; do
|
||||
if [ "${module#-}" = "$module" ]; then
|
||||
ip link show "$module" || true
|
||||
lsmod | grep "$module" || true
|
||||
fi
|
||||
done
|
||||
|
||||
# remove /usr/local/... from PATH so we can exec the real modprobe as a last resort
|
||||
export PATH='/usr/sbin:/usr/bin:/sbin:/bin'
|
||||
exec modprobe "$@"
|
@ -1,71 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import os
|
||||
import logging
|
||||
import argparse
|
||||
import csv
|
||||
import json
|
||||
|
||||
|
||||
def process_result(result_folder):
|
||||
json_path = os.path.join(result_folder, "results.json")
|
||||
if not os.path.exists(json_path):
|
||||
return "success", "No testflows in branch", None, []
|
||||
|
||||
test_binary_log = os.path.join(result_folder, "test.log")
|
||||
with open(json_path) as source:
|
||||
results = json.loads(source.read())
|
||||
|
||||
total_tests = 0
|
||||
total_ok = 0
|
||||
total_fail = 0
|
||||
total_other = 0
|
||||
test_results = []
|
||||
for test in results["tests"]:
|
||||
test_name = test["test"]["test_name"]
|
||||
test_result = test["result"]["result_type"].upper()
|
||||
test_time = str(test["result"]["message_rtime"])
|
||||
total_tests += 1
|
||||
if test_result == "OK":
|
||||
total_ok += 1
|
||||
elif test_result == "FAIL" or test_result == "ERROR":
|
||||
total_fail += 1
|
||||
else:
|
||||
total_other += 1
|
||||
|
||||
test_results.append((test_name, test_result, test_time))
|
||||
if total_fail != 0:
|
||||
status = "failure"
|
||||
else:
|
||||
status = "success"
|
||||
|
||||
description = "failed: {}, passed: {}, other: {}".format(
|
||||
total_fail, total_ok, total_other
|
||||
)
|
||||
return status, description, test_results, [json_path, test_binary_log]
|
||||
|
||||
|
||||
def write_results(results_file, status_file, results, status):
|
||||
with open(results_file, "w") as f:
|
||||
out = csv.writer(f, delimiter="\t")
|
||||
out.writerows(results)
|
||||
with open(status_file, "w") as f:
|
||||
out = csv.writer(f, delimiter="\t")
|
||||
out.writerow(status)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
|
||||
parser = argparse.ArgumentParser(
|
||||
description="ClickHouse script for parsing results of Testflows tests"
|
||||
)
|
||||
parser.add_argument("--in-results-dir", default="./")
|
||||
parser.add_argument("--out-results-file", default="./test_results.tsv")
|
||||
parser.add_argument("--out-status-file", default="./check_status.tsv")
|
||||
args = parser.parse_args()
|
||||
|
||||
state, description, test_results, logs = process_result(args.in_results_dir)
|
||||
logging.info("Result parsed")
|
||||
status = (state, description)
|
||||
write_results(args.out_results_file, args.out_status_file, test_results, status)
|
||||
logging.info("Result written")
|
@ -118,7 +118,6 @@ Builds ClickHouse in various configurations for use in further steps. You have t
- **Compiler**: `gcc-9` or `clang-10` (or `clang-10-xx` for other architectures e.g. `clang-10-freebsd`).
- **Build type**: `Debug` or `RelWithDebInfo` (cmake).
- **Sanitizer**: `none` (without sanitizers), `address` (ASan), `memory` (MSan), `undefined` (UBSan), or `thread` (TSan).
- **Split** `splitted` is a [split build](../development/build.md#split-build)
- **Status**: `success` or `fail`
- **Build log**: link to the building and files copying log, useful when build failed.
- **Build time**.
@ -130,7 +129,6 @@ Builds ClickHouse in various configurations for use in further steps. You have t
- `clickhouse`: Main built binary.
- `clickhouse-odbc-bridge`
- `unit_tests_dbms`: GoogleTest binary with ClickHouse unit tests.
- `shared_build.tgz`: build with shared libraries.
- `performance.tgz`: Special package for performance tests.


@ -169,16 +167,6 @@ concurrency-related errors. If it fails:
of error.


## Split Build Smoke Test

Checks that the server build in [split build](../development/developer-instruction.md#split-build)
configuration can start and run simple queries. If it fails:

* Fix other test errors first;
* Build the server in [split build](../development/developer-instruction.md#split-build) configuration
  locally and check whether it can start and run `select 1`.


## Compatibility Check
Checks that `clickhouse` binary runs on distributions with old libc versions. If it fails, ask a maintainer for help.

@ -34,7 +34,14 @@ SETTINGS
[kafka_max_block_size = 0,]
[kafka_skip_broken_messages = N,]
[kafka_commit_every_batch = 0,]
[kafka_thread_per_consumer = 0]
[kafka_client_id = '',]
[kafka_poll_timeout_ms = 0,]
[kafka_poll_max_batch_size = 0,]
[kafka_flush_interval_ms = 0,]
[kafka_thread_per_consumer = 0,]
[kafka_handle_error_mode = 'default',]
[kafka_commit_on_select = false,]
[kafka_max_rows_per_message = 1];
```

Required parameters:
@ -46,13 +53,20 @@ Required parameters:

Optional parameters:

- `kafka_row_delimiter` — Delimiter character, which ends the message.
- `kafka_row_delimiter` — Delimiter character, which ends the message. **This setting is deprecated and is no longer used, not left for compatibility reasons.**
- `kafka_schema` — Parameter that must be used if the format requires a schema definition. For example, [Cap’n Proto](https://capnproto.org/) requires the path to the schema file and the name of the root `schema.capnp:Message` object.
- `kafka_num_consumers` — The number of consumers per table. Default: `1`. Specify more consumers if the throughput of one consumer is insufficient. The total number of consumers should not exceed the number of partitions in the topic, since only one consumer can be assigned per partition, and must not be greater than the number of physical cores on the server where ClickHouse is deployed.
- `kafka_max_block_size` — The maximum batch size (in messages) for poll (default: `max_block_size`).
- `kafka_skip_broken_messages` — Kafka message parser tolerance to schema-incompatible messages per block. Default: `0`. If `kafka_skip_broken_messages = N` then the engine skips *N* Kafka messages that cannot be parsed (a message equals a row of data).
- `kafka_commit_every_batch` — Commit every consumed and handled batch instead of a single commit after writing a whole block (default: `0`).
- `kafka_thread_per_consumer` — Provide independent thread for each consumer (default: `0`). When enabled, every consumer flush the data independently, in parallel (otherwise — rows from several consumers squashed to form one block).
- `kafka_num_consumers` — The number of consumers per table. Specify more consumers if the throughput of one consumer is insufficient. The total number of consumers should not exceed the number of partitions in the topic, since only one consumer can be assigned per partition, and must not be greater than the number of physical cores on the server where ClickHouse is deployed. Default: `1`.
- `kafka_max_block_size` — The maximum batch size (in messages) for poll. Default: [max_insert_block_size](../../../operations/settings/settings.md#setting-max_insert_block_size).
- `kafka_skip_broken_messages` — Kafka message parser tolerance to schema-incompatible messages per block. If `kafka_skip_broken_messages = N` then the engine skips *N* Kafka messages that cannot be parsed (a message equals a row of data). Default: `0`.
- `kafka_commit_every_batch` — Commit every consumed and handled batch instead of a single commit after writing a whole block. Default: `0`.
- `kafka_client_id` — Client identifier. Empty by default.
- `kafka_poll_timeout_ms` — Timeout for single poll from Kafka. Default: [stream_poll_timeout_ms](../../../operations/settings/settings.md#stream_poll_timeout_ms).
- `kafka_poll_max_batch_size` — Maximum amount of messages to be polled in a single Kafka poll. Default: [max_block_size](../../../operations/settings/settings.md#setting-max_block_size).
- `kafka_flush_interval_ms` — Timeout for flushing data from Kafka. Default: [stream_flush_interval_ms](../../../operations/settings/settings.md#stream-flush-interval-ms).
- `kafka_thread_per_consumer` — Provide independent thread for each consumer. When enabled, every consumer flush the data independently, in parallel (otherwise — rows from several consumers squashed to form one block). Default: `0`.
- `kafka_handle_error_mode` — How to handle errors for Kafka engine. Possible values: default, stream.
- `kafka_commit_on_select` — Commit messages when select query is made. Default: `false`.
- `kafka_max_rows_per_message` — The maximum number of rows written in one kafka message for row-based formats. Default : `1`.

Examples:

@ -94,7 +108,7 @@ Do not use this method in new projects. If possible, switch old projects to the

``` sql
Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format
[, kafka_row_delimiter, kafka_schema, kafka_num_consumers, kafka_skip_broken_messages])
[, kafka_row_delimiter, kafka_schema, kafka_num_consumers, kafka_max_block_size, kafka_skip_broken_messages, kafka_commit_every_batch, kafka_client_id, kafka_poll_timeout_ms, kafka_poll_max_batch_size, kafka_flush_interval_ms, kafka_thread_per_consumer, kafka_handle_error_mode, kafka_commit_on_select, kafka_max_rows_per_message]);
```

</details>
@ -193,6 +207,14 @@ Example:
- `_headers.name` — Array of message's headers keys.
- `_headers.value` — Array of message's headers values.

## Data formats support {#data-formats-support}

Kafka engine supports all [formats](../../../interfaces/formats.md) supported in ClickHouse.
The number of rows in one Kafka message depends on whether the format is row-based or block-based:

- For row-based formats the number of rows in one Kafka message can be controlled by setting `kafka_max_rows_per_message` (see the sketch below).
- For block-based formats we cannot divide block into smaller parts, but the number of rows in one block can be controlled by general setting [max_block_size](../../../operations/settings/settings.md#setting-max_block_size).

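As a rough illustration of the row-based case (the broker address, topic, and table name here are placeholders, not taken from the examples above):

``` sql
-- Hypothetical row-based example (JSONEachRow): each Kafka message produced
-- by an INSERT carries at most 100 rows because of kafka_max_rows_per_message.
CREATE TABLE readings_queue
(
    sensor_id UInt64,
    value Float64
)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'localhost:9092',
         kafka_topic_list = 'readings',
         kafka_group_name = 'readings_group',
         kafka_format = 'JSONEachRow',
         kafka_max_rows_per_message = 100;

-- Writing 1000 rows therefore produces roughly 10 messages; with a
-- block-based format the split would follow max_block_size instead.
INSERT INTO readings_queue SELECT number, rand() FROM numbers(1000);
```
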
**See Also**

- [Virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns)
@ -37,8 +37,10 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
[nats_max_block_size = N,]
[nats_flush_interval_ms = N,]
[nats_username = 'user',]
[nats_password = 'password']
[redis_password = 'clickhouse']
[nats_password = 'password',]
[nats_token = 'clickhouse',]
[nats_startup_connect_tries = '5']
[nats_max_rows_per_message = 1]
```

Required parameters:
@ -49,7 +51,7 @@ Required parameters:

Optional parameters:

- `nats_row_delimiter` – Delimiter character, which ends the message.
- `nats_row_delimiter` – Delimiter character, which ends the message. **This setting is deprecated and is no longer used, not left for compatibility reasons.**
- `nats_schema` – Parameter that must be used if the format requires a schema definition. For example, [Cap’n Proto](https://capnproto.org/) requires the path to the schema file and the name of the root `schema.capnp:Message` object.
- `nats_num_consumers` – The number of consumers per table. Default: `1`. Specify more consumers if the throughput of one consumer is insufficient.
- `nats_queue_group` – Name for queue group of NATS subscribers. Default is the table name.
@ -57,11 +59,13 @@ Optional parameters:
- `nats_reconnect_wait` – Amount of time in milliseconds to sleep between each reconnect attempt. Default: `5000`.
- `nats_server_list` - Server list for connection. Can be specified to connect to NATS cluster.
- `nats_skip_broken_messages` - NATS message parser tolerance to schema-incompatible messages per block. Default: `0`. If `nats_skip_broken_messages = N` then the engine skips *N* RabbitMQ messages that cannot be parsed (a message equals a row of data).
- `nats_max_block_size` - Number of row collected by poll(s) for flushing data from NATS.
- `nats_flush_interval_ms` - Timeout for flushing data read from NATS.
- `nats_max_block_size` - Number of row collected by poll(s) for flushing data from NATS. Default: [max_insert_block_size](../../../operations/settings/settings.md#setting-max_insert_block_size).
- `nats_flush_interval_ms` - Timeout for flushing data read from NATS. Default: [stream_flush_interval_ms](../../../operations/settings/settings.md#stream-flush-interval-ms).
- `nats_username` - NATS username.
- `nats_password` - NATS password.
- `nats_token` - NATS auth token.
- `nats_startup_connect_tries` - Number of connect tries at startup. Default: `5`.
- `nats_max_rows_per_message` — The maximum number of rows written in one NATS message for row-based formats. (default : `1`).

SSL connection:

@ -159,6 +163,14 @@ If you want to change the target table by using `ALTER`, we recommend disabling

## Virtual Columns {#virtual-columns}

- `_subject` - NATS message subject.
- `_subject` - NATS message subject.

## Data formats support {#data-formats-support}

NATS engine supports all [formats](../../../interfaces/formats.md) supported in ClickHouse.
The number of rows in one NATS message depends on whether the format is row-based or block-based:

- For row-based formats the number of rows in one NATS message can be controlled by setting `nats_max_rows_per_message`.
- For block-based formats we cannot divide block into smaller parts, but the number of rows in one block can be controlled by general setting [max_block_size](../../../operations/settings/settings.md#setting-max_block_size).

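For orientation only, a minimal consumption sketch; the table names, subject, and connection values are illustrative and assume the engine's required connection parameters described earlier on this page:

``` sql
-- NATS source table reading JSONEachRow messages from one subject.
CREATE TABLE metrics_nats (key String, value Float64)
ENGINE = NATS
SETTINGS nats_url = 'localhost:4222',
         nats_subjects = 'metrics',
         nats_format = 'JSONEachRow';

-- Durable storage plus a materialized view that drains the subject in the background.
CREATE TABLE metrics (key String, value Float64)
ENGINE = MergeTree
ORDER BY key;

CREATE MATERIALIZED VIEW metrics_consumer TO metrics
AS SELECT key, value FROM metrics_nats;
```
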
[Original article](https://clickhouse.com/docs/en/engines/table-engines/integrations/nats/) <!--hide-->
@ -37,8 +37,16 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
[rabbitmq_persistent = 0,]
[rabbitmq_skip_broken_messages = N,]
[rabbitmq_max_block_size = N,]
[rabbitmq_flush_interval_ms = N]
[rabbitmq_queue_settings_list = 'x-dead-letter-exchange=my-dlx,x-max-length=10,x-overflow=reject-publish']
[rabbitmq_flush_interval_ms = N,]
[rabbitmq_queue_settings_list = 'x-dead-letter-exchange=my-dlx,x-max-length=10,x-overflow=reject-publish',]
[rabbitmq_queue_consume = false,]
[rabbitmq_address = '',]
[rabbitmq_vhost = '/',]
[rabbitmq_queue_consume = false,]
[rabbitmq_username = '',]
[rabbitmq_password = '',]
[rabbitmq_commit_on_select = false,]
[rabbitmq_max_rows_per_message = 1]
```

Required parameters:
@ -49,19 +57,27 @@ Required parameters:

Optional parameters:

- `rabbitmq_exchange_type` – The type of RabbitMQ exchange: `direct`, `fanout`, `topic`, `headers`, `consistent_hash`. Default: `fanout`.
- `rabbitmq_routing_key_list` – A comma-separated list of routing keys.
- `rabbitmq_row_delimiter` – Delimiter character, which ends the message.
- `rabbitmq_schema` – Parameter that must be used if the format requires a schema definition. For example, [Cap’n Proto](https://capnproto.org/) requires the path to the schema file and the name of the root `schema.capnp:Message` object.
- `rabbitmq_num_consumers` – The number of consumers per table. Default: `1`. Specify more consumers if the throughput of one consumer is insufficient.
- `rabbitmq_num_queues` – Total number of queues. Default: `1`. Increasing this number can significantly improve performance.
- `rabbitmq_queue_base` - Specify a hint for queue names. Use cases of this setting are described below.
- `rabbitmq_deadletter_exchange` - Specify name for a [dead letter exchange](https://www.rabbitmq.com/dlx.html). You can create another table with this exchange name and collect messages in cases when they are republished to dead letter exchange. By default dead letter exchange is not specified.
- `rabbitmq_persistent` - If set to 1 (true), in insert query delivery mode will be set to 2 (marks messages as 'persistent'). Default: `0`.
- `rabbitmq_skip_broken_messages` – RabbitMQ message parser tolerance to schema-incompatible messages per block. Default: `0`. If `rabbitmq_skip_broken_messages = N` then the engine skips *N* RabbitMQ messages that cannot be parsed (a message equals a row of data).
- `rabbitmq_max_block_size`
- `rabbitmq_flush_interval_ms`
- `rabbitmq_queue_settings_list` - allows to set RabbitMQ settings when creating a queue. Available settings: `x-max-length`, `x-max-length-bytes`, `x-message-ttl`, `x-expires`, `x-priority`, `x-max-priority`, `x-overflow`, `x-dead-letter-exchange`, `x-queue-type`. The `durable` setting is enabled automatically for the queue.
- `rabbitmq_exchange_type` – The type of RabbitMQ exchange: `direct`, `fanout`, `topic`, `headers`, `consistent_hash`. Default: `fanout`.
- `rabbitmq_routing_key_list` – A comma-separated list of routing keys.
- `rabbitmq_row_delimiter` – Delimiter character, which ends the message. **This setting is deprecated and is no longer used, not left for compatibility reasons.**
- `rabbitmq_schema` – Parameter that must be used if the format requires a schema definition. For example, [Cap’n Proto](https://capnproto.org/) requires the path to the schema file and the name of the root `schema.capnp:Message` object.
- `rabbitmq_num_consumers` – The number of consumers per table. Specify more consumers if the throughput of one consumer is insufficient. Default: `1`
- `rabbitmq_num_queues` – Total number of queues. Increasing this number can significantly improve performance. Default: `1`.
- `rabbitmq_queue_base` - Specify a hint for queue names. Use cases of this setting are described below.
- `rabbitmq_deadletter_exchange` - Specify name for a [dead letter exchange](https://www.rabbitmq.com/dlx.html). You can create another table with this exchange name and collect messages in cases when they are republished to dead letter exchange. By default dead letter exchange is not specified.
- `rabbitmq_persistent` - If set to 1 (true), in insert query delivery mode will be set to 2 (marks messages as 'persistent'). Default: `0`.
- `rabbitmq_skip_broken_messages` – RabbitMQ message parser tolerance to schema-incompatible messages per block. If `rabbitmq_skip_broken_messages = N` then the engine skips *N* RabbitMQ messages that cannot be parsed (a message equals a row of data). Default: `0`.
- `rabbitmq_max_block_size` - Number of row collected before flushing data from RabbitMQ. Default: [max_insert_block_size](../../../operations/settings/settings.md#setting-max_insert_block_size).
- `rabbitmq_flush_interval_ms` - Timeout for flushing data from RabbitMQ. Default: [stream_flush_interval_ms](../../../operations/settings/settings.md#stream-flush-interval-ms).
- `rabbitmq_queue_settings_list` - allows to set RabbitMQ settings when creating a queue. Available settings: `x-max-length`, `x-max-length-bytes`, `x-message-ttl`, `x-expires`, `x-priority`, `x-max-priority`, `x-overflow`, `x-dead-letter-exchange`, `x-queue-type`. The `durable` setting is enabled automatically for the queue.
- `rabbitmq_address` - Address for connection. Use ether this setting or `rabbitmq_host_port`.
- `rabbitmq_vhost` - RabbitMQ vhost. Default: `'\'`.
- `rabbitmq_queue_consume` - Use user-defined queues and do not make any RabbitMQ setup: declaring exchanges, queues, bindings. Default: `false`.
- `rabbitmq_username` - RabbitMQ username.
- `rabbitmq_password` - RabbitMQ password.
- `rabbitmq_commit_on_select` - Commit messages when select query is made. Default: `false`.
- `rabbitmq_max_rows_per_message` — The maximum number of rows written in one RabbitMQ message for row-based formats. Default : `1`.


SSL connection:

@ -166,11 +182,20 @@ Example:

## Virtual Columns {#virtual-columns}

- `_exchange_name` - RabbitMQ exchange name.
- `_channel_id` - ChannelID, on which consumer, who received the message, was declared.
- `_delivery_tag` - DeliveryTag of the received message. Scoped per channel.
- `_redelivered` - `redelivered` flag of the message.
- `_message_id` - messageID of the received message; non-empty if was set, when message was published.
- `_timestamp` - timestamp of the received message; non-empty if was set, when message was published.
- `_exchange_name` - RabbitMQ exchange name.
- `_channel_id` - ChannelID, on which consumer, who received the message, was declared.
- `_delivery_tag` - DeliveryTag of the received message. Scoped per channel.
- `_redelivered` - `redelivered` flag of the message.
- `_message_id` - messageID of the received message; non-empty if was set, when message was published.
- `_timestamp` - timestamp of the received message; non-empty if was set, when message was published.

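A hedged illustration of reading these virtual columns; the `orders_queue` table and its columns are hypothetical and assumed to be defined with `ENGINE = RabbitMQ` as shown earlier:

``` sql
-- Virtual columns are selected alongside ordinary columns; note that a plain
-- SELECT consumes messages (see rabbitmq_commit_on_select above).
SELECT
    order_id,
    amount,
    _exchange_name,
    _channel_id,
    _delivery_tag,
    _redelivered
FROM orders_queue
LIMIT 10;
```
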
## Data formats support {#data-formats-support}

RabbitMQ engine supports all [formats](../../../interfaces/formats.md) supported in ClickHouse.
The number of rows in one RabbitMQ message depends on whether the format is row-based or block-based:

- For row-based formats the number of rows in one RabbitMQ message can be controlled by setting `rabbitmq_max_rows_per_message`.
- For block-based formats we cannot divide block into smaller parts, but the number of rows in one block can be controlled by general setting [max_block_size](../../../operations/settings/settings.md#setting-max_block_size).


[Original article](https://clickhouse.com/docs/en/engines/table-engines/integrations/rabbitmq/) <!--hide-->
@ -2,11 +2,10 @@

slug: /en/interfaces/cli
sidebar_position: 17
sidebar_label: Command-Line Client
title: Command-Line Client
---
import ConnectionDetails from '@site/docs/en/_snippets/_gather_your_details_native.md';

# Command-line Client

## clickhouse-client

ClickHouse provides a native command-line client: `clickhouse-client`. The client supports command-line options and configuration files. For more information, see [Configuring](#interfaces_cli_configuration).

File diff suppressed because it is too large
1573
docs/en/interfaces/schema-inference.md
Normal file
File diff suppressed because it is too large
@ -9,6 +9,29 @@ slug: /en/operations/backup

- [Backup/restore using an S3 disk](#backuprestore-using-an-s3-disk)
- [Alternatives](#alternatives)

## Command summary

```bash
 BACKUP|RESTORE
  TABLE [db.]table_name [AS [db.]table_name_in_backup]
    [PARTITION[S] partition_expr [,...]] |
  DICTIONARY [db.]dictionary_name [AS [db.]name_in_backup] |
  DATABASE database_name [AS database_name_in_backup]
    [EXCEPT TABLES ...] |
  TEMPORARY TABLE table_name [AS table_name_in_backup] |
  VIEW view_name [AS view_name_in_backup]
  ALL TEMPORARY TABLES [EXCEPT ...] |
  ALL DATABASES [EXCEPT ...] } [,...]
  [ON CLUSTER 'cluster_name']
  TO|FROM File('<path>/<filename>') | Disk('<disk_name>', '<path>/') | S3('<S3 endpoint>/<path>', '<Access key ID>', '<Secret access key>')
  [SETTINGS base_backup = File('<path>/<filename>') | Disk(...) | S3('<S3 endpoint>/<path>', '<Access key ID>', '<Secret access key>')]

```

:::note ALL
`ALL` is only applicable to the `RESTORE` command.
:::
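
As a minimal sketch of the syntax above — assuming a table `test.table` and a backup destination disk named `backups` configured on the server — a backup and a restore under a new name could look like:

```sql
-- back up one table to the assumed 'backups' disk
BACKUP TABLE test.table TO Disk('backups', '1.zip');

-- restore it from the same archive under a different name
RESTORE TABLE test.table AS test.table_restored FROM Disk('backups', '1.zip');
```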

## Background

While [replication](../engines/table-engines/mergetree-family/replication.md) provides protection from hardware failures, it does not protect against human errors: accidental deletion of data, deletion of the wrong table or a table on the wrong cluster, and software bugs that result in incorrect data processing or data corruption. In many cases mistakes like these will affect all replicas. ClickHouse has built-in safeguards to prevent some types of mistakes — for example, by default [you can’t just drop tables with a MergeTree-like engine containing more than 50 Gb of data](server-configuration-parameters/settings.md#max-table-size-to-drop). However, these safeguards do not cover all possible cases and can be circumvented.

@ -127,6 +127,13 @@ Default value: 100000.

A large number of parts in a table reduces performance of ClickHouse queries and increases ClickHouse boot time. Most often this is a consequence of an incorrect design (mistakes when choosing a partitioning strategy - too small partitions).

## simultaneous_parts_removal_limit {#simultaneous-parts-removal-limit}

If there are a lot of outdated parts, the cleanup thread will try to delete up to `simultaneous_parts_removal_limit` parts during one iteration.
`simultaneous_parts_removal_limit` set to `0` means unlimited.

Default value: 0.
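
As an illustration only, the limit can be applied per table at creation time; the table definition below is hypothetical:

```sql
CREATE TABLE example_events
(
    event_date Date,
    id UInt64
)
ENGINE = MergeTree
ORDER BY id
SETTINGS simultaneous_parts_removal_limit = 100;  -- at most 100 outdated parts per cleanup iteration
```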

## replicated_deduplication_window {#replicated-deduplication-window}

The number of most recently inserted blocks for which ClickHouse Keeper stores hash sums to check for duplicates.

@ -1011,6 +1011,12 @@ The default value is 7500.

The smaller the value, the more often data is flushed into the table. Setting the value too low leads to poor performance.

## stream_poll_timeout_ms {#stream_poll_timeout_ms}

Timeout for polling data from/to streaming storages.

Default value: 500.

## load_balancing {#settings-load_balancing}

Specifies the algorithm of replica selection that is used for distributed query processing.
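
For example (a sketch, not an exhaustive list of supported values), the algorithm can be switched for the current session:

```sql
SET load_balancing = 'round_robin';  -- assumed value; 'random' is the default
```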

@ -3625,7 +3631,7 @@ z IPv4

Controls making inferred types `Nullable` in schema inference for formats without information about nullability.
If the setting is enabled, the inferred type will be `Nullable` only if the column contains `NULL` in a sample that is parsed during schema inference.

Default value: `false`.
Default value: `true`.
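
A quick sketch of the effect; the setting is `schema_inference_make_columns_nullable` (the same name that appears in the cache example below), and the file path is hypothetical:

```sql
-- infer a schema without wrapping columns in Nullable
DESCRIBE file('data.jsonl')
SETTINGS schema_inference_make_columns_nullable = 0;
```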

## input_format_try_infer_integers {#input_format_try_infer_integers}

70
docs/en/operations/system-tables/schema_inference_cache.md
Normal file
@ -0,0 +1,70 @@

---
slug: /en/operations/system-tables/schema_inference_cache
---
# Schema inference cache

Contains information about all cached file schemas.

Columns:
- `storage` ([String](/docs/en/sql-reference/data-types/string.md)) — Storage name: File, URL, S3 or HDFS.
- `source` ([String](/docs/en/sql-reference/data-types/string.md)) — File source.
- `format` ([String](/docs/en/sql-reference/data-types/string.md)) — Format name.
- `additional_format_info` ([String](/docs/en/sql-reference/data-types/string.md)) - Additional information required to identify the schema. For example, format-specific settings.
- `registration_time` ([DateTime](/docs/en/sql-reference/data-types/datetime.md)) — Timestamp when the schema was added to the cache.
- `schema` ([String](/docs/en/sql-reference/data-types/string.md)) - Cached schema.

**Example**

Let's say we have a file `data.jsonl` with this content:
```json
{"id" : 1, "age" : 25, "name" : "Josh", "hobbies" : ["football", "cooking", "music"]}
{"id" : 2, "age" : 19, "name" : "Alan", "hobbies" : ["tennis", "art"]}
{"id" : 3, "age" : 32, "name" : "Lana", "hobbies" : ["fitness", "reading", "shopping"]}
{"id" : 4, "age" : 47, "name" : "Brayan", "hobbies" : ["movies", "skydiving"]}
```

:::tip
Place `data.jsonl` in the `user_files_path` directory. You can find this by looking
in your ClickHouse configuration files. The default is:
```
<user_files_path>/var/lib/clickhouse/user_files/</user_files_path>
```
:::

Open `clickhouse-client` and run the `DESCRIBE` query:

```sql
DESCRIBE file('data.jsonl') SETTINGS input_format_try_infer_integers=0;
```

```response
┌─name────┬─type────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ id │ Nullable(Float64) │ │ │ │ │ │
│ age │ Nullable(Float64) │ │ │ │ │ │
│ name │ Nullable(String) │ │ │ │ │ │
│ hobbies │ Array(Nullable(String)) │ │ │ │ │ │
└─────────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
```

Let's see the content of the `system.schema_inference_cache` table:

```sql
SELECT *
FROM system.schema_inference_cache
FORMAT Vertical
```
```response
Row 1:
──────
storage: File
source: /home/droscigno/user_files/data.jsonl
format: JSONEachRow
additional_format_info: schema_inference_hints=, max_rows_to_read_for_schema_inference=25000, schema_inference_make_columns_nullable=true, try_infer_integers=false, try_infer_dates=true, try_infer_datetimes=true, try_infer_numbers_from_strings=true, read_bools_as_numbers=true, try_infer_objects=false
registration_time: 2022-12-29 17:49:52
schema: id Nullable(Float64), age Nullable(Float64), name Nullable(String), hobbies Array(Nullable(String))
```

**See also**
- [Automatic schema inference from input data](/docs/en/interfaces/schema-inference.md)

@ -0,0 +1,41 @@

---
slug: /en/sql-reference/aggregate-functions/reference/grouparraylast
sidebar_position: 110
---

# groupArrayLast

Syntax: `groupArrayLast(max_size)(x)`

Creates an array of the last argument values.
For example, `groupArrayLast(1)(x)` is equivalent to `[anyLast(x)]`.

In some cases, you can still rely on the order of execution. This applies to cases when `SELECT` comes from a subquery that uses `ORDER BY`.

**Example**

Query:

```sql
select groupArrayLast(2)(number+1) numbers from numbers(10)
```

Result:

```text
┌─numbers─┐
│ [9,10] │
└─────────┘
```

Compared to `groupArray`:

```sql
select groupArray(2)(number+1) numbers from numbers(10)
```

```text
┌─numbers─┐
│ [1,2] │
└─────────┘
```
@ -32,6 +32,7 @@ ClickHouse-specific aggregate functions:

- [topK](../../../sql-reference/aggregate-functions/reference/topk.md)
- [topKWeighted](../../../sql-reference/aggregate-functions/reference/topkweighted.md)
- [groupArray](../../../sql-reference/aggregate-functions/reference/grouparray.md)
- [groupArrayLast](../../../sql-reference/aggregate-functions/reference/grouparraylast.md)
- [groupUniqArray](../../../sql-reference/aggregate-functions/reference/groupuniqarray.md)
- [groupArrayInsertAt](../../../sql-reference/aggregate-functions/reference/grouparrayinsertat.md)
- [groupArrayMovingAvg](../../../sql-reference/aggregate-functions/reference/grouparraymovingavg.md)

@ -825,6 +825,23 @@ Setting fields:

The `table` or `where` fields cannot be used together with the `query` field. And either one of the `table` or `query` fields must be declared.
:::

## Null

A special source that can be used to create dummy (empty) dictionaries. Such dictionaries can be useful for tests or in setups with separated data and query nodes with Distributed tables.

``` sql
CREATE DICTIONARY null_dict (
    id           UInt64,
    val          UInt8,
    default_val  UInt8 DEFAULT 123,
    nullable_val Nullable(UInt8)
)
PRIMARY KEY id
SOURCE(NULL())
LAYOUT(FLAT())
LIFETIME(0);
```

## Related Content

- [Using dictionaries to accelerate queries](https://clickhouse.com/blog/faster-queries-dictionaries-clickhouse)

@ -410,35 +410,35 @@ Converts a date with time to a certain fixed date, while preserving the time.

## toRelativeYearNum

Converts a date with time or date to the number of the year, starting from a certain fixed point in the past.
Converts a date or date with time to the number of the year, starting from a certain fixed point in the past.

## toRelativeQuarterNum

Converts a date with time or date to the number of the quarter, starting from a certain fixed point in the past.
Converts a date or date with time to the number of the quarter, starting from a certain fixed point in the past.

## toRelativeMonthNum

Converts a date with time or date to the number of the month, starting from a certain fixed point in the past.
Converts a date or date with time to the number of the month, starting from a certain fixed point in the past.

## toRelativeWeekNum

Converts a date with time or date to the number of the week, starting from a certain fixed point in the past.
Converts a date or date with time to the number of the week, starting from a certain fixed point in the past.

## toRelativeDayNum

Converts a date with time or date to the number of the day, starting from a certain fixed point in the past.
Converts a date or date with time to the number of the day, starting from a certain fixed point in the past.

## toRelativeHourNum

Converts a date with time or date to the number of the hour, starting from a certain fixed point in the past.
Converts a date or date with time to the number of the hour, starting from a certain fixed point in the past.

## toRelativeMinuteNum

Converts a date with time or date to the number of the minute, starting from a certain fixed point in the past.
Converts a date or date with time to the number of the minute, starting from a certain fixed point in the past.

## toRelativeSecondNum

Converts a date with time or date to the number of the second, starting from a certain fixed point in the past.
Converts a date or date with time to the number of the second, starting from a certain fixed point in the past.
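
As a small sketch of how these functions are typically combined, subtracting two relative numbers gives the distance in whole units (days in this assumed example):

```sql
SELECT toRelativeDayNum(toDate('2022-01-01')) - toRelativeDayNum(toDate('2021-12-29')) AS day_diff;
-- day_diff = 3
```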

## toISOYear

@ -517,6 +517,154 @@ SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(d
└────────────┴───────────┴───────────┴───────────┘
```

## age

Returns the `unit` component of the difference between `startdate` and `enddate`. The difference is calculated using a precision of 1 second.
E.g. the difference between `2021-12-29` and `2022-01-01` is 3 days for `day` unit, 0 months for `month` unit, 0 years for `year` unit.

**Syntax**

``` sql
age('unit', startdate, enddate, [timezone])
```

**Arguments**

- `unit` — The type of interval for result. [String](../../sql-reference/data-types/string.md).
    Possible values:

    - `second` (possible abbreviations: `ss`, `s`)
    - `minute` (possible abbreviations: `mi`, `n`)
    - `hour` (possible abbreviations: `hh`, `h`)
    - `day` (possible abbreviations: `dd`, `d`)
    - `week` (possible abbreviations: `wk`, `ww`)
    - `month` (possible abbreviations: `mm`, `m`)
    - `quarter` (possible abbreviations: `qq`, `q`)
    - `year` (possible abbreviations: `yyyy`, `yy`)

- `startdate` — The first time value to subtract (the subtrahend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).

- `enddate` — The second time value to subtract from (the minuend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).

- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional). If specified, it is applied to both `startdate` and `enddate`. If not specified, timezones of `startdate` and `enddate` are used. If they are not the same, the result is unspecified. [String](../../sql-reference/data-types/string.md).

**Returned value**

Difference between `enddate` and `startdate` expressed in `unit`.

Type: [Int](../../sql-reference/data-types/int-uint.md).

**Example**

Query:

``` sql
SELECT age('hour', toDateTime('2018-01-01 22:30:00'), toDateTime('2018-01-02 23:00:00'));
```

Result:

``` text
┌─age('hour', toDateTime('2018-01-01 22:30:00'), toDateTime('2018-01-02 23:00:00'))─┐
│ 24 │
└───────────────────────────────────────────────────────────────────────────────────┘
```

Query:

``` sql
SELECT
    toDate('2022-01-01') AS e,
    toDate('2021-12-29') AS s,
    age('day', s, e) AS day_age,
    age('month', s, e) AS month__age,
    age('year', s, e) AS year_age;
```

Result:

``` text
┌──────────e─┬──────────s─┬─day_age─┬─month__age─┬─year_age─┐
│ 2022-01-01 │ 2021-12-29 │ 3 │ 0 │ 0 │
└────────────┴────────────┴─────────┴────────────┴──────────┘
```

## date\_diff

Returns the count of the specified `unit` boundaries crossed between the `startdate` and `enddate`.
The difference is calculated using relative units, e.g. the difference between `2021-12-29` and `2022-01-01` is 3 days for day unit (see [toRelativeDayNum](#torelativedaynum)), 1 month for month unit (see [toRelativeMonthNum](#torelativemonthnum)), 1 year for year unit (see [toRelativeYearNum](#torelativeyearnum)).

**Syntax**

``` sql
date_diff('unit', startdate, enddate, [timezone])
```

Aliases: `dateDiff`, `DATE_DIFF`.

**Arguments**

- `unit` — The type of interval for result. [String](../../sql-reference/data-types/string.md).
    Possible values:

    - `second` (possible abbreviations: `ss`, `s`)
    - `minute` (possible abbreviations: `mi`, `n`)
    - `hour` (possible abbreviations: `hh`, `h`)
    - `day` (possible abbreviations: `dd`, `d`)
    - `week` (possible abbreviations: `wk`, `ww`)
    - `month` (possible abbreviations: `mm`, `m`)
    - `quarter` (possible abbreviations: `qq`, `q`)
    - `year` (possible abbreviations: `yyyy`, `yy`)

- `startdate` — The first time value to subtract (the subtrahend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).

- `enddate` — The second time value to subtract from (the minuend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).

- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional). If specified, it is applied to both `startdate` and `enddate`. If not specified, timezones of `startdate` and `enddate` are used. If they are not the same, the result is unspecified. [String](../../sql-reference/data-types/string.md).

**Returned value**

Difference between `enddate` and `startdate` expressed in `unit`.

Type: [Int](../../sql-reference/data-types/int-uint.md).

**Example**

Query:

``` sql
SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'));
```

Result:

``` text
┌─dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'))─┐
│ 25 │
└────────────────────────────────────────────────────────────────────────────────────────┘
```

Query:

``` sql
SELECT
    toDate('2022-01-01') AS e,
    toDate('2021-12-29') AS s,
    dateDiff('day', s, e) AS day_diff,
    dateDiff('month', s, e) AS month__diff,
    dateDiff('year', s, e) AS year_diff;
```

Result:

``` text
┌──────────e─┬──────────s─┬─day_diff─┬─month__diff─┬─year_diff─┐
│ 2022-01-01 │ 2021-12-29 │ 3 │ 1 │ 1 │
└────────────┴────────────┴──────────┴─────────────┴───────────┘
```

## date\_trunc

Truncates date and time data to the specified part of date.

@ -637,80 +785,6 @@ Result:
└───────────────────────────────────────────────┘
```

## date\_diff

Returns the difference between two dates or dates with time values.
The difference is calculated using relative units, e.g. the difference between `2022-01-01` and `2021-12-29` is 3 days for day unit (see [toRelativeDayNum](#torelativedaynum)), 1 month for month unit (see [toRelativeMonthNum](#torelativemonthnum)), 1 year for year unit (see [toRelativeYearNum](#torelativeyearnum)).

**Syntax**

``` sql
date_diff('unit', startdate, enddate, [timezone])
```

Aliases: `dateDiff`, `DATE_DIFF`.

**Arguments**

- `unit` — The type of interval for result. [String](../../sql-reference/data-types/string.md).
    Possible values:

    - `second`
    - `minute`
    - `hour`
    - `day`
    - `week`
    - `month`
    - `quarter`
    - `year`

- `startdate` — The first time value to subtract (the subtrahend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).

- `enddate` — The second time value to subtract from (the minuend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).

- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional). If specified, it is applied to both `startdate` and `enddate`. If not specified, timezones of `startdate` and `enddate` are used. If they are not the same, the result is unspecified. [String](../../sql-reference/data-types/string.md).

**Returned value**

Difference between `enddate` and `startdate` expressed in `unit`.

Type: [Int](../../sql-reference/data-types/int-uint.md).

**Example**

Query:

``` sql
SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'));
```

Result:

``` text
┌─dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'))─┐
│ 25 │
└────────────────────────────────────────────────────────────────────────────────────────┘
```

Query:

``` sql
SELECT
    toDate('2022-01-01') AS e,
    toDate('2021-12-29') AS s,
    dateDiff('day', s, e) AS day_diff,
    dateDiff('month', s, e) AS month__diff,
    dateDiff('year', s, e) AS year_diff;
```

Result:

``` text
┌──────────e─┬──────────s─┬─day_diff─┬─month__diff─┬─year_diff─┐
│ 2022-01-01 │ 2021-12-29 │ 3 │ 1 │ 1 │
└────────────┴────────────┴──────────┴─────────────┴───────────┘
```

## date\_sub

Subtracts the time interval or date interval from the provided date or date with time.

@ -1085,6 +1159,8 @@ SELECT timeSlots(toDateTime64('1980-12-12 21:01:02.1234', 4, 'UTC'), toDecimal64

Formats a Time according to the given Format string. Format is a constant expression, so you cannot have multiple formats for a single result column.

formatDateTime uses MySQL datetime format style, refer to https://dev.mysql.com/doc/refman/8.0/en/date-and-time-functions.html#function_date-format.

**Syntax**

``` sql
@ -1158,6 +1234,64 @@ Result:
└─────────────────────────────────────────────────────────────────────┘
```

**See Also**

- [formatDateTimeInJodaSyntax](##formatDateTimeInJodaSyntax)


## formatDateTimeInJodaSyntax

Similar to formatDateTime, except that it formats datetime in Joda style instead of MySQL style. Refer to https://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html.

**Replacement fields**

Using replacement fields, you can define a pattern for the resulting string.


| Placeholder | Description | Presentation | Examples |
| ----------- | ----------- | ------------- | -------- |
| G | era | text | AD |
| C | century of era (>=0) | number | 20 |
| Y | year of era (>=0) | year | 1996 |
| x | weekyear (not supported yet) | year | 1996 |
| w | week of weekyear (not supported yet) | number | 27 |
| e | day of week | number | 2 |
| E | day of week | text | Tuesday; Tue |
| y | year | year | 1996 |
| D | day of year | number | 189 |
| M | month of year | month | July; Jul; 07 |
| d | day of month | number | 10 |
| a | halfday of day | text | PM |
| K | hour of halfday (0~11) | number | 0 |
| h | clockhour of halfday (1~12) | number | 12 |
| H | hour of day (0~23) | number | 0 |
| k | clockhour of day (1~24) | number | 24 |
| m | minute of hour | number | 30 |
| s | second of minute | number | 55 |
| S | fraction of second (not supported yet) | number | 978 |
| z | time zone (short name not supported yet) | text | Pacific Standard Time; PST |
| Z | time zone offset/id (not supported yet) | zone | -0800; -08:00; America/Los_Angeles |
| ' | escape for text | delimiter | |
| '' | single quote | literal | ' |

**Example**

Query:

``` sql
SELECT formatDateTimeInJodaSyntax(toDateTime('2010-01-04 12:34:56'), 'yyyy-MM-dd HH:mm:ss')
```

Result:

```
┌─formatDateTimeInJodaSyntax(toDateTime('2010-01-04 12:34:56'), 'yyyy-MM-dd HH:mm:ss')─┐
│ 2010-01-04 12:34:56 │
└─────────────────────────────────────────────────────────────────────────────────────────┘
```

## dateName

Returns the specified part of a date.

@ -1241,6 +1375,8 @@ Result:

Function converts Unix timestamp to a calendar date and a time of a day. When there is only a single argument of [Integer](../../sql-reference/data-types/int-uint.md) type, it acts in the same way as [toDateTime](../../sql-reference/functions/type-conversion-functions.md#todatetime) and returns [DateTime](../../sql-reference/data-types/datetime.md) type.

FROM_UNIXTIME uses MySQL datetime format style, refer to https://dev.mysql.com/doc/refman/8.0/en/date-and-time-functions.html#function_date-format.

Alias: `fromUnixTimestamp`.

**Example:**

@ -1273,6 +1409,28 @@ SELECT FROM_UNIXTIME(1234334543, '%Y-%m-%d %R:%S') AS DateTime;
└─────────────────────┘
```

**See Also**

- [fromUnixTimestampInJodaSyntax](##fromUnixTimestampInJodaSyntax)


## fromUnixTimestampInJodaSyntax
Similar to FROM_UNIXTIME, except that it formats time in Joda style instead of MySQL style. Refer to https://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html.

**Example:**
Query:
``` sql
SELECT fromUnixTimestampInJodaSyntax(1669804872, 'yyyy-MM-dd HH:mm:ss', 'UTC');
```

Result:
```
┌─fromUnixTimestampInJodaSyntax(1669804872, 'yyyy-MM-dd HH:mm:ss', 'UTC')─┐
│ 2022-11-30 10:41:12 │
└────────────────────────────────────────────────────────────────────────────┘
```

## toModifiedJulianDay

Converts a [Proleptic Gregorian calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar) date in text form `YYYY-MM-DD` to a [Modified Julian Day](https://en.wikipedia.org/wiki/Julian_day#Variants) number in Int32. This function supports dates from `0000-01-01` to `9999-12-31`. It raises an exception if the argument cannot be parsed as a date, or the date is invalid.

@ -68,6 +68,483 @@ Result:
└────────────┴────────────┴──────────────┴────────────────┴─────────────────┴──────────────────────┘
```

# Functions for Generating Random Numbers based on Distributions

:::note
These functions are available starting from 22.10.
:::


## randUniform

Returns a random number based on the [continuous uniform distribution](https://en.wikipedia.org/wiki/Continuous_uniform_distribution) in a specified range from `min` to `max`.

**Syntax**

``` sql
randUniform(min, max)
```

**Arguments**

- `min` - `Float64` - min value of the range,
- `max` - `Float64` - max value of the range.

**Returned value**

- Pseudo-random number.

Type: [Float64](/docs/en/sql-reference/data-types/float.md).

**Example**

Query:

``` sql
SELECT randUniform(5.5, 10) FROM numbers(5)
```

Result:

``` text
┌─randUniform(5.5, 10)─┐
│ 8.094978491443102 │
│ 7.3181248914450885 │
│ 7.177741903868262 │
│ 6.483347380953762 │
│ 6.122286382885112 │
└──────────────────────┘
```

## randNormal

Returns a random number based on the [normal distribution](https://en.wikipedia.org/wiki/Normal_distribution).

**Syntax**

``` sql
randNormal(mean, variance)
```

**Arguments**

- `mean` - `Float64` - mean value of the distribution,
- `variance` - `Float64` - [variance](https://en.wikipedia.org/wiki/Variance).

**Returned value**

- Pseudo-random number.

Type: [Float64](/docs/en/sql-reference/data-types/float.md).

**Example**

Query:

``` sql
SELECT randNormal(10, 2) FROM numbers(5)
```

Result:

``` text
┌──randNormal(10, 2)─┐
│ 13.389228911709653 │
│ 8.622949707401295 │
│ 10.801887062682981 │
│ 4.5220192605895315 │
│ 10.901239123982567 │
└────────────────────┘
```


## randLogNormal

Returns a random number based on the [log-normal distribution](https://en.wikipedia.org/wiki/Log-normal_distribution).

**Syntax**

``` sql
randLogNormal(mean, variance)
```

**Arguments**

- `mean` - `Float64` - mean value of the distribution,
- `variance` - `Float64` - [variance](https://en.wikipedia.org/wiki/Variance).

**Returned value**

- Pseudo-random number.

Type: [Float64](/docs/en/sql-reference/data-types/float.md).

**Example**

Query:

``` sql
SELECT randLogNormal(100, 5) FROM numbers(5)
```

Result:

``` text
┌─randLogNormal(100, 5)─┐
│ 1.295699673937363e48 │
│ 9.719869109186684e39 │
│ 6.110868203189557e42 │
│ 9.912675872925529e39 │
│ 2.3564708490552458e42 │
└───────────────────────┘
```

## randBinomial

Returns a random number based on the [binomial distribution](https://en.wikipedia.org/wiki/Binomial_distribution).

**Syntax**

``` sql
randBinomial(experiments, probability)
```

**Arguments**

- `experiments` - `UInt64` - number of experiments,
- `probability` - `Float64` - probability of success in each experiment (values in `0...1` range only).

**Returned value**

- Pseudo-random number.

Type: [UInt64](/docs/en/sql-reference/data-types/int-uint.md).

**Example**

Query:

``` sql
SELECT randBinomial(100, .75) FROM numbers(5)
```

Result:

``` text
┌─randBinomial(100, 0.75)─┐
│ 74 │
│ 78 │
│ 76 │
│ 77 │
│ 80 │
└─────────────────────────┘
```


## randNegativeBinomial

Returns a random number based on the [negative binomial distribution](https://en.wikipedia.org/wiki/Negative_binomial_distribution).

**Syntax**

``` sql
randNegativeBinomial(experiments, probability)
```

**Arguments**

- `experiments` - `UInt64` - number of experiments,
- `probability` - `Float64` - probability of failure in each experiment (values in `0...1` range only).

**Returned value**

- Pseudo-random number.

Type: [UInt64](/docs/en/sql-reference/data-types/int-uint.md).

**Example**

Query:

``` sql
SELECT randNegativeBinomial(100, .75) FROM numbers(5)
```

Result:

``` text
┌─randNegativeBinomial(100, 0.75)─┐
│ 33 │
│ 32 │
│ 39 │
│ 40 │
│ 50 │
└─────────────────────────────────┘
```

## randPoisson

Returns a random number based on the [Poisson distribution](https://en.wikipedia.org/wiki/Poisson_distribution).

**Syntax**

``` sql
randPoisson(n)
```

**Arguments**

- `n` - `UInt64` - mean number of occurrences.

**Returned value**

- Pseudo-random number.

Type: [UInt64](/docs/en/sql-reference/data-types/int-uint.md).

**Example**

Query:

``` sql
SELECT randPoisson(10) FROM numbers(5)
```

Result:

``` text
┌─randPoisson(10)─┐
│ 8 │
│ 8 │
│ 7 │
│ 10 │
│ 6 │
└─────────────────┘
```


## randBernoulli

Returns a random number based on the [Bernoulli distribution](https://en.wikipedia.org/wiki/Bernoulli_distribution).

**Syntax**

``` sql
randBernoulli(probability)
```

**Arguments**

- `probability` - `Float64` - probability of success (values in `0...1` range only).

**Returned value**

- Pseudo-random number.

Type: [UInt64](/docs/en/sql-reference/data-types/int-uint.md).

**Example**

Query:

``` sql
SELECT randBernoulli(.75) FROM numbers(5)
```

Result:

``` text
┌─randBernoulli(0.75)─┐
│ 1 │
│ 1 │
│ 0 │
│ 1 │
│ 1 │
└─────────────────────┘
```

## randExponential

Returns a random number based on the [exponential distribution](https://en.wikipedia.org/wiki/Exponential_distribution).

**Syntax**

``` sql
randExponential(lambda)
```

**Arguments**

- `lambda` - `Float64` - lambda value.

**Returned value**

- Pseudo-random number.

Type: [Float64](/docs/en/sql-reference/data-types/float.md).

**Example**

Query:

``` sql
SELECT randExponential(1/10) FROM numbers(5)
```

Result:

``` text
┌─randExponential(divide(1, 10))─┐
│ 44.71628934340778 │
│ 4.211013337903262 │
│ 10.809402553207766 │
│ 15.63959406553284 │
│ 1.8148392319860158 │
└────────────────────────────────┘
```


## randChiSquared

Returns a random number based on the [Chi-square distribution](https://en.wikipedia.org/wiki/Chi-squared_distribution) - a distribution of a sum of the squares of k independent standard normal random variables.

**Syntax**

``` sql
randChiSquared(degree_of_freedom)
```

**Arguments**

- `degree_of_freedom` - `Float64` - degree of freedom.

**Returned value**

- Pseudo-random number.

Type: [Float64](/docs/en/sql-reference/data-types/float.md).

**Example**

Query:

``` sql
SELECT randChiSquared(10) FROM numbers(5)
```

Result:

``` text
┌─randChiSquared(10)─┐
│ 10.015463656521543 │
│ 9.621799919882768 │
│ 2.71785015634699 │
│ 11.128188665931908 │
│ 4.902063104425469 │
└────────────────────┘
```

## randStudentT

Returns a random number based on [Student's t-distribution](https://en.wikipedia.org/wiki/Student%27s_t-distribution).

**Syntax**

``` sql
randStudentT(degree_of_freedom)
```

**Arguments**

- `degree_of_freedom` - `Float64` - degree of freedom.

**Returned value**

- Pseudo-random number.

Type: [Float64](/docs/en/sql-reference/data-types/float.md).

**Example**

Query:

``` sql
SELECT randStudentT(10) FROM numbers(5)
```

Result:

``` text
┌─────randStudentT(10)─┐
│ 1.2217309938538725 │
│ 1.7941971681200541 │
│ -0.28192176076784664 │
│ 0.2508897721303792 │
│ -2.7858432909761186 │
└──────────────────────┘
```


## randFisherF

Returns a random number based on the [F-distribution](https://en.wikipedia.org/wiki/F-distribution).

**Syntax**

``` sql
randFisherF(d1, d2)
```

**Arguments**

- `d1` - `Float64` - d1 degree of freedom in `X = (S1 / d1) / (S2 / d2)`,
- `d2` - `Float64` - d2 degree of freedom in `X = (S1 / d1) / (S2 / d2)`.

**Returned value**

- Pseudo-random number.

Type: [Float64](/docs/en/sql-reference/data-types/float.md).

**Example**

Query:

``` sql
SELECT randFisherF(10, 3) FROM numbers(5)
```

Result:

``` text
┌──randFisherF(10, 3)─┐
│ 7.286287504216609 │
│ 0.26590779413050386 │
│ 0.22207610901168987 │
│ 0.7953362728449572 │
│ 0.19278885985221572 │
└─────────────────────┘
```


# Random Functions for Working with Strings

## randomString

@ -14,7 +14,7 @@ ClickHouse has the [same behavior as C++ programs](https://en.cppreference.com/w

## toInt(8\|16\|32\|64\|128\|256)

Converts an input value to the [Int](../../sql-reference/data-types/int-uint.md) data type. This function family includes:
Converts an input value to the [Int](/docs/en/sql-reference/data-types/int-uint.md) data type. This function family includes:

- `toInt8(expr)` — Results in the `Int8` data type.
- `toInt16(expr)` — Results in the `Int16` data type.
@ -25,7 +25,7 @@ Converts an input value to the [Int](../../sql-reference/data-types/int-uint.md)

**Arguments**

- `expr` — [Expression](../../sql-reference/syntax.md#syntax-expressions) returning a number or a string with the decimal representation of a number. Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped.
- `expr` — [Expression](/docs/en/sql-reference/syntax.md/#syntax-expressions) returning a number or a string with the decimal representation of a number. Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped.

**Returned value**

@ -33,7 +33,7 @@ Integer value in the `Int8`, `Int16`, `Int32`, `Int64`, `Int128` or `Int256` dat

Functions use [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning they truncate fractional digits of numbers.

The behavior of functions for the [NaN and Inf](../../sql-reference/data-types/float.md#data_type-float-nan-inf) arguments is undefined. Remember about [numeric conversions issues](#numeric-conversion-issues), when using the functions.
The behavior of functions for the [NaN and Inf](/docs/en/sql-reference/data-types/float.md/#data_type-float-nan-inf) arguments is undefined. Remember about [numeric conversions issues](#numeric-conversion-issues), when using the functions.

**Example**

@ -114,7 +114,7 @@ Result:

## toUInt(8\|16\|32\|64\|256)

Converts an input value to the [UInt](../../sql-reference/data-types/int-uint.md) data type. This function family includes:
Converts an input value to the [UInt](/docs/en/sql-reference/data-types/int-uint.md) data type. This function family includes:

- `toUInt8(expr)` — Results in the `UInt8` data type.
- `toUInt16(expr)` — Results in the `UInt16` data type.
@ -124,7 +124,7 @@ Converts an input value to the [UInt](../../sql-reference/data-types/int-uint.md

**Arguments**

- `expr` — [Expression](../../sql-reference/syntax.md#syntax-expressions) returning a number or a string with the decimal representation of a number. Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped.
- `expr` — [Expression](/docs/en/sql-reference/syntax.md/#syntax-expressions) returning a number or a string with the decimal representation of a number. Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped.

**Returned value**

@ -132,7 +132,7 @@ Integer value in the `UInt8`, `UInt16`, `UInt32`, `UInt64` or `UInt256` data typ

Functions use [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning they truncate fractional digits of numbers.

The behavior of functions for negative arguments and for the [NaN and Inf](../../sql-reference/data-types/float.md#data_type-float-nan-inf) arguments is undefined. If you pass a string with a negative number, for example `'-32'`, ClickHouse raises an exception. Remember about [numeric conversions issues](#numeric-conversion-issues), when using the functions.
The behavior of functions for negative arguments and for the [NaN and Inf](/docs/en/sql-reference/data-types/float.md/#data_type-float-nan-inf) arguments is undefined. If you pass a string with a negative number, for example `'-32'`, ClickHouse raises an exception. Remember about [numeric conversions issues](#numeric-conversion-issues), when using the functions.

**Example**

@ -166,7 +166,111 @@ Result:

## toDate

Alias: `DATE`.
Converts the argument to the `Date` data type.

If the argument is `DateTime` or `DateTime64`, it truncates it, leaving the date component of the DateTime:
```sql
SELECT
    now() AS x,
    toDate(x)
```
```response
┌───────────────────x─┬─toDate(now())─┐
│ 2022-12-30 13:44:17 │ 2022-12-30 │
└─────────────────────┴───────────────┘
```

If the argument is a string, it is parsed as Date or DateTime. If it was parsed as DateTime, the date component is used:
```sql
SELECT
    toDate('2022-12-30') AS x,
    toTypeName(x)
```
```response
┌──────────x─┬─toTypeName(toDate('2022-12-30'))─┐
│ 2022-12-30 │ Date │
└────────────┴──────────────────────────────────┘

1 row in set. Elapsed: 0.001 sec.
```
```sql
SELECT
    toDate('2022-12-30 01:02:03') AS x,
    toTypeName(x)
```
```response
┌──────────x─┬─toTypeName(toDate('2022-12-30 01:02:03'))─┐
│ 2022-12-30 │ Date │
└────────────┴───────────────────────────────────────────┘
```

If the argument is a number and it looks like a UNIX timestamp (is greater than 65535), it is interpreted as a DateTime, then truncated to Date in the current timezone. The timezone argument can be specified as a second argument of the function. The truncation to Date depends on the timezone:

```sql
SELECT
    now() AS current_time,
    toUnixTimestamp(current_time) AS ts,
    toDateTime(ts) AS time_Amsterdam,
    toDateTime(ts, 'Pacific/Apia') AS time_Samoa,
    toDate(time_Amsterdam) AS date_Amsterdam,
    toDate(time_Samoa) AS date_Samoa,
    toDate(ts) AS date_Amsterdam_2,
    toDate(ts, 'Pacific/Apia') AS date_Samoa_2
```
```response
Row 1:
──────
current_time: 2022-12-30 13:51:54
ts: 1672404714
time_Amsterdam: 2022-12-30 13:51:54
time_Samoa: 2022-12-31 01:51:54
date_Amsterdam: 2022-12-30
date_Samoa: 2022-12-31
date_Amsterdam_2: 2022-12-30
date_Samoa_2: 2022-12-31
```

The example above demonstrates how the same UNIX timestamp can be interpreted as different dates in different time zones.

If the argument is a number and it is smaller than 65536, it is interpreted as the number of days since 1970-01-01 (a UNIX day) and converted to Date. It corresponds to the internal numeric representation of the `Date` data type. Example:

```sql
SELECT toDate(12345)
```
```response
┌─toDate(12345)─┐
│ 2003-10-20 │
└───────────────┘
```

This conversion does not depend on timezones.

If the argument does not fit in the range of the Date type, it results in implementation-defined behavior that can saturate to the maximum supported date or overflow:
```sql
SELECT toDate(10000000000.)
```
```response
┌─toDate(10000000000.)─┐
│ 2106-02-07 │
└──────────────────────┘
```

The function `toDate` can also be written in alternative forms:

```sql
SELECT
    now() AS time,
    toDate(time),
    DATE(time),
    CAST(time, 'Date')
```
```response
┌────────────────time─┬─toDate(now())─┬─DATE(now())─┬─CAST(now(), 'Date')─┐
│ 2022-12-30 13:54:58 │ 2022-12-30 │ 2022-12-30 │ 2022-12-30 │
└─────────────────────┴───────────────┴─────────────┴─────────────────────┘
```

Have a nice day working with dates and times.

## toDateOrZero

@ -184,7 +288,7 @@ Alias: `DATE`.

## toDate32

Converts the argument to the [Date32](../../sql-reference/data-types/date32.md) data type. If the value is outside the range returns the border values supported by `Date32`. If the argument has [Date](../../sql-reference/data-types/date.md) type, borders of `Date` are taken into account.
Converts the argument to the [Date32](/docs/en/sql-reference/data-types/date32.md) data type. If the value is outside the range, `toDate32` returns the border values supported by `Date32`. If the argument has [Date](/docs/en/sql-reference/data-types/date.md) type, borders of `Date` are taken into account.

**Syntax**

@ -194,13 +298,13 @@ toDate32(expr)

**Arguments**

- `expr` — The value. [String](../../sql-reference/data-types/string.md), [UInt32](../../sql-reference/data-types/int-uint.md) or [Date](../../sql-reference/data-types/date.md).
- `expr` — The value. [String](/docs/en/sql-reference/data-types/string.md), [UInt32](/docs/en/sql-reference/data-types/int-uint.md) or [Date](/docs/en/sql-reference/data-types/date.md).

**Returned value**

- A calendar date.

Type: [Date32](../../sql-reference/data-types/date32.md).
Type: [Date32](/docs/en/sql-reference/data-types/date32.md).

**Example**

@ -242,7 +346,7 @@ SELECT toDate32(toDate('1899-01-01')) AS value, toTypeName(value);

## toDate32OrZero

The same as [toDate32](#todate32) but returns the min value of [Date32](../../sql-reference/data-types/date32.md) if invalid argument is received.
The same as [toDate32](#todate32) but returns the min value of [Date32](/docs/en/sql-reference/data-types/date32.md) if an invalid argument is received.

**Example**

@ -262,7 +366,7 @@ Result:

## toDate32OrNull

The same as [toDate32](#todate32) but returns `NULL` if invalid argument is received.
The same as [toDate32](#todate32) but returns `NULL` if an invalid argument is received.

**Example**

@ -282,7 +386,7 @@ Result:

## toDate32OrDefault

Converts the argument to the [Date32](../../sql-reference/data-types/date32.md) data type. If the value is outside the range returns the lower border value supported by `Date32`. If the argument has [Date](../../sql-reference/data-types/date.md) type, borders of `Date` are taken into account. Returns default value if invalid argument is received.
Converts the argument to the [Date32](/docs/en/sql-reference/data-types/date32.md) data type. If the value is outside the range, `toDate32OrDefault` returns the lower border value supported by `Date32`. If the argument has [Date](/docs/en/sql-reference/data-types/date.md) type, borders of `Date` are taken into account. Returns default value if an invalid argument is received.

**Example**

@ -304,7 +408,7 @@ Result:

## toDateTime64

Converts the argument to the [DateTime64](../../sql-reference/data-types/datetime64.md) data type.
Converts the argument to the [DateTime64](/docs/en/sql-reference/data-types/datetime64.md) data type.

**Syntax**

@ -314,7 +418,7 @@ toDateTime64(expr, scale, [timezone])

**Arguments**

- `expr` — The value. [String](../../sql-reference/data-types/string.md), [UInt32](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [DateTime](../../sql-reference/data-types/datetime.md).
- `expr` — The value. [String](/docs/en/sql-reference/data-types/string.md), [UInt32](/docs/en/sql-reference/data-types/int-uint.md), [Float](/docs/en/sql-reference/data-types/float.md) or [DateTime](/docs/en/sql-reference/data-types/datetime.md).
- `scale` - Tick size (precision): 10<sup>-precision</sup> seconds. Valid range: [ 0 : 9 ].
- `timezone` - Time zone of the specified datetime64 object.

@ -322,7 +426,7 @@ toDateTime64(expr, scale, [timezone])

- A calendar date and time of day, with sub-second precision.

Type: [DateTime64](../../sql-reference/data-types/datetime64.md).
Type: [DateTime64](/docs/en/sql-reference/data-types/datetime64.md).

**Example**

@ -378,7 +482,7 @@ SELECT toDateTime64('2019-01-01 00:00:00', 3, 'Asia/Istanbul') AS value, toTypeN

## toDecimal(32\|64\|128\|256)

Converts `value` to the [Decimal](../../sql-reference/data-types/decimal.md) data type with precision of `S`. The `value` can be a number or a string. The `S` (scale) parameter specifies the number of decimal places.
Converts `value` to the [Decimal](/docs/en/sql-reference/data-types/decimal.md) data type with precision of `S`. The `value` can be a number or a string. The `S` (scale) parameter specifies the number of decimal places.

- `toDecimal32(value, S)`
- `toDecimal64(value, S)`
@ -387,7 +491,7 @@ Converts `value` to the [Decimal](../../sql-reference/data-types/decimal.md) dat

## toDecimal(32\|64\|128\|256)OrNull

Converts an input string to a [Nullable(Decimal(P,S))](../../sql-reference/data-types/decimal.md) data type value. This family of functions include:
Converts an input string to a [Nullable(Decimal(P,S))](/docs/en/sql-reference/data-types/decimal.md) data type value. This family of functions includes:

- `toDecimal32OrNull(expr, S)` — Results in `Nullable(Decimal32(S))` data type.
- `toDecimal64OrNull(expr, S)` — Results in `Nullable(Decimal64(S))` data type.
@ -398,7 +502,7 @@ These functions should be used instead of `toDecimal*()` functions, if you prefe

**Arguments**

- `expr` — [Expression](../../sql-reference/syntax.md#syntax-expressions), returns a value in the [String](../../sql-reference/data-types/string.md) data type. ClickHouse expects the textual representation of the decimal number. For example, `'1.111'`.
- `expr` — [Expression](/docs/en/sql-reference/syntax.md/#syntax-expressions), returns a value in the [String](/docs/en/sql-reference/data-types/string.md) data type. ClickHouse expects the textual representation of the decimal number. For example, `'1.111'`.
- `S` — Scale, the number of decimal places in the resulting value.

**Returned value**
@ -441,7 +545,7 @@ Result:

## toDecimal(32\|64\|128\|256)OrDefault

Converts an input string to a [Decimal(P,S)](../../sql-reference/data-types/decimal.md) data type value. This family of functions include:
Converts an input string to a [Decimal(P,S)](/docs/en/sql-reference/data-types/decimal.md) data type value. This family of functions includes:

- `toDecimal32OrDefault(expr, S)` — Results in `Decimal32(S)` data type.
- `toDecimal64OrDefault(expr, S)` — Results in `Decimal64(S)` data type.
@ -452,7 +556,7 @@ These functions should be used instead of `toDecimal*()` functions, if you prefe

**Arguments**

- `expr` — [Expression](../../sql-reference/syntax.md#syntax-expressions), returns a value in the [String](../../sql-reference/data-types/string.md) data type. ClickHouse expects the textual representation of the decimal number. For example, `'1.111'`.
- `expr` — [Expression](/docs/en/sql-reference/syntax.md/#syntax-expressions), returns a value in the [String](/docs/en/sql-reference/data-types/string.md) data type. ClickHouse expects the textual representation of the decimal number. For example, `'1.111'`.
- `S` — Scale, the number of decimal places in the resulting value.

**Returned value**
@ -494,7 +598,7 @@ Result:

## toDecimal(32\|64\|128\|256)OrZero

Converts an input value to the [Decimal(P,S)](../../sql-reference/data-types/decimal.md) data type. This family of functions include:
Converts an input value to the [Decimal(P,S)](/docs/en/sql-reference/data-types/decimal.md) data type. This family of functions includes:

- `toDecimal32OrZero( expr, S)` — Results in `Decimal32(S)` data type.
- `toDecimal64OrZero( expr, S)` — Results in `Decimal64(S)` data type.
@ -505,7 +609,7 @@ These functions should be used instead of `toDecimal*()` functions, if you prefe

**Arguments**

- `expr` — [Expression](../../sql-reference/syntax.md#syntax-expressions), returns a value in the [String](../../sql-reference/data-types/string.md) data type. ClickHouse expects the textual representation of the decimal number. For example, `'1.111'`.
- `expr` — [Expression](/docs/en/sql-reference/syntax.md/#syntax-expressions), returns a value in the [String](/docs/en/sql-reference/data-types/string.md) data type. ClickHouse expects the textual representation of the decimal number. For example, `'1.111'`.
- `S` — Scale, the number of decimal places in the resulting value.

**Returned value**
@ -564,7 +668,7 @@ YYYY-MM-DD hh:mm:ss
|
||||
|
||||
As an exception, if converting from UInt32, Int32, UInt64, or Int64 numeric types to Date, and if the number is greater than or equal to 65536, the number is interpreted as a Unix timestamp (and not as the number of days) and is rounded to the date. This allows support for the common occurrence of writing ‘toDate(unix_timestamp)’, which otherwise would be an error and would require writing the more cumbersome ‘toDate(toDateTime(unix_timestamp))’.
|
||||
|
||||
Conversion between a date and date with time is performed the natural way: by adding a null time or dropping the time.
|
||||
Conversion between a date and a date with time is performed the natural way: by adding a null time or dropping the time.
|
||||
|
||||
Conversion between numeric types uses the same rules as assignments between different numeric types in C++.
|
||||
|
||||
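For illustration, both directions of the date and date-with-time conversion:

``` sql
SELECT
    toDateTime(toDate('2019-01-01'))          AS date_to_datetime,  -- 2019-01-01 00:00:00
    toDate(toDateTime('2019-01-01 12:34:56')) AS datetime_to_date;  -- 2019-01-01
```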
@ -643,15 +747,15 @@ These functions accept a string and interpret the bytes placed at the beginning
|
||||
|
||||
## reinterpretAsString
|
||||
|
||||
This function accepts a number or date or date with time, and returns a string containing bytes representing the corresponding value in host order (little endian). Null bytes are dropped from the end. For example, a UInt32 type value of 255 is a string that is one byte long.
|
||||
This function accepts a number or date or date with time and returns a string containing bytes representing the corresponding value in host order (little endian). Null bytes are dropped from the end. For example, a UInt32 type value of 255 is a string that is one byte long.
|
||||
|
||||
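A minimal query illustrating the example above:

``` sql
SELECT length(reinterpretAsString(toUInt32(255))) AS byte_length; -- 1, trailing zero bytes are dropped
```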
## reinterpretAsFixedString
|
||||
|
||||
This function accepts a number or date or date with time, and returns a FixedString containing bytes representing the corresponding value in host order (little endian). Null bytes are dropped from the end. For example, a UInt32 type value of 255 is a FixedString that is one byte long.
|
||||
This function accepts a number or date or date with time and returns a FixedString containing bytes representing the corresponding value in host order (little endian). Null bytes are dropped from the end. For example, a UInt32 type value of 255 is a FixedString that is one byte long.
|
||||
|
||||
## reinterpretAsUUID
|
||||
|
||||
Accepts 16 bytes string and returns UUID containing bytes representing the corresponding value in network byte order (big-endian). If the string isn't long enough, the function works as if the string is padded with the necessary number of null bytes to the end. If the string longer than 16 bytes, the extra bytes at the end are ignored.
|
||||
Accepts 16 bytes string and returns UUID containing bytes representing the corresponding value in network byte order (big-endian). If the string isn't long enough, the function works as if the string is padded with the necessary number of null bytes to the end. If the string is longer than 16 bytes, the extra bytes at the end are ignored.
|
||||
|
||||
**Syntax**
|
||||
|
||||
@ -661,11 +765,11 @@ reinterpretAsUUID(fixed_string)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `fixed_string` — Big-endian byte string. [FixedString](../../sql-reference/data-types/fixedstring.md#fixedstring).
|
||||
- `fixed_string` — Big-endian byte string. [FixedString](/docs/en/sql-reference/data-types/fixedstring.md/#fixedstring).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- The UUID type value. [UUID](../../sql-reference/data-types/uuid.md#uuid-data-type).
|
||||
- The UUID type value. [UUID](/docs/en/sql-reference/data-types/uuid.md/#uuid-data-type).
|
||||
|
||||
**Examples**
|
||||
|
||||
@ -718,7 +822,7 @@ reinterpret(x, type)
|
||||
**Arguments**
|
||||
|
||||
- `x` — Any type.
|
||||
- `type` — Destination type. [String](../../sql-reference/data-types/string.md).
|
||||
- `type` — Destination type. [String](/docs/en/sql-reference/data-types/string.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -757,7 +861,7 @@ x::t
|
||||
**Arguments**
|
||||
|
||||
- `x` — A value to convert. May be of any type.
|
||||
- `T` — The name of the target data type. [String](../../sql-reference/data-types/string.md).
|
||||
- `T` — The name of the target data type. [String](/docs/en/sql-reference/data-types/string.md).
|
||||
- `t` — The target data type.
|
||||
|
||||
**Returned value**
|
||||
@ -806,9 +910,9 @@ Result:
|
||||
└─────────────────────┴─────────────────────┴────────────┴─────────────────────┴───────────────────────────┘
|
||||
```
|
||||
|
||||
Conversion to FixedString(N) only works for arguments of type [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).
|
||||
Conversion to FixedString(N) only works for arguments of type [String](/docs/en/sql-reference/data-types/string.md) or [FixedString](/docs/en/sql-reference/data-types/fixedstring.md).
|
||||
|
||||
Type conversion to [Nullable](../../sql-reference/data-types/nullable.md) and back is supported.
|
||||
Type conversion to [Nullable](/docs/en/sql-reference/data-types/nullable.md) and back is supported.
|
||||
|
||||
**Example**
|
||||
|
||||
@ -844,7 +948,7 @@ Result:
|
||||
|
||||
**See also**
|
||||
|
||||
- [cast_keep_nullable](../../operations/settings/settings.md#cast_keep_nullable) setting
|
||||
- [cast_keep_nullable](/docs/en/operations/settings/settings.md/#cast_keep_nullable) setting
|
||||
|
||||
## accurateCast(x, T)
|
||||
|
||||
@ -882,7 +986,7 @@ Code: 70. DB::Exception: Received from localhost:9000. DB::Exception: Value in c
|
||||
|
||||
## accurateCastOrNull(x, T)
|
||||
|
||||
Converts input value `x` to the specified data type `T`. Always returns [Nullable](../../sql-reference/data-types/nullable.md) type and returns [NULL](../../sql-reference/syntax.md#null-literal) if the casted value is not representable in the target type.
|
||||
Converts input value `x` to the specified data type `T`. Always returns [Nullable](/docs/en/sql-reference/data-types/nullable.md) type and returns [NULL](/docs/en/sql-reference/syntax.md/#null-literal) if the casted value is not representable in the target type.
|
||||
|
||||
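An illustrative sketch (the values are arbitrary):

``` sql
SELECT
    accurateCastOrNull(-1, 'UInt8') AS not_representable,  -- NULL
    accurateCastOrNull(5, 'UInt8')  AS representable;      -- 5
```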
**Syntax**
|
||||
|
||||
@ -991,7 +1095,7 @@ Result:
|
||||
|
||||
## toInterval(Year\|Quarter\|Month\|Week\|Day\|Hour\|Minute\|Second)
|
||||
|
||||
Converts a Number type argument to an [Interval](../../sql-reference/data-types/special-data-types/interval.md) data type.
|
||||
Converts a Number type argument to an [Interval](/docs/en/sql-reference/data-types/special-data-types/interval.md) data type.
|
||||
|
||||
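A quick usage sketch:

``` sql
SELECT toDate('2019-01-01') + toIntervalMonth(1) AS next_month; -- 2019-02-01
```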
**Syntax**
|
||||
|
||||
@ -1039,7 +1143,7 @@ Result:
|
||||
## parseDateTimeBestEffort
|
||||
## parseDateTime32BestEffort
|
||||
|
||||
Converts a date and time in the [String](../../sql-reference/data-types/string.md) representation to [DateTime](../../sql-reference/data-types/datetime.md#data_type-datetime) data type.
|
||||
Converts a date and time in the [String](/docs/en/sql-reference/data-types/string.md) representation to [DateTime](/docs/en/sql-reference/data-types/datetime.md/#data_type-datetime) data type.
|
||||
|
||||
The function parses [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601), [RFC 1123 - 5.2.14 RFC-822 Date and Time Specification](https://tools.ietf.org/html/rfc1123#page-55), ClickHouse’s and some other date and time formats.
|
||||
|
||||
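A short sketch using one of the supported non-standard formats:

``` sql
SELECT parseDateTimeBestEffort('23/10/2020 12:12:57') AS parsed; -- 2020-10-23 12:12:57
```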
@ -1051,8 +1155,8 @@ parseDateTimeBestEffort(time_string [, time_zone])
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `time_string` — String containing a date and time to convert. [String](../../sql-reference/data-types/string.md).
|
||||
- `time_zone` — Time zone. The function parses `time_string` according to the time zone. [String](../../sql-reference/data-types/string.md).
|
||||
- `time_string` — String containing a date and time to convert. [String](/docs/en/sql-reference/data-types/string.md).
|
||||
- `time_zone` — Time zone. The function parses `time_string` according to the time zone. [String](/docs/en/sql-reference/data-types/string.md).
|
||||
|
||||
**Supported non-standard formats**
|
||||
|
||||
@ -1175,7 +1279,7 @@ Same as [parseDateTimeBestEffortUS](#parsedatetimebesteffortUS) function except
|
||||
|
||||
## parseDateTime64BestEffort
|
||||
|
||||
Same as [parseDateTimeBestEffort](#parsedatetimebesteffort) function but also parses milliseconds and microseconds and returns [DateTime](../../sql-reference/functions/type-conversion-functions.md#data_type-datetime) data type.
|
||||
Same as [parseDateTimeBestEffort](#parsedatetimebesteffort) function but also parses milliseconds and microseconds and returns [DateTime](/docs/en/sql-reference/functions/type-conversion-functions.md/#data_type-datetime) data type.
|
||||
|
||||
**Syntax**
|
||||
|
||||
@ -1185,13 +1289,13 @@ parseDateTime64BestEffort(time_string [, precision [, time_zone]])
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `time_string` — String containing a date or date with time to convert. [String](../../sql-reference/data-types/string.md).
|
||||
- `precision` — Required precision. `3` — for milliseconds, `6` — for microseconds. Default — `3`. Optional. [UInt8](../../sql-reference/data-types/int-uint.md).
|
||||
- `time_zone` — [Timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone). The function parses `time_string` according to the timezone. Optional. [String](../../sql-reference/data-types/string.md).
|
||||
- `time_string` — String containing a date or date with time to convert. [String](/docs/en/sql-reference/data-types/string.md).
|
||||
- `precision` — Required precision. `3` — for milliseconds, `6` — for microseconds. Default — `3`. Optional. [UInt8](/docs/en/sql-reference/data-types/int-uint.md).
|
||||
- `time_zone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md/#server_configuration_parameters-timezone). The function parses `time_string` according to the timezone. Optional. [String](/docs/en/sql-reference/data-types/string.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- `time_string` converted to the [DateTime](../../sql-reference/data-types/datetime.md) data type.
|
||||
- `time_string` converted to the [DateTime](/docs/en/sql-reference/data-types/datetime.md) data type.
|
||||
|
||||
**Examples**
|
||||
|
||||
@ -1242,7 +1346,7 @@ Same as for [parseDateTime64BestEffort](#parsedatetime64besteffort), except that
|
||||
|
||||
## toLowCardinality
|
||||
|
||||
Converts input parameter to the [LowCardinality](../../sql-reference/data-types/lowcardinality.md) version of same data type.
|
||||
Converts input parameter to the [LowCardinality](/docs/en/sql-reference/data-types/lowcardinality.md) version of same data type.
|
||||
|
||||
To convert data from the `LowCardinality` data type use the [CAST](#type_conversion_function-cast) function. For example, `CAST(x as String)`.
|
||||
|
||||
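An illustrative query:

``` sql
SELECT toLowCardinality('1') AS val, toTypeName(val) AS type; -- '1', LowCardinality(String)
```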
@ -1254,7 +1358,7 @@ toLowCardinality(expr)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `expr` — [Expression](../../sql-reference/syntax.md#syntax-expressions) resulting in one of the [supported data types](../../sql-reference/data-types/index.md#data_types).
|
||||
- `expr` — [Expression](/docs/en/sql-reference/syntax.md/#syntax-expressions) resulting in one of the [supported data types](/docs/en/sql-reference/data-types/index.md/#data_types).
|
||||
|
||||
**Returned values**
|
||||
|
||||
@ -1388,12 +1492,12 @@ formatRow(format, x, y, ...)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `format` — Text format. For example, [CSV](../../interfaces/formats.md#csv), [TSV](../../interfaces/formats.md#tabseparated).
|
||||
- `format` — Text format. For example, [CSV](/docs/en/interfaces/formats.md/#csv), [TSV](/docs/en/interfaces/formats.md/#tabseparated).
|
||||
- `x`,`y`, ... — Expressions.
|
||||
|
||||
**Returned value**
|
||||
|
||||
- A formatted string (for text formats it's usually terminated with the new line character).
|
||||
- A formatted string. (for text formats it's usually terminated with the new line character).
|
||||
|
||||
**Example**
|
||||
|
||||
@ -1417,9 +1521,39 @@ Result:
|
||||
└──────────────────────────────────┘
|
||||
```
|
||||
|
||||
**Note**: If format contains suffix/prefix, it will be written in each row.
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT formatRow('CustomSeparated', number, 'good')
|
||||
FROM numbers(3)
|
||||
SETTINGS format_custom_result_before_delimiter='<prefix>\n', format_custom_result_after_delimiter='<suffix>'
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─formatRow('CustomSeparated', number, 'good')─┐
|
||||
│ <prefix>
|
||||
0 good
|
||||
<suffix> │
|
||||
│ <prefix>
|
||||
1 good
|
||||
<suffix> │
|
||||
│ <prefix>
|
||||
2 good
|
||||
<suffix> │
|
||||
└──────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
Note: Only row-based formats are supported in this function.
|
||||
|
||||
## formatRowNoNewline
|
||||
|
||||
Converts arbitrary expressions into a string via given format. The function trims the last `\n` if any.
|
||||
Converts arbitrary expressions into a string via given format. Differs from formatRow in that this function trims the last `\n` if any.
|
||||
|
||||
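A brief sketch of the difference:

``` sql
SELECT formatRowNoNewline('CSV', number, 'good') FROM numbers(3); -- each row, e.g. 0,"good", without a trailing newline
```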
**Syntax**
|
||||
|
||||
@ -1429,7 +1563,7 @@ formatRowNoNewline(format, x, y, ...)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `format` — Text format. For example, [CSV](../../interfaces/formats.md#csv), [TSV](../../interfaces/formats.md#tabseparated).
|
||||
- `format` — Text format. For example, [CSV](/docs/en/interfaces/formats.md/#csv), [TSV](/docs/en/interfaces/formats.md/#tabseparated).
|
||||
- `x`,`y`, ... — Expressions.
|
||||
|
||||
**Returned value**
|
||||
@ -1457,7 +1591,7 @@ Result:
|
||||
|
||||
## snowflakeToDateTime
|
||||
|
||||
Extracts time from [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) as [DateTime](../data-types/datetime.md) format.
|
||||
Extracts time from [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) as [DateTime](/docs/en/sql-reference/data-types/datetime.md) format.
|
||||
|
||||
**Syntax**
|
||||
|
||||
@ -1467,12 +1601,12 @@ snowflakeToDateTime(value [, time_zone])
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `value` — Snowflake ID. [Int64](../data-types/int-uint.md).
|
||||
- `time_zone` — [Timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone). The function parses `time_string` according to the timezone. Optional. [String](../../sql-reference/data-types/string.md).
|
||||
- `value` — Snowflake ID. [Int64](/docs/en/sql-reference/data-types/int-uint.md).
|
||||
- `time_zone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md/#server_configuration_parameters-timezone). The function parses `time_string` according to the timezone. Optional. [String](/docs/en/sql-reference/data-types/string.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Input value converted to the [DateTime](../data-types/datetime.md) data type.
|
||||
- Input value converted to the [DateTime](/docs/en/sql-reference/data-types/datetime.md) data type.
|
||||
|
||||
**Example**
|
||||
|
||||
@ -1493,7 +1627,7 @@ Result:
|
||||
|
||||
## snowflakeToDateTime64
|
||||
|
||||
Extracts time from [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) as [DateTime64](../data-types/datetime64.md) format.
|
||||
Extracts time from [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) as [DateTime64](/docs/en/sql-reference/data-types/datetime64.md) format.
|
||||
|
||||
**Syntax**
|
||||
|
||||
@ -1503,12 +1637,12 @@ snowflakeToDateTime64(value [, time_zone])
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `value` — Snowflake ID. [Int64](../data-types/int-uint.md).
|
||||
- `time_zone` — [Timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone). The function parses `time_string` according to the timezone. Optional. [String](../../sql-reference/data-types/string.md).
|
||||
- `value` — Snowflake ID. [Int64](/docs/en/sql-reference/data-types/int-uint.md).
|
||||
- `time_zone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md/#server_configuration_parameters-timezone). The function parses `time_string` according to the timezone. Optional. [String](/docs/en/sql-reference/data-types/string.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Input value converted to the [DateTime64](../data-types/datetime64.md) data type.
|
||||
- Input value converted to the [DateTime64](/docs/en/sql-reference/data-types/datetime64.md) data type.
|
||||
|
||||
**Example**
|
||||
|
||||
@ -1529,7 +1663,7 @@ Result:
|
||||
|
||||
## dateTimeToSnowflake
|
||||
|
||||
Converts [DateTime](../data-types/datetime.md) value to the first [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) at the given time.
|
||||
Converts [DateTime](/docs/en/sql-reference/data-types/datetime.md) value to the first [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) at the given time.
|
||||
|
||||
**Syntax**
|
||||
|
||||
@ -1539,11 +1673,11 @@ dateTimeToSnowflake(value)
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `value` — Date and time. [DateTime](../../sql-reference/data-types/datetime.md).
|
||||
- `value` — Date and time. [DateTime](/docs/en/sql-reference/data-types/datetime.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Input value converted to the [Int64](../data-types/int-uint.md) data type as the first Snowflake ID at that time.
|
||||
- Input value converted to the [Int64](/docs/en/sql-reference/data-types/int-uint.md) data type as the first Snowflake ID at that time.
|
||||
|
||||
**Example**
|
||||
|
||||
@ -1563,7 +1697,7 @@ Result:
|
||||
|
||||
## dateTime64ToSnowflake
|
||||
|
||||
Converts [DateTime64](../data-types/datetime64.md) to the first [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) at the given time.
|
||||
Converts [DateTime64](/docs/en/sql-reference/data-types/datetime64.md) to the first [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) at the given time.
|
||||
|
||||
**Syntax**
|
||||
|
||||
@ -1573,11 +1707,11 @@ dateTime64ToSnowflake(value)
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `value` — Date and time. [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
- `value` — Date and time. [DateTime64](/docs/en/sql-reference/data-types/datetime64.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Input value converted to the [Int64](../data-types/int-uint.md) data type as the first Snowflake ID at that time.
|
||||
- Input value converted to the [Int64](/docs/en/sql-reference/data-types/int-uint.md) data type as the first Snowflake ID at that time.
|
||||
|
||||
**Example**
|
||||
|
||||
|
@ -21,12 +21,11 @@ Subquery is another `SELECT` query that may be specified in parenthesis inside `
|
||||
|
||||
When `FINAL` is specified, ClickHouse fully merges the data before returning the result and thus performs all data transformations that happen during merges for the given table engine.
|
||||
|
||||
It is applicable when selecting data from tables that use the [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md)-engine family. Also supported for:
|
||||
It is applicable when selecting data from ReplacingMergeTree, SummingMergeTree, AggregatingMergeTree, CollapsingMergeTree and VersionedCollapsingMergeTree tables.
|
||||
|
||||
- [Replicated](../../../engines/table-engines/mergetree-family/replication.md) versions of `MergeTree` engines.
|
||||
- [View](../../../engines/table-engines/special/view.md), [Buffer](../../../engines/table-engines/special/buffer.md), [Distributed](../../../engines/table-engines/special/distributed.md), and [MaterializedView](../../../engines/table-engines/special/materializedview.md) engines that operate over other engines, provided they were created over `MergeTree`-engine tables.
|
||||
`SELECT` queries with `FINAL` are executed in parallel. The [max_final_threads](../../../operations/settings/settings.md#max-final-threads) setting limits the number of threads used.
|
||||
|
||||
Now `SELECT` queries with `FINAL` are executed in parallel and slightly faster. But there are drawbacks (see below). The [max_final_threads](../../../operations/settings/settings.md#max-final-threads) setting limits the number of threads used.
|
||||
There are drawbacks to using `FINAL` (see below).
|
||||
|
||||
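A minimal sketch (`visits` is a hypothetical ReplacingMergeTree table):

``` sql
SELECT * FROM visits FINAL WHERE CounterID = 1; -- row versions are fully merged before being returned
```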
### Drawbacks
|
||||
|
||||
|
@ -169,12 +169,6 @@ sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
|
||||
|
||||
cmake -D CMAKE_BUILD_TYPE=Debug ..
|
||||
|
||||
В случае использования на разработческой машине старого HDD или SSD, а также при желании использовать меньше места для артефактов сборки можно использовать следующую команду:
|
||||
```bash
|
||||
cmake -DUSE_DEBUG_HELPERS=1 -DUSE_STATIC_LIBRARIES=0 -DSPLIT_SHARED_LIBRARIES=1 ..
|
||||
```
|
||||
При этом надо учесть, что получаемые в результате сборки исполнимые файлы будут динамически слинкованы с библиотеками, и поэтому фактически станут непереносимыми на другие компьютеры (либо для этого нужно будет предпринять значительно больше усилий по сравнению со статической сборкой). Плюсом же в данном случае является значительно меньшее время сборки (это проявляется не на первой сборке, а на последующих, после внесения изменений в исходный код - тратится меньшее время на линковку по сравнению со статической сборкой) и значительно меньшее использование места на жёстком диске (экономия более, чем в 3 раза по сравнению со статической сборкой). Для целей разработки, когда планируются только отладочные запуски на том же компьютере, где осуществлялась сборка, это может быть наиболее удобным вариантом.
|
||||
|
||||
Вы можете изменить вариант сборки, выполнив новую команду в директории build.
|
||||
|
||||
Запустите ninja для сборки:
|
||||
|
@ -424,23 +424,23 @@ WITH toDateTime64('2020-01-01 10:20:30.999', 3) AS dt64 SELECT toStartOfSecond(d
|
||||
|
||||
## toRelativeYearNum {#torelativeyearnum}
|
||||
|
||||
Переводит дату-с-временем или дату в номер года, начиная с некоторого фиксированного момента в прошлом.
|
||||
Переводит дату или дату-с-временем в номер года, начиная с некоторого фиксированного момента в прошлом.
|
||||
|
||||
## toRelativeQuarterNum {#torelativequarternum}
|
||||
|
||||
Переводит дату-с-временем или дату в номер квартала, начиная с некоторого фиксированного момента в прошлом.
|
||||
Переводит дату или дату-с-временем в номер квартала, начиная с некоторого фиксированного момента в прошлом.
|
||||
|
||||
## toRelativeMonthNum {#torelativemonthnum}
|
||||
|
||||
Переводит дату-с-временем или дату в номер месяца, начиная с некоторого фиксированного момента в прошлом.
|
||||
Переводит дату или дату-с-временем в номер месяца, начиная с некоторого фиксированного момента в прошлом.
|
||||
|
||||
## toRelativeWeekNum {#torelativeweeknum}
|
||||
|
||||
Переводит дату-с-временем или дату в номер недели, начиная с некоторого фиксированного момента в прошлом.
|
||||
Переводит дату или дату-с-временем в номер недели, начиная с некоторого фиксированного момента в прошлом.
|
||||
|
||||
## toRelativeDayNum {#torelativedaynum}
|
||||
|
||||
Переводит дату-с-временем или дату в номер дня, начиная с некоторого фиксированного момента в прошлом.
|
||||
Переводит дату или дату-с-временем в номер дня, начиная с некоторого фиксированного момента в прошлом.
|
||||
|
||||
## toRelativeHourNum {#torelativehournum}
|
||||
|
||||
@ -456,7 +456,7 @@ WITH toDateTime64('2020-01-01 10:20:30.999', 3) AS dt64 SELECT toStartOfSecond(d
|
||||
|
||||
## toISOYear {#toisoyear}
|
||||
|
||||
Переводит дату-с-временем или дату в число типа UInt16, содержащее номер ISO года. ISO год отличается от обычного года, потому что в соответствии с [ISO 8601:1988](https://en.wikipedia.org/wiki/ISO_8601) ISO год начинается необязательно первого января.
|
||||
Переводит дату или дату-с-временем в число типа UInt16, содержащее номер ISO года. ISO год отличается от обычного года, потому что в соответствии с [ISO 8601:1988](https://en.wikipedia.org/wiki/ISO_8601) ISO год начинается необязательно первого января.
|
||||
|
||||
**Пример**
|
||||
|
||||
@ -479,7 +479,7 @@ SELECT
|
||||
|
||||
## toISOWeek {#toisoweek}
|
||||
|
||||
Переводит дату-с-временем или дату в число типа UInt8, содержащее номер ISO недели.
|
||||
Переводит дату или дату-с-временем в число типа UInt8, содержащее номер ISO недели.
|
||||
Начало ISO года отличается от начала обычного года, потому что в соответствии с [ISO 8601:1988](https://en.wikipedia.org/wiki/ISO_8601) первая неделя года - это неделя с четырьмя или более днями в этом году.
|
||||
|
||||
1 Января 2017 г. - воскресение, т.е. первая ISO неделя 2017 года началась в понедельник 2 января, поэтому 1 января 2017 это последняя неделя 2016 года.
|
||||
@ -503,7 +503,7 @@ SELECT
|
||||
```
|
||||
|
||||
## toWeek(date\[, mode\]\[, timezone\]) {#toweek}
|
||||
Переводит дату-с-временем или дату в число UInt8, содержащее номер недели. Второй аргументам mode задает режим, начинается ли неделя с воскресенья или с понедельника и должно ли возвращаемое значение находиться в диапазоне от 0 до 53 или от 1 до 53. Если аргумент mode опущен, то используется режим 0.
|
||||
Переводит дату или дату-с-временем в число UInt8, содержащее номер недели. Второй аргументам mode задает режим, начинается ли неделя с воскресенья или с понедельника и должно ли возвращаемое значение находиться в диапазоне от 0 до 53 или от 1 до 53. Если аргумент mode опущен, то используется режим 0.
|
||||
|
||||
`toISOWeek() ` эквивалентно `toWeek(date,3)`.
|
||||
|
||||
@ -569,6 +569,132 @@ SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(d
|
||||
└────────────┴───────────┴───────────┴───────────┘
|
||||
```
|
||||
|
||||
## age
|
||||
|
||||
Вычисляет компонент `unit` разницы между `startdate` и `enddate`. Разница вычисляется с точностью в 1 секунду.
|
||||
Например, разница между `2021-12-29` и `2022-01-01` 3 дня для единицы `day`, 0 месяцев для единицы `month`, 0 лет для единицы `year`.
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
age('unit', startdate, enddate, [timezone])
|
||||
```
|
||||
|
||||
**Аргументы**
|
||||
|
||||
- `unit` — единица измерения времени, в которой будет выражено возвращаемое значение функции. [String](../../sql-reference/data-types/string.md).
|
||||
Возможные значения:
|
||||
|
||||
- `second` (возможные сокращения: `ss`, `s`)
|
||||
- `minute` (возможные сокращения: `mi`, `n`)
|
||||
- `hour` (возможные сокращения: `hh`, `h`)
|
||||
- `day` (возможные сокращения: `dd`, `d`)
|
||||
- `week` (возможные сокращения: `wk`, `ww`)
|
||||
- `month` (возможные сокращения: `mm`, `m`)
|
||||
- `quarter` (возможные сокращения: `qq`, `q`)
|
||||
- `year` (возможные сокращения: `yyyy`, `yy`)
|
||||
|
||||
- `startdate` — первая дата или дата со временем, которая вычитается из `enddate`. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `enddate` — вторая дата или дата со временем, из которой вычитается `startdate`. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `timezone` — [часовой пояс](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (необязательно). Если этот аргумент указан, то он применяется как для `startdate`, так и для `enddate`. Если этот аргумент не указан, то используются часовые пояса аргументов `startdate` и `enddate`. Если часовые пояса аргументов `startdate` и `enddate` не совпадают, то результат не определен. [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Возвращаемое значение**
|
||||
|
||||
Разница между `enddate` и `startdate`, выраженная в `unit`.
|
||||
|
||||
Тип: [Int](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Пример**
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
SELECT age('hour', toDateTime('2018-01-01 22:30:00'), toDateTime('2018-01-02 23:00:00'));
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
┌─age('hour', toDateTime('2018-01-01 22:30:00'), toDateTime('2018-01-02 23:00:00'))─┐
|
||||
│ 24 │
|
||||
└───────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
SELECT
|
||||
toDate('2022-01-01') AS e,
|
||||
toDate('2021-12-29') AS s,
|
||||
age('day', s, e) AS day_age,
|
||||
age('month', s, e) AS month__age,
|
||||
age('year', s, e) AS year_age;
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
┌──────────e─┬──────────s─┬─day_age─┬─month__age─┬─year_age─┐
|
||||
│ 2022-01-01 │ 2021-12-29 │ 3 │ 0 │ 0 │
|
||||
└────────────┴────────────┴─────────┴────────────┴──────────┘
|
||||
```
|
||||
|
||||
## date\_diff {#date_diff}
|
||||
|
||||
Вычисляет разницу указанных границ `unit` пересекаемых между `startdate` и `enddate`.
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
date_diff('unit', startdate, enddate, [timezone])
|
||||
```
|
||||
|
||||
Синонимы: `dateDiff`, `DATE_DIFF`.
|
||||
|
||||
**Аргументы**
|
||||
|
||||
- `unit` — единица измерения времени, в которой будет выражено возвращаемое значение функции. [String](../../sql-reference/data-types/string.md).
|
||||
Возможные значения:
|
||||
|
||||
- `second` (возможные сокращения: `ss`, `s`)
|
||||
- `minute` (возможные сокращения: `mi`, `n`)
|
||||
- `hour` (возможные сокращения: `hh`, `h`)
|
||||
- `day` (возможные сокращения: `dd`, `d`)
|
||||
- `week` (возможные сокращения: `wk`, `ww`)
|
||||
- `month` (возможные сокращения: `mm`, `m`)
|
||||
- `quarter` (возможные сокращения: `qq`, `q`)
|
||||
- `year` (возможные сокращения: `yyyy`, `yy`)
|
||||
|
||||
- `startdate` — первая дата или дата со временем, которая вычитается из `enddate`. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `enddate` — вторая дата или дата со временем, из которой вычитается `startdate`. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `timezone` — [часовой пояс](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (необязательно). Если этот аргумент указан, то он применяется как для `startdate`, так и для `enddate`. Если этот аргумент не указан, то используются часовые пояса аргументов `startdate` и `enddate`. Если часовые пояса аргументов `startdate` и `enddate` не совпадают, то результат не определен. [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Возвращаемое значение**
|
||||
|
||||
Разница между `enddate` и `startdate`, выраженная в `unit`.
|
||||
|
||||
Тип: [Int](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Пример**
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'));
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
┌─dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'))─┐
|
||||
│ 25 │
|
||||
└────────────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## date_trunc {#date_trunc}
|
||||
|
||||
Отсекает от даты и времени части, меньшие чем указанная часть.
|
||||
@ -689,60 +815,6 @@ SELECT date_add(YEAR, 3, toDate('2018-01-01'));
|
||||
└───────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## date\_diff {#date_diff}
|
||||
|
||||
Вычисляет разницу между двумя значениями дат или дат со временем.
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
date_diff('unit', startdate, enddate, [timezone])
|
||||
```
|
||||
|
||||
Синонимы: `dateDiff`, `DATE_DIFF`.
|
||||
|
||||
**Аргументы**
|
||||
|
||||
- `unit` — единица измерения времени, в которой будет выражено возвращаемое значение функции. [String](../../sql-reference/data-types/string.md).
|
||||
Возможные значения:
|
||||
|
||||
- `second`
|
||||
- `minute`
|
||||
- `hour`
|
||||
- `day`
|
||||
- `week`
|
||||
- `month`
|
||||
- `quarter`
|
||||
- `year`
|
||||
|
||||
- `startdate` — первая дата или дата со временем, которая вычитается из `enddate`. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `enddate` — вторая дата или дата со временем, из которой вычитается `startdate`. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `timezone` — [часовой пояс](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (необязательно). Если этот аргумент указан, то он применяется как для `startdate`, так и для `enddate`. Если этот аргумент не указан, то используются часовые пояса аргументов `startdate` и `enddate`. Если часовые пояса аргументов `startdate` и `enddate` не совпадают, то результат не определен. [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Возвращаемое значение**
|
||||
|
||||
Разница между `enddate` и `startdate`, выраженная в `unit`.
|
||||
|
||||
Тип: [Int](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Пример**
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'));
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
┌─dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'))─┐
|
||||
│ 25 │
|
||||
└────────────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## date\_sub {#date_sub}
|
||||
|
||||
Вычитает интервал времени или даты из указанной даты или даты со временем.
|
||||
|
@ -1316,7 +1316,7 @@ formatRow(format, x, y, ...)
|
||||
|
||||
**Возвращаемое значение**
|
||||
|
||||
- Отформатированная строка (в текстовых форматах обычно с завершающим переводом строки).
|
||||
- Отформатированная строка. (в текстовых форматах обычно с завершающим переводом строки).
|
||||
|
||||
**Пример**
|
||||
|
||||
@ -1340,9 +1340,39 @@ FROM numbers(3);
|
||||
└──────────────────────────────────┘
|
||||
```
|
||||
|
||||
**Примечание**: если формат содержит префикс/суффикс, то он будет записан в каждой строке.
|
||||
|
||||
**Пример**
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
SELECT formatRow('CustomSeparated', number, 'good')
|
||||
FROM numbers(3)
|
||||
SETTINGS format_custom_result_before_delimiter='<prefix>\n', format_custom_result_after_delimiter='<suffix>'
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
┌─formatRow('CustomSeparated', number, 'good')─┐
|
||||
│ <prefix>
|
||||
0 good
|
||||
<suffix> │
|
||||
│ <prefix>
|
||||
1 good
|
||||
<suffix> │
|
||||
│ <prefix>
|
||||
2 good
|
||||
<suffix> │
|
||||
└──────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
**Примечание**: данная функция поддерживает только строковые форматы вывода.
|
||||
|
||||
## formatRowNoNewline {#formatrownonewline}
|
||||
|
||||
Преобразует произвольные выражения в строку заданного формата. При этом удаляет лишние переводы строк `\n`, если они появились.
|
||||
Преобразует произвольные выражения в строку заданного формата. Отличается от функции formatRow тем, что удаляет лишний перевод строки `\n` а конце, если он есть.
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
|
@ -85,7 +85,6 @@ git push
|
||||
- **Build type**: `Debug` or `RelWithDebInfo` (cmake).
|
||||
- **Sanitizer**: `none` (without sanitizers), `address` (ASan), `memory` (MSan), `undefined` (UBSan), or `thread` (TSan).
|
||||
- **Bundled**: `bundled` 构建使用来自 `contrib` 库, 而 `unbundled` 构建使用系统库.
|
||||
- **Splitted**: `splitted` is a [split build](https://clickhouse.com/docs/en/development/build/#split-build)
|
||||
- **Status**: `成功` 或 `失败`
|
||||
- **Build log**: 链接到构建和文件复制日志, 当构建失败时很有用.
|
||||
- **Build time**.
|
||||
@ -97,7 +96,6 @@ git push
|
||||
- `clickhouse`: Main built binary.
|
||||
- `clickhouse-odbc-bridge`
|
||||
- `unit_tests_dbms`: 带有 ClickHouse 单元测试的 GoogleTest 二进制文件.
|
||||
- `shared_build.tgz`: 使用共享库构建.
|
||||
- `performance.tgz`: 用于性能测试的特殊包.
|
||||
|
||||
## 特殊构建检查 {#special-buildcheck}
|
||||
@ -123,14 +121,6 @@ git push
|
||||
of error.
|
||||
```
|
||||
|
||||
## 冒烟测试 {#split-build-smoke-test}
|
||||
检查[拆分构建](./build.md#split-build)配置中的服务器构建是否可以启动并运行简单查询.如果失败:
|
||||
```
|
||||
* Fix other test errors first;
|
||||
* Build the server in [split build](./build.md#split-build) configuration
|
||||
locally and check whether it can start and run `select 1`.
|
||||
```
|
||||
|
||||
## 兼容性检查 {#compatibility-check}
|
||||
检查`clickhouse`二进制文件是否可以在带有旧libc版本的发行版上运行.如果失败, 请向维护人员寻求帮助.
|
||||
|
||||
|
@ -11,7 +11,7 @@ sidebar_position: 29
|
||||
这系列的引擎有:
|
||||
|
||||
- [StripeLog](stripelog.md)
|
||||
- [日志](log.md)
|
||||
- [Log](log.md)
|
||||
- [TinyLog](tinylog.md)
|
||||
|
||||
## 共同属性 {#table_engines-log-engine-family-common-properties}
|
||||
|
@ -13,12 +13,6 @@ option (ENABLE_CLICKHOUSE_SERVER "Server mode (main mode)" ${ENABLE_CLICKHOUSE_A
|
||||
option (ENABLE_CLICKHOUSE_CLIENT "Client mode (interactive tui/shell that connects to the server)"
|
||||
${ENABLE_CLICKHOUSE_ALL})
|
||||
|
||||
# Don't create self-extracting clickhouse for split build
|
||||
if (ENABLE_CLICKHOUSE_SELF_EXTRACTING AND SPLIT_SHARED_LIBRARIES)
|
||||
message (STATUS "Self-extracting on split build is not supported")
|
||||
unset (ENABLE_CLICKHOUSE_SELF_EXTRACTING CACHE)
|
||||
endif ()
|
||||
|
||||
# https://clickhouse.com/docs/en/operations/utilities/clickhouse-local/
|
||||
option (ENABLE_CLICKHOUSE_LOCAL "Local files fast processing mode" ${ENABLE_CLICKHOUSE_ALL})
|
||||
|
||||
@ -173,10 +167,6 @@ else()
|
||||
message(STATUS "ClickHouse keeper-converter mode: OFF")
|
||||
endif()
|
||||
|
||||
if(NOT (USE_STATIC_LIBRARIES OR SPLIT_SHARED_LIBRARIES))
|
||||
set(CLICKHOUSE_ONE_SHARED ON)
|
||||
endif()
|
||||
|
||||
if (ENABLE_CLICKHOUSE_DISKS)
|
||||
message(STATUS "Clickhouse disks mode: ON")
|
||||
else()
|
||||
@ -192,11 +182,7 @@ endif()
|
||||
configure_file (config_tools.h.in ${CONFIG_INCLUDE_PATH}/config_tools.h)
|
||||
|
||||
macro(clickhouse_target_link_split_lib target name)
|
||||
if(NOT CLICKHOUSE_ONE_SHARED)
|
||||
target_link_libraries(${target} PRIVATE clickhouse-${name}-lib)
|
||||
else()
|
||||
target_link_libraries(${target} PRIVATE clickhouse-lib)
|
||||
endif()
|
||||
target_link_libraries(${target} PRIVATE clickhouse-${name}-lib)
|
||||
endmacro()
|
||||
|
||||
macro(clickhouse_program_add_library name)
|
||||
@ -208,18 +194,16 @@ macro(clickhouse_program_add_library name)
|
||||
set(CLICKHOUSE_${name_uc}_LINK ${CLICKHOUSE_${name_uc}_LINK} PARENT_SCOPE)
|
||||
set(CLICKHOUSE_${name_uc}_INCLUDE ${CLICKHOUSE_${name_uc}_INCLUDE} PARENT_SCOPE)
|
||||
|
||||
if(NOT CLICKHOUSE_ONE_SHARED)
|
||||
add_library(clickhouse-${name}-lib ${CLICKHOUSE_${name_uc}_SOURCES})
|
||||
add_library(clickhouse-${name}-lib ${CLICKHOUSE_${name_uc}_SOURCES})
|
||||
|
||||
set(_link ${CLICKHOUSE_${name_uc}_LINK}) # can't use ${} in if()
|
||||
if(_link)
|
||||
target_link_libraries(clickhouse-${name}-lib ${CLICKHOUSE_${name_uc}_LINK})
|
||||
endif()
|
||||
set(_link ${CLICKHOUSE_${name_uc}_LINK}) # can't use ${} in if()
|
||||
if(_link)
|
||||
target_link_libraries(clickhouse-${name}-lib ${CLICKHOUSE_${name_uc}_LINK})
|
||||
endif()
|
||||
|
||||
set(_include ${CLICKHOUSE_${name_uc}_INCLUDE}) # can't use ${} in if()
|
||||
if (_include)
|
||||
target_include_directories(clickhouse-${name}-lib ${CLICKHOUSE_${name_uc}_INCLUDE})
|
||||
endif()
|
||||
set(_include ${CLICKHOUSE_${name_uc}_INCLUDE}) # can't use ${} in if()
|
||||
if (_include)
|
||||
target_include_directories(clickhouse-${name}-lib ${CLICKHOUSE_${name_uc}_INCLUDE})
|
||||
endif()
|
||||
endmacro()
|
||||
|
||||
@ -263,68 +247,8 @@ if (ENABLE_CLICKHOUSE_SELF_EXTRACTING)
|
||||
add_subdirectory (self-extracting)
|
||||
endif ()
|
||||
|
||||
if (CLICKHOUSE_ONE_SHARED)
|
||||
add_library(clickhouse-lib SHARED
|
||||
${CLICKHOUSE_SERVER_SOURCES}
|
||||
${CLICKHOUSE_CLIENT_SOURCES}
|
||||
${CLICKHOUSE_LOCAL_SOURCES}
|
||||
${CLICKHOUSE_BENCHMARK_SOURCES}
|
||||
${CLICKHOUSE_COPIER_SOURCES}
|
||||
${CLICKHOUSE_EXTRACT_FROM_CONFIG_SOURCES}
|
||||
${CLICKHOUSE_COMPRESSOR_SOURCES}
|
||||
${CLICKHOUSE_FORMAT_SOURCES}
|
||||
${CLICKHOUSE_OBFUSCATOR_SOURCES}
|
||||
${CLICKHOUSE_GIT_IMPORT_SOURCES}
|
||||
${CLICKHOUSE_ODBC_BRIDGE_SOURCES}
|
||||
${CLICKHOUSE_KEEPER_SOURCES}
|
||||
${CLICKHOUSE_KEEPER_CONVERTER_SOURCES}
|
||||
${CLICKHOUSE_STATIC_FILES_DISK_UPLOADER_SOURCES}
|
||||
${CLICKHOUSE_SU_SOURCES})
|
||||
|
||||
target_link_libraries(clickhouse-lib
|
||||
${CLICKHOUSE_SERVER_LINK}
|
||||
${CLICKHOUSE_CLIENT_LINK}
|
||||
${CLICKHOUSE_LOCAL_LINK}
|
||||
${CLICKHOUSE_BENCHMARK_LINK}
|
||||
${CLICKHOUSE_COPIER_LINK}
|
||||
${CLICKHOUSE_EXTRACT_FROM_CONFIG_LINK}
|
||||
${CLICKHOUSE_COMPRESSOR_LINK}
|
||||
${CLICKHOUSE_FORMAT_LINK}
|
||||
${CLICKHOUSE_OBFUSCATOR_LINK}
|
||||
${CLICKHOUSE_GIT_IMPORT_LINK}
|
||||
${CLICKHOUSE_ODBC_BRIDGE_LINK}
|
||||
${CLICKHOUSE_KEEPER_LINK}
|
||||
${CLICKHOUSE_KEEPER_CONVERTER_LINK}
|
||||
${CLICKHOUSE_STATIC_FILES_DISK_UPLOADER_LINK}
|
||||
${CLICKHOUSE_SU_LINK})
|
||||
|
||||
target_include_directories(clickhouse-lib
|
||||
${CLICKHOUSE_SERVER_INCLUDE}
|
||||
${CLICKHOUSE_CLIENT_INCLUDE}
|
||||
${CLICKHOUSE_LOCAL_INCLUDE}
|
||||
${CLICKHOUSE_BENCHMARK_INCLUDE}
|
||||
${CLICKHOUSE_COPIER_INCLUDE}
|
||||
${CLICKHOUSE_EXTRACT_FROM_CONFIG_INCLUDE}
|
||||
${CLICKHOUSE_COMPRESSOR_INCLUDE}
|
||||
${CLICKHOUSE_FORMAT_INCLUDE}
|
||||
${CLICKHOUSE_OBFUSCATOR_INCLUDE}
|
||||
${CLICKHOUSE_GIT_IMPORT_INCLUDE}
|
||||
${CLICKHOUSE_ODBC_BRIDGE_INCLUDE}
|
||||
${CLICKHOUSE_KEEPER_INCLUDE}
|
||||
${CLICKHOUSE_KEEPER_CONVERTER_INCLUDE})
|
||||
|
||||
set_target_properties(clickhouse-lib PROPERTIES SOVERSION ${VERSION_MAJOR}.${VERSION_MINOR} VERSION ${VERSION_SO} OUTPUT_NAME clickhouse DEBUG_POSTFIX "")
|
||||
install (TARGETS clickhouse-lib LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT clickhouse)
|
||||
endif()
|
||||
|
||||
clickhouse_add_executable (clickhouse main.cpp)
|
||||
|
||||
if (NOT USE_STATIC_LIBRARIES AND SPLIT_SHARED_LIBRARIES)
|
||||
# Shared split (dev) build: In CI, the server is run with custom LD_LIBRARY_PATH. This makes the harmful env check re-execute the
|
||||
# process in a clean environment but as in CI the containing directory is not included in DT_RUNPATH/DT_RPATH, the server won't come up.
|
||||
target_compile_definitions(clickhouse PRIVATE DISABLE_HARMFUL_ENV_VAR_CHECK)
|
||||
endif ()
|
||||
|
||||
# A library that prevent usage of several functions from libc.
|
||||
if (ARCH_AMD64 AND OS_LINUX AND NOT OS_ANDROID)
|
||||
set (HARMFUL_LIB harmful)
|
||||
|
@ -683,7 +683,7 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)
|
||||
("confidence", value<size_t>()->default_value(5), "set the level of confidence for T-test [0=80%, 1=90%, 2=95%, 3=98%, 4=99%, 5=99.5%(default)")
|
||||
("query_id", value<std::string>()->default_value(""), "")
|
||||
("max-consecutive-errors", value<size_t>()->default_value(0), "set number of allowed consecutive errors")
|
||||
("continue_on_errors", "continue testing even if a query fails")
|
||||
("ignore-error,continue_on_errors", "continue testing even if a query fails")
|
||||
("reconnect", "establish new connection for every query")
|
||||
("client-side-time", "display the time including network communication instead of server-side time; note that for server versions before 22.8 we always display client-side time")
|
||||
;
|
||||
@ -738,7 +738,7 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)
|
||||
options["query_id"].as<std::string>(),
|
||||
options["query"].as<std::string>(),
|
||||
options["max-consecutive-errors"].as<size_t>(),
|
||||
options.count("continue_on_errors"),
|
||||
options.count("ignore-error"),
|
||||
options.count("reconnect"),
|
||||
options.count("client-side-time"),
|
||||
print_stacktrace,
|
||||
|
@ -10,6 +10,4 @@ set (CLICKHOUSE_BENCHMARK_LINK
|
||||
|
||||
clickhouse_program_add(benchmark)
|
||||
|
||||
if(NOT CLICKHOUSE_ONE_SHARED)
|
||||
target_link_libraries (clickhouse-benchmark-lib PRIVATE clickhouse-client-lib)
|
||||
endif()
|
||||
target_link_libraries (clickhouse-benchmark-lib PRIVATE clickhouse-client-lib)
|
||||
|
@ -1,12 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
# Helper for split build mode.
|
||||
# Allows to run commands like
|
||||
# clickhouse client
|
||||
# clickhouse server
|
||||
# ...
|
||||
|
||||
set -e
|
||||
CMD=$1
|
||||
shift
|
||||
clickhouse-$CMD $*
|
@ -13,6 +13,10 @@ set (CLICKHOUSE_CLIENT_LINK
|
||||
string_utils
|
||||
)
|
||||
|
||||
if (TARGET ch_rust::skim)
|
||||
list(APPEND CLICKHOUSE_CLIENT_LINK PRIVATE ch_rust::skim)
|
||||
endif()
|
||||
|
||||
# Always use internal readpassphrase
|
||||
list(APPEND CLICKHOUSE_CLIENT_LINK PRIVATE readpassphrase)
|
||||
|
||||
|
@ -30,9 +30,10 @@
|
||||
|
||||
#include <IO/ReadBufferFromString.h>
|
||||
#include <IO/ReadHelpers.h>
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <IO/WriteBufferFromOStream.h>
|
||||
#include <IO/UseSSL.h>
|
||||
#include <IO/WriteBufferFromOStream.h>
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <IO/copyData.h>
|
||||
|
||||
#include <Parsers/ASTCreateQuery.h>
|
||||
#include <Parsers/ASTDropQuery.h>
|
||||
@ -41,6 +42,8 @@
|
||||
#include <Parsers/ASTInsertQuery.h>
|
||||
#include <Parsers/ASTSelectQuery.h>
|
||||
|
||||
#include <Processors/Transforms/getSourceFromASTInsertQuery.h>
|
||||
|
||||
#include <Interpreters/InterpreterSetQuery.h>
|
||||
|
||||
#include <Functions/registerFunctions.h>
|
||||
@ -827,6 +830,20 @@ bool Client::processWithFuzzing(const String & full_query)
|
||||
WriteBufferFromOStream ast_buf(std::cout, 4096);
|
||||
formatAST(*query, ast_buf, false /*highlight*/);
|
||||
ast_buf.next();
|
||||
if (const auto * insert = query->as<ASTInsertQuery>())
|
||||
{
|
||||
/// For inserts with data it's really useful to have the data itself available in the logs, as formatAST doesn't print it
|
||||
if (insert->hasInlinedData())
|
||||
{
|
||||
String bytes;
|
||||
{
|
||||
auto read_buf = getReadBufferFromASTInsertQuery(query);
|
||||
WriteBufferFromString write_buf(bytes);
|
||||
copyData(*read_buf, write_buf);
|
||||
}
|
||||
std::cout << std::endl << bytes;
|
||||
}
|
||||
}
|
||||
std::cout << std::endl << std::endl;
|
||||
|
||||
try
|
||||
|