Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-24 16:42:05 +00:00)

Commit 6bc68c0cbc: Merge branch 'master' into fixes_for_transactions
.clang-tidy

@@ -1,3 +1,6 @@
# Enable all checks + disable selected checks. Feel free to remove disabled checks from below list if
# a) the new check is not controversial (this includes many checks in readability-* and google-*) or
# b) too noisy (checks with > 100 new warnings are considered noisy, this includes e.g. cppcoreguidelines-*).
Checks: '*,
-abseil-*,

@@ -115,6 +118,7 @@ Checks: '*,
-readability-else-after-return,
-readability-function-cognitive-complexity,
-readability-function-size,
-readability-identifier-length,
-readability-implicit-bool-conversion,
-readability-isolate-declaration,
-readability-magic-numbers,
.github/workflows/backport_branches.yml (vendored, 15 changed lines)
@@ -131,7 +131,6 @@ jobs:
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
CHECK_NAME=ClickHouse build check (actions)
BUILD_NAME=package_release
EOF
- name: Download changed images
@@ -151,7 +150,7 @@ jobs:
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
@@ -177,7 +176,6 @@ jobs:
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
CHECK_NAME=ClickHouse build check (actions)
BUILD_NAME=package_aarch64
EOF
- name: Download changed images
@@ -197,7 +195,7 @@ jobs:
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
@@ -223,7 +221,6 @@ jobs:
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
CHECK_NAME=ClickHouse build check (actions)
BUILD_NAME=package_asan
EOF
- name: Download changed images
@@ -243,7 +240,7 @@ jobs:
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
@@ -269,7 +266,6 @@ jobs:
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
CHECK_NAME=ClickHouse build check (actions)
BUILD_NAME=package_tsan
EOF
- name: Download changed images
@@ -289,7 +285,7 @@ jobs:
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
@@ -315,7 +311,6 @@ jobs:
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
CHECK_NAME=ClickHouse build check (actions)
BUILD_NAME=package_debug
EOF
- name: Download changed images
@@ -335,7 +330,7 @@ jobs:
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
.github/workflows/docs_release.yml (vendored, 11 changed lines)
@@ -7,16 +7,17 @@ env:
concurrency:
group: master-release
cancel-in-progress: true
on: # yamllint disable-line rule:truthy
'on':
push:
branches:
- master
paths:
- 'docs/**'
- 'website/**'
- 'benchmark/**'
- 'docker/**'
- '.github/**'
- 'benchmark/**'
- 'docker/docs/release/**'
- 'docs/**'
- 'utils/list-versions/version_date.tsv'
- 'website/**'
workflow_dispatch:
jobs:
DockerHubPushAarch64:
.github/workflows/master.yml (vendored, 110 changed lines)
@ -199,7 +199,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=package_release
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -213,7 +212,7 @@ jobs:
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0 # otherwise we will have no info about contributors
|
||||
fetch-depth: 0 # For a proper version and performance artifacts
|
||||
- name: Build
|
||||
run: |
|
||||
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
|
||||
@ -221,7 +220,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
@ -247,7 +246,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=package_aarch64
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -258,7 +256,7 @@ jobs:
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0 # otherwise we will have no info about contributors
|
||||
fetch-depth: 0 # For a proper version and performance artifacts
|
||||
- name: Build
|
||||
run: |
|
||||
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
|
||||
@ -266,7 +264,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
@ -280,54 +278,6 @@ jobs:
|
||||
# shellcheck disable=SC2046
|
||||
docker rm -f $(docker ps -a -q) ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
BuilderPerformance:
|
||||
needs: DockerHubPush
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=performance
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0 # is needed for ancestor commit search
|
||||
- name: Build
|
||||
run: |
|
||||
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
|
||||
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
# shellcheck disable=SC2046
|
||||
docker kill $(docker ps -q) ||:
|
||||
# shellcheck disable=SC2046
|
||||
docker rm -f $(docker ps -a -q) ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
BuilderBinRelease:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
@ -339,7 +289,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=binary_release
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -361,7 +310,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
@ -387,7 +336,6 @@ jobs:
|
||||
# IMAGES_PATH=${{runner.temp}}/images_path
|
||||
# REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
# CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
# CHECK_NAME=ClickHouse build check (actions)
|
||||
# BUILD_NAME=binary_gcc
|
||||
# EOF
|
||||
# - name: Download changed images
|
||||
@ -407,7 +355,7 @@ jobs:
|
||||
# sudo rm -fr "$TEMP_PATH"
|
||||
# mkdir -p "$TEMP_PATH"
|
||||
# cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
# cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
# cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
# - name: Upload build URLs to artifacts
|
||||
# if: ${{ success() || failure() }}
|
||||
# uses: actions/upload-artifact@v2
|
||||
@ -433,7 +381,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=package_asan
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -453,7 +400,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
@ -479,7 +426,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=package_ubsan
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -499,7 +445,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
@ -525,7 +471,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=package_tsan
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -545,7 +490,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
@ -571,7 +516,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=package_msan
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -591,7 +535,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
@ -617,7 +561,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=package_debug
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -637,7 +580,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
@ -666,7 +609,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=binary_splitted
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -686,7 +628,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
@ -712,7 +654,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=binary_tidy
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -732,7 +673,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
@ -758,7 +699,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=binary_darwin
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -780,7 +720,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
@ -806,7 +746,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=binary_aarch64
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -828,7 +767,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
@ -854,7 +793,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=binary_freebsd
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -876,7 +814,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
@ -902,7 +840,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=binary_darwin_aarch64
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -924,7 +861,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
@ -950,7 +887,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=binary_ppc64le
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -972,7 +908,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
@ -1002,7 +938,7 @@ jobs:
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0 # otherwise we will have no version info
|
||||
fetch-depth: 0 # It MUST BE THE SAME for all dependencies and the job itself
|
||||
- name: Check docker clickhouse/clickhouse-server building
|
||||
run: |
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
@ -2969,7 +2905,7 @@ jobs:
|
||||
#################################### PERFORMANCE TESTS ######################################
|
||||
#############################################################################################
|
||||
PerformanceComparison0:
|
||||
needs: [BuilderPerformance]
|
||||
needs: [BuilderDebRelease]
|
||||
runs-on: [self-hosted, stress-tester]
|
||||
steps:
|
||||
- name: Set envs
|
||||
@ -3007,7 +2943,7 @@ jobs:
|
||||
docker rm -f $(docker ps -a -q) ||:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
PerformanceComparison1:
|
||||
needs: [BuilderPerformance]
|
||||
needs: [BuilderDebRelease]
|
||||
runs-on: [self-hosted, stress-tester]
|
||||
steps:
|
||||
- name: Set envs
|
||||
@ -3045,7 +2981,7 @@ jobs:
|
||||
docker rm -f $(docker ps -a -q) ||:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
PerformanceComparison2:
|
||||
needs: [BuilderPerformance]
|
||||
needs: [BuilderDebRelease]
|
||||
runs-on: [self-hosted, stress-tester]
|
||||
steps:
|
||||
- name: Set envs
|
||||
@ -3083,7 +3019,7 @@ jobs:
|
||||
docker rm -f $(docker ps -a -q) ||:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
PerformanceComparison3:
|
||||
needs: [BuilderPerformance]
|
||||
needs: [BuilderDebRelease]
|
||||
runs-on: [self-hosted, stress-tester]
|
||||
steps:
|
||||
- name: Set envs
|
||||
|
.github/workflows/pull_request.yml (vendored, 110 changed lines)
@ -260,7 +260,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=package_release
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -273,6 +272,8 @@ jobs:
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0 # for performance artifact
|
||||
- name: Build
|
||||
run: |
|
||||
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
|
||||
@ -280,7 +281,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
@ -295,54 +296,6 @@ jobs:
|
||||
# shellcheck disable=SC2046
|
||||
docker rm -f $(docker ps -a -q) ||:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
BuilderPerformance:
|
||||
needs: [DockerHubPush, FastTest]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/build_check
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=performance
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: changed_images
|
||||
path: ${{ env.IMAGES_PATH }}
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0 # is needed for ancestor commit search
|
||||
- name: Build
|
||||
run: |
|
||||
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
|
||||
git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ env.BUILD_URLS }}
|
||||
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
# shellcheck disable=SC2046
|
||||
docker kill $(docker ps -q) ||:
|
||||
# shellcheck disable=SC2046
|
||||
docker rm -f $(docker ps -a -q) ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
BuilderBinRelease:
|
||||
needs: [DockerHubPush, FastTest]
|
||||
runs-on: [self-hosted, builder]
|
||||
@ -354,7 +307,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=binary_release
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -374,7 +326,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
@ -400,7 +352,6 @@ jobs:
|
||||
# IMAGES_PATH=${{runner.temp}}/images_path
|
||||
# REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
# CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
# CHECK_NAME=ClickHouse build check (actions)
|
||||
# BUILD_NAME=binary_gcc
|
||||
# EOF
|
||||
# - name: Download changed images
|
||||
@ -420,7 +371,7 @@ jobs:
|
||||
# sudo rm -fr "$TEMP_PATH"
|
||||
# mkdir -p "$TEMP_PATH"
|
||||
# cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
# cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
# cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
# - name: Upload build URLs to artifacts
|
||||
# if: ${{ success() || failure() }}
|
||||
# uses: actions/upload-artifact@v2
|
||||
@ -446,7 +397,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=package_aarch64
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -459,6 +409,8 @@ jobs:
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0 # for performance artifact
|
||||
- name: Build
|
||||
run: |
|
||||
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
|
||||
@ -466,7 +418,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
@ -492,7 +444,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=package_asan
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -512,7 +463,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
@ -538,7 +489,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=package_ubsan
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -558,7 +508,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
@ -584,7 +534,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=package_tsan
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -604,7 +553,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
@ -630,7 +579,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=package_msan
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -650,7 +598,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
@ -676,7 +624,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=package_debug
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -696,7 +643,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
@ -725,7 +672,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=binary_splitted
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -745,7 +691,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
@ -771,7 +717,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=binary_tidy
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -791,7 +736,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
@ -817,7 +762,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=binary_darwin
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -837,7 +781,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
@ -863,7 +807,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=binary_aarch64
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -883,7 +826,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
@ -909,7 +852,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=binary_freebsd
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -929,7 +871,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
@ -955,7 +897,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=binary_darwin_aarch64
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -975,7 +916,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
@ -1001,7 +942,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=binary_ppc64le
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -1021,7 +961,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
@ -1051,7 +991,7 @@ jobs:
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 1 # It MUST BE THE SAME for all dependencies and the job itself
|
||||
fetch-depth: 0 # It MUST BE THE SAME for all dependencies and the job itself
|
||||
- name: Check docker clickhouse/clickhouse-server building
|
||||
run: |
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
@ -3180,7 +3120,7 @@ jobs:
|
||||
#################################### PERFORMANCE TESTS ######################################
|
||||
#############################################################################################
|
||||
PerformanceComparison0:
|
||||
needs: [BuilderPerformance]
|
||||
needs: [BuilderDebRelease]
|
||||
runs-on: [self-hosted, stress-tester]
|
||||
steps:
|
||||
- name: Set envs
|
||||
@ -3218,7 +3158,7 @@ jobs:
|
||||
docker rm -f $(docker ps -a -q) ||:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
PerformanceComparison1:
|
||||
needs: [BuilderPerformance]
|
||||
needs: [BuilderDebRelease]
|
||||
runs-on: [self-hosted, stress-tester]
|
||||
steps:
|
||||
- name: Set envs
|
||||
@ -3256,7 +3196,7 @@ jobs:
|
||||
docker rm -f $(docker ps -a -q) ||:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
PerformanceComparison2:
|
||||
needs: [BuilderPerformance]
|
||||
needs: [BuilderDebRelease]
|
||||
runs-on: [self-hosted, stress-tester]
|
||||
steps:
|
||||
- name: Set envs
|
||||
@ -3294,7 +3234,7 @@ jobs:
|
||||
docker rm -f $(docker ps -a -q) ||:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
PerformanceComparison3:
|
||||
needs: [BuilderPerformance]
|
||||
needs: [BuilderDebRelease]
|
||||
runs-on: [self-hosted, stress-tester]
|
||||
steps:
|
||||
- name: Set envs
|
||||
|
.github/workflows/release_branches.yml (vendored, 21 changed lines)
@ -122,7 +122,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=package_release
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -144,7 +143,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
@ -170,7 +169,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=package_aarch64
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -189,7 +187,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
@ -214,7 +212,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=package_asan
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -234,7 +231,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
@ -260,7 +257,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=package_ubsan
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -280,7 +276,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
@ -306,7 +302,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=package_tsan
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -326,7 +321,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
@ -352,7 +347,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=package_msan
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -372,7 +366,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
@ -398,7 +392,6 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
BUILD_NAME=package_debug
|
||||
EOF
|
||||
- name: Download changed images
|
||||
@ -418,7 +411,7 @@ jobs:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME"
|
||||
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
|
||||
- name: Upload build URLs to artifacts
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
|
.gitmodules (vendored, 4 changed lines)
@@ -268,3 +268,7 @@
[submodule "contrib/eigen"]
path = contrib/eigen
url = https://github.com/eigen-mirror/eigen
[submodule "contrib/hashidsxx"]
path = contrib/hashidsxx
url = https://github.com/schoentoon/hashidsxx.git
CHANGELOG.md (165 changed lines)

@@ -1,11 +1,174 @@
### Table of Contents
**[ClickHouse release v22.5, 2022-05-19](#225)**<br>
**[ClickHouse release v22.4, 2022-04-20](#224)**<br>
**[ClickHouse release v22.3-lts, 2022-03-17](#223)**<br>
**[ClickHouse release v22.2, 2022-02-17](#222)**<br>
**[ClickHouse release v22.1, 2022-01-18](#221)**<br>
**[Changelog for 2021](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/whats-new/changelog/2021.md)**<br>

### <a id="224"></a> ClickHouse release master FIXME as compared to v22.3.3.44-lts
### <a id="225"></a> ClickHouse release 22.5, 2022-05-19

#### Upgrade Notes

* Now, background merges, mutations and `OPTIMIZE` will not increment `SelectedRows` and `SelectedBytes` metrics. They (still) will increment `MergedRows` and `MergedUncompressedBytes` as it was before. This only affects the metric values, and makes them better. This change does not introduce any incompatibility, but you may wonder about the changes of metrics, so we put it in this category. [#37040](https://github.com/ClickHouse/ClickHouse/pull/37040) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Updated the BoringSSL module to the official FIPS compliant version. This makes ClickHouse FIPS compliant. [#35914](https://github.com/ClickHouse/ClickHouse/pull/35914) ([Meena-Renganathan](https://github.com/Meena-Renganathan)). The ciphers `aes-192-cfb128` and `aes-256-cfb128` were removed, because they are not included in the FIPS certified version of BoringSSL.
* `max_memory_usage` setting is removed from the default user profile in `users.xml`. This enables flexible memory limits for queries instead of the old rigid limit of 10 GB.
* Disable `log_query_threads` setting by default. It controls the logging of statistics about every thread participating in query execution. After supporting asynchronous reads, the total number of distinct thread ids became too large, and logging into the `query_thread_log` has become too heavy. [#37077](https://github.com/ClickHouse/ClickHouse/pull/37077) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove function `groupArraySorted` which has a bug. [#36822](https://github.com/ClickHouse/ClickHouse/pull/36822) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
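
A hedged sketch of how the `max_memory_usage` and `log_query_threads` notes above play out in practice; the values below are illustrative assumptions, not recommendations:

```sql
-- Inspect the defaults mentioned in the upgrade notes.
SELECT name, value, changed
FROM system.settings
WHERE name IN ('max_memory_usage', 'log_query_threads');

-- Restore the previous behavior for the current session, if needed.
SET log_query_threads = 1;          -- re-enable per-thread statistics in query_thread_log
SET max_memory_usage = 10000000000; -- reinstate a 10 GB per-query memory cap
```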

#### New Feature

* Enable memory overcommit by default. [#35921](https://github.com/ClickHouse/ClickHouse/pull/35921) ([Dmitry Novik](https://github.com/novikd)).
* Add support of GROUPING SETS in GROUP BY clause. This implementation supports a parallel processing of grouping sets. [#33631](https://github.com/ClickHouse/ClickHouse/pull/33631) ([Dmitry Novik](https://github.com/novikd)).
* Added `system.certificates` table. [#37142](https://github.com/ClickHouse/ClickHouse/pull/37142) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Adds `h3Line`, `h3Distance` and `h3HexRing` functions. [#37030](https://github.com/ClickHouse/ClickHouse/pull/37030) ([Bharat Nallan](https://github.com/bharatnc)).
* New single binary based diagnostics tool (clickhouse-diagnostics). [#36705](https://github.com/ClickHouse/ClickHouse/pull/36705) ([Dale McDiarmid](https://github.com/gingerwizard)).
* Add output format `Prometheus` [#36051](https://github.com/ClickHouse/ClickHouse/issues/36051). [#36206](https://github.com/ClickHouse/ClickHouse/pull/36206) ([Vladimir C](https://github.com/vdimir)).
* Add `MySQLDump` input format. It reads all data from INSERT queries belonging to one table in the dump. If there is more than one table, by default it reads data from the first one. [#36667](https://github.com/ClickHouse/ClickHouse/pull/36667) ([Kruglov Pavel](https://github.com/Avogar)).
* Show the `total_rows` and `total_bytes` fields in `system.tables` for temporary tables. [#36401](https://github.com/ClickHouse/ClickHouse/issues/36401). [#36439](https://github.com/ClickHouse/ClickHouse/pull/36439) ([xiedeyantu](https://github.com/xiedeyantu)).
* Allow to override `parts_to_delay_insert` and `parts_to_throw_insert` with query-level settings. If they are defined, they will override table-level settings. [#36371](https://github.com/ClickHouse/ClickHouse/pull/36371) ([Memo](https://github.com/Joeywzr)).
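
A hedged sketch of the GROUPING SETS syntax described above; the table and column names are hypothetical, chosen only for illustration:

```sql
-- One aggregation pass produces both per-city and per-year subtotals.
SELECT city, year, sum(amount) AS total
FROM sales
GROUP BY GROUPING SETS ((city), (year))
ORDER BY city, year;
```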

#### Experimental Feature

* Implemented L1, L2, Linf, Cosine distance functions for arrays and L1, L2, Linf norm functions for arrays. [#37033](https://github.com/ClickHouse/ClickHouse/pull/37033) ([qieqieplus](https://github.com/qieqieplus)). Caveat: the functions will be renamed.
* Improve the `WATCH` query in WindowView: 1. Reduce the latency of providing query results by calling the `fire_condition` signal. 2. Make the cancel query operation (Ctrl+C) faster by checking `isCancelled()` more frequently. [#37226](https://github.com/ClickHouse/ClickHouse/pull/37226) ([vxider](https://github.com/Vxider)).
* Introspection for remove filesystem cache. [#36802](https://github.com/ClickHouse/ClickHouse/pull/36802) ([Han Shukai](https://github.com/KinderRiven)).
* Added new hash function `wyHash64` for SQL. [#36467](https://github.com/ClickHouse/ClickHouse/pull/36467) ([olevino](https://github.com/olevino)).
* Improvement for replicated databases: Added `SYSTEM SYNC DATABASE REPLICA` query which allows to sync tables metadata inside Replicated database, because currently synchronisation is asynchronous. [#35944](https://github.com/ClickHouse/ClickHouse/pull/35944) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Improvement for remote filesystem cache: Better read from cache. [#37054](https://github.com/ClickHouse/ClickHouse/pull/37054) ([Kseniia Sumarokova](https://github.com/kssenii)). Improve `SYSTEM DROP FILESYSTEM CACHE` query: `<path>` option and `FORCE` option. [#36639](https://github.com/ClickHouse/ClickHouse/pull/36639) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Improvement for semistructured data: Allow to cast columns of type `Object(...)` to `Object(Nullable(...))`. [#36564](https://github.com/ClickHouse/ClickHouse/pull/36564) ([awakeljw](https://github.com/awakeljw)).
* Improvement for parallel replicas: We create a local interpreter if we want to execute a query on the localhost replica. But when executing a query on multiple replicas, we rely on the fact that a connection exists so replicas can talk to the coordinator. It is now improved and the localhost replica can talk to the coordinator directly in the same process. [#36281](https://github.com/ClickHouse/ClickHouse/pull/36281) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
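
Two of the experimental items above, sketched as queries; the database name is hypothetical, and the distance/norm function names may differ once the planned renaming happens, as the first entry itself warns:

```sql
-- Ask a Replicated database to sync its tables metadata (database name is hypothetical).
SYSTEM SYNC DATABASE REPLICA replicated_db;

-- Array distance and norm functions from #37033 (names are subject to the renaming caveat above).
SELECT L2Distance([1, 2, 3], [4, 5, 6]) AS d, L2Norm([3, 4]) AS n;
```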

#### Performance Improvement

* Improve performance of `avg`, `sum` aggregate functions if used without GROUP BY expression. [#37257](https://github.com/ClickHouse/ClickHouse/pull/37257) ([Maksim Kita](https://github.com/kitaisreal)).
* Improve performance of unary arithmetic functions (`bitCount`, `bitNot`, `abs`, `intExp2`, `intExp10`, `negate`, `roundAge`, `roundDuration`, `roundToExp2`, `sign`) using dynamic dispatch. [#37289](https://github.com/ClickHouse/ClickHouse/pull/37289) ([Maksim Kita](https://github.com/kitaisreal)).
* Improve performance of ORDER BY, MergeJoin, insertion into MergeTree using JIT compilation of sort columns comparator. [#34469](https://github.com/ClickHouse/ClickHouse/pull/34469) ([Maksim Kita](https://github.com/kitaisreal)).
* Change structure of `system.asynchronous_metric_log`. It will take about 10 times less space. This closes [#36357](https://github.com/ClickHouse/ClickHouse/issues/36357). The field `event_time_microseconds` was removed, because it is useless. [#36360](https://github.com/ClickHouse/ClickHouse/pull/36360) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Load marks for only necessary columns when reading wide parts. [#36879](https://github.com/ClickHouse/ClickHouse/pull/36879) ([Anton Kozlov](https://github.com/tonickkozlov)).
* Improves performance of file descriptor cache by narrowing mutex scopes. [#36682](https://github.com/ClickHouse/ClickHouse/pull/36682) ([Anton Kozlov](https://github.com/tonickkozlov)).
* Improve performance of reading from storage `File` and table functions `file` in case when path has globs and matched directory contains large number of files. [#36647](https://github.com/ClickHouse/ClickHouse/pull/36647) ([Anton Popov](https://github.com/CurtizJ)).
* Apply parallel parsing for input format `HiveText`, which can speed up HiveText parsing by 2x when reading local file. [#36650](https://github.com/ClickHouse/ClickHouse/pull/36650) ([李扬](https://github.com/taiyang-li)).
* The default `HashJoin` is not thread-safe for inserting the right table's rows, so it runs in a single thread. When the right table is large, the join process is too slow with low CPU utilization. [#36415](https://github.com/ClickHouse/ClickHouse/pull/36415) ([lgbo](https://github.com/lgbo-ustc)).
* Allow to rewrite `select countDistinct(a) from t` to `select count(1) from (select a from t groupBy a)`. [#35993](https://github.com/ClickHouse/ClickHouse/pull/35993) ([zhanglistar](https://github.com/zhanglistar)).
* Transform OR LIKE chain to multiMatchAny. Will enable once we have more confidence it works. [#34932](https://github.com/ClickHouse/ClickHouse/pull/34932) ([Daniel Kutenin](https://github.com/danlark1)).
* Improve performance of some functions with inlining. [#34544](https://github.com/ClickHouse/ClickHouse/pull/34544) ([Daniel Kutenin](https://github.com/danlark1)).
* Add a branch to avoid unnecessary memcpy in readBig. It improves performance somewhat. [#36095](https://github.com/ClickHouse/ClickHouse/pull/36095) ([jasperzhu](https://github.com/jinjunzh)).
* Implement partial GROUP BY key for optimize_aggregation_in_order. [#35111](https://github.com/ClickHouse/ClickHouse/pull/35111) ([Azat Khuzhin](https://github.com/azat)).
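
A hedged illustration of the last entry: with in-order aggregation enabled, a GROUP BY key that only partially matches the table's sorting key can still be processed in order (table layout and names are hypothetical):

```sql
-- Hypothetical table sorted by (user_id, event_date); only user_id matches the sorting key prefix,
-- so the GROUP BY below uses a partial key for in-order aggregation.
SET optimize_aggregation_in_order = 1;

SELECT user_id, browser, count() AS events
FROM events_local
GROUP BY user_id, browser;
```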
#### Improvement
|
||||
|
||||
* Show names of erroneous files in case of parsing errors while executing table functions `file`, `s3` and `url`. [#36314](https://github.com/ClickHouse/ClickHouse/pull/36314) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Allowed to increase the number of threads for executing background operations (merges, mutations, moves and fetches) at runtime if they are specified at top level config. [#36425](https://github.com/ClickHouse/ClickHouse/pull/36425) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||
* Now date time conversion functions that generates time before 1970-01-01 00:00:00 with partial hours/minutes timezones will be saturated to zero instead of overflow. This is the continuation of https://github.com/ClickHouse/ClickHouse/pull/29953 which addresses https://github.com/ClickHouse/ClickHouse/pull/29953#discussion_r800550280 . Mark as improvement because it's implementation defined behavior (and very rare case) and we are allowed to break it. [#36656](https://github.com/ClickHouse/ClickHouse/pull/36656) ([Amos Bird](https://github.com/amosbird)).
|
||||
* Add a warning if someone running clickhouse-server with log level "test". The log level "test" was added recently and cannot be used in production due to inevitable, unavoidable, fatal and life-threatening performance degradation. [#36824](https://github.com/ClickHouse/ClickHouse/pull/36824) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Parse collations in CREATE TABLE, throw exception or ignore. closes [#35892](https://github.com/ClickHouse/ClickHouse/issues/35892). [#36271](https://github.com/ClickHouse/ClickHouse/pull/36271) ([yuuch](https://github.com/yuuch)).
|
||||
* Option `compatibility_ignore_auto_increment_in_create_table` allows ignoring `AUTO_INCREMENT` keyword in a column declaration to simplify migration from MySQL. [#37178](https://github.com/ClickHouse/ClickHouse/pull/37178) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||
* Add aliases `JSONLines` and `NDJSON` for `JSONEachRow`. Closes [#36303](https://github.com/ClickHouse/ClickHouse/issues/36303). [#36327](https://github.com/ClickHouse/ClickHouse/pull/36327) ([flynn](https://github.com/ucasfl)).
|
||||
* Limit the max partitions could be queried for each hive table. Avoid resource overruns. [#37281](https://github.com/ClickHouse/ClickHouse/pull/37281) ([lgbo](https://github.com/lgbo-ustc)).
|
||||
* Added implicit cast for `h3kRing` function second argument to improve usability. Closes [#35432](https://github.com/ClickHouse/ClickHouse/issues/35432). [#37189](https://github.com/ClickHouse/ClickHouse/pull/37189) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
* Fix progress indication for `INSERT SELECT` in `clickhouse-local` for any query and for file progress in client, more correct file progress. [#37075](https://github.com/ClickHouse/ClickHouse/pull/37075) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix bug which can lead to forgotten outdated parts in MergeTree table engines family in case of filesystem failures during parts removal. Before fix they will be removed only after first server restart. [#37014](https://github.com/ClickHouse/ClickHouse/pull/37014) ([alesapin](https://github.com/alesapin)).
|
||||
* Implemented a new mode of handling row policies, which can be enabled in the main configuration and allows users without permissive row policies to read rows. [#36997](https://github.com/ClickHouse/ClickHouse/pull/36997) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
* Play UI: Nullable numbers will be aligned to the right in table cells. This closes [#36982](https://github.com/ClickHouse/ClickHouse/issues/36982). [#36988](https://github.com/ClickHouse/ClickHouse/pull/36988) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Play UI: If there is one row in result and more than a few columns, display the result vertically. Continuation of [#36811](https://github.com/ClickHouse/ClickHouse/issues/36811). [#36842](https://github.com/ClickHouse/ClickHouse/pull/36842) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Cleanup CSS in Play UI. The pixels are more evenly placed. Better usability for long content in table cells. [#36569](https://github.com/ClickHouse/ClickHouse/pull/36569) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Finalize write buffers in case of exception to avoid doing it in destructors. Hope it fixes: [#36907](https://github.com/ClickHouse/ClickHouse/issues/36907). [#36979](https://github.com/ClickHouse/ClickHouse/pull/36979) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* After [#36425](https://github.com/ClickHouse/ClickHouse/issues/36425), settings like `background_fetches_pool_size` became obsolete and can appear in the top-level config, but ClickHouse threw an exception like `Error updating configuration from '/etc/clickhouse-server/config.xml' config.: Code: 137. DB::Exception: A setting 'background_fetches_pool_size' appeared at top level in config /etc/clickhouse-server/config.xml.` This is fixed. [#36917](https://github.com/ClickHouse/ClickHouse/pull/36917) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||
* Add extra diagnostic info (if applicable) when sending exception to other server. [#36872](https://github.com/ClickHouse/ClickHouse/pull/36872) ([tavplubix](https://github.com/tavplubix)).
|
||||
* Allow to execute hash functions with arguments of type `Array(Tuple(..))`. [#36812](https://github.com/ClickHouse/ClickHouse/pull/36812) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Added `user_defined_path` config setting. [#36753](https://github.com/ClickHouse/ClickHouse/pull/36753) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
* Allow cluster macro in `s3Cluster` table function. [#36726](https://github.com/ClickHouse/ClickHouse/pull/36726) ([Vadim Volodin](https://github.com/PolyProgrammist)).
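A sketch of the macro usage enabled by this change; the value substituted for `{cluster}` comes from the server's macros configuration, and the bucket URL, credentials handling and column structure below are illustrative only:

```bash
# The cluster name no longer has to be hard-coded in the query text.
clickhouse-client --query "
    SELECT count()
    FROM s3Cluster('{cluster}',
                   'https://example-bucket.s3.amazonaws.com/data/*.csv',
                   'CSV', 'id UInt64, value String')"
```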
|
||||
* Properly cancel INSERT queries in `clickhouse-client`/`clickhouse-local`. [#36710](https://github.com/ClickHouse/ClickHouse/pull/36710) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Allow to cancel a query while still keeping a decent query id in `MySQLHandler`. [#36699](https://github.com/ClickHouse/ClickHouse/pull/36699) ([Amos Bird](https://github.com/amosbird)).
|
||||
* Add `is_all_data_sent` column into `system.processes`, and improve internal testing hardening check based on it. [#36649](https://github.com/ClickHouse/ClickHouse/pull/36649) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* The metrics about time spent reading from S3 are now calculated correctly. Close [#35483](https://github.com/ClickHouse/ClickHouse/issues/35483). [#36572](https://github.com/ClickHouse/ClickHouse/pull/36572) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Allow file descriptors in table function file if it is run in clickhouse-local. [#36562](https://github.com/ClickHouse/ClickHouse/pull/36562) ([wuxiaobai24](https://github.com/wuxiaobai24)).
|
||||
* Allow names of tuple elements that start from digits. [#36544](https://github.com/ClickHouse/ClickHouse/pull/36544) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Now clickhouse-benchmark can read authentication info from environment variables. [#36497](https://github.com/ClickHouse/ClickHouse/pull/36497) ([Anton Kozlov](https://github.com/tonickkozlov)).
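A hedged sketch of what this enables; the exact environment variable names are an assumption here (the conventional `CLICKHOUSE_USER`/`CLICKHOUSE_PASSWORD` pair), so check the PR for the authoritative list:

```bash
# Credentials are picked up from the environment instead of being passed on the
# command line (where they would be visible in `ps` output).
export CLICKHOUSE_USER=bench_user        # assumed variable name
export CLICKHOUSE_PASSWORD=bench_secret  # assumed variable name
echo "SELECT sum(number) FROM numbers(1000000)" | clickhouse-benchmark --iterations 100
```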
|
||||
* `clickhouse-keeper` improvement: add support for force recovery which allows you to reconfigure cluster without quorum. [#36258](https://github.com/ClickHouse/ClickHouse/pull/36258) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Improve schema inference for JSON objects. [#36207](https://github.com/ClickHouse/ClickHouse/pull/36207) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Refactor code around schema inference with globs. Try next file from glob only if it makes sense (previously we tried next file in case of any error). Also it fixes [#36317](https://github.com/ClickHouse/ClickHouse/issues/36317). [#36205](https://github.com/ClickHouse/ClickHouse/pull/36205) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Add a separate `CLUSTER` grant (and the `access_control_improvements.on_cluster_queries_require_cluster_grant` configuration directive, defaulting to `false` for backward compatibility). [#35767](https://github.com/ClickHouse/ClickHouse/pull/35767) ([Azat Khuzhin](https://github.com/azat)).
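A sketch of opting into the stricter behavior using the directive named in the entry; the override file name is illustrative, and the GRANT statement is sketched from the grant name given above:

```bash
cat > /etc/clickhouse-server/config.d/cluster_grant.xml <<'EOF'
<clickhouse>
    <access_control_improvements>
        <on_cluster_queries_require_cluster_grant>true</on_cluster_queries_require_cluster_grant>
    </access_control_improvements>
</clickhouse>
EOF
# Once enabled, ON CLUSTER queries additionally require the new grant:
clickhouse-client --query "GRANT CLUSTER ON *.* TO some_user"
```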
|
||||
* If the required amount of memory becomes available before the selected query is stopped, all waiting queries continue execution. Now we don't stop any query if memory is freed before the moment when the selected query learns about the cancellation. [#35637](https://github.com/ClickHouse/ClickHouse/pull/35637) ([Dmitry Novik](https://github.com/novikd)).
|
||||
* Nullables detection in protobuf. In proto3, default values are not sent on the wire. This makes it non-trivial to distinguish between null and default values for Nullable columns. A standard way to deal with this problem is to use Google wrappers to nest the target value within an inner message (see https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/wrappers.proto). In this case, a missing field is interpreted as a null value, a field with a missing value is interpreted as a default value, and a field with a regular value is interpreted as a regular value. However, ClickHouse interprets Google wrappers as nested columns. We propose to introduce special behaviour to detect Google wrappers and interpret them as described above. For example, to serialize values for a Nullable column `test`, we would use `google.protobuf.StringValue test` in our .proto schema. Note that these types are so-called "well-known types" in Protobuf, implemented in the library itself. [#35149](https://github.com/ClickHouse/ClickHouse/pull/35149) ([Jakub Kuklis](https://github.com/jkuklis)).
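A sketch of the wrapper-based schema described above; the field name `test` is taken from the entry, while the message and file names are illustrative:

```bash
cat > /tmp/nullable_string.proto <<'EOF'
syntax = "proto3";
import "google/protobuf/wrappers.proto";

message Row {
    /* Wrapping the value lets a missing field be read as NULL for a
       Nullable(String) column named "test". */
    google.protobuf.StringValue test = 1;
}
EOF
```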
|
||||
* Added support for specifying `content_type` in predefined and static HTTP handler config. [#34916](https://github.com/ClickHouse/ClickHouse/pull/34916) ([Roman Nikonov](https://github.com/nic11)).
|
||||
* Warn properly if `clickhouse-client --file` is used without a preceding `--external`. Close [#34747](https://github.com/ClickHouse/ClickHouse/issues/34747). [#34765](https://github.com/ClickHouse/ClickHouse/pull/34765) ([李扬](https://github.com/taiyang-li)).
|
||||
* Improve the MySQL database engine to be compatible with the binary(0) data type. [#37232](https://github.com/ClickHouse/ClickHouse/pull/37232) ([zzsmdfj](https://github.com/zzsmdfj)).
|
||||
* Improve JSON report of clickhouse-benchmark. [#36473](https://github.com/ClickHouse/ClickHouse/pull/36473) ([Tian Xinhui](https://github.com/xinhuitian)).
|
||||
* Server might refuse to start if it cannot resolve hostname of external ClickHouse dictionary. It's fixed. Fixes [#36451](https://github.com/ClickHouse/ClickHouse/issues/36451). [#36463](https://github.com/ClickHouse/ClickHouse/pull/36463) ([tavplubix](https://github.com/tavplubix)).
#### Build/Testing/Packaging Improvement
* Now `clickhouse-keeper` for the `x86_64` architecture is statically linked with [musl](https://musl.libc.org/) and doesn't depend on any system libraries. [#31833](https://github.com/ClickHouse/ClickHouse/pull/31833) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* ClickHouse builds for `PowerPC64LE` architecture are now available in universal installation script `curl https://clickhouse.com/ | sh` and by direct link `https://builds.clickhouse.com/master/powerpc64le/clickhouse`. [#37095](https://github.com/ClickHouse/ClickHouse/pull/37095) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Limit PowerPC code generation to Power8 for better compatibility. This closes [#36025](https://github.com/ClickHouse/ClickHouse/issues/36025). [#36529](https://github.com/ClickHouse/ClickHouse/pull/36529) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Simplify performance test. This will give a chance for us to use it. [#36769](https://github.com/ClickHouse/ClickHouse/pull/36769) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Fail performance comparison on errors in the report. [#34797](https://github.com/ClickHouse/ClickHouse/pull/34797) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
* Add ZSTD support for Arrow. This fixes [#35283](https://github.com/ClickHouse/ClickHouse/issues/35283). [#35486](https://github.com/ClickHouse/ClickHouse/pull/35486) ([Sean Lafferty](https://github.com/seanlaff)).
#### Bug Fix
* Extracts Version ID if present from the URI and adds a request to the AWS HTTP URI. Closes [#31221](https://github.com/ClickHouse/ClickHouse/issues/31221). - [x] Extract `Version ID` from URI if present and reassemble without it. - [x] Configure `AWS HTTP URI` object with request. - [x] Unit Tests: [`gtest_s3_uri`](https://github.com/ClickHouse/ClickHouse/blob/2340a6c6849ebc05a8efbf97ba8de3ff9dc0eff4/src/IO/tests/gtest_s3_uri.cpp) - [x] Drop instrumentation commit. [#34571](https://github.com/ClickHouse/ClickHouse/pull/34571) ([Saad Ur Rahman](https://github.com/surahman)).
|
||||
* Fix system.opentelemetry_span_log attribute.values alias to values instead of keys. [#37275](https://github.com/ClickHouse/ClickHouse/pull/37275) ([Aleksandr Razumov](https://github.com/ernado)).
|
||||
* Fix Nullable(String) to Nullable(Bool/IPv4/IPv6) conversion Closes [#37221](https://github.com/ClickHouse/ClickHouse/issues/37221). [#37270](https://github.com/ClickHouse/ClickHouse/pull/37270) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Experimental feature: Fix execution of mutations in tables, in which there exist columns of type `Object`. Using subcolumns of type `Object` in `WHERE` expressions of `UPDATE` or `DELETE` queries is not allowed yet, as well as manipulation (`DROP`, `MODIFY`) of separate subcolumns. Fixes [#37205](https://github.com/ClickHouse/ClickHouse/issues/37205). [#37266](https://github.com/ClickHouse/ClickHouse/pull/37266) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Kafka does not need `group.id` on the producer stage. In the console log you can find a warning that describes this issue: ``` 2022.05.15 17:59:13.270227 [ 137 ] {} <Warning> StorageKafka (topic-name): [rdk:CONFWARN] [thrd:app]: Configuration property group.id is a consumer property and will be ignored by this producer instance ```. [#37228](https://github.com/ClickHouse/ClickHouse/pull/37228) ([Mark Andreev](https://github.com/mrk-andreev)).
|
||||
* Experimental feature (WindowView): Update `max_fired_watermark` after blocks have actually fired, to avoid deleting data that hasn't been fired yet. [#37225](https://github.com/ClickHouse/ClickHouse/pull/37225) ([vxider](https://github.com/Vxider)).
|
||||
* Fix "Cannot create column of type Set" for distributed queries with LIMIT BY. [#37193](https://github.com/ClickHouse/ClickHouse/pull/37193) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Experimental feature: Now WindowView `WATCH EVENTS` query will not be terminated due to the nonempty Chunk created in `WindowViewSource.h:58`. [#37182](https://github.com/ClickHouse/ClickHouse/pull/37182) ([vxider](https://github.com/Vxider)).
|
||||
* Enable `enable_global_with_statement` for subqueries, close [#37141](https://github.com/ClickHouse/ClickHouse/issues/37141). [#37166](https://github.com/ClickHouse/ClickHouse/pull/37166) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Fix implicit cast for optimize_skip_unused_shards_rewrite_in. [#37153](https://github.com/ClickHouse/ClickHouse/pull/37153) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* The ILIKE function on FixedString columns could have returned wrong results (i.e. match less than it should). [#37117](https://github.com/ClickHouse/ClickHouse/pull/37117) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Fix `GROUP BY` `AggregateFunction` (i.e. you `GROUP BY` by the column that has `AggregateFunction` type). [#37093](https://github.com/ClickHouse/ClickHouse/pull/37093) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Experimental feature: Fix optimize_aggregation_in_order with prefix GROUP BY and *Array aggregate functions. [#37050](https://github.com/ClickHouse/ClickHouse/pull/37050) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fixed performance degradation of some INSERT SELECT queries with implicit aggregation. Fixes [#36792](https://github.com/ClickHouse/ClickHouse/issues/36792). [#37047](https://github.com/ClickHouse/ClickHouse/pull/37047) ([tavplubix](https://github.com/tavplubix)).
|
||||
* Experimental feature: Fix in-order `GROUP BY` (`optimize_aggregation_in_order=1`) with `*Array` (`groupArrayArray`/...) aggregate functions. [#37046](https://github.com/ClickHouse/ClickHouse/pull/37046) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix LowCardinality->ArrowDictionary invalid output when type of indexes is not UInt8. Closes [#36832](https://github.com/ClickHouse/ClickHouse/issues/36832). [#37043](https://github.com/ClickHouse/ClickHouse/pull/37043) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fixed problem with infs in `quantileTDigest`. Fixes [#32107](https://github.com/ClickHouse/ClickHouse/issues/32107). [#37021](https://github.com/ClickHouse/ClickHouse/pull/37021) ([Vladimir Chebotarev](https://github.com/excitoon)).
|
||||
* Fix sending external tables data in HedgedConnections with max_parallel_replicas != 1. [#36981](https://github.com/ClickHouse/ClickHouse/pull/36981) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fixed logical error on `TRUNCATE` query in `Replicated` database. Fixes [#33747](https://github.com/ClickHouse/ClickHouse/issues/33747). [#36976](https://github.com/ClickHouse/ClickHouse/pull/36976) ([tavplubix](https://github.com/tavplubix)).
|
||||
* Experimental feature: Fix stuck when dropping source table in WindowView. Closes [#35678](https://github.com/ClickHouse/ClickHouse/issues/35678). [#36967](https://github.com/ClickHouse/ClickHouse/pull/36967) ([vxider](https://github.com/Vxider)).
|
||||
* Experimental feature (rocksdb cache): Fix issue: [#36671](https://github.com/ClickHouse/ClickHouse/issues/36671). [#36929](https://github.com/ClickHouse/ClickHouse/pull/36929) ([李扬](https://github.com/taiyang-li)).
|
||||
* Experimental feature: Fix bugs when using multiple columns in WindowView by adding converting actions to make it possible to call `writeIntoWindowView` with a slightly different schema. [#36928](https://github.com/ClickHouse/ClickHouse/pull/36928) ([vxider](https://github.com/Vxider)).
|
||||
* Fix bug in clickhouse-keeper which can lead to corrupted compressed log files in case of small load and restarts. [#36910](https://github.com/ClickHouse/ClickHouse/pull/36910) ([alesapin](https://github.com/alesapin)).
|
||||
* Fix incorrect query result when doing constant aggregation. This fixes [#36728](https://github.com/ClickHouse/ClickHouse/issues/36728) . [#36888](https://github.com/ClickHouse/ClickHouse/pull/36888) ([Amos Bird](https://github.com/amosbird)).
|
||||
* Experimental feature: Fix `current_size` count in cache. [#36887](https://github.com/ClickHouse/ClickHouse/pull/36887) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Experimental feature: Fix fire in window view with hop window [#34044](https://github.com/ClickHouse/ClickHouse/issues/34044). [#36861](https://github.com/ClickHouse/ClickHouse/pull/36861) ([vxider](https://github.com/Vxider)).
|
||||
* Experimental feature: Fix incorrect cast in cached buffer from remote fs. [#36809](https://github.com/ClickHouse/ClickHouse/pull/36809) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix creation of tables with `flatten_nested = 0`. Previously unflattened `Nested` columns could be flattened after server restart. [#36803](https://github.com/ClickHouse/ClickHouse/pull/36803) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Fix some issues with async reads from remote filesystem which happened when reading low cardinality. [#36763](https://github.com/ClickHouse/ClickHouse/pull/36763) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Experimental feature: Fix insertion to columns of type `Object` from multiple files, e.g. via table function `file` with globs. [#36762](https://github.com/ClickHouse/ClickHouse/pull/36762) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Fix timeouts in Hedged requests. Connection hang right after sending remote query could lead to eternal waiting. [#36749](https://github.com/ClickHouse/ClickHouse/pull/36749) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Experimental feature: Fix a bug of `groupBitmapAndState`/`groupBitmapOrState`/`groupBitmapXorState` on distributed table. [#36739](https://github.com/ClickHouse/ClickHouse/pull/36739) ([Zhang Yifan](https://github.com/zhangyifan27)).
|
||||
* Experimental feature: During the [test](https://s3.amazonaws.com/clickhouse-test-reports/36376/1cb1c7275cb53769ab826772db9b71361bb3e413/stress_test__thread__actions_/clickhouse-server.clean.log) in [PR](https://github.com/ClickHouse/ClickHouse/pull/36376), I found that one cache class was initialized twice and it throws an exception. Although the cause of this problem is not clear, there is likely code logic in ClickHouse that loads a disk repeatedly, so we need to handle this situation specially. [#36737](https://github.com/ClickHouse/ClickHouse/pull/36737) ([Han Shukai](https://github.com/KinderRiven)).
|
||||
* Fix vertical merges in wide parts. Previously an exception `There is no column` can be thrown during merge. [#36707](https://github.com/ClickHouse/ClickHouse/pull/36707) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Fix server reload on port change (do not wait for current connections from query context). [#36700](https://github.com/ClickHouse/ClickHouse/pull/36700) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Experimental feature: In the previous [PR](https://github.com/ClickHouse/ClickHouse/pull/36376), I found that testing (stateless tests, flaky check (address, actions)) times out. Moreover, local testing can also trigger unstable system deadlocks. This problem still exists when using the latest source code of master. [#36697](https://github.com/ClickHouse/ClickHouse/pull/36697) ([Han Shukai](https://github.com/KinderRiven)).
|
||||
* Experimental feature: Fix server restart if cache configuration changed. [#36685](https://github.com/ClickHouse/ClickHouse/pull/36685) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix possible heap-use-after-free in schema inference. Closes [#36661](https://github.com/ClickHouse/ClickHouse/issues/36661). [#36679](https://github.com/ClickHouse/ClickHouse/pull/36679) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fixed parsing of query settings in `CREATE` query when engine is not specified. Fixes https://github.com/ClickHouse/ClickHouse/pull/34187#issuecomment-1103812419. [#36642](https://github.com/ClickHouse/ClickHouse/pull/36642) ([tavplubix](https://github.com/tavplubix)).
|
||||
* Experimental feature: Fix merges of wide parts with type `Object`. [#36637](https://github.com/ClickHouse/ClickHouse/pull/36637) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Fix format crash when a default expression follows EPHEMERAL and is not a literal. Closes [#36618](https://github.com/ClickHouse/ClickHouse/issues/36618). [#36633](https://github.com/ClickHouse/ClickHouse/pull/36633) ([flynn](https://github.com/ucasfl)).
|
||||
* Fix `Missing column` exception which could happen while using `INTERPOLATE` with `ENGINE = MergeTree` table. [#36549](https://github.com/ClickHouse/ClickHouse/pull/36549) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||
* Fix potential error with literals in `WHERE` for join queries. Close [#36279](https://github.com/ClickHouse/ClickHouse/issues/36279). [#36542](https://github.com/ClickHouse/ClickHouse/pull/36542) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Fix offset update in ReadBufferFromEncryptedFile, which could cause undefined behaviour. [#36493](https://github.com/ClickHouse/ClickHouse/pull/36493) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix hostname sanity checks for Keeper cluster configuration. Add `keeper_server.host_checks_enabled` config to enable/disable those checks. [#36492](https://github.com/ClickHouse/ClickHouse/pull/36492) ([Antonio Andelic](https://github.com/antonio2368)).
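A sketch of toggling the new check via the config key named in the entry; the override file path and placement under the `keeper_server` section follow the dotted key name and are otherwise illustrative:

```bash
cat > /etc/clickhouse-keeper/keeper_config.d/host_checks.xml <<'EOF'
<clickhouse>
    <keeper_server>
        <!-- Disable the hostname sanity checks described above. -->
        <host_checks_enabled>false</host_checks_enabled>
    </keeper_server>
</clickhouse>
EOF
```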
|
||||
* Fix usage of executable user-defined functions in GROUP BY. Previously, executable user-defined functions could not be used as expressions in GROUP BY. Closes [#36448](https://github.com/ClickHouse/ClickHouse/issues/36448). [#36486](https://github.com/ClickHouse/ClickHouse/pull/36486) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
* Fix possible exception with unknown packet from server in client. [#36481](https://github.com/ClickHouse/ClickHouse/pull/36481) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Experimental feature (please never use `system.session_log`, it is going to be removed): Add missing enum values in system.session_log table. Closes [#36474](https://github.com/ClickHouse/ClickHouse/issues/36474). [#36480](https://github.com/ClickHouse/ClickHouse/pull/36480) ([Memo](https://github.com/Joeywzr)).
|
||||
* Fix a bug in s3Cluster schema inference that led to not all data being read in SELECT from s3Cluster. The bug appeared in https://github.com/ClickHouse/ClickHouse/pull/35544. [#36434](https://github.com/ClickHouse/ClickHouse/pull/36434) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix nullptr dereference in JOIN and COLUMNS matcher. This fixes [#36416](https://github.com/ClickHouse/ClickHouse/issues/36416). This is for https://github.com/ClickHouse/ClickHouse/pull/36417. [#36430](https://github.com/ClickHouse/ClickHouse/pull/36430) ([Amos Bird](https://github.com/amosbird)).
|
||||
* Fix dictionary reload for `ClickHouseDictionarySource` if it contains scalar subqueries. [#36390](https://github.com/ClickHouse/ClickHouse/pull/36390) ([lthaooo](https://github.com/lthaooo)).
|
||||
* Fix assertion in JOIN, close [#36199](https://github.com/ClickHouse/ClickHouse/issues/36199). [#36201](https://github.com/ClickHouse/ClickHouse/pull/36201) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Queries with aliases inside special operators returned parsing error (was broken in 22.1). Example: `SELECT substring('test' AS t, 1, 1)`. [#36167](https://github.com/ClickHouse/ClickHouse/pull/36167) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
* Experimental feature: Fix insertion of complex JSONs with nested arrays to columns of type `Object`. [#36077](https://github.com/ClickHouse/ClickHouse/pull/36077) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Fix ALTER DROP COLUMN of nested column with compact parts (i.e. `ALTER TABLE x DROP COLUMN n`, when there is column `n.d`). [#35797](https://github.com/ClickHouse/ClickHouse/pull/35797) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix substring function range error length when `offset` and `length` are negative constants and `s` is not constant. [#33861](https://github.com/ClickHouse/ClickHouse/pull/33861) ([RogerYK](https://github.com/RogerYK)).
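A sketch of the general area the fix above touches, calling substring with a negative constant offset over a non-constant string column; the behavior of particular argument combinations is not asserted here:

```bash
clickhouse-local --query "
    SELECT s, substring(s, -4, 3)
    FROM (SELECT arrayJoin(['clickhouse', 'keeper']) AS s)"
```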
### <a id="224"></a> ClickHouse release 22.4, 2022-04-19
#### Backward Incompatible Change
@ -378,4 +378,4 @@ void ReplxxLineReader::enableBracketedPaste()
|
||||
{
|
||||
bracketed_paste_enabled = true;
|
||||
rx.enable_bracketed_paste();
|
||||
};
|
||||
}
|
||||
|
@ -1,56 +1,53 @@
|
||||
# Setup integration with ccache to speed up builds, see https://ccache.dev/
|
||||
|
||||
if (CMAKE_CXX_COMPILER_LAUNCHER MATCHES "ccache" OR CMAKE_C_COMPILER_LAUNCHER MATCHES "ccache")
|
||||
set(COMPILER_MATCHES_CCACHE 1)
|
||||
else()
|
||||
set(COMPILER_MATCHES_CCACHE 0)
|
||||
endif()
|
||||
|
||||
if ((ENABLE_CCACHE OR NOT DEFINED ENABLE_CCACHE) AND NOT COMPILER_MATCHES_CCACHE)
|
||||
find_program (CCACHE_FOUND ccache)
|
||||
if (CCACHE_FOUND)
|
||||
set(ENABLE_CCACHE_BY_DEFAULT 1)
|
||||
else()
|
||||
set(ENABLE_CCACHE_BY_DEFAULT 0)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if (NOT CCACHE_FOUND AND NOT DEFINED ENABLE_CCACHE AND NOT COMPILER_MATCHES_CCACHE)
|
||||
message(WARNING "CCache is not found. We recommend setting it up if you build ClickHouse from source often. "
|
||||
"Setting it up will significantly reduce compilation time for 2nd and consequent builds")
|
||||
endif()
|
||||
|
||||
# https://ccache.dev/
|
||||
option(ENABLE_CCACHE "Speedup re-compilations using ccache (external tool)" ${ENABLE_CCACHE_BY_DEFAULT})
|
||||
|
||||
if (NOT ENABLE_CCACHE)
|
||||
# custom compiler launcher already defined, most likely because cmake was invoked with like "-DCMAKE_CXX_COMPILER_LAUNCHER=ccache" or
|
||||
# via environment variable --> respect setting and trust that the launcher was specified correctly
|
||||
message(STATUS "Using custom C compiler launcher: ${CMAKE_C_COMPILER_LAUNCHER}")
|
||||
message(STATUS "Using custom C++ compiler launcher: ${CMAKE_CXX_COMPILER_LAUNCHER}")
|
||||
return()
|
||||
endif()
|
||||
|
||||
if (CCACHE_FOUND AND NOT COMPILER_MATCHES_CCACHE)
|
||||
execute_process(COMMAND ${CCACHE_FOUND} "-V" OUTPUT_VARIABLE CCACHE_VERSION)
|
||||
string(REGEX REPLACE "ccache version ([0-9\\.]+).*" "\\1" CCACHE_VERSION ${CCACHE_VERSION})
|
||||
option(ENABLE_CCACHE "Speedup re-compilations using ccache (external tool)" ON)
|
||||
|
||||
if (CCACHE_VERSION VERSION_GREATER "3.2.0" OR NOT CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
|
||||
message(STATUS "Using ccache: ${CCACHE_FOUND} (version ${CCACHE_VERSION})")
|
||||
set(LAUNCHER ${CCACHE_FOUND})
|
||||
if (NOT ENABLE_CCACHE)
|
||||
message(STATUS "Using ccache: no (disabled via configuration)")
|
||||
return()
|
||||
endif()
|
||||
|
||||
# debian (debhelpers) set SOURCE_DATE_EPOCH environment variable, that is
|
||||
# filled from the debian/changelog or current time.
|
||||
#
|
||||
# - 4.0+ ccache always includes this environment variable into the hash
|
||||
# of the manifest, which do not allow to use previous cache,
|
||||
# - 4.2+ ccache ignores SOURCE_DATE_EPOCH for every file w/o __DATE__/__TIME__
|
||||
#
|
||||
# Exclude SOURCE_DATE_EPOCH env for ccache versions between [4.0, 4.2).
|
||||
if (CCACHE_VERSION VERSION_GREATER_EQUAL "4.0" AND CCACHE_VERSION VERSION_LESS "4.2")
|
||||
message(STATUS "Ignore SOURCE_DATE_EPOCH for ccache")
|
||||
set(LAUNCHER env -u SOURCE_DATE_EPOCH ${CCACHE_FOUND})
|
||||
endif()
|
||||
find_program (CCACHE_EXECUTABLE ccache)
|
||||
|
||||
set (CMAKE_CXX_COMPILER_LAUNCHER ${LAUNCHER} ${CMAKE_CXX_COMPILER_LAUNCHER})
|
||||
set (CMAKE_C_COMPILER_LAUNCHER ${LAUNCHER} ${CMAKE_C_COMPILER_LAUNCHER})
|
||||
else ()
|
||||
message(${RECONFIGURE_MESSAGE_LEVEL} "Using ccache: No. Found ${CCACHE_FOUND} (version ${CCACHE_VERSION}) but disabled because of bug: https://bugzilla.samba.org/show_bug.cgi?id=8118")
|
||||
endif ()
|
||||
elseif (NOT CCACHE_FOUND AND NOT COMPILER_MATCHES_CCACHE)
|
||||
message (${RECONFIGURE_MESSAGE_LEVEL} "Using ccache: No")
|
||||
endif ()
|
||||
if (NOT CCACHE_EXECUTABLE)
|
||||
message(${RECONFIGURE_MESSAGE_LEVEL} "Using ccache: no (Could not find find ccache. To significantly reduce compile times for the 2nd, 3rd, etc. build, it is highly recommended to install ccache. To suppress this message, run cmake with -DENABLE_CCACHE=0)")
|
||||
return()
|
||||
endif()
|
||||
|
||||
execute_process(COMMAND ${CCACHE_EXECUTABLE} "-V" OUTPUT_VARIABLE CCACHE_VERSION)
|
||||
string(REGEX REPLACE "ccache version ([0-9\\.]+).*" "\\1" CCACHE_VERSION ${CCACHE_VERSION})
|
||||
|
||||
set (CCACHE_MINIMUM_VERSION 3.3)
|
||||
|
||||
if (CCACHE_VERSION VERSION_LESS_EQUAL ${CCACHE_MINIMUM_VERSION})
|
||||
message(${RECONFIGURE_MESSAGE_LEVEL} "Using ccache: no (found ${CCACHE_EXECUTABLE} (version ${CCACHE_VERSION}), the minimum required version is ${CCACHE_MINIMUM_VERSION}")
|
||||
return()
|
||||
endif()
|
||||
|
||||
message(STATUS "Using ccache: ${CCACHE_EXECUTABLE} (version ${CCACHE_VERSION})")
|
||||
set(LAUNCHER ${CCACHE_EXECUTABLE})
|
||||
|
||||
# Work around a well-intended but unfortunate behavior of ccache 4.0 & 4.1 with
|
||||
# environment variable SOURCE_DATE_EPOCH. This variable provides an alternative
|
||||
# to source-code embedded timestamps (__DATE__/__TIME__) and therefore helps with
|
||||
# reproducible builds (*). SOURCE_DATE_EPOCH is set automatically by the
|
||||
# distribution, e.g. Debian. Ccache 4.0 & 4.1 incorporate SOURCE_DATE_EPOCH into
|
||||
# the hash calculation regardless they contain timestamps or not. This invalidates
|
||||
# the cache whenever SOURCE_DATE_EPOCH changes. As a fix, ignore SOURCE_DATE_EPOCH.
|
||||
#
|
||||
# (*) https://reproducible-builds.org/specs/source-date-epoch/
|
||||
if (CCACHE_VERSION VERSION_GREATER_EQUAL "4.0" AND CCACHE_VERSION VERSION_LESS "4.2")
|
||||
message(STATUS "Ignore SOURCE_DATE_EPOCH for ccache 4.0 / 4.1")
|
||||
set(LAUNCHER env -u SOURCE_DATE_EPOCH ${CCACHE_EXECUTABLE})
|
||||
endif()
|
||||
|
||||
set (CMAKE_CXX_COMPILER_LAUNCHER ${LAUNCHER} ${CMAKE_CXX_COMPILER_LAUNCHER})
|
||||
set (CMAKE_C_COMPILER_LAUNCHER ${LAUNCHER} ${CMAKE_C_COMPILER_LAUNCHER})
|
||||
|
@ -32,7 +32,8 @@ elseif (ARCH_AARCH64)
|
||||
set (COMPILER_FLAGS "${COMPILER_FLAGS} -march=armv8-a+crc")
|
||||
|
||||
elseif (ARCH_PPC64LE)
|
||||
set (COMPILER_FLAGS "${COMPILER_FLAGS} -maltivec -mcpu=power8 -DNO_WARN_X86_INTRINSICS")
|
||||
# Note that gcc and clang have support for x86 SSE2 intrinsics when building for PowerPC
|
||||
set (COMPILER_FLAGS "${COMPILER_FLAGS} -maltivec -mcpu=power8 -D__SSE2__=1 -DNO_WARN_X86_INTRINSICS")
|
||||
|
||||
elseif (ARCH_AMD64)
|
||||
set (TEST_FLAG "-mssse3")
|
||||
|
@ -1,5 +1,5 @@
|
||||
macro(add_glob cur_list)
|
||||
file(GLOB __tmp RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${ARGN})
|
||||
file(GLOB __tmp CONFIGURE_DEPENDS RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${ARGN})
|
||||
list(APPEND ${cur_list} ${__tmp})
|
||||
endmacro()
|
||||
|
||||
|
@ -1,5 +0,0 @@
|
||||
function(generate_code TEMPLATE_FILE)
|
||||
foreach(NAME IN LISTS ARGN)
|
||||
configure_file (${TEMPLATE_FILE}.cpp.in ${CMAKE_CURRENT_BINARY_DIR}/generated/${TEMPLATE_FILE}_${NAME}.cpp)
|
||||
endforeach()
|
||||
endfunction()
|
@ -1,17 +1,22 @@
|
||||
# Print the status of the git repository (if git is available).
|
||||
# This is useful for troubleshooting build failure reports
|
||||
|
||||
find_package(Git)
|
||||
|
||||
if (Git_FOUND)
|
||||
|
||||
execute_process(
|
||||
COMMAND ${GIT_EXECUTABLE} rev-parse HEAD
|
||||
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
|
||||
OUTPUT_VARIABLE GIT_COMMIT_ID
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
|
||||
message(STATUS "HEAD's commit hash ${GIT_COMMIT_ID}")
|
||||
|
||||
execute_process(
|
||||
COMMAND ${GIT_EXECUTABLE} status
|
||||
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
|
||||
else()
|
||||
message(STATUS "The git program could not be found.")
|
||||
message(STATUS "Git could not be found.")
|
||||
endif()
|
||||
|
@ -27,7 +27,7 @@ macro(clickhouse_strip_binary)
|
||||
)
|
||||
|
||||
install(PROGRAMS ${STRIP_DESTINATION_DIR}/bin/${STRIP_TARGET} DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
|
||||
install(FILES ${STRIP_DESTINATION_DIR}/lib/debug/bin/${STRIP_TARGET}.debug DESTINATION ${CMAKE_INSTALL_LIBDIR}/debug/${CMAKE_INSTALL_FULL_BINDIR}/${STRIP_TARGET}.debug COMPONENT clickhouse)
|
||||
install(FILES ${STRIP_DESTINATION_DIR}/lib/debug/bin/${STRIP_TARGET}.debug DESTINATION ${CMAKE_INSTALL_LIBDIR}/debug/${CMAKE_INSTALL_FULL_BINDIR} COMPONENT clickhouse)
|
||||
endmacro()
|
||||
|
||||
|
||||
|
@ -9,11 +9,6 @@
|
||||
|
||||
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra")
|
||||
|
||||
# Add some warnings that are not available even with -Wall -Wextra -Wpedantic.
|
||||
# Intended for exploration of new compiler warnings that may be found useful.
|
||||
# Applies to clang only
|
||||
option (WEVERYTHING "Enable -Weverything option with some exceptions." ON)
|
||||
|
||||
# Control maximum size of stack frames. It can be important if the code is run in fibers with small stack size.
|
||||
# Only in release build because debug has too large stack frames.
|
||||
if ((NOT CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG") AND (NOT SANITIZE) AND (NOT CMAKE_CXX_COMPILER_ID MATCHES "AppleClang"))
|
||||
@ -21,81 +16,42 @@ if ((NOT CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG") AND (NOT SANITIZE) AND (NOT CMAKE
|
||||
endif ()
|
||||
|
||||
if (COMPILER_CLANG)
|
||||
# Add some warnings that are not available even with -Wall -Wextra -Wpedantic.
|
||||
# We want to get everything out of the compiler for code quality.
|
||||
add_warning(everything)
|
||||
|
||||
add_warning(pedantic)
|
||||
no_warning(vla-extension)
|
||||
no_warning(zero-length-array)
|
||||
no_warning(c11-extensions)
|
||||
no_warning(unused-command-line-argument)
|
||||
|
||||
if (WEVERYTHING)
|
||||
add_warning(everything)
|
||||
no_warning(c++98-compat-pedantic)
|
||||
no_warning(c++98-compat)
|
||||
no_warning(c99-extensions)
|
||||
no_warning(conversion)
|
||||
no_warning(ctad-maybe-unsupported) # clang 9+, linux-only
|
||||
no_warning(deprecated-dynamic-exception-spec)
|
||||
no_warning(disabled-macro-expansion)
|
||||
no_warning(documentation-unknown-command)
|
||||
no_warning(double-promotion)
|
||||
no_warning(exit-time-destructors)
|
||||
no_warning(float-equal)
|
||||
no_warning(global-constructors)
|
||||
no_warning(missing-prototypes)
|
||||
no_warning(missing-variable-declarations)
|
||||
no_warning(nested-anon-types)
|
||||
no_warning(packed)
|
||||
no_warning(padded)
|
||||
no_warning(return-std-move-in-c++11) # clang 7+
|
||||
no_warning(shift-sign-overflow)
|
||||
no_warning(sign-conversion)
|
||||
no_warning(switch-enum)
|
||||
no_warning(undefined-func-template)
|
||||
no_warning(unused-template)
|
||||
no_warning(vla)
|
||||
no_warning(weak-template-vtables)
|
||||
no_warning(weak-vtables)
|
||||
|
||||
# TODO Enable conversion, sign-conversion, double-promotion warnings.
|
||||
else ()
|
||||
add_warning(comma)
|
||||
add_warning(conditional-uninitialized)
|
||||
add_warning(covered-switch-default)
|
||||
add_warning(deprecated)
|
||||
add_warning(embedded-directive)
|
||||
add_warning(empty-init-stmt) # linux-only
|
||||
add_warning(extra-semi-stmt) # linux-only
|
||||
add_warning(extra-semi)
|
||||
add_warning(gnu-case-range)
|
||||
add_warning(inconsistent-missing-destructor-override)
|
||||
add_warning(newline-eof)
|
||||
add_warning(old-style-cast)
|
||||
add_warning(range-loop-analysis)
|
||||
add_warning(redundant-parens)
|
||||
add_warning(reserved-id-macro)
|
||||
add_warning(shadow-field)
|
||||
add_warning(shadow-uncaptured-local)
|
||||
add_warning(shadow)
|
||||
add_warning(string-plus-int)
|
||||
add_warning(undef)
|
||||
add_warning(unreachable-code-return)
|
||||
add_warning(unreachable-code)
|
||||
add_warning(unused-exception-parameter)
|
||||
add_warning(unused-macros)
|
||||
add_warning(unused-member-function)
|
||||
add_warning(unneeded-internal-declaration)
|
||||
add_warning(implicit-int-float-conversion)
|
||||
add_warning(no-delete-null-pointer-checks)
|
||||
add_warning(anon-enum-enum-conversion)
|
||||
add_warning(assign-enum)
|
||||
add_warning(bitwise-op-parentheses)
|
||||
add_warning(int-in-bool-context)
|
||||
add_warning(sometimes-uninitialized)
|
||||
add_warning(tautological-bitwise-compare)
|
||||
|
||||
# XXX: libstdc++ has some of these for 3way compare
|
||||
add_warning(zero-as-null-pointer-constant)
|
||||
endif ()
|
||||
no_warning(c++98-compat-pedantic)
|
||||
no_warning(c++98-compat)
|
||||
no_warning(c99-extensions)
|
||||
no_warning(conversion)
|
||||
no_warning(ctad-maybe-unsupported) # clang 9+, linux-only
|
||||
no_warning(deprecated-dynamic-exception-spec)
|
||||
no_warning(disabled-macro-expansion)
|
||||
no_warning(documentation-unknown-command)
|
||||
no_warning(double-promotion)
|
||||
no_warning(exit-time-destructors)
|
||||
no_warning(float-equal)
|
||||
no_warning(global-constructors)
|
||||
no_warning(missing-prototypes)
|
||||
no_warning(missing-variable-declarations)
|
||||
no_warning(nested-anon-types)
|
||||
no_warning(packed)
|
||||
no_warning(padded)
|
||||
no_warning(return-std-move-in-c++11) # clang 7+
|
||||
no_warning(shift-sign-overflow)
|
||||
no_warning(sign-conversion)
|
||||
no_warning(switch-enum)
|
||||
no_warning(undefined-func-template)
|
||||
no_warning(unused-template)
|
||||
no_warning(vla)
|
||||
no_warning(weak-template-vtables)
|
||||
no_warning(weak-vtables)
|
||||
# TODO Enable conversion, sign-conversion, double-promotion warnings.
|
||||
elseif (COMPILER_GCC)
|
||||
# Add compiler options only to c++ compiler
|
||||
function(add_cxx_compile_options option)
|
||||
|
1
contrib/CMakeLists.txt
vendored
@ -140,6 +140,7 @@ add_contrib (libpq-cmake libpq)
|
||||
add_contrib (nuraft-cmake NuRaft)
|
||||
add_contrib (fast_float-cmake fast_float)
|
||||
add_contrib (datasketches-cpp-cmake datasketches-cpp)
|
||||
add_contrib (hashidsxx-cmake hashidsxx)
|
||||
|
||||
option(ENABLE_NLP "Enable NLP functions support" ${ENABLE_LIBRARIES})
|
||||
if (ENABLE_NLP)
|
||||
|
@ -2,22 +2,15 @@ set(EIGEN_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/eigen")
|
||||
|
||||
add_library (_eigen INTERFACE)
|
||||
|
||||
option (ENABLE_MKL "Build Eigen with Intel MKL" OFF)
|
||||
if (ENABLE_MKL)
|
||||
set(MKL_THREADING sequential)
|
||||
set(MKL_INTERFACE lp64)
|
||||
find_package(MKL REQUIRED)
|
||||
if (MKL_FOUND)
|
||||
message("MKL INCLUDE: ${MKL_INCLUDE}")
|
||||
message("MKL LIBRARIES: ${MKL_LIBRARIES}")
|
||||
target_compile_definitions(_eigen INTERFACE EIGEN_USE_MKL_ALL)
|
||||
target_include_directories(_eigen INTERFACE ${MKL_INCLUDE})
|
||||
target_link_libraries(_eigen INTERFACE ${MKL_LIBRARIES})
|
||||
endif()
|
||||
endif()
|
||||
|
||||
# Only include MPL2 code from Eigen library
|
||||
target_compile_definitions(_eigen INTERFACE EIGEN_MPL2_ONLY)
|
||||
|
||||
# Clang by default mimics gcc 4.2.1 compatibility but Eigen checks __GNUC__ version to enable
|
||||
# a workaround for bug https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867 fixed in 6.3
|
||||
# So we fake gcc > 6.3 when building with clang
|
||||
if (COMPILER_CLANG AND ARCH_PPC64LE)
|
||||
target_compile_options(_eigen INTERFACE -fgnuc-version=6.4)
|
||||
endif()
|
||||
|
||||
target_include_directories (_eigen SYSTEM INTERFACE ${EIGEN_LIBRARY_DIR})
|
||||
add_library(ch_contrib::eigen ALIAS _eigen)
|
||||
|
1
contrib/hashidsxx
vendored
Submodule
@ -0,0 +1 @@
|
||||
Subproject commit 783f6911ccfdaca83e3cfac084c4aad888a80cee
|
14
contrib/hashidsxx-cmake/CMakeLists.txt
Normal file
@ -0,0 +1,14 @@
|
||||
set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/hashidsxx")
|
||||
|
||||
set (SRCS
|
||||
"${LIBRARY_DIR}/hashids.cpp"
|
||||
)
|
||||
|
||||
set (HDRS
|
||||
"${LIBRARY_DIR}/hashids.h"
|
||||
)
|
||||
|
||||
add_library(_hashidsxx ${SRCS} ${HDRS})
|
||||
target_include_directories(_hashidsxx SYSTEM PUBLIC "${LIBRARY_DIR}")
|
||||
|
||||
add_library(ch_contrib::hashidsxx ALIAS _hashidsxx)
|
45
docker/docs/release/Dockerfile
Normal file
@ -0,0 +1,45 @@
|
||||
# rebuild in #33610
|
||||
# docker build -t clickhouse/docs-release .
|
||||
FROM ubuntu:20.04
|
||||
|
||||
# ARG for quick switch to a given ubuntu mirror
|
||||
ARG apt_archive="http://archive.ubuntu.com"
|
||||
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
|
||||
|
||||
ENV LANG=C.UTF-8
|
||||
|
||||
RUN apt-get update \
|
||||
&& DEBIAN_FRONTEND=noninteractive apt-get install --yes --no-install-recommends \
|
||||
wget \
|
||||
bash \
|
||||
python \
|
||||
curl \
|
||||
python3-requests \
|
||||
sudo \
|
||||
git \
|
||||
openssl \
|
||||
python3-pip \
|
||||
software-properties-common \
|
||||
fonts-arphic-ukai \
|
||||
fonts-arphic-uming \
|
||||
fonts-ipafont-mincho \
|
||||
fonts-ipafont-gothic \
|
||||
fonts-unfonts-core \
|
||||
xvfb \
|
||||
ssh-client \
|
||||
&& apt-get autoremove --yes \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN pip3 install --ignore-installed --upgrade setuptools pip virtualenv
|
||||
|
||||
# We create the most popular default 1000:1000 ubuntu user to not have ssh issues when running with UID==1000
|
||||
RUN useradd --create-home --uid 1000 --user-group ubuntu \
|
||||
&& ssh-keyscan -t rsa github.com >> /etc/ssh/ssh_known_hosts
|
||||
|
||||
COPY run.sh /
|
||||
|
||||
ENV REPO_PATH=/repo_path
|
||||
ENV OUTPUT_PATH=/output_path
|
||||
|
||||
CMD ["/bin/bash", "/run.sh"]
|
12
docker/docs/release/run.sh
Normal file
@ -0,0 +1,12 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
cd "$REPO_PATH/docs/tools"
|
||||
if ! [ -d venv ]; then
|
||||
mkdir -p venv
|
||||
virtualenv -p "$(which python3)" venv
|
||||
source venv/bin/activate
|
||||
python3 -m pip install --ignore-installed -r requirements.txt
|
||||
fi
|
||||
source venv/bin/activate
|
||||
./release.sh 2>&1 | tee "$OUTPUT_PATH/output.log"
|
@ -1,8 +1,4 @@
|
||||
{
|
||||
"docker/packager/deb": {
|
||||
"name": "clickhouse/deb-builder",
|
||||
"dependent": []
|
||||
},
|
||||
"docker/packager/binary": {
|
||||
"name": "clickhouse/binary-builder",
|
||||
"dependent": [
|
||||
@ -150,5 +146,9 @@
|
||||
"name": "clickhouse/docs-builder",
|
||||
"dependent": [
|
||||
]
|
||||
},
|
||||
"docker/docs/release": {
|
||||
"name": "clickhouse/docs-release",
|
||||
"dependent": []
|
||||
}
|
||||
}
|
||||
|
@ -97,12 +97,15 @@ RUN add-apt-repository ppa:ubuntu-toolchain-r/test --yes \
|
||||
|
||||
# Architecture of the image when BuildKit/buildx is used
|
||||
ARG TARGETARCH
|
||||
ARG NFPM_VERSION=2.15.0
|
||||
ARG NFPM_VERSION=2.15.1
|
||||
|
||||
RUN arch=${TARGETARCH:-amd64} \
|
||||
&& curl -Lo /tmp/nfpm.deb "https://github.com/goreleaser/nfpm/releases/download/v${NFPM_VERSION}/nfpm_${arch}.deb" \
|
||||
&& dpkg -i /tmp/nfpm.deb \
|
||||
&& rm /tmp/nfpm.deb
|
||||
|
||||
RUN mkdir /workdir && chmod 777 /workdir
|
||||
WORKDIR /workdir
|
||||
|
||||
COPY build.sh /
|
||||
CMD ["bash", "-c", "/build.sh 2>&1"]
|
||||
|
@ -1,18 +1,18 @@
|
||||
#!/usr/bin/env bash
|
||||
set -x -e
|
||||
|
||||
exec &> >(ts)
|
||||
set -x -e
|
||||
|
||||
cache_status () {
|
||||
ccache --show-config ||:
|
||||
ccache --show-stats ||:
|
||||
}
|
||||
|
||||
git config --global --add safe.directory /build
|
||||
[ -O /build ] || git config --global --add safe.directory /build
|
||||
|
||||
mkdir -p build/cmake/toolchain/darwin-x86_64
|
||||
tar xJf MacOSX11.0.sdk.tar.xz -C build/cmake/toolchain/darwin-x86_64 --strip-components=1
|
||||
ln -sf darwin-x86_64 build/cmake/toolchain/darwin-aarch64
|
||||
mkdir -p /build/cmake/toolchain/darwin-x86_64
|
||||
tar xJf /MacOSX11.0.sdk.tar.xz -C /build/cmake/toolchain/darwin-x86_64 --strip-components=1
|
||||
ln -sf darwin-x86_64 /build/cmake/toolchain/darwin-aarch64
|
||||
|
||||
# Uncomment to debug ccache. Don't put ccache log in /output right away, or it
|
||||
# will be confusingly packed into the "performance" package.
|
||||
@ -20,8 +20,8 @@ ln -sf darwin-x86_64 build/cmake/toolchain/darwin-aarch64
|
||||
# export CCACHE_DEBUG=1
|
||||
|
||||
|
||||
mkdir -p build/build_docker
|
||||
cd build/build_docker
|
||||
mkdir -p /build/build_docker
|
||||
cd /build/build_docker
|
||||
rm -f CMakeCache.txt
|
||||
# Read cmake arguments into array (possibly empty)
|
||||
read -ra CMAKE_FLAGS <<< "${CMAKE_FLAGS:-}"
|
||||
@ -61,10 +61,10 @@ fi
|
||||
|
||||
if [ "coverity" == "$COMBINED_OUTPUT" ]
|
||||
then
|
||||
mkdir -p /opt/cov-analysis
|
||||
mkdir -p /workdir/cov-analysis
|
||||
|
||||
wget --post-data "token=$COVERITY_TOKEN&project=ClickHouse%2FClickHouse" -qO- https://scan.coverity.com/download/linux64 | tar xz -C /opt/cov-analysis --strip-components 1
|
||||
export PATH=$PATH:/opt/cov-analysis/bin
|
||||
wget --post-data "token=$COVERITY_TOKEN&project=ClickHouse%2FClickHouse" -qO- https://scan.coverity.com/download/linux64 | tar xz -C /workdir/cov-analysis --strip-components 1
|
||||
export PATH=$PATH:/workdir/cov-analysis/bin
|
||||
cov-configure --config ./coverity.config --template --comptype clangcc --compiler "$CC"
|
||||
SCAN_WRAPPER="cov-build --config ./coverity.config --dir cov-int"
|
||||
fi
|
||||
@ -89,16 +89,36 @@ mv ./src/unit_tests_dbms /output ||: # may not exist for some binary builds
|
||||
find . -name '*.so' -print -exec mv '{}' /output \;
|
||||
find . -name '*.so.*' -print -exec mv '{}' /output \;
|
||||
|
||||
# Different files for performance test.
|
||||
if [ "performance" == "$COMBINED_OUTPUT" ]
|
||||
then
|
||||
cp -r ../tests/performance /output
|
||||
cp -r ../tests/config/top_level_domains /output
|
||||
cp -r ../docker/test/performance-comparison/config /output ||:
|
||||
rm /output/unit_tests_dbms ||:
|
||||
rm /output/clickhouse-odbc-bridge ||:
|
||||
prepare_combined_output () {
|
||||
local OUTPUT
|
||||
OUTPUT="$1"
|
||||
|
||||
cp -r ../docker/test/performance-comparison /output/scripts ||:
|
||||
mkdir -p "$OUTPUT"/config
|
||||
cp /build/programs/server/config.xml "$OUTPUT"/config
|
||||
cp /build/programs/server/users.xml "$OUTPUT"/config
|
||||
cp -r --dereference /build/programs/server/config.d "$OUTPUT"/config
|
||||
}
|
||||
|
||||
# Different files for performance test.
|
||||
if [ "$WITH_PERFORMANCE" == 1 ]
|
||||
then
|
||||
PERF_OUTPUT=/workdir/performance/output
|
||||
mkdir -p "$PERF_OUTPUT"
|
||||
cp -r ../tests/performance "$PERF_OUTPUT"
|
||||
cp -r ../tests/config/top_level_domains "$PERF_OUTPUT"
|
||||
cp -r ../docker/test/performance-comparison/config "$PERF_OUTPUT" ||:
|
||||
for SRC in /output/clickhouse*; do
|
||||
# Copy all clickhouse* files except packages and bridges
|
||||
[[ "$SRC" != *.* ]] && [[ "$SRC" != *-bridge ]] && \
|
||||
cp -d "$SRC" "$PERF_OUTPUT"
|
||||
done
|
||||
if [ -x "$PERF_OUTPUT"/clickhouse-keeper ]; then
|
||||
# Replace standalone keeper by symlink
|
||||
ln -sf clickhouse "$PERF_OUTPUT"/clickhouse-keeper
|
||||
fi
|
||||
|
||||
cp -r ../docker/test/performance-comparison "$PERF_OUTPUT"/scripts ||:
|
||||
prepare_combined_output "$PERF_OUTPUT"
|
||||
|
||||
# We have to know the revision that corresponds to this binary build.
|
||||
# It is not the nominal SHA from pull/*/head, but the pull/*/merge, which is
|
||||
@ -111,22 +131,23 @@ then
|
||||
# for a given nominal SHA, but it is not accessible outside Yandex.
|
||||
# This is why we add this repository snapshot from CI to the performance test
|
||||
# package.
|
||||
mkdir /output/ch
|
||||
git -C /output/ch init --bare
|
||||
git -C /output/ch remote add origin /build
|
||||
git -C /output/ch fetch --no-tags --depth 50 origin HEAD:pr
|
||||
git -C /output/ch fetch --no-tags --depth 50 origin master:master
|
||||
git -C /output/ch reset --soft pr
|
||||
git -C /output/ch log -5
|
||||
mkdir "$PERF_OUTPUT"/ch
|
||||
git -C "$PERF_OUTPUT"/ch init --bare
|
||||
git -C "$PERF_OUTPUT"/ch remote add origin /build
|
||||
git -C "$PERF_OUTPUT"/ch fetch --no-tags --depth 50 origin HEAD:pr
|
||||
git -C "$PERF_OUTPUT"/ch fetch --no-tags --depth 50 origin master:master
|
||||
git -C "$PERF_OUTPUT"/ch reset --soft pr
|
||||
git -C "$PERF_OUTPUT"/ch log -5
|
||||
(
|
||||
cd "$PERF_OUTPUT"/..
|
||||
tar -cv -I pigz -f /output/performance.tgz output
|
||||
)
|
||||
fi
|
||||
|
||||
# May be set for split build or for performance test.
|
||||
if [ "" != "$COMBINED_OUTPUT" ]
|
||||
then
|
||||
mkdir -p /output/config
|
||||
cp ../programs/server/config.xml /output/config
|
||||
cp ../programs/server/users.xml /output/config
|
||||
cp -r --dereference ../programs/server/config.d /output/config
|
||||
prepare_combined_output /output
|
||||
tar -cv -I pigz -f "$COMBINED_OUTPUT.tgz" /output
|
||||
rm -r /output/*
|
||||
mv "$COMBINED_OUTPUT.tgz" /output
|
||||
@ -138,13 +159,6 @@ then
|
||||
mv "coverity-scan.tgz" /output
|
||||
fi
|
||||
|
||||
# Also build fuzzers if any sanitizer specified
|
||||
# if [ -n "$SANITIZER" ]
|
||||
# then
|
||||
# # Currently we are in build/build_docker directory
|
||||
# ../docker/packager/other/fuzzer.sh
|
||||
# fi
|
||||
|
||||
cache_status
|
||||
|
||||
if [ "${CCACHE_DEBUG:-}" == "1" ]
|
||||
@ -159,3 +173,5 @@ then
|
||||
# files in place, and will fail because this directory is not writable.
|
||||
tar -cv -I pixz -f /output/ccache.log.txz "$CCACHE_LOGFILE"
|
||||
fi
|
||||
|
||||
ls -l /output
|
||||
|
@ -1,81 +0,0 @@
|
||||
# rebuild in #33610
|
||||
# docker build -t clickhouse/deb-builder .
|
||||
FROM ubuntu:20.04
|
||||
|
||||
# ARG for quick switch to a given ubuntu mirror
|
||||
ARG apt_archive="http://archive.ubuntu.com"
|
||||
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
|
||||
|
||||
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=13
|
||||
|
||||
RUN apt-get update \
|
||||
&& apt-get install ca-certificates lsb-release wget gnupg apt-transport-https \
|
||||
--yes --no-install-recommends --verbose-versions \
|
||||
&& export LLVM_PUBKEY_HASH="bda960a8da687a275a2078d43c111d66b1c6a893a3275271beedf266c1ff4a0cdecb429c7a5cccf9f486ea7aa43fd27f" \
|
||||
&& wget -nv -O /tmp/llvm-snapshot.gpg.key https://apt.llvm.org/llvm-snapshot.gpg.key \
|
||||
&& echo "${LLVM_PUBKEY_HASH} /tmp/llvm-snapshot.gpg.key" | sha384sum -c \
|
||||
&& apt-key add /tmp/llvm-snapshot.gpg.key \
|
||||
&& export CODENAME="$(lsb_release --codename --short | tr 'A-Z' 'a-z')" \
|
||||
&& echo "deb [trusted=yes] http://apt.llvm.org/${CODENAME}/ llvm-toolchain-${CODENAME}-${LLVM_VERSION} main" >> \
|
||||
/etc/apt/sources.list
|
||||
|
||||
# initial packages
|
||||
RUN apt-get update \
|
||||
&& apt-get install \
|
||||
bash \
|
||||
fakeroot \
|
||||
ccache \
|
||||
curl \
|
||||
software-properties-common \
|
||||
--yes --no-install-recommends
|
||||
|
||||
# Architecture of the image when BuildKit/buildx is used
|
||||
ARG TARGETARCH
|
||||
|
||||
# Special dpkg-deb (https://github.com/ClickHouse-Extras/dpkg) version which is able
|
||||
# to compress files using pigz (https://zlib.net/pigz/) instead of gzip.
|
||||
# Significantly increase deb packaging speed and compatible with old systems
|
||||
RUN arch=${TARGETARCH:-amd64} \
|
||||
&& curl -Lo /usr/bin/dpkg-deb https://github.com/ClickHouse-Extras/dpkg/releases/download/1.21.1-clickhouse/dpkg-deb-${arch}
|
||||
|
||||
RUN apt-get update \
|
||||
&& apt-get install \
|
||||
alien \
|
||||
clang-${LLVM_VERSION} \
|
||||
clang-tidy-${LLVM_VERSION} \
|
||||
cmake \
|
||||
debhelper \
|
||||
devscripts \
|
||||
gdb \
|
||||
git \
|
||||
gperf \
|
||||
lld-${LLVM_VERSION} \
|
||||
llvm-${LLVM_VERSION} \
|
||||
llvm-${LLVM_VERSION}-dev \
|
||||
moreutils \
|
||||
ninja-build \
|
||||
perl \
|
||||
pigz \
|
||||
pixz \
|
||||
pkg-config \
|
||||
tzdata \
|
||||
--yes --no-install-recommends
|
||||
|
||||
# NOTE: Seems like gcc-11 is too new for ubuntu20 repository
|
||||
RUN add-apt-repository ppa:ubuntu-toolchain-r/test --yes \
|
||||
&& apt-get update \
|
||||
&& apt-get install gcc-11 g++-11 --yes
|
||||
|
||||
|
||||
# These symlinks are required:
|
||||
# /usr/bin/ld.lld: by gcc to find lld compiler
|
||||
# /usr/bin/aarch64-linux-gnu-obj*: for debug symbols stripping
|
||||
RUN ln -s /usr/bin/lld-${LLVM_VERSION} /usr/bin/ld.lld \
|
||||
&& ln -sf /usr/lib/llvm-${LLVM_VERSION}/bin/llvm-objcopy /usr/bin/aarch64-linux-gnu-strip \
|
||||
&& ln -sf /usr/lib/llvm-${LLVM_VERSION}/bin/llvm-objcopy /usr/bin/aarch64-linux-gnu-objcopy \
|
||||
&& ln -sf /usr/lib/llvm-${LLVM_VERSION}/bin/llvm-objdump /usr/bin/aarch64-linux-gnu-objdump
|
||||
|
||||
|
||||
COPY build.sh /
|
||||
|
||||
CMD ["/bin/bash", "/build.sh"]
|
@ -1,58 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -x -e
|
||||
|
||||
# Uncomment to debug ccache.
|
||||
# export CCACHE_LOGFILE=/build/ccache.log
|
||||
# export CCACHE_DEBUG=1
|
||||
|
||||
ccache --show-config ||:
|
||||
ccache --show-stats ||:
|
||||
ccache --zero-stats ||:
|
||||
|
||||
read -ra ALIEN_PKGS <<< "${ALIEN_PKGS:-}"
|
||||
build/release "${ALIEN_PKGS[@]}" | ts '%Y-%m-%d %H:%M:%S'
|
||||
mv /*.deb /output
|
||||
mv -- *.changes /output
|
||||
mv -- *.buildinfo /output
|
||||
mv /*.rpm /output ||: # if exists
|
||||
mv /*.tgz /output ||: # if exists
|
||||
|
||||
if [ -n "$BINARY_OUTPUT" ] && { [ "$BINARY_OUTPUT" = "programs" ] || [ "$BINARY_OUTPUT" = "tests" ] ;}
|
||||
then
|
||||
echo "Place $BINARY_OUTPUT to output"
|
||||
mkdir /output/binary ||: # if exists
|
||||
mv /build/obj-*/programs/clickhouse* /output/binary
|
||||
|
||||
if [ "$BINARY_OUTPUT" = "tests" ]
|
||||
then
|
||||
mv /build/obj-*/src/unit_tests_dbms /output/binary
|
||||
fi
|
||||
fi
|
||||
|
||||
# Also build fuzzers if any sanitizer specified
|
||||
# if [ -n "$SANITIZER" ]
|
||||
# then
|
||||
# # Script is supposed that we are in build directory.
|
||||
# mkdir -p build/build_docker
|
||||
# cd build/build_docker
|
||||
# # Launching build script
|
||||
# ../docker/packager/other/fuzzer.sh
|
||||
# cd
|
||||
# fi
|
||||
|
||||
ccache --show-config ||:
|
||||
ccache --show-stats ||:
|
||||
|
||||
if [ "${CCACHE_DEBUG:-}" == "1" ]
|
||||
then
|
||||
find /build -name '*.ccache-*' -print0 \
|
||||
| tar -c -I pixz -f /output/ccache-debug.txz --null -T -
|
||||
fi
|
||||
|
||||
if [ -n "$CCACHE_LOGFILE" ]
|
||||
then
|
||||
# Compress the log as well, or else the CI will try to compress all log
|
||||
# files in place, and will fail because this directory is not writable.
|
||||
tar -cv -I pixz -f /output/ccache.log.txz "$CCACHE_LOGFILE"
|
||||
fi
|
@ -1,36 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# This script is responsible for building all fuzzers, and copy them to output directory
|
||||
# as an archive.
|
||||
# Script is supposed that we are in build directory.
|
||||
|
||||
set -x -e
|
||||
|
||||
printenv
|
||||
|
||||
# Delete previous cache, because we add a new flags -DENABLE_FUZZING=1 and -DFUZZER=libfuzzer
|
||||
rm -f CMakeCache.txt
|
||||
read -ra CMAKE_FLAGS <<< "${CMAKE_FLAGS:-}"
|
||||
# Hope, that the most part of files will be in cache, so we just link new executables
|
||||
# Please, add or change flags directly in cmake
|
||||
cmake --debug-trycompile --verbose=1 -DCMAKE_VERBOSE_MAKEFILE=1 -LA -DCMAKE_C_COMPILER="$CC" -DCMAKE_CXX_COMPILER="$CXX" \
|
||||
-DSANITIZE="$SANITIZER" -DENABLE_FUZZING=1 -DFUZZER='libfuzzer' -DENABLE_PROTOBUF=1 "${CMAKE_FLAGS[@]}" ..
|
||||
|
||||
FUZZER_TARGETS=$(find ../src -name '*_fuzzer.cpp' -execdir basename {} .cpp ';' | tr '\n' ' ')
|
||||
|
||||
NUM_JOBS=$(($(nproc || grep -c ^processor /proc/cpuinfo)))
|
||||
|
||||
mkdir -p /output/fuzzers
|
||||
for FUZZER_TARGET in $FUZZER_TARGETS
|
||||
do
|
||||
# shellcheck disable=SC2086 # No quotes because I want it to expand to nothing if empty.
|
||||
ninja $NINJA_FLAGS $FUZZER_TARGET -j $NUM_JOBS
|
||||
# Find this binary in build directory and strip it
|
||||
FUZZER_PATH=$(find ./src -name "$FUZZER_TARGET")
|
||||
strip --strip-unneeded "$FUZZER_PATH"
|
||||
mv "$FUZZER_PATH" /output/fuzzers
|
||||
done
|
||||
|
||||
|
||||
tar -zcvf /output/fuzzers.tar.gz /output/fuzzers
|
||||
rm -rf /output/fuzzers
|
@ -5,8 +5,10 @@ import os
|
||||
import argparse
|
||||
import logging
|
||||
import sys
|
||||
from typing import List
|
||||
|
||||
SCRIPT_PATH = os.path.realpath(__file__)
|
||||
IMAGE_TYPE = "binary"
|
||||
|
||||
|
||||
def check_image_exists_locally(image_name):
|
||||
@ -38,8 +40,40 @@ def build_image(image_name, filepath):
|
||||
)
|
||||
|
||||
|
||||
def pre_build(repo_path: str, env_variables: List[str]):
|
||||
if "WITH_PERFORMANCE=1" in env_variables:
|
||||
current_branch = subprocess.check_output(
|
||||
"git branch --show-current", shell=True, encoding="utf-8"
|
||||
).strip()
|
||||
is_shallow = (
|
||||
subprocess.check_output(
|
||||
"git rev-parse --is-shallow-repository", shell=True, encoding="utf-8"
|
||||
)
|
||||
== "true\n"
|
||||
)
|
||||
if is_shallow:
|
||||
# I've spent quite some time on looking around the problem, and my
|
||||
# conclusion is: in the current state the easiest way to go is to force
|
||||
# unshallow repository for performance artifacts.
|
||||
# To change it we need to rework our performance tests docker image
|
||||
raise Exception("shallow repository is not suitable for performance builds")
|
||||
if current_branch != "master":
|
||||
cmd = (
|
||||
f"git -C {repo_path} fetch --no-recurse-submodules "
|
||||
"--no-tags origin master:master"
|
||||
)
|
||||
logging.info("Getting master branch for performance artifact: ''%s'", cmd)
|
||||
subprocess.check_call(cmd, shell=True)
|
||||
|
||||
|
||||
def run_docker_image_with_env(
|
||||
image_name, output, env_variables, ch_root, ccache_dir, docker_image_version
|
||||
image_name,
|
||||
as_root,
|
||||
output,
|
||||
env_variables,
|
||||
ch_root,
|
||||
ccache_dir,
|
||||
docker_image_version,
|
||||
):
|
||||
env_part = " -e ".join(env_variables)
|
||||
if env_part:
|
||||
@ -50,8 +84,13 @@ def run_docker_image_with_env(
|
||||
else:
|
||||
interactive = ""
|
||||
|
||||
if as_root:
|
||||
user = "0:0"
|
||||
else:
|
||||
user = f"{os.geteuid()}:{os.getegid()}"
|
||||
|
||||
cmd = (
|
||||
f"docker run --network=host --rm --volume={output}:/output "
|
||||
f"docker run --network=host --user={user} --rm --volume={output}:/output "
|
||||
f"--volume={ch_root}:/build --volume={ccache_dir}:/ccache {env_part} "
|
||||
f"{interactive} {image_name}:{docker_image_version}"
|
||||
)
|
||||
@ -75,7 +114,6 @@ def parse_env_variables(
|
||||
compiler,
|
||||
sanitizer,
|
||||
package_type,
|
||||
image_type,
|
||||
cache,
|
||||
distcc_hosts,
|
||||
split_binary,
|
||||
@ -153,7 +191,7 @@ def parse_env_variables(
|
||||
|
||||
cxx = cc.replace("gcc", "g++").replace("clang", "clang++")
|
||||
|
||||
if image_type == "deb":
|
||||
if package_type == "deb":
|
||||
result.append("MAKE_DEB=true")
|
||||
cmake_flags.append("-DENABLE_TESTS=0")
|
||||
cmake_flags.append("-DENABLE_UTILS=0")
|
||||
@ -165,6 +203,7 @@ def parse_env_variables(
|
||||
cmake_flags.append("-DCMAKE_INSTALL_LOCALSTATEDIR=/var")
|
||||
if is_release_build(build_type, package_type, sanitizer, split_binary):
|
||||
cmake_flags.append("-DINSTALL_STRIPPED_BINARIES=ON")
|
||||
result.append("WITH_PERFORMANCE=1")
|
||||
if is_cross_arm:
|
||||
cmake_flags.append("-DBUILD_STANDALONE_KEEPER=1")
|
||||
else:
|
||||
@ -176,10 +215,7 @@ def parse_env_variables(
|
||||
cmake_flags.append(f"-DCMAKE_CXX_COMPILER={cxx}")
|
||||
|
||||
# Create combined output archive for split build and for performance tests.
|
||||
if package_type == "performance":
|
||||
result.append("COMBINED_OUTPUT=performance")
|
||||
cmake_flags.append("-DENABLE_TESTS=0")
|
||||
elif package_type == "coverity":
|
||||
if package_type == "coverity":
|
||||
result.append("COMBINED_OUTPUT=coverity")
|
||||
result.append('COVERITY_TOKEN="$COVERITY_TOKEN"')
|
||||
elif split_binary:
|
||||
@ -258,27 +294,30 @@ def parse_env_variables(
|
||||
return result
|
||||
|
||||
|
||||
def dir_name(name: str) -> str:
|
||||
if not os.path.isabs(name):
|
||||
name = os.path.abspath(os.path.join(os.getcwd(), name))
|
||||
return name
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
|
||||
parser = argparse.ArgumentParser(
|
||||
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
|
||||
description="ClickHouse building script using prebuilt Docker image",
|
||||
)
|
||||
# 'performance' creates a combined .tgz with server
|
||||
# and configs to be used for performance test.
|
||||
parser.add_argument(
|
||||
"--package-type",
|
||||
choices=["deb", "binary", "performance", "coverity"],
|
||||
choices=["deb", "binary", "coverity"],
|
||||
required=True,
|
||||
)
|
||||
parser.add_argument(
|
||||
"--clickhouse-repo-path",
|
||||
default=os.path.join(
|
||||
os.path.dirname(os.path.abspath(__file__)), os.pardir, os.pardir
|
||||
),
|
||||
default=os.path.join(os.path.dirname(SCRIPT_PATH), os.pardir, os.pardir),
|
||||
type=dir_name,
|
||||
help="ClickHouse git repository",
|
||||
)
|
||||
parser.add_argument("--output-dir", required=True)
|
||||
parser.add_argument("--output-dir", type=dir_name, required=True)
|
||||
parser.add_argument("--build-type", choices=("debug", ""), default="")
|
||||
parser.add_argument(
|
||||
"--compiler",
|
||||
@ -315,6 +354,7 @@ if __name__ == "__main__":
|
||||
parser.add_argument(
|
||||
"--ccache_dir",
|
||||
default=os.getenv("HOME", "") + "/.ccache",
|
||||
type=dir_name,
|
||||
help="a directory with ccache",
|
||||
)
|
||||
parser.add_argument("--distcc-hosts", nargs="+")
|
||||
@ -330,39 +370,28 @@ if __name__ == "__main__":
|
||||
parser.add_argument(
|
||||
"--docker-image-version", default="latest", help="docker image tag to use"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--as-root", action="store_true", help="if the container should run as root"
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
if not os.path.isabs(args.output_dir):
|
||||
args.output_dir = os.path.abspath(os.path.join(os.getcwd(), args.output_dir))
|
||||
|
||||
image_type = (
|
||||
"binary"
|
||||
if args.package_type in ("performance", "coverity")
|
||||
else args.package_type
|
||||
)
|
||||
image_name = "clickhouse/binary-builder"
|
||||
image_name = f"clickhouse/{IMAGE_TYPE}-builder"
|
||||
|
||||
if not os.path.isabs(args.clickhouse_repo_path):
|
||||
ch_root = os.path.abspath(os.path.join(os.getcwd(), args.clickhouse_repo_path))
|
||||
else:
|
||||
ch_root = args.clickhouse_repo_path
|
||||
ch_root = args.clickhouse_repo_path
|
||||
|
||||
if args.additional_pkgs and image_type != "deb":
|
||||
if args.additional_pkgs and args.package_type != "deb":
|
||||
raise Exception("Can build additional packages only in deb build")
|
||||
|
||||
if args.with_binaries != "" and image_type != "deb":
|
||||
if args.with_binaries != "" and args.package_type != "deb":
|
||||
raise Exception("Can add additional binaries only in deb build")
|
||||
|
||||
if args.with_binaries != "" and image_type == "deb":
|
||||
if args.with_binaries != "" and args.package_type == "deb":
|
||||
logging.info("Should place %s to output", args.with_binaries)
|
||||
|
||||
dockerfile = os.path.join(ch_root, "docker/packager", image_type, "Dockerfile")
|
||||
dockerfile = os.path.join(ch_root, "docker/packager", IMAGE_TYPE, "Dockerfile")
|
||||
image_with_version = image_name + ":" + args.docker_image_version
|
||||
if (
|
||||
image_type != "freebsd"
|
||||
and not check_image_exists_locally(image_name)
|
||||
or args.force_build_image
|
||||
):
|
||||
if not check_image_exists_locally(image_name) or args.force_build_image:
|
||||
if not pull_image(image_with_version) or args.force_build_image:
|
||||
build_image(image_with_version, dockerfile)
|
||||
env_prepared = parse_env_variables(
|
||||
@ -370,7 +399,6 @@ if __name__ == "__main__":
|
||||
args.compiler,
|
||||
args.sanitizer,
|
||||
args.package_type,
|
||||
image_type,
|
||||
args.cache,
|
||||
args.distcc_hosts,
|
||||
args.split_binary,
|
||||
@ -383,8 +411,10 @@ if __name__ == "__main__":
|
||||
args.with_binaries,
|
||||
)
|
||||
|
||||
pre_build(args.clickhouse_repo_path, env_prepared)
|
||||
run_docker_image_with_env(
|
||||
image_name,
|
||||
args.as_root,
|
||||
args.output_dir,
|
||||
env_prepared,
|
||||
ch_root,
|
||||
|
@ -178,6 +178,7 @@ function clone_submodules
|
||||
contrib/replxx
|
||||
contrib/wyhash
|
||||
contrib/eigen
|
||||
contrib/hashidsxx
|
||||
)
|
||||
|
||||
git submodule sync
|
||||
|
@ -3,6 +3,7 @@ set -ex
|
||||
set -o pipefail
|
||||
trap "exit" INT TERM
|
||||
trap 'kill $(jobs -pr) ||:' EXIT
|
||||
BUILD_NAME=${BUILD_NAME:-package_release}
|
||||
|
||||
mkdir db0 ||:
|
||||
mkdir left ||:
|
||||
@ -26,7 +27,10 @@ function download
|
||||
{
|
||||
# Historically there were various paths for the performance test package.
|
||||
# Test all of them.
|
||||
declare -a urls_to_try=("https://s3.amazonaws.com/clickhouse-builds/$left_pr/$left_sha/performance/performance.tgz")
|
||||
declare -a urls_to_try=(
|
||||
"https://s3.amazonaws.com/clickhouse-builds/$left_pr/$left_sha/$BUILD_NAME/performance.tgz"
|
||||
"https://s3.amazonaws.com/clickhouse-builds/$left_pr/$left_sha/performance/performance.tgz"
|
||||
)
|
||||
|
||||
for path in "${urls_to_try[@]}"
|
||||
do
|
||||
@ -41,7 +45,7 @@ function download
|
||||
# download anything, for example in some manual runs. In this case, SHAs are not set.
|
||||
if ! [ "$left_sha" = "$right_sha" ]
|
||||
then
|
||||
wget -nv -nd -c "$left_path" -O- | tar -C left --strip-components=1 -zxv &
|
||||
wget -nv -nd -c "$left_path" -O- | tar -C left --no-same-owner --strip-components=1 -zxv &
|
||||
elif [ "$right_sha" != "" ]
|
||||
then
|
||||
mkdir left ||:
|
||||
|
@ -5,6 +5,7 @@ CHPC_CHECK_START_TIMESTAMP="$(date +%s)"
|
||||
export CHPC_CHECK_START_TIMESTAMP
|
||||
|
||||
S3_URL=${S3_URL:="https://clickhouse-builds.s3.amazonaws.com"}
|
||||
BUILD_NAME=${BUILD_NAME:-package_release}
|
||||
|
||||
COMMON_BUILD_PREFIX="/clickhouse_build_check"
|
||||
if [[ $S3_URL == *"s3.amazonaws.com"* ]]; then
|
||||
@ -64,7 +65,12 @@ function find_reference_sha
|
||||
# Historically there were various path for the performance test package,
|
||||
# test all of them.
|
||||
unset found
|
||||
declare -a urls_to_try=("https://s3.amazonaws.com/clickhouse-builds/0/$REF_SHA/performance/performance.tgz")
|
||||
declare -a urls_to_try=(
|
||||
"https://s3.amazonaws.com/clickhouse-builds/0/$REF_SHA/$BUILD_NAME/performance.tgz"
|
||||
# FIXME: the following link is left there for backward compatibility.
|
||||
# We should remove it after 2022-11-01
|
||||
"https://s3.amazonaws.com/clickhouse-builds/0/$REF_SHA/performance/performance.tgz"
|
||||
)
|
||||
for path in "${urls_to_try[@]}"
|
||||
do
|
||||
if curl_with_retry "$path"
|
||||
@ -88,13 +94,13 @@ chmod 777 workspace output
|
||||
cd workspace
|
||||
|
||||
# Download the package for the version we are going to test.
|
||||
if curl_with_retry "$S3_URL/$PR_TO_TEST/$SHA_TO_TEST$COMMON_BUILD_PREFIX/performance/performance.tgz"
|
||||
if curl_with_retry "$S3_URL/$PR_TO_TEST/$SHA_TO_TEST$COMMON_BUILD_PREFIX/$BUILD_NAME/performance.tgz"
|
||||
then
|
||||
right_path="$S3_URL/$PR_TO_TEST/$SHA_TO_TEST$COMMON_BUILD_PREFIX/performance/performance.tgz"
|
||||
right_path="$S3_URL/$PR_TO_TEST/$SHA_TO_TEST$COMMON_BUILD_PREFIX/$BUILD_NAME/performance.tgz"
|
||||
fi
|
||||
|
||||
mkdir right
|
||||
wget -nv -nd -c "$right_path" -O- | tar -C right --strip-components=1 -zxv
|
||||
wget -nv -nd -c "$right_path" -O- | tar -C right --no-same-owner --strip-components=1 -zxv
|
||||
|
||||
# Find reference revision if not specified explicitly
|
||||
if [ "$REF_SHA" == "" ]; then find_reference_sha; fi
|
||||
@ -155,7 +161,7 @@ ulimit -c unlimited
|
||||
cat /proc/sys/kernel/core_pattern
|
||||
|
||||
# Start the main comparison script.
|
||||
{ \
|
||||
{
|
||||
time ../download.sh "$REF_PR" "$REF_SHA" "$PR_TO_TEST" "$SHA_TO_TEST" && \
|
||||
time stage=configure "$script_path"/compare.sh ; \
|
||||
} 2>&1 | ts "$(printf '%%Y-%%m-%%d %%H:%%M:%%S\t')" | tee compare.log
|
||||
@ -178,4 +184,6 @@ ls -lath
|
||||
report analyze benchmark metrics \
|
||||
./*.core.dmp ./*.core
|
||||
|
||||
cp compare.log /output
|
||||
# If the files aren't same, copy it
|
||||
cmp --silent compare.log /output/compare.log || \
|
||||
cp compare.log /output
|
||||
|
@ -420,12 +420,6 @@ Note that ClickHouse uses forks of these libraries, see https://github.com/Click
|
||||
<td>Using system libs can cause a lot of warnings in includes (on macro expansion).</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><a name="weverything"></a><a href="https://github.com/clickhouse/clickhouse/blob/master/cmake/warnings.cmake#L15" rel="external nofollow noreferrer" target="_blank"><code class="syntax">WEVERYTHING</code></a></td>
|
||||
<td><code class="syntax">ON</code></td>
|
||||
<td>Enable -Weverything option with some exceptions.</td>
|
||||
<td>Add some warnings that are not available even with -Wall -Wextra -Wpedantic. Intended for exploration of new compiler warnings that may be found useful. Applies to clang only</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><a name="with-coverage"></a><a href="https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L344" rel="external nofollow noreferrer" target="_blank"><code class="syntax">WITH_COVERAGE</code></a></td>
|
||||
<td><code class="syntax">OFF</code></td>
|
||||
<td>Profile the resulting binary/binaries</td>
|
||||
|
@ -669,6 +669,7 @@ Storage policies configuration markup:
|
||||
<volume_name_1>
|
||||
<disk>disk_name_from_disks_configuration</disk>
|
||||
<max_data_part_size_bytes>1073741824</max_data_part_size_bytes>
|
||||
<load_balancing>round_robin</load_balancing>
|
||||
</volume_name_1>
|
||||
<volume_name_2>
|
||||
<!-- configuration -->
|
||||
@ -695,6 +696,8 @@ Tags:
|
||||
- `max_data_part_size_bytes` — the maximum size of a part that can be stored on any of the volume’s disks. If the estimated size of a merged part is bigger than `max_data_part_size_bytes`, the part is written to the next volume. Basically, this feature allows you to keep new/small parts on a hot (SSD) volume and move them to a cold (HDD) volume when they reach a large size. Do not use this setting if your policy has only one volume.
|
||||
- `move_factor` — when the amount of available space gets lower than this factor, data automatically starts to move to the next volume, if there is one (by default, 0.1). ClickHouse sorts existing parts by size from largest to smallest (in descending order) and selects parts whose total size is sufficient to meet the `move_factor` condition. If the total size of all parts is insufficient, all parts are moved.
|
||||
- `prefer_not_to_merge` — Disables merging of data parts on this volume. When this setting is enabled, merging data on this volume is not allowed. This allows controlling how ClickHouse works with slow disks.
|
||||
- `perform_ttl_move_on_insert` — Disables TTL move on data part INSERT. By default, if we insert a data part that has already expired by the TTL move rule, it immediately goes to the volume/disk declared in the move rule. This can significantly slow down inserts when the destination volume/disk is slow (e.g. S3).
|
||||
- `load_balancing` - Policy for disk balancing, `round_robin` or `least_used`.
|
||||
|
||||
Configuration examples:
|
||||
|
||||
@ -724,7 +727,7 @@ Cofiguration examples:
|
||||
<move_factor>0.2</move_factor>
|
||||
</moving_from_ssd_to_hdd>
|
||||
|
||||
<small_jbod_with_external_no_merges>
|
||||
<small_jbod_with_external_no_merges>
|
||||
<volumes>
|
||||
<main>
|
||||
<disk>jbod1</disk>
|
||||
|
@ -238,7 +238,7 @@ To start the server as a daemon, run:
|
||||
$ sudo clickhouse start
|
||||
```
|
||||
|
||||
There are also another ways to run ClickHouse:
|
||||
There are also other ways to run ClickHouse:
|
||||
|
||||
``` bash
|
||||
$ sudo service clickhouse-server start
|
||||
|
@ -31,8 +31,11 @@ The supported formats are:
|
||||
| [JSON](#json) | ✗ | ✔ |
|
||||
| [JSONAsString](#jsonasstring) | ✔ | ✗ |
|
||||
| [JSONStrings](#jsonstrings) | ✗ | ✔ |
|
||||
| [JSONColumns](#jsoncolumns) | ✔ | ✔ |
|
||||
| [JSONColumnsWithMetadata](#jsoncolumnswithmetadata) | ✗ | ✔ |
|
||||
| [JSONCompact](#jsoncompact) | ✗ | ✔ |
|
||||
| [JSONCompactStrings](#jsoncompactstrings) | ✗ | ✔ |
|
||||
| [JSONCompactColumns](#jsoncompactcolumns) | ✔ | ✔ |
|
||||
| [JSONEachRow](#jsoneachrow) | ✔ | ✔ |
|
||||
| [JSONEachRowWithProgress](#jsoneachrowwithprogress) | ✗ | ✔ |
|
||||
| [JSONStringsEachRow](#jsonstringseachrow) | ✔ | ✔ |
|
||||
@ -400,6 +403,8 @@ Both data output and parsing are supported in this format. For parsing, any orde
|
||||
|
||||
Parsing allows the presence of the additional field `tskv` without the equal sign or a value. This field is ignored.
|
||||
|
||||
During import, columns with unknown names will be skipped if setting [input_format_skip_unknown_fields](../operations/settings/settings.md#settings-input-format-skip-unknown-fields) is set to 1.
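
For example, a minimal import sketch (the table `events` and its columns are hypothetical, and the fields after `FORMAT TSKV` are tab-separated):

``` sql
SET input_format_skip_unknown_fields = 1;

-- `extra_field` is not a column of `events`, so it is skipped because of the setting above
INSERT INTO events FORMAT TSKV
search_phrase=clickhouse	cnt=42	extra_field=ignored
```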
|
||||
|
||||
## CSV {#csv}
|
||||
|
||||
Comma Separated Values format ([RFC](https://tools.ietf.org/html/rfc4180)).
|
||||
@ -459,15 +464,15 @@ SELECT SearchPhrase, count() AS c FROM test.hits GROUP BY SearchPhrase WITH TOTA
|
||||
"meta":
|
||||
[
|
||||
{
|
||||
"name": "'hello'",
|
||||
"name": "num",
|
||||
"type": "Int32"
|
||||
},
|
||||
{
|
||||
"name": "str",
|
||||
"type": "String"
|
||||
},
|
||||
{
|
||||
"name": "multiply(42, number)",
|
||||
"type": "UInt64"
|
||||
},
|
||||
{
|
||||
"name": "range(5)",
|
||||
"name": "arr",
|
||||
"type": "Array(UInt8)"
|
||||
}
|
||||
],
|
||||
@ -475,25 +480,32 @@ SELECT SearchPhrase, count() AS c FROM test.hits GROUP BY SearchPhrase WITH TOTA
|
||||
"data":
|
||||
[
|
||||
{
|
||||
"'hello'": "hello",
|
||||
"multiply(42, number)": "0",
|
||||
"range(5)": [0,1,2,3,4]
|
||||
"num": 42,
|
||||
"str": "hello",
|
||||
"arr": [0,1]
|
||||
},
|
||||
{
|
||||
"'hello'": "hello",
|
||||
"multiply(42, number)": "42",
|
||||
"range(5)": [0,1,2,3,4]
|
||||
"num": 43,
|
||||
"str": "hello",
|
||||
"arr": [0,1,2]
|
||||
},
|
||||
{
|
||||
"'hello'": "hello",
|
||||
"multiply(42, number)": "84",
|
||||
"range(5)": [0,1,2,3,4]
|
||||
"num": 44,
|
||||
"str": "hello",
|
||||
"arr": [0,1,2,3]
|
||||
}
|
||||
],
|
||||
|
||||
"rows": 3,
|
||||
|
||||
"rows_before_limit_at_least": 3
|
||||
"rows_before_limit_at_least": 3,
|
||||
|
||||
"statistics":
|
||||
{
|
||||
"elapsed": 0.001137687,
|
||||
"rows_read": 3,
|
||||
"bytes_read": 24
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
@ -528,15 +540,15 @@ Example:
|
||||
"meta":
|
||||
[
|
||||
{
|
||||
"name": "'hello'",
|
||||
"name": "num",
|
||||
"type": "Int32"
|
||||
},
|
||||
{
|
||||
"name": "str",
|
||||
"type": "String"
|
||||
},
|
||||
{
|
||||
"name": "multiply(42, number)",
|
||||
"type": "UInt64"
|
||||
},
|
||||
{
|
||||
"name": "range(5)",
|
||||
"name": "arr",
|
||||
"type": "Array(UInt8)"
|
||||
}
|
||||
],
|
||||
@ -544,25 +556,95 @@ Example:
|
||||
"data":
|
||||
[
|
||||
{
|
||||
"'hello'": "hello",
|
||||
"multiply(42, number)": "0",
|
||||
"range(5)": "[0,1,2,3,4]"
|
||||
"num": "42",
|
||||
"str": "hello",
|
||||
"arr": "[0,1]"
|
||||
},
|
||||
{
|
||||
"'hello'": "hello",
|
||||
"multiply(42, number)": "42",
|
||||
"range(5)": "[0,1,2,3,4]"
|
||||
"num": "43",
|
||||
"str": "hello",
|
||||
"arr": "[0,1,2]"
|
||||
},
|
||||
{
|
||||
"'hello'": "hello",
|
||||
"multiply(42, number)": "84",
|
||||
"range(5)": "[0,1,2,3,4]"
|
||||
"num": "44",
|
||||
"str": "hello",
|
||||
"arr": "[0,1,2,3]"
|
||||
}
|
||||
],
|
||||
|
||||
"rows": 3,
|
||||
|
||||
"rows_before_limit_at_least": 3
|
||||
"rows_before_limit_at_least": 3,
|
||||
|
||||
"statistics":
|
||||
{
|
||||
"elapsed": 0.001403233,
|
||||
"rows_read": 3,
|
||||
"bytes_read": 24
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## JSONColumns {#jsoncolumns}
|
||||
|
||||
In this format, all data is represented as a single JSON Object.
|
||||
Note that the JSONColumns output format buffers all data in memory in order to output it as a single block, which can lead to high memory consumption.
|
||||
|
||||
Example:
|
||||
```json
|
||||
{
|
||||
"num": [42, 43, 44],
|
||||
"str": ["hello", "hello", "hello"],
|
||||
"arr": [[0,1], [0,1,2], [0,1,2,3]]
|
||||
}
|
||||
```
|
||||
|
||||
During import, columns with unknown names will be skipped if setting [input_format_skip_unknown_fields](../operations/settings/settings.md#settings-input-format-skip-unknown-fields) is set to 1.
|
||||
Columns that are not present in the block will be filled with default values (you can use the [input_format_defaults_for_omitted_fields](../operations/settings/settings.md#session_settings-input_format_defaults_for_omitted_fields) setting here).
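
For example, a minimal import sketch (the table `t` with columns `num` and `str` is hypothetical):

``` sql
SET input_format_skip_unknown_fields = 1;

-- `unknown` is skipped because of the setting above;
-- `str` is not present in the input, so it is filled with its default value
INSERT INTO t FORMAT JSONColumns {"num": [42, 43], "unknown": [1, 2]}
```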
|
||||
|
||||
|
||||
## JSONColumnsWithMetadata {#jsoncolumnsmonoblock}
|
||||
|
||||
Differs from JSONColumns output format in that it also outputs some metadata and statistics (similar to JSON output format).
|
||||
This format buffers all data in memory and then outputs it as a single block, so it can lead to high memory consumption.
|
||||
|
||||
Example:
|
||||
```json
|
||||
{
|
||||
"meta":
|
||||
[
|
||||
{
|
||||
"name": "num",
|
||||
"type": "Int32"
|
||||
},
|
||||
{
|
||||
"name": "str",
|
||||
"type": "String"
|
||||
},
|
||||
|
||||
{
|
||||
"name": "arr",
|
||||
"type": "Array(UInt8)"
|
||||
}
|
||||
],
|
||||
|
||||
"data":
|
||||
{
|
||||
"num": [42, 43, 44],
|
||||
"str": ["hello", "hello", "hello"],
|
||||
"arr": [[0,1], [0,1,2], [0,1,2,3]]
|
||||
},
|
||||
|
||||
"rows": 3,
|
||||
|
||||
"rows_before_limit_at_least": 3,
|
||||
|
||||
"statistics":
|
||||
{
|
||||
"elapsed": 0.000272376,
|
||||
"rows_read": 3,
|
||||
"bytes_read": 24
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
@ -618,71 +700,101 @@ Result:
|
||||
|
||||
Differs from JSON only in that data rows are output in arrays, not in objects.
|
||||
|
||||
Examples:
|
||||
|
||||
1) JSONCompact:
|
||||
```json
|
||||
{
|
||||
"meta":
|
||||
[
|
||||
{
|
||||
"name": "num",
|
||||
"type": "Int32"
|
||||
},
|
||||
{
|
||||
"name": "str",
|
||||
"type": "String"
|
||||
},
|
||||
{
|
||||
"name": "arr",
|
||||
"type": "Array(UInt8)"
|
||||
}
|
||||
],
|
||||
|
||||
"data":
|
||||
[
|
||||
[42, "hello", [0,1]],
|
||||
[43, "hello", [0,1,2]],
|
||||
[44, "hello", [0,1,2,3]]
|
||||
],
|
||||
|
||||
"rows": 3,
|
||||
|
||||
"rows_before_limit_at_least": 3,
|
||||
|
||||
"statistics":
|
||||
{
|
||||
"elapsed": 0.001222069,
|
||||
"rows_read": 3,
|
||||
"bytes_read": 24
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
2) JSONCompactStrings
|
||||
```json
|
||||
{
|
||||
"meta":
|
||||
[
|
||||
{
|
||||
"name": "num",
|
||||
"type": "Int32"
|
||||
},
|
||||
{
|
||||
"name": "str",
|
||||
"type": "String"
|
||||
},
|
||||
{
|
||||
"name": "arr",
|
||||
"type": "Array(UInt8)"
|
||||
}
|
||||
],
|
||||
|
||||
"data":
|
||||
[
|
||||
["42", "hello", "[0,1]"],
|
||||
["43", "hello", "[0,1,2]"],
|
||||
["44", "hello", "[0,1,2,3]"]
|
||||
],
|
||||
|
||||
"rows": 3,
|
||||
|
||||
"rows_before_limit_at_least": 3,
|
||||
|
||||
"statistics":
|
||||
{
|
||||
"elapsed": 0.001572097,
|
||||
"rows_read": 3,
|
||||
"bytes_read": 24
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## JSONCompactColumns {#jsoncompactcolumns}
|
||||
|
||||
In this format, all data is represented as a single JSON Array.
|
||||
Note that the JSONCompactColumns output format buffers all data in memory in order to output it as a single block, which can lead to high memory consumption.
|
||||
|
||||
Example:
|
||||
|
||||
```
|
||||
// JSONCompact
|
||||
{
|
||||
"meta":
|
||||
[
|
||||
{
|
||||
"name": "'hello'",
|
||||
"type": "String"
|
||||
},
|
||||
{
|
||||
"name": "multiply(42, number)",
|
||||
"type": "UInt64"
|
||||
},
|
||||
{
|
||||
"name": "range(5)",
|
||||
"type": "Array(UInt8)"
|
||||
}
|
||||
],
|
||||
|
||||
"data":
|
||||
[
|
||||
["hello", "0", [0,1,2,3,4]],
|
||||
["hello", "42", [0,1,2,3,4]],
|
||||
["hello", "84", [0,1,2,3,4]]
|
||||
],
|
||||
|
||||
"rows": 3,
|
||||
|
||||
"rows_before_limit_at_least": 3
|
||||
}
|
||||
```json
|
||||
[
|
||||
[42, 43, 44],
|
||||
["hello", "hello", "hello"],
|
||||
[[0,1], [0,1,2], [0,1,2,3]]
|
||||
]
|
||||
```
|
||||
|
||||
```
|
||||
// JSONCompactStrings
|
||||
{
|
||||
"meta":
|
||||
[
|
||||
{
|
||||
"name": "'hello'",
|
||||
"type": "String"
|
||||
},
|
||||
{
|
||||
"name": "multiply(42, number)",
|
||||
"type": "UInt64"
|
||||
},
|
||||
{
|
||||
"name": "range(5)",
|
||||
"type": "Array(UInt8)"
|
||||
}
|
||||
],
|
||||
|
||||
"data":
|
||||
[
|
||||
["hello", "0", "[0,1,2,3,4]"],
|
||||
["hello", "42", "[0,1,2,3,4]"],
|
||||
["hello", "84", "[0,1,2,3,4]"]
|
||||
],
|
||||
|
||||
"rows": 3,
|
||||
|
||||
"rows_before_limit_at_least": 3
|
||||
}
|
||||
```
|
||||
Columns that are not present in the block will be filled with default values (you can use the [input_format_defaults_for_omitted_fields](../operations/settings/settings.md#session_settings-input_format_defaults_for_omitted_fields) setting here).
|
||||
|
||||
## JSONEachRow {#jsoneachrow}
|
||||
## JSONStringsEachRow {#jsonstringseachrow}
|
||||
@ -699,15 +811,17 @@ When using these formats, ClickHouse outputs rows as separated, newline-delimite
|
||||
|
||||
When inserting the data, you should provide a separate JSON value for each row.
|
||||
|
||||
In the JSONEachRow/JSONStringsEachRow input formats, columns with unknown names will be skipped if the setting [input_format_skip_unknown_fields](../operations/settings/settings.md#settings-input-format-skip-unknown-fields) is set to 1.
|
||||
|
||||
## JSONEachRowWithProgress {#jsoneachrowwithprogress}
|
||||
## JSONStringsEachRowWithProgress {#jsonstringseachrowwithprogress}
|
||||
|
||||
Differs from `JSONEachRow`/`JSONStringsEachRow` in that ClickHouse will also yield progress information as JSON values.
|
||||
|
||||
```json
|
||||
{"row":{"'hello'":"hello","multiply(42, number)":"0","range(5)":[0,1,2,3,4]}}
|
||||
{"row":{"'hello'":"hello","multiply(42, number)":"42","range(5)":[0,1,2,3,4]}}
|
||||
{"row":{"'hello'":"hello","multiply(42, number)":"84","range(5)":[0,1,2,3,4]}}
|
||||
{"row":{"num":42,"str":"hello","arr":[0,1]}}
|
||||
{"row":{"num":43,"str":"hello","arr":[0,1,2]}}
|
||||
{"row":{"num":44,"str":"hello","arr":[0,1,2,3]}}
|
||||
{"progress":{"read_rows":"3","read_bytes":"24","written_rows":"0","written_bytes":"0","total_rows_to_read":"3"}}
|
||||
```
|
||||
|
||||
@ -728,11 +842,11 @@ Differs from `JSONCompactStringsEachRow` in that in that it also prints the head
|
||||
Differs from `JSONCompactStringsEachRow` in that it also prints two header rows with column names and types, similar to [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes).
|
||||
|
||||
```json
|
||||
["'hello'", "multiply(42, number)", "range(5)"]
|
||||
["String", "UInt64", "Array(UInt8)"]
|
||||
["hello", "0", [0,1,2,3,4]]
|
||||
["hello", "42", [0,1,2,3,4]]
|
||||
["hello", "84", [0,1,2,3,4]]
|
||||
["num", "str", "arr"]
|
||||
["Int32", "String", "Array(UInt8)"]
|
||||
[42, "hello", [0,1]]
|
||||
[43, "hello", [0,1,2]]
|
||||
[44, "hello", [0,1,2,3]]
|
||||
```
|
||||
|
||||
### Inserting Data {#inserting-data}
|
||||
|
@ -12,11 +12,13 @@ Columns:
|
||||
|
||||
- `name` ([String](../../sql-reference/data-types/string.md)) — Table name.
|
||||
|
||||
- `uuid` ([UUID](../../sql-reference/data-types/uuid.md)) — Table uuid (Atomic database).
|
||||
|
||||
- `engine` ([String](../../sql-reference/data-types/string.md)) — Table engine name (without parameters).
|
||||
|
||||
- `is_temporary` ([UInt8](../../sql-reference/data-types/int-uint.md)) - Flag that indicates whether the table is temporary.
|
||||
|
||||
- `data_path` ([String](../../sql-reference/data-types/string.md)) - Path to the table data in the file system.
|
||||
- `data_paths` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) - Paths to the table data in the file systems.
|
||||
|
||||
- `metadata_path` ([String](../../sql-reference/data-types/string.md)) - Path to the table metadata in the file system.
|
||||
|
||||
@ -60,6 +62,14 @@ Columns:
|
||||
|
||||
- `has_own_data` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Flag that indicates whether the table itself stores some data on disk or only accesses some other source.
|
||||
|
||||
- `loading_dependencies_database` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) - Database loading dependencies (list of objects which should be loaded before the current object).
|
||||
|
||||
- `loading_dependencies_table` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) - Table loading dependencies (list of objects which should be loaded before the current object).
|
||||
|
||||
- `loading_dependent_database` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) - Dependent loading database.
|
||||
|
||||
- `loading_dependent_table` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) - Dependent loading table.
|
||||
|
||||
The `system.tables` table is used in `SHOW TABLES` query implementation.
|
||||
|
||||
**Example**
|
||||
@ -95,6 +105,10 @@ lifetime_rows: ᴺᵁᴸᴸ
|
||||
lifetime_bytes: ᴺᵁᴸᴸ
|
||||
comment:
|
||||
has_own_data: 0
|
||||
loading_dependencies_database: []
|
||||
loading_dependencies_table: []
|
||||
loading_dependent_database: []
|
||||
loading_dependent_table: []
|
||||
|
||||
Row 2:
|
||||
──────
|
||||
@ -122,4 +136,8 @@ lifetime_rows: ᴺᵁᴸᴸ
|
||||
lifetime_bytes: ᴺᵁᴸᴸ
|
||||
comment:
|
||||
has_own_data: 0
|
||||
loading_dependencies_database: []
|
||||
loading_dependencies_table: []
|
||||
loading_dependent_database: []
|
||||
loading_dependent_table: []
|
||||
```
|
||||
|
@ -96,10 +96,14 @@ SELECT fuzzBits(materialize('abacaba'), 0.1)
|
||||
FROM numbers(3)
|
||||
```
|
||||
|
||||
\`\`\` text
|
||||
┌─fuzzBits(materialize(‘abacaba’), 0.1)─┐
|
||||
│ abaaaja │
|
||||
│ a\*cjab+ │
|
||||
│ aeca2A │
|
||||
└───────────────────────────────────────┘
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─fuzzBits(materialize('abacaba'), 0.1)─┐
|
||||
│ abaaaja │
|
||||
│ a*cjab+ │
|
||||
│ aeca2A │
|
||||
└───────────────────────────────────────┘
|
||||
```
|
||||
|
||||
|
||||
|
@ -11,10 +11,16 @@ The functions for working with UUID are listed below.
|
||||
|
||||
Generates the [UUID](../data-types/uuid.md) of [version 4](https://tools.ietf.org/html/rfc4122#section-4.4).
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
generateUUIDv4()
|
||||
generateUUIDv4([x])
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `x` — [Expression](../../sql-reference/syntax.md#syntax-expressions) resulting in any of the [supported data types](../../sql-reference/data-types/index.md#data_types). The resulting value is discarded, but the expression itself is used for bypassing [common subexpression elimination](../../sql-reference/functions/index.md#common-subexpression-elimination) if the function is called multiple times in one query. Optional parameter.
|
||||
|
||||
**Returned value**
|
||||
|
||||
The UUID type value.
|
||||
@ -37,6 +43,15 @@ SELECT * FROM t_uuid
|
||||
└──────────────────────────────────────┘
|
||||
```
|
||||
|
||||
**Usage example if it is needed to generate multiple values in one row**
|
||||
|
||||
```sql
|
||||
SELECT generateUUIDv4(1), generateUUIDv4(2)
|
||||
┌─generateUUIDv4(1)────────────────────┬─generateUUIDv4(2)────────────────────┐
|
||||
│ 2d49dc6e-ddce-4cd0-afb8-790956df54c1 │ 8abf8c13-7dea-4fdf-af3e-0e18767770e6 │
|
||||
└──────────────────────────────────────┴──────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## empty {#empty}
|
||||
|
||||
Checks whether the input UUID is empty.
|
||||
|
@ -105,7 +105,7 @@ Example: `regionToCountry(toUInt32(213)) = 225` converts Moscow (213) to Russia
|
||||
Converts a region to a continent. In every other way, this function is the same as ‘regionToCity’.
|
||||
Example: `regionToContinent(toUInt32(213)) = 10001` converts Moscow (213) to Eurasia (10001).
|
||||
|
||||
### regionToTopContinent (#regiontotopcontinent) {#regiontotopcontinent-regiontotopcontinent}
|
||||
### regionToTopContinent(id\[, geobase\]) {#regiontotopcontinentid-geobase}
|
||||
|
||||
Finds the highest continent in the hierarchy for the region.
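
For example, a minimal sketch (it assumes a regions hierarchy (geobase) is configured on the server; the exact identifier returned depends on the loaded geobase):

``` sql
SELECT regionToTopContinent(toUInt32(213)) AS top_continent;
-- with the default geobase, Moscow (213) is expected to resolve to Eurasia (10001)
```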
|
||||
|
||||
|
@ -29,12 +29,14 @@ There are multiple ways of user identification:
|
||||
- `IDENTIFIED WITH no_password`
|
||||
- `IDENTIFIED WITH plaintext_password BY 'qwerty'`
|
||||
- `IDENTIFIED WITH sha256_password BY 'qwerty'` or `IDENTIFIED BY 'password'`
|
||||
- `IDENTIFIED WITH sha256_hash BY 'hash'`
|
||||
- `IDENTIFIED WITH sha256_hash BY 'hash'` or `IDENTIFIED WITH sha256_hash BY 'hash' SALT 'salt'`
|
||||
- `IDENTIFIED WITH double_sha1_password BY 'qwerty'`
|
||||
- `IDENTIFIED WITH double_sha1_hash BY 'hash'`
|
||||
- `IDENTIFIED WITH ldap SERVER 'server_name'`
|
||||
- `IDENTIFIED WITH kerberos` or `IDENTIFIED WITH kerberos REALM 'realm'`
|
||||
|
||||
For identification with sha256_hash using `SALT`, the hash must be calculated from the concatenation of 'password' and 'salt'.
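
For example, a minimal sketch of preparing such a hash (the user name `robin`, the password, and the salt value are illustrative):

``` sql
-- hash the concatenation of the password and the salt, and take its hex representation
SELECT hex(SHA256(concat('qwerty', 'my_salt'))) AS salted_hash;

-- then create the user with the precomputed hash and the same salt:
-- CREATE USER robin IDENTIFIED WITH sha256_hash BY '<salted_hash from the query above>' SALT 'my_salt'
```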
|
||||
|
||||
## User Host {#user-host}
|
||||
|
||||
User host is a host from which a connection to ClickHouse server could be established. The host can be specified in the `HOST` query section in the following ways:
|
||||
|
@ -12,11 +12,13 @@
|
||||
|
||||
- `name` ([String](../../sql-reference/data-types/string.md)) — имя таблицы.
|
||||
|
||||
- `uuid` ([UUID](../../sql-reference/data-types/uuid.md)) — Uuid таблицы (Atomic database).
|
||||
|
||||
- `engine` ([String](../../sql-reference/data-types/string.md)) — движок таблицы (без параметров).
|
||||
|
||||
- `is_temporary` ([UInt8](../../sql-reference/data-types/int-uint.md)) — флаг, указывающий на то, временная это таблица или нет.
|
||||
|
||||
- `data_path` ([String](../../sql-reference/data-types/string.md)) — путь к данным таблицы в файловой системе.
|
||||
- `data_paths` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — пути к данным таблицы в файловых системах.
|
||||
|
||||
- `metadata_path` ([String](../../sql-reference/data-types/string.md)) — путь к табличным метаданным в файловой системе.
|
||||
|
||||
@ -60,6 +62,14 @@
|
||||
|
||||
- `has_own_data` ([UInt8](../../sql-reference/data-types/int-uint.md)) — флаг, показывающий хранит ли таблица сама какие-то данные на диске или только обращается к какому-то другому источнику.
|
||||
|
||||
- `loading_dependencies_database` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) - базы данных необходимые для загрузки объекта.
|
||||
|
||||
- `loading_dependencies_table` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) - таблицы необходимые для загрузки объекта.
|
||||
|
||||
- `loading_dependent_database` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) - базы данных, которым объект необходим для загрузки.
|
||||
|
||||
- `loading_dependent_table` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) - таблицы, которым объект необходим для загрузки.
|
||||
|
||||
Таблица `system.tables` используется при выполнении запроса `SHOW TABLES`.
|
||||
|
||||
**Пример**
|
||||
@ -95,6 +105,10 @@ lifetime_rows: ᴺᵁᴸᴸ
|
||||
lifetime_bytes: ᴺᵁᴸᴸ
|
||||
comment:
|
||||
has_own_data: 0
|
||||
loading_dependencies_database: []
|
||||
loading_dependencies_table: []
|
||||
loading_dependent_database: []
|
||||
loading_dependent_table: []
|
||||
|
||||
Row 2:
|
||||
──────
|
||||
@ -122,4 +136,8 @@ lifetime_rows: ᴺᵁᴸᴸ
|
||||
lifetime_bytes: ᴺᵁᴸᴸ
|
||||
comment:
|
||||
has_own_data: 0
|
||||
loading_dependencies_database: []
|
||||
loading_dependencies_table: []
|
||||
loading_dependent_database: []
|
||||
loading_dependent_table: []
|
||||
```
|
||||
|
@ -9,10 +9,16 @@ sidebar_label: "Функции для работы с UUID"
|
||||
|
||||
Генерирует идентификатор [UUID версии 4](https://tools.ietf.org/html/rfc4122#section-4.4).
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
generateUUIDv4()
|
||||
generateUUIDv4([x])
|
||||
```
|
||||
|
||||
**Аргументы**
|
||||
|
||||
- `x` — [выражение](../syntax.md#syntax-expressions), возвращающее значение одного из [поддерживаемых типов данных](../data-types/index.md#data_types). Значение используется, чтобы избежать [склейки одинаковых выражений](index.md#common-subexpression-elimination), если функция вызывается несколько раз в одном запросе. Необязательный параметр.
|
||||
|
||||
**Возвращаемое значение**
|
||||
|
||||
Значение типа [UUID](../../sql-reference/functions/uuid-functions.md).
|
||||
@ -35,6 +41,15 @@ SELECT * FROM t_uuid
|
||||
└──────────────────────────────────────┘
|
||||
```
|
||||
|
||||
**Пример использования, для генерации нескольких значений в одной строке**
|
||||
|
||||
```sql
|
||||
SELECT generateUUIDv4(1), generateUUIDv4(2)
|
||||
┌─generateUUIDv4(1)────────────────────┬─generateUUIDv4(2)────────────────────┐
|
||||
│ 2d49dc6e-ddce-4cd0-afb8-790956df54c1 │ 8abf8c13-7dea-4fdf-af3e-0e18767770e6 │
|
||||
└──────────────────────────────────────┴──────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## empty {#empty}
|
||||
|
||||
Проверяет, является ли входной UUID пустым.
|
||||
|
@ -29,12 +29,14 @@ CREATE USER [IF NOT EXISTS | OR REPLACE] name1 [ON CLUSTER cluster_name1]
|
||||
- `IDENTIFIED WITH no_password`
|
||||
- `IDENTIFIED WITH plaintext_password BY 'qwerty'`
|
||||
- `IDENTIFIED WITH sha256_password BY 'qwerty'` or `IDENTIFIED BY 'password'`
|
||||
- `IDENTIFIED WITH sha256_hash BY 'hash'`
|
||||
- `IDENTIFIED WITH sha256_hash BY 'hash'` or `IDENTIFIED WITH sha256_hash BY 'hash' SALT 'salt'`
|
||||
- `IDENTIFIED WITH double_sha1_password BY 'qwerty'`
|
||||
- `IDENTIFIED WITH double_sha1_hash BY 'hash'`
|
||||
- `IDENTIFIED WITH ldap SERVER 'server_name'`
|
||||
- `IDENTIFIED WITH kerberos` or `IDENTIFIED WITH kerberos REALM 'realm'`
|
||||
|
||||
Для идентификации с sha256_hash используя `SALT` - хэш должен быть вычислен от конкатенации 'password' и 'salt'.
|
||||
|
||||
## Пользовательский хост
|
||||
|
||||
Пользовательский хост — это хост, с которого можно установить соединение с сервером ClickHouse. Хост задается в секции `HOST` следующими способами:
|
||||
|
@ -1,113 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
import datetime
|
||||
import logging
|
||||
import os
|
||||
import time
|
||||
|
||||
import nav # monkey patches mkdocs
|
||||
|
||||
import mkdocs.commands
|
||||
from mkdocs import config
|
||||
from mkdocs import exceptions
|
||||
|
||||
import mdx_clickhouse
|
||||
import redirects
|
||||
|
||||
import util
|
||||
|
||||
|
||||
def build_for_lang(lang, args):
|
||||
logging.info(f"Building {lang} blog")
|
||||
|
||||
try:
|
||||
theme_cfg = {
|
||||
"name": None,
|
||||
"custom_dir": os.path.join(os.path.dirname(__file__), "..", args.theme_dir),
|
||||
"language": lang,
|
||||
"direction": "ltr",
|
||||
"static_templates": ["404.html"],
|
||||
"extra": {
|
||||
"now": int(
|
||||
time.mktime(datetime.datetime.now().timetuple())
|
||||
) # TODO better way to avoid caching
|
||||
},
|
||||
}
|
||||
|
||||
# the following list of languages is sorted according to
|
||||
# https://en.wikipedia.org/wiki/List_of_languages_by_total_number_of_speakers
|
||||
languages = {"en": "English"}
|
||||
|
||||
site_names = {"en": "ClickHouse Blog"}
|
||||
|
||||
assert len(site_names) == len(languages)
|
||||
|
||||
site_dir = os.path.join(args.blog_output_dir, lang)
|
||||
|
||||
plugins = ["macros"]
|
||||
if args.htmlproofer:
|
||||
plugins.append("htmlproofer")
|
||||
|
||||
website_url = "https://clickhouse.com"
|
||||
site_name = site_names.get(lang, site_names["en"])
|
||||
blog_nav, post_meta = nav.build_blog_nav(lang, args)
|
||||
raw_config = dict(
|
||||
site_name=site_name,
|
||||
site_url=f"{website_url}/blog/{lang}/",
|
||||
docs_dir=os.path.join(args.blog_dir, lang),
|
||||
site_dir=site_dir,
|
||||
strict=True,
|
||||
theme=theme_cfg,
|
||||
nav=blog_nav,
|
||||
copyright="©2016–2022 ClickHouse, Inc.",
|
||||
use_directory_urls=True,
|
||||
repo_name="ClickHouse/ClickHouse",
|
||||
repo_url="https://github.com/ClickHouse/ClickHouse/",
|
||||
edit_uri=f"edit/master/website/blog/{lang}",
|
||||
markdown_extensions=mdx_clickhouse.MARKDOWN_EXTENSIONS,
|
||||
plugins=plugins,
|
||||
extra=dict(
|
||||
now=datetime.datetime.now().isoformat(),
|
||||
rev=args.rev,
|
||||
rev_short=args.rev_short,
|
||||
rev_url=args.rev_url,
|
||||
website_url=website_url,
|
||||
events=args.events,
|
||||
languages=languages,
|
||||
includes_dir=os.path.join(os.path.dirname(__file__), "..", "_includes"),
|
||||
is_blog=True,
|
||||
post_meta=post_meta,
|
||||
today=datetime.date.today().isoformat(),
|
||||
),
|
||||
)
|
||||
|
||||
cfg = config.load_config(**raw_config)
|
||||
mkdocs.commands.build.build(cfg)
|
||||
|
||||
redirects.build_blog_redirects(args)
|
||||
|
||||
env = util.init_jinja2_env(args)
|
||||
with open(
|
||||
os.path.join(args.website_dir, "templates", "blog", "rss.xml"), "rb"
|
||||
) as f:
|
||||
rss_template_string = f.read().decode("utf-8").strip()
|
||||
rss_template = env.from_string(rss_template_string)
|
||||
with open(os.path.join(args.blog_output_dir, lang, "rss.xml"), "w") as f:
|
||||
f.write(rss_template.render({"config": raw_config}))
|
||||
|
||||
logging.info(f"Finished building {lang} blog")
|
||||
|
||||
except exceptions.ConfigurationError as e:
|
||||
raise SystemExit("\n" + str(e))
|
||||
|
||||
|
||||
def build_blog(args):
|
||||
tasks = []
|
||||
for lang in args.blog_lang.split(","):
|
||||
if lang:
|
||||
tasks.append(
|
||||
(
|
||||
lang,
|
||||
args,
|
||||
)
|
||||
)
|
||||
util.run_function_in_parallel(build_for_lang, tasks, threads=False)
|
@ -1,144 +1,17 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import argparse
|
||||
import datetime
|
||||
import logging
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
|
||||
import jinja2
|
||||
import livereload
|
||||
import markdown.util
|
||||
|
||||
import nav # monkey patches mkdocs
|
||||
|
||||
from mkdocs import config
|
||||
from mkdocs import exceptions
|
||||
import mkdocs.commands.build
|
||||
|
||||
import blog
|
||||
import mdx_clickhouse
|
||||
import redirects
|
||||
import util
|
||||
import website
|
||||
|
||||
from cmake_in_clickhouse_generator import generate_cmake_flags_files
|
||||
|
||||
|
||||
class ClickHouseMarkdown(markdown.extensions.Extension):
|
||||
class ClickHousePreprocessor(markdown.util.Processor):
|
||||
def run(self, lines):
|
||||
for line in lines:
|
||||
if "<!--hide-->" not in line:
|
||||
yield line
|
||||
|
||||
def extendMarkdown(self, md):
|
||||
md.preprocessors.register(
|
||||
self.ClickHousePreprocessor(), "clickhouse_preprocessor", 31
|
||||
)
|
||||
|
||||
|
||||
markdown.extensions.ClickHouseMarkdown = ClickHouseMarkdown
|
||||
|
||||
|
||||
def build_for_lang(lang, args):
|
||||
logging.info(f"Building {lang} docs")
|
||||
|
||||
try:
|
||||
theme_cfg = {
|
||||
"name": None,
|
||||
"custom_dir": os.path.join(os.path.dirname(__file__), "..", args.theme_dir),
|
||||
"language": lang,
|
||||
"direction": "rtl" if lang == "fa" else "ltr",
|
||||
"static_templates": ["404.html"],
|
||||
"extra": {
|
||||
"now": int(
|
||||
time.mktime(datetime.datetime.now().timetuple())
|
||||
) # TODO better way to avoid caching
|
||||
},
|
||||
}
|
||||
|
||||
# the following list of languages is sorted according to
|
||||
# https://en.wikipedia.org/wiki/List_of_languages_by_total_number_of_speakers
|
||||
languages = {"en": "English", "zh": "中文", "ru": "Русский", "ja": "日本語"}
|
||||
|
||||
site_names = {
|
||||
"en": "ClickHouse %s Documentation",
|
||||
"zh": "ClickHouse文档 %s",
|
||||
"ru": "Документация ClickHouse %s",
|
||||
"ja": "ClickHouseドキュメント %s",
|
||||
}
|
||||
|
||||
assert len(site_names) == len(languages)
|
||||
|
||||
site_dir = os.path.join(args.docs_output_dir, lang)
|
||||
|
||||
plugins = ["macros"]
|
||||
if args.htmlproofer:
|
||||
plugins.append("htmlproofer")
|
||||
|
||||
website_url = "https://clickhouse.com"
|
||||
site_name = site_names.get(lang, site_names["en"]) % ""
|
||||
site_name = site_name.replace(" ", " ")
|
||||
|
||||
raw_config = dict(
|
||||
site_name=site_name,
|
||||
site_url=f"{website_url}/docs/{lang}/",
|
||||
docs_dir=os.path.join(args.docs_dir, lang),
|
||||
site_dir=site_dir,
|
||||
strict=True,
|
||||
theme=theme_cfg,
|
||||
copyright="©2016–2022 ClickHouse, Inc.",
|
||||
use_directory_urls=True,
|
||||
repo_name="ClickHouse/ClickHouse",
|
||||
repo_url="https://github.com/ClickHouse/ClickHouse/",
|
||||
edit_uri=f"edit/master/docs/{lang}",
|
||||
markdown_extensions=mdx_clickhouse.MARKDOWN_EXTENSIONS,
|
||||
plugins=plugins,
|
||||
extra=dict(
|
||||
now=datetime.datetime.now().isoformat(),
|
||||
rev=args.rev,
|
||||
rev_short=args.rev_short,
|
||||
rev_url=args.rev_url,
|
||||
website_url=website_url,
|
||||
events=args.events,
|
||||
languages=languages,
|
||||
includes_dir=os.path.join(os.path.dirname(__file__), "..", "_includes"),
|
||||
is_blog=False,
|
||||
),
|
||||
)
|
||||
|
||||
raw_config["nav"] = nav.build_docs_nav(lang, args)
|
||||
|
||||
cfg = config.load_config(**raw_config)
|
||||
|
||||
if not args.skip_multi_page:
|
||||
mkdocs.commands.build.build(cfg)
|
||||
|
||||
mdx_clickhouse.PatchedMacrosPlugin.disabled = False
|
||||
|
||||
logging.info(f"Finished building {lang} docs")
|
||||
|
||||
except exceptions.ConfigurationError as e:
|
||||
raise SystemExit("\n" + str(e))
|
||||
|
||||
|
||||
def build_docs(args):
|
||||
tasks = []
|
||||
for lang in args.lang.split(","):
|
||||
if lang:
|
||||
tasks.append(
|
||||
(
|
||||
lang,
|
||||
args,
|
||||
)
|
||||
)
|
||||
util.run_function_in_parallel(build_for_lang, tasks, threads=False)
|
||||
redirects.build_docs_redirects(args)
|
||||
|
||||
|
||||
def build(args):
|
||||
if os.path.exists(args.output_dir):
|
||||
@ -147,14 +20,6 @@ def build(args):
|
||||
if not args.skip_website:
|
||||
website.build_website(args)
|
||||
|
||||
if not args.skip_docs:
|
||||
generate_cmake_flags_files()
|
||||
|
||||
build_docs(args)
|
||||
|
||||
if not args.skip_blog:
|
||||
blog.build_blog(args)
|
||||
|
||||
if not args.skip_website:
|
||||
website.process_benchmark_results(args)
|
||||
website.minify_website(args)
|
||||
@ -171,20 +36,14 @@ if __name__ == "__main__":
|
||||
|
||||
arg_parser = argparse.ArgumentParser()
|
||||
arg_parser.add_argument("--lang", default="en,ru,zh,ja")
|
||||
arg_parser.add_argument("--blog-lang", default="en")
|
||||
arg_parser.add_argument("--docs-dir", default=".")
|
||||
arg_parser.add_argument("--theme-dir", default=website_dir)
|
||||
arg_parser.add_argument("--website-dir", default=website_dir)
|
||||
arg_parser.add_argument("--src-dir", default=src_dir)
|
||||
arg_parser.add_argument("--blog-dir", default=os.path.join(website_dir, "blog"))
|
||||
arg_parser.add_argument("--output-dir", default="build")
|
||||
arg_parser.add_argument("--nav-limit", type=int, default="0")
|
||||
arg_parser.add_argument("--skip-multi-page", action="store_true")
|
||||
arg_parser.add_argument("--skip-website", action="store_true")
|
||||
arg_parser.add_argument("--skip-blog", action="store_true")
|
||||
arg_parser.add_argument("--skip-docs", action="store_true")
|
||||
arg_parser.add_argument("--htmlproofer", action="store_true")
|
||||
arg_parser.add_argument("--no-docs-macros", action="store_true")
|
||||
arg_parser.add_argument("--livereload", type=int, default="0")
|
||||
arg_parser.add_argument("--verbose", action="store_true")
|
||||
|
||||
@ -196,11 +55,6 @@ if __name__ == "__main__":
|
||||
|
||||
logging.getLogger("MARKDOWN").setLevel(logging.INFO)
|
||||
|
||||
args.docs_output_dir = os.path.join(os.path.abspath(args.output_dir), "docs")
|
||||
args.blog_output_dir = os.path.join(os.path.abspath(args.output_dir), "blog")
|
||||
|
||||
from github import get_events
|
||||
|
||||
args.rev = (
|
||||
subprocess.check_output("git rev-parse HEAD", shell=True)
|
||||
.decode("utf-8")
|
||||
@ -212,9 +66,6 @@ if __name__ == "__main__":
|
||||
.strip()
|
||||
)
|
||||
args.rev_url = f"https://github.com/ClickHouse/ClickHouse/commit/{args.rev}"
|
||||
args.events = get_events(args)
|
||||
|
||||
from build import build
|
||||
|
||||
build(args)
|
||||
|
||||
@ -223,9 +74,6 @@ if __name__ == "__main__":
|
||||
new_args = sys.executable + " " + " ".join(new_args)
|
||||
|
||||
server = livereload.Server()
|
||||
server.watch(
|
||||
args.docs_dir + "**/*", livereload.shell(new_args, cwd="tools", shell=True)
|
||||
)
|
||||
server.watch(
|
||||
args.website_dir + "**/*",
|
||||
livereload.shell(new_args, cwd="tools", shell=True),
|
||||
|
@ -1,181 +0,0 @@
|
||||
import re
|
||||
import os
|
||||
from typing import TextIO, List, Tuple, Optional, Dict
|
||||
|
||||
# name, default value, description
|
||||
Entity = Tuple[str, str, str]
|
||||
|
||||
# https://regex101.com/r/R6iogw/12
|
||||
cmake_option_regex: str = (
|
||||
r"^\s*option\s*\(([A-Z_0-9${}]+)\s*(?:\"((?:.|\n)*?)\")?\s*(.*)?\).*$"
|
||||
)
|
||||
|
||||
ch_master_url: str = "https://github.com/clickhouse/clickhouse/blob/master/"
|
||||
|
||||
name_str: str = '<a name="{anchor}"></a>[`{name}`](' + ch_master_url + "{path}#L{line})"
|
||||
default_anchor_str: str = "[`{name}`](#{anchor})"
|
||||
|
||||
comment_var_regex: str = r"\${(.+)}"
|
||||
comment_var_replace: str = "`\\1`"
|
||||
|
||||
table_header: str = """
|
||||
| Name | Default value | Description | Comment |
|
||||
|------|---------------|-------------|---------|
|
||||
"""
|
||||
|
||||
# Needed to detect conditional variables (those which are defined twice)
|
||||
# name -> (path, values)
|
||||
entities: Dict[str, Tuple[str, str]] = {}
|
||||
|
||||
|
||||
def make_anchor(t: str) -> str:
|
||||
return "".join(
|
||||
["-" if i == "_" else i.lower() for i in t if i.isalpha() or i == "_"]
|
||||
)
|
||||
|
||||
|
||||
def process_comment(comment: str) -> str:
|
||||
return re.sub(comment_var_regex, comment_var_replace, comment, flags=re.MULTILINE)
|
||||
|
||||
|
||||
def build_entity(path: str, entity: Entity, line_comment: Tuple[int, str]) -> None:
|
||||
(line, comment) = line_comment
|
||||
(name, description, default) = entity
|
||||
|
||||
if name in entities:
|
||||
return
|
||||
|
||||
if len(default) == 0:
|
||||
formatted_default: str = "`OFF`"
|
||||
elif default[0] == "$":
|
||||
formatted_default: str = "`{}`".format(default[2:-1])
|
||||
else:
|
||||
formatted_default: str = "`" + default + "`"
|
||||
|
||||
formatted_name: str = name_str.format(
|
||||
anchor=make_anchor(name), name=name, path=path, line=line
|
||||
)
|
||||
|
||||
formatted_description: str = "".join(description.split("\n"))
|
||||
|
||||
formatted_comment: str = process_comment(comment)
|
||||
|
||||
formatted_entity: str = "| {} | {} | {} | {} |".format(
|
||||
formatted_name, formatted_default, formatted_description, formatted_comment
|
||||
)
|
||||
|
||||
entities[name] = path, formatted_entity
|
||||
|
||||
|
||||
def process_file(root_path: str, file_path: str, file_name: str) -> None:
|
||||
with open(os.path.join(file_path, file_name), "r") as cmake_file:
|
||||
contents: str = cmake_file.read()
|
||||
|
||||
def get_line_and_comment(target: str) -> Tuple[int, str]:
|
||||
contents_list: List[str] = contents.split("\n")
|
||||
comment: str = ""
|
||||
|
||||
for n, line in enumerate(contents_list):
|
||||
if "option" not in line.lower() or target not in line:
|
||||
continue
|
||||
|
||||
for maybe_comment_line in contents_list[n - 1 :: -1]:
|
||||
if not re.match("\s*#\s*", maybe_comment_line):
|
||||
break
|
||||
|
||||
comment = re.sub("\s*#\s*", "", maybe_comment_line) + " " + comment
|
||||
|
||||
# line numbering starts with 1
|
||||
return n + 1, comment
|
||||
|
||||
matches: Optional[List[Entity]] = re.findall(
|
||||
cmake_option_regex, contents, re.MULTILINE
|
||||
)
|
||||
|
||||
file_rel_path_with_name: str = os.path.join(
|
||||
file_path[len(root_path) :], file_name
|
||||
)
|
||||
if file_rel_path_with_name.startswith("/"):
|
||||
file_rel_path_with_name = file_rel_path_with_name[1:]
|
||||
|
||||
if matches:
|
||||
for entity in matches:
|
||||
build_entity(
|
||||
file_rel_path_with_name, entity, get_line_and_comment(entity[0])
|
||||
)
|
||||
|
||||
|
||||
def process_folder(root_path: str, name: str) -> None:
|
||||
for root, _, files in os.walk(os.path.join(root_path, name)):
|
||||
for f in files:
|
||||
if f == "CMakeLists.txt" or ".cmake" in f:
|
||||
process_file(root_path, root, f)
|
||||
|
||||
|
||||
def generate_cmake_flags_files() -> None:
|
||||
root_path: str = os.path.join(os.path.dirname(__file__), "..", "..")
|
||||
|
||||
output_file_name: str = os.path.join(
|
||||
root_path, "docs/en/development/cmake-in-clickhouse.md"
|
||||
)
|
||||
header_file_name: str = os.path.join(
|
||||
root_path, "docs/_includes/cmake_in_clickhouse_header.md"
|
||||
)
|
||||
footer_file_name: str = os.path.join(
|
||||
root_path, "docs/_includes/cmake_in_clickhouse_footer.md"
|
||||
)
|
||||
|
||||
process_file(root_path, root_path, "CMakeLists.txt")
|
||||
process_file(root_path, os.path.join(root_path, "programs"), "CMakeLists.txt")
|
||||
|
||||
process_folder(root_path, "base")
|
||||
process_folder(root_path, "cmake")
|
||||
process_folder(root_path, "src")
|
||||
|
||||
with open(output_file_name, "w") as f:
|
||||
with open(header_file_name, "r") as header:
|
||||
f.write(header.read())
|
||||
|
||||
sorted_keys: List[str] = sorted(entities.keys())
|
||||
ignored_keys: List[str] = []
|
||||
|
||||
f.write("### ClickHouse modes\n" + table_header)
|
||||
|
||||
for k in sorted_keys:
|
||||
if k.startswith("ENABLE_CLICKHOUSE_"):
|
||||
f.write(entities[k][1] + "\n")
|
||||
ignored_keys.append(k)
|
||||
|
||||
f.write(
|
||||
"\n### External libraries\nNote that ClickHouse uses forks of these libraries, see https://github.com/ClickHouse-Extras.\n"
|
||||
+ table_header
|
||||
)
|
||||
|
||||
for k in sorted_keys:
|
||||
if k.startswith("ENABLE_") and ".cmake" in entities[k][0]:
|
||||
f.write(entities[k][1] + "\n")
|
||||
ignored_keys.append(k)
|
||||
|
||||
f.write("\n\n### Other flags\n" + table_header)
|
||||
|
||||
for k in sorted(set(sorted_keys).difference(set(ignored_keys))):
|
||||
f.write(entities[k][1] + "\n")
|
||||
|
||||
with open(footer_file_name, "r") as footer:
|
||||
f.write(footer.read())
|
||||
|
||||
other_languages = [
|
||||
"docs/ja/development/cmake-in-clickhouse.md",
|
||||
"docs/zh/development/cmake-in-clickhouse.md",
|
||||
"docs/ru/development/cmake-in-clickhouse.md",
|
||||
]
|
||||
|
||||
for lang in other_languages:
|
||||
other_file_name = os.path.join(root_path, lang)
|
||||
if os.path.exists(other_file_name):
|
||||
os.unlink(other_file_name)
|
||||
os.symlink(output_file_name, other_file_name)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
generate_cmake_flags_files()
|
@ -1,186 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import os, sys
|
||||
import argparse
|
||||
import subprocess
|
||||
import contextlib
|
||||
from git import cmd
|
||||
from tempfile import NamedTemporaryFile
|
||||
|
||||
SCRIPT_DESCRIPTION = """
|
||||
usage: ./easy_diff.py language/document path
|
||||
|
||||
Show the difference between a language document and an English document.
|
||||
|
||||
This script is based on the assumption that documents in other languages are fully synchronized with the en document at a commit.
|
||||
|
||||
For example:
|
||||
Execute:
|
||||
./easy_diff.py --no-pager zh/data_types
|
||||
Output:
|
||||
Need translate document:~/ClickHouse/docs/en/data_types/uuid.md
|
||||
Need link document:~/ClickHouse/docs/en/data_types/decimal.md to ~/ClickHouse/docs/zh/data_types/decimal.md
|
||||
diff --git a/docs/en/data_types/domains/ipv6.md b/docs/en/data_types/domains/ipv6.md
|
||||
index 1bfbe3400b..e2abaff017 100644
|
||||
--- a/docs/en/data_types/domains/ipv6.md
|
||||
+++ b/docs/en/data_types/domains/ipv6.md
|
||||
@@ -4,13 +4,13 @@
|
||||
|
||||
### Basic Usage
|
||||
|
||||
-``` sql
|
||||
+```sql
|
||||
CREATE TABLE hits (url String, from IPv6) ENGINE = MergeTree() ORDER BY url;
|
||||
|
||||
DESCRIBE TABLE hits;
|
||||
```
|
||||
|
||||
-```
|
||||
+```text
|
||||
┌─name─┬─type───┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┐
|
||||
│ url │ String │ │ │ │ │
|
||||
│ from │ IPv6 │ │ │ │ │
|
||||
@@ -19,19 +19,19 @@ DESCRIBE TABLE hits;
|
||||
|
||||
OR you can use `IPv6` domain as a key:
|
||||
|
||||
-``` sql
|
||||
+```sql
|
||||
CREATE TABLE hits (url String, from IPv6) ENGINE = MergeTree() ORDER BY from;
|
||||
... MORE
|
||||
|
||||
OPTIONS:
|
||||
-h, --help show this help message and exit
|
||||
--no-pager use stdout as difference result output
|
||||
"""
|
||||
|
||||
SCRIPT_PATH = os.path.abspath(__file__)
|
||||
CLICKHOUSE_REPO_HOME = os.path.join(os.path.dirname(SCRIPT_PATH), "..", "..")
|
||||
SCRIPT_COMMAND_EXECUTOR = cmd.Git(CLICKHOUSE_REPO_HOME)
|
||||
|
||||
SCRIPT_COMMAND_PARSER = argparse.ArgumentParser(add_help=False)
|
||||
SCRIPT_COMMAND_PARSER.add_argument("path", type=bytes, nargs="?", default=None)
|
||||
SCRIPT_COMMAND_PARSER.add_argument("--no-pager", action="store_true", default=False)
|
||||
SCRIPT_COMMAND_PARSER.add_argument("-h", "--help", action="store_true", default=False)
|
||||
|
||||
|
||||
def execute(commands):
|
||||
return SCRIPT_COMMAND_EXECUTOR.execute(commands)
|
||||
|
||||
|
||||
def get_hash(file_name):
|
||||
return execute(["git", "log", "-n", "1", '--pretty=format:"%H"', file_name])
|
||||
|
||||
|
||||
def diff_file(reference_file, working_file, out):
|
||||
if not os.path.exists(reference_file):
|
||||
raise RuntimeError(
|
||||
"reference file [" + os.path.abspath(reference_file) + "] is not exists."
|
||||
)
|
||||
|
||||
if os.path.islink(working_file):
|
||||
out.writelines(["Need translate document:" + os.path.abspath(reference_file)])
|
||||
elif not os.path.exists(working_file):
|
||||
out.writelines(
|
||||
[
|
||||
"Need link document "
|
||||
+ os.path.abspath(reference_file)
|
||||
+ " to "
|
||||
+ os.path.abspath(working_file)
|
||||
]
|
||||
)
|
||||
elif get_hash(working_file) != get_hash(reference_file):
|
||||
out.writelines(
|
||||
[
|
||||
(
|
||||
execute(
|
||||
[
|
||||
"git",
|
||||
"diff",
|
||||
get_hash(working_file).strip('"'),
|
||||
reference_file,
|
||||
]
|
||||
).encode("utf-8")
|
||||
)
|
||||
]
|
||||
)
|
||||
|
||||
return 0
|
||||
|
||||
|
||||
def diff_directory(reference_directory, working_directory, out):
|
||||
if not os.path.isdir(reference_directory):
|
||||
return diff_file(reference_directory, working_directory, out)
|
||||
|
||||
for list_item in os.listdir(reference_directory):
|
||||
working_item = os.path.join(working_directory, list_item)
|
||||
reference_item = os.path.join(reference_directory, list_item)
|
||||
if (
|
||||
diff_file(reference_item, working_item, out)
|
||||
if os.path.isfile(reference_item)
|
||||
else diff_directory(reference_item, working_item, out) != 0
|
||||
):
|
||||
return 1
|
||||
|
||||
return 0
|
||||
|
||||
|
||||
def find_language_doc(custom_document, other_language="en", children=[]):
|
||||
if len(custom_document) == 0:
|
||||
raise RuntimeError(
|
||||
"The "
|
||||
+ os.path.join(custom_document, *children)
|
||||
+ " is not in docs directory."
|
||||
)
|
||||
|
||||
if os.path.samefile(os.path.join(CLICKHOUSE_REPO_HOME, "docs"), custom_document):
|
||||
return os.path.join(CLICKHOUSE_REPO_HOME, "docs", other_language, *children[1:])
|
||||
children.insert(0, os.path.split(custom_document)[1])
|
||||
return find_language_doc(
|
||||
os.path.split(custom_document)[0], other_language, children
|
||||
)
|
||||
|
||||
|
||||
class ToPager:
|
||||
def __init__(self, temp_named_file):
|
||||
self.temp_named_file = temp_named_file
|
||||
|
||||
def writelines(self, lines):
|
||||
self.temp_named_file.writelines(lines)
|
||||
|
||||
def close(self):
|
||||
self.temp_named_file.flush()
|
||||
git_pager = execute(["git", "var", "GIT_PAGER"])
|
||||
subprocess.check_call([git_pager, self.temp_named_file.name])
|
||||
self.temp_named_file.close()
|
||||
|
||||
|
||||
class ToStdOut:
|
||||
def writelines(self, lines):
|
||||
self.system_stdout_stream.writelines(lines)
|
||||
|
||||
def close(self):
|
||||
self.system_stdout_stream.flush()
|
||||
|
||||
def __init__(self, system_stdout_stream):
|
||||
self.system_stdout_stream = system_stdout_stream
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
arguments = SCRIPT_COMMAND_PARSER.parse_args()
|
||||
if arguments.help or not arguments.path:
|
||||
sys.stdout.write(SCRIPT_DESCRIPTION)
|
||||
sys.exit(0)
|
||||
|
||||
working_language = os.path.join(CLICKHOUSE_REPO_HOME, "docs", arguments.path)
|
||||
with contextlib.closing(
|
||||
ToStdOut(sys.stdout)
|
||||
if arguments.no_pager
|
||||
else ToPager(NamedTemporaryFile("r+"))
|
||||
) as writer:
|
||||
exit(
|
||||
diff_directory(
|
||||
find_language_doc(working_language), working_language, writer
|
||||
)
|
||||
)
|
@ -1,41 +0,0 @@
|
||||
import collections
|
||||
import copy
|
||||
import io
|
||||
import logging
|
||||
import os
|
||||
import random
|
||||
import sys
|
||||
import tarfile
|
||||
import time
|
||||
|
||||
import requests
|
||||
|
||||
import util
|
||||
|
||||
|
||||
def get_events(args):
|
||||
events = []
|
||||
skip = True
|
||||
with open(os.path.join(args.docs_dir, "..", "README.md")) as f:
|
||||
for line in f:
|
||||
if skip:
|
||||
if "Upcoming Events" in line:
|
||||
skip = False
|
||||
else:
|
||||
if not line:
|
||||
continue
|
||||
line = line.strip().split("](")
|
||||
if len(line) == 2:
|
||||
tail = line[1].split(") ")
|
||||
events.append(
|
||||
{
|
||||
"signup_link": tail[0],
|
||||
"event_name": line[0].replace("* [", ""),
|
||||
"event_date": tail[1].replace("on ", "").replace(".", ""),
|
||||
}
|
||||
)
|
||||
return events
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
logging.basicConfig(level=logging.DEBUG, stream=sys.stderr)
|
@ -1,190 +0,0 @@
|
||||
import collections
|
||||
import datetime
|
||||
import hashlib
|
||||
import logging
|
||||
import os
|
||||
|
||||
import mkdocs.structure.nav
|
||||
|
||||
import util
|
||||
|
||||
|
||||
def find_first_header(content):
|
||||
for line in content.split("\n"):
|
||||
if line.startswith("#"):
|
||||
no_hash = line.lstrip("#")
|
||||
return no_hash.split("{", 1)[0].strip()
|
||||
|
||||
|
||||
def build_nav_entry(root, args):
|
||||
if root.endswith("images"):
|
||||
return None, None, None
|
||||
result_items = []
|
||||
index_meta, index_content = util.read_md_file(os.path.join(root, "index.md"))
|
||||
current_title = index_meta.get("toc_folder_title", index_meta.get("toc_title"))
|
||||
current_title = current_title or index_meta.get(
|
||||
"title", find_first_header(index_content)
|
||||
)
|
||||
for filename in os.listdir(root):
|
||||
path = os.path.join(root, filename)
|
||||
if os.path.isdir(path):
|
||||
prio, title, payload = build_nav_entry(path, args)
|
||||
if title and payload:
|
||||
result_items.append((prio, title, payload))
|
||||
elif filename.endswith(".md"):
|
||||
path = os.path.join(root, filename)
|
||||
|
||||
meta = ""
|
||||
content = ""
|
||||
|
||||
try:
|
||||
meta, content = util.read_md_file(path)
|
||||
except:
|
||||
print("Error in file: {}".format(path))
|
||||
raise
|
||||
|
||||
path = path.split("/", 2)[-1]
|
||||
title = meta.get("toc_title", find_first_header(content))
|
||||
if title:
|
||||
title = title.strip().rstrip(".")
|
||||
else:
|
||||
title = meta.get("toc_folder_title", "hidden")
|
||||
prio = meta.get("toc_priority", 9999)
|
||||
logging.debug(f"Nav entry: {prio}, {title}, {path}")
|
||||
if meta.get("toc_hidden") or not content.strip():
|
||||
title = "hidden"
|
||||
if title == "hidden":
|
||||
title = "hidden-" + hashlib.sha1(content.encode("utf-8")).hexdigest()
|
||||
if args.nav_limit and len(result_items) >= args.nav_limit:
|
||||
break
|
||||
result_items.append((prio, title, path))
|
||||
result_items = sorted(result_items, key=lambda x: (x[0], x[1]))
|
||||
result = collections.OrderedDict([(item[1], item[2]) for item in result_items])
|
||||
if index_meta.get("toc_hidden_folder"):
|
||||
current_title += "|hidden-folder"
|
||||
return index_meta.get("toc_priority", 10000), current_title, result
|
||||
|
||||
|
||||
def build_docs_nav(lang, args):
|
||||
docs_dir = os.path.join(args.docs_dir, lang)
|
||||
_, _, nav = build_nav_entry(docs_dir, args)
|
||||
result = []
|
||||
index_key = None
|
||||
for key, value in list(nav.items()):
|
||||
if key and value:
|
||||
if value == "index.md":
|
||||
index_key = key
|
||||
continue
|
||||
result.append({key: value})
|
||||
if args.nav_limit and len(result) >= args.nav_limit:
|
||||
break
|
||||
if index_key:
|
||||
key = list(result[0].keys())[0]
|
||||
result[0][key][index_key] = "index.md"
|
||||
result[0][key].move_to_end(index_key, last=False)
|
||||
return result
|
||||
|
||||
|
||||
def build_blog_nav(lang, args):
|
||||
blog_dir = os.path.join(args.blog_dir, lang)
|
||||
years = sorted(os.listdir(blog_dir), reverse=True)
|
||||
result_nav = [{"hidden": "index.md"}]
|
||||
post_meta = collections.OrderedDict()
|
||||
for year in years:
|
||||
year_dir = os.path.join(blog_dir, year)
|
||||
if not os.path.isdir(year_dir):
|
||||
continue
|
||||
result_nav.append({year: collections.OrderedDict()})
|
||||
posts = []
|
||||
post_meta_items = []
|
||||
for post in os.listdir(year_dir):
|
||||
post_path = os.path.join(year_dir, post)
|
||||
if not post.endswith(".md"):
|
||||
raise RuntimeError(
|
||||
f"Unexpected non-md file in posts folder: {post_path}"
|
||||
)
|
||||
meta, _ = util.read_md_file(post_path)
|
||||
post_date = meta["date"]
|
||||
post_title = meta["title"]
|
||||
if datetime.date.fromisoformat(post_date) > datetime.date.today():
|
||||
continue
|
||||
posts.append(
|
||||
(
|
||||
post_date,
|
||||
post_title,
|
||||
os.path.join(year, post),
|
||||
)
|
||||
)
|
||||
if post_title in post_meta:
|
||||
raise RuntimeError(f"Duplicate post title: {post_title}")
|
||||
if not post_date.startswith(f"{year}-"):
|
||||
raise RuntimeError(
|
||||
f"Post date {post_date} doesn't match the folder year {year}: {post_title}"
|
||||
)
|
||||
post_url_part = post.replace(".md", "")
|
||||
post_meta_items.append(
|
||||
(
|
||||
post_date,
|
||||
{
|
||||
"date": post_date,
|
||||
"title": post_title,
|
||||
"image": meta.get("image"),
|
||||
"url": f"/blog/{lang}/{year}/{post_url_part}/",
|
||||
},
|
||||
)
|
||||
)
|
||||
for _, title, path in sorted(posts, reverse=True):
|
||||
result_nav[-1][year][title] = path
|
||||
for _, post_meta_item in sorted(
|
||||
post_meta_items, reverse=True, key=lambda item: item[0]
|
||||
):
|
||||
post_meta[post_meta_item["title"]] = post_meta_item
|
||||
return result_nav, post_meta
|
||||
|
||||
|
||||
def _custom_get_navigation(files, config):
|
||||
nav_config = config["nav"] or mkdocs.structure.nav.nest_paths(
|
||||
f.src_path for f in files.documentation_pages()
|
||||
)
|
||||
items = mkdocs.structure.nav._data_to_navigation(nav_config, files, config)
|
||||
if not isinstance(items, list):
|
||||
items = [items]
|
||||
|
||||
pages = mkdocs.structure.nav._get_by_type(items, mkdocs.structure.nav.Page)
|
||||
|
||||
mkdocs.structure.nav._add_previous_and_next_links(pages)
|
||||
mkdocs.structure.nav._add_parent_links(items)
|
||||
|
||||
missing_from_config = [
|
||||
file for file in files.documentation_pages() if file.page is None
|
||||
]
|
||||
if missing_from_config:
|
||||
files._files = [
|
||||
file for file in files._files if file not in missing_from_config
|
||||
]
|
||||
|
||||
links = mkdocs.structure.nav._get_by_type(items, mkdocs.structure.nav.Link)
|
||||
for link in links:
|
||||
scheme, netloc, path, params, query, fragment = mkdocs.structure.nav.urlparse(
|
||||
link.url
|
||||
)
|
||||
if scheme or netloc:
|
||||
mkdocs.structure.nav.log.debug(
|
||||
"An external link to '{}' is included in "
|
||||
"the 'nav' configuration.".format(link.url)
|
||||
)
|
||||
elif link.url.startswith("/"):
|
||||
mkdocs.structure.nav.log.debug(
|
||||
"An absolute path to '{}' is included in the 'nav' configuration, "
|
||||
"which presumably points to an external resource.".format(link.url)
|
||||
)
|
||||
else:
|
||||
msg = (
|
||||
"A relative path to '{}' is included in the 'nav' configuration, "
|
||||
"which is not found in the documentation files".format(link.url)
|
||||
)
|
||||
mkdocs.structure.nav.log.warning(msg)
|
||||
return mkdocs.structure.nav.Navigation(items, pages)
|
||||
|
||||
|
||||
mkdocs.structure.nav.get_navigation = _custom_get_navigation
|
@ -27,45 +27,6 @@ def write_redirect_html(out_path, to_url):
|
||||
)
|
||||
|
||||
|
||||
def build_redirect_html(args, base_prefix, lang, output_dir, from_path, to_path):
|
||||
out_path = os.path.join(
|
||||
output_dir,
|
||||
lang,
|
||||
from_path.replace("/index.md", "/index.html").replace(".md", "/index.html"),
|
||||
)
|
||||
target_path = to_path.replace("/index.md", "/").replace(".md", "/")
|
||||
|
||||
if target_path[0:7] != "http://" and target_path[0:8] != "https://":
|
||||
to_url = f"/{base_prefix}/{lang}/{target_path}"
|
||||
else:
|
||||
to_url = target_path
|
||||
|
||||
to_url = to_url.strip()
|
||||
write_redirect_html(out_path, to_url)
|
||||
|
||||
|
||||
def build_docs_redirects(args):
|
||||
with open(os.path.join(args.docs_dir, "redirects.txt"), "r") as f:
|
||||
for line in f:
|
||||
for lang in args.lang.split(","):
|
||||
from_path, to_path = line.split(" ", 1)
|
||||
build_redirect_html(
|
||||
args, "docs", lang, args.docs_output_dir, from_path, to_path
|
||||
)
|
||||
|
||||
|
||||
def build_blog_redirects(args):
|
||||
for lang in args.blog_lang.split(","):
|
||||
redirects_path = os.path.join(args.blog_dir, lang, "redirects.txt")
|
||||
if os.path.exists(redirects_path):
|
||||
with open(redirects_path, "r") as f:
|
||||
for line in f:
|
||||
from_path, to_path = line.split(" ", 1)
|
||||
build_redirect_html(
|
||||
args, "blog", lang, args.blog_output_dir, from_path, to_path
|
||||
)
|
||||
|
||||
|
||||
def build_static_redirects(args):
|
||||
for static_redirect in [
|
||||
("benchmark.html", "/benchmark/dbms/"),
|
||||
|
@ -1,39 +1,32 @@
|
||||
Babel==2.9.1
|
||||
backports-abc==0.5
|
||||
backports.functools-lru-cache==1.6.1
|
||||
beautifulsoup4==4.9.1
|
||||
certifi==2020.4.5.2
|
||||
chardet==3.0.4
|
||||
click==7.1.2
|
||||
closure==20191111
|
||||
cssmin==0.2.0
|
||||
future==0.18.2
|
||||
htmlmin==0.1.12
|
||||
idna==2.10
|
||||
Jinja2==3.0.3
|
||||
jinja2-highlight==0.6.1
|
||||
jsmin==3.0.0
|
||||
livereload==2.6.3
|
||||
Markdown==3.3.2
|
||||
MarkupSafe==2.1.0
|
||||
mkdocs==1.3.0
|
||||
mkdocs-htmlproofer-plugin==0.0.3
|
||||
mkdocs-macros-plugin==0.4.20
|
||||
nltk==3.7
|
||||
nose==1.3.7
|
||||
protobuf==3.14.0
|
||||
numpy==1.21.2
|
||||
pymdown-extensions==8.0
|
||||
python-slugify==4.0.1
|
||||
MarkupSafe==2.1.1
|
||||
MarkupSafe==2.1.1
|
||||
PyYAML==6.0
|
||||
repackage==0.7.3
|
||||
requests==2.25.1
|
||||
singledispatch==3.4.0.3
|
||||
Pygments>=2.12.0
|
||||
beautifulsoup4==4.9.1
|
||||
click==7.1.2
|
||||
ghp_import==2.1.0
|
||||
importlib_metadata==4.11.4
|
||||
jinja2-highlight==0.6.1
|
||||
livereload==2.6.3
|
||||
mergedeep==1.3.4
|
||||
mkdocs-macros-plugin==0.4.20
|
||||
mkdocs-macros-test==0.1.0
|
||||
mkdocs-material==8.2.15
|
||||
mkdocs==1.3.0
|
||||
mkdocs_material_extensions==1.0.3
|
||||
packaging==21.3
|
||||
pygments==2.12.0
|
||||
pymdown_extensions==9.4
|
||||
pyparsing==3.0.9
|
||||
python-slugify==4.0.1
|
||||
python_dateutil==2.8.2
|
||||
pytz==2022.1
|
||||
six==1.15.0
|
||||
soupsieve==2.0.1
|
||||
soupsieve==2.3.2
|
||||
termcolor==1.1.0
|
||||
text_unidecode==1.3
|
||||
tornado==6.1
|
||||
Unidecode==1.1.1
|
||||
urllib3>=1.26.8
|
||||
Pygments>=2.11.2
|
||||
|
||||
zipp==3.8.0
|
||||
|
@ -124,7 +124,7 @@ def init_jinja2_env(args):
|
||||
|
||||
env = jinja2.Environment(
|
||||
loader=jinja2.FileSystemLoader(
|
||||
[args.website_dir, os.path.join(args.docs_dir, "_includes")]
|
||||
[args.website_dir, os.path.join(args.src_dir, "docs", "_includes")]
|
||||
),
|
||||
extensions=["jinja2.ext.i18n", "jinja2_highlight.HighlightExtension"],
|
||||
)
|
||||
|
@ -1,81 +0,0 @@
|
||||
const path = require('path');
|
||||
const jsPath = path.resolve(__dirname, '../../website/src/js');
|
||||
const scssPath = path.resolve(__dirname, '../../website/src/scss');
|
||||
|
||||
console.log(path.resolve(__dirname, 'node_modules/bootstrap', require('bootstrap/package.json').sass));
|
||||
|
||||
module.exports = {
|
||||
|
||||
mode: ('development' === process.env.NODE_ENV) && 'development' || 'production',
|
||||
|
||||
...(('development' === process.env.NODE_ENV) && {
|
||||
watch: true,
|
||||
}),
|
||||
|
||||
entry: [
|
||||
path.resolve(scssPath, 'bootstrap.scss'),
|
||||
path.resolve(scssPath, 'main.scss'),
|
||||
path.resolve(jsPath, 'main.js'),
|
||||
],
|
||||
|
||||
output: {
|
||||
path: path.resolve(__dirname, '../../website'),
|
||||
filename: 'js/main.js',
|
||||
},
|
||||
|
||||
resolve: {
|
||||
alias: {
|
||||
bootstrap: path.resolve(__dirname, 'node_modules/bootstrap', require('bootstrap/package.json').sass),
|
||||
},
|
||||
},
|
||||
|
||||
module: {
|
||||
rules: [{
|
||||
test: /\.js$/,
|
||||
exclude: /(node_modules)/,
|
||||
use: [{
|
||||
loader: 'babel-loader',
|
||||
options: {
|
||||
presets: ['@babel/preset-env'],
|
||||
},
|
||||
}],
|
||||
}, {
|
||||
test: /\.scss$/,
|
||||
use: [{
|
||||
loader: 'file-loader',
|
||||
options: {
|
||||
sourceMap: true,
|
||||
outputPath: (url, entryPath, context) => {
|
||||
if (0 === entryPath.indexOf(scssPath)) {
|
||||
const outputFile = entryPath.slice(entryPath.lastIndexOf('/') + 1, -5)
|
||||
const outputPath = entryPath.slice(0, entryPath.lastIndexOf('/')).slice(scssPath.length + 1)
|
||||
return `./css/${outputPath}/${outputFile}.css`
|
||||
}
|
||||
return `./css/${url}`
|
||||
},
|
||||
},
|
||||
}, {
|
||||
loader: 'postcss-loader',
|
||||
options: {
|
||||
options: {},
|
||||
plugins: () => ([
|
||||
require('autoprefixer'),
|
||||
('production' === process.env.NODE_ENV) && require('cssnano'),
|
||||
].filter(plugin => plugin)),
|
||||
}
|
||||
}, {
|
||||
loader: 'sass-loader',
|
||||
options: {
|
||||
implementation: require('sass'),
|
||||
implementation: require('sass'),
|
||||
sourceMap: ('development' === process.env.NODE_ENV),
|
||||
sassOptions: {
|
||||
importer: require('node-sass-glob-importer')(),
|
||||
precision: 10,
|
||||
},
|
||||
},
|
||||
}],
|
||||
}],
|
||||
},
|
||||
|
||||
};
|
@ -68,12 +68,306 @@ SELECT char(0xE4, 0xBD, 0xA0, 0xE5, 0xA5, 0xBD) AS hello;
|
||||
|
||||
## hex {#hex}
|
||||
|
||||
接受`String`,`unsigned integer`,`Date`或`DateTime`类型的参数。返回包含参数的十六进制表示的字符串。使用大写字母`A-F`。不使用`0x`前缀或`h`后缀。对于字符串,所有字节都简单地编码为两个十六进制数字。数字转换为大端(«易阅读»)格式。对于数字,去除其中较旧的零,但仅限整个字节。例如,`hex(1)='01'`。 `Date`被编码为自Unix时间开始以来的天数。 `DateTime`编码为自Unix时间开始以来的秒数。
|
||||
返回包含参数的十六进制表示的字符串。
|
||||
|
||||
## unhex(str) {#unhexstr}
|
||||
别名为: `HEX`。
|
||||
|
||||
接受包含任意数量的十六进制数字的字符串,并返回包含相应字节的字符串。支持大写和小写字母A-F。十六进制数字的数量不必是偶数。如果是奇数,则最后一位数被解释为00-0F字节的低位。如果参数字符串包含除十六进制数字以外的任何内容,则返回一些实现定义的结果(不抛出异常)。
|
||||
如果要将结果转换为数字,可以使用«reverse»和«reinterpretAsType»函数。
|
||||
**语法**
|
||||
|
||||
``` sql
|
||||
hex(arg)
|
||||
```
|
||||
|
||||
该函数使用大写字母`A-F`,不使用任何前缀(如`0x`)或后缀(如`h`)
|
||||
|
||||
对于整数参数,它从高到低(大端或“人类可读”顺序)打印十六进制数字(“半字节”)。它从左侧第一个非零字节开始(省略前导零字节),但即使前导数字为零,也始终打印每个字节的两个数字。
|
||||
|
||||
类型为[Date](../../sql-reference/data-types/date.md)和[DateTime](../../sql-reference/data-types/datetime.md)的值将被格式化为相应的整数(日期为 Epoch 以来的天数,DateTime 为 Unix Timestamp 的值)。
|
||||
|
||||
对于[String](../../sql-reference/data-types/string.md)和[FixedString](../../sql-reference/data-types/fixedstring.md),所有字节都被简单地编码为两个十六进制数字。零字节不会被省略。
|
||||
|
||||
类型为[Float](../../sql-reference/data-types/float.md)和[Decimal](../../sql-reference/data-types/decimal.md)的值被编码为它们在内存中的表示。由于我们支持小端架构,它们以小端编码。零前导尾随字节不会被省略。
|
||||
|
||||
类型为[UUID](../data-types/uuid.md)的值被编码为大端顺序字符串。
|
||||
|
||||
**参数**
|
||||
|
||||
- `arg` — 要转换为十六进制的值。类型为[String](../../sql-reference/data-types/string.md),[UInt](../../sql-reference/data-types/int-uint.md),[Float](../../sql-reference/data-types/float.md),[Decimal](../../sql-reference/data-types/decimal.md),[Date](../../sql-reference/data-types/date.md)或者[DateTime](../../sql-reference/data-types/datetime.md)。
|
||||
|
||||
**返回值**
|
||||
|
||||
- 具有参数的十六进制表示的字符串。
|
||||
|
||||
类型为:[String](../../sql-reference/data-types/string.md)。
|
||||
|
||||
**示例**
|
||||
|
||||
查询语句:
|
||||
|
||||
``` sql
|
||||
SELECT hex(1);
|
||||
```
|
||||
|
||||
结果:
|
||||
|
||||
``` text
|
||||
01
|
||||
```
|
||||
|
||||
查询语句:
|
||||
|
||||
``` sql
|
||||
SELECT hex(toFloat32(number)) AS hex_presentation FROM numbers(15, 2);
|
||||
```
|
||||
|
||||
结果:
|
||||
|
||||
``` text
|
||||
┌─hex_presentation─┐
|
||||
│ 00007041 │
|
||||
│ 00008041 │
|
||||
└──────────────────┘
|
||||
```
|
||||
|
||||
查询语句:
|
||||
|
||||
``` sql
|
||||
SELECT hex(toFloat64(number)) AS hex_presentation FROM numbers(15, 2);
|
||||
```
|
||||
|
||||
结果:
|
||||
|
||||
``` text
|
||||
┌─hex_presentation─┐
|
||||
│ 0000000000002E40 │
|
||||
│ 0000000000003040 │
|
||||
└──────────────────┘
|
||||
```
|
||||
|
||||
查询语句:
|
||||
|
||||
``` sql
|
||||
SELECT lower(hex(toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0'))) as uuid_hex
|
||||
```
|
||||
|
||||
结果:
|
||||
|
||||
``` text
|
||||
┌─uuid_hex─────────────────────────┐
|
||||
│ 61f0c4045cb311e7907ba6006ad3dba0 │
|
||||
└──────────────────────────────────┘
|
||||
```
|
||||
|
||||
## unhex {#unhexstr}
|
||||
|
||||
执行[hex](#hex)函数的相反操作。它将每对十六进制数字(在参数中)解释为一个数字,并将其转换为该数字表示的字节。返回值是一个二进制字符串 (BLOB)。
|
||||
|
||||
如果要将结果转换为数字,可以使用 [reverse](../../sql-reference/functions/string-functions.md#reverse) 和 [reinterpretAs<Type>](../../sql-reference/functions/type-conversion-functions.md#type-conversion-functions) 函数。
|
||||
|
||||
:::note
|
||||
如果从 `clickhouse-client` 中调用 `unhex`,二进制字符串将使用 UTF-8 显示。
|
||||
:::
|
||||
|
||||
别名为:`UNHEX`。
|
||||
|
||||
**语法**
|
||||
|
||||
``` sql
|
||||
unhex(arg)
|
||||
```
|
||||
|
||||
**参数**
|
||||
|
||||
- `arg` — 包含任意数量的十六进制数字的字符串。类型为:[String](../../sql-reference/data-types/string.md)。
|
||||
|
||||
支持大写和小写字母A-F。十六进制数字的数量不必是偶数。如果是奇数,则最后一位数被解释为00-0F字节的低位。如果参数字符串包含除十六进制数字以外的任何内容,则返回一些实现定义的结果(不抛出异常)。对于数字参数, unhex()不执行 hex(N) 的倒数。
|
||||
|
||||
**返回值**
|
||||
|
||||
- 二进制字符串 (BLOB)。
|
||||
|
||||
类型为: [String](../../sql-reference/data-types/string.md)。
|
||||
|
||||
**示例**
|
||||
|
||||
查询语句:
|
||||
``` sql
|
||||
SELECT unhex('303132'), UNHEX('4D7953514C');
|
||||
```
|
||||
|
||||
结果:
|
||||
``` text
|
||||
┌─unhex('303132')─┬─unhex('4D7953514C')─┐
|
||||
│ 012 │ MySQL │
|
||||
└─────────────────┴─────────────────────┘
|
||||
```
|
||||
|
||||
查询语句:
|
||||
|
||||
``` sql
|
||||
SELECT reinterpretAsUInt64(reverse(unhex('FFF'))) AS num;
|
||||
```
|
||||
|
||||
结果:
|
||||
|
||||
``` text
|
||||
┌──num─┐
|
||||
│ 4095 │
|
||||
└──────┘
|
||||
```
|
||||
|
||||
## bin {#bin}
|
||||
|
||||
返回一个包含参数二进制表示的字符串。
|
||||
|
||||
**语法**
|
||||
|
||||
``` sql
|
||||
bin(arg)
|
||||
```
|
||||
|
||||
别名为: `BIN`。
|
||||
|
||||
对于整数参数,它从最高有效到最低有效(大端或“人类可读”顺序)打印 bin 数字。它从最重要的非零字节开始(省略前导零字节),但如果前导数字为零,则始终打印每个字节的八位数字。
|
||||
|
||||
类型为[Date](../../sql-reference/data-types/date.md)和[DateTime](../../sql-reference/data-types/datetime.md)的值被格式化为相应的整数(`Date` 为 Epoch 以来的天数,`DateTime` 为 Unix Timestamp 的值)。
|
||||
|
||||
对于[String](../../sql-reference/data-types/string.md)和[FixedString](../../sql-reference/data-types/fixedstring.md),所有字节都被简单地编码为八个二进制数。零字节不会被省略。
|
||||
|
||||
类型为[Float](../../sql-reference/data-types/float.md)和[Decimal](../../sql-reference/data-types/decimal.md)的值被编码为它们在内存中的表示。由于我们支持小端架构,它们以小端编码。零前导尾随字节不会被省略。
|
||||
|
||||
类型为[UUID](../data-types/uuid.md)的值被编码为大端顺序字符串。
|
||||
|
||||
**参数**
|
||||
|
||||
- `arg` — 要转换为二进制的值。类型为[String](../../sql-reference/data-types/string.md),[FixedString](../../sql-reference/data-types/fixedstring.md),[UInt](../../sql-reference/data-types/int-uint.md),[Float](../../sql-reference/data-types/float.md),[Decimal](../../sql-reference/data-types/decimal.md),[Date](../../sql-reference/data-types/date.md)或者[DateTime](../../sql-reference/data-types/datetime.md)。
|
||||
|
||||
**返回值**
|
||||
|
||||
- 具有参数的二进制表示的字符串。
|
||||
|
||||
类型为: [String](../../sql-reference/data-types/string.md)。
|
||||
|
||||
**示例**
|
||||
|
||||
查询语句:
|
||||
|
||||
``` sql
|
||||
SELECT bin(14);
|
||||
```
|
||||
|
||||
结果:
|
||||
|
||||
``` text
|
||||
┌─bin(14)──┐
|
||||
│ 00001110 │
|
||||
└──────────┘
|
||||
```
|
||||
|
||||
查询语句:
|
||||
|
||||
``` sql
|
||||
SELECT bin(toFloat32(number)) AS bin_presentation FROM numbers(15, 2);
|
||||
```
|
||||
|
||||
结果:
|
||||
|
||||
``` text
|
||||
┌─bin_presentation─────────────────┐
|
||||
│ 00000000000000000111000001000001 │
|
||||
│ 00000000000000001000000001000001 │
|
||||
└──────────────────────────────────┘
|
||||
```
|
||||
|
||||
查询语句:
|
||||
|
||||
``` sql
|
||||
SELECT bin(toFloat64(number)) AS bin_presentation FROM numbers(15, 2);
|
||||
```
|
||||
|
||||
结果:
|
||||
|
||||
``` text
|
||||
┌─bin_presentation─────────────────────────────────────────────────┐
|
||||
│ 0000000000000000000000000000000000000000000000000010111001000000 │
|
||||
│ 0000000000000000000000000000000000000000000000000011000001000000 │
|
||||
└──────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
查询语句:
|
||||
|
||||
``` sql
|
||||
SELECT bin(toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0')) as bin_uuid
|
||||
```
|
||||
|
||||
结果:
|
||||
|
||||
``` text
|
||||
┌─bin_uuid─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
|
||||
│ 01100001111100001100010000000100010111001011001100010001111001111001000001111011101001100000000001101010110100111101101110100000 │
|
||||
└──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
|
||||
## unbin {#unbinstr}
|
||||
|
||||
将每对二进制数字(在参数中)解释为一个数字,并将其转换为该数字表示的字节。这些函数执行与 [bin](#bin) 相反的操作。
|
||||
|
||||
**语法**
|
||||
|
||||
``` sql
|
||||
unbin(arg)
|
||||
```
|
||||
|
||||
别名为: `UNBIN`。
|
||||
|
||||
对于数字参数,`unbin()` 不会返回 `bin()` 的倒数。如果要将结果转换为数字,可以使用[reverse](../../sql-reference/functions/string-functions.md#reverse) 和 [reinterpretAs<Type>](../../sql-reference/functions/type-conversion-functions.md#reinterpretasuint8163264) 函数。
|
||||
|
||||
:::note
|
||||
如果从 `clickhouse-client` 中调用 `unbin`,则使用 UTF-8 显示二进制字符串。
|
||||
:::
|
||||
|
||||
支持二进制数字`0`和`1`。二进制位数不必是八的倍数。如果参数字符串包含二进制数字以外的任何内容,则返回一些实现定义的结果(不抛出异常)。
|
||||
|
||||
**参数**
|
||||
|
||||
- `arg` — 包含任意数量的二进制数字的字符串。类型为[String](../../sql-reference/data-types/string.md)。
|
||||
|
||||
**返回值**
|
||||
|
||||
- 二进制字符串 (BLOB)。
|
||||
|
||||
类型为:[String](../../sql-reference/data-types/string.md)。
|
||||
|
||||
**示例**
|
||||
|
||||
查询语句:
|
||||
|
||||
``` sql
|
||||
SELECT UNBIN('001100000011000100110010'), UNBIN('0100110101111001010100110101000101001100');
|
||||
```
|
||||
|
||||
结果:
|
||||
|
||||
``` text
|
||||
┌─unbin('001100000011000100110010')─┬─unbin('0100110101111001010100110101000101001100')─┐
|
||||
│ 012 │ MySQL │
|
||||
└───────────────────────────────────┴───────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
查询语句:
|
||||
|
||||
``` sql
|
||||
SELECT reinterpretAsUInt64(reverse(unbin('1110'))) AS num;
|
||||
```
|
||||
|
||||
结果:
|
||||
|
||||
``` text
|
||||
┌─num─┐
|
||||
│ 14 │
|
||||
└─────┘
|
||||
```
|
||||
|
||||
## UUIDStringToNum(str) {#uuidstringtonumstr}
|
||||
|
||||
@ -91,4 +385,55 @@ SELECT char(0xE4, 0xBD, 0xA0, 0xE5, 0xA5, 0xBD) AS hello;
|
||||
|
||||
接受一个整数。返回一个UInt64类型数组,其中包含一组2的幂列表,其列表中的所有值相加等于这个整数。数组中的数字按升序排列。
|
||||
|
||||
## bitPositionsToArray(num) {#bitpositionstoarraynum}
|
||||
|
||||
接受整数并将其转换为无符号整数。返回一个 `UInt64` 数字数组,其中包含 `arg` 中等于 `1` 的位的位置列表,按升序排列。
|
||||
|
||||
**语法**
|
||||
|
||||
```sql
|
||||
bitPositionsToArray(arg)
|
||||
```
|
||||
|
||||
**参数**
|
||||
|
||||
- `arg` — 整数值。类型为[Int/UInt](../../sql-reference/data-types/int-uint.md)。
|
||||
|
||||
**返回值**
|
||||
|
||||
- 包含等于 `1` 的位位置列表的数组,按升序排列。
|
||||
|
||||
类型为: [Array](../../sql-reference/data-types/array.md)([UInt64](../../sql-reference/data-types/int-uint.md))。
|
||||
|
||||
**示例**
|
||||
|
||||
查询语句:
|
||||
|
||||
``` sql
|
||||
SELECT bitPositionsToArray(toInt8(1)) AS bit_positions;
|
||||
```
|
||||
|
||||
结果:
|
||||
|
||||
``` text
|
||||
┌─bit_positions─┐
|
||||
│ [0] │
|
||||
└───────────────┘
|
||||
```
|
||||
|
||||
查询语句:
|
||||
|
||||
``` sql
|
||||
SELECT bitPositionsToArray(toInt8(-1)) AS bit_positions;
|
||||
```
|
||||
|
||||
结果:
|
||||
|
||||
``` text
|
||||
┌─bit_positions─────┐
|
||||
│ [0,1,2,3,4,5,6,7] │
|
||||
└───────────────────┘
|
||||
```
|
||||
|
||||
|
||||
[来源文章](https://clickhouse.com/docs/en/query_language/functions/encoding_functions/) <!--hide-->
|
||||
|
@ -544,8 +544,7 @@ void LocalServer::processConfig()
|
||||
if (uncompressed_cache_size)
|
||||
global_context->setUncompressedCache(uncompressed_cache_size);
|
||||
|
||||
/// Size of cache for marks (index of MergeTree family of tables). It is necessary.
|
||||
/// Specify default value for mark_cache_size explicitly!
|
||||
/// Size of cache for marks (index of MergeTree family of tables).
|
||||
size_t mark_cache_size = config().getUInt64("mark_cache_size", 5368709120);
|
||||
if (mark_cache_size)
|
||||
global_context->setMarkCache(mark_cache_size);
|
||||
@ -555,8 +554,7 @@ void LocalServer::processConfig()
|
||||
if (index_uncompressed_cache_size)
|
||||
global_context->setIndexUncompressedCache(index_uncompressed_cache_size);
|
||||
|
||||
/// Size of cache for index marks (index of MergeTree skip indices). It is necessary.
|
||||
/// Specify default value for index_mark_cache_size explicitly!
|
||||
/// Size of cache for index marks (index of MergeTree skip indices).
|
||||
size_t index_mark_cache_size = config().getUInt64("index_mark_cache_size", 0);
|
||||
if (index_mark_cache_size)
|
||||
global_context->setIndexMarkCache(index_mark_cache_size);
|
||||
|
@ -1351,8 +1351,8 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
settings.async_insert_max_data_size,
|
||||
AsynchronousInsertQueue::Timeout{.busy = settings.async_insert_busy_timeout_ms, .stale = settings.async_insert_stale_timeout_ms}));
|
||||
|
||||
/// Size of cache for marks (index of MergeTree family of tables). It is mandatory.
|
||||
size_t mark_cache_size = config().getUInt64("mark_cache_size");
|
||||
/// Size of cache for marks (index of MergeTree family of tables).
|
||||
size_t mark_cache_size = config().getUInt64("mark_cache_size", 5368709120);
|
||||
if (!mark_cache_size)
|
||||
LOG_ERROR(log, "Too low mark cache size will lead to severe performance degradation.");
|
||||
if (mark_cache_size > max_cache_size)
|
||||
@ -1368,8 +1368,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
if (index_uncompressed_cache_size)
|
||||
global_context->setIndexUncompressedCache(index_uncompressed_cache_size);
|
||||
|
||||
/// Size of cache for index marks (index of MergeTree skip indices). It is necessary.
|
||||
/// Specify default value for index_mark_cache_size explicitly!
|
||||
/// Size of cache for index marks (index of MergeTree skip indices).
|
||||
size_t index_mark_cache_size = config().getUInt64("index_mark_cache_size", 0);
|
||||
if (index_mark_cache_size)
|
||||
global_context->setIndexMarkCache(index_mark_cache_size);
|
||||
|
@ -365,6 +365,59 @@
|
||||
<!-- Path to data directory, with trailing slash. -->
|
||||
<path>/var/lib/clickhouse/</path>
|
||||
|
||||
<!-- Multi-disk configuration example: -->
|
||||
<!--
|
||||
<storage_configuration>
|
||||
<disks>
|
||||
<default>
|
||||
<keep_free_space_bytes>0</keep_free_space_bytes>
|
||||
</default>
|
||||
<data>
|
||||
<path>/data/</path>
|
||||
<keep_free_space_bytes>0</keep_free_space_bytes>
|
||||
</data>
|
||||
<s3>
|
||||
<type>s3</type>
|
||||
<endpoint>http://path/to/endpoint</endpoint>
|
||||
<access_key_id>your_access_key_id</access_key_id>
|
||||
<secret_access_key>your_secret_access_key</secret_access_key>
|
||||
</s3>
|
||||
<blob_storage_disk>
|
||||
<type>azure_blob_storage</type>
|
||||
<storage_account_url>http://account.blob.core.windows.net</storage_account_url>
|
||||
<container_name>container</container_name>
|
||||
<account_name>account</account_name>
|
||||
<account_key>pass123</account_key>
|
||||
<metadata_path>/var/lib/clickhouse/disks/blob_storage_disk/</metadata_path>
|
||||
<cache_enabled>true</cache_enabled>
|
||||
<cache_path>/var/lib/clickhouse/disks/blob_storage_disk/cache/</cache_path>
|
||||
<skip_access_check>false</skip_access_check>
|
||||
</blob_storage_disk>
|
||||
</disks>
|
||||
|
||||
<policies>
|
||||
<all>
|
||||
<volumes>
|
||||
<main>
|
||||
<disk>default</disk>
|
||||
<disk>data</disk>
|
||||
<disk>s3</disk>
|
||||
<disk>blob_storage_disk</disk>
|
||||
|
||||
<max_data_part_size_bytes></max_data_part_size_bytes>
|
||||
<max_data_part_size_ratio></max_data_part_size_ratio>
|
||||
<perform_ttl_move_on_insert>true</perform_ttl_move_on_insert>
|
||||
<prefer_not_to_merge>false</prefer_not_to_merge>
|
||||
<load_balancing>round_robin</load_balancing>
|
||||
</main>
|
||||
</volumes>
|
||||
<move_factor>0.2</move_factor>
|
||||
</all>
|
||||
</policies>
|
||||
</storage_configuration>
|
||||
-->
|
||||
|
||||
|
||||
<!-- Path to temporary data for processing hard queries. -->
|
||||
<tmp_path>/var/lib/clickhouse/tmp/</tmp_path>
|
||||
|
||||
|
@ -13,7 +13,6 @@
|
||||
<path>./</path>
|
||||
|
||||
<uncompressed_cache_size>8589934592</uncompressed_cache_size>
|
||||
<mark_cache_size>5368709120</mark_cache_size>
|
||||
<mlock_executable>true</mlock_executable>
|
||||
|
||||
<users>
|
||||
|
@ -71,7 +71,7 @@ void processFile(const fs::path & file_path, const fs::path & dst_path, bool tes
|
||||
dst_buf->next();
|
||||
dst_buf->finalize();
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
void processTableFiles(const fs::path & data_path, fs::path dst_path, bool test_mode, bool link)
|
||||
|
@ -245,4 +245,4 @@ public:
|
||||
|
||||
};
|
||||
|
||||
};
|
||||
}
|
||||
|
@ -136,4 +136,4 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
};
|
||||
}
|
||||
|
@ -102,4 +102,4 @@ public:
|
||||
|
||||
};
|
||||
|
||||
};
|
||||
}
|
||||
|
@ -234,4 +234,4 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
};
|
||||
}
|
||||
|
@ -37,18 +37,6 @@ if (USE_DEBUG_HELPERS)
|
||||
add_compile_options($<$<COMPILE_LANGUAGE:CXX>:${INCLUDE_DEBUG_HELPERS}>)
|
||||
endif ()
|
||||
|
||||
if (COMPILER_GCC)
|
||||
# If we leave this optimization enabled, gcc-7 replaces a pair of SSE intrinsics (16 byte load, store) with a call to memcpy.
|
||||
# It leads to slow code. This is a compiler bug. It looks like this:
|
||||
#
|
||||
# (gdb) bt
|
||||
#0 memcpy (destination=0x7faa6e9f1638, source=0x7faa81d9e9a8, size=16) at ../libs/libmemcpy/memcpy.h:11
|
||||
#1 0x0000000005341c5f in _mm_storeu_si128 (__B=..., __P=<optimized out>) at /usr/lib/gcc/x86_64-linux-gnu/7/include/emmintrin.h:720
|
||||
#2 memcpySmallAllowReadWriteOverflow15Impl (n=<optimized out>, src=<optimized out>, dst=<optimized out>) at ../src/Common/memcpySmall.h:37
|
||||
|
||||
add_definitions ("-fno-tree-loop-distribute-patterns")
|
||||
endif ()
|
||||
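To make the comment above concrete, the sketch below shows roughly the code pattern it refers to: a 16-byte load/store pair written with SSE intrinsics. This is illustrative only and not taken from `memcpySmall.h`; with the optimization left enabled, older GCC could replace such a pair with a library `memcpy` call, which is much slower for tiny, hot copies.

``` cpp
#include <emmintrin.h>

/// Copy exactly 16 bytes using one SSE load and one SSE store.
/// Illustrative sketch of the pattern the CMake comment above describes.
inline void copy16(void * dst, const void * src)
{
    const __m128i value = _mm_loadu_si128(reinterpret_cast<const __m128i *>(src));
    _mm_storeu_si128(reinterpret_cast<__m128i *>(dst), value);
}
```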
|
||||
# ClickHouse developers may use platform-dependent code under some macro (e.g. `#ifdef ENABLE_MULTITARGET`).
|
||||
# If turned ON, this option defines such macro.
|
||||
# See `src/Common/TargetSpecific.h`
|
||||
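A hypothetical sketch of what such a guard looks like in source code follows; the function names here are placeholders, and the real dispatching machinery lives in `src/Common/TargetSpecific.h`.

``` cpp
#include <cstddef>

#ifdef ENABLE_MULTITARGET
/// Compiled only when the build option above defines the macro:
/// a variant of the function built for a specific instruction set.
void processAVX2(const float * data, size_t size);
#endif

/// Portable fallback that is always available.
void processGeneric(const float * data, size_t size);
```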
@ -562,7 +550,7 @@ include ("${ClickHouse_SOURCE_DIR}/cmake/add_check.cmake")
|
||||
if (ENABLE_TESTS)
|
||||
macro (grep_gtest_sources BASE_DIR DST_VAR)
|
||||
# Could match files that are not in tests/ directories
|
||||
file(GLOB_RECURSE "${DST_VAR}" RELATIVE "${BASE_DIR}" "gtest*.cpp")
|
||||
file(GLOB_RECURSE "${DST_VAR}" CONFIGURE_DEPENDS RELATIVE "${BASE_DIR}" "gtest*.cpp")
|
||||
endmacro()
|
||||
|
||||
# attach all dbms gtest sources
|
||||
|
@ -345,7 +345,7 @@ HedgedConnections::ReplicaLocation HedgedConnections::getReadyReplicaLocation(As
|
||||
else
|
||||
throw Exception("Unknown event from epoll", ErrorCodes::LOGICAL_ERROR);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
bool HedgedConnections::resumePacketReceiver(const HedgedConnections::ReplicaLocation & location)
|
||||
{
|
||||
|
@ -219,7 +219,7 @@ protected:
|
||||
/// Get internal immutable ptr. Does not change internal use counter.
|
||||
immutable_ptr<T> detach() && { return std::move(value); }
|
||||
|
||||
operator bool() const { return value != nullptr; } /// NOLINT
|
||||
explicit operator bool() const { return value != nullptr; }
|
||||
bool operator! () const { return value == nullptr; }
|
||||
|
||||
bool operator== (const chameleon_ptr & rhs) const { return value == rhs.value; }
|
||||
|
@ -18,7 +18,7 @@ namespace
|
||||
result += '.';
|
||||
result += subkey;
|
||||
return result;
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
@ -14,7 +14,7 @@ bool safeFsExists(const String & path)
|
||||
{
|
||||
std::error_code ec;
|
||||
return fs::exists(path, ec);
|
||||
};
|
||||
}
|
||||
|
||||
bool configReadClient(Poco::Util::LayeredConfiguration & config, const std::string & home_path)
|
||||
{
|
||||
|
@ -46,8 +46,8 @@ public:
|
||||
|
||||
Exception * clone() const override { return new Exception(*this); }
|
||||
void rethrow() const override { throw *this; }
|
||||
const char * name() const throw() override { return "DB::Exception"; }
|
||||
const char * what() const throw() override { return message().data(); }
|
||||
const char * name() const noexcept override { return "DB::Exception"; }
|
||||
const char * what() const noexcept override { return message().data(); }
|
||||
|
||||
/// Add something to the existing message.
|
||||
template <typename... Args>
|
||||
@ -75,7 +75,7 @@ private:
|
||||
#endif
|
||||
bool remote = false;
|
||||
|
||||
const char * className() const throw() override { return "DB::Exception"; }
|
||||
const char * className() const noexcept override { return "DB::Exception"; }
|
||||
};
|
||||
|
||||
|
||||
@ -100,8 +100,8 @@ private:
|
||||
int saved_errno;
|
||||
std::optional<std::string> path;
|
||||
|
||||
const char * name() const throw() override { return "DB::ErrnoException"; }
|
||||
const char * className() const throw() override { return "DB::ErrnoException"; }
|
||||
const char * name() const noexcept override { return "DB::ErrnoException"; }
|
||||
const char * className() const noexcept override { return "DB::ErrnoException"; }
|
||||
};
|
||||
|
||||
|
||||
@ -141,8 +141,8 @@ private:
|
||||
String file_name;
|
||||
mutable std::string formatted_message;
|
||||
|
||||
const char * name() const throw() override { return "DB::ParsingException"; }
|
||||
const char * className() const throw() override { return "DB::ParsingException"; }
|
||||
const char * name() const noexcept override { return "DB::ParsingException"; }
|
||||
const char * className() const noexcept override { return "DB::ParsingException"; }
|
||||
};
|
||||
|
||||
|
||||
|
@ -169,7 +169,7 @@ struct StringHashTableLookupResult
|
||||
auto & operator*() const { return *this; }
|
||||
auto * operator->() { return this; }
|
||||
auto * operator->() const { return this; }
|
||||
operator bool() const { return mapped_ptr; } /// NOLINT
|
||||
explicit operator bool() const { return mapped_ptr; }
|
||||
friend bool operator==(const StringHashTableLookupResult & a, const std::nullptr_t &) { return !a.mapped_ptr; }
|
||||
friend bool operator==(const std::nullptr_t &, const StringHashTableLookupResult & b) { return !b.mapped_ptr; }
|
||||
friend bool operator!=(const StringHashTableLookupResult & a, const std::nullptr_t &) { return a.mapped_ptr; }
|
||||
|
@ -22,8 +22,8 @@ public:
|
||||
void rethrow() const override { throw *this; }
|
||||
|
||||
private:
|
||||
const char * name() const throw() override { return "DB::NetException"; }
|
||||
const char * className() const throw() override { return "DB::NetException"; }
|
||||
const char * name() const noexcept override { return "DB::NetException"; }
|
||||
const char * className() const noexcept override { return "DB::NetException"; }
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -69,4 +69,4 @@ public:
|
||||
size_t rulesCount() const;
|
||||
};
|
||||
|
||||
};
|
||||
}
|
||||
|
@ -401,8 +401,8 @@ public:
|
||||
Exception(const Error code_, const std::string & path); /// NOLINT
|
||||
Exception(const Exception & exc);
|
||||
|
||||
const char * name() const throw() override { return "Coordination::Exception"; }
|
||||
const char * className() const throw() override { return "Coordination::Exception"; }
|
||||
const char * name() const noexcept override { return "Coordination::Exception"; }
|
||||
const char * className() const noexcept override { return "Coordination::Exception"; }
|
||||
Exception * clone() const override { return new Exception(*this); }
|
||||
|
||||
const Error code;
|
||||
|
@ -32,7 +32,7 @@ void assertRange(
|
||||
ASSERT_EQ(range.left, expected_range.left);
|
||||
ASSERT_EQ(range.right, expected_range.right);
|
||||
ASSERT_EQ(file_segment->state(), expected_state);
|
||||
};
|
||||
}
|
||||
|
||||
void printRanges(const auto & segments)
|
||||
{
|
||||
|
@ -22,7 +22,7 @@ extern const int CANNOT_COMPILE_REGEXP;
|
||||
extern const int NO_ELEMENTS_IN_CONFIG;
|
||||
extern const int INVALID_CONFIG_PARAMETER;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
TEST(Common, SensitiveDataMasker)
|
||||
|
@ -790,7 +790,7 @@ std::vector<CodecTestSequence> generatePyramidOfSequences(const size_t sequences
|
||||
}
|
||||
|
||||
return sequences;
|
||||
};
|
||||
}
|
||||
|
||||
// helper macro to produce human-friendly sequence name from generator
|
||||
#define G(generator) generator, #generator
|
||||
|
@ -15,6 +15,7 @@
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <boost/algorithm/string.hpp>
|
||||
#include <libnuraft/cluster_config.hxx>
|
||||
#include <libnuraft/log_val_type.hxx>
|
||||
#include <libnuraft/raft_server.hxx>
|
||||
#include <Poco/Util/AbstractConfiguration.h>
|
||||
#include <Poco/Util/Application.h>
|
||||
@ -315,6 +316,22 @@ void KeeperServer::startup(const Poco::Util::AbstractConfiguration & config, boo
|
||||
|
||||
state_manager->loadLogStore(state_machine->last_commit_index() + 1, coordination_settings->reserved_log_items);
|
||||
|
||||
auto log_store = state_manager->load_log_store();
|
||||
auto next_log_idx = log_store->next_slot();
|
||||
if (next_log_idx > 0 && next_log_idx > state_machine->last_commit_index())
|
||||
{
|
||||
auto log_entries = log_store->log_entries(state_machine->last_commit_index() + 1, next_log_idx);
|
||||
|
||||
auto idx = state_machine->last_commit_index() + 1;
|
||||
for (const auto & entry : *log_entries)
|
||||
{
|
||||
if (entry && entry->get_val_type() == nuraft::log_val_type::app_log)
|
||||
state_machine->preprocess(idx, entry->get_buf());
|
||||
|
||||
++idx;
|
||||
}
|
||||
}
|
||||
|
||||
loadLatestConfig();
|
||||
|
||||
last_local_config = state_manager->parseServersConfiguration(config, true).cluster_config;
|
||||
|
@ -44,7 +44,6 @@ namespace
|
||||
else /// backward compatibility
|
||||
request_for_session.time = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch()).count();
|
||||
|
||||
|
||||
return request_for_session;
|
||||
}
|
||||
}
|
||||
@ -114,6 +113,21 @@ void KeeperStateMachine::init()
|
||||
storage = std::make_unique<KeeperStorage>(coordination_settings->dead_session_check_period_ms.totalMilliseconds(), superdigest);
|
||||
}
|
||||
|
||||
nuraft::ptr<nuraft::buffer> KeeperStateMachine::pre_commit(uint64_t log_idx, nuraft::buffer & data)
|
||||
{
|
||||
preprocess(log_idx, data);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
void KeeperStateMachine::preprocess(const uint64_t log_idx, nuraft::buffer & data)
|
||||
{
|
||||
auto request_for_session = parseRequest(data);
|
||||
if (request_for_session.request->getOpNum() == Coordination::OpNum::SessionID)
|
||||
return;
|
||||
std::lock_guard lock(storage_and_responses_lock);
|
||||
storage->preprocessRequest(request_for_session.request, request_for_session.session_id, request_for_session.time, log_idx);
|
||||
}
|
||||
|
||||
nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit(const uint64_t log_idx, nuraft::buffer & data)
|
||||
{
|
||||
auto request_for_session = parseRequest(data);
|
||||
@ -182,6 +196,12 @@ void KeeperStateMachine::commit_config(const uint64_t /* log_idx */, nuraft::ptr
|
||||
cluster_config = ClusterConfig::deserialize(*tmp);
|
||||
}
|
||||
|
||||
void KeeperStateMachine::rollback(uint64_t log_idx, nuraft::buffer & /*data*/)
|
||||
{
|
||||
std::lock_guard lock(storage_and_responses_lock);
|
||||
storage->rollbackRequest(log_idx);
|
||||
}
|
||||
|
||||
nuraft::ptr<nuraft::snapshot> KeeperStateMachine::last_snapshot()
|
||||
{
|
||||
/// Just return the latest snapshot.
|
||||
@ -343,7 +363,7 @@ void KeeperStateMachine::processReadRequest(const KeeperStorage::RequestForSessi
|
||||
{
|
||||
/// Pure local request, just process it with storage
|
||||
std::lock_guard lock(storage_and_responses_lock);
|
||||
auto responses = storage->processRequest(request_for_session.request, request_for_session.session_id, request_for_session.time, std::nullopt);
|
||||
auto responses = storage->processRequest(request_for_session.request, request_for_session.session_id, request_for_session.time, std::nullopt, true /*check_acl*/, true /*is_local*/);
|
||||
for (const auto & response : responses)
|
||||
if (!responses_queue.push(response))
|
||||
throw Exception(ErrorCodes::SYSTEM_ERROR, "Could not push response with session id {} into responses queue", response.session_id);
|
||||
|
@ -27,16 +27,16 @@ public:
|
||||
/// Read state from the latest snapshot
|
||||
void init();
|
||||
|
||||
/// Currently not supported
|
||||
nuraft::ptr<nuraft::buffer> pre_commit(const uint64_t /*log_idx*/, nuraft::buffer & /*data*/) override { return nullptr; }
|
||||
void preprocess(uint64_t log_idx, nuraft::buffer & data);
|
||||
|
||||
nuraft::ptr<nuraft::buffer> pre_commit(uint64_t log_idx, nuraft::buffer & data) override;
|
||||
|
||||
nuraft::ptr<nuraft::buffer> commit(const uint64_t log_idx, nuraft::buffer & data) override; /// NOLINT
|
||||
|
||||
/// Save new cluster config to our snapshot (copy of the config stored in StateManager)
|
||||
void commit_config(const uint64_t log_idx, nuraft::ptr<nuraft::cluster_config> & new_conf) override; /// NOLINT
|
||||
|
||||
/// Currently not supported
|
||||
void rollback(const uint64_t /*log_idx*/, nuraft::buffer & /*data*/) override {}
|
||||
void rollback(uint64_t log_idx, nuraft::buffer & data) override;
|
||||
|
||||
uint64_t last_commit_index() override { return last_committed_idx; }
|
||||
|
||||
|
File diff suppressed because it is too large
@ -1,14 +1,14 @@
|
||||
#pragma once
|
||||
|
||||
#include <Common/ZooKeeper/IKeeper.h>
|
||||
#include <Common/ConcurrentBoundedQueue.h>
|
||||
#include <Common/ZooKeeper/ZooKeeperCommon.h>
|
||||
#include <Coordination/SessionExpiryQueue.h>
|
||||
#include <Coordination/ACLMap.h>
|
||||
#include <Coordination/SnapshotableHashTable.h>
|
||||
#include <IO/WriteBufferFromString.h>
|
||||
#include <unordered_map>
|
||||
#include <vector>
|
||||
#include <Coordination/ACLMap.h>
|
||||
#include <Coordination/SessionExpiryQueue.h>
|
||||
#include <Coordination/SnapshotableHashTable.h>
|
||||
#include <IO/WriteBufferFromString.h>
|
||||
#include <Common/ConcurrentBoundedQueue.h>
|
||||
#include <Common/ZooKeeper/IKeeper.h>
|
||||
#include <Common/ZooKeeper/ZooKeeperCommon.h>
|
||||
|
||||
#include <absl/container/flat_hash_set.h>
|
||||
|
||||
@ -29,7 +29,6 @@ struct KeeperStorageSnapshot;
|
||||
class KeeperStorage
|
||||
{
|
||||
public:
|
||||
|
||||
struct Node
|
||||
{
|
||||
uint64_t acl_id = 0; /// 0 -- no ACL by default
|
||||
@ -41,26 +40,18 @@ public:
|
||||
Node() : size_bytes(sizeof(Node)) { }
|
||||
|
||||
/// Object memory size
|
||||
uint64_t sizeInBytes() const
|
||||
{
|
||||
return size_bytes;
|
||||
}
|
||||
uint64_t sizeInBytes() const { return size_bytes; }
|
||||
|
||||
void setData(String new_data);
|
||||
|
||||
const auto & getData() const noexcept
|
||||
{
|
||||
return data;
|
||||
}
|
||||
const auto & getData() const noexcept { return data; }
|
||||
|
||||
void addChild(StringRef child_path);
|
||||
|
||||
void removeChild(StringRef child_path);
|
||||
|
||||
const auto & getChildren() const noexcept
|
||||
{
|
||||
return children;
|
||||
}
|
||||
const auto & getChildren() const noexcept { return children; }
|
||||
|
||||
private:
|
||||
String data;
|
||||
ChildrenSet children{};
|
||||
@ -85,10 +76,7 @@ public:
|
||||
std::string scheme;
|
||||
std::string id;
|
||||
|
||||
bool operator==(const AuthID & other) const
|
||||
{
|
||||
return scheme == other.scheme && id == other.id;
|
||||
}
|
||||
bool operator==(const AuthID & other) const { return scheme == other.scheme && id == other.id; }
|
||||
};
|
||||
|
||||
using RequestsForSessions = std::vector<RequestForSession>;
|
||||
@ -112,6 +100,146 @@ public:
|
||||
/// container.
|
||||
Container container;
|
||||
|
||||
// Applying ZooKeeper request to storage consists of two steps:
|
||||
// - preprocessing which, instead of applying the changes directly to storage,
|
||||
// generates deltas with those changes, denoted with the request ZXID
|
||||
// - processing which applies deltas with the correct ZXID to the storage
|
||||
//
|
||||
// Delta objects allow us two things:
|
||||
// - fetch the latest, uncommitted state of an object by getting the committed
|
||||
// state of that same object from the storage and applying the deltas
|
||||
// in the same order as they are defined
|
||||
// - quickly commit the changes to the storage
|
||||
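The sketch below illustrates this preprocess/commit split with heavily simplified, hypothetical types (it is not the ClickHouse implementation; the real delta types follow right after): deltas are queued with their ZXID during preprocessing and only applied to the node map on commit.

``` cpp
#include <cstdint>
#include <deque>
#include <functional>
#include <map>
#include <string>
#include <type_traits>
#include <utility>
#include <variant>

struct Node { std::string data; };

struct CreateNodeDelta { std::string data; };
struct RemoveNodeDelta {};
struct UpdateNodeDelta { std::function<void(Node &)> update_fn; };
using Operation = std::variant<CreateNodeDelta, RemoveNodeDelta, UpdateNodeDelta>;

struct Delta { std::string path; int64_t zxid; Operation operation; };

struct MiniStorage
{
    std::map<std::string, Node> nodes; /// committed state
    std::deque<Delta> deltas;          /// uncommitted changes, in request order

    /// Preprocessing: only record what the request would change.
    void preprocess(Delta delta) { deltas.push_back(std::move(delta)); }

    /// Processing: apply every delta recorded for the given ZXID, in order.
    void commit(int64_t zxid)
    {
        while (!deltas.empty() && deltas.front().zxid == zxid)
        {
            Delta delta = std::move(deltas.front());
            deltas.pop_front();
            std::visit([&](auto && op)
            {
                using T = std::decay_t<decltype(op)>;
                if constexpr (std::is_same_v<T, CreateNodeDelta>)
                    nodes[delta.path] = Node{op.data};
                else if constexpr (std::is_same_v<T, RemoveNodeDelta>)
                    nodes.erase(delta.path);
                else if constexpr (std::is_same_v<T, UpdateNodeDelta>)
                    op.update_fn(nodes[delta.path]);
            }, delta.operation);
        }
    }
};
```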
struct CreateNodeDelta
|
||||
{
|
||||
Coordination::Stat stat;
|
||||
bool is_ephemeral;
|
||||
bool is_sequental;
|
||||
Coordination::ACLs acls;
|
||||
String data;
|
||||
};
|
||||
|
||||
struct RemoveNodeDelta
|
||||
{
|
||||
int32_t version{-1};
|
||||
};
|
||||
|
||||
struct UpdateNodeDelta
|
||||
{
|
||||
std::function<void(Node &)> update_fn;
|
||||
int32_t version{-1};
|
||||
};
|
||||
|
||||
struct SetACLDelta
|
||||
{
|
||||
Coordination::ACLs acls;
|
||||
int32_t version{-1};
|
||||
};
|
||||
|
||||
struct ErrorDelta
|
||||
{
|
||||
Coordination::Error error;
|
||||
};
|
||||
|
||||
struct FailedMultiDelta
|
||||
{
|
||||
std::vector<Coordination::Error> error_codes;
|
||||
};
|
||||
|
||||
// Denotes end of a subrequest in multi request
|
||||
struct SubDeltaEnd
|
||||
{
|
||||
};
|
||||
|
||||
struct AddAuthDelta
|
||||
{
|
||||
int64_t session_id;
|
||||
AuthID auth_id;
|
||||
};
|
||||
|
||||
using Operation
|
||||
= std::variant<CreateNodeDelta, RemoveNodeDelta, UpdateNodeDelta, SetACLDelta, AddAuthDelta, ErrorDelta, SubDeltaEnd, FailedMultiDelta>;
|
||||
|
||||
struct Delta
|
||||
{
|
||||
Delta(String path_, int64_t zxid_, Operation operation_) : path(std::move(path_)), zxid(zxid_), operation(std::move(operation_)) { }
|
||||
|
||||
Delta(int64_t zxid_, Coordination::Error error) : Delta("", zxid_, ErrorDelta{error}) { }
|
||||
|
||||
Delta(int64_t zxid_, Operation subdelta) : Delta("", zxid_, subdelta) { }
|
||||
|
||||
String path;
|
||||
int64_t zxid;
|
||||
Operation operation;
|
||||
};
|
||||
|
||||
struct UncommittedState
|
||||
{
|
||||
explicit UncommittedState(KeeperStorage & storage_) : storage(storage_) { }
|
||||
|
||||
template <typename Visitor>
|
||||
void applyDeltas(StringRef path, const Visitor & visitor) const
|
||||
{
|
||||
for (const auto & delta : deltas)
|
||||
{
|
||||
if (path.empty() || delta.path == path)
|
||||
std::visit(visitor, delta.operation);
|
||||
}
|
||||
}
|
||||
|
||||
bool hasACL(int64_t session_id, bool is_local, std::function<bool(const AuthID &)> predicate)
|
||||
{
|
||||
for (const auto & session_auth : storage.session_and_auth[session_id])
|
||||
{
|
||||
if (predicate(session_auth))
|
||||
return true;
|
||||
}
|
||||
|
||||
if (is_local)
|
||||
return false;
|
||||
|
||||
|
||||
for (const auto & delta : deltas)
|
||||
{
|
||||
if (const auto * auth_delta = std::get_if<KeeperStorage::AddAuthDelta>(&delta.operation);
|
||||
auth_delta && auth_delta->session_id == session_id && predicate(auth_delta->auth_id))
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
std::shared_ptr<Node> getNode(StringRef path);
|
||||
bool hasNode(StringRef path) const;
|
||||
Coordination::ACLs getACLs(StringRef path) const;
|
||||
|
||||
std::deque<Delta> deltas;
|
||||
KeeperStorage & storage;
|
||||
};
|
||||
|
||||
UncommittedState uncommitted_state{*this};
|
||||
|
||||
Coordination::Error commit(int64_t zxid, int64_t session_id);
|
||||
|
||||
// Create node in the storage
|
||||
// Returns false if it failed to create the node, true otherwise
|
||||
// We don't care about the exact failure because we should've caught it during preprocessing
|
||||
bool createNode(
|
||||
const std::string & path,
|
||||
String data,
|
||||
const Coordination::Stat & stat,
|
||||
bool is_sequental,
|
||||
bool is_ephemeral,
|
||||
Coordination::ACLs node_acls,
|
||||
int64_t session_id);
|
||||
|
||||
// Remove node in the storage
|
||||
// Returns false if it failed to remove the node, true otherwise
|
||||
// We don't care about the exact failure because we should've caught it during preprocessing
|
||||
bool removeNode(const std::string & path, int32_t version);
|
||||
|
||||
bool checkACL(StringRef path, int32_t permissions, int64_t session_id, bool is_local);
|
||||
|
||||
/// Mapping session_id -> set of ephemeral nodes paths
|
||||
Ephemerals ephemerals;
|
||||
/// Mapping session_id -> set of watched nodes paths
|
||||
@ -130,15 +258,12 @@ public:
|
||||
|
||||
/// Currently active watches (node_path -> subscribed sessions)
|
||||
Watches watches;
|
||||
Watches list_watches; /// Watches for 'list' request (watches on children).
|
||||
Watches list_watches; /// Watches for 'list' request (watches on children).
|
||||
|
||||
void clearDeadWatches(int64_t session_id);
|
||||
|
||||
/// Get current zxid
|
||||
int64_t getZXID() const
|
||||
{
|
||||
return zxid;
|
||||
}
|
||||
int64_t getZXID() const { return zxid; }
|
||||
|
||||
const String superdigest;
|
||||
|
||||
@ -162,78 +287,53 @@ public:
|
||||
|
||||
/// Process user request and return response.
|
||||
/// check_acl = false only when converting data from ZooKeeper.
|
||||
ResponsesForSessions processRequest(const Coordination::ZooKeeperRequestPtr & request, int64_t session_id, int64_t time, std::optional<int64_t> new_last_zxid, bool check_acl = true);
|
||||
ResponsesForSessions processRequest(
|
||||
const Coordination::ZooKeeperRequestPtr & request,
|
||||
int64_t session_id,
|
||||
int64_t time,
|
||||
std::optional<int64_t> new_last_zxid,
|
||||
bool check_acl = true,
|
||||
bool is_local = false);
|
||||
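For reference, the call site added in `KeeperStateMachine::processReadRequest` (shown earlier in this diff) exercises the extended signature for a purely local read, with ACL checks enabled and no new ZXID; reproduced here with the arguments laid out one per line:

``` cpp
auto responses = storage->processRequest(
    request_for_session.request,
    request_for_session.session_id,
    request_for_session.time,
    std::nullopt,       /// new_last_zxid: local reads do not advance the ZXID
    true /*check_acl*/,
    true /*is_local*/);
```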
void preprocessRequest(
|
||||
const Coordination::ZooKeeperRequestPtr & request, int64_t session_id, int64_t time, int64_t new_last_zxid, bool check_acl = true);
|
||||
void rollbackRequest(int64_t rollback_zxid);
|
||||
|
||||
void finalize();
|
||||
|
||||
/// Set of methods for creating snapshots
|
||||
|
||||
/// Turn on snapshot mode, so data inside Container is not deleted, but replaced with new version.
|
||||
void enableSnapshotMode(size_t up_to_version)
|
||||
{
|
||||
container.enableSnapshotMode(up_to_version);
|
||||
|
||||
}
|
||||
void enableSnapshotMode(size_t up_to_version) { container.enableSnapshotMode(up_to_version); }
|
||||
|
||||
/// Turn off snapshot mode.
|
||||
void disableSnapshotMode()
|
||||
{
|
||||
container.disableSnapshotMode();
|
||||
}
|
||||
void disableSnapshotMode() { container.disableSnapshotMode(); }
|
||||
|
||||
Container::const_iterator getSnapshotIteratorBegin() const
|
||||
{
|
||||
return container.begin();
|
||||
}
|
||||
Container::const_iterator getSnapshotIteratorBegin() const { return container.begin(); }
|
||||
|
||||
/// Clear outdated data from internal container.
|
||||
void clearGarbageAfterSnapshot()
|
||||
{
|
||||
container.clearOutdatedNodes();
|
||||
}
|
||||
void clearGarbageAfterSnapshot() { container.clearOutdatedNodes(); }
|
||||
|
||||
/// Get all active sessions
|
||||
const SessionAndTimeout & getActiveSessions() const
|
||||
{
|
||||
return session_and_timeout;
|
||||
}
|
||||
const SessionAndTimeout & getActiveSessions() const { return session_and_timeout; }
|
||||
|
||||
/// Get all dead sessions
|
||||
std::vector<int64_t> getDeadSessions() const
|
||||
{
|
||||
return session_expiry_queue.getExpiredSessions();
|
||||
}
|
||||
std::vector<int64_t> getDeadSessions() const { return session_expiry_queue.getExpiredSessions(); }
|
||||
|
||||
/// Introspection functions mostly used in 4-letter commands
|
||||
uint64_t getNodesCount() const
|
||||
{
|
||||
return container.size();
|
||||
}
|
||||
uint64_t getNodesCount() const { return container.size(); }
|
||||
|
||||
uint64_t getApproximateDataSize() const
|
||||
{
|
||||
return container.getApproximateDataSize();
|
||||
}
|
||||
uint64_t getApproximateDataSize() const { return container.getApproximateDataSize(); }
|
||||
|
||||
uint64_t getArenaDataSize() const
|
||||
{
|
||||
return container.keyArenaSize();
|
||||
}
|
||||
uint64_t getArenaDataSize() const { return container.keyArenaSize(); }
|
||||
|
||||
|
||||
uint64_t getTotalWatchesCount() const;
|
||||
|
||||
uint64_t getWatchedPathsCount() const
|
||||
{
|
||||
return watches.size() + list_watches.size();
|
||||
}
|
||||
uint64_t getWatchedPathsCount() const { return watches.size() + list_watches.size(); }
|
||||
|
||||
uint64_t getSessionsWithWatchesCount() const;
|
||||
|
||||
uint64_t getSessionWithEphemeralNodesCount() const
|
||||
{
|
||||
return ephemerals.size();
|
||||
}
|
||||
uint64_t getSessionWithEphemeralNodesCount() const { return ephemerals.size(); }
|
||||
uint64_t getTotalEphemeralNodesCount() const;
|
||||
|
||||
void dumpWatches(WriteBufferFromOwnString & buf) const;
|
||||
|
@ -12,7 +12,6 @@ public:
WriteBufferFromNuraftBuffer();

nuraft::ptr<nuraft::buffer> getBuffer();
bool isFinished() const { return finalized; }

~WriteBufferFromNuraftBuffer() override;

@ -520,6 +520,7 @@ bool deserializeTxn(KeeperStorage & storage, ReadBuffer & in, Poco::Logger * /*l
if (request->getOpNum() == Coordination::OpNum::Multi && hasErrorsInMultiRequest(request))
return true;

storage.preprocessRequest(request, session_id, time, zxid, /* check_acl = */ false);
storage.processRequest(request, session_id, time, zxid, /* check_acl = */ false);
}
}
@ -1,6 +1,8 @@
#include <chrono>
#include <gtest/gtest.h>
#include "Common/ZooKeeper/IKeeper.h"

#include "Coordination/KeeperStorage.h"
#include "config_core.h"

#if USE_NURAFT
@ -1261,6 +1263,7 @@ void testLogAndStateMachine(Coordination::CoordinationSettingsPtr settings, uint
changelog.append(entry);
changelog.end_of_append_batch(0, 0);

state_machine->pre_commit(i, changelog.entry_at(i)->get_buf());
state_machine->commit(i, changelog.entry_at(i)->get_buf());
bool snapshot_created = false;
if (i % settings->snapshot_distance == 0)
@ -1305,6 +1308,7 @@ void testLogAndStateMachine(Coordination::CoordinationSettingsPtr settings, uint

for (size_t i = restore_machine->last_commit_index() + 1; i < restore_changelog.next_slot(); ++i)
{
restore_machine->pre_commit(i, changelog.entry_at(i)->get_buf());
restore_machine->commit(i, changelog.entry_at(i)->get_buf());
}

@ -1407,6 +1411,7 @@ TEST_P(CoordinationTest, TestEphemeralNodeRemove)
request_c->path = "/hello";
request_c->is_ephemeral = true;
auto entry_c = getLogEntryFromZKRequest(0, 1, request_c);
state_machine->pre_commit(1, entry_c->get_buf());
state_machine->commit(1, entry_c->get_buf());
const auto & storage = state_machine->getStorage();

@ -1415,6 +1420,7 @@ TEST_P(CoordinationTest, TestEphemeralNodeRemove)
request_d->path = "/hello";
/// Delete from other session
auto entry_d = getLogEntryFromZKRequest(0, 2, request_d);
state_machine->pre_commit(2, entry_d->get_buf());
state_machine->commit(2, entry_d->get_buf());

EXPECT_EQ(storage.ephemerals.size(), 0);
@ -1777,6 +1783,130 @@ TEST_P(CoordinationTest, TestLogGap)
EXPECT_EQ(changelog1.next_slot(), 61);
}

template <typename ResponseType>
ResponseType getSingleResponse(const auto & responses)
{
EXPECT_FALSE(responses.empty());
return dynamic_cast<ResponseType &>(*responses[0].response);
}

TEST_P(CoordinationTest, TestUncommittedStateBasicCrud)
{
using namespace DB;
using namespace Coordination;

DB::KeeperStorage storage{500, ""};

constexpr std::string_view path = "/test";

const auto get_committed_data = [&]() -> std::optional<String>
{
auto request = std::make_shared<ZooKeeperGetRequest>();
request->path = path;
auto responses = storage.processRequest(request, 0, 0, std::nullopt, true, true);
const auto & get_response = getSingleResponse<ZooKeeperGetResponse>(responses);

if (get_response.error != Error::ZOK)
return std::nullopt;

return get_response.data;
};

const auto preprocess_get = [&](int64_t zxid)
{
auto get_request = std::make_shared<ZooKeeperGetRequest>();
get_request->path = path;
storage.preprocessRequest(get_request, 0, 0, zxid);
return get_request;
};

const auto create_request = std::make_shared<ZooKeeperCreateRequest>();
create_request->path = path;
create_request->data = "initial_data";
storage.preprocessRequest(create_request, 0, 0, 1);
storage.preprocessRequest(create_request, 0, 0, 2);

ASSERT_FALSE(get_committed_data());

const auto after_create_get = preprocess_get(3);

ASSERT_FALSE(get_committed_data());

const auto set_request = std::make_shared<ZooKeeperSetRequest>();
set_request->path = path;
set_request->data = "new_data";
storage.preprocessRequest(set_request, 0, 0, 4);

const auto after_set_get = preprocess_get(5);

ASSERT_FALSE(get_committed_data());

const auto remove_request = std::make_shared<ZooKeeperRemoveRequest>();
remove_request->path = path;
storage.preprocessRequest(remove_request, 0, 0, 6);
storage.preprocessRequest(remove_request, 0, 0, 7);

const auto after_remove_get = preprocess_get(8);

ASSERT_FALSE(get_committed_data());

{
const auto responses = storage.processRequest(create_request, 0, 0, 1);
const auto & create_response = getSingleResponse<ZooKeeperCreateResponse>(responses);
ASSERT_EQ(create_response.error, Error::ZOK);
}

{
const auto responses = storage.processRequest(create_request, 0, 0, 2);
const auto & create_response = getSingleResponse<ZooKeeperCreateResponse>(responses);
ASSERT_EQ(create_response.error, Error::ZNODEEXISTS);
}

{
const auto responses = storage.processRequest(after_create_get, 0, 0, 3);
const auto & get_response = getSingleResponse<ZooKeeperGetResponse>(responses);
ASSERT_EQ(get_response.error, Error::ZOK);
ASSERT_EQ(get_response.data, "initial_data");
}

ASSERT_EQ(get_committed_data(), "initial_data");

{
const auto responses = storage.processRequest(set_request, 0, 0, 4);
const auto & create_response = getSingleResponse<ZooKeeperSetResponse>(responses);
ASSERT_EQ(create_response.error, Error::ZOK);
}

{
const auto responses = storage.processRequest(after_set_get, 0, 0, 5);
const auto & get_response = getSingleResponse<ZooKeeperGetResponse>(responses);
ASSERT_EQ(get_response.error, Error::ZOK);
ASSERT_EQ(get_response.data, "new_data");
}

ASSERT_EQ(get_committed_data(), "new_data");

{
const auto responses = storage.processRequest(remove_request, 0, 0, 6);
const auto & create_response = getSingleResponse<ZooKeeperRemoveResponse>(responses);
ASSERT_EQ(create_response.error, Error::ZOK);
}

{
const auto responses = storage.processRequest(remove_request, 0, 0, 7);
const auto & create_response = getSingleResponse<ZooKeeperRemoveResponse>(responses);
ASSERT_EQ(create_response.error, Error::ZNONODE);
}

{
const auto responses = storage.processRequest(after_remove_get, 0, 0, 8);
const auto & get_response = getSingleResponse<ZooKeeperGetResponse>(responses);
ASSERT_EQ(get_response.error, Error::ZNONODE);
}

ASSERT_FALSE(get_committed_data());
}


INSTANTIATE_TEST_SUITE_P(CoordinationTestSuite,
CoordinationTest,
@ -161,7 +161,7 @@ public:
task_info->deactivate();
}

operator bool() const { return task_info != nullptr; } /// NOLINT
explicit operator bool() const { return task_info != nullptr; }

BackgroundSchedulePoolTaskInfo * operator->() { return task_info.get(); }
const BackgroundSchedulePoolTaskInfo * operator->() const { return task_info.get(); }
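Making operator bool() explicit here (and on Block further down) keeps conditions working through contextual conversion while rejecting silent implicit conversions to bool; a sketch, where holder stands for the task holder shown in this hunk (hypothetical setup):

if (holder)                 /// still compiles: contextual conversion
    holder->deactivate();
/// bool active = holder;   /// would no longer compile with `explicit`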
@ -35,7 +35,7 @@ static ReturnType onError(const std::string & message [[maybe_unused]], int code
throw Exception(message, code);
else
return false;
};
}


template <typename ReturnType>
@ -601,6 +601,15 @@ NamesAndTypesList Block::getNamesAndTypesList() const
return res;
}

NamesAndTypes Block::getNamesAndTypes() const
{
NamesAndTypes res;

for (const auto & elem : data)
res.emplace_back(elem.name, elem.type);

return res;
}

Names Block::getNames() const
{
@ -756,6 +765,17 @@ void Block::updateHash(SipHash & hash) const
col.column->updateHashWithValue(row_no, hash);
}

Serializations Block::getSerializations() const
{
Serializations res;
res.reserve(data.size());

for (const auto & column : data)
res.push_back(column.type->getDefaultSerialization());

return res;
}

void convertToFullIfSparse(Block & block)
{
for (auto & column : block)
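Both helpers added above return one entry per column, in column order, so callers can zip them together; a minimal sketch assuming a Block named block is in scope:

const NamesAndTypes names_and_types = block.getNamesAndTypes();      /// name/type pair per column
const Serializations serializations = block.getSerializations();     /// default serialization per column
assert(names_and_types.size() == serializations.size());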
@ -89,11 +89,14 @@ public:

const ColumnsWithTypeAndName & getColumnsWithTypeAndName() const;
NamesAndTypesList getNamesAndTypesList() const;
NamesAndTypes getNamesAndTypes() const;
Names getNames() const;
DataTypes getDataTypes() const;
Names getDataTypeNames() const;
std::unordered_map<String, size_t> getNamesToIndexesMap() const;

Serializations getSerializations() const;

/// Returns number of rows from first column in block, not equal to nullptr. If no columns, returns 0.
size_t rows() const;

@ -108,7 +111,7 @@ public:
/// Approximate number of allocated bytes in memory - for profiling and limits.
size_t allocatedBytes() const;

operator bool() const { return !!columns(); } /// NOLINT
explicit operator bool() const { return !!columns(); }
bool operator!() const { return !this->operator bool(); } /// NOLINT

/** Get a list of column names separated by commas. */
@ -65,6 +65,13 @@ void BlockMissingValues::setBit(size_t column_idx, size_t row_idx)
mask[row_idx] = true;
}

void BlockMissingValues::setBits(size_t column_idx, size_t rows)
{
RowsBitMask & mask = rows_mask_by_column_id[column_idx];
mask.resize(rows);
std::fill(mask.begin(), mask.end(), true);
}

const BlockMissingValues::RowsBitMask & BlockMissingValues::getDefaultsBitmask(size_t column_idx) const
{
static RowsBitMask none;
@ -56,7 +56,10 @@ public:
const RowsBitMask & getDefaultsBitmask(size_t column_idx) const;
/// Check that we have to replace default value at least in one of columns
bool hasDefaultBits(size_t column_idx) const;
/// Set bit for a specified row in a single column.
void setBit(size_t column_idx, size_t row_idx);
/// Set bits for all rows in a single column.
void setBits(size_t column_idx, size_t rows);
bool empty() const { return rows_mask_by_column_id.empty(); }
size_t size() const { return rows_mask_by_column_id.size(); }
void clear() { rows_mask_by_column_id.clear(); }
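The new setBits complements setBit by marking every row of a column as default-filled in one call; a sketch with hypothetical indexes:

BlockMissingValues missing_values;
missing_values.setBit(/* column_idx = */ 0, /* row_idx = */ 5);  /// one row lacked a value
missing_values.setBits(/* column_idx = */ 1, /* rows = */ 100);  /// the whole column was defaulted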
@ -33,8 +33,6 @@

#define DEFAULT_TEMPORARY_LIVE_VIEW_TIMEOUT_SEC 5
#define DEFAULT_PERIODIC_LIVE_VIEW_REFRESH_SEC 60
#define DEFAULT_WINDOW_VIEW_CLEAN_INTERVAL_SEC 5
#define DEFAULT_WINDOW_VIEW_HEARTBEAT_INTERVAL_SEC 15
#define SHOW_CHARS_ON_SYNTAX_ERROR ptrdiff_t(160)
#define DBMS_CONNECTION_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES 3
/// each period reduces the error counter by 2 times
@ -435,8 +435,9 @@ static constexpr UInt64 operator""_GiB(unsigned long long value)
M(Seconds, live_view_heartbeat_interval, 15, "The heartbeat interval in seconds to indicate live query is alive.", 0) \
M(UInt64, max_live_view_insert_blocks_before_refresh, 64, "Limit maximum number of inserted blocks after which mergeable blocks are dropped and query is re-executed.", 0) \
M(Bool, allow_experimental_window_view, false, "Enable WINDOW VIEW. Not mature enough.", 0) \
M(Seconds, window_view_clean_interval, DEFAULT_WINDOW_VIEW_CLEAN_INTERVAL_SEC, "The clean interval of window view in seconds to free outdated data.", 0) \
M(Seconds, window_view_heartbeat_interval, DEFAULT_WINDOW_VIEW_HEARTBEAT_INTERVAL_SEC, "The heartbeat interval in seconds to indicate watch query is alive.", 0) \
M(Seconds, window_view_clean_interval, 60, "The clean interval of window view in seconds to free outdated data.", 0) \
M(Seconds, window_view_heartbeat_interval, 15, "The heartbeat interval in seconds to indicate watch query is alive.", 0) \
M(Seconds, wait_for_window_view_fire_signal_timeout, 10, "Timeout for waiting for window view fire signal in event time processing", 0) \
M(UInt64, min_free_disk_space_for_temporary_data, 0, "The minimum disk space to keep while writing temporary data used in external sorting and aggregation.", 0) \
\
M(DefaultDatabaseEngine, default_database_engine, DefaultDatabaseEngine::Atomic, "Default database engine.", 0) \
@ -586,6 +587,7 @@ static constexpr UInt64 operator""_GiB(unsigned long long value)
/** Experimental functions */ \
M(Bool, allow_experimental_funnel_functions, false, "Enable experimental functions for funnel analysis.", 0) \
M(Bool, allow_experimental_nlp_functions, false, "Enable experimental functions for natural language processing.", 0) \
M(Bool, allow_experimental_hash_functions, false, "Enable experimental hash functions (hashid, etc)", 0) \
M(Bool, allow_experimental_object_type, false, "Allow Object and JSON data types", 0) \
M(String, insert_deduplication_token, "", "If not empty, used for duplicate detection instead of data digest", 0) \
M(Bool, count_distinct_optimization, false, "Rewrite count distinct to subquery of group by", 0) \
@ -661,7 +663,7 @@ static constexpr UInt64 operator""_GiB(unsigned long long value)
M(Char, input_format_hive_text_map_keys_delimiter, '\x03', "Delimiter between a pair of map key/values in Hive Text File", 0) \
M(UInt64, input_format_msgpack_number_of_columns, 0, "The number of columns in inserted MsgPack data. Used for automatic schema inference from data.", 0) \
M(MsgPackUUIDRepresentation, output_format_msgpack_uuid_representation, FormatSettings::MsgPackUUIDRepresentation::EXT, "The way how to output UUID in MsgPack format.", 0) \
M(UInt64, input_format_max_rows_to_read_for_schema_inference, 100, "The maximum rows of data to read for automatic schema inference", 0) \
M(UInt64, input_format_max_rows_to_read_for_schema_inference, 25000, "The maximum rows of data to read for automatic schema inference", 0) \
M(Bool, input_format_csv_use_best_effort_in_schema_inference, true, "Use some tweaks and heuristics to infer schema in CSV format", 0) \
M(Bool, input_format_tsv_use_best_effort_in_schema_inference, true, "Use some tweaks and heuristics to infer schema in TSV format", 0) \
M(Bool, input_format_parquet_skip_columns_with_unsupported_types_in_schema_inference, false, "Allow to skip columns with unsupported types while schema inference for format Parquet", 0) \
@ -699,6 +701,7 @@ static constexpr UInt64 operator""_GiB(unsigned long long value)
M(Bool, output_format_pretty_color, true, "Use ANSI escape sequences to paint colors in Pretty formats", 0) \
M(String, output_format_pretty_grid_charset, "UTF-8", "Charset for printing grid borders. Available charsets: ASCII, UTF-8 (default one).", 0) \
M(UInt64, output_format_parquet_row_group_size, 1000000, "Row group size in rows.", 0) \
M(Bool, output_format_parquet_string_as_string, false, "Use Parquet String type instead of Binary for String columns.", 0) \
M(String, output_format_avro_codec, "", "Compression codec used for output. Possible values: 'null', 'deflate', 'snappy'.", 0) \
M(UInt64, output_format_avro_sync_interval, 16 * 1024, "Sync interval in bytes.", 0) \
M(String, output_format_avro_string_column_pattern, "", "For Avro format: regexp of String columns to select as AVRO string.", 0) \
@ -736,6 +739,9 @@ static constexpr UInt64 operator""_GiB(unsigned long long value)
M(UInt64, cross_to_inner_join_rewrite, 1, "Use inner join instead of comma/cross join if possible. Possible values: 0 - no rewrite, 1 - apply if possible, 2 - force rewrite all cross joins", 0) \
\
M(Bool, output_format_arrow_low_cardinality_as_dictionary, false, "Enable output LowCardinality type as Dictionary Arrow type", 0) \
M(Bool, output_format_arrow_string_as_string, false, "Use Arrow String type instead of Binary for String columns", 0) \
\
M(Bool, output_format_orc_string_as_string, false, "Use ORC String type instead of Binary for String columns", 0) \
\
M(EnumComparingMode, format_capn_proto_enum_comparising_mode, FormatSettings::EnumComparingMode::BY_VALUES, "How to map ClickHouse Enum and CapnProto Enum", 0) \
\
@ -451,7 +451,7 @@ static std::string createDirectory(const std::string & file)
return "";
fs::create_directories(path);
return path;
};
}


static bool tryCreateDirectories(Poco::Logger * logger, const std::string & path)
@ -794,7 +794,7 @@ static void addSignalHandler(const std::vector<int> & signals, signal_function h

if (out_handled_signals)
std::copy(signals.begin(), signals.end(), std::back_inserter(*out_handled_signals));
};
}


static void blockSignals(const std::vector<int> & signals)
@ -816,7 +816,7 @@ static void blockSignals(const std::vector<int> & signals)

if (pthread_sigmask(SIG_BLOCK, &sig_set, nullptr))
throw Poco::Exception("Cannot block signal.");
};
}


void BaseDaemon::initializeTerminationAndSignalProcessing()
@ -24,4 +24,4 @@ namespace SentryWriter
int sig,
const std::string & error_message,
const StackTrace & stack_trace);
};
}
@ -22,7 +22,7 @@ static auto typeFromString(const std::string & str)
{
auto & data_type_factory = DataTypeFactory::instance();
return data_type_factory.get(str);
};
}

static auto typesFromString(const std::string & str)
{
@ -33,7 +33,7 @@ static auto typesFromString(const std::string & str)
data_types.push_back(typeFromString(data_type));

return data_types;
};
}

struct TypesTestCase
{
@ -89,7 +89,7 @@ constexpr void callOnDictionaryAttributeType(AttributeUnderlyingType type, F &&
if (type == other)
func(DictionaryAttributeType<other>{});
});
};
}

struct DictionarySpecialAttribute final
{
@ -96,6 +96,7 @@ class ReservationDelegate : public IReservation
public:
ReservationDelegate(ReservationPtr delegate_, DiskPtr wrapper_) : delegate(std::move(delegate_)), wrapper(wrapper_) { }
UInt64 getSize() const override { return delegate->getSize(); }
UInt64 getUnreservedSpace() const override { return delegate->getUnreservedSpace(); }
DiskPtr getDisk(size_t) const override { return wrapper; }
Disks getDisks() const override { return {wrapper}; }
void update(UInt64 new_size) override { delegate->update(new_size); }
@ -182,6 +182,7 @@ public:
}

UInt64 getSize() const override { return reservation->getSize(); }
UInt64 getUnreservedSpace() const override { return reservation->getUnreservedSpace(); }

DiskPtr getDisk(size_t i) const override
{
@ -112,12 +112,15 @@ std::optional<size_t> fileSizeSafe(const fs::path & path)
class DiskLocalReservation : public IReservation
{
public:
DiskLocalReservation(const DiskLocalPtr & disk_, UInt64 size_)
: disk(disk_), size(size_), metric_increment(CurrentMetrics::DiskSpaceReservedForMerge, size_)
{
}
DiskLocalReservation(const DiskLocalPtr & disk_, UInt64 size_, UInt64 unreserved_space_)
: disk(disk_)
, size(size_)
, unreserved_space(unreserved_space_)
, metric_increment(CurrentMetrics::DiskSpaceReservedForMerge, size_)
{}

UInt64 getSize() const override { return size; }
UInt64 getUnreservedSpace() const override { return unreserved_space; }

DiskPtr getDisk(size_t i) const override
{
@ -165,6 +168,7 @@ public:
private:
DiskLocalPtr disk;
UInt64 size;
UInt64 unreserved_space;
CurrentMetrics::Increment metric_increment;
};

@ -201,32 +205,38 @@ private:

ReservationPtr DiskLocal::reserve(UInt64 bytes)
{
if (!tryReserve(bytes))
auto unreserved_space = tryReserve(bytes);
if (!unreserved_space.has_value())
return {};
return std::make_unique<DiskLocalReservation>(std::static_pointer_cast<DiskLocal>(shared_from_this()), bytes);
return std::make_unique<DiskLocalReservation>(
std::static_pointer_cast<DiskLocal>(shared_from_this()),
bytes, unreserved_space.value());
}

bool DiskLocal::tryReserve(UInt64 bytes)
std::optional<UInt64> DiskLocal::tryReserve(UInt64 bytes)
{
std::lock_guard lock(DiskLocal::reservation_mutex);

UInt64 available_space = getAvailableSpace();
UInt64 unreserved_space = available_space - std::min(available_space, reserved_bytes);

if (bytes == 0)
{
LOG_DEBUG(log, "Reserving 0 bytes on disk {}", backQuote(name));
++reservation_count;
return true;
return {unreserved_space};
}

auto available_space = getAvailableSpace();
UInt64 unreserved_space = available_space - std::min(available_space, reserved_bytes);
if (unreserved_space >= bytes)
{
LOG_DEBUG(log, "Reserving {} on disk {}, having unreserved {}.",
ReadableSize(bytes), backQuote(name), ReadableSize(unreserved_space));
++reservation_count;
reserved_bytes += bytes;
return true;
return {unreserved_space - bytes};
}
return false;

return {};
}

static UInt64 getTotalSpaceByName(const String & name, const String & disk_path, UInt64 keep_free_space_bytes)
@ -437,7 +447,7 @@ void DiskLocal::copy(const String & from_path, const std::shared_ptr<IDisk> & to
fs::copy(from, to, fs::copy_options::recursive | fs::copy_options::overwrite_existing); /// Use more optimal way.
}
else
copyThroughBuffers(from_path, to_disk, to_path); /// Base implementation.
copyThroughBuffers(from_path, to_disk, to_path, /* copy_root_dir */ true); /// Base implementation.
}

void DiskLocal::copyDirectoryContent(const String & from_dir, const std::shared_ptr<IDisk> & to_disk, const String & to_dir)
@ -445,7 +455,7 @@ void DiskLocal::copyDirectoryContent(const String & from_dir, const std::shared_
if (isSameDiskType(*this, *to_disk))
fs::copy(from_dir, to_dir, fs::copy_options::recursive | fs::copy_options::overwrite_existing); /// Use more optimal way.
else
copyThroughBuffers(from_dir, to_disk, to_dir); /// Base implementation.
copyThroughBuffers(from_dir, to_disk, to_dir, /* copy_root_dir */ false); /// Base implementation.
}

SyncGuardPtr DiskLocal::getDirectorySyncGuard(const String & path) const
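With the change above, tryReserve reports how much space remains unreserved after taking bytes (std::nullopt on failure), and reserve() forwards that value into DiskLocalReservation; a sketch of a hypothetical caller:

if (ReservationPtr reservation = disk->reserve(bytes))
    LOG_DEBUG(log, "Reserved {}, {} still unreserved",
        ReadableSize(reservation->getSize()), ReadableSize(reservation->getUnreservedSpace()));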
@ -121,7 +121,7 @@ public:
bool canWrite() const noexcept;

private:
bool tryReserve(UInt64 bytes);
std::optional<UInt64> tryReserve(UInt64 bytes);

/// Setup disk for healthy check. Returns true if it's read-write, false if read-only.
/// Throw exception if it's not possible to setup necessary files and directories.