Merge branch 'iaadeflate_upgrade_qpl_v1.3.0' of https://github.com/jinjunzh/ClickHouse into iaadeflate_upgrade_qpl_v1.3.0

jinjunzh 2023-11-29 08:42:43 -05:00
commit 984a587955
2496 changed files with 74923 additions and 44342 deletions


@@ -7,6 +7,8 @@ assignees: ''
---
> Please make sure that the version you're using is still supported (you can find the list [here](https://github.com/ClickHouse/ClickHouse/blob/master/SECURITY.md#scope-and-supported-versions)).
> You have to provide the following information whenever possible.
**Describe what's wrong**

.github/actions/clean/action.yml (new file, 11 lines)

@@ -0,0 +1,11 @@
name: Clean runner
description: Clean the runner's temp path on ending
runs:
using: "composite"
steps:
- name: Clean
shell: bash
run: |
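# `||:` below is shorthand for `|| true`: ignore a non-zero exit status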
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "${{runner.temp}}"
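The action above is invoked as a post-run step; a minimal sketch of a caller, mirroring the `Clean` steps of the reusable workflows later in this diff:

    - name: Clean
      if: always()
      uses: ./.github/actions/clean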

.github/actions/common_setup/action.yml (new file, 35 lines)

@@ -0,0 +1,35 @@
name: Common setup
description: Setup necessary environments
inputs:
job_type:
description: the name to use in the TEMP_PATH and REPO_COPY
default: common
type: string
nested_job:
description: the fuse for unintended use inside of the reusable callable jobs
default: true
type: boolean
runs:
using: "composite"
steps:
- name: Setup and check ENV
shell: bash
run: |
echo "Setup the common ENV variables"
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/${{inputs.job_type}}
REPO_COPY=${{runner.temp}}/${{inputs.job_type}}/git-repo-copy
IMAGES_PATH=${{runner.temp}}/images_path
REPORTS_PATH=${{runner.temp}}/reports_dir
EOF
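# fuse: fail fast when nested_job is left at its default `true` but the
# action is invoked outside a reusable workflow (reusable workflows set
# GITHUB_JOB_OVERRIDDEN, see reusable_build.yml / reusable_test.yml below)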
if [ -z "${{env.GITHUB_JOB_OVERRIDDEN}}" ] && [ "true" == "${{inputs.nested_job}}" ]; then
echo "The GITHUB_JOB_OVERRIDDEN ENV is unset, and must be set for the nested jobs"
exit 1
fi
- name: Setup $TEMP_PATH
shell: bash
run: |
# to remove any leftovers
sudo rm -fr "$TEMP_PATH"
mkdir -p "$REPO_COPY"
cp -a "$GITHUB_WORKSPACE"/. "$REPO_COPY"/
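A minimal sketch of a caller step, mirroring the `Common setup` steps in the reusable workflows later in this diff (`job_type` selects the sub-directory of `runner.temp`):

    - name: Common setup
      uses: ./.github/actions/common_setup
      with:
        job_type: build_check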


@@ -1,3 +1,4 @@
# yamllint disable rule:comments-indentation
name: BackportPR
env:
@@ -33,7 +34,12 @@ jobs:
- name: Python unit tests
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 -m unittest discover -s . -p '*_test.py'
echo "Testing the main ci directory"
python3 -m unittest discover -s . -p 'test_*.py'
for dir in *_lambda/; do
echo "Testing $dir"
python3 -m unittest discover -s "$dir" -p 'test_*.py'
done
DockerHubPushAarch64:
runs-on: [self-hosted, style-checker-aarch64]
needs: CheckLabels
@@ -69,7 +75,7 @@ jobs:
name: changed_images_amd64
path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
DockerHubPush:
needs: [DockerHubPushAmd64, DockerHubPushAarch64]
needs: [DockerHubPushAmd64, DockerHubPushAarch64, PythonUnitTests]
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
@@ -77,6 +83,7 @@ jobs:
with:
clear-repository: true
fetch-depth: 0 # to find ancestor merge commits necessary for finding proper docker tags
filter: tree:0
- name: Download changed aarch64 images
uses: actions/download-artifact@v3
with:
@@ -98,381 +105,64 @@ jobs:
path: ${{ runner.temp }}/changed_images.json
CompatibilityCheckX86:
needs: [BuilderDebRelease]
runs-on: [self-hosted, style-checker]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/compatibility_check
REPO_COPY=${{runner.temp}}/compatibility_check/ClickHouse
REPORTS_PATH=${{runner.temp}}/reports_dir
EOF
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: CompatibilityCheckX86
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 compatibility_check.py --check-name "Compatibility check (amd64)" --check-glibc --check-distributions
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Compatibility check X86
runner_type: style-checker
run_command: |
cd "$REPO_COPY/tests/ci"
python3 compatibility_check.py --check-name "Compatibility check (amd64)" --check-glibc --check-distributions
CompatibilityCheckAarch64:
needs: [BuilderDebAarch64]
runs-on: [self-hosted, style-checker]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/compatibility_check
REPO_COPY=${{runner.temp}}/compatibility_check/ClickHouse
REPORTS_PATH=${{runner.temp}}/reports_dir
EOF
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: CompatibilityCheckAarch64
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 compatibility_check.py --check-name "Compatibility check (aarch64)" --check-glibc
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Compatibility check Aarch64
runner_type: style-checker
run_command: |
cd "$REPO_COPY/tests/ci"
python3 compatibility_check.py --check-name "Compatibility check (aarch64)" --check-glibc
#########################################################################################
#################################### ORDINARY BUILDS ####################################
#########################################################################################
BuilderDebRelease:
needs: [DockerHubPush]
runs-on: [self-hosted, builder]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=package_release
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
fetch-depth: 0 # For a proper version and performance artifacts
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_release
checkout_depth: 0
BuilderDebAarch64:
needs: [DockerHubPush]
runs-on: [self-hosted, builder]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=package_aarch64
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
fetch-depth: 0 # For a proper version and performance artifacts
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_aarch64
checkout_depth: 0
BuilderDebAsan:
needs: [DockerHubPush]
runs-on: [self-hosted, builder]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=package_asan
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_asan
BuilderDebTsan:
needs: [DockerHubPush]
runs-on: [self-hosted, builder]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=package_tsan
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_tsan
BuilderDebDebug:
needs: [DockerHubPush]
runs-on: [self-hosted, builder]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=package_debug
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
run: |
rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
"$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
du -hs "$GITHUB_WORKSPACE/contrib" ||:
find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_debug
BuilderBinDarwin:
needs: [DockerHubPush]
runs-on: [self-hosted, builder]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=binary_darwin
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
fetch-depth: 0 # otherwise we will have no info about contributors
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
run: |
rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
"$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
du -hs "$GITHUB_WORKSPACE/contrib" ||:
find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
uses: ./.github/workflows/reusable_build.yml
with:
build_name: binary_darwin
checkout_depth: 0
BuilderBinDarwinAarch64:
needs: [DockerHubPush]
runs-on: [self-hosted, builder]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=binary_darwin_aarch64
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
fetch-depth: 0 # otherwise we will have no info about contributors
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
run: |
rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
"$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
du -hs "$GITHUB_WORKSPACE/contrib" ||:
find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
uses: ./.github/workflows/reusable_build.yml
with:
build_name: binary_darwin_aarch64
checkout_depth: 0
############################################################################################
##################################### Docker images #######################################
############################################################################################
@@ -487,6 +177,7 @@ jobs:
with:
clear-repository: true
fetch-depth: 0 # It MUST BE THE SAME for all dependencies and the job itself
filter: tree:0
- name: Check docker clickhouse/clickhouse-server building
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
@@ -504,303 +195,114 @@ jobs:
##################################### BUILD REPORTER #######################################
############################################################################################
BuilderReport:
if: ${{ success() || failure() }}
needs:
- BuilderDebRelease
- BuilderDebAarch64
- BuilderDebAsan
- BuilderDebTsan
- BuilderDebDebug
runs-on: [self-hosted, style-checker]
if: ${{ success() || failure() }}
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
CHECK_NAME=ClickHouse build check
REPORTS_PATH=${{runner.temp}}/reports_dir
TEMP_PATH=${{runner.temp}}/report_check
NEEDS_DATA_PATH=${{runner.temp}}/needs.json
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Report Builder
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cat > "$NEEDS_DATA_PATH" << 'EOF'
${{ toJSON(needs) }}
EOF
cd "$GITHUB_WORKSPACE/tests/ci"
python3 build_report_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
uses: ./.github/workflows/reusable_test.yml
with:
test_name: ClickHouse build check
runner_type: style-checker
additional_envs: |
NEEDS_DATA<<NDENV
${{ toJSON(needs) }}
NDENV
run_command: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 build_report_check.py "$CHECK_NAME"
BuilderSpecialReport:
if: ${{ success() || failure() }}
needs:
- BuilderBinDarwin
- BuilderBinDarwinAarch64
runs-on: [self-hosted, style-checker]
if: ${{ success() || failure() }}
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/report_check
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=ClickHouse special build check
NEEDS_DATA_PATH=${{runner.temp}}/needs.json
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Report Builder
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cat > "$NEEDS_DATA_PATH" << 'EOF'
${{ toJSON(needs) }}
EOF
cd "$GITHUB_WORKSPACE/tests/ci"
python3 build_report_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
uses: ./.github/workflows/reusable_test.yml
with:
test_name: ClickHouse special build check
runner_type: style-checker
additional_envs: |
NEEDS_DATA<<NDENV
${{ toJSON(needs) }}
NDENV
run_command: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 build_report_check.py "$CHECK_NAME"
############################################################################################
#################################### INSTALL PACKAGES ######################################
############################################################################################
InstallPackagesTestRelease:
needs: [BuilderDebRelease]
runs-on: [self-hosted, style-checker]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/test_install
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Install packages (amd64)
REPO_COPY=${{runner.temp}}/test_install/ClickHouse
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Test packages installation
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 install_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Install packages (amd64)
runner_type: style-checker
run_command: |
cd "$REPO_COPY/tests/ci"
python3 install_check.py "$CHECK_NAME"
InstallPackagesTestAarch64:
needs: [BuilderDebAarch64]
runs-on: [self-hosted, style-checker-aarch64]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/test_install
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Install packages (arm64)
REPO_COPY=${{runner.temp}}/test_install/ClickHouse
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Test packages installation
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 install_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Install packages (arm64)
runner_type: style-checker-aarch64
run_command: |
cd "$REPO_COPY/tests/ci"
python3 install_check.py "$CHECK_NAME"
##############################################################################################
########################### FUNCTIONAL STATELESS TESTS #######################################
##############################################################################################
FunctionalStatelessTestAsan:
needs: [BuilderDebAsan]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_debug
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (asan)
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
KILL_TIMEOUT=10800
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (asan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=10800
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
##############################################################################################
############################ FUNCTIONAL STATEFUL TESTS #######################################
##############################################################################################
FunctionalStatefulTestDebug:
needs: [BuilderDebDebug]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateful_debug
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateful tests (debug)
REPO_COPY=${{runner.temp}}/stateful_debug/ClickHouse
KILL_TIMEOUT=3600
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (debug)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
##############################################################################################
######################################### STRESS TESTS #######################################
##############################################################################################
StressTestTsan:
needs: [BuilderDebTsan]
# func testers have 16 cores + 128 GB memory
# while stress testers have 36 cores + 72 GB memory
# It would be better to have something like 32 cores + 128 GB,
# but such servers are almost unavailable as spot instances.
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stress_thread
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stress test (tsan)
REPO_COPY=${{runner.temp}}/stress_thread/ClickHouse
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Stress test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 stress_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (tsan)
runner_type: stress-tester
run_command: |
cd "$REPO_COPY/tests/ci"
python3 stress_check.py "$CHECK_NAME"
#############################################################################################
############################# INTEGRATION TESTS #############################################
#############################################################################################
IntegrationTestsRelease:
needs: [BuilderDebRelease]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/integration_tests_release
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (release)
REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Integration test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (release)
runner_type: stress-tester
batches: 4
run_command: |
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
FinishCheck:
needs:
- DockerHubPush


@@ -18,6 +18,7 @@ on: # yamllint disable-line rule:truthy
- 'docs/**'
- 'utils/check-style/aspell-ignore/**'
- 'tests/ci/docs_check.py'
- '.github/workflows/docs_check.yml'
jobs:
CheckLabels:
runs-on: [self-hosted, style-checker]
@@ -73,6 +74,7 @@ jobs:
with:
clear-repository: true
fetch-depth: 0 # to find ancestor merge commits necessary for finding proper docker tags
filter: tree:0
- name: Download changed aarch64 images
uses: actions/download-artifact@v3
with:
@@ -94,68 +96,30 @@ jobs:
path: ${{ runner.temp }}/changed_images.json
StyleCheck:
needs: DockerHubPush
runs-on: [self-hosted, style-checker]
if: ${{ success() || failure() }}
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{ runner.temp }}/style_check
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
RCSK
EOF
- name: Download changed images
# even if artifact does not exist, e.g. on `do not test` label or failed Docker job
continue-on-error: true
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.TEMP_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Style Check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
# We need the additional `&& ! cancelled()` to keep the job cancellable
if: ${{ success() || failure() || ( always() && ! cancelled() ) }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Style check
runner_type: style-checker
run_command: |
cd "$REPO_COPY/tests/ci"
python3 style_check.py
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
secrets:
secret_envs: |
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
RCSK
DocsCheck:
needs: DockerHubPush
runs-on: [self-hosted, func-tester-aarch64]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/docs_check
REPO_COPY=${{runner.temp}}/docs_check/ClickHouse
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.TEMP_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Docs Check
run: |
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 docs_check.py
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Docs check
runner_type: func-tester-aarch64
additional_envs: |
run_command: |
cd "$REPO_COPY/tests/ci"
python3 docs_check.py
FinishCheck:
needs:
- StyleCheck


@@ -11,58 +11,19 @@ on: # yamllint disable-line rule:truthy
workflow_call:
jobs:
KeeperJepsenRelease:
runs-on: [self-hosted, style-checker]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/keeper_jepsen
REPO_COPY=${{runner.temp}}/keeper_jepsen/ClickHouse
EOF
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0
- name: Jepsen Test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 jepsen_check.py keeper
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Jepsen keeper check
runner_type: style-checker
run_command: |
cd "$REPO_COPY/tests/ci"
python3 jepsen_check.py keeper
# ServerJepsenRelease:
# runs-on: [self-hosted, style-checker]
# if: ${{ always() }}
# needs: [KeeperJepsenRelease]
# steps:
# - name: Set envs
# run: |
# cat >> "$GITHUB_ENV" << 'EOF'
# TEMP_PATH=${{runner.temp}}/server_jepsen
# REPO_COPY=${{runner.temp}}/server_jepsen/ClickHouse
# EOF
# - name: Check out repository code
# uses: ClickHouse/checkout@v1
# with:
# clear-repository: true
# fetch-depth: 0
# - name: Jepsen Test
# run: |
# sudo rm -fr "$TEMP_PATH"
# mkdir -p "$TEMP_PATH"
# cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
# cd "$REPO_COPY/tests/ci"
# python3 jepsen_check.py server
# - name: Cleanup
# if: always()
# run: |
# docker ps --quiet | xargs --no-run-if-empty docker kill ||:
# docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
# sudo rm -fr "$TEMP_PATH"
# uses: ./.github/workflows/reusable_test.yml
# with:
# test_name: Jepsen server check
# runner_type: style-checker
# run_command: |
# cd "$REPO_COPY/tests/ci"
# python3 jepsen_check.py server


@@ -10,86 +10,17 @@ on: # yamllint disable-line rule:truthy
workflow_call:
jobs:
BuilderFuzzers:
runs-on: [self-hosted, builder]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=fuzzers
EOF
- name: Download changed images
# even if artifact does not exist, e.g. on `do not test` label or failed Docker job
continue-on-error: true
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
ref: ${{github.ref}}
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
uses: ./.github/workflows/reusable_build.yml
with:
build_name: fuzzers
libFuzzerTest:
needs: [BuilderFuzzers]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/libfuzzer
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=libFuzzer tests
REPO_COPY=${{runner.temp}}/libfuzzer/ClickHouse
KILL_TIMEOUT=10800
EOF
- name: Download changed images
# even if artifact does not exist, e.g. on `do not test` label or failed Docker job
continue-on-error: true
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.TEMP_PATH }}
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: libFuzzer test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 libfuzzer_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
uses: ./.github/workflows/reusable_test.yml
with:
test_name: libFuzzer tests
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=10800
run_command: |
cd "$REPO_COPY/tests/ci"
python3 libfuzzer_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"

File diff suppressed because it is too large.


@@ -54,6 +54,7 @@ jobs:
with:
clear-repository: true
fetch-depth: 0 # to find ancestor merge commits necessary for finding proper docker tags
filter: tree:0
- name: Download changed aarch64 images
uses: actions/download-artifact@v3
with:
@@ -73,9 +74,6 @@ jobs:
with:
name: changed_images
path: ${{ runner.temp }}/changed_images.json
Codebrowser:
needs: [DockerHubPush]
uses: ./.github/workflows/woboq.yml
SonarCloud:
runs-on: [self-hosted, builder]
env:
@@ -90,6 +88,7 @@ jobs:
with:
clear-repository: true
fetch-depth: 0 # Shallow clones should be disabled for a better relevancy of analysis
filter: tree:0
submodules: true
- name: Set up JDK 11
uses: actions/setup-java@v1

File diff suppressed because it is too large.


@@ -49,6 +49,7 @@ jobs:
with:
clear-repository: true
fetch-depth: 0 # otherwise we will have no version info
filter: tree:0
ref: ${{ env.GITHUB_TAG }}
- name: Check docker clickhouse/clickhouse-server building
run: |

File diff suppressed because it is too large.

.github/workflows/reusable_build.yml (new file, 79 lines)

@@ -0,0 +1,79 @@
### For the pure soul who wishes to move it to another place
# https://github.com/orgs/community/discussions/9050
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
name: Build ClickHouse
'on':
workflow_call:
inputs:
build_name:
description: the value of build type from tests/ci/ci_config.py
required: true
type: string
checkout_depth:
description: the value of the git shallow checkout
required: false
type: number
default: 1
runner_type:
description: the label of runner to use
default: builder
type: string
additional_envs:
description: additional ENV variables to setup the job
type: string
jobs:
Build:
name: Build-${{inputs.build_name}}
env:
GITHUB_JOB_OVERRIDDEN: Build-${{inputs.build_name}}
runs-on: [self-hosted, '${{inputs.runner_type}}']
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
fetch-depth: ${{inputs.checkout_depth}}
filter: tree:0
- name: Set build envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
${{inputs.additional_envs}}
EOF
python3 "$GITHUB_WORKSPACE"/tests/ci/ci_config.py --build-name "${{inputs.build_name}}" >> "$GITHUB_ENV"
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
# This step is done in GITHUB_WORKSPACE,
# because it's broken in REPO_COPY for some reason
if: ${{ env.BUILD_SPARSE_CHECKOUT == 'true' }}
run: |
rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
"$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
du -hs "$GITHUB_WORKSPACE/contrib" ||:
find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
- name: Common setup
uses: ./.github/actions/common_setup
with:
job_type: build_check
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Build
run: |
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Clean
if: always()
uses: ./.github/actions/clean
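For reference, a caller job from this same commit (see the BackportPR changes above); `checkout_depth: 0` requests a full clone so that version info and performance artifacts are available:

    BuilderDebRelease:
      needs: [DockerHubPush]
      uses: ./.github/workflows/reusable_build.yml
      with:
        build_name: package_release
        checkout_depth: 0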

.github/workflows/reusable_test.yml (new file, 113 lines)

@@ -0,0 +1,113 @@
### For the pure soul who wishes to move it to another place
# https://github.com/orgs/community/discussions/9050
name: Testing workflow
'on':
workflow_call:
inputs:
test_name:
description: the value of test type from tests/ci/ci_config.py, ends up as $CHECK_NAME ENV
required: true
type: string
runner_type:
description: the label of runner to use
required: true
type: string
run_command:
description: the command to launch the check. Usually starts with `cd '$REPO_COPY/tests/ci'`
required: true
type: string
batches:
description: how many batches for the test will be launched
default: 1
type: number
checkout_depth:
description: the value of the git shallow checkout
required: false
type: number
default: 1
submodules:
description: if the submodules should be checked out
required: false
type: boolean
default: false
additional_envs:
description: additional ENV variables to setup the job
type: string
secrets:
secret_envs:
description: if given, it's passed to the environments
required: false
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
CHECK_NAME: ${{inputs.test_name}}
jobs:
PrepareStrategy:
# batches < 1 is a misconfiguration,
# and we need this step only for batches > 1
if: ${{ inputs.batches > 1 }}
runs-on: [self-hosted, style-checker-aarch64]
outputs:
batches: ${{steps.batches.outputs.batches}}
steps:
- name: Calculate batches
id: batches
run: |
batches_output=$(python3 -c 'import json; print(json.dumps(list(range(${{inputs.batches}}))))')
echo "batches=${batches_output}" >> "$GITHUB_OUTPUT"
Test:
# If PrepareStrategy is skipped for batches == 1,
# we still need to launch the test.
# `! failure()` is mandatory here to launch even when PrepareStrategy is skipped
# `&& !cancelled()` to keep the job cancellable
if: ${{ ( !failure() && !cancelled() ) && inputs.batches > 0 }}
# Do not add `-0` to the end if there's only one batch
name: ${{inputs.test_name}}${{ inputs.batches > 1 && format('-{0}',matrix.batch) || '' }}
env:
GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}${{ inputs.batches > 1 && format('-{0}',matrix.batch) || '' }}
runs-on: [self-hosted, '${{inputs.runner_type}}']
needs: [PrepareStrategy]
strategy:
fail-fast: false # we always wait for entire matrix
matrix:
# if PrepareStrategy does not have batches, we use 0
batch: ${{ needs.PrepareStrategy.outputs.batches
&& fromJson(needs.PrepareStrategy.outputs.batches)
|| fromJson('[0]')}}
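# when PrepareStrategy was skipped (batches == 1), its `batches` output is
# empty and the `|| fromJson('[0]')` fallback yields the single batch 0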
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: ${{inputs.submodules}}
fetch-depth: ${{inputs.checkout_depth}}
filter: tree:0
- name: Set build envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
${{inputs.additional_envs}}
${{secrets.secret_envs}}
EOF
- name: Common setup
uses: ./.github/actions/common_setup
with:
job_type: test
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Setup batch
if: ${{ inputs.batches > 1}}
run: |
cat >> "$GITHUB_ENV" << 'EOF'
RUN_BY_HASH_NUM=${{matrix.batch}}
RUN_BY_HASH_TOTAL=${{inputs.batches}}
EOF
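# the check script is expected to pick its shard of the test set
# from RUN_BY_HASH_NUM out of RUN_BY_HASH_TOTAL batches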
- name: Run test
run: ${{inputs.run_command}}
- name: Clean
if: always()
uses: ./.github/actions/clean
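For reference, a batched caller from this same commit (the IntegrationTestsRelease job in the BackportPR workflow above); with `batches: 4` the matrix fans out into jobs named `Integration tests (release)-0` through `-3`:

    IntegrationTestsRelease:
      needs: [BuilderDebRelease]
      uses: ./.github/workflows/reusable_test.yml
      with:
        test_name: Integration tests (release)
        runner_type: stress-tester
        batches: 4
        run_command: |
          cd "$REPO_COPY/tests/ci"
          python3 integration_test_check.py "$CHECK_NAME"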


@@ -38,6 +38,7 @@ jobs:
with:
ref: master
fetch-depth: 0
filter: tree:0
- name: Update versions, docker version, changelog, security
env:
GITHUB_TOKEN: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}


@@ -1,44 +0,0 @@
name: WoboqBuilder
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
concurrency:
group: woboq
on: # yamllint disable-line rule:truthy
workflow_dispatch:
workflow_call:
jobs:
# don't use dockerhub push because this image updates so rarely
WoboqCodebrowser:
runs-on: [self-hosted, style-checker]
timeout-minutes: 420 # the task is pretty heavy, so there's an additional hour
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/codebrowser
REPO_COPY=${{runner.temp}}/codebrowser/ClickHouse
IMAGES_PATH=${{runner.temp}}/images_path
EOF
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: 'true'
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.IMAGES_PATH }}
- name: Codebrowser
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 codebrowser_check.py
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"

.gitmodules (8 lines changed)

@@ -1,3 +1,6 @@
# Please do not use 'branch = ...' tags with submodule entries. Such tags make updating submodules a
# little bit more convenient but they do *not* specify the tracked submodule branch. Thus, they are
# more confusing than useful.
[submodule "contrib/zstd"]
path = contrib/zstd
url = https://github.com/facebook/zstd
@@ -184,7 +187,7 @@
url = https://github.com/ClickHouse/nanodbc
[submodule "contrib/datasketches-cpp"]
path = contrib/datasketches-cpp
url = https://github.com/ClickHouse/datasketches-cpp
url = https://github.com/apache/datasketches-cpp
[submodule "contrib/yaml-cpp"]
path = contrib/yaml-cpp
url = https://github.com/ClickHouse/yaml-cpp
@@ -351,3 +354,6 @@
[submodule "contrib/aklomp-base64"]
path = contrib/aklomp-base64
url = https://github.com/aklomp/base64.git
[submodule "contrib/pocketfft"]
path = contrib/pocketfft
url = https://github.com/mreineck/pocketfft.git
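Per the note at the top of this file, a new submodule such as `contrib/pocketfft` is added without a `branch = ...` tag and pinned to a commit instead; a minimal sketch (the `<commit>` hash is a placeholder):

    git submodule add https://github.com/mreineck/pocketfft.git contrib/pocketfft
    git -C contrib/pocketfft checkout <commit>   # pin the tracked commit
    git add .gitmodules contrib/pocketfft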


@@ -1,4 +1,5 @@
### Table of Contents
**[ClickHouse release v23.10, 2023-11-02](#2310)**<br/>
**[ClickHouse release v23.9, 2023-09-28](#239)**<br/>
**[ClickHouse release v23.8 LTS, 2023-08-31](#238)**<br/>
**[ClickHouse release v23.7, 2023-07-27](#237)**<br/>
@@ -12,6 +13,184 @@
# 2023 Changelog
### ClickHouse release 23.10, 2023-11-02
#### Backward Incompatible Change
* There is no longer an option to automatically remove broken data parts. This closes [#55174](https://github.com/ClickHouse/ClickHouse/issues/55174). [#55184](https://github.com/ClickHouse/ClickHouse/pull/55184) ([Alexey Milovidov](https://github.com/alexey-milovidov)). [#55557](https://github.com/ClickHouse/ClickHouse/pull/55557) ([Jihyuk Bok](https://github.com/tomahawk28)).
* The obsolete in-memory data parts can no longer be read from the write-ahead log. If you have configured in-memory parts before, they have to be removed before the upgrade. [#55186](https://github.com/ClickHouse/ClickHouse/pull/55186) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove the integration with Meilisearch. Reason: it was compatible only with the old version 0.18. The recent version of Meilisearch changed the protocol and does not work anymore. Note: we would appreciate it if you help to return it back. [#55189](https://github.com/ClickHouse/ClickHouse/pull/55189) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Rename directory monitor concept into background INSERT. All the settings `*directory_monitor*` had been renamed to `distributed_background_insert*`. *Backward compatibility should be preserved* (since old settings had been added as an alias). [#55978](https://github.com/ClickHouse/ClickHouse/pull/55978) ([Azat Khuzhin](https://github.com/azat)).
* Do not interpret the `send_timeout` set on the client side as the `receive_timeout` on the server side and vice versa. [#56035](https://github.com/ClickHouse/ClickHouse/pull/56035) ([Azat Khuzhin](https://github.com/azat)).
* Comparison of time intervals with different units will throw an exception. This closes [#55942](https://github.com/ClickHouse/ClickHouse/issues/55942). You might have occasionally relied on the previous behavior, when the underlying numeric values were compared regardless of the units. [#56090](https://github.com/ClickHouse/ClickHouse/pull/56090) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Rewrote the experimental `S3Queue` table engine completely: changed the way we keep information in ZooKeeper, which allows making fewer ZooKeeper requests; added caching of the ZooKeeper state in cases when we know the state will not change; improved the polling of the S3 process to make it less aggressive; changed the way the TTL and the max set for tracked files are maintained, which is now a background process. Added `system.s3queue` and `system.s3queue_log` tables. Closes [#54998](https://github.com/ClickHouse/ClickHouse/issues/54998). [#54422](https://github.com/ClickHouse/ClickHouse/pull/54422) ([Kseniia Sumarokova](https://github.com/kssenii)).
#### New Feature
* Add function `arrayFold(accumulator, x1, ..., xn -> expression, initial, array1, ..., arrayn)` which applies a lambda function to multiple arrays of the same cardinality and collects the result in an accumulator. [#49794](https://github.com/ClickHouse/ClickHouse/pull/49794) ([Lirikl](https://github.com/Lirikl)).
* Support for `Npy` format. `SELECT * FROM file('example_array.npy', Npy)`. [#55982](https://github.com/ClickHouse/ClickHouse/pull/55982) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* If a table has a space-filling curve in its key, e.g., `ORDER BY mortonEncode(x, y)`, the conditions on its arguments, e.g., `x >= 10 AND x <= 20 AND y >= 20 AND y <= 30` can be used for indexing. A setting `analyze_index_with_space_filling_curves` is added to enable or disable this analysis. This closes [#41195](https://github.com/ClickHouse/ClickHouse/issue/41195). Continuation of [#4538](https://github.com/ClickHouse/ClickHouse/pull/4538). Continuation of [#6286](https://github.com/ClickHouse/ClickHouse/pull/6286). Continuation of [#28130](https://github.com/ClickHouse/ClickHouse/pull/28130). Continuation of [#41753](https://github.com/ClickHouse/ClickHouse/pull/#41753). [#55642](https://github.com/ClickHouse/ClickHouse/pull/55642) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Added a new setting, `force_optimize_projection_name`; it takes the name of a projection as an argument. If its value is set to a non-empty string, ClickHouse checks that this projection is used in the query at least once. Closes [#55331](https://github.com/ClickHouse/ClickHouse/issues/55331). [#56134](https://github.com/ClickHouse/ClickHouse/pull/56134) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Support asynchronous inserts with external data via the native protocol. Previously it worked only if the data was inlined into the query. [#54730](https://github.com/ClickHouse/ClickHouse/pull/54730) ([Anton Popov](https://github.com/CurtizJ)).
* Added aggregation function `lttb` which uses the [Largest-Triangle-Three-Buckets](https://skemman.is/bitstream/1946/15343/3/SS_MSthesis.pdf) algorithm for downsampling data for visualization. [#53145](https://github.com/ClickHouse/ClickHouse/pull/53145) ([Sinan](https://github.com/sinsinan)).
* The query `CHECK TABLE` has better performance and usability (sends progress updates, is cancellable). Support checking a particular part with `CHECK TABLE ... PART 'part_name'`. [#53404](https://github.com/ClickHouse/ClickHouse/pull/53404) ([vdimir](https://github.com/vdimir)).
* Added function `jsonMergePatch`. When working with JSON data as strings, it provides a way to merge these strings (of JSON objects) together to form a single string containing a single JSON object. [#54364](https://github.com/ClickHouse/ClickHouse/pull/54364) ([Memo](https://github.com/Joeywzr)).
* The second part of Kusto Query Language dialect support. [Phase 1 implementation](https://github.com/ClickHouse/ClickHouse/pull/37961) has been merged. [#42510](https://github.com/ClickHouse/ClickHouse/pull/42510) ([larryluogit](https://github.com/larryluogit)).
* Added a new SQL function, `arrayRandomSample(arr, k)` which returns a sample of k elements from the input array. Similar functionality could previously be achieved only with less convenient syntax, e.g. "SELECT arrayReduce('groupArraySample(3)', range(10))". [#54391](https://github.com/ClickHouse/ClickHouse/pull/54391) ([itayisraelov](https://github.com/itayisraelov)).
* Introduce `-ArgMin`/`-ArgMax` aggregate combinators which allow aggregating by min/max values only. One use case can be found in [#54818](https://github.com/ClickHouse/ClickHouse/issues/54818). This PR also reorganizes combinators into a dedicated folder. [#54947](https://github.com/ClickHouse/ClickHouse/pull/54947) ([Amos Bird](https://github.com/amosbird)).
* Allow to drop cache for Protobuf format with `SYSTEM DROP SCHEMA FORMAT CACHE [FOR Protobuf]`. [#55064](https://github.com/ClickHouse/ClickHouse/pull/55064) ([Aleksandr Musorin](https://github.com/AVMusorin)).
* Add external HTTP Basic authenticator. [#55199](https://github.com/ClickHouse/ClickHouse/pull/55199) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Added function `byteSwap` which reverses the bytes of unsigned integers. This is particularly useful for reversing values of types which are represented as unsigned integers internally such as IPv4. [#55211](https://github.com/ClickHouse/ClickHouse/pull/55211) ([Priyansh Agrawal](https://github.com/Priyansh121096)).
* Added function `formatQuery()` which returns a formatted version (possibly spanning multiple lines) of a SQL query string. Also added function `formatQuerySingleLine()` which does the same but the returned string will not contain linebreaks. [#55239](https://github.com/ClickHouse/ClickHouse/pull/55239) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Added `DWARF` input format that reads debug symbols from an ELF executable/library/object file. [#55450](https://github.com/ClickHouse/ClickHouse/pull/55450) ([Michael Kolupaev](https://github.com/al13n321)).
* Allow to save unparsed records and errors in RabbitMQ, NATS and FileLog engines. Add virtual columns `_error` and `_raw_message` (for NATS and RabbitMQ), `_raw_record` (for FileLog) that are filled when ClickHouse fails to parse a new record. The behaviour is controlled by the storage settings `nats_handle_error_mode` for NATS, `rabbitmq_handle_error_mode` for RabbitMQ, and `handle_error_mode` for FileLog, similar to `kafka_handle_error_mode`. If it's set to `default`, an exception will be thrown when ClickHouse fails to parse a record; if it's set to `stream`, the error and raw record will be saved into the virtual columns. Closes [#36035](https://github.com/ClickHouse/ClickHouse/issues/36035). [#55477](https://github.com/ClickHouse/ClickHouse/pull/55477) ([Kruglov Pavel](https://github.com/Avogar)).
* Keeper client improvement: add `get_all_children_number` command that returns the number of all children nodes under a specific path. [#55485](https://github.com/ClickHouse/ClickHouse/pull/55485) ([guoxiaolong](https://github.com/guoxiaolongzte)).
* Keeper client improvement: add `get_direct_children_number` command that returns the number of direct children nodes under a path. [#55898](https://github.com/ClickHouse/ClickHouse/pull/55898) ([xuzifu666](https://github.com/xuzifu666)).
* Add statement `SHOW SETTING setting_name` which is a simpler version of existing statement `SHOW SETTINGS`. [#55979](https://github.com/ClickHouse/ClickHouse/pull/55979) ([Maksim Kita](https://github.com/kitaisreal)).
* Added fields `substreams` and `filenames` to the `system.parts_columns` table. [#55108](https://github.com/ClickHouse/ClickHouse/pull/55108) ([Anton Popov](https://github.com/CurtizJ)).
* Add support for `SHOW MERGES` query. [#55815](https://github.com/ClickHouse/ClickHouse/pull/55815) ([megao](https://github.com/jetgm)).
* Introduce a setting `create_table_empty_primary_key_by_default` for default `ORDER BY ()`. [#55899](https://github.com/ClickHouse/ClickHouse/pull/55899) ([Srikanth Chekuri](https://github.com/srikanthccv)).
#### Performance Improvement
* Add option `query_plan_preserve_num_streams_after_window_functions` to preserve the number of streams after evaluating window functions to allow parallel stream processing. [#50771](https://github.com/ClickHouse/ClickHouse/pull/50771) ([frinkr](https://github.com/frinkr)).
* Release more streams if data is small. [#53867](https://github.com/ClickHouse/ClickHouse/pull/53867) ([Jiebin Sun](https://github.com/jiebinn)).
* RoaringBitmaps are now optimized before serialization. [#55044](https://github.com/ClickHouse/ClickHouse/pull/55044) ([UnamedRus](https://github.com/UnamedRus)).
* Posting lists in inverted indexes are now optimized to use the smallest possible representation for internal bitmaps. Depending on the repetitiveness of the data, this may significantly reduce the space consumption of inverted indexes. [#55069](https://github.com/ClickHouse/ClickHouse/pull/55069) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Fix contention on Context lock, this significantly improves performance for a lot of short-running concurrent queries. [#55121](https://github.com/ClickHouse/ClickHouse/pull/55121) ([Maksim Kita](https://github.com/kitaisreal)).
* Improved the performance of inverted index creation by 30%. This was achieved by replacing `std::unordered_map` with `absl::flat_hash_map`. [#55210](https://github.com/ClickHouse/ClickHouse/pull/55210) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Support ORC filter push down (rowgroup level). [#55330](https://github.com/ClickHouse/ClickHouse/pull/55330) ([李扬](https://github.com/taiyang-li)).
* Improve performance of external aggregation with a lot of temporary files. [#55489](https://github.com/ClickHouse/ClickHouse/pull/55489) ([Maksim Kita](https://github.com/kitaisreal)).
* Set a reasonable size for the marks cache for secondary indices by default to avoid loading the marks over and over again. [#55654](https://github.com/ClickHouse/ClickHouse/pull/55654) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Avoid unnecessary reconstruction of index granules when reading skip indexes. This addresses [#55653](https://github.com/ClickHouse/ClickHouse/issues/55653#issuecomment-1763766009). [#55683](https://github.com/ClickHouse/ClickHouse/pull/55683) ([Amos Bird](https://github.com/amosbird)).
* Cache CAST function in set during execution to improve the performance of function `IN` when set element type doesn't exactly match column type. [#55712](https://github.com/ClickHouse/ClickHouse/pull/55712) ([Duc Canh Le](https://github.com/canhld94)).
* Performance improvement for `ColumnVector::insertMany` and `ColumnVector::insertManyFrom`. [#55714](https://github.com/ClickHouse/ClickHouse/pull/55714) ([frinkr](https://github.com/frinkr)).
* Optimized Map subscript operations by predicting the next row's key position and reducing the comparisons. [#55929](https://github.com/ClickHouse/ClickHouse/pull/55929) ([lgbo](https://github.com/lgbo-ustc)).
* Support struct fields pruning in Parquet (in previous versions it didn't work in some cases). [#56117](https://github.com/ClickHouse/ClickHouse/pull/56117) ([lgbo](https://github.com/lgbo-ustc)).
* Add the ability to tune the number of parallel replicas used in a query execution based on the estimation of rows to read. [#51692](https://github.com/ClickHouse/ClickHouse/pull/51692) ([Raúl Marín](https://github.com/Algunenano)).
* Optimized external aggregation memory consumption in case many temporary files were generated. [#54798](https://github.com/ClickHouse/ClickHouse/pull/54798) ([Nikita Taranov](https://github.com/nickitat)).
* Distributed queries executed in `async_socket_for_remote` mode (default) now respect `max_threads` limit. Previously, some queries could create excessive threads (up to `max_distributed_connections`), causing server performance issues. [#53504](https://github.com/ClickHouse/ClickHouse/pull/53504) ([filimonov](https://github.com/filimonov)).
* Cache skippable entries while executing DDL from the ZooKeeper distributed DDL queue. [#54828](https://github.com/ClickHouse/ClickHouse/pull/54828) ([Duc Canh Le](https://github.com/canhld94)).
* Experimental inverted indexes no longer store tokens with too many matches (i.e. row ids in the posting list). This saves space and avoids ineffective index lookups when sequential scans would be equally fast or faster. The previous heuristic (the `density` parameter passed to the index definition) that controlled when tokens would not be stored was too confusing for users. A much simpler heuristic based on the parameter `max_rows_per_postings_list` (default: 64k) is introduced, which directly controls the maximum allowed number of row ids in a postings list. [#55616](https://github.com/ClickHouse/ClickHouse/pull/55616) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Improve write performance to `EmbeddedRocksDB` tables. [#55732](https://github.com/ClickHouse/ClickHouse/pull/55732) ([Duc Canh Le](https://github.com/canhld94)).
* Improved overall resilience for ClickHouse in case of many parts within a partition (more than 1000). It might reduce the number of `TOO_MANY_PARTS` errors. [#55526](https://github.com/ClickHouse/ClickHouse/pull/55526) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Reduced memory consumption during loading of hierarchical dictionaries. [#55838](https://github.com/ClickHouse/ClickHouse/pull/55838) ([Nikita Taranov](https://github.com/nickitat)).
* All dictionaries support setting `dictionary_use_async_executor`. [#55839](https://github.com/ClickHouse/ClickHouse/pull/55839) ([vdimir](https://github.com/vdimir)).
* Prevent excessive memory usage when deserializing AggregateFunctionTopKGenericData. [#55947](https://github.com/ClickHouse/ClickHouse/pull/55947) ([Raúl Marín](https://github.com/Algunenano)).
* On a Keeper with many watches, AsyncMetrics threads could consume 100% of CPU for a noticeable time in `DB::KeeperStorage::getSessionsWithWatchesCount()`. The fix is to avoid traversing the heavy `watches` and `list_watches` sets. [#56054](https://github.com/ClickHouse/ClickHouse/pull/56054) ([Alexander Gololobov](https://github.com/davenger)).
* Add setting `optimize_trivial_approximate_count_query` to use a `count()` approximation for the EmbeddedRocksDB storage, as sketched below. Enable trivial count for StorageJoin. [#55806](https://github.com/ClickHouse/ClickHouse/pull/55806) ([Duc Canh Le](https://github.com/canhld94)).
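
A sketch of the approximate-count setting, assuming a hypothetical `EmbeddedRocksDB` table named `kv`:

```sql
CREATE TABLE kv (key UInt64, value String)
ENGINE = EmbeddedRocksDB
PRIMARY KEY key;

-- With the setting enabled, count() can use RocksDB's estimated key count
-- instead of scanning the data, so the result may be approximate.
SELECT count() FROM kv
SETTINGS optimize_trivial_approximate_count_query = 1;
```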
#### Improvement
* Functions `toDayOfWeek()` (MySQL alias: `DAYOFWEEK()`), `toYearWeek()` (`YEARWEEK()`) and `toWeek()` (`WEEK()`) now support `String` arguments. This makes their behavior consistent with MySQL's behavior. [#55589](https://github.com/ClickHouse/ClickHouse/pull/55589) ([Robert Schulze](https://github.com/rschu1ze)).
* Introduced setting `date_time_overflow_behavior` with possible values `ignore`, `throw`, `saturate` that controls the overflow behavior when converting from Date, Date32, DateTime64, Integer or Float to Date, Date32, DateTime or DateTime64 (see the sketch after this list). [#55696](https://github.com/ClickHouse/ClickHouse/pull/55696) ([Andrey Zvonov](https://github.com/zvonand)).
* Implement query parameters support for `ALTER TABLE ... ACTION PARTITION [ID] {parameter_name:ParameterType}`. Merges [#49516](https://github.com/ClickHouse/ClickHouse/issues/49516). Closes [#49449](https://github.com/ClickHouse/ClickHouse/issues/49449). [#55604](https://github.com/ClickHouse/ClickHouse/pull/55604) ([alesapin](https://github.com/alesapin)).
* Print processor ids in a prettier manner in EXPLAIN. [#48852](https://github.com/ClickHouse/ClickHouse/pull/48852) ([Vlad Seliverstov](https://github.com/behebot)).
* Creating a direct dictionary with a lifetime field will be rejected at create time (as the lifetime does not make sense for direct dictionaries). Fixes: [#27861](https://github.com/ClickHouse/ClickHouse/issues/27861). [#49043](https://github.com/ClickHouse/ClickHouse/pull/49043) ([Rory Crispin](https://github.com/RoryCrispin)).
* Allow parameters in queries with partitions, like `ALTER TABLE t DROP PARTITION` (see the sketch after this list). Closes [#49449](https://github.com/ClickHouse/ClickHouse/issues/49449). [#49516](https://github.com/ClickHouse/ClickHouse/pull/49516) ([Nikolay Degterinsky](https://github.com/evillique)).
* Add a new column `xid` for `system.zookeeper_connection`. [#50702](https://github.com/ClickHouse/ClickHouse/pull/50702) ([helifu](https://github.com/helifu)).
* Display the correct server settings in `system.server_settings` after configuration reload. [#53774](https://github.com/ClickHouse/ClickHouse/pull/53774) ([helifu](https://github.com/helifu)).
* Add support for the mathematical minus character `−` in queries, similar to `-`. [#54100](https://github.com/ClickHouse/ClickHouse/pull/54100) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add replica groups to the experimental `Replicated` database engine. Closes [#53620](https://github.com/ClickHouse/ClickHouse/issues/53620). [#54421](https://github.com/ClickHouse/ClickHouse/pull/54421) ([Nikolay Degterinsky](https://github.com/evillique)).
* It is better to retry retriable S3 errors than to fail the whole query, so the default value of `s3_retry_attempts` was increased. [#54770](https://github.com/ClickHouse/ClickHouse/pull/54770) ([Sema Checherinda](https://github.com/CheSema)).
* Add load balancing mode `hostname_levenshtein_distance`. [#54826](https://github.com/ClickHouse/ClickHouse/pull/54826) ([JackyWoo](https://github.com/JackyWoo)).
* Improve hiding secrets in logs. [#55089](https://github.com/ClickHouse/ClickHouse/pull/55089) ([Vitaly Baranov](https://github.com/vitlibar)).
* From now on, the projection analysis will be performed only on top of the query plan. The setting `query_plan_optimize_projection` became obsolete (it was enabled by default a long time ago). [#55112](https://github.com/ClickHouse/ClickHouse/pull/55112) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* When the function `untuple` is called on a tuple with named elements and itself has an alias (e.g. `select untuple(tuple(1)::Tuple(element_alias Int)) AS untuple_alias`), the result column name is now generated from the untuple alias and the tuple element alias (in the example: "untuple_alias.element_alias"). [#55123](https://github.com/ClickHouse/ClickHouse/pull/55123) ([garcher22](https://github.com/garcher22)).
* Added setting `describe_include_virtual_columns`, which allows including virtual columns of a table in the result of a `DESCRIBE` query. Added setting `describe_compact_output`: if it is set to `true`, the `DESCRIBE` query returns only the names and types of columns, without extra information (see the sketch after this list). [#55129](https://github.com/ClickHouse/ClickHouse/pull/55129) ([Anton Popov](https://github.com/CurtizJ)).
* Sometimes `OPTIMIZE` with `optimize_throw_if_noop=1` could fail with the error `unknown reason`, while the real cause was different projections in different parts. This behavior is fixed. [#55130](https://github.com/ClickHouse/ClickHouse/pull/55130) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Allow several `MaterializedPostgreSQL` tables to follow the same Postgres table. By default this behavior is not enabled (for compatibility, because it is a backward-incompatible change), but it can be turned on with the setting `materialized_postgresql_use_unique_replication_consumer_identifier`. Closes [#54918](https://github.com/ClickHouse/ClickHouse/issues/54918). [#55145](https://github.com/ClickHouse/ClickHouse/pull/55145) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Allow parsing negative `DateTime64` and `DateTime` with a fractional part from short strings. [#55146](https://github.com/ClickHouse/ClickHouse/pull/55146) ([Andrey Zvonov](https://github.com/zvonand)).
* To improve compatibility with MySQL, 1. `information_schema.tables` now includes the new field `table_rows`, and 2. `information_schema.columns` now includes the new field `extra`. [#55215](https://github.com/ClickHouse/ClickHouse/pull/55215) ([Robert Schulze](https://github.com/rschu1ze)).
* clickhouse-client no longer shows "0 rows in set" when the result is empty and an exception was thrown. [#55240](https://github.com/ClickHouse/ClickHouse/pull/55240) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Support renaming tables without the keyword `TABLE`, e.g. `RENAME db.t1 TO db.t2`. [#55373](https://github.com/ClickHouse/ClickHouse/pull/55373) ([凌涛](https://github.com/lingtaolf)).
* Add `internal_replication` to `system.clusters`. [#55377](https://github.com/ClickHouse/ClickHouse/pull/55377) ([Konstantin Morozov](https://github.com/k-morozov)).
* Select remote proxy resolver based on request protocol, add proxy feature docs and remove `DB::ProxyConfiguration::Protocol::ANY`. [#55430](https://github.com/ClickHouse/ClickHouse/pull/55430) ([Arthur Passos](https://github.com/arthurpassos)).
* Avoid retrying keeper operations on INSERT after table shutdown. [#55519](https://github.com/ClickHouse/ClickHouse/pull/55519) ([Azat Khuzhin](https://github.com/azat)).
* `SHOW COLUMNS` now correctly reports type `FixedString` as `BLOB` if setting `use_mysql_types_in_show_columns` is on. Also added two new settings, `mysql_map_string_to_text_in_show_columns` and `mysql_map_fixed_string_to_text_in_show_columns` to switch the output for types `String` and `FixedString` as `TEXT` or `BLOB`. [#55617](https://github.com/ClickHouse/ClickHouse/pull/55617) ([Serge Klochkov](https://github.com/slvrtrn)).
* During startup of ReplicatedMergeTree tables, the ClickHouse server checks the set of parts for unexpected ones (which exist locally but not in ZooKeeper). All unexpected parts are moved to the detached directory, and the server tries to restore ancestor (covered) parts instead of them. Now the server tries to restore the closest ancestors instead of random covered parts. [#55645](https://github.com/ClickHouse/ClickHouse/pull/55645) ([alesapin](https://github.com/alesapin)).
* The advanced dashboard now supports draggable charts on touch devices. This closes [#54206](https://github.com/ClickHouse/ClickHouse/issues/54206). [#55649](https://github.com/ClickHouse/ClickHouse/pull/55649) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Use the default query format if declared when outputting exception with `http_write_exception_in_output_format`. [#55739](https://github.com/ClickHouse/ClickHouse/pull/55739) ([Raúl Marín](https://github.com/Algunenano)).
* Provide a better message for common MATERIALIZED VIEW pitfalls. [#55826](https://github.com/ClickHouse/ClickHouse/pull/55826) ([Raúl Marín](https://github.com/Algunenano)).
* If you dropped the current database, you will still be able to run some queries in `clickhouse-local` and switch to another database. This makes the behavior consistent with `clickhouse-client`. This closes [#55834](https://github.com/ClickHouse/ClickHouse/issues/55834). [#55853](https://github.com/ClickHouse/ClickHouse/pull/55853) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Functions `(add|subtract)(Year|Quarter|Month|Week|Day|Hour|Minute|Second|Millisecond|Microsecond|Nanosecond)` now support string-encoded date arguments, e.g. `SELECT addDays('2023-10-22', 1)`. This increases compatibility with MySQL and is needed by Tableau Online. [#55869](https://github.com/ClickHouse/ClickHouse/pull/55869) ([Robert Schulze](https://github.com/rschu1ze)).
* The setting `apply_deleted_mask`, when disabled, allows reading rows that were marked as deleted by lightweight DELETE queries (see the sketch after this list). This is useful for debugging. [#55952](https://github.com/ClickHouse/ClickHouse/pull/55952) ([Alexander Gololobov](https://github.com/davenger)).
* Allow skipping `null` values when serializing a Tuple to JSON objects, which makes it possible to keep compatibility with Spark's `to_json` function; this is also useful for Gluten. [#55956](https://github.com/ClickHouse/ClickHouse/pull/55956) ([李扬](https://github.com/taiyang-li)).
* Functions `(add|sub)Date()` now support string-encoded date arguments, e.g. `SELECT addDate('2023-10-22 11:12:13', INTERVAL 5 MINUTE)`. The same support for string-encoded date arguments is added to the plus and minus operators, e.g. `SELECT '2023-10-23' + INTERVAL 1 DAY`. This increases compatibility with MySQL and is needed by Tableau Online. [#55960](https://github.com/ClickHouse/ClickHouse/pull/55960) ([Robert Schulze](https://github.com/rschu1ze)).
* Allow unquoted strings with CR (`\r`) in CSV format. Closes [#39930](https://github.com/ClickHouse/ClickHouse/issues/39930). [#56046](https://github.com/ClickHouse/ClickHouse/pull/56046) ([Kruglov Pavel](https://github.com/Avogar)).
* Allow running `clickhouse-keeper` using an embedded config. [#56086](https://github.com/ClickHouse/ClickHouse/pull/56086) ([Maksim Kita](https://github.com/kitaisreal)).
* Set a limit on the maximum configuration value for `queued.min.messages` to avoid a problem with starting to fetch data from Kafka. [#56121](https://github.com/ClickHouse/ClickHouse/pull/56121) ([Stas Morozov](https://github.com/r3b-fish)).
* Fixed a typo in the SQL function `minSampleSizeContinous` (renamed to `minSampleSizeContinuous`). The old name is preserved for backward compatibility. This closes: [#56139](https://github.com/ClickHouse/ClickHouse/issues/56139). [#56143](https://github.com/ClickHouse/ClickHouse/pull/56143) ([Dorota Szeremeta](https://github.com/orotaday)).
* Print the path of a broken part on disk before shutting down the server. Before this change, if a part was corrupted on disk and the server could not start, it was almost impossible to understand which part was broken. This is fixed. [#56181](https://github.com/ClickHouse/ClickHouse/pull/56181) ([Duc Canh Le](https://github.com/canhld94)).
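
For the `date_time_overflow_behavior` item above, a minimal sketch of the three modes; the example date is arbitrary, and `Date` saturates at its maximum value 2149-06-06:

```sql
-- 2299-01-01 fits into Date32 but not into Date.
SELECT toDate(toDate32('2299-01-01'))
SETTINGS date_time_overflow_behavior = 'saturate';  -- 2149-06-06

SELECT toDate(toDate32('2299-01-01'))
SETTINGS date_time_overflow_behavior = 'throw';     -- raises an exception

SELECT toDate(toDate32('2299-01-01'))
SETTINGS date_time_overflow_behavior = 'ignore';    -- wraps around; the value is meaningless
```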
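
For the partition-parameter items above, a hedged sketch; the table `t` and the partition value are hypothetical, and the parameter can equally be passed to clickhouse-client as `--param_pid=...`:

```sql
-- Define the query parameter in the session.
SET param_pid = '2023';

-- The parameter is substituted using the {name:Type} syntax.
ALTER TABLE t DROP PARTITION {pid:String};
```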
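
The two new `DESCRIBE` settings, sketched on a hypothetical table `t`:

```sql
-- Compact output: only column names and types, no defaults, comments, or codecs.
DESCRIBE TABLE t SETTINGS describe_compact_output = 1;

-- Additionally list virtual columns, such as _part for MergeTree tables.
DESCRIBE TABLE t SETTINGS describe_include_virtual_columns = 1;
```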
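
And a sketch of `apply_deleted_mask` for debugging lightweight deletes, again on a hypothetical table `t`:

```sql
DELETE FROM t WHERE id = 42;      -- lightweight delete: rows are masked, not rewritten

SELECT count() FROM t;            -- masked rows are excluded by default

SELECT count() FROM t
SETTINGS apply_deleted_mask = 0;  -- masked rows become visible again
```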
#### Build/Testing/Packaging Improvement
* If the database in Docker is already initialized, it doesn't need to be initialized again upon subsequent launches. This can potentially fix the issue of infinite container restarts when the database fails to load within 1000 attempts (relevant for very large databases and multi-node setups). [#50724](https://github.com/ClickHouse/ClickHouse/pull/50724) ([Alexander Nikolaev](https://github.com/AlexNik)).
* A resource with the source code, including submodules, is built in the Darwin special build task. It may be used to build ClickHouse without checking out the submodules. [#51435](https://github.com/ClickHouse/ClickHouse/pull/51435) ([Ilya Yatsishin](https://github.com/qoega)).
* An error occurred when building ClickHouse with the AVX series of instructions enabled globally (which isn't recommended). The reason is that snappy does not enable `SNAPPY_HAVE_X86_CRC32`. [#55049](https://github.com/ClickHouse/ClickHouse/pull/55049) ([monchickey](https://github.com/monchickey)).
* Solve issue with launching standalone `clickhouse-keeper` from `clickhouse-server` package. [#55226](https://github.com/ClickHouse/ClickHouse/pull/55226) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* In the tests, the RabbitMQ version is updated to 3.12.6. Improved log collection for the RabbitMQ tests. [#55424](https://github.com/ClickHouse/ClickHouse/pull/55424) ([Ilya Yatsishin](https://github.com/qoega)).
* Handled the error-message difference between OpenSSL and BoringSSL to fix a functional test. [#55975](https://github.com/ClickHouse/ClickHouse/pull/55975) ([MeenaRenganathan22](https://github.com/MeenaRenganathan22)).
* Use the upstream repository for Apache DataSketches. [#55787](https://github.com/ClickHouse/ClickHouse/pull/55787) ([Nikita Taranov](https://github.com/nickitat)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Skip hardlinking inverted index files in mutation [#47663](https://github.com/ClickHouse/ClickHouse/pull/47663) ([cangyin](https://github.com/cangyin)).
* Fixed a bug where the `match` function (regex) with a pattern containing alternation produced an incorrect key condition (see the sketch after this list). Closes #53222. [#54696](https://github.com/ClickHouse/ClickHouse/pull/54696) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix 'Cannot find column' in read-in-order optimization with ARRAY JOIN [#51746](https://github.com/ClickHouse/ClickHouse/pull/51746) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Support missing experimental `Object(Nullable(json))` subcolumns in queries. [#54052](https://github.com/ClickHouse/ClickHouse/pull/54052) ([zps](https://github.com/VanDarkholme7)).
* Re-add fix for `accurateCastOrNull()` [#54629](https://github.com/ClickHouse/ClickHouse/pull/54629) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Fix detecting `DEFAULT` for columns of a Distributed table created without AS [#55060](https://github.com/ClickHouse/ClickHouse/pull/55060) ([Vitaly Baranov](https://github.com/vitlibar)).
* Proper cleanup in case of exception in ctor of ShellCommandSource [#55103](https://github.com/ClickHouse/ClickHouse/pull/55103) ([Alexander Gololobov](https://github.com/davenger)).
* Fix deadlock in LDAP assigned role update [#55119](https://github.com/ClickHouse/ClickHouse/pull/55119) ([Julian Maicher](https://github.com/jmaicher)).
* Suppress error statistics update for internal exceptions [#55128](https://github.com/ClickHouse/ClickHouse/pull/55128) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix deadlock in backups [#55132](https://github.com/ClickHouse/ClickHouse/pull/55132) ([alesapin](https://github.com/alesapin)).
* Fix storage Iceberg files retrieval [#55144](https://github.com/ClickHouse/ClickHouse/pull/55144) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix partition pruning of extra columns in set. [#55172](https://github.com/ClickHouse/ClickHouse/pull/55172) ([Amos Bird](https://github.com/amosbird)).
* Fix recalculation of skip indexes in ALTER UPDATE queries when table has adaptive granularity [#55202](https://github.com/ClickHouse/ClickHouse/pull/55202) ([Duc Canh Le](https://github.com/canhld94)).
* Fix for background download in fs cache [#55252](https://github.com/ClickHouse/ClickHouse/pull/55252) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Avoid possible memory leaks in compressors in case of missing buffer finalization [#55262](https://github.com/ClickHouse/ClickHouse/pull/55262) ([Azat Khuzhin](https://github.com/azat)).
* Fix functions execution over sparse columns [#55275](https://github.com/ClickHouse/ClickHouse/pull/55275) ([Azat Khuzhin](https://github.com/azat)).
* Fix incorrect merging of Nested for SELECT FINAL FROM SummingMergeTree [#55276](https://github.com/ClickHouse/ClickHouse/pull/55276) ([Azat Khuzhin](https://github.com/azat)).
* Fix bug with inability to drop detached partition in replicated merge tree on top of S3 without zero copy [#55309](https://github.com/ClickHouse/ClickHouse/pull/55309) ([alesapin](https://github.com/alesapin)).
* Fix a crash in MergeSortingPartialResultTransform (due to zero chunks after `remerge`) [#55335](https://github.com/ClickHouse/ClickHouse/pull/55335) ([Azat Khuzhin](https://github.com/azat)).
* Fix data-race in CreatingSetsTransform (on errors) due to throwing shared exception [#55338](https://github.com/ClickHouse/ClickHouse/pull/55338) ([Azat Khuzhin](https://github.com/azat)).
* Fix trash optimization (up to a certain extent) [#55353](https://github.com/ClickHouse/ClickHouse/pull/55353) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix leak in StorageHDFS [#55370](https://github.com/ClickHouse/ClickHouse/pull/55370) ([Azat Khuzhin](https://github.com/azat)).
* Fix parsing of arrays in cast operator [#55417](https://github.com/ClickHouse/ClickHouse/pull/55417) ([Anton Popov](https://github.com/CurtizJ)).
* Fix filtering by virtual columns with OR filter in query [#55418](https://github.com/ClickHouse/ClickHouse/pull/55418) ([Azat Khuzhin](https://github.com/azat)).
* Fix MongoDB connection issues [#55419](https://github.com/ClickHouse/ClickHouse/pull/55419) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix MySQL interface boolean representation [#55427](https://github.com/ClickHouse/ClickHouse/pull/55427) ([Serge Klochkov](https://github.com/slvrtrn)).
* Fix MySQL text protocol DateTime formatting and LowCardinality(Nullable(T)) types reporting [#55479](https://github.com/ClickHouse/ClickHouse/pull/55479) ([Serge Klochkov](https://github.com/slvrtrn)).
* Make `use_mysql_types_in_show_columns` affect only `SHOW COLUMNS` [#55481](https://github.com/ClickHouse/ClickHouse/pull/55481) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix stack symbolizer parsing `DW_FORM_ref_addr` incorrectly and sometimes crashing [#55483](https://github.com/ClickHouse/ClickHouse/pull/55483) ([Michael Kolupaev](https://github.com/al13n321)).
* Destroy fiber in case of exception in cancelBefore in AsyncTaskExecutor [#55516](https://github.com/ClickHouse/ClickHouse/pull/55516) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix Query Parameters not working with custom HTTP handlers [#55521](https://github.com/ClickHouse/ClickHouse/pull/55521) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Fix checking of non handled data for Values format [#55527](https://github.com/ClickHouse/ClickHouse/pull/55527) ([Azat Khuzhin](https://github.com/azat)).
* Fix 'Invalid cursor state' in odbc interacting with MS SQL Server [#55558](https://github.com/ClickHouse/ClickHouse/pull/55558) ([vdimir](https://github.com/vdimir)).
* Fix max execution time and 'break' overflow mode [#55577](https://github.com/ClickHouse/ClickHouse/pull/55577) ([Alexander Gololobov](https://github.com/davenger)).
* Fix crash in QueryNormalizer with cyclic aliases [#55602](https://github.com/ClickHouse/ClickHouse/pull/55602) ([vdimir](https://github.com/vdimir)).
* Disable wrong optimization and add a test [#55609](https://github.com/ClickHouse/ClickHouse/pull/55609) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Merging [#52352](https://github.com/ClickHouse/ClickHouse/issues/52352) [#55621](https://github.com/ClickHouse/ClickHouse/pull/55621) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test to avoid incorrect decimal sorting [#55662](https://github.com/ClickHouse/ClickHouse/pull/55662) ([Amos Bird](https://github.com/amosbird)).
* Fix progress bar for s3 and azure Cluster functions with url without globs [#55666](https://github.com/ClickHouse/ClickHouse/pull/55666) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix filtering by virtual columns with OR filter in query (resubmit) [#55678](https://github.com/ClickHouse/ClickHouse/pull/55678) ([Azat Khuzhin](https://github.com/azat)).
* Fixes and improvements for Iceberg storage [#55695](https://github.com/ClickHouse/ClickHouse/pull/55695) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix data race in CreatingSetsTransform (v2) [#55786](https://github.com/ClickHouse/ClickHouse/pull/55786) ([Azat Khuzhin](https://github.com/azat)).
* Throw exception when parsing illegal string as float if precise_float_parsing is true [#55861](https://github.com/ClickHouse/ClickHouse/pull/55861) ([李扬](https://github.com/taiyang-li)).
* Disable predicate pushdown if the CTE contains stateful functions [#55871](https://github.com/ClickHouse/ClickHouse/pull/55871) ([Raúl Marín](https://github.com/Algunenano)).
* Fix normalize ASTSelectWithUnionQuery, as it was stripping `FORMAT` from the query [#55887](https://github.com/ClickHouse/ClickHouse/pull/55887) ([flynn](https://github.com/ucasfl)).
* Try to fix possible segfault in Native ORC input format [#55891](https://github.com/ClickHouse/ClickHouse/pull/55891) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix window functions in case of sparse columns. [#55895](https://github.com/ClickHouse/ClickHouse/pull/55895) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* fix: StorageNull supports subcolumns [#55912](https://github.com/ClickHouse/ClickHouse/pull/55912) ([FFish](https://github.com/wxybear)).
* Do not write retriable errors for Replicated mutate/merge into error log [#55944](https://github.com/ClickHouse/ClickHouse/pull/55944) ([Azat Khuzhin](https://github.com/azat)).
* Fix `SHOW DATABASES LIMIT <N>` [#55962](https://github.com/ClickHouse/ClickHouse/pull/55962) ([Raúl Marín](https://github.com/Algunenano)).
* Fix autogenerated Protobuf schema with fields with underscore [#55974](https://github.com/ClickHouse/ClickHouse/pull/55974) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix dateTime64ToSnowflake64() with non-default scale [#55983](https://github.com/ClickHouse/ClickHouse/pull/55983) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix output/input of Arrow dictionary column [#55989](https://github.com/ClickHouse/ClickHouse/pull/55989) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix fetching schema from schema registry in AvroConfluent [#55991](https://github.com/ClickHouse/ClickHouse/pull/55991) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix 'Block structure mismatch' on concurrent ALTER and INSERTs in Buffer table [#55995](https://github.com/ClickHouse/ClickHouse/pull/55995) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix incorrect free space accounting for least_used JBOD policy [#56030](https://github.com/ClickHouse/ClickHouse/pull/56030) ([Azat Khuzhin](https://github.com/azat)).
* Fix missing scalar issue when evaluating subqueries inside table functions [#56057](https://github.com/ClickHouse/ClickHouse/pull/56057) ([Amos Bird](https://github.com/amosbird)).
* Fix wrong query result when http_write_exception_in_output_format=1 [#56135](https://github.com/ClickHouse/ClickHouse/pull/56135) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix schema cache for fallback JSON->JSONEachRow with changed settings [#56172](https://github.com/ClickHouse/ClickHouse/pull/56172) ([Kruglov Pavel](https://github.com/Avogar)).
* Add error handler to odbc-bridge [#56185](https://github.com/ClickHouse/ClickHouse/pull/56185) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
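
For the `match` key-condition fix above, a hedged sketch; the table and URLs are hypothetical. With the primary key on `url`, the alternation in the pattern previously could be turned into a wrong key condition and prune granules that actually matched:

```sql
CREATE TABLE hits (url String) ENGINE = MergeTree ORDER BY url;

-- A regex with alternation ('|'); before the fix this could skip matching rows.
SELECT count() FROM hits
WHERE match(url, '^https://clickhouse\\.com/|^https://github\\.com/');
```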
### ClickHouse release 23.9, 2023-09-28
#### Backward Incompatible Change

View File

@ -21,8 +21,11 @@ include (cmake/clang_tidy.cmake)
include (cmake/git.cmake)
include (cmake/utils.cmake)
# This is needed to set up the CMAKE_INSTALL_BINDIR variable.
include (GNUInstallDirs)
# Ignore export() since we don't use it,
# but it gets broken with a global targets via link_libraries()
# but it gets broken with global targets via link_libraries()
macro (export)
endmacro ()
@ -164,7 +167,7 @@ if (OS_LINUX)
# and whatever is poisoning it by LD_PRELOAD should not link to our symbols.
# - The clickhouse-odbc-bridge and clickhouse-library-bridge binaries
# should not expose their symbols to ODBC drivers and libraries.
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-export-dynamic")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-export-dynamic -Wl,--gc-sections")
endif ()
if (OS_DARWIN)
@ -187,9 +190,10 @@ if (NOT CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE")
endif ()
endif()
if (CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE"
OR CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO"
OR CMAKE_BUILD_TYPE_UC STREQUAL "MINSIZEREL")
if (NOT (SANITIZE_COVERAGE OR WITH_COVERAGE)
AND (CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE"
OR CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO"
OR CMAKE_BUILD_TYPE_UC STREQUAL "MINSIZEREL"))
set (OMIT_HEAVY_DEBUG_SYMBOLS_DEFAULT ON)
else()
set (OMIT_HEAVY_DEBUG_SYMBOLS_DEFAULT OFF)
@ -273,6 +277,11 @@ option (ENABLE_BUILD_PROFILING "Enable profiling of build time" OFF)
if (ENABLE_BUILD_PROFILING)
if (COMPILER_CLANG)
set (COMPILER_FLAGS "${COMPILER_FLAGS} -ftime-trace")
if (LINKER_NAME MATCHES "lld")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--time-trace")
set (CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -Wl,--time-trace")
endif ()
else ()
message (${RECONFIGURE_MESSAGE_LEVEL} "Build profiling is only available with CLang")
endif ()
@ -286,9 +295,6 @@ set (CMAKE_C_STANDARD 11)
set (CMAKE_C_EXTENSIONS ON) # required by most contribs written in C
set (CMAKE_C_STANDARD_REQUIRED ON)
# Compiler-specific coverage flags e.g. -fcoverage-mapping
option(WITH_COVERAGE "Profile the resulting binary/binaries" OFF)
if (COMPILER_CLANG)
# Enable C++14 sized global deallocation functions. It should be enabled by setting -std=c++14 but I'm not sure.
# See https://reviews.llvm.org/D112921
@ -304,26 +310,20 @@ if (COMPILER_CLANG)
set(BRANCHES_WITHIN_32B_BOUNDARIES "-mbranches-within-32B-boundaries")
set(COMPILER_FLAGS "${COMPILER_FLAGS} ${BRANCHES_WITHIN_32B_BOUNDARIES}")
endif()
if (WITH_COVERAGE)
set(COMPILER_FLAGS "${COMPILER_FLAGS} -fprofile-instr-generate -fcoverage-mapping")
# If we want to disable coverage for specific translation units
set(WITHOUT_COVERAGE "-fno-profile-instr-generate -fno-coverage-mapping")
endif()
endif ()
set (COMPILER_FLAGS "${COMPILER_FLAGS}")
# Our built-in unwinder only supports DWARF version up to 4.
set (DEBUG_INFO_FLAGS "-g -gdwarf-4")
set (DEBUG_INFO_FLAGS "-g")
# Disable omit frame pointer compiler optimization using -fno-omit-frame-pointer
option(DISABLE_OMIT_FRAME_POINTER "Disable omit frame pointer compiler optimization" OFF)
if (DISABLE_OMIT_FRAME_POINTER)
set (CMAKE_CXX_FLAGS_ADD "${CMAKE_CXX_FLAGS_ADD} -fno-omit-frame-pointer")
set (CMAKE_C_FLAGS_ADD "${CMAKE_C_FLAGS_ADD} -fno-omit-frame-pointer")
set (CMAKE_ASM_FLAGS_ADD "${CMAKE_ASM_FLAGS_ADD} -fno-omit-frame-pointer")
set (CMAKE_CXX_FLAGS_ADD "${CMAKE_CXX_FLAGS_ADD} -fno-omit-frame-pointer -mno-omit-leaf-frame-pointer")
set (CMAKE_C_FLAGS_ADD "${CMAKE_C_FLAGS_ADD} -fno-omit-frame-pointer -mno-omit-leaf-frame-pointer")
set (CMAKE_ASM_FLAGS_ADD "${CMAKE_ASM_FLAGS_ADD} -fno-omit-frame-pointer -mno-omit-leaf-frame-pointer")
endif()
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMPILER_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
@ -463,14 +463,6 @@ endif ()
message (STATUS "Building for: ${CMAKE_SYSTEM} ${CMAKE_SYSTEM_PROCESSOR} ${CMAKE_LIBRARY_ARCHITECTURE}")
include (GNUInstallDirs)
# When testing for memory leaks with Valgrind, don't link tcmalloc or jemalloc.
if (TARGET global-group)
install (EXPORT global DESTINATION cmake)
endif ()
add_subdirectory (contrib EXCLUDE_FROM_ALL)
if (NOT ENABLE_JEMALLOC)
@ -554,10 +546,16 @@ if (ENABLE_RUST)
endif()
endif()
if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND NOT SANITIZE AND OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64))
set(CHECK_LARGE_OBJECT_SIZES_DEFAULT ON)
else ()
set(CHECK_LARGE_OBJECT_SIZES_DEFAULT OFF)
endif ()
option(CHECK_LARGE_OBJECT_SIZES "Check that there are no large object files after build." ${CHECK_LARGE_OBJECT_SIZES_DEFAULT})
add_subdirectory (base)
add_subdirectory (src)
add_subdirectory (programs)
add_subdirectory (tests)
add_subdirectory (utils)
if (FUZZER)

View File

@ -1,6 +1,17 @@
[<img alt="ClickHouse — open source distributed column-oriented DBMS" width="400px" src="https://clickhouse.com/images/ch_gh_logo_rounded.png" />](https://clickhouse.com?utm_source=github)
<div align=center>
ClickHouse® is an open-source column-oriented database management system that allows generating analytical data reports in real-time.
[![Website](https://img.shields.io/website?up_message=AVAILABLE&down_message=DOWN&url=https%3A%2F%2Fclickhouse.com&style=for-the-badge)](https://clickhouse.com)
[![Apache 2.0 License](https://img.shields.io/badge/license-Apache%202.0-blueviolet?style=for-the-badge)](https://www.apache.org/licenses/LICENSE-2.0)
<picture align=center>
<source media="(prefers-color-scheme: dark)" srcset="https://github.com/ClickHouse/clickhouse-docs/assets/9611008/4ef9c104-2d3f-4646-b186-507358d2fe28">
<source media="(prefers-color-scheme: light)" srcset="https://github.com/ClickHouse/clickhouse-docs/assets/9611008/b001dc7b-5a45-4dcd-9275-e03beb7f9177">
<img alt="The ClickHouse company logo." src="https://github.com/ClickHouse/clickhouse-docs/assets/9611008/b001dc7b-5a45-4dcd-9275-e03beb7f9177">
</picture>
<h4>ClickHouse® is an open-source column-oriented database management system that allows generating analytical data reports in real-time.</h4>
</div>
## How To Install (Linux, macOS, FreeBSD)
```
@ -22,11 +33,6 @@ curl https://clickhouse.com/ | sh
## Upcoming Events
* [**v23.9 Community Call**]([https://clickhouse.com/company/events/v23-8-community-release-call](https://clickhouse.com/company/events/v23-9-community-release-call)?utm_source=github&utm_medium=social&utm_campaign=release-webinar-2023-08) - Sep 28 - 23.9 is rapidly approaching. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release.
* [**ClickHouse Meetup in Amsterdam**](https://www.meetup.com/clickhouse-netherlands-user-group/events/296334590/) - Oct 31
* [**ClickHouse Meetup in Beijing**](https://www.meetup.com/clickhouse-beijing-user-group/events/296334856/) - Nov 4
* [**ClickHouse Meetup in San Francisco**](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/296334923/) - Nov 8
* [**ClickHouse Meetup in Singapore**](https://www.meetup.com/clickhouse-singapore-meetup-group/events/296334976/) - Nov 15
* [**ClickHouse Meetup in Berlin**](https://www.meetup.com/clickhouse-berlin-user-group/events/296488501/) - Nov 30
* [**ClickHouse Meetup in NYC**](https://www.meetup.com/clickhouse-new-york-user-group/events/296488779/) - Dec 11
* [**ClickHouse Meetup in Boston**](https://www.meetup.com/clickhouse-boston-user-group/events/296488840/) - Dec 12
@ -35,7 +41,7 @@ Also, keep an eye out for upcoming meetups around the world. Somewhere else you
## Recent Recordings
* **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Current featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"
* **Recording available**: [**v23.6 Release Webinar**](https://www.youtube.com/watch?v=cuf_hYn7dqU) All the features of 23.6, one convenient video! Watch it now!
* **Recording available**: [**v23.10 Release Webinar**](https://www.youtube.com/watch?v=PGQS6uPb970) All the features of 23.10, one convenient video! Watch it now!
* **All release webinar recordings**: [YouTube playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3jAlSy1JxyP8zluvXaN3nxU)
@ -45,4 +51,4 @@ We are a globally diverse and distributed team, united behind a common goal of c
Check out our **current openings** here: https://clickhouse.com/company/careers
Cant find what you are looking for, but want to let us know you are interested in joining ClickHouse? Email careers@clickhouse.com!
Can't find what you are looking for, but want to let us know you are interested in joining ClickHouse? Email careers@clickhouse.com!

View File

@ -13,9 +13,10 @@ The following versions of ClickHouse server are currently being supported with s
| Version | Supported |
|:-|:-|
| 23.10 | ✔️ |
| 23.9 | ✔️ |
| 23.8 | ✔️ |
| 23.7 | ✔️ |
| 23.7 | ❌ |
| 23.6 | ❌ |
| 23.5 | ❌ |
| 23.4 | ❌ |

View File

@ -1,3 +1,5 @@
add_compile_options($<$<OR:$<COMPILE_LANGUAGE:C>,$<COMPILE_LANGUAGE:CXX>>:${COVERAGE_FLAGS}>)
if (USE_CLANG_TIDY)
set (CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_PATH}")
endif ()
@ -45,6 +47,10 @@ else ()
target_compile_definitions(common PUBLIC WITH_COVERAGE=0)
endif ()
if (TARGET ch_contrib::crc32_s390x)
target_link_libraries(common PUBLIC ch_contrib::crc32_s390x)
endif()
target_include_directories(common PUBLIC .. "${CMAKE_CURRENT_BINARY_DIR}/..")
target_link_libraries (common

View File

@ -35,6 +35,10 @@
#pragma clang diagnostic ignored "-Wreserved-identifier"
#endif
#if defined(__s390x__)
#include <base/crc32c_s390x.h>
#define CRC_INT s390x_crc32c
#endif
/**
* The std::string_view-like container to avoid creating strings to find substrings in the hash table.
@ -264,8 +268,8 @@ inline size_t hashLessThan8(const char * data, size_t size)
if (size >= 4)
{
UInt64 a = unalignedLoad<uint32_t>(data);
return hashLen16(size + (a << 3), unalignedLoad<uint32_t>(data + size - 4));
UInt64 a = unalignedLoadLittleEndian<uint32_t>(data);
return hashLen16(size + (a << 3), unalignedLoadLittleEndian<uint32_t>(data + size - 4));
}
if (size > 0)
@ -285,8 +289,8 @@ inline size_t hashLessThan16(const char * data, size_t size)
{
if (size > 8)
{
UInt64 a = unalignedLoad<UInt64>(data);
UInt64 b = unalignedLoad<UInt64>(data + size - 8);
UInt64 a = unalignedLoadLittleEndian<UInt64>(data);
UInt64 b = unalignedLoadLittleEndian<UInt64>(data + size - 8);
return hashLen16(a, rotateByAtLeast1(b + size, static_cast<UInt8>(size))) ^ b;
}
@ -315,13 +319,13 @@ struct CRC32Hash
do
{
UInt64 word = unalignedLoad<UInt64>(pos);
UInt64 word = unalignedLoadLittleEndian<UInt64>(pos);
res = static_cast<unsigned>(CRC_INT(res, word));
pos += 8;
} while (pos + 8 < end);
UInt64 word = unalignedLoad<UInt64>(end - 8); /// I'm not sure if this is normal.
UInt64 word = unalignedLoadLittleEndian<UInt64>(end - 8); /// I'm not sure if this is normal.
res = static_cast<unsigned>(CRC_INT(res, word));
return res;

View File

@ -1,11 +1,15 @@
#include "coverage.h"
#if WITH_COVERAGE
#pragma GCC diagnostic ignored "-Wreserved-identifier"
# include <mutex>
# include <unistd.h>
/// WITH_COVERAGE enables the default implementation of code coverage,
/// that dumps a map to the filesystem.
#if WITH_COVERAGE
#include <mutex>
#include <unistd.h>
# if defined(__clang__)
@ -31,3 +35,131 @@ void dumpCoverageReportIfPossible()
#endif
}
/// SANITIZE_COVERAGE enables code instrumentation,
/// but leaves the callbacks implementation to us,
/// which we use to calculate coverage on a per-test basis
/// and to write it to system tables.
#if defined(SANITIZE_COVERAGE)
namespace
{
bool pc_guards_initialized = false;
bool pc_table_initialized = false;
uint32_t * guards_start = nullptr;
uint32_t * guards_end = nullptr;
uintptr_t * coverage_array = nullptr;
size_t coverage_array_size = 0;
uintptr_t * all_addresses_array = nullptr;
size_t all_addresses_array_size = 0;
}
extern "C"
{
/// This is called at least once for every DSO for initialization.
/// But we will use it only for the main DSO.
void __sanitizer_cov_trace_pc_guard_init(uint32_t * start, uint32_t * stop)
{
if (pc_guards_initialized)
return;
pc_guards_initialized = true;
/// The function can be called multiple times, but we need to initialize only once.
if (start == stop || *start)
return;
guards_start = start;
guards_end = stop;
coverage_array_size = stop - start;
/// Note: we will leak this.
coverage_array = static_cast<uintptr_t*>(malloc(sizeof(uintptr_t) * coverage_array_size));
resetCoverage();
}
/// This is called at least once for every DSO for initialization
/// and provides information about all instrumented addresses.
void __sanitizer_cov_pcs_init(const uintptr_t * pcs_begin, const uintptr_t * pcs_end)
{
if (pc_table_initialized)
return;
pc_table_initialized = true;
all_addresses_array = static_cast<uintptr_t*>(malloc(sizeof(uintptr_t) * coverage_array_size));
all_addresses_array_size = pcs_end - pcs_begin;
/// These are not real pointers: they also contain a flag in the most significant bit,
/// which we are not interested in for now. Reset it.
for (size_t i = 0; i < all_addresses_array_size; ++i)
all_addresses_array[i] = pcs_begin[i] & 0x7FFFFFFFFFFFFFFFULL;
}
/// This is called at every basic block / edge, etc.
void __sanitizer_cov_trace_pc_guard(uint32_t * guard)
{
/// Duplicate the guard check.
if (!*guard)
return;
*guard = 0;
/// If you set *guard to 0 this code will not be called again for this edge.
/// Now we can get the PC and do whatever you want:
/// - store it somewhere or symbolize it and print right away.
/// The values of `*guard` are as you set them in
/// __sanitizer_cov_trace_pc_guard_init and so you can make them consecutive
/// and use them to dereference an array or a bit vector.
void * pc = __builtin_return_address(0);
coverage_array[guard - guards_start] = reinterpret_cast<uintptr_t>(pc);
}
}
__attribute__((no_sanitize("coverage")))
std::span<const uintptr_t> getCoverage()
{
return {coverage_array, coverage_array_size};
}
__attribute__((no_sanitize("coverage")))
std::span<const uintptr_t> getAllInstrumentedAddresses()
{
return {all_addresses_array, all_addresses_array_size};
}
__attribute__((no_sanitize("coverage")))
void resetCoverage()
{
memset(coverage_array, 0, coverage_array_size * sizeof(*coverage_array));
/// The guard defines whether the __sanitizer_cov_trace_pc_guard should be called.
/// For example, you can unset it after first invocation to prevent excessive work.
/// Initially set all the guards to 1 to enable callbacks.
for (uint32_t * x = guards_start; x < guards_end; ++x)
*x = 1;
}
#else
std::span<const uintptr_t> getCoverage()
{
return {};
}
std::span<const uintptr_t> getAllInstrumentedAddresses()
{
return {};
}
void resetCoverage()
{
}
#endif

View File

@ -1,5 +1,8 @@
#pragma once
#include <span>
#include <cstdint>
/// Flush coverage report to file, depending on coverage system
/// proposed by compiler (llvm for clang and gcov for gcc).
///
@ -7,3 +10,16 @@
/// Thread safe (use exclusive lock).
/// Idempotent, may be called multiple times.
void dumpCoverageReportIfPossible();
/// This is effective if SANITIZE_COVERAGE is enabled at build time.
/// Get accumulated unique program addresses of the instrumented parts of the code,
/// seen so far after program startup or after previous reset.
/// The returned span will be represented as a sparse map, containing mostly zeros, which you should filter away.
std::span<const uintptr_t> getCoverage();
/// Get all instrumented addresses that could be in the coverage.
std::span<const uintptr_t> getAllInstrumentedAddresses();
/// Reset the accumulated coverage.
/// This is useful to compare coverage of different tests, including differential coverage.
void resetCoverage();

26
base/base/crc32c_s390x.h Normal file
View File

@ -0,0 +1,26 @@
#pragma once
#include <crc32-s390x.h>
inline uint32_t s390x_crc32c_u8(uint32_t crc, uint8_t v)
{
return crc32c_le_vx(crc, reinterpret_cast<unsigned char *>(&v), sizeof(v));
}
inline uint32_t s390x_crc32c_u16(uint32_t crc, uint16_t v)
{
v = __builtin_bswap16(v);
return crc32c_le_vx(crc, reinterpret_cast<unsigned char *>(&v), sizeof(v));
}
inline uint32_t s390x_crc32c_u32(uint32_t crc, uint32_t v)
{
v = __builtin_bswap32(v);
return crc32c_le_vx(crc, reinterpret_cast<unsigned char *>(&v), sizeof(v));
}
inline uint64_t s390x_crc32c(uint64_t crc, uint64_t v)
{
v = __builtin_bswap64(v);
return crc32c_le_vx(static_cast<uint32_t>(crc), reinterpret_cast<unsigned char *>(&v), sizeof(uint64_t));
}

View File

@ -119,17 +119,16 @@
#include <base/types.h>
namespace DB
{
void abortOnFailedAssertion(const String & description);
[[noreturn]] void abortOnFailedAssertion(const String & description);
}
#define chassert(x) static_cast<bool>(x) ? void(0) : ::DB::abortOnFailedAssertion(#x)
#define chassert(x) do { static_cast<bool>(x) ? void(0) : ::DB::abortOnFailedAssertion(#x); } while (0)
#define UNREACHABLE() abort()
// clang-format off
#else
/// Here sizeof() trick is used to suppress unused warning for result,
/// since simple "(void)x" will evaluate the expression, while
/// "sizeof(!(x))" will not.
#define NIL_EXPRESSION(x) (void)sizeof(!(x))
#define chassert(x) NIL_EXPRESSION(x)
#define chassert(x) (void)sizeof(!(x))
#define UNREACHABLE() __builtin_unreachable()
#endif
#endif
@ -150,6 +149,7 @@
# define TSA_ACQUIRE_SHARED(...) __attribute__((acquire_shared_capability(__VA_ARGS__))) /// function acquires a shared capability, but does not release it
# define TSA_TRY_ACQUIRE_SHARED(...) __attribute__((try_acquire_shared_capability(__VA_ARGS__))) /// function tries to acquire a shared capability and returns a boolean value indicating success or failure
# define TSA_RELEASE_SHARED(...) __attribute__((release_shared_capability(__VA_ARGS__))) /// function releases the given shared capability
# define TSA_SCOPED_LOCKABLE __attribute__((scoped_lockable)) /// object of a class has scoped lockable capability
/// Macros for suppressing TSA warnings for specific reads/writes (instead of suppressing it for the whole function)
/// They use a lambda function to apply function attribute to a single statement. This enable us to suppress warnings locally instead of
@ -177,6 +177,7 @@
# define TSA_ACQUIRE_SHARED(...)
# define TSA_TRY_ACQUIRE_SHARED(...)
# define TSA_RELEASE_SHARED(...)
# define TSA_SCOPED_LOCKABLE
# define TSA_SUPPRESS_WARNING_FOR_READ(x) (x)
# define TSA_SUPPRESS_WARNING_FOR_WRITE(x) (x)

View File

@ -131,3 +131,29 @@ void sort(RandomIt first, RandomIt last)
using comparator = std::less<value_type>;
::sort(first, last, comparator());
}
/** Try to fast sort elements for common sorting patterns:
* 1. If elements are already sorted.
* 2. If elements are already almost sorted.
* 3. If elements are already sorted in reverse order.
*
* Returns true if fast sort was performed or elements were already sorted, false otherwise.
*/
template <typename RandomIt, typename Compare>
bool trySort(RandomIt first, RandomIt last, Compare compare)
{
#ifndef NDEBUG
::shuffle(first, last);
#endif
ComparatorWrapper<Compare> compare_wrapper = compare;
return ::pdqsort_try_sort(first, last, compare_wrapper);
}
template <typename RandomIt>
bool trySort(RandomIt first, RandomIt last)
{
using value_type = typename std::iterator_traits<RandomIt>::value_type;
using comparator = std::less<value_type>;
return ::trySort(first, last, comparator());
}

View File

@ -65,7 +65,7 @@ class IsTupleLike
static void check(...);
public:
static constexpr const bool value = !std::is_void<decltype(check<T>(nullptr))>::value;
static constexpr const bool value = !std::is_void_v<decltype(check<T>(nullptr))>;
};
}
@ -79,7 +79,7 @@ class numeric_limits<wide::integer<Bits, Signed>>
{
public:
static constexpr bool is_specialized = true;
static constexpr bool is_signed = is_same<Signed, signed>::value;
static constexpr bool is_signed = is_same_v<Signed, signed>;
static constexpr bool is_integer = true;
static constexpr bool is_exact = true;
static constexpr bool has_infinity = false;
@ -91,7 +91,7 @@ public:
static constexpr bool is_iec559 = false;
static constexpr bool is_bounded = true;
static constexpr bool is_modulo = true;
static constexpr int digits = Bits - (is_same<Signed, signed>::value ? 1 : 0);
static constexpr int digits = Bits - (is_same_v<Signed, signed> ? 1 : 0);
static constexpr int digits10 = digits * 0.30103 /*std::log10(2)*/;
static constexpr int max_digits10 = 0;
static constexpr int radix = 2;
@ -104,7 +104,7 @@ public:
static constexpr wide::integer<Bits, Signed> min() noexcept
{
if (is_same<Signed, signed>::value)
if constexpr (is_same_v<Signed, signed>)
{
using T = wide::integer<Bits, signed>;
T res{};
@ -118,7 +118,7 @@ public:
{
using T = wide::integer<Bits, Signed>;
T res{};
res.items[T::_impl::big(0)] = is_same<Signed, signed>::value
res.items[T::_impl::big(0)] = is_same_v<Signed, signed>
? std::numeric_limits<typename wide::integer<Bits, Signed>::signed_base_type>::max()
: std::numeric_limits<typename wide::integer<Bits, Signed>::base_type>::max();
for (unsigned i = 1; i < wide::integer<Bits, Signed>::_impl::item_count; ++i)

View File

@ -5,9 +5,6 @@ if (GLIBC_COMPATIBILITY)
endif()
enable_language(ASM)
include(CheckIncludeFile)
check_include_file("sys/random.h" HAVE_SYS_RANDOM_H)
add_headers_and_sources(glibc_compatibility .)
add_headers_and_sources(glibc_compatibility musl)
@ -21,11 +18,6 @@ if (GLIBC_COMPATIBILITY)
message (FATAL_ERROR "glibc_compatibility can only be used on x86_64 or aarch64.")
endif ()
list(REMOVE_ITEM glibc_compatibility_sources musl/getentropy.c)
if(HAVE_SYS_RANDOM_H)
list(APPEND glibc_compatibility_sources musl/getentropy.c)
endif()
# Need to omit frame pointers to match the performance of glibc
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fomit-frame-pointer")
@ -43,12 +35,6 @@ if (GLIBC_COMPATIBILITY)
target_link_libraries(global-libs INTERFACE glibc-compatibility ${MEMCPY_LIBRARY})
install(
TARGETS glibc-compatibility ${MEMCPY_LIBRARY}
EXPORT global
ARCHIVE DESTINATION lib
)
message (STATUS "Some symbols from glibc will be replaced for compatibility")
elseif (CLICKHOUSE_OFFICIAL_BUILD)

View File

@ -1,5 +1,6 @@
#include "memcpy.h"
__attribute__((no_sanitize("coverage")))
extern "C" void * memcpy(void * __restrict dst, const void * __restrict src, size_t size)
{
return inline_memcpy(dst, src, size);

View File

@ -93,7 +93,7 @@
* See https://habr.com/en/company/yandex/blog/457612/
*/
__attribute__((no_sanitize("coverage")))
static inline void * inline_memcpy(void * __restrict dst_, const void * __restrict src_, size_t size)
{
/// We will use pointer arithmetic, so char pointer will be used.

View File

@ -1,2 +1 @@
add_library(harmful harmful.c)
install(TARGETS harmful EXPORT global ARCHIVE DESTINATION lib)

View File

@ -68,7 +68,7 @@ namespace Net
struct ProxyConfig
/// HTTP proxy server configuration.
{
ProxyConfig() : port(HTTP_PORT), protocol("http"), tunnel(true) { }
ProxyConfig() : port(HTTP_PORT), protocol("http"), tunnel(true), originalRequestProtocol("http") { }
std::string host;
/// Proxy server host name or IP address.
@ -87,6 +87,9 @@ namespace Net
/// A regular expression defining hosts for which the proxy should be bypassed,
/// e.g. "localhost|127\.0\.0\.1|192\.168\.0\.\d+". Can also be an empty
/// string to disable proxy bypassing.
std::string originalRequestProtocol;
/// Original request protocol (http or https).
/// Required in the case of: HTTPS request over HTTP proxy with tunneling (CONNECT) off.
};
HTTPClientSession();

View File

@ -418,7 +418,7 @@ void HTTPClientSession::reconnect()
std::string HTTPClientSession::proxyRequestPrefix() const
{
std::string result("http://");
std::string result(_proxyConfig.originalRequestProtocol + "://");
result.append(_host);
/// Do not append default by default, since this may break some servers.
/// One example of such server is GCS (Google Cloud Storage).

View File

@ -26,7 +26,6 @@ HTTPServerSession::HTTPServerSession(const StreamSocket& socket, HTTPServerParam
_maxKeepAliveRequests(pParams->getMaxKeepAliveRequests())
{
setTimeout(pParams->getTimeout());
this->socket().setReceiveTimeout(pParams->getTimeout());
}

View File

@ -93,9 +93,34 @@ void HTTPSession::setTimeout(const Poco::Timespan& timeout)
void HTTPSession::setTimeout(const Poco::Timespan& connectionTimeout, const Poco::Timespan& sendTimeout, const Poco::Timespan& receiveTimeout)
{
_connectionTimeout = connectionTimeout;
_sendTimeout = sendTimeout;
_receiveTimeout = receiveTimeout;
try
{
_connectionTimeout = connectionTimeout;
if (_sendTimeout.totalMicroseconds() != sendTimeout.totalMicroseconds()) {
_sendTimeout = sendTimeout;
if (connected())
_socket.setSendTimeout(_sendTimeout);
}
if (_receiveTimeout.totalMicroseconds() != receiveTimeout.totalMicroseconds()) {
_receiveTimeout = receiveTimeout;
if (connected())
_socket.setReceiveTimeout(_receiveTimeout);
}
}
catch (NetException &)
{
#ifndef NDEBUG
throw;
#else
// mute exceptions in release
// just in case when changing settings on socket is not allowed
// however it should be OK for timeouts
#endif
}
}

View File

@ -1,19 +0,0 @@
# Adding test output on failure
enable_testing ()
if (NOT TARGET check)
if (CMAKE_CONFIGURATION_TYPES)
add_custom_target (check COMMAND ${CMAKE_CTEST_COMMAND}
--force-new-ctest-process --output-on-failure --build-config "$<CONFIGURATION>"
WORKING_DIRECTORY ${PROJECT_BINARY_DIR})
else ()
add_custom_target (check COMMAND ${CMAKE_CTEST_COMMAND}
--force-new-ctest-process --output-on-failure
WORKING_DIRECTORY ${PROJECT_BINARY_DIR})
endif ()
endif ()
macro (add_check target)
add_test (NAME test_${target} COMMAND ${target} WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
add_dependencies (check ${target})
endmacro (add_check)

View File

@ -2,11 +2,11 @@
# NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION,
# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
SET(VERSION_REVISION 54479)
SET(VERSION_REVISION 54480)
SET(VERSION_MAJOR 23)
SET(VERSION_MINOR 10)
SET(VERSION_MINOR 11)
SET(VERSION_PATCH 1)
SET(VERSION_GITHASH 8f9a227de1f530cdbda52c145d41a6b0f1d29961)
SET(VERSION_DESCRIBE v23.10.1.1-testing)
SET(VERSION_STRING 23.10.1.1)
SET(VERSION_GITHASH 13adae0e42fd48de600486fc5d4b64d39f80c43e)
SET(VERSION_DESCRIBE v23.11.1.1-testing)
SET(VERSION_STRING 23.11.1.1)
# end of autochange

View File

@ -9,10 +9,10 @@ if (CMAKE_CXX_COMPILER_LAUNCHER MATCHES "ccache" OR CMAKE_C_COMPILER_LAUNCHER MA
return()
endif()
set(COMPILER_CACHE "auto" CACHE STRING "Speedup re-compilations using the caching tools; valid options are 'auto' (ccache, then sccache), 'ccache', 'sccache', or 'disabled'")
set(COMPILER_CACHE "auto" CACHE STRING "Speedup re-compilations using the caching tools; valid options are 'auto' (sccache, then ccache), 'ccache', 'sccache', or 'disabled'")
if(COMPILER_CACHE STREQUAL "auto")
find_program (CCACHE_EXECUTABLE NAMES ccache sccache)
find_program (CCACHE_EXECUTABLE NAMES sccache ccache)
elseif (COMPILER_CACHE STREQUAL "ccache")
find_program (CCACHE_EXECUTABLE ccache)
elseif(COMPILER_CACHE STREQUAL "sccache")
@ -21,7 +21,7 @@ elseif(COMPILER_CACHE STREQUAL "disabled")
message(STATUS "Using *ccache: no (disabled via configuration)")
return()
else()
message(${RECONFIGURE_MESSAGE_LEVEL} "The COMPILER_CACHE must be one of (auto|ccache|sccache|disabled), value: '${COMPILER_CACHE}'")
message(${RECONFIGURE_MESSAGE_LEVEL} "The COMPILER_CACHE must be one of (auto|sccache|ccache|disabled), value: '${COMPILER_CACHE}'")
endif()

View File

@ -1,10 +1,5 @@
# https://software.intel.com/sites/landingpage/IntrinsicsGuide/
include (CheckCXXSourceCompiles)
include (CMakePushCheckState)
cmake_push_check_state ()
# The variables HAVE_* determine if compiler has support for the flag to use the corresponding instruction set.
# The options ENABLE_* determine if we will tell compiler to actually use the corresponding instruction set if compiler can do it.
@ -137,189 +132,54 @@ elseif (ARCH_AMD64)
endif()
# ClickHouse can be cross-compiled (e.g. on an ARM host for x86) but it is also possible to build ClickHouse on x86 w/o AVX for x86 w/
# AVX. We only check that the compiler can emit certain SIMD instructions, we don't care if the host system is able to run the binary.
# Therefore, use check_cxx_source_compiles (= does the code compile+link?) instead of check_cxx_source_runs (= does the code
# compile+link+run).
# AVX. We only assume that the compiler can emit certain SIMD instructions, we don't care if the host system is able to run the binary.
set (TEST_FLAG "-mssse3")
set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
check_cxx_source_compiles("
#include <tmmintrin.h>
int main() {
__m64 a = _mm_abs_pi8(__m64());
(void)a;
return 0;
}
" HAVE_SSSE3)
if (HAVE_SSSE3 AND ENABLE_SSSE3)
set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}")
if (ENABLE_SSSE3)
set (COMPILER_FLAGS "${COMPILER_FLAGS} -mssse3")
endif ()
set (TEST_FLAG "-msse4.1")
set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
check_cxx_source_compiles("
#include <smmintrin.h>
int main() {
auto a = _mm_insert_epi8(__m128i(), 0, 0);
(void)a;
return 0;
}
" HAVE_SSE41)
if (HAVE_SSE41 AND ENABLE_SSE41)
set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}")
if (ENABLE_SSE41)
set (COMPILER_FLAGS "${COMPILER_FLAGS} -msse4.1")
endif ()
set (TEST_FLAG "-msse4.2")
set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
check_cxx_source_compiles("
#include <nmmintrin.h>
int main() {
auto a = _mm_crc32_u64(0, 0);
(void)a;
return 0;
}
" HAVE_SSE42)
if (HAVE_SSE42 AND ENABLE_SSE42)
set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}")
if (ENABLE_SSE42)
set (COMPILER_FLAGS "${COMPILER_FLAGS} -msse4.2")
endif ()
set (TEST_FLAG "-mpclmul")
set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
check_cxx_source_compiles("
#include <wmmintrin.h>
int main() {
auto a = _mm_clmulepi64_si128(__m128i(), __m128i(), 0);
(void)a;
return 0;
}
" HAVE_PCLMULQDQ)
if (HAVE_PCLMULQDQ AND ENABLE_PCLMULQDQ)
set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}")
if (ENABLE_PCLMULQDQ)
set (COMPILER_FLAGS "${COMPILER_FLAGS} -mpclmul")
endif ()
set (TEST_FLAG "-mpopcnt")
set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
check_cxx_source_compiles("
int main() {
auto a = __builtin_popcountll(0);
(void)a;
return 0;
}
" HAVE_POPCNT)
if (HAVE_POPCNT AND ENABLE_POPCNT)
set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}")
if (ENABLE_BMI)
set (COMPILER_FLAGS "${COMPILER_FLAGS} -mbmi")
endif ()
set (TEST_FLAG "-mavx")
set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
check_cxx_source_compiles("
#include <immintrin.h>
int main() {
auto a = _mm256_insert_epi8(__m256i(), 0, 0);
(void)a;
return 0;
}
" HAVE_AVX)
if (HAVE_AVX AND ENABLE_AVX)
set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}")
if (ENABLE_POPCNT)
set (COMPILER_FLAGS "${COMPILER_FLAGS} -mpopcnt")
endif ()
set (TEST_FLAG "-mavx2")
set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
check_cxx_source_compiles("
#include <immintrin.h>
int main() {
auto a = _mm256_add_epi16(__m256i(), __m256i());
(void)a;
return 0;
}
" HAVE_AVX2)
if (HAVE_AVX2 AND ENABLE_AVX2)
set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}")
if (ENABLE_AVX)
set (COMPILER_FLAGS "${COMPILER_FLAGS} -mavx")
endif ()
set (TEST_FLAG "-mavx512f -mavx512bw -mavx512vl")
set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
check_cxx_source_compiles("
#include <immintrin.h>
int main() {
auto a = _mm512_setzero_epi32();
(void)a;
auto b = _mm512_add_epi16(__m512i(), __m512i());
(void)b;
auto c = _mm_cmp_epi8_mask(__m128i(), __m128i(), 0);
(void)c;
return 0;
}
" HAVE_AVX512)
if (HAVE_AVX512 AND ENABLE_AVX512)
set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}")
endif ()
set (TEST_FLAG "-mavx512vbmi")
set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
check_cxx_source_compiles("
#include <immintrin.h>
int main() {
auto a = _mm512_permutexvar_epi8(__m512i(), __m512i());
(void)a;
return 0;
}
" HAVE_AVX512_VBMI)
if (HAVE_AVX512 AND ENABLE_AVX512 AND HAVE_AVX512_VBMI AND ENABLE_AVX512_VBMI)
set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}")
endif ()
set (TEST_FLAG "-mbmi")
set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
check_cxx_source_compiles("
#include <immintrin.h>
int main() {
auto a = _blsr_u32(0);
(void)a;
return 0;
}
" HAVE_BMI)
if (HAVE_BMI AND ENABLE_BMI)
set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}")
endif ()
set (TEST_FLAG "-mbmi2")
set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
check_cxx_source_compiles("
#include <immintrin.h>
int main() {
auto a = _pdep_u64(0, 0);
(void)a;
return 0;
}
" HAVE_BMI2)
if (HAVE_BMI2 AND HAVE_AVX2 AND ENABLE_AVX2 AND ENABLE_BMI2)
set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}")
endif ()
# Limit avx2/avx512 flags to specific source builds
set (X86_INTRINSICS_FLAGS "")
if (ENABLE_AVX2_FOR_SPEC_OP)
if (HAVE_BMI)
set (X86_INTRINSICS_FLAGS "${X86_INTRINSICS_FLAGS} -mbmi")
if (ENABLE_AVX2)
set (COMPILER_FLAGS "${COMPILER_FLAGS} -mavx2")
if (ENABLE_BMI2)
set (COMPILER_FLAGS "${COMPILER_FLAGS} -mbmi2")
endif ()
if (HAVE_AVX AND HAVE_AVX2)
set (X86_INTRINSICS_FLAGS "${X86_INTRINSICS_FLAGS} -mavx -mavx2")
endif ()
if (ENABLE_AVX512)
set (COMPILER_FLAGS "${COMPILER_FLAGS} -mavx512f -mavx512bw -mavx512vl")
if (ENABLE_AVX512_VBMI)
set (COMPILER_FLAGS "${COMPILER_FLAGS} -mavx512vbmi")
endif ()
endif ()
if (ENABLE_AVX512_FOR_SPEC_OP)
set (X86_INTRINSICS_FLAGS "")
if (HAVE_BMI)
set (X86_INTRINSICS_FLAGS "${X86_INTRINSICS_FLAGS} -mbmi")
endif ()
if (HAVE_AVX512)
set (X86_INTRINSICS_FLAGS "${X86_INTRINSICS_FLAGS} -mavx512f -mavx512bw -mavx512vl -mprefer-vector-width=256")
endif ()
set (X86_INTRINSICS_FLAGS "-mbmi -mavx512f -mavx512bw -mavx512vl -mprefer-vector-width=256")
endif ()
else ()
# RISC-V + exotic platforms
endif ()
cmake_pop_check_state ()
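
For context, a minimal sketch of the compile-only probe style this refactoring removes; compile+link is sufficient because the build host need not be able to execute the resulting binary (HAVE_AVX2_PROBE is an illustrative result variable):
include (CheckCXXSourceCompiles)
set (CMAKE_REQUIRED_FLAGS "-mavx2 -O0")
# Does the compiler accept the flag and emit the intrinsic? (No run step.)
check_cxx_source_compiles ("
    #include <immintrin.h>
    int main() { auto a = _mm256_add_epi16(__m256i(), __m256i()); (void)a; return 0; }
" HAVE_AVX2_PROBE)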

View File

@ -22,9 +22,3 @@ link_libraries(global-group)
target_link_libraries(global-group INTERFACE
$<TARGET_PROPERTY:global-libs,INTERFACE_LINK_LIBRARIES>
)
# FIXME: remove when all contribs get custom cmake lists
install(
TARGETS global-group global-libs
EXPORT global
)

View File

@ -9,9 +9,3 @@ set (CMAKE_ASM_COMPILER_TARGET "aarch64-apple-darwin")
set (CMAKE_OSX_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../toolchain/darwin-aarch64")
set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake
set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
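
The HAS_PRE_1970/HAS_POST_2038 lines dropped here follow the standard cross-compiling pattern of pre-seeding TRY_RUN results in the cache, since configure-time test binaries cannot run on the build host. The generic pattern, in miniature (SOME_CHECK is a placeholder name):
# Pin the result so CMake never tries to execute the cross-built test binary.
set (SOME_CHECK_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (SOME_CHECK_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)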

View File

@ -9,9 +9,3 @@ set (CMAKE_ASM_COMPILER_TARGET "x86_64-apple-darwin")
set (CMAKE_OSX_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../toolchain/darwin-x86_64")
set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake
set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)

View File

@ -25,9 +25,3 @@ link_libraries(global-group)
target_link_libraries(global-group INTERFACE
$<TARGET_PROPERTY:global-libs,INTERFACE_LINK_LIBRARIES>
)
# FIXME: remove when all contribs get custom cmake lists
install(
TARGETS global-group global-libs
EXPORT global
)

View File

@ -9,13 +9,3 @@ set (CMAKE_ASM_COMPILER_TARGET "aarch64-unknown-freebsd12")
set (CMAKE_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/freebsd-aarch64")
set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake
# Will be changed later, but somehow needed to be set here.
set (CMAKE_AR "ar")
set (CMAKE_RANLIB "ranlib")
set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)

View File

@ -9,13 +9,3 @@ set (CMAKE_ASM_COMPILER_TARGET "powerpc64le-unknown-freebsd13")
set (CMAKE_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/freebsd-ppc64le")
set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake
# Will be changed later, but somehow needed to be set here.
set (CMAKE_AR "ar")
set (CMAKE_RANLIB "ranlib")
set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)

View File

@ -9,13 +9,3 @@ set (CMAKE_ASM_COMPILER_TARGET "x86_64-pc-freebsd11")
set (CMAKE_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/freebsd-x86_64")
set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake
# Will be changed later, but somehow needed to be set here.
set (CMAKE_AR "ar")
set (CMAKE_RANLIB "ranlib")
set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)

View File

@ -4,8 +4,8 @@ if (FUZZER)
# NOTE: Eldar Zaitov decided to name it "libfuzzer" instead of "fuzzer" to keep in mind other possible fuzzer backends.
# NOTE: no-link means that all targets are built with fuzzer instrumentation, but only some of them
# (the tests) have an entry point for the fuzzer, and this is not checked.
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} -fsanitize=fuzzer-no-link")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} -fsanitize=fuzzer-no-link")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} -fsanitize=fuzzer-no-link -DFUZZER=1")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} -fsanitize=fuzzer-no-link -DFUZZER=1")
# NOTE: oss-fuzz can change LIB_FUZZING_ENGINE variable
if (NOT LIB_FUZZING_ENGINE)
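A minimal sketch of the split the NOTE above describes: ordinary targets only carry the no-link instrumentation set here, while an actual fuzzer binary (some_fuzzer is a hypothetical target) additionally links the libFuzzer runtime:
# Only real fuzzer binaries link the engine; everything else is merely instrumented.
target_link_options (some_fuzzer PRIVATE "-fsanitize=fuzzer")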

View File

@ -21,7 +21,7 @@ if (NOT PARALLEL_COMPILE_JOBS AND MAX_COMPILER_MEMORY)
set (PARALLEL_COMPILE_JOBS 1)
endif ()
if (PARALLEL_COMPILE_JOBS LESS NUMBER_OF_LOGICAL_CORES)
message(WARNING "The auto-calculated compile jobs limit (${PARALLEL_COMPILE_JOBS}) underutilizes CPU cores (${NUMBER_OF_LOGICAL_CORES}). Set PARALLEL_COMPILE_JOBS to override.")
message("The auto-calculated compile jobs limit (${PARALLEL_COMPILE_JOBS}) underutilizes CPU cores (${NUMBER_OF_LOGICAL_CORES}). Set PARALLEL_COMPILE_JOBS to override.")
endif()
endif ()
@ -32,7 +32,7 @@ if (NOT PARALLEL_LINK_JOBS AND MAX_LINKER_MEMORY)
set (PARALLEL_LINK_JOBS 1)
endif ()
if (PARALLEL_LINK_JOBS LESS NUMBER_OF_LOGICAL_CORES)
message(WARNING "The auto-calculated link jobs limit (${PARALLEL_LINK_JOBS}) underutilizes CPU cores (${NUMBER_OF_LOGICAL_CORES}). Set PARALLEL_LINK_JOBS to override.")
message("The auto-calculated link jobs limit (${PARALLEL_LINK_JOBS}) underutilizes CPU cores (${NUMBER_OF_LOGICAL_CORES}). Set PARALLEL_LINK_JOBS to override.")
endif()
endif ()
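Both limits follow the same memory-based heuristic; a sketch under the assumption that TOTAL_MEMORY_BYTES holds the probed RAM size (hypothetical variable):
# One job per MAX_COMPILER_MEMORY bytes of RAM, clamped to at least one job.
math (EXPR PARALLEL_COMPILE_JOBS "${TOTAL_MEMORY_BYTES} / ${MAX_COMPILER_MEMORY}")
if (PARALLEL_COMPILE_JOBS LESS 1)
    set (PARALLEL_COMPILE_JOBS 1)
endif ()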

View File

@ -50,9 +50,3 @@ target_link_libraries(global-group INTERFACE
$<TARGET_PROPERTY:global-libs,INTERFACE_LINK_LIBRARIES>
-Wl,--end-group
)
# FIXME: remove when all contribs get custom cmake lists
install(
TARGETS global-group global-libs
EXPORT global
)

View File

@ -9,10 +9,6 @@ set (CMAKE_C_COMPILER_TARGET "aarch64-linux-gnu")
set (CMAKE_CXX_COMPILER_TARGET "aarch64-linux-gnu")
set (CMAKE_ASM_COMPILER_TARGET "aarch64-linux-gnu")
# Will be changed later, but somehow needed to be set here.
set (CMAKE_AR "ar")
set (CMAKE_RANLIB "ranlib")
set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-aarch64")
set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/aarch64-linux-gnu/libc")
@ -20,9 +16,3 @@ set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/aarch64-linux-gnu/libc")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)

View File

@ -9,10 +9,6 @@ set (CMAKE_C_COMPILER_TARGET "powerpc64le-linux-gnu")
set (CMAKE_CXX_COMPILER_TARGET "powerpc64le-linux-gnu")
set (CMAKE_ASM_COMPILER_TARGET "powerpc64le-linux-gnu")
# Will be changed later, but somehow needed to be set here.
set (CMAKE_AR "ar")
set (CMAKE_RANLIB "ranlib")
set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-powerpc64le")
set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/powerpc64le-linux-gnu/libc")
@ -20,9 +16,3 @@ set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/powerpc64le-linux-gnu/libc")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)

View File

@ -9,10 +9,6 @@ set (CMAKE_C_COMPILER_TARGET "riscv64-linux-gnu")
set (CMAKE_CXX_COMPILER_TARGET "riscv64-linux-gnu")
set (CMAKE_ASM_COMPILER_TARGET "riscv64-linux-gnu")
# Will be changed later, but somehow needed to be set here.
set (CMAKE_AR "ar")
set (CMAKE_RANLIB "ranlib")
set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-riscv64")
set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}")
@ -27,9 +23,3 @@ set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=bfd")
# ld.lld: error: section size decrease is too large
# But GNU BinUtils work.
set (LINKER_NAME "riscv64-linux-gnu-ld.bfd" CACHE STRING "Linker name" FORCE)
set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)

View File

@ -9,10 +9,6 @@ set (CMAKE_C_COMPILER_TARGET "s390x-linux-gnu")
set (CMAKE_CXX_COMPILER_TARGET "s390x-linux-gnu")
set (CMAKE_ASM_COMPILER_TARGET "s390x-linux-gnu")
# Will be changed later, but somehow needed to be set here.
set (CMAKE_AR "ar")
set (CMAKE_RANLIB "ranlib")
set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-s390x")
set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/s390x-linux-gnu/libc")
@ -23,9 +19,3 @@ set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=mold -Wl,-L${CMAKE_SYSROOT}/usr/lib64")
set (CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -fuse-ld=mold -Wl,-L${CMAKE_SYSROOT}/usr/lib64")
set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fuse-ld=mold -Wl,-L${CMAKE_SYSROOT}/usr/lib64")
set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)

View File

@ -9,10 +9,6 @@ set (CMAKE_C_COMPILER_TARGET "x86_64-linux-musl")
set (CMAKE_CXX_COMPILER_TARGET "x86_64-linux-musl")
set (CMAKE_ASM_COMPILER_TARGET "x86_64-linux-musl")
# Will be changed later, but somehow needed to be set here.
set (CMAKE_AR "ar")
set (CMAKE_RANLIB "ranlib")
set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-x86_64-musl")
set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}")
@ -21,11 +17,5 @@ set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
set (USE_MUSL 1)
add_definitions(-DUSE_MUSL=1)

View File

@ -19,10 +19,6 @@ set (CMAKE_C_COMPILER_TARGET "x86_64-linux-gnu")
set (CMAKE_CXX_COMPILER_TARGET "x86_64-linux-gnu")
set (CMAKE_ASM_COMPILER_TARGET "x86_64-linux-gnu")
# Will be changed later, but somehow needed to be set here.
set (CMAKE_AR "ar")
set (CMAKE_RANLIB "ranlib")
set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-x86_64")
set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/x86_64-linux-gnu/libc")
@ -32,9 +28,3 @@ set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)

View File

@ -58,3 +58,27 @@ if (SANITIZE)
message (FATAL_ERROR "Unknown sanitizer type: ${SANITIZE}")
endif ()
endif()
# Default coverage instrumentation (dumping the coverage map on exit)
option(WITH_COVERAGE "Instrumentation for code coverage with default implementation" OFF)
if (WITH_COVERAGE)
message (STATUS "Enabled instrumentation for code coverage")
set(COVERAGE_FLAGS "-fprofile-instr-generate -fcoverage-mapping")
endif()
option (SANITIZE_COVERAGE "Instrumentation for code coverage with custom callbacks" OFF)
if (SANITIZE_COVERAGE)
message (STATUS "Enabled instrumentation for code coverage")
# We set this define for the whole build to indicate that at least some parts are compiled with coverage,
# and to expose it in system.build_options.
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DSANITIZE_COVERAGE=1")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DSANITIZE_COVERAGE=1")
# But the actual coverage instrumentation is enabled on a per-library basis: for ClickHouse code, but not for third-party libraries.
set (COVERAGE_FLAGS "-fsanitize-coverage=trace-pc-guard,pc-table")
endif()
set (WITHOUT_COVERAGE_FLAGS "-fno-profile-instr-generate -fno-coverage-mapping -fno-sanitize-coverage=trace-pc-guard,pc-table")
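A minimal sketch of the per-library split mentioned above, as it might be applied in a first-party directory (contribs would append WITHOUT_COVERAGE_FLAGS instead):
# Instrument ClickHouse code with whichever coverage flags were chosen above.
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COVERAGE_FLAGS}")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COVERAGE_FLAGS}")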

View File

@ -1,3 +1,5 @@
# Generates a separate file with debug symbols while stripping them from the main binary.
# This is needed for Debian packages.
macro(clickhouse_split_debug_symbols)
set(oneValueArgs TARGET DESTINATION_DIR BINARY_PATH)
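A sketch of the classic split-then-link sequence such a macro performs (GNU binutils assumed; 'clickhouse' stands in for the actual TARGET argument):
add_custom_command (TARGET clickhouse POST_BUILD
    # Copy the debug info into a side file, strip it from the binary,
    # then record a debuglink so debuggers can find the side file.
    COMMAND objcopy --only-keep-debug $<TARGET_FILE:clickhouse> clickhouse.debug
    COMMAND strip --strip-debug $<TARGET_FILE:clickhouse>
    COMMAND objcopy --add-gnu-debuglink=clickhouse.debug $<TARGET_FILE:clickhouse>)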

contrib/AMQP-CPP vendored

@ -1 +1 @@
Subproject commit 818c2d8ad96a08a5d20fece7d1e1e8855a2b0860
Subproject commit 00f09897ce020a84e38f87dc416af4a19c5da9ae

View File

@ -1,16 +1,7 @@
#"${folder}/CMakeLists.txt" Third-party libraries may have substandard code.
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -w")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w")
if (WITH_COVERAGE)
set (WITHOUT_COVERAGE_LIST ${WITHOUT_COVERAGE})
separate_arguments(WITHOUT_COVERAGE_LIST)
# disable coverage for contrib files and build with optimisations
if (COMPILER_CLANG)
add_compile_options(-O3 -DNDEBUG -finline-functions -finline-hint-functions ${WITHOUT_COVERAGE_LIST})
endif()
endif()
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -w -ffunction-sections -fdata-sections")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w -ffunction-sections -fdata-sections")
if (SANITIZE STREQUAL "undefined")
# 3rd-party libraries are usually not intended to work with UBSan.
@ -53,6 +44,7 @@ else ()
endif ()
add_contrib (miniselect-cmake miniselect)
add_contrib (pdqsort-cmake pdqsort)
add_contrib (pocketfft-cmake pocketfft)
add_contrib (crc32-vpmsum-cmake crc32-vpmsum)
add_contrib (sparsehash-c11-cmake sparsehash-c11)
add_contrib (abseil-cpp-cmake abseil-cpp)

contrib/NuRaft vendored

@ -1 +1 @@
Subproject commit eb1572129c71beb2156dcdaadc3fb136954aed96
Subproject commit b7ea89b817a18dc0eafc1f909d568869f02d2d04

contrib/abseil-cpp vendored

@ -1 +1 @@
Subproject commit 5655528c41830f733160de4fb0b99073841bae9e
Subproject commit 3bd86026c93da5a40006fd53403dff9d5f5e30e3

File diff suppressed because it is too large

contrib/arrow vendored

@ -1 +1 @@
Subproject commit 1d93838f69a802639ca144ea5704a98e2481810d
Subproject commit ba5c67934e8274d649befcffab56731632dc5253

View File

@ -77,16 +77,16 @@ set(FLATBUFFERS_SRC_DIR "${ClickHouse_SOURCE_DIR}/contrib/flatbuffers")
set(FLATBUFFERS_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/flatbuffers")
set(FLATBUFFERS_INCLUDE_DIR "${FLATBUFFERS_SRC_DIR}/include")
# set flatbuffers CMake options
set(FLATBUFFERS_BUILD_FLATLIB ON CACHE BOOL "Enable the build of the flatbuffers library")
set(FLATBUFFERS_BUILD_SHAREDLIB OFF CACHE BOOL "Disable the build of the flatbuffers shared library")
set(FLATBUFFERS_BUILD_TESTS OFF CACHE BOOL "Skip flatbuffers tests")
set(FLATBUFFERS_SRCS
${FLATBUFFERS_SRC_DIR}/src/idl_parser.cpp
${FLATBUFFERS_SRC_DIR}/src/idl_gen_text.cpp
${FLATBUFFERS_SRC_DIR}/src/reflection.cpp
${FLATBUFFERS_SRC_DIR}/src/util.cpp)
add_subdirectory(${FLATBUFFERS_SRC_DIR} "${FLATBUFFERS_BINARY_DIR}")
add_library(_flatbuffers STATIC ${FLATBUFFERS_SRCS})
target_include_directories(_flatbuffers PUBLIC ${FLATBUFFERS_INCLUDE_DIR})
target_compile_definitions(_flatbuffers PRIVATE -DFLATBUFFERS_LOCALE_INDEPENDENT=0)
add_library(_flatbuffers INTERFACE)
target_link_libraries(_flatbuffers INTERFACE flatbuffers)
target_include_directories(_flatbuffers INTERFACE ${FLATBUFFERS_INCLUDE_DIR})
# === hdfs
# NOTE: cannot use ch_contrib::hdfs since its INCLUDE_DIRECTORIES does not include the trailing "hdfs/"
@ -109,7 +109,6 @@ set (ORC_CXX_HAS_CSTDINT 1)
set (ORC_CXX_HAS_THREAD_LOCAL 1)
include(orc_check.cmake)
configure_file("${ORC_INCLUDE_DIR}/orc/orc-config.hh.in" "${ORC_BUILD_INCLUDE_DIR}/orc/orc-config.hh")
configure_file("${ORC_SOURCE_SRC_DIR}/Adaptor.hh.in" "${ORC_BUILD_INCLUDE_DIR}/Adaptor.hh")
@ -128,7 +127,6 @@ set(ORC_SRCS
"${ORC_SOURCE_SRC_DIR}/BpackingDefault.hh"
"${ORC_SOURCE_SRC_DIR}/ByteRLE.cc"
"${ORC_SOURCE_SRC_DIR}/ByteRLE.hh"
"${ORC_SOURCE_SRC_DIR}/CMakeLists.txt"
"${ORC_SOURCE_SRC_DIR}/ColumnPrinter.cc"
"${ORC_SOURCE_SRC_DIR}/ColumnReader.cc"
"${ORC_SOURCE_SRC_DIR}/ColumnReader.hh"
@ -198,7 +196,9 @@ target_link_libraries(_orc PRIVATE
ch_contrib::snappy
ch_contrib::zlib
ch_contrib::zstd)
target_include_directories(_orc SYSTEM BEFORE PUBLIC ${ORC_INCLUDE_DIR})
target_include_directories(_orc SYSTEM BEFORE PUBLIC
${ORC_INCLUDE_DIR}
"${ClickHouse_SOURCE_DIR}/contrib/arrow-cmake/cpp/src/orc/c++/include")
target_include_directories(_orc SYSTEM BEFORE PUBLIC ${ORC_BUILD_INCLUDE_DIR})
target_include_directories(_orc SYSTEM PRIVATE
${ORC_SOURCE_SRC_DIR}
@ -212,8 +212,6 @@ target_include_directories(_orc SYSTEM PRIVATE
set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src/arrow")
configure_file("${LIBRARY_DIR}/util/config.h.cmake" "${CMAKE_CURRENT_BINARY_DIR}/cpp/src/arrow/util/config.h")
# arrow/cpp/src/arrow/CMakeLists.txt (ARROW_SRCS + ARROW_COMPUTE + ARROW_IPC)
set(ARROW_SRCS
"${LIBRARY_DIR}/array/array_base.cc"
@ -230,6 +228,8 @@ set(ARROW_SRCS
"${LIBRARY_DIR}/array/builder_nested.cc"
"${LIBRARY_DIR}/array/builder_primitive.cc"
"${LIBRARY_DIR}/array/builder_union.cc"
"${LIBRARY_DIR}/array/builder_run_end.cc"
"${LIBRARY_DIR}/array/array_run_end.cc"
"${LIBRARY_DIR}/array/concatenate.cc"
"${LIBRARY_DIR}/array/data.cc"
"${LIBRARY_DIR}/array/diff.cc"
@ -309,9 +309,12 @@ set(ARROW_SRCS
"${LIBRARY_DIR}/util/debug.cc"
"${LIBRARY_DIR}/util/tracing.cc"
"${LIBRARY_DIR}/util/atfork_internal.cc"
"${LIBRARY_DIR}/util/crc32.cc"
"${LIBRARY_DIR}/util/hashing.cc"
"${LIBRARY_DIR}/util/ree_util.cc"
"${LIBRARY_DIR}/util/union_util.cc"
"${LIBRARY_DIR}/vendored/base64.cpp"
"${LIBRARY_DIR}/vendored/datetime/tz.cpp"
"${LIBRARY_DIR}/vendored/musl/strptime.c"
"${LIBRARY_DIR}/vendored/uriparser/UriCommon.c"
"${LIBRARY_DIR}/vendored/uriparser/UriCompare.c"
@ -328,39 +331,20 @@ set(ARROW_SRCS
"${LIBRARY_DIR}/vendored/uriparser/UriRecompose.c"
"${LIBRARY_DIR}/vendored/uriparser/UriResolve.c"
"${LIBRARY_DIR}/vendored/uriparser/UriShorten.c"
"${LIBRARY_DIR}/vendored/double-conversion/bignum.cc"
"${LIBRARY_DIR}/vendored/double-conversion/bignum-dtoa.cc"
"${LIBRARY_DIR}/vendored/double-conversion/cached-powers.cc"
"${LIBRARY_DIR}/vendored/double-conversion/double-to-string.cc"
"${LIBRARY_DIR}/vendored/double-conversion/fast-dtoa.cc"
"${LIBRARY_DIR}/vendored/double-conversion/fixed-dtoa.cc"
"${LIBRARY_DIR}/vendored/double-conversion/string-to-double.cc"
"${LIBRARY_DIR}/vendored/double-conversion/strtod.cc"
"${LIBRARY_DIR}/compute/api_aggregate.cc"
"${LIBRARY_DIR}/compute/api_scalar.cc"
"${LIBRARY_DIR}/compute/api_vector.cc"
"${LIBRARY_DIR}/compute/cast.cc"
"${LIBRARY_DIR}/compute/exec.cc"
"${LIBRARY_DIR}/compute/exec/accumulation_queue.cc"
"${LIBRARY_DIR}/compute/exec/accumulation_queue.h"
"${LIBRARY_DIR}/compute/exec/aggregate.cc"
"${LIBRARY_DIR}/compute/exec/aggregate_node.cc"
"${LIBRARY_DIR}/compute/exec/asof_join_node.cc"
"${LIBRARY_DIR}/compute/exec/bloom_filter.cc"
"${LIBRARY_DIR}/compute/exec/exec_plan.cc"
"${LIBRARY_DIR}/compute/exec/expression.cc"
"${LIBRARY_DIR}/compute/exec/filter_node.cc"
"${LIBRARY_DIR}/compute/exec/hash_join.cc"
"${LIBRARY_DIR}/compute/exec/hash_join_dict.cc"
"${LIBRARY_DIR}/compute/exec/hash_join_node.cc"
"${LIBRARY_DIR}/compute/exec/key_hash.cc"
"${LIBRARY_DIR}/compute/exec/key_map.cc"
"${LIBRARY_DIR}/compute/exec/map_node.cc"
"${LIBRARY_DIR}/compute/exec/options.cc"
"${LIBRARY_DIR}/compute/exec/order_by_impl.cc"
"${LIBRARY_DIR}/compute/exec/partition_util.cc"
"${LIBRARY_DIR}/compute/exec/project_node.cc"
"${LIBRARY_DIR}/compute/exec/query_context.cc"
"${LIBRARY_DIR}/compute/exec/sink_node.cc"
"${LIBRARY_DIR}/compute/exec/source_node.cc"
"${LIBRARY_DIR}/compute/exec/swiss_join.cc"
"${LIBRARY_DIR}/compute/exec/task_util.cc"
"${LIBRARY_DIR}/compute/exec/tpch_node.cc"
"${LIBRARY_DIR}/compute/exec/union_node.cc"
"${LIBRARY_DIR}/compute/exec/util.cc"
"${LIBRARY_DIR}/compute/function.cc"
"${LIBRARY_DIR}/compute/function_internal.cc"
"${LIBRARY_DIR}/compute/kernel.cc"
@ -403,8 +387,13 @@ set(ARROW_SRCS
"${LIBRARY_DIR}/compute/kernels/vector_select_k.cc"
"${LIBRARY_DIR}/compute/kernels/vector_selection.cc"
"${LIBRARY_DIR}/compute/kernels/vector_sort.cc"
"${LIBRARY_DIR}/compute/kernels/vector_selection_internal.cc"
"${LIBRARY_DIR}/compute/kernels/vector_selection_filter_internal.cc"
"${LIBRARY_DIR}/compute/kernels/vector_selection_take_internal.cc"
"${LIBRARY_DIR}/compute/light_array.cc"
"${LIBRARY_DIR}/compute/registry.cc"
"${LIBRARY_DIR}/compute/expression.cc"
"${LIBRARY_DIR}/compute/ordering.cc"
"${LIBRARY_DIR}/compute/row/compare_internal.cc"
"${LIBRARY_DIR}/compute/row/encode_internal.cc"
"${LIBRARY_DIR}/compute/row/grouper.cc"
@ -459,7 +448,7 @@ target_link_libraries(_arrow PUBLIC _orc)
add_dependencies(_arrow protoc)
target_include_directories(_arrow SYSTEM BEFORE PUBLIC ${ARROW_SRC_DIR})
target_include_directories(_arrow SYSTEM BEFORE PUBLIC "${CMAKE_CURRENT_BINARY_DIR}/cpp/src")
target_include_directories(_arrow SYSTEM BEFORE PUBLIC "${ClickHouse_SOURCE_DIR}/contrib/arrow-cmake/cpp/src")
target_include_directories(_arrow SYSTEM PRIVATE ${ARROW_SRC_DIR})
target_include_directories(_arrow SYSTEM PRIVATE ${HDFS_INCLUDE_DIR})
@ -488,10 +477,10 @@ set(PARQUET_SRCS
"${LIBRARY_DIR}/exception.cc"
"${LIBRARY_DIR}/file_reader.cc"
"${LIBRARY_DIR}/file_writer.cc"
"${LIBRARY_DIR}/page_index.cc"
"${LIBRARY_DIR}/level_conversion.cc"
"${LIBRARY_DIR}/level_comparison.cc"
"${LIBRARY_DIR}/metadata.cc"
"${LIBRARY_DIR}/murmur3.cc"
"${LIBRARY_DIR}/platform.cc"
"${LIBRARY_DIR}/printer.cc"
"${LIBRARY_DIR}/properties.cc"
@ -500,6 +489,8 @@ set(PARQUET_SRCS
"${LIBRARY_DIR}/stream_reader.cc"
"${LIBRARY_DIR}/stream_writer.cc"
"${LIBRARY_DIR}/types.cc"
"${LIBRARY_DIR}/bloom_filter_reader.cc"
"${LIBRARY_DIR}/xxhasher.cc"
"${GEN_LIBRARY_DIR}/parquet_constants.cpp"
"${GEN_LIBRARY_DIR}/parquet_types.cpp"

View File

@ -1 +0,0 @@
../../../thrift/build/cmake/config.h.in

View File

@ -0,0 +1,61 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#define ARROW_VERSION_MAJOR 11
#define ARROW_VERSION_MINOR 0
#define ARROW_VERSION_PATCH 0
#define ARROW_VERSION ((ARROW_VERSION_MAJOR * 1000) + ARROW_VERSION_MINOR) * 1000 + ARROW_VERSION_PATCH
#define ARROW_VERSION_STRING "11.0.0"
#define ARROW_SO_VERSION "1100"
#define ARROW_FULL_SO_VERSION "1100.0.0"
#define ARROW_CXX_COMPILER_ID "Clang"
#define ARROW_CXX_COMPILER_VERSION "ClickHouse"
#define ARROW_CXX_COMPILER_FLAGS ""
#define ARROW_BUILD_TYPE ""
#define ARROW_GIT_ID ""
#define ARROW_GIT_DESCRIPTION ""
#define ARROW_PACKAGE_KIND ""
/* #undef ARROW_COMPUTE */
/* #undef ARROW_CSV */
/* #undef ARROW_CUDA */
/* #undef ARROW_DATASET */
/* #undef ARROW_FILESYSTEM */
/* #undef ARROW_FLIGHT */
/* #undef ARROW_FLIGHT_SQL */
/* #undef ARROW_IPC */
/* #undef ARROW_JEMALLOC */
/* #undef ARROW_JEMALLOC_VENDORED */
/* #undef ARROW_JSON */
/* #undef ARROW_ORC */
/* #undef ARROW_PARQUET */
/* #undef ARROW_SUBSTRAIT */
/* #undef ARROW_GCS */
/* #undef ARROW_S3 */
/* #undef ARROW_USE_NATIVE_INT128 */
/* #undef ARROW_WITH_MUSL */
/* #undef ARROW_WITH_OPENTELEMETRY */
/* #undef ARROW_WITH_UCX */
/* #undef GRPCPP_PP_INCLUDE */

View File

@ -0,0 +1,38 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ORC_CONFIG_HH
#define ORC_CONFIG_HH
#define ORC_VERSION ""
#define ORC_CXX_HAS_CSTDINT
#ifdef ORC_CXX_HAS_CSTDINT
#include <cstdint>
#else
#include <stdint.h>
#endif
// The following macros should be kept for backward compatibility.
#define ORC_NOEXCEPT noexcept
#define ORC_NULLPTR nullptr
#define ORC_OVERRIDE override
#define ORC_UNIQUE_PTR std::unique_ptr
#endif

contrib/avro vendored

@ -1 +1 @@
Subproject commit 7832659ec986075d560f930c288e973c64679552
Subproject commit 2fb8a8a6ec0eab9109b68abf3b4857e8c476b918

View File

@ -1,114 +1,13 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
include(CheckCSourceRuns)
option(USE_CPU_EXTENSIONS "Whenever possible, use functions optimized for CPUs with specific extensions (ex: SSE, AVX)." ON)
# In the current (11/2/21) state of mingw64, the packaged gcc is not capable of emitting properly aligned avx2 instructions under certain circumstances.
# This leads to crashes for windows builds using mingw64 when invoking the avx2-enabled versions of certain functions. Until we can find a better
# work-around, disable avx2 (and all other extensions) in mingw builds.
#
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=54412
#
if (MINGW)
message(STATUS "MINGW detected! Disabling avx2 and other CPU extensions")
set(USE_CPU_EXTENSIONS OFF)
endif()
if (ARCH_AMD64)
set (AWS_ARCH_INTEL 1)
elseif (ARCH_AARCH64)
set (AWS_ARCH_ARM64 1)
endif ()
if(NOT CMAKE_CROSSCOMPILING)
check_c_source_runs("
#include <stdbool.h>
bool foo(int a, int b, int *c) {
return __builtin_mul_overflow(a, b, c);
}
int main() {
int out;
if (foo(1, 2, &out)) {
return 0;
}
return 0;
}" AWS_HAVE_GCC_OVERFLOW_MATH_EXTENSIONS)
if (USE_CPU_EXTENSIONS)
check_c_source_runs("
int main() {
int foo = 42;
_mulx_u32(1, 2, &foo);
return foo != 2;
}" AWS_HAVE_MSVC_MULX)
endif()
endif()
check_c_source_compiles("
#include <Windows.h>
#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
int main() {
return 0;
}
#else
it's not windows desktop
#endif
" AWS_HAVE_WINAPI_DESKTOP)
check_c_source_compiles("
int main() {
#if !(defined(__x86_64__) || defined(__i386__) || defined(_M_X64) || defined(_M_IX86))
# error \"not intel\"
#endif
return 0;
}
" AWS_ARCH_INTEL)
check_c_source_compiles("
int main() {
#if !(defined(__aarch64__) || defined(_M_ARM64))
# error \"not arm64\"
#endif
return 0;
}
" AWS_ARCH_ARM64)
check_c_source_compiles("
int main() {
#if !(defined(__arm__) || defined(_M_ARM))
# error \"not arm\"
#endif
return 0;
}
" AWS_ARCH_ARM32)
check_c_source_compiles("
int main() {
int foo = 42, bar = 24;
__asm__ __volatile__(\"\":\"=r\"(foo):\"r\"(bar):\"memory\");
}" AWS_HAVE_GCC_INLINE_ASM)
check_c_source_compiles("
#include <sys/auxv.h>
int main() {
#ifdef __linux__
getauxval(AT_HWCAP);
getauxval(AT_HWCAP2);
#endif
return 0;
}" AWS_HAVE_AUXV)
string(REGEX MATCH "^(aarch64|arm)" ARM_CPU "${CMAKE_SYSTEM_PROCESSOR}")
if(NOT LEGACY_COMPILER_SUPPORT OR ARM_CPU)
check_c_source_compiles("
#include <execinfo.h>
int main() {
backtrace(NULL, 0);
return 0;
}" AWS_HAVE_EXECINFO)
endif()
check_c_source_compiles("
#include <linux/if_link.h>
int main() {
return 1;
}" AWS_HAVE_LINUX_IF_LINK_H)
set (AWS_HAVE_GCC_INLINE_ASM 1)
set (AWS_HAVE_AUXV 1)
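
The deleted probes relied on check_c_source_runs, which cannot work when cross-compiling (the test binary cannot execute on the build host), hence the hardcoded results above. The removed pattern, in miniature (HAVE_OVERFLOW_MATH_PROBE is an illustrative result variable):
include (CheckCSourceRuns)
# Compile, link, AND run the test program; only viable for native builds.
check_c_source_runs ("
    int main() {
        int out;
        return __builtin_mul_overflow(2, 3, &out) ? 1 : 0;
    }
" HAVE_OVERFLOW_MATH_PROBE)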

View File

@ -1,54 +1,13 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
include(CheckCCompilerFlag)
include(CheckIncludeFile)
if (USE_CPU_EXTENSIONS)
if (MSVC)
check_c_compiler_flag("/arch:AVX2" HAVE_M_AVX2_FLAG)
if (HAVE_M_AVX2_FLAG)
set(AVX2_CFLAGS "/arch:AVX2")
endif()
else()
check_c_compiler_flag(-mavx2 HAVE_M_AVX2_FLAG)
if (HAVE_M_AVX2_FLAG)
set(AVX2_CFLAGS "-mavx -mavx2")
endif()
if (ENABLE_AVX2)
set (AVX2_CFLAGS "-mavx -mavx2")
set (HAVE_AVX2_INTRINSICS 1)
set (HAVE_MM256_EXTRACT_EPI64 1)
endif()
cmake_push_check_state()
set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${AVX2_CFLAGS}")
check_c_source_compiles("
#include <immintrin.h>
#include <emmintrin.h>
#include <string.h>
int main() {
__m256i vec;
memset(&vec, 0, sizeof(vec));
_mm256_shuffle_epi8(vec, vec);
_mm256_set_epi32(1,2,3,4,5,6,7,8);
_mm256_permutevar8x32_epi32(vec, vec);
return 0;
}" HAVE_AVX2_INTRINSICS)
check_c_source_compiles("
#include <immintrin.h>
#include <string.h>
int main() {
__m256i vec;
memset(&vec, 0, sizeof(vec));
return (int)_mm256_extract_epi64(vec, 2);
}" HAVE_MM256_EXTRACT_EPI64)
cmake_pop_check_state()
endif() # USE_CPU_EXTENSIONS
endif()
macro(simd_add_definition_if target definition)
if(${definition})

View File

@ -1,50 +1,9 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
include(CheckSymbolExists)
# Check if the platform supports setting thread affinity
# (important for hitting full NIC entitlement on NUMA architectures)
function(aws_set_thread_affinity_method target)
# Non-POSIX, Android, and Apple platforms do not support thread affinity.
if (NOT UNIX OR ANDROID OR APPLE)
target_compile_definitions(${target} PRIVATE
-DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_NONE)
return()
endif()
cmake_push_check_state()
list(APPEND CMAKE_REQUIRED_DEFINITIONS -D_GNU_SOURCE)
list(APPEND CMAKE_REQUIRED_LIBRARIES pthread)
set(headers "pthread.h")
# BSDs put nonportable pthread declarations in a separate header.
if(CMAKE_SYSTEM_NAME MATCHES BSD)
set(headers "${headers};pthread_np.h")
endif()
# Using pthread attrs is the preferred method, but is glibc-specific.
check_symbol_exists(pthread_attr_setaffinity_np "${headers}" USE_PTHREAD_ATTR_SETAFFINITY)
if (USE_PTHREAD_ATTR_SETAFFINITY)
target_compile_definitions(${target} PRIVATE
-DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_PTHREAD_ATTR)
return()
endif()
# This method is still nonportable, but is supported by musl and BSDs.
check_symbol_exists(pthread_setaffinity_np "${headers}" USE_PTHREAD_SETAFFINITY)
if (USE_PTHREAD_SETAFFINITY)
target_compile_definitions(${target} PRIVATE
-DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_PTHREAD)
return()
endif()
# If we got here, we expected thread affinity support but didn't find it.
# We still build with degraded NUMA performance, but show a warning.
message(WARNING "No supported method for setting thread affinity")
target_compile_definitions(${target} PRIVATE
-DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_NONE)
cmake_pop_check_state()
# This code has been cut, because I don't care about it.
target_compile_definitions(${target} PRIVATE -DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_NONE)
endfunction()
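Usage stays the same after the simplification; a call such as the following (aws_c_io_objects is a placeholder target) now always pins the "none" affinity method:
# Every target now receives AWS_AFFINITY_METHOD_NONE regardless of platform support.
aws_set_thread_affinity_method (aws_c_io_objects)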

View File

@ -1,61 +1,13 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
include(CheckSymbolExists)
# Check how the platform supports setting the thread name
function(aws_set_thread_name_method target)
if (WINDOWS)
# On Windows we do a runtime check, instead of a compile-time check
return()
elseif (APPLE)
if (APPLE)
# All Apple platforms we support have the same function, so no need for a compile-time check.
return()
endif()
cmake_push_check_state()
list(APPEND CMAKE_REQUIRED_DEFINITIONS -D_GNU_SOURCE)
list(APPEND CMAKE_REQUIRED_LIBRARIES pthread)
# The start of the test program
set(c_source_start "
#define _GNU_SOURCE
#include <pthread.h>
#if defined(__FreeBSD__) || defined(__NETBSD__)
#include <pthread_np.h>
#endif
int main() {
pthread_t thread_id;
")
# The end of the test program
set(c_source_end "}")
# pthread_setname_np() usually takes 2 args
check_c_source_compiles("
${c_source_start}
pthread_setname_np(thread_id, \"asdf\");
${c_source_end}"
PTHREAD_SETNAME_TAKES_2ARGS)
if (PTHREAD_SETNAME_TAKES_2ARGS)
target_compile_definitions(${target} PRIVATE -DAWS_PTHREAD_SETNAME_TAKES_2ARGS)
return()
endif()
# But on NetBSD it takes 3!
check_c_source_compiles("
${c_source_start}
pthread_setname_np(thread_id, \"asdf\", NULL);
${c_source_end}
" PTHREAD_SETNAME_TAKES_3ARGS)
if (PTHREAD_SETNAME_TAKES_3ARGS)
target_compile_definitions(${target} PRIVATE -DAWS_PTHREAD_SETNAME_TAKES_3ARGS)
return()
endif()
# And on many older/weirder platforms it's just not supported
cmake_pop_check_state()
target_compile_definitions(${target} PRIVATE -DAWS_PTHREAD_SETNAME_TAKES_2ARGS)
endfunction()
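
For reference, the arity probe that the simplified function above now skips, since it assumes the common 2-argument glibc form (PTHREAD_SETNAME_PROBE is an illustrative result variable):
include (CheckCSourceCompiles)
# Succeeds where pthread_setname_np() takes (thread, name), as on glibc.
check_c_source_compiles ("
    #define _GNU_SOURCE
    #include <pthread.h>
    int main() {
        pthread_setname_np(pthread_self(), \"name\");
        return 0;
    }
" PTHREAD_SETNAME_PROBE)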

View File

@ -48,9 +48,8 @@ set(AZURE_SDK_INCLUDES
"${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-blobs/inc/"
)
include("${AZURE_DIR}/cmake-modules/AzureTransportAdapters.cmake")
add_library(_azure_sdk ${AZURE_SDK_UNIFIED_SRC})
target_compile_definitions(_azure_sdk PRIVATE BUILD_CURL_HTTP_TRANSPORT_ADAPTER)
# Originally, on Windows azure-core is built with bcrypt and crypt32 by default
if (TARGET OpenSSL::SSL)

View File

@ -68,8 +68,7 @@ list(APPEND INCLUDE_DIRS
${CASS_SRC_DIR}/third_party/hdr_histogram
${CASS_SRC_DIR}/third_party/http-parser
${CASS_SRC_DIR}/third_party/mt19937_64
${CASS_SRC_DIR}/third_party/rapidjson/rapidjson
${CASS_SRC_DIR}/third_party/sparsehash/src)
${CASS_SRC_DIR}/third_party/rapidjson/rapidjson)
list(APPEND INCLUDE_DIRS ${CASS_INCLUDE_DIR} ${CASS_SRC_DIR})
@ -83,10 +82,6 @@ set(HAVE_MEMCPY 1)
set(HAVE_LONG_LONG 1)
set(HAVE_UINT16_T 1)
configure_file("${CASS_SRC_DIR}/third_party/sparsehash/config.h.cmake" "${CMAKE_CURRENT_BINARY_DIR}/sparsehash/internal/sparseconfig.h")
# Determine random availability
if (OS_LINUX)
#set (HAVE_GETRANDOM 1) - not on every Linux kernel
@ -116,17 +111,17 @@ configure_file(
${CASS_ROOT_DIR}/driver_config.hpp.in
${CMAKE_CURRENT_BINARY_DIR}/driver_config.hpp)
add_library(_cassandra
${SOURCES}
$<TARGET_OBJECTS:_curl_hostcheck>
$<TARGET_OBJECTS:_hdr_histogram>
$<TARGET_OBJECTS:_http-parser>)
target_link_libraries(_cassandra ch_contrib::zlib ch_contrib::minizip)
target_link_libraries(_cassandra ch_contrib::zlib ch_contrib::minizip ch_contrib::sparsehash)
target_include_directories(_cassandra PRIVATE ${CMAKE_CURRENT_BINARY_DIR} ${INCLUDE_DIRS})
target_include_directories(_cassandra SYSTEM BEFORE PUBLIC ${CASS_INCLUDE_DIR})
target_compile_definitions(_cassandra PRIVATE CASS_BUILDING)
target_compile_definitions(_cassandra PRIVATE -DSPARSEHASH_HASH=std::hash -Dsparsehash=google)
target_link_libraries(_cassandra ch_contrib::uv)

@ -1 +1 @@
Subproject commit 7abd49bb2e72bf9a5029993d31dcb1872da88292
Subproject commit c3abaaefe5fa400eed99e082af07c1b61a7144db

View File

@ -13,12 +13,10 @@ set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/fastops")
set(SRCS "")
if(HAVE_AVX)
if(ARCH_AMD64)
set (SRCS ${SRCS} "${LIBRARY_DIR}/fastops/avx/ops_avx.cpp")
set_source_files_properties("${LIBRARY_DIR}/fastops/avx/ops_avx.cpp" PROPERTIES COMPILE_FLAGS "-mavx -DNO_AVX2")
endif()
if(HAVE_AVX2)
set (SRCS ${SRCS} "${LIBRARY_DIR}/fastops/avx2/ops_avx2.cpp")
set_source_files_properties("${LIBRARY_DIR}/fastops/avx2/ops_avx2.cpp" PROPERTIES COMPILE_FLAGS "-mavx2 -mfma")
endif()

@ -1 +1 @@
Subproject commit c47efe2d8f6a60022b49ecd6cc23660687c8598f
Subproject commit 0862007f6ca1f5723c58f10f0ca34f3f25a63b2e

View File

@ -20,7 +20,6 @@ endif()
set(protobuf_source_dir "${ClickHouse_SOURCE_DIR}/contrib/google-protobuf")
set(protobuf_binary_dir "${ClickHouse_BINARY_DIR}/contrib/google-protobuf")
add_definitions(-DGOOGLE_PROTOBUF_CMAKE_BUILD)
add_definitions(-DHAVE_PTHREAD)
@ -30,18 +29,69 @@ include_directories(
${protobuf_binary_dir}
${protobuf_source_dir}/src)
add_library(utf8_range
${protobuf_source_dir}/third_party/utf8_range/naive.c
${protobuf_source_dir}/third_party/utf8_range/range2-neon.c
${protobuf_source_dir}/third_party/utf8_range/range2-sse.c
)
include_directories(${protobuf_source_dir}/third_party/utf8_range)
add_library(utf8_validity
${protobuf_source_dir}/third_party/utf8_range/utf8_validity.cc
)
target_link_libraries(utf8_validity PUBLIC absl::strings)
set(protobuf_absl_used_targets
absl::absl_check
absl::absl_log
absl::algorithm
absl::base
absl::bind_front
absl::bits
absl::btree
absl::cleanup
absl::cord
absl::core_headers
absl::debugging
absl::die_if_null
absl::dynamic_annotations
absl::flags
absl::flat_hash_map
absl::flat_hash_set
absl::function_ref
absl::hash
absl::layout
absl::log_initialize
absl::log_severity
absl::memory
absl::node_hash_map
absl::node_hash_set
absl::optional
absl::span
absl::status
absl::statusor
absl::strings
absl::synchronization
absl::time
absl::type_traits
absl::utility
absl::variant
)
set(libprotobuf_lite_files
${protobuf_source_dir}/src/google/protobuf/any_lite.cc
${protobuf_source_dir}/src/google/protobuf/arena.cc
${protobuf_source_dir}/src/google/protobuf/arena_align.cc
${protobuf_source_dir}/src/google/protobuf/arenastring.cc
${protobuf_source_dir}/src/google/protobuf/arenaz_sampler.cc
${protobuf_source_dir}/src/google/protobuf/extension_set.cc
${protobuf_source_dir}/src/google/protobuf/generated_enum_util.cc
${protobuf_source_dir}/src/google/protobuf/generated_message_table_driven_lite.cc
${protobuf_source_dir}/src/google/protobuf/generated_message_tctable_lite.cc
${protobuf_source_dir}/src/google/protobuf/generated_message_util.cc
${protobuf_source_dir}/src/google/protobuf/implicit_weak_message.cc
${protobuf_source_dir}/src/google/protobuf/inlined_string_field.cc
${protobuf_source_dir}/src/google/protobuf/io/coded_stream.cc
${protobuf_source_dir}/src/google/protobuf/io/io_win32.cc
${protobuf_source_dir}/src/google/protobuf/io/strtod.cc
${protobuf_source_dir}/src/google/protobuf/io/zero_copy_stream.cc
${protobuf_source_dir}/src/google/protobuf/io/zero_copy_stream_impl.cc
${protobuf_source_dir}/src/google/protobuf/io/zero_copy_stream_impl_lite.cc
@ -49,21 +99,15 @@ set(libprotobuf_lite_files
${protobuf_source_dir}/src/google/protobuf/message_lite.cc
${protobuf_source_dir}/src/google/protobuf/parse_context.cc
${protobuf_source_dir}/src/google/protobuf/repeated_field.cc
${protobuf_source_dir}/src/google/protobuf/stubs/bytestream.cc
${protobuf_source_dir}/src/google/protobuf/repeated_ptr_field.cc
${protobuf_source_dir}/src/google/protobuf/stubs/common.cc
${protobuf_source_dir}/src/google/protobuf/stubs/int128.cc
${protobuf_source_dir}/src/google/protobuf/stubs/status.cc
${protobuf_source_dir}/src/google/protobuf/stubs/statusor.cc
${protobuf_source_dir}/src/google/protobuf/stubs/stringpiece.cc
${protobuf_source_dir}/src/google/protobuf/stubs/stringprintf.cc
${protobuf_source_dir}/src/google/protobuf/stubs/structurally_valid.cc
${protobuf_source_dir}/src/google/protobuf/stubs/strutil.cc
${protobuf_source_dir}/src/google/protobuf/stubs/time.cc
${protobuf_source_dir}/src/google/protobuf/wire_format_lite.cc
)
add_library(_libprotobuf-lite ${libprotobuf_lite_files})
target_link_libraries(_libprotobuf-lite pthread)
target_link_libraries(_libprotobuf-lite
pthread
utf8_validity)
if(${CMAKE_SYSTEM_NAME} STREQUAL "Android")
target_link_libraries(_libprotobuf-lite log)
endif()
@ -72,67 +116,93 @@ add_library(protobuf::libprotobuf-lite ALIAS _libprotobuf-lite)
set(libprotobuf_files
${protobuf_source_dir}/src/google/protobuf/any.cc
${protobuf_source_dir}/src/google/protobuf/any.pb.cc
${protobuf_source_dir}/src/google/protobuf/api.pb.cc
${protobuf_source_dir}/src/google/protobuf/duration.pb.cc
${protobuf_source_dir}/src/google/protobuf/empty.pb.cc
${protobuf_source_dir}/src/google/protobuf/field_mask.pb.cc
${protobuf_source_dir}/src/google/protobuf/source_context.pb.cc
${protobuf_source_dir}/src/google/protobuf/struct.pb.cc
${protobuf_source_dir}/src/google/protobuf/timestamp.pb.cc
${protobuf_source_dir}/src/google/protobuf/type.pb.cc
${protobuf_source_dir}/src/google/protobuf/wrappers.pb.cc
${protobuf_source_dir}/src/google/protobuf/any.cc
${protobuf_source_dir}/src/google/protobuf/any_lite.cc
${protobuf_source_dir}/src/google/protobuf/arena.cc
${protobuf_source_dir}/src/google/protobuf/arena_align.cc
${protobuf_source_dir}/src/google/protobuf/arenastring.cc
${protobuf_source_dir}/src/google/protobuf/arenaz_sampler.cc
${protobuf_source_dir}/src/google/protobuf/compiler/importer.cc
${protobuf_source_dir}/src/google/protobuf/compiler/parser.cc
${protobuf_source_dir}/src/google/protobuf/cpp_features.pb.cc
${protobuf_source_dir}/src/google/protobuf/descriptor.cc
${protobuf_source_dir}/src/google/protobuf/descriptor.pb.cc
${protobuf_source_dir}/src/google/protobuf/descriptor_database.cc
${protobuf_source_dir}/src/google/protobuf/duration.pb.cc
${protobuf_source_dir}/src/google/protobuf/dynamic_message.cc
${protobuf_source_dir}/src/google/protobuf/empty.pb.cc
${protobuf_source_dir}/src/google/protobuf/extension_set.cc
${protobuf_source_dir}/src/google/protobuf/extension_set_heavy.cc
${protobuf_source_dir}/src/google/protobuf/field_mask.pb.cc
${protobuf_source_dir}/src/google/protobuf/feature_resolver.cc
${protobuf_source_dir}/src/google/protobuf/generated_enum_util.cc
${protobuf_source_dir}/src/google/protobuf/generated_message_bases.cc
${protobuf_source_dir}/src/google/protobuf/generated_message_reflection.cc
${protobuf_source_dir}/src/google/protobuf/generated_message_table_driven.cc
${protobuf_source_dir}/src/google/protobuf/generated_message_tctable_full.cc
${protobuf_source_dir}/src/google/protobuf/generated_message_tctable_gen.cc
${protobuf_source_dir}/src/google/protobuf/generated_message_tctable_lite.cc
${protobuf_source_dir}/src/google/protobuf/generated_message_util.cc
${protobuf_source_dir}/src/google/protobuf/implicit_weak_message.cc
${protobuf_source_dir}/src/google/protobuf/inlined_string_field.cc
${protobuf_source_dir}/src/google/protobuf/io/coded_stream.cc
${protobuf_source_dir}/src/google/protobuf/io/gzip_stream.cc
${protobuf_source_dir}/src/google/protobuf/io/io_win32.cc
${protobuf_source_dir}/src/google/protobuf/io/printer.cc
${protobuf_source_dir}/src/google/protobuf/io/strtod.cc
${protobuf_source_dir}/src/google/protobuf/io/tokenizer.cc
${protobuf_source_dir}/src/google/protobuf/io/zero_copy_sink.cc
${protobuf_source_dir}/src/google/protobuf/io/zero_copy_stream.cc
${protobuf_source_dir}/src/google/protobuf/io/zero_copy_stream_impl.cc
${protobuf_source_dir}/src/google/protobuf/io/zero_copy_stream_impl_lite.cc
${protobuf_source_dir}/src/google/protobuf/json/internal/lexer.cc
${protobuf_source_dir}/src/google/protobuf/json/internal/message_path.cc
${protobuf_source_dir}/src/google/protobuf/json/internal/parser.cc
${protobuf_source_dir}/src/google/protobuf/json/internal/unparser.cc
${protobuf_source_dir}/src/google/protobuf/json/internal/untyped_message.cc
${protobuf_source_dir}/src/google/protobuf/json/internal/writer.cc
${protobuf_source_dir}/src/google/protobuf/json/internal/zero_copy_buffered_stream.cc
${protobuf_source_dir}/src/google/protobuf/json/json.cc
${protobuf_source_dir}/src/google/protobuf/map.cc
${protobuf_source_dir}/src/google/protobuf/map_field.cc
${protobuf_source_dir}/src/google/protobuf/message.cc
${protobuf_source_dir}/src/google/protobuf/message_lite.cc
${protobuf_source_dir}/src/google/protobuf/parse_context.cc
${protobuf_source_dir}/src/google/protobuf/port.cc
${protobuf_source_dir}/src/google/protobuf/raw_ptr.cc
${protobuf_source_dir}/src/google/protobuf/reflection_mode.cc
${protobuf_source_dir}/src/google/protobuf/reflection_ops.cc
${protobuf_source_dir}/src/google/protobuf/repeated_field.cc
${protobuf_source_dir}/src/google/protobuf/repeated_ptr_field.cc
${protobuf_source_dir}/src/google/protobuf/service.cc
${protobuf_source_dir}/src/google/protobuf/source_context.pb.cc
${protobuf_source_dir}/src/google/protobuf/struct.pb.cc
${protobuf_source_dir}/src/google/protobuf/stubs/substitute.cc
${protobuf_source_dir}/src/google/protobuf/stubs/common.cc
${protobuf_source_dir}/src/google/protobuf/text_format.cc
${protobuf_source_dir}/src/google/protobuf/timestamp.pb.cc
${protobuf_source_dir}/src/google/protobuf/type.pb.cc
${protobuf_source_dir}/src/google/protobuf/unknown_field_set.cc
${protobuf_source_dir}/src/google/protobuf/util/delimited_message_util.cc
${protobuf_source_dir}/src/google/protobuf/util/field_comparator.cc
${protobuf_source_dir}/src/google/protobuf/util/field_mask_util.cc
${protobuf_source_dir}/src/google/protobuf/util/internal/datapiece.cc
${protobuf_source_dir}/src/google/protobuf/util/internal/default_value_objectwriter.cc
${protobuf_source_dir}/src/google/protobuf/util/internal/error_listener.cc
${protobuf_source_dir}/src/google/protobuf/util/internal/field_mask_utility.cc
${protobuf_source_dir}/src/google/protobuf/util/internal/json_escaping.cc
${protobuf_source_dir}/src/google/protobuf/util/internal/json_objectwriter.cc
${protobuf_source_dir}/src/google/protobuf/util/internal/json_stream_parser.cc
${protobuf_source_dir}/src/google/protobuf/util/internal/object_writer.cc
${protobuf_source_dir}/src/google/protobuf/util/internal/proto_writer.cc
${protobuf_source_dir}/src/google/protobuf/util/internal/protostream_objectsource.cc
${protobuf_source_dir}/src/google/protobuf/util/internal/protostream_objectwriter.cc
${protobuf_source_dir}/src/google/protobuf/util/internal/type_info.cc
${protobuf_source_dir}/src/google/protobuf/util/internal/type_info_test_helper.cc
${protobuf_source_dir}/src/google/protobuf/util/internal/utility.cc
${protobuf_source_dir}/src/google/protobuf/util/json_util.cc
${protobuf_source_dir}/src/google/protobuf/util/message_differencer.cc
${protobuf_source_dir}/src/google/protobuf/util/time_util.cc
${protobuf_source_dir}/src/google/protobuf/util/type_resolver_util.cc
${protobuf_source_dir}/src/google/protobuf/wire_format.cc
${protobuf_source_dir}/src/google/protobuf/wrappers.pb.cc
${protobuf_source_dir}/src/google/protobuf/wire_format_lite.cc
)
add_library(_libprotobuf ${libprotobuf_lite_files} ${libprotobuf_files})
if (ENABLE_FUZZING)
target_compile_options(_libprotobuf PRIVATE "-fsanitize-recover=all")
endif()
target_link_libraries(_libprotobuf pthread)
target_link_libraries(_libprotobuf ch_contrib::zlib)
target_link_libraries(_libprotobuf
pthread
ch_contrib::zlib
utf8_validity
${protobuf_absl_used_targets})
if(${CMAKE_SYSTEM_NAME} STREQUAL "Android")
target_link_libraries(_libprotobuf log)
endif()
@ -141,23 +211,26 @@ add_library(protobuf::libprotobuf ALIAS _libprotobuf)
set(libprotoc_files
${protobuf_source_dir}/src/google/protobuf/compiler/allowlists/editions.cc
${protobuf_source_dir}/src/google/protobuf/compiler/code_generator.cc
${protobuf_source_dir}/src/google/protobuf/compiler/command_line_interface.cc
${protobuf_source_dir}/src/google/protobuf/compiler/cpp/cpp_enum.cc
${protobuf_source_dir}/src/google/protobuf/compiler/cpp/cpp_enum_field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/cpp/cpp_extension.cc
${protobuf_source_dir}/src/google/protobuf/compiler/cpp/cpp_field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/cpp/cpp_file.cc
${protobuf_source_dir}/src/google/protobuf/compiler/cpp/cpp_generator.cc
${protobuf_source_dir}/src/google/protobuf/compiler/cpp/cpp_helpers.cc
${protobuf_source_dir}/src/google/protobuf/compiler/cpp/cpp_map_field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/cpp/cpp_message.cc
${protobuf_source_dir}/src/google/protobuf/compiler/cpp/cpp_message_field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/cpp/cpp_padding_optimizer.cc
${protobuf_source_dir}/src/google/protobuf/compiler/cpp/cpp_parse_function_generator.cc
${protobuf_source_dir}/src/google/protobuf/compiler/cpp/cpp_primitive_field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/cpp/cpp_service.cc
${protobuf_source_dir}/src/google/protobuf/compiler/cpp/cpp_string_field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/cpp/enum.cc
${protobuf_source_dir}/src/google/protobuf/compiler/cpp/extension.cc
${protobuf_source_dir}/src/google/protobuf/compiler/cpp/field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/cpp/field_generators/cord_field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/cpp/field_generators/enum_field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/cpp/field_generators/map_field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/cpp/field_generators/message_field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/cpp/field_generators/primitive_field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/cpp/field_generators/string_field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/cpp/file.cc
${protobuf_source_dir}/src/google/protobuf/compiler/cpp/generator.cc
${protobuf_source_dir}/src/google/protobuf/compiler/cpp/helpers.cc
${protobuf_source_dir}/src/google/protobuf/compiler/cpp/message.cc
${protobuf_source_dir}/src/google/protobuf/compiler/cpp/padding_optimizer.cc
${protobuf_source_dir}/src/google/protobuf/compiler/cpp/parse_function_generator.cc
${protobuf_source_dir}/src/google/protobuf/compiler/cpp/service.cc
${protobuf_source_dir}/src/google/protobuf/compiler/cpp/tracker.cc
${protobuf_source_dir}/src/google/protobuf/compiler/csharp/csharp_doc_comment.cc
${protobuf_source_dir}/src/google/protobuf/compiler/csharp/csharp_enum.cc
${protobuf_source_dir}/src/google/protobuf/compiler/csharp/csharp_enum_field.cc
@ -174,60 +247,78 @@ set(libprotoc_files
${protobuf_source_dir}/src/google/protobuf/compiler/csharp/csharp_repeated_primitive_field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/csharp/csharp_source_generator_base.cc
${protobuf_source_dir}/src/google/protobuf/compiler/csharp/csharp_wrapper_field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/java_context.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/java_doc_comment.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/java_enum.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/java_enum_field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/java_enum_field_lite.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/java_enum_lite.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/java_extension.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/java_extension_lite.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/java_field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/java_file.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/java_generator.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/java_generator_factory.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/java_helpers.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/java_kotlin_generator.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/java_map_field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/java_map_field_lite.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/java_message.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/java_message_builder.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/java_message_builder_lite.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/java_message_field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/java_message_field_lite.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/java_message_lite.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/java_name_resolver.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/java_primitive_field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/java_primitive_field_lite.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/java_service.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/java_shared_code_generator.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/java_string_field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/java_string_field_lite.cc
${protobuf_source_dir}/src/google/protobuf/compiler/js/js_generator.cc
${protobuf_source_dir}/src/google/protobuf/compiler/js/well_known_types_embed.cc
${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_enum.cc
${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_enum_field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_extension.cc
${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_file.cc
${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_generator.cc
${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_helpers.cc
${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_map_field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_message.cc
${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_message_field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_oneof.cc
${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_primitive_field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/csharp/names.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/context.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/doc_comment.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/enum.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/enum_field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/enum_field_lite.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/enum_lite.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/extension.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/extension_lite.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/file.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/generator.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/generator_factory.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/helpers.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/kotlin_generator.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/map_field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/map_field_lite.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/message.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/message_builder.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/message_builder_lite.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/message_field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/message_field_lite.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/message_lite.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/message_serialization.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/name_resolver.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/names.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/primitive_field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/primitive_field_lite.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/service.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/shared_code_generator.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/string_field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/java/string_field_lite.cc
${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/enum.cc
${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/enum_field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/extension.cc
${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/file.cc
${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/generator.cc
${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/helpers.cc
${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/import_writer.cc
${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/line_consumer.cc
${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/map_field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/message.cc
${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/message_field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/names.cc
${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/oneof.cc
${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/primitive_field.cc
${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/text_format_decode_data.cc
${protobuf_source_dir}/src/google/protobuf/compiler/php/names.cc
${protobuf_source_dir}/src/google/protobuf/compiler/php/php_generator.cc
${protobuf_source_dir}/src/google/protobuf/compiler/plugin.cc
${protobuf_source_dir}/src/google/protobuf/compiler/plugin.pb.cc
${protobuf_source_dir}/src/google/protobuf/compiler/python/python_generator.cc
${protobuf_source_dir}/src/google/protobuf/compiler/python/generator.cc
${protobuf_source_dir}/src/google/protobuf/compiler/python/helpers.cc
${protobuf_source_dir}/src/google/protobuf/compiler/python/pyi_generator.cc
${protobuf_source_dir}/src/google/protobuf/compiler/retention.cc
${protobuf_source_dir}/src/google/protobuf/compiler/ruby/ruby_generator.cc
${protobuf_source_dir}/src/google/protobuf/compiler/rust/accessors/accessors.cc
${protobuf_source_dir}/src/google/protobuf/compiler/rust/accessors/singular_bytes.cc
${protobuf_source_dir}/src/google/protobuf/compiler/rust/accessors/singular_scalar.cc
${protobuf_source_dir}/src/google/protobuf/compiler/rust/context.cc
${protobuf_source_dir}/src/google/protobuf/compiler/rust/generator.cc
${protobuf_source_dir}/src/google/protobuf/compiler/rust/message.cc
${protobuf_source_dir}/src/google/protobuf/compiler/rust/naming.cc
${protobuf_source_dir}/src/google/protobuf/compiler/subprocess.cc
${protobuf_source_dir}/src/google/protobuf/compiler/zip_writer.cc
)
add_library(_libprotoc ${libprotoc_files})
target_link_libraries(_libprotoc _libprotobuf)
target_link_libraries(_libprotoc
_libprotobuf
${protobuf_absl_used_targets})
add_library(protobuf::libprotoc ALIAS _libprotoc)
set(protoc_files ${protobuf_source_dir}/src/google/protobuf/compiler/main.cc)
@ -236,7 +327,11 @@ if (CMAKE_HOST_SYSTEM_NAME STREQUAL CMAKE_SYSTEM_NAME
AND CMAKE_HOST_SYSTEM_PROCESSOR STREQUAL CMAKE_SYSTEM_PROCESSOR)
add_executable(protoc ${protoc_files})
target_link_libraries(protoc _libprotoc _libprotobuf pthread)
target_link_libraries(protoc _libprotoc
_libprotobuf
pthread
utf8_validity
${protobuf_absl_used_targets})
add_executable(protobuf::protoc ALIAS protoc)
if (ENABLE_FUZZING)
@ -256,6 +351,8 @@ else ()
# This is quite ugly but I cannot make dependencies work properly.
set(abseil_source_dir "${ClickHouse_SOURCE_DIR}/contrib/abseil-cpp")
execute_process(
COMMAND mkdir -p ${PROTOC_BUILD_DIR}
COMMAND_ECHO STDOUT)
@ -270,7 +367,9 @@ else ()
"-Dprotobuf_BUILD_CONFORMANCE=0"
"-Dprotobuf_BUILD_EXAMPLES=0"
"-Dprotobuf_BUILD_PROTOC_BINARIES=1"
"${protobuf_source_dir}/cmake"
"-DABSL_ROOT_DIR=${abseil_source_dir}"
"-DABSL_ENABLE_INSTALL=0"
"${protobuf_source_dir}"
WORKING_DIRECTORY "${PROTOC_BUILD_DIR}"
COMMAND_ECHO STDOUT)
@ -279,38 +378,6 @@ else ()
COMMAND_ECHO STDOUT)
endif ()
# add_custom_command (
# OUTPUT ${PROTOC_BUILD_DIR}
# COMMAND mkdir -p ${PROTOC_BUILD_DIR})
#
# add_custom_command (
# OUTPUT "${PROTOC_BUILD_DIR}/CMakeCache.txt"
#
# COMMAND ${CMAKE_COMMAND}
# -G"${CMAKE_GENERATOR}"
# -DCMAKE_MAKE_PROGRAM="${CMAKE_MAKE_PROGRAM}"
# -DCMAKE_C_COMPILER="${CMAKE_C_COMPILER}"
# -DCMAKE_CXX_COMPILER="${CMAKE_CXX_COMPILER}"
# -Dprotobuf_BUILD_TESTS=0
# -Dprotobuf_BUILD_CONFORMANCE=0
# -Dprotobuf_BUILD_EXAMPLES=0
# -Dprotobuf_BUILD_PROTOC_BINARIES=1
# "${protobuf_source_dir}/cmake"
#
# DEPENDS "${PROTOC_BUILD_DIR}"
# WORKING_DIRECTORY "${PROTOC_BUILD_DIR}"
# COMMENT "Configuring 'protoc' for host architecture."
# USES_TERMINAL)
#
# add_custom_command (
# OUTPUT "${PROTOC_BUILD_DIR}/protoc"
# COMMAND ${CMAKE_COMMAND} --build "${PROTOC_BUILD_DIR}"
# DEPENDS "${PROTOC_BUILD_DIR}/CMakeCache.txt"
# COMMENT "Building 'protoc' for host architecture."
# USES_TERMINAL)
#
# add_custom_target (protoc-host DEPENDS "${PROTOC_BUILD_DIR}/protoc")
add_executable(protoc IMPORTED GLOBAL)
set_target_properties (protoc PROPERTIES IMPORTED_LOCATION "${PROTOC_BUILD_DIR}/protoc")
add_dependencies(protoc "${PROTOC_BUILD_DIR}/protoc")
@ -318,9 +385,25 @@ endif ()
include("${ClickHouse_SOURCE_DIR}/contrib/google-protobuf-cmake/protobuf_generate.cmake")
# These files needs to be installed to make it possible that users can use well-known protobuf types
set(google_proto_files
${protobuf_source_dir}/src/google/protobuf/any.proto
${protobuf_source_dir}/src/google/protobuf/api.proto
${protobuf_source_dir}/src/google/protobuf/descriptor.proto
${protobuf_source_dir}/src/google/protobuf/duration.proto
${protobuf_source_dir}/src/google/protobuf/empty.proto
${protobuf_source_dir}/src/google/protobuf/field_mask.proto
${protobuf_source_dir}/src/google/protobuf/source_context.proto
${protobuf_source_dir}/src/google/protobuf/struct.proto
${protobuf_source_dir}/src/google/protobuf/timestamp.proto
${protobuf_source_dir}/src/google/protobuf/type.proto
${protobuf_source_dir}/src/google/protobuf/wrappers.proto
)
add_library(_protobuf INTERFACE)
target_link_libraries(_protobuf INTERFACE _libprotobuf)
target_include_directories(_protobuf INTERFACE "${Protobuf_INCLUDE_DIR}")
set_target_properties(_protobuf PROPERTIES google_proto_files "${google_proto_files}")
add_library(ch_contrib::protobuf ALIAS _protobuf)
add_library(_protoc INTERFACE)

2
contrib/grpc vendored

@ -1 +1 @@
Subproject commit 3f975ecab377cd5f739af780566596128f17bb74
Subproject commit 77b2737a709d43d8c6895e3f03ca62b00bd9201c

View File

@ -9,50 +9,14 @@ endif()
set(_gRPC_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/grpc")
set(_gRPC_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/grpc")
# Use re2 from ClickHouse contrib, not from gRPC third_party.
set(gRPC_RE2_PROVIDER "clickhouse" CACHE STRING "" FORCE)
set(_gRPC_RE2_INCLUDE_DIR "")
set(_gRPC_RE2_LIBRARIES ch_contrib::re2)
# Use zlib from ClickHouse contrib, not from gRPC third_party.
set(gRPC_ZLIB_PROVIDER "clickhouse" CACHE STRING "" FORCE)
set(_gRPC_ZLIB_INCLUDE_DIR "")
set(_gRPC_ZLIB_LIBRARIES ch_contrib::zlib)
# Use protobuf from ClickHouse contrib, not from gRPC third_party.
set(gRPC_PROTOBUF_PROVIDER "clickhouse" CACHE STRING "" FORCE)
set(_gRPC_PROTOBUF_LIBRARIES ch_contrib::protobuf)
set(_gRPC_PROTOBUF_PROTOC "protoc")
set(_gRPC_PROTOBUF_PROTOC_EXECUTABLE $<TARGET_FILE:protoc>)
set(_gRPC_PROTOBUF_PROTOC_LIBRARIES ch_contrib::protoc)
if(TARGET OpenSSL::SSL)
set(gRPC_USE_UNSECURE_LIBRARIES FALSE)
else()
set(gRPC_USE_UNSECURE_LIBRARIES TRUE)
endif()
# Use OpenSSL from ClickHouse contrib, not from gRPC third_party.
set(gRPC_SSL_PROVIDER "clickhouse" CACHE STRING "" FORCE)
set(_gRPC_SSL_INCLUDE_DIR "")
set(_gRPC_SSL_LIBRARIES OpenSSL::Crypto OpenSSL::SSL)
# Use abseil-cpp from ClickHouse contrib, not from gRPC third_party.
set(gRPC_ABSL_PROVIDER "clickhouse" CACHE STRING "" FORCE)
# We don't want to build C# extensions.
set(gRPC_BUILD_CSHARP_EXT OFF)
# TODO: Remove this. We generally like to compile with C++23 but grpc isn't ready yet.
set (CMAKE_CXX_STANDARD 20)
set(_gRPC_CARES_LIBRARIES ch_contrib::c-ares)
set(gRPC_CARES_PROVIDER "clickhouse" CACHE STRING "" FORCE)
add_subdirectory("${_gRPC_SOURCE_DIR}" "${_gRPC_BINARY_DIR}")
# The contrib/grpc/CMakeLists.txt redefined the PROTOBUF_GENERATE_GRPC_CPP() function for its own purposes,
# so we need to redefine it back.
include("${ClickHouse_SOURCE_DIR}/contrib/grpc-cmake/protobuf_generate_grpc.cmake")
include(grpc.cmake)
include(protobuf_generate_grpc.cmake)
set(gRPC_CPP_PLUGIN $<TARGET_FILE:grpc_cpp_plugin>)
set(gRPC_PYTHON_PLUGIN $<TARGET_FILE:grpc_python_plugin>)

File diff suppressed because it is too large

View File

@ -3,4 +3,4 @@ It allows to integrate JEMalloc into CMake project.
- Remove JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF because it's non standard.
- Added JEMALLOC_CONFIG_MALLOC_CONF substitution
- Add musl support (USE_MUSL)
- Also note, that darwin build requires JEMALLOC_PREFIX, while others don not
- Also note, that darwin build requires JEMALLOC_PREFIX, while others do not

View File

@ -1,5 +1,3 @@
include(CheckCXXCompilerFlag)
set(LIBCXX_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/llvm-project/libcxx")
set(SRCS

2
contrib/libhdfs3 vendored

@ -1 +1 @@
Subproject commit 377220ef351ae24994a5fcd2b5fa3930d00c4db0
Subproject commit bdcb91354b1c05b21e73043a112a6f1e3b013497

View File

@ -1,4 +1,4 @@
if(NOT OS_FREEBSD AND NOT APPLE AND NOT ARCH_PPC64LE AND NOT ARCH_S390X)
if(NOT OS_FREEBSD AND NOT APPLE AND NOT ARCH_PPC64LE)
option(ENABLE_HDFS "Enable HDFS" ${ENABLE_LIBRARIES})
elseif(ENABLE_HDFS)
message (${RECONFIGURE_MESSAGE_LEVEL} "Cannot use HDFS3 with current configuration")

2
contrib/libpqxx vendored

@ -1 +1 @@
Subproject commit 791d68fd89902835133c50435e380ec7a73271b7
Subproject commit c995193a3a14d71f4711f1f421f65a1a1db64640

View File

@ -1,11 +1,12 @@
option (ENABLE_SSH "Enable support for SSH keys and protocol" ON)
if (NOT ENABLE_SSH)
message(STATUS "Not using SSH")
return()
endif()
set(LIB_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libssh")
set(LIB_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/libssh")
# Specify search path for CMake modules to be loaded by include()
# and find_package()
list(APPEND CMAKE_MODULE_PATH "${LIB_SOURCE_DIR}/cmake/Modules")
include(DefineCMakeDefaults)
include(DefineCompilerFlags)
project(libssh VERSION 0.9.7 LANGUAGES C)
@ -22,12 +23,6 @@ set(APPLICATION_NAME ${PROJECT_NAME})
set(LIBRARY_VERSION "4.8.7")
set(LIBRARY_SOVERSION "4")
# where to look first for cmake modules, before ${CMAKE_ROOT}/Modules/ is checked
# add definitions
include(DefinePlatformDefaults)
# Copy library files to a lib sub-directory
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${LIB_BINARY_DIR}/lib")

View File

@ -1,20 +1,8 @@
set(LIBSSH_LINK_LIBRARIES
${LIBSSH_REQUIRED_LIBRARIES}
)
set(LIBSSH_LINK_LIBRARIES
${LIBSSH_LINK_LIBRARIES}
OpenSSL::Crypto
)
if (MINGW AND Threads_FOUND)
set(LIBSSH_LINK_LIBRARIES
${LIBSSH_LINK_LIBRARIES}
Threads::Threads
)
endif()
set(libssh_SRCS
${LIB_SOURCE_DIR}/src/agent.c
${LIB_SOURCE_DIR}/src/auth.c
@ -66,30 +54,11 @@ set(libssh_SRCS
${LIB_SOURCE_DIR}/src/pki_ed25519_common.c
)
if (DEFAULT_C_NO_DEPRECATION_FLAGS)
set_source_files_properties(known_hosts.c
PROPERTIES
COMPILE_FLAGS ${DEFAULT_C_NO_DEPRECATION_FLAGS})
endif()
if (CMAKE_USE_PTHREADS_INIT)
set(libssh_SRCS
${libssh_SRCS}
${LIB_SOURCE_DIR}/src/threads/noop.c
${LIB_SOURCE_DIR}/src/threads/pthread.c
)
elseif (CMAKE_USE_WIN32_THREADS_INIT)
set(libssh_SRCS
${libssh_SRCS}
${LIB_SOURCE_DIR}/src/threads/noop.c
${LIB_SOURCE_DIR}/src/threads/winlocks.c
)
else()
set(libssh_SRCS
${libssh_SRCS}
${LIB_SOURCE_DIR}/src/threads/noop.c
)
endif()
set(libssh_SRCS
${libssh_SRCS}
${LIB_SOURCE_DIR}/src/threads/noop.c
${LIB_SOURCE_DIR}/src/threads/pthread.c
)
# LIBCRYPT specific
set(libssh_SRCS
@ -127,14 +96,3 @@ target_compile_options(_ssh
PRIVATE
${DEFAULT_C_COMPILE_FLAGS}
-D_GNU_SOURCE)
set_target_properties(_ssh
PROPERTIES
VERSION
${LIBRARY_VERSION}
SOVERSION
${LIBRARY_SOVERSION}
DEFINE_SYMBOL
LIBSSH_EXPORTS
)

2
contrib/libunwind vendored

@ -1 +1 @@
Subproject commit 30cc1d3fd3655a5cfa0ab112fe320fb9fc0a8344
Subproject commit 40d8eadf96b127d9b22d53ce7a4fc52aaedea965

View File

@ -1,6 +1,3 @@
include(CheckCCompilerFlag)
include(CheckCXXCompilerFlag)
set(LIBUNWIND_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libunwind")
set(LIBUNWIND_CXX_SOURCES
@ -23,15 +20,7 @@ set(LIBUNWIND_ASM_SOURCES
"${LIBUNWIND_SOURCE_DIR}/src/UnwindRegistersRestore.S"
"${LIBUNWIND_SOURCE_DIR}/src/UnwindRegistersSave.S")
# CMake doesn't pass the correct architecture for Apple prior to CMake 3.19 [1]
# Workaround these two issues by compiling as C.
#
# [1]: https://gitlab.kitware.com/cmake/cmake/-/issues/20771
if (APPLE AND CMAKE_VERSION VERSION_LESS 3.19)
set_source_files_properties(${LIBUNWIND_ASM_SOURCES} PROPERTIES LANGUAGE C)
else()
enable_language(ASM)
endif()
enable_language(ASM)
set(LIBUNWIND_SOURCES
${LIBUNWIND_CXX_SOURCES}
@ -48,27 +37,11 @@ target_compile_definitions(unwind PRIVATE -D_LIBUNWIND_NO_HEAP=1 -D_DEBUG -D_LIB
# and disable sanitizers (otherwise infinite loop may happen)
target_compile_options(unwind PRIVATE -O3 -fno-exceptions -funwind-tables -fno-sanitize=all $<$<COMPILE_LANGUAGE:CXX>:-nostdinc++ -fno-rtti>)
check_c_compiler_flag(-Wunused-but-set-variable HAVE_WARNING_UNUSED_BUT_SET_VARIABLE)
if (HAVE_WARNING_UNUSED_BUT_SET_VARIABLE)
target_compile_options(unwind PRIVATE -Wno-unused-but-set-variable)
endif ()
check_cxx_compiler_flag(-Wmissing-attributes HAVE_WARNING_MISSING_ATTRIBUTES)
if (HAVE_WARNING_MISSING_ATTRIBUTES)
target_compile_options(unwind PRIVATE -Wno-missing-attributes)
endif ()
check_cxx_compiler_flag(-Wmaybe-uninitialized HAVE_WARNING_MAYBE_UNINITIALIZED)
if (HAVE_WARNING_MAYBE_UNINITIALIZED)
target_compile_options(unwind PRIVATE -Wno-maybe-uninitialized)
endif ()
target_compile_options(unwind PRIVATE -Wno-unused-but-set-variable)
# The library is using register variables that are bound to specific registers
# Example: DwarfInstructions.hpp: register unsigned long long x16 __asm("x16") = cfa;
check_cxx_compiler_flag(-Wregister HAVE_WARNING_REGISTER)
if (HAVE_WARNING_REGISTER)
target_compile_options(unwind PRIVATE "$<$<COMPILE_LANGUAGE:CXX>:-Wno-register>")
endif ()
target_compile_options(unwind PRIVATE "$<$<COMPILE_LANGUAGE:CXX>:-Wno-register>")
install(
TARGETS unwind

View File

@ -61,6 +61,9 @@ set (REQUIRED_LLVM_LIBRARIES
LLVMDemangle
)
# Skip useless "install" instructions from CMake:
set (LLVM_INSTALL_TOOLCHAIN_ONLY 1 CACHE INTERNAL "")
if (ARCH_AMD64)
set (LLVM_TARGETS_TO_BUILD "X86" CACHE INTERNAL "")
list(APPEND REQUIRED_LLVM_LIBRARIES LLVMX86Info LLVMX86Desc LLVMX86CodeGen)

2
contrib/orc vendored

@ -1 +1 @@
Subproject commit a20d1d9d7ad4a4be7b7ba97588e16ca8b9abb2b6
Subproject commit e24f2c2a3ca0769c96704ab20ad6f512a83ea2ad

View File

@ -54,8 +54,10 @@ namespace pdqsort_detail {
block_size = 64,
// Cacheline size, assumes power of two.
cacheline_size = 64
cacheline_size = 64,
/// Number of iterations the try-sort is allowed before giving up
try_sort_iterations = 3,
};
#if __cplusplus >= 201103L
@ -501,6 +503,167 @@ namespace pdqsort_detail {
leftmost = false;
}
}
template<class Iter, class Compare, bool Branchless>
inline bool pdqsort_try_sort_loop(Iter begin,
Iter end,
Compare comp,
size_t bad_allowed,
size_t iterations_allowed,
bool force_sort = false,
bool leftmost = true) {
typedef typename std::iterator_traits<Iter>::difference_type diff_t;
// Use a while loop for tail recursion elimination.
while (true) {
if (!force_sort && iterations_allowed == 0) {
return false;
}
diff_t size = end - begin;
// Insertion sort is faster for small arrays.
if (size < insertion_sort_threshold) {
if (leftmost) insertion_sort(begin, end, comp);
else unguarded_insertion_sort(begin, end, comp);
return true;
}
// Choose pivot as median of 3 or pseudomedian of 9.
diff_t s2 = size / 2;
if (size > ninther_threshold) {
sort3(begin, begin + s2, end - 1, comp);
sort3(begin + 1, begin + (s2 - 1), end - 2, comp);
sort3(begin + 2, begin + (s2 + 1), end - 3, comp);
sort3(begin + (s2 - 1), begin + s2, begin + (s2 + 1), comp);
std::iter_swap(begin, begin + s2);
} else sort3(begin + s2, begin, end - 1, comp);
// If *(begin - 1) is the end of the right partition of a previous partition operation
// there is no element in [begin, end) that is smaller than *(begin - 1). Then if our
// pivot compares equal to *(begin - 1) we change strategy, putting equal elements in
// the left partition, greater elements in the right partition. We do not have to
// recurse on the left partition, since it's sorted (all equal).
if (!leftmost && !comp(*(begin - 1), *begin)) {
begin = partition_left(begin, end, comp) + 1;
continue;
}
// Partition and get results.
std::pair<Iter, bool> part_result =
Branchless ? partition_right_branchless(begin, end, comp)
: partition_right(begin, end, comp);
Iter pivot_pos = part_result.first;
bool already_partitioned = part_result.second;
// Check for a highly unbalanced partition.
diff_t l_size = pivot_pos - begin;
diff_t r_size = end - (pivot_pos + 1);
bool highly_unbalanced = l_size < size / 8 || r_size < size / 8;
// If we got a highly unbalanced partition we shuffle elements to break many patterns.
if (highly_unbalanced) {
if (!force_sort) {
return false;
}
// If we had too many bad partitions, switch to heapsort to guarantee O(n log n).
if (--bad_allowed == 0) {
std::make_heap(begin, end, comp);
std::sort_heap(begin, end, comp);
return true;
}
if (l_size >= insertion_sort_threshold) {
std::iter_swap(begin, begin + l_size / 4);
std::iter_swap(pivot_pos - 1, pivot_pos - l_size / 4);
if (l_size > ninther_threshold) {
std::iter_swap(begin + 1, begin + (l_size / 4 + 1));
std::iter_swap(begin + 2, begin + (l_size / 4 + 2));
std::iter_swap(pivot_pos - 2, pivot_pos - (l_size / 4 + 1));
std::iter_swap(pivot_pos - 3, pivot_pos - (l_size / 4 + 2));
}
}
if (r_size >= insertion_sort_threshold) {
std::iter_swap(pivot_pos + 1, pivot_pos + (1 + r_size / 4));
std::iter_swap(end - 1, end - r_size / 4);
if (r_size > ninther_threshold) {
std::iter_swap(pivot_pos + 2, pivot_pos + (2 + r_size / 4));
std::iter_swap(pivot_pos + 3, pivot_pos + (3 + r_size / 4));
std::iter_swap(end - 2, end - (1 + r_size / 4));
std::iter_swap(end - 3, end - (2 + r_size / 4));
}
}
} else {
// If we were decently balanced and we tried to sort an already partitioned
// sequence, try to use insertion sort.
if (already_partitioned && partial_insertion_sort(begin, pivot_pos, comp)
&& partial_insertion_sort(pivot_pos + 1, end, comp)) {
return true;
}
}
// Sort the left partition first using recursion and do tail recursion elimination for
// the right-hand partition.
if (pdqsort_try_sort_loop<Iter, Compare, Branchless>(begin,
pivot_pos,
comp,
bad_allowed,
iterations_allowed - 1,
force_sort,
leftmost)) {
force_sort = true;
} else {
return false;
}
--iterations_allowed;
begin = pivot_pos + 1;
leftmost = false;
}
return false;
}
template<class Iter, class Compare, bool Branchless>
inline bool pdqsort_try_sort_impl(Iter begin, Iter end, Compare comp, size_t bad_allowed)
{
typedef typename std::iterator_traits<Iter>::difference_type diff_t;
static constexpr size_t iterations_allowed = pdqsort_detail::try_sort_iterations;
static constexpr size_t num_to_try = 16;
diff_t size = end - begin;
// For non-trivial sizes, probe evenly spaced sample triples first and count
// those that are not monotonic in either direction; more than a handful of
// such triples means the range is unlikely to be nearly sorted, so give up early.
if (size > num_to_try * 10)
{
size_t out_of_order_elements = 0;
for (size_t i = 1; i < num_to_try; ++i)
{
diff_t offset = size / num_to_try;
diff_t prev_position = offset * (i - 1);
diff_t curr_position = offset * i;
diff_t next_position = offset * (i + 1) - 1;
bool prev_less_than_curr = comp(*(begin + prev_position), *(begin + curr_position));
bool curr_less_than_next = comp(*(begin + curr_position), *(begin + next_position));
if ((prev_less_than_curr && curr_less_than_next) || (!prev_less_than_curr && !curr_less_than_next))
continue;
++out_of_order_elements;
if (out_of_order_elements > iterations_allowed)
return false;
}
}
return pdqsort_try_sort_loop<Iter, Compare, Branchless>(begin, end, comp, bad_allowed, iterations_allowed);
}
}
@ -538,6 +701,41 @@ inline void pdqsort_branchless(Iter begin, Iter end) {
pdqsort_branchless(begin, end, std::less<T>());
}
template<class Iter, class Compare>
inline bool pdqsort_try_sort(Iter begin, Iter end, Compare comp) {
if (begin == end) return true;
#if __cplusplus >= 201103L
return pdqsort_detail::pdqsort_try_sort_impl<Iter, Compare,
pdqsort_detail::is_default_compare<typename std::decay<Compare>::type>::value &&
std::is_arithmetic<typename std::iterator_traits<Iter>::value_type>::value>(
begin, end, comp, pdqsort_detail::log2(end - begin));
#else
return pdqsort_detail::pdqsort_try_sort_impl<Iter, Compare, false>(
begin, end, comp, pdqsort_detail::log2(end - begin));
#endif
}
template<class Iter>
inline bool pdqsort_try_sort(Iter begin, Iter end) {
typedef typename std::iterator_traits<Iter>::value_type T;
return pdqsort_try_sort(begin, end, std::less<T>());
}
template<class Iter, class Compare>
inline bool pdqsort_try_sort_branchless(Iter begin, Iter end, Compare comp) {
if (begin == end) return true;
return pdqsort_detail::pdqsort_try_sort_impl<Iter, Compare, true>(
begin, end, comp, pdqsort_detail::log2(end - begin));
}
template<class Iter>
inline bool pdqsort_try_sort_branchless(Iter begin, Iter end) {
typedef typename std::iterator_traits<Iter>::value_type T;
return pdqsort_try_sort_branchless(begin, end, std::less<T>());
}
#undef PDQSORT_PREFER_MOVE
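Taken together, these entry points give callers an opportunistic fast path: pdqsort_try_sort() returns true when the range was fully sorted within the iteration budget, and false when it bailed out early (too many direction flips in the sample probe, or a highly unbalanced partition). A minimal usage sketch, assuming the header is included as "pdqsort.h"; the std::sort fallback is illustrative, not part of the library:

    #include <algorithm>
    #include <vector>
    #include "pdqsort.h"

    // On failure the range may have been partially reordered, but it still
    // holds the same elements (only swaps happen), so a full sort stays correct.
    void sort_with_fallback(std::vector<int> & data)
    {
        if (!pdqsort_try_sort(data.begin(), data.end()))
            std::sort(data.begin(), data.end());
    }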

1
contrib/pocketfft vendored Submodule

@ -0,0 +1 @@
Subproject commit 9efd4da52cf8d28d14531d14e43ad9d913807546

View File

@ -0,0 +1,10 @@
option (ENABLE_POCKETFFT "Enable pocketfft" ${ENABLE_LIBRARIES})
if (NOT ENABLE_POCKETFFT)
message(STATUS "Not using pocketfft")
return()
endif()
add_library(_pocketfft INTERFACE)
target_include_directories(_pocketfft INTERFACE ${ClickHouse_SOURCE_DIR}/contrib/pocketfft)
add_library(ch_contrib::pocketfft ALIAS _pocketfft)

View File

@ -16,8 +16,7 @@ function(GetLibraryVersion _content _outputVar)
SET(${_outputVar} ${CMAKE_MATCH_1} PARENT_SCOPE)
endfunction()
FILE(READ "${QPL_PROJECT_DIR}/CMakeLists.txt" HEADER_CONTENT)
GetLibraryVersion("${HEADER_CONTENT}" QPL_VERSION)
set (QPL_VERSION 1.2.0)
message(STATUS "Intel QPL version: ${QPL_VERSION}")
@ -28,16 +27,422 @@ message(STATUS "Intel QPL version: ${QPL_VERSION}")
# The qpl submodule comes with its own version of isal. It contains code which does not exist in upstream isal. It would be nice to link
# only upstream isal (ch_contrib::isal) but at this point we can't.
include("${QPL_PROJECT_DIR}/cmake/CompileOptions.cmake")
# ==========================================================================
# Copyright (C) 2022 Intel Corporation
#
# SPDX-License-Identifier: MIT
# ==========================================================================
set(QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS "-fno-exceptions;-fno-rtti")
function(modify_standard_language_flag)
# Declaring function parameters
set(OPTIONS "")
set(ONE_VALUE_ARGS
LANGUAGE_NAME
FLAG_NAME
NEW_FLAG_VALUE)
set(MULTI_VALUE_ARGS "")
# Parsing function parameters
cmake_parse_arguments(MODIFY
"${OPTIONS}"
"${ONE_VALUE_ARGS}"
"${MULTI_VALUE_ARGS}"
${ARGN})
# Variables
set(FLAG_REGULAR_EXPRESSION "${MODIFY_FLAG_NAME}.*[ ]*")
set(NEW_VALUE "${MODIFY_FLAG_NAME}${MODIFY_NEW_FLAG_VALUE}")
# Replacing specified flag with new value
string(REGEX REPLACE
${FLAG_REGULAR_EXPRESSION} ${NEW_VALUE}
NEW_COMPILE_FLAGS
"${CMAKE_${MODIFY_LANGUAGE_NAME}_FLAGS}")
# Returning the value
set(CMAKE_${MODIFY_LANGUAGE_NAME}_FLAGS ${NEW_COMPILE_FLAGS} PARENT_SCOPE)
endfunction()
function(get_function_name_with_default_bit_width in_function_name bit_width out_function_name)
if(in_function_name MATCHES ".*_i")
string(REPLACE "_i" "" in_function_name ${in_function_name})
set(${out_function_name} "${in_function_name}_${bit_width}_i" PARENT_SCOPE)
else()
set(${out_function_name} "${in_function_name}_${bit_width}" PARENT_SCOPE)
endif()
endfunction()
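In other words, the "_i" (in-place) marker always stays as the final suffix, with the bit width spliced in before it. A hypothetical C++ rendering of the same naming rule, for illustration only (the function name and signature are not part of QPL):

    #include <string>

    // "extract" + "8u" -> "extract_8u"; "extract_i" + "8u" -> "extract_8u_i".
    std::string with_default_bit_width(std::string name, const std::string & bit_width)
    {
        const bool in_place = name.size() >= 2 && name.compare(name.size() - 2, 2, "_i") == 0;
        if (in_place)
            name.erase(name.size() - 2);  // drop the trailing "_i"
        name += "_" + bit_width;
        return in_place ? name + "_i" : name;
    }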
macro(get_list_of_supported_optimizations PLATFORMS_LIST)
list(APPEND PLATFORMS_LIST "")
list(APPEND PLATFORMS_LIST "px")
list(APPEND PLATFORMS_LIST "avx512")
endmacro(get_list_of_supported_optimizations)
function(generate_unpack_kernel_arrays current_directory PLATFORMS_LIST)
list(APPEND UNPACK_POSTFIX_LIST "")
list(APPEND UNPACK_PRLE_POSTFIX_LIST "")
list(APPEND PACK_POSTFIX_LIST "")
list(APPEND PACK_INDEX_POSTFIX_LIST "")
list(APPEND SCAN_POSTFIX_LIST "")
list(APPEND DEFAULT_BIT_WIDTH_FUNCTIONS_LIST "")
list(APPEND DEFAULT_BIT_WIDTH_LIST "")
#create list of functions that use only 8u 16u 32u postfixes
list(APPEND DEFAULT_BIT_WIDTH_FUNCTIONS_LIST "unpack_prle")
list(APPEND DEFAULT_BIT_WIDTH_FUNCTIONS_LIST "extract")
list(APPEND DEFAULT_BIT_WIDTH_FUNCTIONS_LIST "extract_i")
list(APPEND DEFAULT_BIT_WIDTH_FUNCTIONS_LIST "select")
list(APPEND DEFAULT_BIT_WIDTH_FUNCTIONS_LIST "select_i")
list(APPEND DEFAULT_BIT_WIDTH_FUNCTIONS_LIST "expand")
#create default bit width list
list(APPEND DEFAULT_BIT_WIDTH_LIST "8u")
list(APPEND DEFAULT_BIT_WIDTH_LIST "16u")
list(APPEND DEFAULT_BIT_WIDTH_LIST "32u")
#create scan kernel postfixes
list(APPEND SCAN_COMPARATOR_LIST "")
list(APPEND SCAN_COMPARATOR_LIST "eq")
list(APPEND SCAN_COMPARATOR_LIST "ne")
list(APPEND SCAN_COMPARATOR_LIST "lt")
list(APPEND SCAN_COMPARATOR_LIST "le")
list(APPEND SCAN_COMPARATOR_LIST "gt")
list(APPEND SCAN_COMPARATOR_LIST "ge")
list(APPEND SCAN_COMPARATOR_LIST "range")
list(APPEND SCAN_COMPARATOR_LIST "not_range")
foreach(SCAN_COMPARATOR IN LISTS SCAN_COMPARATOR_LIST)
list(APPEND SCAN_POSTFIX_LIST "_${SCAN_COMPARATOR}_8u")
list(APPEND SCAN_POSTFIX_LIST "_${SCAN_COMPARATOR}_16u8u")
list(APPEND SCAN_POSTFIX_LIST "_${SCAN_COMPARATOR}_32u8u")
endforeach()
# create unpack kernel postfixes
foreach(input_width RANGE 1 32 1)
if(input_width LESS 8 OR input_width EQUAL 8)
list(APPEND UNPACK_POSTFIX_LIST "_${input_width}u8u")
elseif(input_width LESS 16 OR input_width EQUAL 16)
list(APPEND UNPACK_POSTFIX_LIST "_${input_width}u16u")
else()
list(APPEND UNPACK_POSTFIX_LIST "_${input_width}u32u")
endif()
endforeach()
# create pack kernel postfixes
foreach(output_width RANGE 1 8 1)
list(APPEND PACK_POSTFIX_LIST "_8u${output_width}u")
endforeach()
foreach(output_width RANGE 9 16 1)
list(APPEND PACK_POSTFIX_LIST "_16u${output_width}u")
endforeach()
foreach(output_width RANGE 17 32 1)
list(APPEND PACK_POSTFIX_LIST "_32u${output_width}u")
endforeach()
list(APPEND PACK_POSTFIX_LIST "_8u16u")
list(APPEND PACK_POSTFIX_LIST "_8u32u")
list(APPEND PACK_POSTFIX_LIST "_16u32u")
# create pack index kernel postfixes
list(APPEND PACK_INDEX_POSTFIX_LIST "_nu")
list(APPEND PACK_INDEX_POSTFIX_LIST "_8u")
list(APPEND PACK_INDEX_POSTFIX_LIST "_8u16u")
list(APPEND PACK_INDEX_POSTFIX_LIST "_8u32u")
# write to file
file(MAKE_DIRECTORY ${current_directory}/generated)
foreach(PLATFORM_VALUE IN LISTS PLATFORMS_LIST)
set(directory "${current_directory}/generated")
set(PLATFORM_PREFIX "${PLATFORM_VALUE}_")
#
# Write unpack table
#
file(WRITE ${directory}/${PLATFORM_PREFIX}unpack.cpp "#include \"qplc_api.h\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}unpack.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}unpack.cpp "namespace qpl::core_sw::dispatcher\n{\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}unpack.cpp "unpack_table_t ${PLATFORM_PREFIX}unpack_table = {\n")
#write LE kernels
foreach(UNPACK_POSTFIX IN LISTS UNPACK_POSTFIX_LIST)
file(APPEND ${directory}/${PLATFORM_PREFIX}unpack.cpp "\t${PLATFORM_PREFIX}qplc_unpack${UNPACK_POSTFIX},\n")
endforeach()
#write BE kernels
#get last element of the list
set(LAST_ELEMENT "")
list(GET UNPACK_POSTFIX_LIST -1 LAST_ELEMENT)
foreach(UNPACK_POSTFIX IN LISTS UNPACK_POSTFIX_LIST)
if(UNPACK_POSTFIX STREQUAL LAST_ELEMENT)
file(APPEND ${directory}/${PLATFORM_PREFIX}unpack.cpp "\t${PLATFORM_PREFIX}qplc_unpack_be${UNPACK_POSTFIX}};\n")
else()
file(APPEND ${directory}/${PLATFORM_PREFIX}unpack.cpp "\t${PLATFORM_PREFIX}qplc_unpack_be${UNPACK_POSTFIX},\n")
endif()
endforeach()
file(APPEND ${directory}/${PLATFORM_PREFIX}unpack.cpp "}\n")
#
# Write pack table
#
file(WRITE ${directory}/${PLATFORM_PREFIX}pack.cpp "#include \"qplc_api.h\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}pack.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}pack.cpp "namespace qpl::core_sw::dispatcher\n{\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}pack.cpp "pack_table_t ${PLATFORM_PREFIX}pack_table = {\n")
#write LE kernels
foreach(PACK_POSTFIX IN LISTS PACK_POSTFIX_LIST)
file(APPEND ${directory}/${PLATFORM_PREFIX}pack.cpp "\t${PLATFORM_PREFIX}qplc_pack${PACK_POSTFIX},\n")
endforeach()
#write BE kernels
#get last element of the list
set(LAST_ELEMENT "")
list(GET PACK_POSTFIX_LIST -1 LAST_ELEMENT)
foreach(PACK_POSTFIX IN LISTS PACK_POSTFIX_LIST)
if(PACK_POSTFIX STREQUAL LAST_ELEMENT)
file(APPEND ${directory}/${PLATFORM_PREFIX}pack.cpp "\t${PLATFORM_PREFIX}qplc_pack_be${PACK_POSTFIX}};\n")
else()
file(APPEND ${directory}/${PLATFORM_PREFIX}pack.cpp "\t${PLATFORM_PREFIX}qplc_pack_be${PACK_POSTFIX},\n")
endif()
endforeach()
file(APPEND ${directory}/${PLATFORM_PREFIX}pack.cpp "}\n")
#
# Write scan table
#
file(WRITE ${directory}/${PLATFORM_PREFIX}scan.cpp "#include \"qplc_api.h\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}scan.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}scan.cpp "namespace qpl::core_sw::dispatcher\n{\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}scan.cpp "scan_table_t ${PLATFORM_PREFIX}scan_table = {\n")
#get last element of the list
set(LAST_ELEMENT "")
list(GET SCAN_POSTFIX_LIST -1 LAST_ELEMENT)
foreach(SCAN_POSTFIX IN LISTS SCAN_POSTFIX_LIST)
if(SCAN_POSTFIX STREQUAL LAST_ELEMENT)
file(APPEND ${directory}/${PLATFORM_PREFIX}scan.cpp "\t${PLATFORM_PREFIX}qplc_scan${SCAN_POSTFIX}};\n")
else()
file(APPEND ${directory}/${PLATFORM_PREFIX}scan.cpp "\t${PLATFORM_PREFIX}qplc_scan${SCAN_POSTFIX},\n")
endif()
endforeach()
file(APPEND ${directory}/${PLATFORM_PREFIX}scan.cpp "}\n")
#
# Write scan_i table
#
file(WRITE ${directory}/${PLATFORM_PREFIX}scan_i.cpp "#include \"qplc_api.h\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}scan_i.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}scan_i.cpp "namespace qpl::core_sw::dispatcher\n{\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}scan_i.cpp "scan_i_table_t ${PLATFORM_PREFIX}scan_i_table = {\n")
#get last element of the list
set(LAST_ELEMENT "")
list(GET SCAN_POSTFIX_LIST -1 LAST_ELEMENT)
foreach(SCAN_POSTFIX IN LISTS SCAN_POSTFIX_LIST)
if(SCAN_POSTFIX STREQUAL LAST_ELEMENT)
file(APPEND ${directory}/${PLATFORM_PREFIX}scan_i.cpp "\t${PLATFORM_PREFIX}qplc_scan${SCAN_POSTFIX}_i};\n")
else()
file(APPEND ${directory}/${PLATFORM_PREFIX}scan_i.cpp "\t${PLATFORM_PREFIX}qplc_scan${SCAN_POSTFIX}_i,\n")
endif()
endforeach()
file(APPEND ${directory}/${PLATFORM_PREFIX}scan_i.cpp "}\n")
#
# Write pack_index table
#
file(WRITE ${directory}/${PLATFORM_PREFIX}pack_index.cpp "#include \"qplc_api.h\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "namespace qpl::core_sw::dispatcher\n{\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "pack_index_table_t ${PLATFORM_PREFIX}pack_index_table = {\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_bits_nu,\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_index_8u,\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_index_8u16u,\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_index_8u32u,\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_bits_be_nu,\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_index_8u,\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_index_be_8u16u,\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_index_be_8u32u};\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "}\n")
#
# Write default bit width functions
#
foreach(DEFAULT_BIT_WIDTH_FUNCTION IN LISTS DEFAULT_BIT_WIDTH_FUNCTIONS_LIST)
file(WRITE ${directory}/${PLATFORM_PREFIX}${DEFAULT_BIT_WIDTH_FUNCTION}.cpp "#include \"qplc_api.h\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}${DEFAULT_BIT_WIDTH_FUNCTION}.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}${DEFAULT_BIT_WIDTH_FUNCTION}.cpp "namespace qpl::core_sw::dispatcher\n{\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}${DEFAULT_BIT_WIDTH_FUNCTION}.cpp "${DEFAULT_BIT_WIDTH_FUNCTION}_table_t ${PLATFORM_PREFIX}${DEFAULT_BIT_WIDTH_FUNCTION}_table = {\n")
#get last element of the list
set(LAST_ELEMENT "")
list(GET DEFAULT_BIT_WIDTH_LIST -1 LAST_ELEMENT)
foreach(BIT_WIDTH IN LISTS DEFAULT_BIT_WIDTH_LIST)
set(FUNCTION_NAME "")
get_function_name_with_default_bit_width(${DEFAULT_BIT_WIDTH_FUNCTION} ${BIT_WIDTH} FUNCTION_NAME)
if(BIT_WIDTH STREQUAL LAST_ELEMENT)
file(APPEND ${directory}/${PLATFORM_PREFIX}${DEFAULT_BIT_WIDTH_FUNCTION}.cpp "\t${PLATFORM_PREFIX}qplc_${FUNCTION_NAME}};\n")
else()
file(APPEND ${directory}/${PLATFORM_PREFIX}${DEFAULT_BIT_WIDTH_FUNCTION}.cpp "\t${PLATFORM_PREFIX}qplc_${FUNCTION_NAME},\n")
endif()
endforeach()
file(APPEND ${directory}/${PLATFORM_PREFIX}${DEFAULT_BIT_WIDTH_FUNCTION}.cpp "}\n")
endforeach()
#
# Write aggregates table
#
file(WRITE ${directory}/${PLATFORM_PREFIX}aggregates.cpp "#include \"qplc_api.h\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "namespace qpl::core_sw::dispatcher\n{\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "aggregates_table_t ${PLATFORM_PREFIX}aggregates_table = {\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "\t${PLATFORM_PREFIX}qplc_bit_aggregates_8u,\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "\t${PLATFORM_PREFIX}qplc_aggregates_8u,\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "\t${PLATFORM_PREFIX}qplc_aggregates_16u,\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "\t${PLATFORM_PREFIX}qplc_aggregates_32u};\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "}\n")
#
# Write mem_copy functions table
#
file(WRITE ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "#include \"qplc_api.h\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "namespace qpl::core_sw::dispatcher\n{\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "memory_copy_table_t ${PLATFORM_PREFIX}memory_copy_table = {\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "\t${PLATFORM_PREFIX}qplc_copy_8u,\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "\t${PLATFORM_PREFIX}qplc_copy_16u,\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "\t${PLATFORM_PREFIX}qplc_copy_32u};\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "}\n")
#
# Write zero functions table
#
file(WRITE ${directory}/${PLATFORM_PREFIX}zero.cpp "#include \"qplc_api.h\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}zero.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}zero.cpp "namespace qpl::core_sw::dispatcher\n{\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}zero.cpp "zero_table_t ${PLATFORM_PREFIX}zero_table = {\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}zero.cpp "\t${PLATFORM_PREFIX}qplc_zero_8u};\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}zero.cpp "}\n")
#
# Write move functions table
#
file(WRITE ${directory}/${PLATFORM_PREFIX}move.cpp "#include \"qplc_api.h\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}move.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}move.cpp "namespace qpl::core_sw::dispatcher\n{\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}move.cpp "move_table_t ${PLATFORM_PREFIX}move_table = {\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}move.cpp "\t${PLATFORM_PREFIX}qplc_move_8u};\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}move.cpp "}\n")
#
# Write crc64 function table
#
file(WRITE ${directory}/${PLATFORM_PREFIX}crc64.cpp "#include \"qplc_api.h\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}crc64.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}crc64.cpp "namespace qpl::core_sw::dispatcher\n{\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}crc64.cpp "crc64_table_t ${PLATFORM_PREFIX}crc64_table = {\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}crc64.cpp "\t${PLATFORM_PREFIX}qplc_crc64};\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}crc64.cpp "}\n")
#
# Write xor_checksum function table
#
file(WRITE ${directory}/${PLATFORM_PREFIX}xor_checksum.cpp "#include \"qplc_api.h\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}xor_checksum.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}xor_checksum.cpp "namespace qpl::core_sw::dispatcher\n{\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}xor_checksum.cpp "xor_checksum_table_t ${PLATFORM_PREFIX}xor_checksum_table = {\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}xor_checksum.cpp "\t${PLATFORM_PREFIX}qplc_xor_checksum_8u};\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}xor_checksum.cpp "}\n")
#
# Write deflate functions table
#
file(WRITE ${directory}/${PLATFORM_PREFIX}deflate.cpp "#include \"deflate_slow_icf.h\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "#include \"deflate_hash_table.h\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "#include \"deflate_histogram.h\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "namespace qpl::core_sw::dispatcher\n{\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "deflate_table_t ${PLATFORM_PREFIX}deflate_table = {\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "\t reinterpret_cast<void *>(&${PLATFORM_PREFIX}slow_deflate_icf_body),\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "\t reinterpret_cast<void *>(&${PLATFORM_PREFIX}deflate_histogram_reset),\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "\t reinterpret_cast<void *>(&${PLATFORM_PREFIX}deflate_hash_table_reset)};\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "}\n")
#
# Write deflate fix functions table
#
file(WRITE ${directory}/${PLATFORM_PREFIX}deflate_fix.cpp "#include \"deflate_slow.h\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate_fix.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate_fix.cpp "namespace qpl::core_sw::dispatcher\n{\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate_fix.cpp "deflate_fix_table_t ${PLATFORM_PREFIX}deflate_fix_table = {\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate_fix.cpp "\t reinterpret_cast<void *>(&${PLATFORM_PREFIX}slow_deflate_body)};\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate_fix.cpp "}\n")
#
# Write setup_dictionary functions table
#
file(WRITE ${directory}/${PLATFORM_PREFIX}setup_dictionary.cpp "#include \"deflate_slow_utils.h\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}setup_dictionary.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}setup_dictionary.cpp "namespace qpl::core_sw::dispatcher\n{\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}setup_dictionary.cpp "setup_dictionary_table_t ${PLATFORM_PREFIX}setup_dictionary_table = {\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}setup_dictionary.cpp "\t reinterpret_cast<void *>(&${PLATFORM_PREFIX}setup_dictionary)};\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}setup_dictionary.cpp "}\n")
endforeach()
endfunction()
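For orientation, every generated file is a tiny translation unit that fills one dispatch table with the kernels of one platform. Based on the file(WRITE)/file(APPEND) calls above, the generated px_crc64.cpp would look roughly like this (the table type and kernel symbol come from QPL's own headers):

    #include "qplc_api.h"
    #include "dispatcher/dispatcher.hpp"
    namespace qpl::core_sw::dispatcher
    {
    crc64_table_t px_crc64_table = {
        px_qplc_crc64};
    }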
# check nasm compiler
include(CheckLanguage)
check_language(ASM_NASM)
if(NOT CMAKE_ASM_NASM_COMPILER)
message(FATAL_ERROR "Please install NASM from 'https://www.nasm.us/' because NASM compiler can not be found!")
endif()
# [SUBDIR]isal
enable_language(ASM_NASM)
set(ISAL_C_SRC ${QPL_SRC_DIR}/isal/igzip/adler32_base.c
@ -107,11 +512,6 @@ set_target_properties(isal PROPERTIES
CXX_STANDARD 11
C_STANDARD 99)
target_compile_options(isal PRIVATE
"$<$<C_COMPILER_ID:GNU>:${QPL_LINUX_TOOLCHAIN_REQUIRED_FLAGS}>"
"$<$<CONFIG:Debug>:>"
"$<$<CONFIG:Release>:>")
# AS_FEATURE_LEVEL=10 means "Check SIMD capabilities of the target system at runtime and use up to AVX512 if available".
# HAVE_KNOWS_AVX512 means rely on AVX512 being available on the target system.
target_compile_options(isal_asm PRIVATE "-I${QPL_SRC_DIR}/isal/include/"
@ -164,15 +564,7 @@ foreach(PLATFORM_ID IN LISTS PLATFORMS_LIST)
PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/core-sw/src/compression/include>
PRIVATE $<TARGET_PROPERTY:isal,INTERFACE_INCLUDE_DIRECTORIES>)
set_target_properties(qplcore_${PLATFORM_ID} PROPERTIES
$<$<C_COMPILER_ID:GNU>:C_STANDARD 17>)
target_compile_options(qplcore_${PLATFORM_ID}
PRIVATE ${QPL_LINUX_TOOLCHAIN_REQUIRED_FLAGS}
PRIVATE "$<$<CONFIG:Debug>:>"
PRIVATE "$<$<CONFIG:Release>:-O3;-D_FORTIFY_SOURCE=2>")
# Set specific compiler options and/or definitions based on a platform
if (${PLATFORM_ID} MATCHES "avx512")
target_compile_definitions(qplcore_${PLATFORM_ID} PRIVATE PLATFORM=2)
target_compile_options(qplcore_${PLATFORM_ID} PRIVATE -march=skylake-avx512)
@ -221,10 +613,7 @@ set_target_properties(qplcore_sw_dispatcher PROPERTIES CXX_STANDARD 17)
target_compile_definitions(qplcore_sw_dispatcher PUBLIC -DQPL_LIB)
target_compile_options(qplcore_sw_dispatcher
PRIVATE $<$<C_COMPILER_ID:GNU>:${QPL_LINUX_TOOLCHAIN_REQUIRED_FLAGS};
${QPL_LINUX_TOOLCHAIN_DYNAMIC_LIBRARY_FLAGS};
$<$<CONFIG:Release>:-O3;-D_FORTIFY_SOURCE=2>>
PRIVATE $<$<COMPILE_LANG_AND_ID:CXX,GNU>:${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS}>)
PRIVATE ${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS})
# [SUBDIR]core-iaa
file(GLOB HW_PATH_SRC ${QPL_SRC_DIR}/core-iaa/sources/aecs/*.c
@ -249,14 +638,6 @@ target_include_directories(core_iaa
PRIVATE $<BUILD_INTERFACE:${QPL_PROJECT_DIR}/sources/c_api> # own_checkers.h
PRIVATE $<TARGET_PROPERTY:qplcore_sw_dispatcher,INTERFACE_INCLUDE_DIRECTORIES>)
set_target_properties(core_iaa PROPERTIES
$<$<C_COMPILER_ID:GNU>:C_STANDARD 17>
CXX_STANDARD 17)
target_compile_options(core_iaa
PRIVATE $<$<C_COMPILER_ID:GNU>:${QPL_LINUX_TOOLCHAIN_REQUIRED_FLAGS};
$<$<CONFIG:Release>:-O3;-D_FORTIFY_SOURCE=2>>)
target_compile_features(core_iaa PRIVATE c_std_11)
target_compile_definitions(core_iaa PRIVATE QPL_BADARG_CHECK
@ -286,10 +667,7 @@ set_property(GLOBAL APPEND PROPERTY QPL_LIB_DEPS
$<TARGET_OBJECTS:middle_layer_lib>)
target_compile_options(middle_layer_lib
PRIVATE $<$<C_COMPILER_ID:GNU>:${QPL_LINUX_TOOLCHAIN_REQUIRED_FLAGS};
${QPL_LINUX_TOOLCHAIN_DYNAMIC_LIBRARY_FLAGS};
$<$<CONFIG:Release>:-O3;-D_FORTIFY_SOURCE=2>>
PRIVATE $<$<COMPILE_LANG_AND_ID:CXX,GNU>:${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS}>)
PRIVATE ${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS})
target_compile_definitions(middle_layer_lib
PUBLIC QPL_VERSION="${QPL_VERSION}"
@ -324,15 +702,8 @@ target_include_directories(_qpl
PRIVATE $<TARGET_PROPERTY:middle_layer_lib,INTERFACE_INCLUDE_DIRECTORIES>
PRIVATE $<BUILD_INTERFACE:${QPL_SRC_DIR}/c_api>)
set_target_properties(_qpl PROPERTIES
$<$<C_COMPILER_ID:GNU>:C_STANDARD 17>
CXX_STANDARD 17)
target_compile_options(_qpl
PRIVATE $<$<C_COMPILER_ID:GNU>:${QPL_LINUX_TOOLCHAIN_REQUIRED_FLAGS};
${QPL_LINUX_TOOLCHAIN_DYNAMIC_LIBRARY_FLAGS};
$<$<CONFIG:Release>:-O3;-D_FORTIFY_SOURCE=2>>
PRIVATE $<$<COMPILE_LANG_AND_ID:CXX,GNU>:${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS}>)
PRIVATE ${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS})
target_compile_definitions(_qpl
PRIVATE -DQPL_LIB

View File

@ -1,530 +0,0 @@
#!/bin/bash
ckhost="localhost"
ckport=("9000" "9001" "9002" "9003")
WORKING_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/.."
OUTPUT_DIR="${WORKING_DIR}/output"
LOG_DIR="${OUTPUT_DIR}/log"
RAWDATA_DIR="${WORKING_DIR}/rawdata_dir"
database_dir="${WORKING_DIR}/database_dir"
CLIENT_SCRIPTS_DIR="${WORKING_DIR}/client_scripts"
LOG_PACK_FILE="$(date +%Y-%m-%d-%H-%M-%S)"
QUERY_FILE="queries_ssb.sql"
SERVER_BIND_CMD[0]="numactl -m 0 -N 0"
SERVER_BIND_CMD[1]="numactl -m 0 -N 0"
SERVER_BIND_CMD[2]="numactl -m 1 -N 1"
SERVER_BIND_CMD[3]="numactl -m 1 -N 1"
CLIENT_BIND_CMD=""
SSB_GEN_FACTOR=20
TABLE_NAME="lineorder_flat"
TABLE_ROWS="119994608"
CODEC_CONFIG="lz4 deflate zstd"
# define instance number
inst_num=$1
if [ ! -n "$1" ]; then
echo "Please clarify instance number from 1,2,3 or 4"
exit 1
else
echo "Benchmarking with instance number:$1"
fi
if [ ! -d "$OUTPUT_DIR" ]; then
mkdir $OUTPUT_DIR
fi
if [ ! -d "$LOG_DIR" ]; then
mkdir $LOG_DIR
fi
if [ ! -d "$RAWDATA_DIR" ]; then
mkdir $RAWDATA_DIR
fi
# define different directories
dir_server=("" "_s2" "_s3" "_s4")
ckreadSql="
CREATE TABLE customer
(
C_CUSTKEY UInt32,
C_NAME String,
C_ADDRESS String,
C_CITY LowCardinality(String),
C_NATION LowCardinality(String),
C_REGION LowCardinality(String),
C_PHONE String,
C_MKTSEGMENT LowCardinality(String)
)
ENGINE = MergeTree ORDER BY (C_CUSTKEY);
CREATE TABLE lineorder
(
LO_ORDERKEY UInt32,
LO_LINENUMBER UInt8,
LO_CUSTKEY UInt32,
LO_PARTKEY UInt32,
LO_SUPPKEY UInt32,
LO_ORDERDATE Date,
LO_ORDERPRIORITY LowCardinality(String),
LO_SHIPPRIORITY UInt8,
LO_QUANTITY UInt8,
LO_EXTENDEDPRICE UInt32,
LO_ORDTOTALPRICE UInt32,
LO_DISCOUNT UInt8,
LO_REVENUE UInt32,
LO_SUPPLYCOST UInt32,
LO_TAX UInt8,
LO_COMMITDATE Date,
LO_SHIPMODE LowCardinality(String)
)
ENGINE = MergeTree PARTITION BY toYear(LO_ORDERDATE) ORDER BY (LO_ORDERDATE, LO_ORDERKEY);
CREATE TABLE part
(
P_PARTKEY UInt32,
P_NAME String,
P_MFGR LowCardinality(String),
P_CATEGORY LowCardinality(String),
P_BRAND LowCardinality(String),
P_COLOR LowCardinality(String),
P_TYPE LowCardinality(String),
P_SIZE UInt8,
P_CONTAINER LowCardinality(String)
)
ENGINE = MergeTree ORDER BY P_PARTKEY;
CREATE TABLE supplier
(
S_SUPPKEY UInt32,
S_NAME String,
S_ADDRESS String,
S_CITY LowCardinality(String),
S_NATION LowCardinality(String),
S_REGION LowCardinality(String),
S_PHONE String
)
ENGINE = MergeTree ORDER BY S_SUPPKEY;
"
supplier_table="
CREATE TABLE supplier
(
S_SUPPKEY UInt32,
S_NAME String,
S_ADDRESS String,
S_CITY LowCardinality(String),
S_NATION LowCardinality(String),
S_REGION LowCardinality(String),
S_PHONE String
)
ENGINE = MergeTree ORDER BY S_SUPPKEY;
"
part_table="
CREATE TABLE part
(
P_PARTKEY UInt32,
P_NAME String,
P_MFGR LowCardinality(String),
P_CATEGORY LowCardinality(String),
P_BRAND LowCardinality(String),
P_COLOR LowCardinality(String),
P_TYPE LowCardinality(String),
P_SIZE UInt8,
P_CONTAINER LowCardinality(String)
)
ENGINE = MergeTree ORDER BY P_PARTKEY;
"
lineorder_table="
CREATE TABLE lineorder
(
LO_ORDERKEY UInt32,
LO_LINENUMBER UInt8,
LO_CUSTKEY UInt32,
LO_PARTKEY UInt32,
LO_SUPPKEY UInt32,
LO_ORDERDATE Date,
LO_ORDERPRIORITY LowCardinality(String),
LO_SHIPPRIORITY UInt8,
LO_QUANTITY UInt8,
LO_EXTENDEDPRICE UInt32,
LO_ORDTOTALPRICE UInt32,
LO_DISCOUNT UInt8,
LO_REVENUE UInt32,
LO_SUPPLYCOST UInt32,
LO_TAX UInt8,
LO_COMMITDATE Date,
LO_SHIPMODE LowCardinality(String)
)
ENGINE = MergeTree PARTITION BY toYear(LO_ORDERDATE) ORDER BY (LO_ORDERDATE, LO_ORDERKEY);
"
customer_table="
CREATE TABLE customer
(
C_CUSTKEY UInt32,
C_NAME String,
C_ADDRESS String,
C_CITY LowCardinality(String),
C_NATION LowCardinality(String),
C_REGION LowCardinality(String),
C_PHONE String,
C_MKTSEGMENT LowCardinality(String)
)
ENGINE = MergeTree ORDER BY (C_CUSTKEY);
"
lineorder_flat_table="
SET max_memory_usage = 20000000000;
CREATE TABLE lineorder_flat
ENGINE = MergeTree
PARTITION BY toYear(LO_ORDERDATE)
ORDER BY (LO_ORDERDATE, LO_ORDERKEY) AS
SELECT
l.LO_ORDERKEY AS LO_ORDERKEY,
l.LO_LINENUMBER AS LO_LINENUMBER,
l.LO_CUSTKEY AS LO_CUSTKEY,
l.LO_PARTKEY AS LO_PARTKEY,
l.LO_SUPPKEY AS LO_SUPPKEY,
l.LO_ORDERDATE AS LO_ORDERDATE,
l.LO_ORDERPRIORITY AS LO_ORDERPRIORITY,
l.LO_SHIPPRIORITY AS LO_SHIPPRIORITY,
l.LO_QUANTITY AS LO_QUANTITY,
l.LO_EXTENDEDPRICE AS LO_EXTENDEDPRICE,
l.LO_ORDTOTALPRICE AS LO_ORDTOTALPRICE,
l.LO_DISCOUNT AS LO_DISCOUNT,
l.LO_REVENUE AS LO_REVENUE,
l.LO_SUPPLYCOST AS LO_SUPPLYCOST,
l.LO_TAX AS LO_TAX,
l.LO_COMMITDATE AS LO_COMMITDATE,
l.LO_SHIPMODE AS LO_SHIPMODE,
c.C_NAME AS C_NAME,
c.C_ADDRESS AS C_ADDRESS,
c.C_CITY AS C_CITY,
c.C_NATION AS C_NATION,
c.C_REGION AS C_REGION,
c.C_PHONE AS C_PHONE,
c.C_MKTSEGMENT AS C_MKTSEGMENT,
s.S_NAME AS S_NAME,
s.S_ADDRESS AS S_ADDRESS,
s.S_CITY AS S_CITY,
s.S_NATION AS S_NATION,
s.S_REGION AS S_REGION,
s.S_PHONE AS S_PHONE,
p.P_NAME AS P_NAME,
p.P_MFGR AS P_MFGR,
p.P_CATEGORY AS P_CATEGORY,
p.P_BRAND AS P_BRAND,
p.P_COLOR AS P_COLOR,
p.P_TYPE AS P_TYPE,
p.P_SIZE AS P_SIZE,
p.P_CONTAINER AS P_CONTAINER
FROM lineorder AS l
INNER JOIN customer AS c ON c.C_CUSTKEY = l.LO_CUSTKEY
INNER JOIN supplier AS s ON s.S_SUPPKEY = l.LO_SUPPKEY
INNER JOIN part AS p ON p.P_PARTKEY = l.LO_PARTKEY;
show settings ilike 'max_memory_usage';
"
function insert_data(){
echo "insert_data:$1"
create_table_prefix="clickhouse client --host ${ckhost} --port $2 --multiquery -q"
insert_data_prefix="clickhouse client --query "
case $1 in
all)
clickhouse client --host ${ckhost} --port $2 --multiquery -q"$ckreadSql" && {
${insert_data_prefix} "INSERT INTO customer FORMAT CSV" < ${RAWDATA_DIR}/ssb-dbgen/customer.tbl --port=$2
${insert_data_prefix} "INSERT INTO part FORMAT CSV" < ${RAWDATA_DIR}/ssb-dbgen/part.tbl --port=$2
${insert_data_prefix} "INSERT INTO supplier FORMAT CSV" < ${RAWDATA_DIR}/ssb-dbgen/supplier.tbl --port=$2
${insert_data_prefix} "INSERT INTO lineorder FORMAT CSV" < ${RAWDATA_DIR}/ssb-dbgen/lineorder.tbl --port=$2
}
${create_table_prefix}"${lineorder_flat_table}"
;;
customer)
echo ${create_table_prefix}\"${customer_table}\"
${create_table_prefix}"${customer_table}" && {
echo "${insert_data_prefix} \"INSERT INTO $1 FORMAT CSV\" < ${RAWDATA_DIR}/ssb-dbgen/$1.tbl --port=$2"
${insert_data_prefix} "INSERT INTO $1 FORMAT CSV" < ${RAWDATA_DIR}/ssb-dbgen/$1.tbl --port=$2
}
;;
part)
echo ${create_table_prefix}\"${part_table}\"
${create_table_prefix}"${part_table}" && {
echo "${insert_data_prefix} \"INSERT INTO $1 FORMAT CSV\" < ${RAWDATA_DIR}/ssb-dbgen/$1.tbl --port=$2"
${insert_data_prefix} "INSERT INTO $1 FORMAT CSV" < ${RAWDATA_DIR}/ssb-dbgen/$1.tbl --port=$2
}
;;
supplier)
echo ${create_table_prefix}"${supplier_table}"
${create_table_prefix}"${supplier_table}" && {
echo "${insert_data_prefix} \"INSERT INTO $1 FORMAT CSV\" < ${RAWDATA_DIR}/ssb-dbgen/$1.tbl --port=$2"
${insert_data_prefix} "INSERT INTO $1 FORMAT CSV" < ${RAWDATA_DIR}/ssb-dbgen/$1.tbl --port=$2
}
;;
lineorder)
echo ${create_table_prefix}"${lineorder_table}"
${create_table_prefix}"${lineorder_table}" && {
echo "${insert_data_prefix} \"INSERT INTO $1 FORMAT CSV\" < ${RAWDATA_DIR}/ssb-dbgen/$1.tbl --port=$2"
${insert_data_prefix} "INSERT INTO $1 FORMAT CSV" < ${RAWDATA_DIR}/ssb-dbgen/$1.tbl --port=$2
}
;;
lineorder_flat)
echo ${create_table_prefix}"${lineorder_flat_table}"
${create_table_prefix}"${lineorder_flat_table}"
return 0
;;
*)
exit 0
;;
esac
}
function check_sql(){
select_sql="select * from "$1" limit 1"
clickhouse client --host ${ckhost} --port $2 --multiquery -q"${select_sql}"
}
function check_table(){
checknum=0
source_tables="customer part supplier lineorder lineorder_flat"
test_tables=${1:-${source_tables}}
echo "Checking table data required in server..."
for i in $(seq 0 $[inst_num-1])
do
for j in `echo ${test_tables}`
do
check_sql $j ${ckport[i]} &> /dev/null || {
let checknum+=1 && insert_data "$j" ${ckport[i]}
}
done
done
for i in $(seq 0 $[inst_num-1])
do
echo "clickhouse client --host ${ckhost} --port ${ckport[i]} -m -q\"select count() from ${TABLE_NAME};\""
var=$(clickhouse client --host ${ckhost} --port ${ckport[i]} -m -q"select count() from ${TABLE_NAME};")
if [ $var -eq $TABLE_ROWS ];then
echo "Instance_${i} Table data integrity check OK -> Rows:$var"
else
echo "Instance_${i} Table data integrity check Failed -> Rows:$var"
exit 1
fi
done
if [ $checknum -gt 0 ];then
echo "Need sleep 10s after first table data insertion...$checknum"
sleep 10
fi
}
function check_instance(){
instance_alive=0
for i in {1..10}
do
sleep 1
netstat -nltp | grep ${1} > /dev/null
if [ $? -ne 1 ];then
instance_alive=1
break
fi
done
if [ $instance_alive -eq 0 ];then
echo "check_instance -> clickhouse server instance faild to launch due to 10s timeout!"
exit 1
else
echo "check_instance -> clickhouse server instance launch successfully!"
fi
}
function start_clickhouse_for_insertion(){
echo "start_clickhouse_for_insertion"
for i in $(seq 0 $[inst_num-1])
do
echo "cd ${database_dir}/$1${dir_server[i]}"
echo "${SERVER_BIND_CMD[i]} clickhouse server -C config_${1}${dir_server[i]}.xml >&${LOG_DIR}/${1}_${i}_server_log& > /dev/null"
cd ${database_dir}/$1${dir_server[i]}
${SERVER_BIND_CMD[i]} clickhouse server -C config_${1}${dir_server[i]}.xml >&${LOG_DIR}/${1}_${i}_server_log& > /dev/null
check_instance ${ckport[i]}
done
}
function start_clickhouse_for_stressing(){
echo "start_clickhouse_for_stressing"
for i in $(seq 0 $[inst_num-1])
do
echo "cd ${database_dir}/$1${dir_server[i]}"
echo "${SERVER_BIND_CMD[i]} clickhouse server -C config_${1}${dir_server[i]}.xml >&/dev/null&"
cd ${database_dir}/$1${dir_server[i]}
${SERVER_BIND_CMD[i]} clickhouse server -C config_${1}${dir_server[i]}.xml >&/dev/null&
check_instance ${ckport[i]}
done
}
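# Prerequisites and raw data: install build tools and the Python client
# dependencies, then fetch and build ssb-dbgen to generate the SSB *.tbl
# inputs at scale factor ${SSB_GEN_FACTOR}.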
yum -y install git make gcc sudo net-tools &> /dev/null
pip3 install clickhouse_driver numpy &> /dev/null
test -d ${RAWDATA_DIR}/ssb-dbgen || git clone https://github.com/vadimtk/ssb-dbgen.git ${RAWDATA_DIR}/ssb-dbgen
cd ${RAWDATA_DIR}/ssb-dbgen
if [ ! -f ${RAWDATA_DIR}/ssb-dbgen/dbgen ];then
make && {
test -f ${RAWDATA_DIR}/ssb-dbgen/customer.tbl || echo y |./dbgen -s ${SSB_GEN_FACTOR} -T c
test -f ${RAWDATA_DIR}/ssb-dbgen/part.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T p
test -f ${RAWDATA_DIR}/ssb-dbgen/supplier.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T s
test -f ${RAWDATA_DIR}/ssb-dbgen/date.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T d
test -f ${RAWDATA_DIR}/ssb-dbgen/lineorder.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T l
}
else
test -f ${RAWDATA_DIR}/ssb-dbgen/customer.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T c
test -f ${RAWDATA_DIR}/ssb-dbgen/part.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T p
test -f ${RAWDATA_DIR}/ssb-dbgen/supplier.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T s
test -f ${RAWDATA_DIR}/ssb-dbgen/date.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T d
test -f ${RAWDATA_DIR}/ssb-dbgen/lineorder.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T l
fi
filenum=`find ${RAWDATA_DIR}/ssb-dbgen/ -name "*.tbl" | wc -l`
if [ $filenum -ne 5 ];then
echo "generate ssb data file *.tbl faild"
exit 1
fi
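# kill_instance: pkill the servers (up to two rounds, 5s apart) and verify
# every instance port has been released before returning.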
function kill_instance(){
instance_alive=1
for i in {1..2}
do
pkill clickhouse && sleep 5
instance_alive=0
for i in $(seq 0 $[inst_num-1])
do
netstat -nltp | grep ${ckport[i]} > /dev/null
if [ $? -ne 1 ];then
instance_alive=1
break;
fi
done
if [ $instance_alive -eq 0 ];then
break;
fi
done
if [ $instance_alive -eq 0 ];then
echo "kill_instance OK!"
else
echo "kill_instance Failed -> clickhouse server instance still alive due to 10s timeout"
exit 1
fi
}
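# run_test: full cycle for one codec - start servers for insertion, verify
# table data, restart servers for stressing, drive the Python client
# workload, then confirm the client log reports "Finished".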
function run_test(){
is_xml=0
for i in $(seq 0 $[inst_num-1])
do
if [ -f ${database_dir}/${1}${dir_server[i]}/config_${1}${dir_server[i]}.xml ]; then
is_xml=$[is_xml+1]
fi
done
if [ $is_xml -eq $inst_num ];then
echo "Benchmark with $inst_num instance"
start_clickhouse_for_insertion ${1}
for i in $(seq 0 $[inst_num-1])
do
clickhouse client --host ${ckhost} --port ${ckport[i]} -m -q"show databases;" >/dev/null
done
if [ $? -eq 0 ];then
check_table
fi
kill_instance
if [ $1 == "deflate" ];then
            # Server logs are written per instance as ${1}_${i}_server_log, so
            # scan all of them for the DeflateJobHWPool marker.
            deflatemsg=`cat ${LOG_DIR}/${1}_*_server_log 2>/dev/null | grep DeflateJobHWPool`
if [ -n "$deflatemsg" ];then
echo ------------------------------------------------------
echo $deflatemsg
echo ------------------------------------------------------
fi
fi
echo "Check table data required in server_${1} -> Done! "
start_clickhouse_for_stressing ${1}
for i in $(seq 0 $[inst_num-1])
do
clickhouse client --host ${ckhost} --port ${ckport[i]} -m -q"show databases;" >/dev/null
done
if [ $? -eq 0 ];then
test -d ${CLIENT_SCRIPTS_DIR} && cd ${CLIENT_SCRIPTS_DIR}
echo "Client stressing... "
echo "${CLIENT_BIND_CMD} python3 client_stressing_test.py ${QUERY_FILE} $inst_num &> ${LOG_DIR}/${1}.log"
${CLIENT_BIND_CMD} python3 client_stressing_test.py ${QUERY_FILE} $inst_num &> ${LOG_DIR}/${1}.log
echo "Completed client stressing, checking log... "
finish_log=`grep "Finished" ${LOG_DIR}/${1}.log | wc -l`
if [ $finish_log -eq 1 ] ;then
kill_instance
test -f ${LOG_DIR}/${1}.log && echo "${1}.log ===> ${LOG_DIR}/${1}.log"
else
kill_instance
echo "No find 'Finished' in client log -> Performance test may fail"
exit 1
fi
else
echo "${1} clickhouse server start fail"
exit 1
fi
else
echo "clickhouse server start fail -> Please check xml files required in ${database_dir} for each instance"
exit 1
fi
}
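# Log helpers: wipe ${LOG_DIR} between codecs and collect per-codec logs
# into ${LOG_PACK_FILE} under ${OUTPUT_DIR}.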
function clear_log(){
if [ -d "$LOG_DIR" ]; then
cd ${LOG_DIR} && rm -rf *
fi
}
function gather_log_for_codec(){
cd ${OUTPUT_DIR} && mkdir -p ${LOG_PACK_FILE}/${1}
cp -rf ${LOG_DIR} ${OUTPUT_DIR}/${LOG_PACK_FILE}/${1}
}
function pack_log(){
if [ -e "${OUTPUT_DIR}/run.log" ]; then
cp ${OUTPUT_DIR}/run.log ${OUTPUT_DIR}/${LOG_PACK_FILE}/
fi
echo "Please check all log information in ${OUTPUT_DIR}/${LOG_PACK_FILE}"
}
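# setup_check: confirm IAA devices are enumerated by accel-config and record
# the accel-config, clickhouse, and kernel idxd details for the report.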
function setup_check(){
    # Count available IAA devices via accel-config, retrying once before
    # giving up.
    iax_dev_num=`accel-config list | grep iax | wc -l`
    if [ $iax_dev_num -eq 0 ] ;then
        iax_dev_num=`accel-config list | grep iax | wc -l`
    fi
    if [ $iax_dev_num -eq 0 ] ;then
        echo "No IAA devices available -> Please check IAA hardware setup manually!"
        exit 1
    else
        echo "IAA enabled devices number:$iax_dev_num"
    fi
    libaccel_version=`accel-config -v`
    clickhouse_version=`clickhouse server --version`
    kernel_idxd_log=`dmesg | grep idxd`
    echo "libaccel_version:$libaccel_version"
    echo "clickhouse_version:$clickhouse_version"
    echo -e "idxd section in kernel log:\n$kernel_idxd_log"
}
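# Main driver: verify the IAA setup, disable the server watchdog so killed
# instances stay down, then run and archive one benchmark pass per codec.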
setup_check
export CLICKHOUSE_WATCHDOG_ENABLE=0
for i in ${CODEC_CONFIG[@]}
do
clear_log
codec=${i}
echo "run test------------$codec"
run_test $codec
gather_log_for_codec $codec
done
pack_log
echo "Done."

View File

@ -1,278 +0,0 @@
from operator import eq
import os
import random
import time
import sys
from clickhouse_driver import Client
import numpy as np
import subprocess
import multiprocessing
from multiprocessing import Manager
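# Benchmark tuning knobs: warm-up/calibration iteration counts, the target
# wall time per stress run, the client-count ceiling, and how often (and how
# tolerantly) a run is retried when its duration misses the target.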
warmup_runs = 10
calculated_runs = 10
seconds = 30
max_instances_number = 8
retest_number = 3
retest_tolerance = 10
def checkInt(s):
    try:
        int(s)
        return True
    except ValueError:
        return False
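# One clickhouse_driver connection per client; client indices 0-3 use ports
# 9000-9003 and indices 4-7 skip ahead to 9008-9011 (presumably matching the
# server ports configured by the accompanying shell script).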
def setup_client(index):
if index < 4:
port_idx = index
else:
port_idx = index + 4
client = Client(
host="localhost",
database="default",
user="default",
password="",
port="900%d" % port_idx,
)
union_mode_query = "SET union_default_mode='DISTINCT'"
client.execute(union_mode_query)
return client
def warm_client(clientN, clientL, query, loop):
for c_idx in range(clientN):
for _ in range(loop):
clientL[c_idx].execute(query)
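# The queries file holds one "<id>$<sql>" pair per line; '$' separates the
# query identifier from the query text.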
def read_queries(queries_list):
queries = list()
queries_id = list()
with open(queries_list, "r") as f:
for line in f:
line = line.rstrip()
line = line.split("$")
queries_id.append(line[0])
queries.append(line[1])
return queries_id, queries
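# run_task: the measured stress loop for a single client - execute the query
# `loop` times, append each server-reported latency to the shared list, and
# print this client's P95 latency and QPS.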
def run_task(client, cname, query, loop, query_latency):
start_time = time.time()
for i in range(loop):
client.execute(query)
query_latency.append(client.last_query.elapsed)
end_time = time.time()
p95 = np.percentile(query_latency, 95)
    print(
        "CLIENT: %s end. -> P95: %f, qps: %f"
        % (cname, p95, loop / (end_time - start_time))
    )
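# run_multi_clients: fan the same query out to one process per client, wait
# for all of them, then return the total wall time and the P95 latency (ms)
# across every client's measurements.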
def run_multi_clients(clientN, clientList, query, loop):
client_pids = {}
start_time = time.time()
    manager = multiprocessing.Manager()
    # One shared latency list per possible client, so each worker process can
    # append its per-query latencies without extra synchronization.
    query_latency_lists = [manager.list() for _ in range(max_instances_number)]
    for c_idx in range(clientN):
        if c_idx >= max_instances_number:
            print("ERROR: CLIENT number mismatch!!")
            exit()
        client_name = "Role_%d" % c_idx
        client_pids[c_idx] = multiprocessing.Process(
            target=run_task,
            args=(clientList[c_idx], client_name, query, loop, query_latency_lists[c_idx]),
        )
        print("CLIENT: %s start" % client_name)
        client_pids[c_idx].start()
for c_idx in range(clientN):
client_pids[c_idx].join()
end_time = time.time()
totalT = end_time - start_time
    query_latencyTotal = list()
    for latency_list in query_latency_lists:
        for item in latency_list:
            query_latencyTotal.append(item)
totalP95 = np.percentile(query_latencyTotal, 95) * 1000
return totalT, totalP95
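# Calibration-only variants: time the query loop without publishing per-query
# latencies; the wall time is used to size the iteration count of the real run.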
def run_task_calculated(client, cname, query, loop):
query_latency = list()
start_time = time.time()
for i in range(loop):
client.execute(query)
query_latency.append(client.last_query.elapsed)
end_time = time.time()
p95 = np.percentile(query_latency, 95)
def run_multi_clients_calculated(clientN, clientList, query, loop):
client_pids = {}
start_time = time.time()
for c_idx in range(clientN):
client_name = "Role_%d" % c_idx
client_pids[c_idx] = multiprocessing.Process(
            target=run_task_calculated,
args=(clientList[c_idx], client_name, query, loop),
)
client_pids[c_idx].start()
for c_idx in range(clientN):
client_pids[c_idx].join()
end_time = time.time()
totalT = end_time - start_time
return totalT
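# Entry point: parse <queries_file> <client_number>, connect the clients,
# warm every query once, then calibrate and stress each query in turn.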
if __name__ == "__main__":
client_number = 1
queries = list()
queries_id = list()
if len(sys.argv) != 3:
print(
"usage: python3 client_stressing_test.py [queries_file_path] [client_number]"
)
sys.exit()
else:
queries_list = sys.argv[1]
client_number = int(sys.argv[2])
print(
"queries_file_path: %s, client_number: %d" % (queries_list, client_number)
)
if not os.path.isfile(queries_list) or not os.access(queries_list, os.R_OK):
print("please check the right path for queries file")
sys.exit()
if (
not checkInt(sys.argv[2])
or int(sys.argv[2]) > max_instances_number
or int(sys.argv[2]) < 1
):
print("client_number should be in [1~%d]" % max_instances_number)
sys.exit()
client_list = {}
queries_id, queries = read_queries(queries_list)
for c_idx in range(client_number):
client_list[c_idx] = setup_client(c_idx)
# clear cache
os.system("sync; echo 3 > /proc/sys/vm/drop_caches")
print("###Polit Run Begin")
for i in queries:
warm_client(client_number, client_list, i, 1)
print("###Polit Run End -> Start stressing....")
query_index = 0
for q in queries:
print(
"\n###START -> Index: %d, ID: %s, Query: %s"
% (query_index, queries_id[query_index], q)
)
warm_client(client_number, client_list, q, warmup_runs)
print("###Warm Done!")
for j in range(0, retest_number):
            totalT = run_multi_clients_calculated(
client_number, client_list, q, calculated_runs
)
curr_loop = int(seconds * calculated_runs / totalT) + 1
print(
"###Calculation Done! -> loopN: %d, expected seconds:%d"
% (curr_loop, seconds)
)
print("###Stress Running! -> %d iterations......" % curr_loop)
totalT, totalP95 = run_multi_clients(
client_number, client_list, q, curr_loop
)
if totalT > (seconds - retest_tolerance) and totalT < (
seconds + retest_tolerance
):
break
else:
print(
"###totalT:%d is far way from expected seconds:%d. Run again ->j:%d!"
% (totalT, seconds, j)
)
print(
"###Completed! -> ID: %s, clientN: %d, totalT: %.2f s, latencyAVG: %.2f ms, P95: %.2f ms, QPS_Final: %.2f"
% (
queries_id[query_index],
client_number,
totalT,
totalT * 1000 / (curr_loop * client_number),
totalP95,
((curr_loop * client_number) / totalT),
)
)
query_index += 1
print("###Finished!")

Some files were not shown because too many files have changed in this diff